Diffstat (limited to 'foreman')
-rw-r--r--  foreman/build/Makefile                          |    6
-rw-r--r--  foreman/build/cache.mk                          |    2
-rw-r--r--  foreman/build/opnfv-genesis.spec                |   17
-rw-r--r--  foreman/ci/Vagrantfile                          |   13
-rwxr-xr-x  foreman/ci/bootstrap.sh                         |    5
-rwxr-xr-x  foreman/ci/clean.sh                             |  217
-rwxr-xr-x  foreman/ci/deploy.sh                            | 1672
-rw-r--r--  foreman/ci/inventory/lf_pod2_ksgen_settings.yml |   36
-rw-r--r--  foreman/ci/opnfv_ksgen_settings.yml             |   35
-rw-r--r--  foreman/ci/opnfv_ksgen_settings_no_HA.yml       |  272
-rw-r--r--  foreman/ci/reload_playbook.yml                  |    1
-rwxr-xr-x  foreman/ci/resize_lvm.sh                        |   37
-rwxr-xr-x  foreman/ci/resize_partition.sh                  |   33
-rwxr-xr-x  foreman/ci/vm_nodes_provision.sh                |   59
-rw-r--r--  foreman/docs/src/installation-instructions.rst  |  448
-rw-r--r--  foreman/docs/src/release-notes.rst              |  161
16 files changed, 2325 insertions(+), 689 deletions(-)
diff --git a/foreman/build/Makefile b/foreman/build/Makefile
index 8b87ce61e..2d2a2a7ad 100644
--- a/foreman/build/Makefile
+++ b/foreman/build/Makefile
@@ -26,7 +26,7 @@ export VBOXDNLD = http://download.virtualbox.org/virtualbox/rpm/el/7.1/x86_64/Vi
export VBOXRPM = $(shell pwd)/VirtualBox-4.3-4.3.26_98988_el7-1.x86_64.rpm
export VAGRANTDNLD = https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm
export VAGRANTRPM = $(shell pwd)/vagrant_1.7.2_x86_64.rpm
-export GENESISRPM = $(shell pwd)/x86_64/opnfv-genesis-0.1-1.x86_64.rpm
+export GENESISRPM = $(shell pwd)/x86_64/opnfv-genesis-0.2-1.x86_64.rpm
# Note! Invoke with "make REVSTATE=RXXXX all" to make release build!
# Invoke with ICOCACHE=/full/path/to/iso if cached ISO is in non-standard location.
@@ -106,8 +106,8 @@ rpm-clean:
.PHONY: rpm
rpm:
- pushd ../../ && git archive --format=tar --prefix=opnfv-genesis-0.1/ HEAD | gzip > foreman/build/opnfv-genesis.tar.gz
- rpmbuild -ba opnfv-genesis.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
+ pushd ../../ && git archive --format=tar --prefix=opnfv-genesis-0.2/ HEAD | gzip > foreman/build/opnfv-genesis.tar.gz
+ rpmbuild -ba opnfv-genesis.spec -D '_topdir %(echo `pwd`)' -D '_builddir %(echo `pwd`)' -D '_sourcedir %(echo `pwd`)' -D '_rpmdir %(echo `pwd`)' -D '_specdir %(echo `pwd`)' -D '_srcrpmdir %(echo `pwd`)'
@make rpm-clean
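
The rpm target overrides every standard rpmbuild directory macro so all build artifacts land in the build directory rather than ~/rpmbuild, and the tar prefix must match the bumped spec Version. A minimal standalone sketch of the same invocation, assuming the tarball has already been generated by the first recipe line:

    cd foreman/build
    # the archive prefix must be opnfv-genesis-0.2/ to match the spec
    tar -tzf opnfv-genesis.tar.gz | head -n 3
    rpmbuild -ba opnfv-genesis.spec \
        -D "_topdir $PWD" -D "_builddir $PWD" -D "_sourcedir $PWD" \
        -D "_rpmdir $PWD" -D "_specdir $PWD" -D "_srcrpmdir $PWD"
    ls x86_64/opnfv-genesis-0.2-1.x86_64.rpm   # binary RPM referenced by GENESISRPM
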
diff --git a/foreman/build/cache.mk b/foreman/build/cache.mk
index fdfd0034a..56b72731b 100644
--- a/foreman/build/cache.mk
+++ b/foreman/build/cache.mk
@@ -16,6 +16,8 @@ CACHECLEAN := $(addsuffix .clean,$(CACHEFILES) $(CACHEDIRS))
# BEGIN of variables to customize
#
CACHEFILES += .versions
+CACHEFILES += $(shell basename $(VAGRANTRPM))
+CACHEFILES += $(shell basename $(VBOXRPM))
CACHEFILES += $(shell basename $(ISOSRC))
#
# END of variables to customize
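
The two new CACHEFILES entries cache the downloaded Vagrant and VirtualBox RPMs under their bare file names; $(shell basename ...) strips the $(shell pwd) prefix set in the Makefile. The equivalent shell expansion, using the paths defined above:

    VAGRANTRPM=$PWD/vagrant_1.7.2_x86_64.rpm
    VBOXRPM=$PWD/VirtualBox-4.3-4.3.26_98988_el7-1.x86_64.rpm
    basename "$VAGRANTRPM"   # -> vagrant_1.7.2_x86_64.rpm
    basename "$VBOXRPM"      # -> VirtualBox-4.3-4.3.26_98988_el7-1.x86_64.rpm
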
diff --git a/foreman/build/opnfv-genesis.spec b/foreman/build/opnfv-genesis.spec
index 674760fea..30692b4e1 100644
--- a/foreman/build/opnfv-genesis.spec
+++ b/foreman/build/opnfv-genesis.spec
@@ -1,5 +1,5 @@
Name: opnfv-genesis
-Version: 0.1
+Version: 0.2
Release: 1
Summary: The files from the OPNFV genesis repo
@@ -8,8 +8,8 @@ License: Apache 2.0
URL: https://gerrit.opnfv.org/gerrit/genesis.git
Source0: opnfv-genesis.tar.gz
-#BuildRequires:
-Requires: vagrant, VirtualBox-4.3
+#BuildRequires:
+Requires: vagrant, VirtualBox-4.3, net-tools
%description
The files from the OPNFV genesis repo
@@ -21,13 +21,16 @@ The files from the OPNFV genesis repo
%build
%install
-mkdir -p %{buildroot}/usr/bin/
-cp foreman/ci/deploy.sh %{buildroot}/usr/bin/
+mkdir -p %{buildroot}/root/genesis
+cp -r foreman/ %{buildroot}/root/genesis
+cp -r common/ %{buildroot}/root/genesis
%files
-/usr/bin/deploy.sh
+/root/genesis
%changelog
-* Fri Apr 24 2015 Dan Radez <dradez@redhatcom> - 0.1-1
+* Tue Sep 15 2015 Dan Radez <dradez@redhat.com> - 0.2-1
+- Updating the install files and cleaning up white space
+* Fri Apr 24 2015 Dan Radez <dradez@redhat.com> - 0.1-1
- Initial Packaging
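
Since %install now ships the whole foreman/ and common/ trees under /root/genesis instead of a single /usr/bin/deploy.sh, the payload and the new net-tools dependency can be checked without installing the package; a quick sketch against the built RPM:

    rpm -qlp x86_64/opnfv-genesis-0.2-1.x86_64.rpm | head   # files should sit under /root/genesis
    rpm -qp --requires x86_64/opnfv-genesis-0.2-1.x86_64.rpm | grep -E 'vagrant|VirtualBox|net-tools'
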
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile
index 100e12db0..c7dfc0335 100644
--- a/foreman/ci/Vagrantfile
+++ b/foreman/ci/Vagrantfile
@@ -12,7 +12,7 @@ Vagrant.configure(2) do |config|
# Every Vagrant development environment requires a box. You can search for
# boxes at https://atlas.hashicorp.com/search.
- config.vm.box = "chef/centos-7.0"
+ config.vm.box = "opnfv/centos-7.0"
# Disable automatic box update checking. If you disable this, then
# boxes will only be checked for updates when the user runs
@@ -41,6 +41,9 @@ Vagrant.configure(2) do |config|
default_gw = ""
nat_flag = false
+ # Disable dhcp flag
+ disable_dhcp_flag = false
+
# Share an additional folder to the guest VM. The first argument is
# the path on the host to the actual folder. The second argument is
# the path on the guest to mount the folder. And the optional third
@@ -77,10 +80,11 @@ Vagrant.configure(2) do |config|
# sudo apt-get update
# sudo apt-get install -y apache2
# SHELL
-
+
config.ssh.username = 'root'
config.ssh.password = 'vagrant'
config.ssh.insert_key = 'true'
+ config.vm.provision :shell, path: "resize_partition.sh"
config.vm.provision "ansible" do |ansible|
ansible.playbook = "reload_playbook.yml"
end
@@ -90,4 +94,9 @@ Vagrant.configure(2) do |config|
config.vm.provision :shell, path: "nat_setup.sh"
end
config.vm.provision :shell, path: "bootstrap.sh"
+ if disable_dhcp_flag
+ config.vm.provision :shell, :inline => "systemctl stop dhcpd"
+ config.vm.provision :shell, :inline => "systemctl disable dhcpd"
+ end
+ config.vm.provision :shell, path: "resize_lvm.sh"
end
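
Vagrant runs provisioners in declaration order, so the chain above is: resize_partition.sh, the ansible reload playbook, optional NAT setup, bootstrap.sh, the optional dhcpd shutdown, and resize_lvm.sh last. To replay the chain on an existing VM (a sketch; /var/opt/opnfv/foreman_vm is the directory deploy.sh uses for this Vagrantfile):

    cd /var/opt/opnfv/foreman_vm
    vagrant provision                              # re-runs every provisioner in the declared order
    vagrant ssh -c 'systemctl is-enabled dhcpd'    # reports disabled if disable_dhcp_flag was set
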
diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh
index 4bc22ed26..c98f00e6c 100755
--- a/foreman/ci/bootstrap.sh
+++ b/foreman/ci/bootstrap.sh
@@ -25,8 +25,7 @@ green=`tput setaf 2`
yum install -y epel-release-7*
# Install other required packages
-# Major version is pinned to force some consistency for Arno
-if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then
+if ! yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then
printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2
exit 1
fi
@@ -36,7 +35,7 @@ cd /opt
echo "Cloning khaleesi to /opt"
if [ ! -d khaleesi ]; then
- if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then
+ if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then
printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2
exit 1
fi
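
The clone is now pinned to the opnfv branch of the khaleesi fork instead of the v1.0 tag; whether an existing /opt/khaleesi checkout matches can be verified with (a sketch):

    cd /opt/khaleesi && git rev-parse --abbrev-ref HEAD   # expect: opnfv
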
diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh
index f61ac9372..345864b27 100755
--- a/foreman/ci/clean.sh
+++ b/foreman/ci/clean.sh
@@ -3,22 +3,23 @@
#Clean script to uninstall provisioning server for Foreman/QuickStack
#author: Tim Rozet (trozet@redhat.com)
#
-#Uses Vagrant and VirtualBox
+#Removes Libvirt, KVM, Vagrant, VirtualBox
#
-#Destroys Vagrant VM running in /tmp/bgs_vagrant
+#Destroys Vagrant VMs running in $vm_dir/
#Shuts down all nodes found in Khaleesi settings
-#Removes hypervisor kernel modules (VirtualBox)
+#Removes hypervisor kernel modules (VirtualBox & KVM/Libvirt)
##VARS
reset=`tput sgr0`
blue=`tput setaf 4`
red=`tput setaf 1`
green=`tput setaf 2`
+vm_dir=/var/opt/opnfv
##END VARS
##FUNCTIONS
display_usage() {
- echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+ echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
echo -e "\nUsage:\n$0 [arguments] \n"
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of ksgen settings file to parse. Required. Will provide BMC info to shutdown hosts. Example: -base_config /opt/myinventory.yml \n"
@@ -31,7 +32,7 @@ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
exit 0
fi
-echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n"
+echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n"
echo "Use -h to display help"
sleep 2
@@ -50,54 +51,55 @@ do
esac
done
-
-# Install ipmitool
-# Major version is pinned to force some consistency for Arno
-if ! yum list installed | grep -i ipmitool; then
- if ! yum -y install ipmitool-1*; then
- echo "${red}Unable to install ipmitool!${reset}"
- exit 1
- fi
-else
- echo "${blue}Skipping ipmitool as it is already installed!${reset}"
-fi
-
-###find all the bmc IPs and number of nodes
-node_counter=0
-output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'`
-for line in ${output} ; do
- bmc_ip[$node_counter]=$line
- ((node_counter++))
-done
-
-max_nodes=$((node_counter-1))
-
-###find bmc_users per node
-node_counter=0
-output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
-for line in ${output} ; do
- bmc_user[$node_counter]=$line
- ((node_counter++))
-done
-
-###find bmc_pass per node
-node_counter=0
-output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
-for line in ${output} ; do
- bmc_pass[$node_counter]=$line
- ((node_counter++))
-done
-
-for mynode in `seq 0 $max_nodes`; do
- echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
- if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
- echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Unable to install ipmitool!${reset}"
+ exit 1
+ fi
else
- echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
- exit 1
+ echo "${blue}Skipping ipmitool as it is already installed!${reset}"
fi
-done
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+ output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then
+ echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}"
+ else
+ echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}"
+ exit 1
+ fi
+ done
+else
+ echo "${blue}Skipping Baremetal node poweroff as base_config was not provided${reset}"
+fi
###check to see if vbox is installed
vboxpkg=`rpm -qa | grep VirtualBox`
if [ $? -eq 0 ]; then
@@ -106,39 +108,120 @@ else
skip_vagrant=1
fi
+###legacy VM location check
+###remove me later
+if [ -d /tmp/bgs_vagrant ]; then
+ cd /tmp/bgs_vagrant
+ vagrant destroy -f
+ rm -rf /tmp/bgs_vagrant
+fi
+
###destroy vagrant
if [ $skip_vagrant -eq 0 ]; then
- cd /tmp/bgs_vagrant
- if vagrant destroy -f; then
- echo "${blue}Successfully destroyed Foreman VM ${reset}"
+ if [ -d $vm_dir ]; then
+ ##all vm directories
+ for vm in $( ls $vm_dir ); do
+ cd $vm_dir/$vm
+ if vagrant destroy -f; then
+ echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}"
+ else
+ echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}"
+ killall vagrant
+ echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
+ if ps axf | grep vagrant | grep -v 'grep'; then
+ echo "${red}Vagrant process still exists after kill...exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
+ fi
+ fi
+
+ ##Vagrant boxes appear as VboxHeadless processes
+ ##try to gracefully destroy the VBox VM if it still exists
+ if vboxmanage list runningvms | grep $vm; then
+ echo "${red} $vm VBoxHeadless process still exists...Removing${reset}"
+ vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g')
+ vboxmanage controlvm $vbox_id poweroff
+ if vboxmanage unregistervm --delete $vbox_id; then
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ else
+ echo "${red} Unable to delete VM $vm ...Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}$vm VM is successfully deleted! ${reset}"
+ fi
+ done
else
- echo "${red}Unable to destroy Foreman VM ${reset}"
- echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}"
- if ps axf | grep vagrant; then
- echo "${red}Vagrant VM still exists...exiting ${reset}"
- exit 1
- else
- echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}"
- fi
+ echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}"
fi
+ echo "${blue}Checking for any remaining virtual box processes...${reset}"
###kill virtualbox
- echo "${blue}Killing VirtualBox ${reset}"
- killall virtualbox
- killall VBoxHeadless
+ if ps axf | grep virtualbox | grep -v 'grep'; then
+ echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}"
+ killall virtualbox
+ fi
+
+ ###kill any leftover VMs (brute force)
+ if ps axf | grep VBoxHeadless | grep -v 'grep'; then
+ echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}"
+ killall VBoxHeadless
+ fi
###remove virtualbox
- echo "${blue}Removing VirtualBox ${reset}"
+ echo "${blue}Removing VirtualBox... ${reset}"
yum -y remove $vboxpkg
else
- echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}"
+ echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}"
fi
+###remove working vm directory
+echo "${blue}Removing working VM directory: $vm_dir ${reset}"
+rm -rf $vm_dir
+
+###check to see if libvirt is installed
+echo "${blue}Checking if libvirt/KVM is installed${reset}"
+if rpm -qa | grep -iE 'libvirt|kvm'; then
+ echo "${blue}Libvirt/KVM is installed${reset}"
+ echo "${blue}Checking for any QEMU/KVM VMs...${reset}"
+ vm_count=0
+ while read -r line; do ((vm_count++)); done < <(virsh list --all | sed 1,2d | head -n -1)
+ if [ $vm_count -gt 0 ]; then
+ echo "${blue}VMs Found: $vm_count${reset}"
+ vm_running=0
+ while read -r line; do ((vm_running++)); done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running)
+ echo "${blue}Powering off $vm_running VM(s)${reset}"
+ while read -r vm; do
+ if ! virsh destroy $vm; then
+ echo "${red}WARNING: Unable to power off VM ${vm}${reset}"
+ else
+ echo "${blue}VM $vm powered off!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running | sed 's/^[ \t]*//' | awk '{print $2}')
+ echo "${blue}Destroying libvirt VMs...${reset}"
+ while read -r vm; do
+ if ! virsh undefine --remove-all-storage $vm; then
+ echo "${red}ERROR: Unable to remove the VM ${vm}${reset}"
+ exit 1
+ else
+ echo "${blue}VM $vm removed!${reset}"
+ fi
+ done < <(virsh list --all | sed 1,2d | head -n -1| awk '{print $2}')
+ else
+ echo "${blue}No VMs found for removal${reset}"
+ fi
+ echo "${blue}Removing libvirt and kvm packages${reset}"
+ yum -y remove libvirt-*
+ yum -y remove *qemu*
+else
+ echo "${blue}libvirt/KVM is not installed${reset}"
+fi
###remove kernel modules
echo "${blue}Removing kernel modules ${reset}"
-for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do
+for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do
if ! rmmod $kernel_mod; then
if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then
echo "${blue} $kernel_mod is not currently loaded! ${reset}"
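
Unload order in the module list matters: the vboxnet* modules depend on vboxdrv, and kvm_intel depends on kvm, so each dependent module must be removed before its base. An alternative sketch that checks lsmod up front instead of parsing rmmod's error text:

    for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do
        if lsmod | grep -q "^${kernel_mod}\b"; then
            rmmod "$kernel_mod" || echo "failed to remove $kernel_mod"
        fi
    done
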
diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh
index 86f03a743..6bf8f12aa 100755
--- a/foreman/ci/deploy.sh
+++ b/foreman/ci/deploy.sh
@@ -25,6 +25,14 @@ red=`tput setaf 1`
green=`tput setaf 2`
declare -A interface_arr
+declare -A controllers_ip_arr
+declare -A admin_ip_arr
+declare -A public_ip_arr
+
+vagrant_box_dir=~/.vagrant.d/boxes/opnfv-VAGRANTSLASH-centos-7.0/1.0.0/virtualbox/
+vagrant_box_vmdk=box-disk1.vmdk
+vm_dir=/var/opt/opnfv
+script=`realpath $0`
##END VARS
##FUNCTIONS
@@ -35,6 +43,38 @@ display_usage() {
echo -e "\n -no_parse : No variable parsing into config. Flag. \n"
echo -e "\n -base_config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: -base_config /opt/myinventory.yml \n"
echo -e "\n -virtual : Node virtualization instead of baremetal. Flag. \n"
+ echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n"
+ echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must be at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n"
+ echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n"
+ echo -e "\n -floating_ip_count : number of IP addresses from the public range to be used for floating IPs. Default is 20.\n"
+ echo -e "\n -admin_nic : Baremetal NIC for the admin network. Required if other "nic" arguments are used. \
+Not applicable with -virtual. Example: -admin_nic em1"
+ echo -e "\n -private_nic : Baremetal NIC for the private network. Required if other "nic" arguments are used. \
+Not applicable with -virtual. Example: -private_nic em2"
+ echo -e "\n -public_nic : Baremetal NIC for the public network. Required if other "nic" arguments are used. \
+Can also be used with -virtual. Example: -public_nic em3"
+ echo -e "\n -storage_nic : Baremetal NIC for the storage network. Optional. Not applicable with -virtual. \
+Private NIC will be used for storage if not specified. Example: -storage_nic em4"
+ echo -e "\n -single_baremetal_nic : Baremetal NIC for the all-in-one network. Optional. Not applicable with -virtual. \
+Example: -single_baremetal_nic em1"
+}
+
+##verify vm dir exists
+##params: none
+function verify_vm_dir {
+ if [ -d "$vm_dir" ]; then
+ echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists. Environment not clean. Please use clean.sh. Exiting${reset}\n\n"
+ exit 1
+ else
+ mkdir -p $vm_dir
+ fi
+
+ chmod 700 $vm_dir
+
+ if [ ! -d $vm_dir ]; then
+ echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir Exiting${reset}\n\n"
+ exit -1
+ fi
}
##find ip of interface
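
The new flags combine as follows; note that -static_ip_range and -enable_virtual_dhcp are mutually exclusive and both require -virtual, as enforced later in parse_cmdline (a hypothetical invocation):

    ./deploy.sh -virtual -static_ip_range '192.168.1.100,192.168.1.120' \
                -floating_ip_count 10 -ping_site www.google.com
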
@@ -51,6 +91,41 @@ function find_subnet {
printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))"
}
+##verify subnet has at least n IPs
+##params: subnet mask, n IPs
+function verify_subnet_size {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ num_ips_required=$2
+
+ ##this function assumes you would never need more than 254
+ ##we check here to make sure
+ if [ "$num_ips_required" -ge 254 ]; then
+ echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n"
+ return 1
+ fi
+
+ ##we just return if 3rd octet is not 255
+ ##because we know the subnet is big enough
+ if [ "$i3" -ne 255 ]; then
+ return 0
+ elif [ $((254-$i4)) -ge "$num_ips_required" ]; then
+ return 0
+ else
+ echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n"
+ return 1
+ fi
+}
+
+##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask
+## Warning: This function only works for IPv4 at the moment.
+##params: ip, netmask
+function find_last_ip_subnet {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ IFS=. read -r m1 m2 m3 m4 <<< "$2"
+ IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))"
+ printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))"
+}
+
##increments subnet by a value
##params: ip, value
##assumes low value
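
Both helpers added above work octet-wise on dotted quads: verify_subnet_size inspects only the netmask (a third mask octet below 255 already implies more than 254 hosts; otherwise 254 minus the final mask octet approximates the usable count), and find_last_ip_subnet adds the inverted mask to the network address and backs off one for broadcast. A worked example with hypothetical inputs:

    ip=10.4.1.2; mask=255.255.255.0
    IFS=. read -r i1 i2 i3 i4 <<< "$ip"
    IFS=. read -r m1 m2 m3 m4 <<< "$mask"
    # network = 10.4.1.0; last usable = network + inverted mask, minus 1 on the final octet
    printf '%d.%d.%d.%d\n' "$((255 - m1 + (i1 & m1)))" "$((255 - m2 + (i2 & m2)))" \
        "$((255 - m3 + (i3 & m3)))" "$((255 - m4 + (i4 & m4) - 1))"
    # -> 10.4.1.254
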
@@ -87,6 +162,19 @@ function next_ip {
echo $baseaddr.$lsv
}
+##subtracts a value from an IP address
+##params: last ip, ip_count
+##assumes ip_count is less than the last octet of the address
+subtract_ip() {
+ IFS=. read -r i1 i2 i3 i4 <<< "$1"
+ ip_count=$2
+ if [ $i4 -lt $ip_count ]; then
+ echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n"
+ exit 1
+ fi
+ printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))"
+}
+
##removes the network interface config from Vagrantfile
##params: interface
##assumes you are in the directory of Vagrantfile
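
subtract_ip only manipulates the final octet, which is why it refuses counts larger than that octet; hypothetical calls:

    subtract_ip 192.168.1.50 20   # -> 192.168.1.30
    subtract_ip 192.168.1.5 20    # prints the ERROR message and exits 1
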
@@ -149,19 +237,21 @@ parse_yaml() {
}'
}
-##END FUNCTIONS
-
-if [[ ( $1 == "--help") || $1 == "-h" ]]; then
+##translates the command line parameters into variables
+##params: $@ the entire command line is passed
+##usage: parse_cmdline "$@"
+parse_cmdline() {
+ if [[ ( $1 == "--help") || $1 == "-h" ]]; then
display_usage
exit 0
-fi
+ fi
-echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
-echo "Use -h to display help"
-sleep 2
+ echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n"
+ echo "Use -h to display help"
+ sleep 2
-while [ "`echo $1 | cut -c1`" = "-" ]
-do
+ while [ "`echo $1 | cut -c1`" = "-" ]
+ do
echo $1
case "$1" in
-base_config)
@@ -176,35 +266,152 @@ do
virtual="TRUE"
shift 1
;;
+ -enable_virtual_dhcp)
+ enable_virtual_dhcp="TRUE"
+ shift 1
+ ;;
+ -static_ip_range)
+ static_ip_range=$2
+ shift 2
+ ;;
+ -ping_site)
+ ping_site=$2
+ shift 2
+ ;;
+ -floating_ip_count)
+ floating_ip_count=$2
+ shift 2
+ ;;
+ -admin_nic)
+ admin_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -private_nic)
+ private_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -public_nic)
+ public_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -storage_nic)
+ storage_nic=$2
+ shift 2
+ nic_arg_flag=1
+ ;;
+ -single_baremetal_nic)
+ single_baremetal_nic=$2
+ shift 2
+ ;;
*)
display_usage
exit 1
;;
-esac
-done
+ esac
+ done
+
+ if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. Static IP range cannot be set when using DHCP! Exiting${reset}\n\n"
+ exit 1
+ fi
+
+ if [ -z "$virtual" ]; then
+ if [ ! -z "$enable_virtual_dhcp" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. enable_virtual_dhcp can only be set when using -virtual! Exiting${reset}\n\n"
+ exit 1
+ elif [ ! -z "$static_ip_range" ]; then
+ echo -e "\n\n${red}ERROR: Incorrect Usage. static_ip_range can only be set when using -virtual! Exiting${reset}\n\n"
+ exit 1
+ fi
+ fi
+
+ if [ -z "$floating_ip_count" ]; then
+ floating_ip_count=20
+ fi
+
+ ##Validate nic args
+ if [[ $nic_arg_flag -eq 1 ]]; then
+ if [ ! -z "$single_baremetal_nic" ]; then
+ echo "${red}Please do not specify other nic types along with single_baremetal_nic!${reset}"
+ exit 1
+ fi
+
+ if [ -z "$virtual" ]; then
+ for nic_type in admin_nic private_nic public_nic; do
+ eval "nic_value=\$$nic_type"
+ if [ -z "$nic_value" ]; then
+ echo "${red}$nic_type is empty or not defined. Required when other nic args are given!${reset}"
+ exit 1
+ fi
+ interface_ip=$(find_ip $nic_value)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}$nic_value does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ done
+ else
+ ##if virtual only public_nic should be specified
+ for nic_type in admin_nic private_nic storage_nic single_baremetal_nic; do
+ eval "nic_value=\$$nic_type"
+ if [ ! -z "$nic_value" ]; then
+ echo "${red}$nic_type is not a valid argument using -virtual. Please only specify public_nic!${reset}"
+ exit 1
+ fi
+ done
+
+ interface_ip=$(find_ip $public_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Public NIC: $public_nic does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ fi
+ elif [ ! -z "$single_baremetal_nic" ]; then
+ interface_ip=$(find_ip $single_baremetal_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Single Baremetal NIC: $single_baremetal_nic does not have an IP address! Exiting... ${reset}"
+ exit 1
+ fi
+ fi
+}
##disable selinux
-/sbin/setenforce 0
-
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
-
-# Install other required packages
-# Major versions are pinned to force some consistency for Arno
-if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
- printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2
- exit 1
-fi
-
-##install VirtualBox repo
-if cat /etc/*release | grep -i "Fedora release"; then
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
-else
- vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
-fi
-
-cat > /etc/yum.repos.d/virtualbox.repo << EOM
+##params: none
+##usage: disable_selinux()
+disable_selinux() {
+ /sbin/setenforce 0
+}
+
+##Install the EPEL repository and additional packages
+##params: none
+##usage: install_EPEL()
+install_EPEL() {
+ # Install EPEL repo for access to many other yum repos
+ # Major version is pinned to force some consistency for Arno
+ yum install -y epel-release-7*
+
+ # Install other required packages
+ # Major versions are pinned to force some consistency for Arno
+ if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then
+ printf '%s\n' 'deploy.sh: Unable to install dependency packages' >&2
+ exit 1
+ fi
+}
+
+##Download and install virtual box
+##params: none
+##usage: install_vbox()
+install_vbox() {
+ ##install VirtualBox repo
+ if cat /etc/*release | grep -i "Fedora release"; then
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch
+ else
+ vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch
+ fi
+
+ cat > /etc/yum.repos.d/virtualbox.repo << EOM
[virtualbox]
name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox
baseurl=$vboxurl
@@ -215,364 +422,374 @@ skip_if_unavailable = 1
keepcache = 0
EOM
-##install VirtualBox
-if ! yum list installed | grep -i virtualbox; then
- if ! yum -y install VirtualBox-4.3; then
- printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
- exit 1
+ ##install VirtualBox
+ if ! yum list installed | grep -i virtualbox; then
+ if ! yum -y install VirtualBox-4.3; then
+ printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2
+ exit 1
+ fi
fi
-fi
-##install kmod-VirtualBox
-if ! lsmod | grep vboxdrv; then
- if ! sudo /etc/init.d/vboxdrv setup; then
- printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
- exit 1
+ ##install kmod-VirtualBox
+ if ! lsmod | grep vboxdrv; then
+ sudo /etc/init.d/vboxdrv setup
+ if ! lsmod | grep vboxdrv; then
+ printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed'
-fi
+}
-##install Ansible
-if ! yum list installed | grep -i ansible; then
- if ! yum -y install ansible-1*; then
- printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
- exit 1
+##install Ansible using yum
+##params: none
+##usage: install_ansible()
+install_ansible() {
+ if ! yum list installed | grep -i ansible; then
+ if ! yum -y install ansible-1*; then
+ printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2
+ exit 1
+ fi
fi
-fi
+}
-##install Vagrant
-if ! rpm -qa | grep vagrant; then
- if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
- printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
- exit 1
+##install Vagrant RPM directly with the bintray.com site
+##params: none
+##usage: install_vagrant()
+install_vagrant() {
+ if ! rpm -qa | grep vagrant; then
+ if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then
+ printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.'
-fi
-##add centos 7 box to vagrant
-if ! vagrant box list | grep chef/centos-7.0; then
- if ! vagrant box add chef/centos-7.0 --provider virtualbox; then
- printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
- exit 1
+ ##add centos 7 box to vagrant
+ if ! vagrant box list | grep opnfv/centos-7.0; then
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2
+ exit 1
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.'
-fi
-##install workaround for centos7
-if ! vagrant plugin list | grep vagrant-centos7_fix; then
- if ! vagrant plugin install vagrant-centos7_fix; then
- printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ ##install workaround for centos7
+ if ! vagrant plugin list | grep vagrant-centos7_fix; then
+ if ! vagrant plugin install vagrant-centos7_fix; then
+ printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2
+ fi
+ else
+ printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
fi
-else
- printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.'
-fi
+}
-cd /tmp/
##remove bgs vagrant incase it wasn't cleaned up
-rm -rf /tmp/bgs_vagrant
-
-##clone bgs vagrant
-##will change this to be opnfv repo when commit is done
-if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
-fi
-
-cd bgs_vagrant
-
-echo "${blue}Detecting network configuration...${reset}"
-##detect host 1 or 3 interface configuration
-#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
-output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
-
-if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
-fi
-
-##find number of interfaces with ip and substitute in VagrantFile
-if_counter=0
-for interface in ${output}; do
-
- if [ "$if_counter" -ge 4 ]; then
- break
- fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
- fi
- new_ip=$(next_usable_ip $interface_ip)
- if [ ! "$new_ip" ]; then
- continue
- fi
- interface_arr[$interface]=$if_counter
- interface_ip_arr[$if_counter]=$new_ip
- subnet_mask=$(find_netmask $interface)
- if [ "$if_counter" -eq 1 ]; then
- private_subnet_mask=$subnet_mask
- private_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 2 ]; then
- public_subnet_mask=$subnet_mask
- public_short_subnet_mask=$(find_short_netmask $interface)
- fi
- if [ "$if_counter" -eq 3 ]; then
- storage_subnet_mask=$subnet_mask
- fi
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
- ((if_counter++))
-done
-
-##now remove interface config in Vagrantfile for 1 node
-##if 1, 3, or 4 interfaces set deployment type
-##if 2 interfaces remove 2nd interface and set deployment type
-if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
-elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
-elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
-else
- deployment_type="multi_network"
-fi
-
-echo "${blue}Network detected: ${deployment_type}! ${reset}"
-
-if route | grep default; then
- echo "${blue}Default Gateway Detected ${reset}"
- host_default_gw=$(ip route | grep default | awk '{print $3}')
- echo "${blue}Default Gateway: $host_default_gw ${reset}"
- default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
- case "${interface_arr[$default_gw_interface]}" in
- 0)
- echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- node_default_gw=$host_default_gw
- ;;
- 1)
- echo "${red}Default Gateway Detected on Private Interface!${reset}"
- echo "${red}Private subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- 2)
- echo "${blue}Default Gateway Detected on Public Interface!${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
- echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
- sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
- echo "${blue}Setting node gateway to be VM Admin IP${reset}"
- node_default_gw=${interface_ip_arr[0]}
- public_gateway=$default_gw
- ;;
- 3)
- echo "${red}Default Gateway Detected on Storage Interface!${reset}"
- echo "${red}Storage subnet should be private and not have Internet access!${reset}"
- exit 1
- ;;
- *)
- echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}"
- exit 1
- ;;
- esac
-else
- #assumes 24 bit mask
- defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
- firstip=.1
- defaultgw=$defaultgw$firstip
- echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
- node_default_gw=$defaultgw
-fi
-
-if [ $base_config ]; then
- if ! cp -f $base_config opnfv_ksgen_settings.yml; then
- echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
- exit 1
- fi
-fi
-
-if [ $no_parse ]; then
-echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
-
-else
-
-echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
-##Edit the ksgen settings appropriately
-##ksgen settings will be stored in /vagrant on the vagrant machine
-##if single node deployment all the variables will have the same ip
-##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7
-
-sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+##params: none
+##usage: clean_tmp()
+clean_tmp() {
+ rm -rf $vm_dir/foreman_vm
+}
-##replace private interface parameter
-##private interface will be of hosts, so we need to know the provisioned host interface name
-##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
-##replace IP for parameters with next IP that will be given to controller
-if [ "$deployment_type" == "single_network" ]; then
- ##we also need to assign IP addresses to nodes
- ##for single node, foreman is managing the single network, so we can't reserve them
- ##not supporting single network anymore for now
- echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}"
- exit 0
+##clone genesis and move to node vm dir
+##params: destination directory
+##usage: clone_bgs /tmp/myvm/
+clone_bgs() {
+ script_dir="`dirname "$script"`"
+ cp -fr $script_dir/ $1
+ cp -fr $script_dir/../../common/puppet-opnfv $1
+}
-elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+##validates the network settings and update VagrantFile with network settings
+##params: none
+##usage: configure_network()
+configure_network() {
+ cd $vm_dir/foreman_vm
+
+ ##if nic_arg_flag is set, then we don't figure out
+ ##NICs dynamically
+ if [[ $nic_arg_flag -eq 1 ]]; then
+ echo "${blue}Static Network Interfaces Defined. Updating Vagrantfile...${reset}"
+ if [ $virtual ]; then
+ nic_list="$public_nic"
+ elif [ -z "$storage_nic" ]; then
+ echo "${blue}storage_nic not defined, will combine storage into private VLAN ${reset}"
+ nic_list="$admin_nic $private_nic $public_nic"
+ else
+ nic_list="$admin_nic $private_nic $public_nic $storage_nic"
+ fi
+ nic_array=( $nic_list )
+ output=$nic_list
+ elif [ ! -z "$single_baremetal_nic" ]; then
+ output=$single_baremetal_nic
+ else
+ echo "${blue}Detecting network configuration...${reset}"
+ ##detect host 1 or 3 interface configuration
+ #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'`
+ #output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f10`
+ output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | awk {'print $9'}`
+ fi
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ if [ ! "$output" ]; then
+ printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
+ exit 1
fi
- sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+ ##virtual we only find 1 interface
+ if [ $virtual ]; then
+ if [ ! -z "${nic_array[0]}" ]; then
+ echo "${blue}Public Interface specified: ${nic_array[0]}${reset}"
+ this_default_gw_interface=${nic_array[0]}
+ else
+ ##find interface with default gateway
+ this_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $this_default_gw ${reset}"
+ this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
+ fi
- ##get ip addresses for private network on controllers to make dhcp entries
- ##required for controllers_ip_array global param
- next_private_ip=${interface_ip_arr[1]}
- type=_private
- for node in controller1 controller2 controller3; do
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
- exit 1
+ ##find interface IP, make sure its valid
+ interface_ip=$(find_ip $this_default_gw_interface)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}"
+ exit 1
fi
- sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
- controller_ip_array=$controller_ip_array$next_private_ip,
- done
- ##replace global param for contollers_ip_array
- controller_ip_array=${controller_ip_array%?}
- sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+ ##set variable info
+ if [ ! -z "$static_ip_range" ]; then
+ new_ip=$(echo $static_ip_range | cut -d , -f1)
+ subnet_mask=$(find_netmask $this_default_gw_interface)
+ host_subnet=$(find_subnet $interface_ip $subnet_mask)
+ ip_range_subnet=$(find_subnet $new_ip $subnet_mask)
+ if [ "$ip_range_subnet" != "$host_subnet" ]; then
+ echo "${red}static_ip_range: ${static_ip_range} is not in the same subnet as your default gateway interface: ${host_subnet}. Please use a correct range!${reset}"
+ exit 1
+ fi
+ else
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}"
+ exit 1
+ fi
+ fi
+ interface=$this_default_gw_interface
+ public_interface=$interface
+ interface_arr[$interface]=2
+ interface_ip_arr[2]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
- ##now replace all the VIP variables. admin//private can be the same IP
- ##we have to use IP's here that won't be allocated to hosts at provisioning time
- ##therefore we increment the ip by 10 to make sure we have a safe buffer
- next_private_ip=$(increment_ip $next_private_ip 10)
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[2]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
- grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
- next_private_ip=$(next_usable_ip $next_private_ip)
- if [ ! "$next_private_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
- exit 1
+ ##set that interface to be public
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ if_counter=1
+ else
+ if [ ! -z $single_baremetal_nic ]; then
+ interface_ip=$(find_ip $single_baremetal_nic)
+ if [ ! "$interface_ip" ]; then
+ echo "${red}Unable to determine IP address of $single_baremetal_nic. Exiting...${reset}"
+ exit 1
+ fi
+ subnet_mask=$(find_netmask $single_baremetal_nic)
+ public_subnet_mask=$subnet_mask
+ if ! verify_subnet_size $public_subnet_mask 50; then
+ echo "${red} Not enough IPs in subnet: $interface_ip $subnet_mask. Need at least 50 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ echo "${red}Unable to allocate new IP address: $interface_ip $subnet_mask Exiting...${reset}"
+ exit 1
+ fi
+
+ this_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $this_default_gw ${reset}"
+ this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}')
+ if [ "$this_default_gw_interface" != "$single_baremetal_nic" ]; then
+ echo "${red}Error: Your default gateway interface: $this_default_gw_interface does not \
+match the baremetal nic you provided: ${single_baremetal_nic}. Exiting...${reset}"
+ exit 1
+ fi
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$single_baremetal_nic"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ interface_ip_arr[0]=$new_ip
+ interface_arr[$single_baremetal_nic]=0
+ admin_ip=$new_ip
+ admin_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $single_baremetal_nic)
+ if_counter=1
+ else
+ ##find number of interfaces with ip and substitute in VagrantFile
+ if_counter=0
+ for interface in ${output}; do
+
+ if [ "$if_counter" -ge 4 ]; then
+ break
+ fi
+ interface_ip=$(find_ip $interface)
+ if [ ! "$interface_ip" ]; then
+ continue
+ fi
+ new_ip=$(next_usable_ip $interface_ip)
+ if [ ! "$new_ip" ]; then
+ continue
+ fi
+ interface_arr[$interface]=$if_counter
+ interface_ip_arr[$if_counter]=$new_ip
+ subnet_mask=$(find_netmask $interface)
+ if [ "$if_counter" -eq 0 ]; then
+ admin_subnet_mask=$subnet_mask
+ admin_ip=$new_ip
+ if ! verify_subnet_size $admin_subnet_mask 5; then
+ echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}. Need at least 5 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+
+ elif [ "$if_counter" -eq 1 ]; then
+ private_subnet_mask=$subnet_mask
+ private_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $private_subnet_mask 15; then
+ echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}. Need at least 15 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 2 ]; then
+ public_subnet_mask=$subnet_mask
+ public_short_subnet_mask=$(find_short_netmask $interface)
+
+ if ! verify_subnet_size $public_subnet_mask 25; then
+ echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ elif [ "$if_counter" -eq 3 ]; then
+ storage_subnet_mask=$subnet_mask
+
+ if ! verify_subnet_size $storage_subnet_mask 10; then
+ echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}. Need at least 10 IPs. Please resize subnet! Exiting ${reset}"
+ exit 1
+ fi
+ else
+ echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}"
+ exit 1
+ fi
+ sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile
+ ((if_counter++))
+ done
fi
- done
+ fi
- ##replace foreman site
- next_public_ip=${interface_ip_arr[2]}
- sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
- ##replace public vips
- next_public_ip=$(increment_ip $next_public_ip 10)
- grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do
- sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
- next_public_ip=$(next_usable_ip $next_public_ip)
- if [ ! "$next_public_ip" ]; then
- printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2
+ ##now remove interface config in Vagrantfile for 1 node
+ ##if 1, 3, or 4 interfaces set deployment type
+ ##if 2 interfaces remove 2nd interface and set deployment type
+ if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then
+ if [ $virtual ]; then
+ deployment_type="single_network"
+ echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}"
+ private_internal_ip=155.1.2.2
+ admin_internal_ip=156.1.2.2
+ private_subnet_mask=255.255.255.0
+ private_short_subnet_mask=/24
+ interface_ip_arr[1]=$private_internal_ip
+ interface_ip_arr[0]=$admin_internal_ip
+ admin_subnet_mask=255.255.255.0
+ admin_short_subnet_mask=/24
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ remove_vagrant_network eth_replace3
+ deployment_type=three_network
+ elif [[ "$if_counter" == 1 ]]; then
+ echo "${blue}Single network detected for Baremetal deployment! ${reset}"
+ remove_vagrant_network eth_replace1
+ remove_vagrant_network eth_replace2
+ remove_vagrant_network eth_replace3
+ deployment_type="single_network"
+ else
+ echo "${blue}Single network or 2 network detected for baremetal deployment. This is unsupported! Exiting. ${reset}"
exit 1
fi
- done
-
- ##replace public_network param
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_network param
- private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
- sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
- ##replace storage_network
- if [ "$deployment_type" == "three_network" ]; then
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ elif [ "$if_counter" == 3 ]; then
+ deployment_type="three_network"
+ remove_vagrant_network eth_replace3
else
- next_storage_ip=${interface_ip_arr[3]}
- storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
- sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
- fi
-
- ##replace public_subnet param
- public_subnet=$public_subnet'\'$public_short_subnet_mask
- sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
- ##replace private_subnet param
- private_subnet=$private_subnet'\'$private_short_subnet_mask
- sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
-
- ##replace public_dns param to be foreman server
- sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
-
- ##replace public_gateway
- if [ -z "$public_gateway" ]; then
- ##if unset then we assume its the first IP in the public subnet
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_gateway=$(increment_subnet $public_subnet 1)
- fi
- sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
-
- ##we have to define an allocation range of the public subnet to give
- ##to neutron to use as floating IPs
- ##we should control this subnet, so this range should work .150-200
- ##but generally this is a bad idea and we are assuming at least a /24 subnet here
- public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
- public_allocation_start=$(increment_subnet $public_subnet 150)
- public_allocation_end=$(increment_subnet $public_subnet 200)
-
- sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
- sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
-
-else
- printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2
- exit 1
-fi
-
-echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
-
-fi
-
-if [ $virtual ]; then
- echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
- sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
-fi
-
-echo "${blue}Starting Vagrant! ${reset}"
-
-##stand up vagrant
-if ! vagrant up; then
- printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2
- exit 1
-else
- echo "${blue}Foreman VM is up! ${reset}"
-fi
-
-if [ $virtual ]; then
-
-##Bring up VM nodes
-echo "${blue}Setting VMs up... ${reset}"
-nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
-##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first
-##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have
-##3 static controllers
-compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
-controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
-nodes=${controller_nodes}${compute_nodes}
-
-for node in ${nodes}; do
- cd /tmp
-
- ##remove VM nodes incase it wasn't cleaned up
- rm -rf /tmp/$node
-
- ##clone bgs vagrant
- ##will change this to be opnfv repo when commit is done
- if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then
- printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2
- exit 1
+ deployment_type="multi_network"
+ fi
+
+ echo "${blue}Network detected: ${deployment_type}! ${reset}"
+
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*disable_dhcp_flag =.*$/ disable_dhcp_flag = true/' Vagrantfile
+ if [ $static_ip_range ]; then
+ ##verify static range is at least 20 IPs
+ static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1)
+ static_ip_range_end=$(echo $static_ip_range | cut -d , -f2)
+ ##verify range is at least 20 ips
+ ##assumes less than 255 range pool
+ begin_octet=$(echo $static_ip_range_begin | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_count=$((end_octet-begin_octet+1))
+ if [ "$ip_count" -lt 20 ]; then
+ echo "${red}Static range is less than 20 ips: ${ip_count}, exiting ${reset}"
+ exit 1
+ else
+ echo "${blue}Static IP range is size $ip_count ${reset}"
+ fi
+ fi
+ fi
fi
- cd $node
+ if route | grep default; then
+ echo "${blue}Default Gateway Detected ${reset}"
+ host_default_gw=$(ip route | grep default | awk '{print $3}')
+ echo "${blue}Default Gateway: $host_default_gw ${reset}"
+ default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}')
+ case "${interface_arr[$default_gw_interface]}" in
+ 0)
+ echo "${blue}Default Gateway Detected on Admin Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ node_default_gw=$host_default_gw
+ ;;
+ 1)
+ echo "${red}Default Gateway Detected on Private Interface!${reset}"
+ echo "${red}Private subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ 2)
+ echo "${blue}Default Gateway Detected on Public Interface!${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile
+ echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}"
+ sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile
+ echo "${blue}Setting node gateway to be VM Admin IP${reset}"
+ node_default_gw=${interface_ip_arr[0]}
+ public_gateway=$host_default_gw
+ ;;
+ 3)
+ echo "${red}Default Gateway Detected on Storage Interface!${reset}"
+ echo "${red}Storage subnet should be private and not have Internet access!${reset}"
+ exit 1
+ ;;
+ *)
+ echo "${red}Unable to determine which interface default gateway is on. Exiting!${reset}"
+ exit 1
+ ;;
+ esac
+ else
+ #assumes 24 bit mask
+ defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3`
+ firstip=.1
+ defaultgw=$defaultgw$firstip
+ echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}"
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile
+ node_default_gw=$defaultgw
+ fi
if [ $base_config ]; then
if ! cp -f $base_config opnfv_ksgen_settings.yml; then
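
When no NIC arguments are supplied, configure_network now discovers physical interfaces by filtering virtual devices out of /sys/class/net rather than parsing ifconfig as before. What that pipeline yields can be previewed on the deploy host (output is host-specific; em1..em4 are hypothetical names):

    /bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | awk '{print $9}'
    # e.g.: em1 em2 em3 em4
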
@@ -581,114 +798,671 @@ for node in ${nodes}; do
fi
fi
- ##parse yaml into variables
- eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
- ##find node type
- node_type=config_nodes_${node}_type
- node_type=$(eval echo \$$node_type)
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ echo "${blue}Controller nodes found in settings: ${controller_nodes}${reset}"
+ my_controller_array=( $controller_nodes )
+ num_control_nodes=${#my_controller_array[@]}
+ if [ "$num_control_nodes" -ne 3 ]; then
+ if cat opnfv_ksgen_settings.yml | grep ha_flag | grep true; then
+ echo "${red}Error: You must define exactly 3 control nodes when HA flag is true!${reset}"
+ exit 1
+ fi
+ else
+ echo "${blue}Number of Controller nodes detected: ${num_control_nodes}${reset}"
+ fi
- ##find number of interfaces with ip and substitute in VagrantFile
- output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'`
+ if [ $no_parse ]; then
+ echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}"
+
+ else
+
+ echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}"
+ ##Edit the ksgen settings appropriately
+ ##ksgen settings will be stored in /vagrant on the vagrant machine
+ ##if single node deployment all the variables will have the same ip
+ ##interface names will be enp0s3, enp0s8, enp0s9 in opnfv/centos-7.0
+
+ sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml
+
+ ##replace private interface parameter
+ ##private interface will be of hosts, so we need to know the provisioned host interface name
+ ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts
+ ##replace IP for parameters with next IP that will be given to controller
+
+ if [[ "$deployment_type" == "single_network" || "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then
+
+ if [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml
+ elif [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*network_type:.*$/network_type: single_network/' opnfv_ksgen_settings.yml
+ next_single_ip=${interface_ip_arr[0]}
+ foreman_ip=$next_single_ip
+ next_single_ip=$(next_usable_ip $next_single_ip)
+ fi
+
+ sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml
+
+ ##get ip addresses for private network on controllers to make dhcp entries
+ ##required for controllers_ip_array global param
+ if [ "$deployment_type" == "single_network" ]; then
+ next_private_ip=$next_single_ip
+ sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ next_admin_ip=${interface_ip_arr[0]}
+ type1=_admin
+ type2=_private
+ control_count=0
+ for node in ${controller_nodes}; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "${red} Unable to find an unused IP for $node ! ${reset}"
+ exit 1
+ else
+ sed -i 's/'"$node$type1"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ sed -i 's/'"$node$type2"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ controller_ip_array=$controller_ip_array$next_private_ip,
+ controllers_ip_arr[$control_count]=$next_private_ip
+ ((control_count++))
+ fi
+ done
+
+ for node in ${compute_nodes}; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "${red} Unable to find an unused IP for $node ! ${reset}"
+ exit 1
+ else
+ sed -i 's/'"$node$type1"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ fi
+ done
+
+ else
+ next_private_ip=${interface_ip_arr[1]}
+
+ type=_private
+ control_count=0
+ for node in controller1 controller2 controller3; do
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2
+ exit 1
+ fi
+ sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml
+ controller_ip_array=$controller_ip_array$next_private_ip,
+ controllers_ip_arr[$control_count]=$next_private_ip
+ ((control_count++))
+ done
+ fi
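+    ##illustrative sketch only: next_usable_ip is defined earlier in deploy.sh;
+    ##the assumed behavior is to advance to the next IP and skip addresses that
+    ##already answer ping, roughly:
+    ##  next_usable_ip() {
+    ##    local ip=$(next_ip $1)
+    ##    while [ "$ip" ] && ping -c 1 -w 1 $ip > /dev/null 2>&1; do
+    ##      ip=$(next_ip $ip)
+    ##    done
+    ##    echo $ip
+    ##  }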
+
+ if [[ "$deployment_type" != "single_network" ]]; then
+ next_public_ip=${interface_ip_arr[2]}
+ foreman_ip=$next_public_ip
+ fi
+
+ ##if no dhcp, find all the Admin IPs for nodes in advance
+ if [ $virtual ]; then
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ next_admin_ip=${interface_ip_arr[0]}
+ type=_admin
+ for node in ${nodes}; do
+ next_admin_ip=$(next_ip $next_admin_ip)
+ if [ ! "$next_admin_ip" ]; then
+ echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}"
+ exit 1
+ else
+ admin_ip_arr[$node]=$next_admin_ip
+ sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml
+ fi
+ done
+
+ ##allocate node public IPs
+ for node in ${nodes}; do
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+          echo "${red} Unable to find an unused IP in public network for $node ! ${reset}"
+ exit 1
+ else
+ public_ip_arr[$node]=$next_public_ip
+ fi
+ done
+ fi
+ fi
+ ##replace global param for controllers_ip_array
+ controller_ip_array=${controller_ip_array%?}
+ sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml
+
+  ##now replace all the VIP variables. admin/private can be the same IP
+ ##we have to use IP's here that won't be allocated to hosts at provisioning time
+ ##therefore we increment the ip by 10 to make sure we have a safe buffer
+ next_private_ip=$(increment_ip $next_private_ip 10)
+
+  private_output=$(grep -E 'private_vip|loadbalancer_vip|db_vip|amqp_vip|admin_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$private_output" ]; then
+ while read -r line; do
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml
+ next_private_ip=$(next_usable_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$private_output"
+ fi
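+  ##illustrative: a settings line "  db_vip:" becomes "  db_vip: 10.4.1.25",
+  ##and each subsequent VIP gets the next usable private IP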
+
+ ##replace odl_control_ip (non-HA only)
+ odl_control_ip=${controllers_ip_arr[0]}
+ sed -i 's/^.*odl_control_ip:.*$/ odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace controller_ip (non-HA only)
+ sed -i 's/^.*controller_ip:.*$/ controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml
+
+ ##replace foreman site
+ sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml
+ ##replace public vips
+
+ ##if single_network deployment we continue next_public_ip from next_private_ip
+ if [[ "$deployment_type" == "single_network" ]]; then
+ next_public_ip=$(next_usable_ip $next_private_ip)
+ else
+ ##no need to do this if no dhcp
+ if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ else
+ next_public_ip=$(increment_ip $next_public_ip 10)
+ fi
+ fi
+
+  public_output=$(grep -E 'public_vip' opnfv_ksgen_settings.yml)
+ if [ ! -z "$public_output" ]; then
+ while read -r line; do
+ if echo $line | grep horizon_public_vip; then
+ horizon_public_vip=$next_public_ip
+ fi
+ sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml
+ next_public_ip=$(next_usable_ip $next_public_ip)
+ if [ ! "$next_public_ip" ]; then
+        printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replacement' >&2
+ exit 1
+ fi
+ done <<< "$public_output"
+ fi
+
+ ##replace admin_network param for bare metal deployments
+ if [[ -z "$virtual" && -z "$single_network" ]]; then
+ admin_subnet=$(find_subnet $admin_ip $admin_subnet_mask)
+ sed -i 's/^.*admin_network:.*$/ admin_network:'" $admin_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ sed -i 's/^.*admin_network:.*$/ admin_network:'" \"false\""'/' opnfv_ksgen_settings.yml
+ fi
+ ##replace public_network param
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*private_network:.*$/ private_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ ##replace private_network param
+ private_subnet=$(find_subnet $next_private_ip $private_subnet_mask)
+ sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace storage_network
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ elif [ "$deployment_type" == "three_network" ]; then
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ next_storage_ip=${interface_ip_arr[3]}
+ storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask)
+ sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_subnet param
+ public_subnet=$public_subnet'\'$public_short_subnet_mask
+ sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml
+ else
+ ##replace private_subnet param
+ private_subnet=$private_subnet'\'$private_short_subnet_mask
+ sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_dns param to be foreman server
+ if [ "$deployment_type" == "single_network" ]; then
+ sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[0]}'/' opnfv_ksgen_settings.yml
+ else
+ sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml
+ fi
+
+ ##replace public_gateway
+ if [ -z "$public_gateway" ]; then
+ if [ "$deployment_type" == "single_network" ]; then
+ public_gateway=$node_default_gw
+ else
+ ##if unset then we assume its the first IP in the public subnet
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ public_gateway=$(increment_subnet $public_subnet 1)
+ fi
+ fi
+ sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml
+
+ ##we have to define an allocation range of the public subnet to give
+ ##to neutron to use as floating IPs
+ ##if static ip range, then we take the difference of the end range and current ip
+ ## to be the allocation pool
+ ##if not static ip, we will use the last 20 IP from the subnet
+  ## note this is not ideal: the subnet must be at least a /27 for this to work...
+ public_subnet=$(find_subnet $next_public_ip $public_subnet_mask)
+ if [ ! -z "$static_ip_range" ]; then
+ begin_octet=$(echo $next_public_ip | cut -d . -f4)
+ end_octet=$(echo $static_ip_range_end | cut -d . -f4)
+ ip_diff=$((end_octet-begin_octet))
+ if [ $ip_diff -le 0 ]; then
+      echo "${red}IP range left for floating range is less than or equal to 0! $ip_diff ${reset}"
+ exit 1
+ else
+ public_allocation_start=$(next_ip $next_public_ip)
+ public_allocation_end=$static_ip_range_end
+ fi
+ else
+ last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask)
+ public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count )
+ public_allocation_end=${last_ip_subnet}
+ fi
+ echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}"
+
+ sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
+ sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
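+  ##illustrative: with public subnet 10.2.84.0/24, no static range, and
+  ##floating_ip_count=20 (the last-20-IPs case noted above), and assuming
+  ##find_last_ip_subnet returns the highest usable address, the allocation
+  ##range is 10.2.84.234 to 10.2.84.254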
+
+ else
+    printf '%s\n' "deploy.sh: Unknown network type: $deployment_type" >&2
+ exit 1
+ fi
+
+ echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
- if [ ! "$output" ]; then
- printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2
- exit 1
fi
+}
+##Configure bootstrap.sh to use the virtual Khaleesi playbook
+##params: none
+##usage: configure_virtual()
+configure_virtual() {
+ if [ $virtual ]; then
+ echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
+ sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
+ fi
+}
+
+##Starts Foreman VM with Vagrant
+##params: none
+##usage: start_foreman()
+start_foreman() {
+ echo "${blue}Starting Vagrant! ${reset}"
- if_counter=0
- for interface in ${output}; do
+ ##stand up vagrant
+ if ! vagrant up; then
+ printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2
+ exit 1
+ else
+ echo "${blue}Foreman VM is up! ${reset}"
+ fi
+}
- if [ "$if_counter" -ge 4 ]; then
- break
+##starts the virtual nodes if this is a virtual installation
+##this function does nothing if baremetal servers are being used
+##params: none
+##usage: start_virtual_nodes()
+start_virtual_nodes() {
+ if [ $virtual ]; then
+
+ ##Bring up VM nodes
+ echo "${blue}Setting VMs up... ${reset}"
+ nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+    ##due to an ODL Helium bug where OVS connects to ODL too early, controllers need to install first
+    ##this fix assumes more than we would like, but for now it should be OK as we always have
+    ##3 static controllers
+ compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+ controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+ nodes=${controller_nodes}${compute_nodes}
+ controller_count=0
+ compute_wait_completed=false
+
+ for node in ${nodes}; do
+
+      ##remove VM node directories in case they were not cleaned up
+ rm -rf $vm_dir/$node
+ rm -rf /tmp/genesis/
+
+ ##clone genesis and move into node folder
+ clone_bgs $vm_dir/$node
+
+ cd $vm_dir/$node
+
+ if [ $base_config ]; then
+ if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+ echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+ exit 1
+ fi
+ fi
+
+ ##parse yaml into variables
+ eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
+ ##find node type
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+
+ ##modify memory and cpu
+ node_memory=$(eval echo \${config_nodes_${node}_memory})
+ node_vcpus=$(eval echo \${config_nodes_${node}_cpus})
+ node_storage=$(eval echo \${config_nodes_${node}_disk})
+
+ sed -i 's/^.*vb.memory =.*$/ vb.memory = '"$node_memory"'/' Vagrantfile
+ sed -i 's/^.*vb.cpus =.*$/ vb.cpus = '"$node_vcpus"'/' Vagrantfile
+
+ if ! resize_vagrant_disk $node_storage; then
+ echo "${red}Error while resizing vagrant box to size $node_storage for $node! ${reset}"
+ exit 1
+ fi
+
+      ##make compute nodes wait for the control nodes to install first
+      if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+        echo "${blue}Waiting ~23 minutes for Control nodes to install before continuing with Compute nodes...${reset}"
+        compute_wait_completed=true
+        sleep 1400
+ fi
+
+ ## Add Admin interface
+ mac_string=config_nodes_${node}_mac_address
+ mac_addr=$(eval echo \$$mac_string)
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+      if [ "$mac_addr" == "" ]; then
+ echo "${red} Unable to find mac_address for $node! ${reset}"
+ exit 1
+ fi
+ this_admin_ip=${admin_ip_arr[$node]}
+ sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+
+ ## Add private interface
+ if [ "$node_type" == "controller" ]; then
+ mac_string=config_nodes_${node}_private_mac
+ mac_addr=$(eval echo \$$mac_string)
+        if [ "$mac_addr" == "" ]; then
+ echo "${red} Unable to find private_mac for $node! ${reset}"
+ exit 1
+ fi
+ else
+ ##generate random mac
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ fi
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ if [ "$node_type" == "controller" ]; then
+ new_node_ip=${controllers_ip_arr[$controller_count]}
+ if [ ! "$new_node_ip" ]; then
+ echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}"
+ exit 1
+ fi
+ ((controller_count++))
+ else
+ next_private_ip=$(next_ip $next_private_ip)
+ if [ ! "$next_private_ip" ]; then
+ echo "{red}ERROR: Could not find private ip for $node ${reset}"
+ exit 1
+ fi
+ new_node_ip=$next_private_ip
+ fi
+ sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+ ##replace host_ip in vm_nodes_provision with private ip
+ sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+ ##replace ping site
+ if [ ! -z "$ping_site" ]; then
+ sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh
+ fi
+
+ ##find public ip info and add public interface
+ mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+ mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+ this_public_ip=${public_ip_arr[$node]}
+
+ if [ -z "$enable_virtual_dhcp" ]; then
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile
+ else
+ sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
+ fi
+ remove_vagrant_network eth_replace3
+
+ ##modify provisioning to do puppet install, config, and foreman check-in
+ ##substitute host_name and dns_server in the provisioning script
+ host_string=config_nodes_${node}_short_name
+ short_host_name=$(eval echo \$$host_string)
+ ##substitute domain_name
+ domain_name=$config_domain_name
+ sed -i 's/^domain_name=REPLACE/domain_name='$domain_name'/' vm_nodes_provision.sh
+ host_name=${short_host_name}.${domain_name}
+ sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
+ ##dns server should be the foreman server
+ sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+ ## remove bootstrap and NAT provisioning
+ sed -i '/nat_setup.sh/d' Vagrantfile
+ sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ ## modify default_gw to be node_default_gw
+ sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ echo "${blue}Starting Vagrant Node $node! ${reset}"
+ ##stand up vagrant
+ if ! vagrant up; then
+ echo "${red} Unable to start $node ${reset}"
+ exit 1
+ else
+ echo "${blue} $node VM is up! ${reset}"
+ fi
+ done
+ echo "${blue} All VMs are UP! ${reset}"
+ echo "${blue} Waiting for puppet to complete on the nodes... ${reset}"
+ ##check puppet is complete
+ ##ssh into foreman server, run check to verify puppet is complete
+ pushd $vm_dir/foreman_vm
+ if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then
+ echo "${red} Failed to validate puppet completion on nodes ${reset}"
+ exit 1
+ else
+ echo "{$blue} Puppet complete on all nodes! ${reset}"
fi
- interface_ip=$(find_ip $interface)
- if [ ! "$interface_ip" ]; then
- continue
+ popd
+ ##add routes back to nodes
+ for node in ${nodes}; do
+ pushd $vm_dir/$node
+ if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then
+ echo "${blue} Adding public route back to $node! ${reset}"
+ vagrant ssh -c "route add default gw $this_default_gw"
+ vagrant ssh -c "route delete default gw 10.0.2.2"
+ fi
+ popd
+ done
+ if [ ! -z "$horizon_public_vip" ]; then
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}"
+ else
+ ##Find public IP of controller
+ for node in ${nodes}; do
+ node_type=config_nodes_${node}_type
+ node_type=$(eval echo \$$node_type)
+ if [ "$node_type" == "controller" ]; then
+ pushd $vm_dir/$node
+          horizon_ip=`vagrant ssh -c "ifconfig enp0s10" | grep -Eo "inet [0-9\.]+" | awk '{print $2}'`
+ popd
+ break
+ fi
+ done
+ if [ -z "$horizon_ip" ]; then
+        echo "${red}Warn: Unable to determine horizon IP, please log in to your controller node to find it${reset}"
+ fi
+ echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_ip} ${reset}"
fi
- case "${if_counter}" in
- 0)
- mac_string=config_nodes_${node}_mac_address
- mac_addr=$(eval echo \$$mac_string)
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find mac_address for $node! ${reset}"
- exit 1
- fi
- ;;
- 1)
- if [ "$node_type" == "controller" ]; then
- mac_string=config_nodes_${node}_private_mac
- mac_addr=$(eval echo \$$mac_string)
- if [ $mac_addr == "" ]; then
- echo "${red} Unable to find private_mac for $node! ${reset}"
- exit 1
- fi
- else
- ##generate random mac
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- fi
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- *)
- mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
- mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
- ;;
- esac
- sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile
- ((if_counter++))
- done
+ fi
+}
- ##now remove interface config in Vagrantfile for 1 node
- ##if 1, 3, or 4 interfaces set deployment type
- ##if 2 interfaces remove 2nd interface and set deployment type
- if [ "$if_counter" == 1 ]; then
- deployment_type="single_network"
- remove_vagrant_network eth_replace1
- remove_vagrant_network eth_replace2
- remove_vagrant_network eth_replace3
- elif [ "$if_counter" == 2 ]; then
- deployment_type="single_network"
- second_interface=`echo $output | awk '{print $2}'`
- remove_vagrant_network $second_interface
- remove_vagrant_network eth_replace2
- elif [ "$if_counter" == 3 ]; then
- deployment_type="three_network"
- remove_vagrant_network eth_replace3
+##check to make sure nodes are powered off
+##this function does nothing if virtual
+##params: none
+##usage: check_baremetal_nodes()
+check_baremetal_nodes() {
+ if [ $virtual ]; then
+ echo "${blue}Skipping Baremetal node power status check as deployment is virtual ${reset}"
else
- deployment_type="multi_network"
+ echo "${blue}Checking Baremetal nodes power state... ${reset}"
+ if [ ! -z "$base_config" ]; then
+ # Install ipmitool
+ # Major version is pinned to force some consistency for Arno
+ if ! yum list installed | grep -i ipmitool; then
+ echo "${blue}Installing ipmitool...${reset}"
+ if ! yum -y install ipmitool-1*; then
+ echo "${red}Failed to install ipmitool!${reset}"
+ exit 1
+ fi
+ fi
+
+ ###find all the bmc IPs and number of nodes
+ node_counter=0
+      output=`grep bmc_ip $base_config | grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+'`
+ for line in ${output} ; do
+ bmc_ip[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ max_nodes=$((node_counter-1))
+
+ ###find bmc_users per node
+ node_counter=0
+ output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'`
+ for line in ${output} ; do
+ bmc_user[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ ###find bmc_pass per node
+ node_counter=0
+ output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'`
+ for line in ${output} ; do
+ bmc_pass[$node_counter]=$line
+ ((node_counter++))
+ done
+
+ for mynode in `seq 0 $max_nodes`; do
+ echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}"
+ ipmi_output=`ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis status \
+          | grep "System Power" | cut -d ':' -f2 | tr -d '[:blank:]'`
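+        ##illustrative: `ipmitool ... chassis status` prints lines like
+        ##  System Power         : off
+        ##so ipmi_output ends up as "on" or "off"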
+ if [ "$ipmi_output" == "on" ]; then
+ echo "${red}Error: Node is powered on: ${bmc_ip[$mynode]} ${reset}"
+ echo "${red}Please run clean.sh before running deploy! ${reset}"
+ exit 1
+ elif [ "$ipmi_output" == "off" ]; then
+ echo "${blue}Node: ${bmc_ip[$mynode]} is powered off${reset}"
+ else
+ echo "${red}Warning: Unable to detect node power state: ${bmc_ip[$mynode]} ${reset}"
+ fi
+ done
+ else
+ echo "${red}base_config was not provided for a baremetal install! Exiting${reset}"
+ exit 1
+ fi
fi
+}
- ##modify provisioning to do puppet install, config, and foreman check-in
- ##substitute host_name and dns_server in the provisioning script
- host_string=config_nodes_${node}_hostname
- host_name=$(eval echo \$$host_string)
- sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh
- ##dns server should be the foreman server
- sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh
+##resizes vagrant disk (cannot shrink)
+##params: size in GB
+##usage: resize_vagrant_disk 100
+resize_vagrant_disk() {
+  if [ "$1" -lt 40 ]; then
+    echo "${blue}Warn: Requested disk size cannot be less than 40GB, using 40GB as new size${reset}"
+ new_size_gb=40
+ else
+ new_size_gb=$1
+ fi
- ## remove bootstrap and NAT provisioning
- sed -i '/nat_setup.sh/d' Vagrantfile
- sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+ if ! vagrant box list | grep opnfv; then
+ vagrant box remove -f opnfv/centos-7.0
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ echo "${red}Unable to reclone vagrant box! Exiting...${reset}"
+ exit 1
+ fi
+ fi
- ## modify default_gw to be node_default_gw
- sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+ pushd $vagrant_box_dir
+
+ # Close medium to make sure we can modify it
+ vboxmanage closemedium disk $vagrant_box_vmdk
+
+ cur_size=$(vboxmanage showhdinfo $vagrant_box_vmdk | grep -i capacity | grep -Eo [0-9]+)
+ cur_size_gb=$((cur_size / 1024))
+
+  if [ "$cur_size_gb" -eq "$new_size_gb" ]; then
+    echo "${blue}Info: Disk size already ${cur_size_gb}GB ${reset}"
+    popd
+    return
+  elif [ "$new_size_gb" -lt "$cur_size_gb" ]; then
+    echo "${blue}Info: Requested disk is less than ${cur_size_gb}GB ${reset}"
+ echo "${blue}Re-adding vagrant box${reset}"
+ if vagrant box list | grep opnfv; then
+ popd
+ vagrant box remove -f opnfv/centos-7.0
+ if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then
+ echo "${red}Unable to reclone vagrant box! Exiting...${reset}"
+ exit 1
+ fi
+ pushd $vagrant_box_dir
+ fi
+ fi
- ## modify VM memory to be 4gig
- sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile
+ new_size=$((new_size_gb * 1024))
+ if ! vboxmanage clonehd $vagrant_box_vmdk tmp-disk.vdi --format vdi; then
+ echo "${red}Error: Unable to clone ${vagrant_box_vmdk}${reset}"
+ popd
+ return 1
+ fi
- echo "${blue}Starting Vagrant Node $node! ${reset}"
+ if ! vboxmanage modifyhd tmp-disk.vdi --resize $new_size; then
+    echo "${red}Error: Unable to modify tmp-disk.vdi to ${new_size}${reset}"
+ popd
+ return 1
+ fi
- ##stand up vagrant
- if ! vagrant up; then
- echo "${red} Unable to start $node ${reset}"
- exit 1
- else
- echo "${blue} $node VM is up! ${reset}"
+ if ! vboxmanage clonehd tmp-disk.vdi resized-disk.vmdk --format vmdk; then
+    echo "${red}Error: Unable to clone tmp-disk.vdi to vmdk${reset}"
+ popd
+ return 1
fi
-done
+ vboxmanage closemedium disk tmp-disk.vdi --delete
+ rm -f tmp-disk.vdi $vagrant_box_vmdk
+ cp -f resized-disk.vmdk $vagrant_box_vmdk
+ vboxmanage closemedium disk resized-disk.vmdk --delete
+ popd
+}
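+##illustrative: resize_vagrant_disk 100 grows the box disk to 100GB by
+##cloning the vmdk to vdi, resizing the vdi, and cloning back to vmdk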
+
+##END FUNCTIONS
- echo "${blue} All VMs are UP! ${reset}"
+main() {
+ parse_cmdline "$@"
+ disable_selinux
+ check_baremetal_nodes
+ install_EPEL
+ install_vbox
+ install_ansible
+ install_vagrant
+ clean_tmp
+ verify_vm_dir
+ clone_bgs $vm_dir/foreman_vm
+ configure_network
+ configure_virtual
+ start_foreman
+ start_virtual_nodes
+}
-fi
+main "$@"
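+
+##illustrative invocations -- flag names are assumed to match the variables
+##consumed above (see parse_cmdline earlier in this script):
+##  ./deploy.sh -base_config /root/opnfv_ksgen_settings.yml   #bare metal
+##  ./deploy.sh -virtual                                      #all-VM deployment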
diff --git a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
index 72935c9ad..2c146a07a 100644
--- a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
+++ b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml
@@ -105,9 +105,9 @@ nodes:
type: compute
host_type: baremetal
hostgroup: Compute
- mac_address: "00:25:b5:a0:00:5e"
- bmc_ip: 172.30.8.74
- bmc_mac: "74:a2:e6:a4:14:9c"
+ mac_address: "00:25:B5:A0:00:2A"
+ bmc_ip: 172.30.8.75
+ bmc_mac: "a8:9d:21:c9:8b:56"
bmc_user: admin
bmc_pass: octopus
ansible_ssh_pass: "Op3nStack"
@@ -125,9 +125,9 @@ nodes:
type: compute
host_type: baremetal
hostgroup: Compute
- mac_address: "00:25:b5:a0:00:3e"
- bmc_ip: 172.30.8.73
- bmc_mac: "a8:9d:21:a0:15:9c"
+ mac_address: "00:25:B5:A0:00:3A"
+ bmc_ip: 172.30.8.65
+ bmc_mac: "a8:9d:21:c9:4d:26"
bmc_user: admin
bmc_pass: octopus
ansible_ssh_pass: "Op3nStack"
@@ -145,13 +145,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network_ODL
- mac_address: "00:25:b5:a0:00:af"
- bmc_ip: 172.30.8.66
- bmc_mac: "a8:9d:21:c9:8b:56"
+ mac_address: "00:25:B5:A0:00:4A"
+ bmc_ip: 172.30.8.74
+ bmc_mac: "a8:9d:21:c9:3a:92"
bmc_user: admin
bmc_pass: octopus
private_ip: controller1_private
- private_mac: "00:25:b5:b0:00:1f"
+ private_mac: "00:25:B5:A0:00:4B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
@@ -167,13 +167,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network
- mac_address: "00:25:b5:a0:00:9e"
- bmc_ip: 172.30.8.75
- bmc_mac: "a8:9d:21:c9:4d:26"
+ mac_address: "00:25:B5:A0:00:5A"
+ bmc_ip: 172.30.8.73
+ bmc_mac: "74:a2:e6:a4:14:9c"
bmc_user: admin
bmc_pass: octopus
private_ip: controller2_private
- private_mac: "00:25:b5:b0:00:de"
+ private_mac: "00:25:B5:A0:00:5B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
@@ -189,13 +189,13 @@ nodes:
type: controller
host_type: baremetal
hostgroup: Controller_Network
- mac_address: "00:25:b5:a0:00:7e"
- bmc_ip: 172.30.8.65
- bmc_mac: "a8:9d:21:c9:3a:92"
+ mac_address: "00:25:B5:A0:00:6A"
+ bmc_ip: 172.30.8.72
+ bmc_mac: "a8:9d:21:a0:15:9c"
bmc_user: admin
bmc_pass: octopus
private_ip: controller3_private
- private_mac: "00:25:b5:b0:00:be"
+ private_mac: "00:25:B5:A0:00:6B"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
groups:
diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml
index 21840ddf8..28596163a 100644
--- a/foreman/ci/opnfv_ksgen_settings.yml
+++ b/foreman/ci/opnfv_ksgen_settings.yml
@@ -7,6 +7,7 @@ global_params:
controllers_hostnames_array: oscontroller1,oscontroller2,oscontroller3
controllers_ip_array:
amqp_vip:
+ admin_network:
private_subnet:
cinder_admin_vip:
cinder_private_vip:
@@ -44,6 +45,8 @@ global_params:
deployment_type:
network_type: multi_network
default_gw:
+no_dhcp: false
+domain_name: opnfv.com
foreman:
seed_values:
- { name: heat_cfn, oldvalue: true, newvalue: false }
@@ -99,8 +102,8 @@ workaround_vif_plugging: false
openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
nodes:
compute:
- name: oscompute11.opnfv.com
- hostname: oscompute11.opnfv.com
+ name: oscompute11.{{ domain_name }}
+ hostname: oscompute11.{{ domain_name }}
short_name: oscompute11
type: compute
host_type: baremetal
@@ -110,8 +113,12 @@ nodes:
bmc_mac: "10:23:45:67:88:AB"
bmc_user: root
bmc_pass: root
+ admin_ip: compute_admin
ansible_ssh_pass: "Op3nStack"
admin_password: ""
+ cpus: 2
+ memory: 2048
+ disk: 40
groups:
- compute
- foreman_nodes
@@ -119,8 +126,8 @@ nodes:
- rdo
- neutron
controller1:
- name: oscontroller1.opnfv.com
- hostname: oscontroller1.opnfv.com
+ name: oscontroller1.{{ domain_name }}
+ hostname: oscontroller1.{{ domain_name }}
short_name: oscontroller1
type: controller
host_type: baremetal
@@ -130,10 +137,14 @@ nodes:
bmc_mac: "10:23:45:67:88:AC"
bmc_user: root
bmc_pass: root
+ admin_ip: controller1_admin
private_ip: controller1_private
private_mac: "10:23:45:67:87:AC"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
groups:
- controller
- foreman_nodes
@@ -141,8 +152,8 @@ nodes:
- rdo
- neutron
controller2:
- name: oscontroller2.opnfv.com
- hostname: oscontroller2.opnfv.com
+ name: oscontroller2.{{ domain_name }}
+ hostname: oscontroller2.{{ domain_name }}
short_name: oscontroller2
type: controller
host_type: baremetal
@@ -152,10 +163,14 @@ nodes:
bmc_mac: "10:23:45:67:88:AD"
bmc_user: root
bmc_pass: root
+ admin_ip: controller2_admin
private_ip: controller2_private
private_mac: "10:23:45:67:87:AD"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
groups:
- controller
- foreman_nodes
@@ -163,8 +178,8 @@ nodes:
- rdo
- neutron
controller3:
- name: oscontroller3.opnfv.com
- hostname: oscontroller3.opnfv.com
+ name: oscontroller3.{{ domain_name }}
+ hostname: oscontroller3.{{ domain_name }}
short_name: oscontroller3
type: controller
host_type: baremetal
@@ -174,10 +189,14 @@ nodes:
bmc_mac: "10:23:45:67:88:AE"
bmc_user: root
bmc_pass: root
+ admin_ip: controller3_admin
private_ip: controller3_private
private_mac: "10:23:45:67:87:AE"
ansible_ssh_pass: "Op3nStack"
admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
groups:
- controller
- foreman_nodes
diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
new file mode 100644
index 000000000..306603826
--- /dev/null
+++ b/foreman/ci/opnfv_ksgen_settings_no_HA.yml
@@ -0,0 +1,272 @@
+global_params:
+ admin_email: opnfv@opnfv.com
+ ha_flag: "false"
+ odl_flag: "true"
+ odl_control_ip:
+ admin_network:
+ private_network:
+ storage_network:
+ public_network:
+ private_subnet:
+ deployment_type:
+ controller_ip:
+network_type: multi_network
+default_gw:
+no_dhcp: false
+domain_name: opnfv.com
+foreman:
+ seed_values:
+ - { name: heat_cfn, oldvalue: true, newvalue: false }
+workaround_puppet_version_lock: false
+opm_branch: master
+installer:
+ name: puppet
+ short_name: pupt
+ network:
+ auto_assign_floating_ip: false
+ variant:
+ short_name: m2vx
+ plugin:
+ name: neutron
+workaround_openstack_packstack_rpm: false
+tempest:
+ repo:
+ Fedora:
+ '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/
+ '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/
+ RedHat:
+ '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/
+ use_virtual_env: false
+ public_allocation_end: 10.2.84.71
+ skip:
+ files: null
+ tests: null
+ public_allocation_start: 10.2.84.51
+ physnet: physnet1
+ use_custom_repo: false
+ public_subnet_cidr: 10.2.84.0/24
+ public_subnet_gateway: 10.2.84.1
+ additional_default_settings:
+ - section: compute
+ option: flavor_ref
+ value: 1
+ cirros_image_file: cirros-0.3.1-x86_64-disk.img
+ setup_method: tempest/rpm
+ test_name: all
+ rdo:
+ version: juno
+ rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ rpm:
+ version: 20141201
+ dir: ~{{ nodes.tempest.remote_user }}/tempest-dir
+tmp:
+ node_prefix: '{{ node.prefix | reject("none") | join("-") }}-'
+ anchors:
+ - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ - http://repos.fedorapeople.org/repos/openstack/openstack-juno/
+opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git
+workaround_vif_plugging: false
+openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm
+nodes:
+ compute:
+ name: oscompute11.{{ domain_name }}
+ hostname: oscompute11.{{ domain_name }}
+ short_name: oscompute11
+ type: compute
+ host_type: baremetal
+ hostgroup: Compute
+ mac_address: "10:23:45:67:89:AB"
+ bmc_ip: 10.4.17.2
+ bmc_mac: "10:23:45:67:88:AB"
+ bmc_user: root
+ bmc_pass: root
+ admin_ip: compute_admin
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: ""
+ cpus: 2
+ memory: 2048
+ disk: 40
+ groups:
+ - compute
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+ controller1:
+ name: oscontroller1.{{ domain_name }}
+ hostname: oscontroller1.{{ domain_name }}
+ short_name: oscontroller1
+ type: controller
+ host_type: baremetal
+ hostgroup: Controller_Network_ODL
+ mac_address: "10:23:45:67:89:AC"
+ bmc_ip: 10.4.17.3
+ bmc_mac: "10:23:45:67:88:AC"
+ bmc_user: root
+ bmc_pass: root
+ private_ip: controller1_private
+ admin_ip: controller1_admin
+ private_mac: "10:23:45:67:87:AC"
+ ansible_ssh_pass: "Op3nStack"
+ admin_password: "octopus"
+ cpus: 2
+ memory: 4096
+ disk: 40
+ groups:
+ - controller
+ - foreman_nodes
+ - puppet
+ - rdo
+ - neutron
+workaround_mysql_centos7: true
+distro:
+ name: centos
+ centos:
+ '7.0':
+ repos: []
+ short_name: c
+ short_version: 70
+ version: '7.0'
+ rhel:
+ '7.0':
+ kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/
+ repos:
+ - section: rhel7-server-rpms
+ name: Packages for RHEL 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-update-rpms
+ name: Update Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/
+ gpgcheck: 0
+ - section: rhel-7-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/
+ gpgcheck: 0
+ - section: rhel-7-server-extras-rpms
+ name: Optional Packages for Enterprise Linux 7 - $basearch
+ baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/
+ gpgcheck: 0
+ '6.5':
+ kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/
+ repos:
+ - section: rhel6.5-server-rpms
+ name: Packages for RHEL 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server
+ gpgcheck: 0
+ - section: rhel-6.5-server-update-rpms
+ name: Update Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/
+ gpgcheck: 0
+ - section: rhel-6.5-server-optional-rpms
+ name: Optional Packages for Enterprise Linux 6.5 - $basearch
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os
+ gpgcheck: 0
+ - section: rhel6.5-server-rpms-32bit
+ name: Packages for RHEL 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-update-rpms-32bit
+ name: Update Packages for Enterprise Linux 6.5 - i686
+ baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/
+ gpgcheck: 0
+ enabled: 1
+ - section: rhel-6.5-server-optional-rpms-32bit
+ name: Optional Packages for Enterprise Linux 6.5 - i386
+ baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os
+ gpgcheck: 0
+ enabled: 1
+ subscription:
+ username: REPLACE_ME
+ password: HWj8TE28Qi0eP2c
+ pool: 8a85f9823e3d5e43013e3ddd4e2a0977
+ config:
+ selinux: permissive
+ ntp_server: 0.pool.ntp.org
+ dns_servers:
+ - 10.4.1.1
+ - 10.4.0.2
+ reboot_delay: 1
+ initial_boot_timeout: 180
+node:
+ prefix:
+ - rdo
+ - pupt
+ - ffqiotcxz1
+ - null
+product:
+ repo_type: production
+ name: rdo
+ short_name: rdo
+ rpm:
+ CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
+ short_version: ju
+ repo:
+ production:
+ CentOS:
+ 7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ Fedora:
+ '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20
+ '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21
+ RedHat:
+ '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6
+ '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7
+ version: juno
+ config:
+ enable_epel: y
+ short_repo: prod
+tester:
+ name: tempest
+distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' '
+job:
+ verbosity: 1
+ archive:
+ - '{{ tempest.dir }}/etc/tempest.conf'
+ - '{{ tempest.dir }}/etc/tempest.conf.sample'
+ - '{{ tempest.dir }}/*.log'
+ - '{{ tempest.dir }}/*.xml'
+ - /root/
+ - /var/log/
+ - /etc/nova
+ - /etc/ceilometer
+ - /etc/cinder
+ - /etc/glance
+ - /etc/keystone
+ - /etc/neutron
+ - /etc/ntp
+ - /etc/puppet
+ - /etc/qpid
+ - /etc/qpidd.conf
+ - /root
+  - /etc/yum.repos.d
+topology:
+ name: multinode
+ short_name: mt
+workaround_neutron_ovs_udev_loop: true
+workaround_glance_table_utf8: false
+verbosity:
+ debug: 0
+ info: 1
+ warning: 2
+ warn: 2
+ errors: 3
+provisioner:
+ username: admin
+ network:
+ type: nova
+ name: external
+ skip: skip_provision
+ foreman_url: https://10.2.84.2/api/v2/
+ password: octopus
+ type: foreman
+workaround_nova_compute_fix: false
+workarounds:
+ enabled: true
+
diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml
index 9e3d053b5..9b3a4d4bb 100644
--- a/foreman/ci/reload_playbook.yml
+++ b/foreman/ci/reload_playbook.yml
@@ -14,3 +14,4 @@
delay=60
timeout=180
sudo: false
+ - pause: minutes=1
diff --git a/foreman/ci/resize_lvm.sh b/foreman/ci/resize_lvm.sh
new file mode 100755
index 000000000..64a9c6252
--- /dev/null
+++ b/foreman/ci/resize_lvm.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+
+#script for resizing volumes in Foreman/QuickStack VM
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#Vagrantfile uses resize_lvm.sh
+#
+#Pre-requisites:
+#Vagrant box disk size already resized
+#Partition already resized
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+##END VARS
+
+echo "${blue}Resizing physical volume${reset}"
+if ! pvresize /dev/sda2; then
+ echo "${red}Unable to resize physical volume${reset}"
+ exit 1
+else
+  new_part_size=`pvdisplay | grep -Eo "PV Size\s*[0-9]+\." | awk '{print $3}' | tr -d .`
+ echo "${blue}New physical volume size: ${new_part_size}${reset}"
+fi
+
+echo "${blue}Resizing logical volume${reset}"
+if ! lvextend /dev/mapper/centos-root -r -l +100%FREE; then
+ echo "${red}Unable to resize logical volume${reset}"
+ exit 1
+else
+ new_fs_size=`df -h | grep centos-root | awk '{print $2}'`
+ echo "${blue}Filesystem resized to: ${new_fs_size}${reset}"
+fi
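+
+##illustrative check after a successful run:
+##  df -h / && pvdisplay   #root filesystem and PV should both show the new size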
diff --git a/foreman/ci/resize_partition.sh b/foreman/ci/resize_partition.sh
new file mode 100755
index 000000000..4c5581dd2
--- /dev/null
+++ b/foreman/ci/resize_partition.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+#script for extending disk partition in Foreman/QuickStack VM
+#author: Tim Rozet (trozet@redhat.com)
+#
+#Uses Vagrant and VirtualBox
+#Vagrantfile uses resize_partition.sh
+#
+#Pre-requisites:
+#Vagrant box disk size already resized
+
+##VARS
+reset=`tput sgr0`
+blue=`tput setaf 4`
+red=`tput setaf 1`
+green=`tput setaf 2`
+
+##END VARS
+
+echo "${blue}Extending partition...${reset}"
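+#the fdisk keystrokes fed in below do the following:
+#  d, 2           delete partition 2
+#  n, p, <blanks> recreate it as a primary partition, accepting the defaults for
+#                 number, first and last sector so it grows to the end of the disk
+#  p              print the new table
+#  t, 2, 8e       set the partition type back to 8e (Linux LVM)
+#  w              write changes; fdisk may exit non-zero here, hence the trailing true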
+echo "d
+2
+n
+p
+
+
+
+p
+t
+2
+8e
+w
+"|fdisk /dev/sda; true
diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh
index d0bba6452..ef2b325ce 100755
--- a/foreman/ci/vm_nodes_provision.sh
+++ b/foreman/ci/vm_nodes_provision.sh
@@ -18,6 +18,8 @@ green=`tput setaf 2`
host_name=REPLACE
dns_server=REPLACE
+host_ip=REPLACE
+domain_name=REPLACE
##END VARS
##set hostname
@@ -31,27 +33,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then
systemctl restart NetworkManager
fi
-if ! ping www.google.com -c 5; then
+##modify /etc/resolv.conf to point to foreman
+echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}"
+cat > /etc/resolv.conf << EOF
+search $domain_name
+nameserver $dns_server
+nameserver 8.8.8.8
+
+EOF
+
+##modify /etc/hosts to add own IP for rabbitmq workaround
+host_short_name=`echo $host_name | cut -d . -f 1`
+echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}"
+cat > /etc/hosts << EOF
+$host_ip $host_short_name $host_name
+127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+EOF
+
+if ! ping www.google.com -c 5; then
echo "${red} No internet connection, check your route and DNS setup ${reset}"
exit 1
fi
-# Install EPEL repo for access to many other yum repos
-# Major version is pinned to force some consistency for Arno
-yum install -y epel-release-7*
+##install EPEL
+if ! yum repolist | grep "epel/"; then
+ if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then
+    printf '%s\n' 'vm_nodes_provision.sh: Unable to configure EPEL repo' >&2
+ exit 1
+ fi
+else
+ printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.'
+fi
-# Update device-mapper-libs, needed for libvirtd on compute nodes
-# Major version is pinned to force some consistency for Arno
-if ! yum -y upgrade device-mapper-libs-1*; then
+##install device-mapper-libs
+##needed for libvirtd on compute nodes
+if ! yum -y upgrade device-mapper-libs; then
echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}"
fi
-# Install other required packages
-# Major version is pinned to force some consistency for Arno
echo "${blue} Installing Puppet ${reset}"
-if ! yum install -y puppet-3*; then
- printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2
- exit 1
+##install puppet
+if ! yum list installed | grep -i puppet; then
+ if ! yum -y install puppet; then
+ printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2
+ exit 1
+ fi
fi
echo "${blue} Configuring puppet ${reset}"
@@ -68,10 +95,10 @@ pluginsync = true
report = true
ignoreschedules = true
daemon = false
-ca_server = foreman-server.opnfv.com
+ca_server = foreman-server.$domain_name
certname = $host_name
environment = production
-server = foreman-server.opnfv.com
+server = foreman-server.$domain_name
runinterval = 600
EOF
@@ -79,13 +106,13 @@ EOF
# Setup puppet to run on system reboot
/sbin/chkconfig --level 345 puppet on
-/usr/bin/puppet agent --config /etc/puppet/puppet.conf -o --tags no_such_tag --server foreman-server.opnfv.com --no-daemonize
+/usr/bin/puppet agent --config /etc/puppet/puppet.conf -o --tags no_such_tag --server foreman-server.$domain_name --no-daemonize
sync
# Inform the build system that we are done.
echo "Informing Foreman that we are built"
-wget -q -O /dev/null --no-check-certificate http://foreman-server.opnfv.com:80/unattended/built
+wget -q -O /dev/null --no-check-certificate http://foreman-server.$domain_name:80/unattended/built
echo "Starting puppet"
systemctl start puppet
diff --git a/foreman/docs/src/installation-instructions.rst b/foreman/docs/src/installation-instructions.rst
index 2ac872d13..73b900e58 100644
--- a/foreman/docs/src/installation-instructions.rst
+++ b/foreman/docs/src/installation-instructions.rst
@@ -1,6 +1,6 @@
-=======================================================================================================
-OPNFV Installation instructions for the Arno release of OPNFV when using Foreman as a deployment tool
-=======================================================================================================
+=========================================================================================================
+OPNFV Installation Instructions for the Arno SR1 Release of OPNFV when using Foreman as a deployment tool
+=========================================================================================================
.. contents:: Table of Contents
@@ -10,15 +10,19 @@ OPNFV Installation instructions for the Arno release of OPNFV when using Foreman
Abstract
========
-This document describes how to install the Arno release of OPNFV when using Foreman/Quickstack as a deployment tool covering it's limitations, dependencies and required system resources.
+This document describes how to install the Arno SR1 release of OPNFV when using Foreman/Quickstack as
+a deployment tool, covering its limitations, dependencies and required system resources.
License
=======
-Arno release of OPNFV when using Foreman as a deployment tool Docs (c) by Tim Rozet (RedHat)
+Arno SR1 release of OPNFV when using Foreman as a deployment tool Docs (c) by Tim Rozet (RedHat)
-Arno release of OPNFV when using Foreman as a deployment tool Docs are licensed under a Creative Commons Attribution 4.0 International License. You should have received a copy of the license along with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
+Arno SR1 release of OPNFV when using Foreman as a deployment tool Docs are licensed under a Creative
+Commons Attribution 4.0 International License. You should have received a copy of the license along
+with this. If not, see <http://creativecommons.org/licenses/by/4.0/>.
-Version history
+Version History
===================
+--------------------+--------------------+--------------------+--------------------+
@@ -37,25 +41,43 @@ Version history
| 2015-06-03 | 0.0.4 | Ildiko Vancsa | Minor changes |
| | | (Ericsson) | |
+--------------------+--------------------+--------------------+--------------------+
+| 2015-09-10 | 0.2.0 | Tim Rozet | Update to SR1 |
+| | | (Red Hat) | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-09-25 | 0.2.1 | Randy Levensalor | Added CLI |
+| | | (CableLabs) | verification |
++--------------------+--------------------+--------------------+--------------------+
Introduction
============
-This document describes the steps to install an OPNFV Arno reference platform, as defined by the Bootstrap/Getting-Started (BGS) Project using the Foreman/QuickStack installer.
+This document describes the steps to install an OPNFV Arno SR1 reference platform, as defined by the
+Bootstrap/Getting-Started (BGS) Project using the Foreman/QuickStack installer.
The audience is assumed to have a good background in networking and Linux administration.
Preface
=======
-Foreman/QuickStack uses the Foreman Open Source project as a server management tool, which in turn manages and executes Genesis/QuickStack. Genesis/QuickStack consists of layers of Puppet modules that are capable of provisioning the OPNFV Target System (3 controllers, n number of compute nodes).
+Foreman/QuickStack uses the Foreman Open Source project as a server management tool, which in turn
+manages and executes Genesis/QuickStack. Genesis/QuickStack consists of layers of Puppet modules that
+are capable of provisioning the OPNFV Target System (3 controllers, n number of compute nodes).
-The Genesis repo contains the necessary tools to get install and deploy an OPNFV target system using Foreman/QuickStack. These tools consist of the Foreman/QuickStack bootable ISO (``arno.2015.1.0.foreman.iso``), and the automatic deployment script (``deploy.sh``).
+The Genesis repo contains the necessary tools to install and deploy an OPNFV target system using
+Foreman/QuickStack. These tools consist of the Foreman/QuickStack bootable ISO
+(``arno.2015.2.0.foreman.iso``), and the automatic deployment script (``deploy.sh``).
-An OPNFV install requires a "Jumphost" in order to operate. The bootable ISO will allow you to install a customized CentOS 7 release to the Jumphost, which then gives you the required packages needed to run ``deploy.sh``. If you already have a Jumphost with CentOS 7 installed, you may choose to ignore the ISO step and instead move directly to running ``deploy.sh``. In this case, ``deploy.sh`` will install the necessary packages for you in order to execute.
+An OPNFV install requires a "Jumphost" in order to operate. The bootable ISO will allow you to
+install a customized CentOS 7 release to the Jumphost, which then gives you the required packages needed to
+run ``deploy.sh``. If you already have a Jumphost with CentOS 7 installed, you may choose to ignore
+the ISO step and instead move directly to cloning the git repository and running ``deploy.sh``. In
+this case, ``deploy.sh`` will install the necessary packages for you in order to execute.
-``deploy.sh`` installs Foreman/QuickStack VM server using Vagrant with VirtualBox as its provider. This VM is then used to provision the OPNFV target system (3 controllers, n compute nodes). These nodes can be either virtual or bare metal. This guide contains instructions for installing both.
+``deploy.sh`` installs Foreman/QuickStack VM server using Vagrant with VirtualBox as its provider.
+This VM is then used to provision the OPNFV target system (3 controllers, n compute nodes). These
+nodes can be either virtual or bare metal. This guide contains instructions for installing both.
Setup Requirements
==================
@@ -71,26 +93,32 @@ The Jumphost requirements are outlined below:
3. libvirt or other hypervisors disabled (no kernel modules loaded).
-4. 3-4 NICs, untagged (no 802.1Q tagging), with IP addresses.
+4. 3-4 NICs for a bare metal deployment (only 1 NIC is required for a virtual deployment), untagged
+   (no 802.1Q tagging), with IP addresses.
5. Internet access for downloading packages, with a default gateway configured.
-6. 4 GB of RAM for a bare metal deployment, 24 GB of RAM for a VM deployment.
+6. 4 GB of RAM for a bare metal deployment, 18 GB (HA) or 8 GB (non-HA) of RAM for a VM
+ deployment.
Network Requirements
--------------------
Network requirements include:
-1. No DHCP or TFTP server running on networks used by OPNFV.
+1. No DHCP or TFTP server running on networks used by OPNFV (bare metal deployment only).
-2. 3-4 separate VLANs (untagged) with connectivity between Jumphost and nodes (bare metal deployment only). These make up the admin, private, public and optional storage networks.
+2. 1, 3, or 4 separate VLANs (untagged) with connectivity between Jumphost and nodes (bare metal
+ deployment only). These make up the admin, private, public and optional storage networks. If
+   only 1 VLAN network is used for bare metal, then all of the previously listed logical networks
+   will be consolidated to that single network.
-3. Lights out OOB network access from Jumphost with IPMI node enabled (bare metal deployment only).
+3. Lights out OOB network access from Jumphost with IPMI node enabled (bare metal deployment
+ only).
4. Admin or public network has Internet access, meaning a gateway and DNS availability.
-*Note: Storage network will be consolidated to the private network if only 3 networks are used.*
+**Note: Storage network will be consolidated to the private network if only 3 networks are used.**
Bare Metal Node Requirements
----------------------------
@@ -116,46 +144,85 @@ In order to execute a deployment, one must gather the following information:
4. MAC address of private interfaces on 3 nodes that will be controllers.
+**Note: For a single NIC/network bare metal deployment, the MAC address of the admin and private
+interfaces will be the same.**
Installation High-Level Overview - Bare Metal Deployment
========================================================
-The setup presumes that you have 6 bare metal servers and have already setup connectivity on at least 3 interfaces for all servers via a TOR switch or other network implementation.
+The setup presumes that you have 6 bare metal servers and have already set up connectivity on either
+1 or 3 interfaces for all servers via a TOR switch or other network implementation.
-The physical TOR switches are **not** automatically configured from the OPNFV reference platform. All the networks involved in the OPNFV infrastructure as well as the provider networks and the private tenant VLANs needs to be manually configured.
+The physical TOR switches are **not** automatically configured from the OPNFV reference platform. All
+the networks involved in the OPNFV infrastructure as well as the provider networks and the private
+tenant VLANs need to be manually configured.
-The Jumphost can be installed using the bootable ISO. The Jumphost should then be configured with an IP gateway on its admin or public interface and configured with a working DNS server. The Jumphost should also have routable access to the lights out network.
+The Jumphost can be installed using the bootable ISO. The Jumphost should then be configured with an
+IP gateway on its admin or public interface and configured with a working DNS server. The Jumphost
+should also have routable access to the lights out network.
-``deploy.sh`` is then executed in order to install the Foreman/QuickStack Vagrant VM. ``deploy.sh`` uses a configuration file with YAML format in order to know how to install and provision the OPNFV target system. The information gathered under section `Execution Requirements (Bare Metal Only)`_ is put into this configuration file.
+``deploy.sh`` is then executed in order to install the Foreman/QuickStack Vagrant VM. ``deploy.sh``
+uses a configuration file with YAML format in order to know how to install and provision the OPNFV
+target system. The information gathered under section `Execution Requirements (Bare Metal Only)`_
+is put into this configuration file.
-``deploy.sh`` brings up a CentOS 7 Vagrant VM, provided by VirtualBox. The VM then executes an Ansible project called Khaleesi in order to install Foreman and QuickStack. Once the Foreman/QuickStack VM is up, Foreman will be configured with the nodes' information. This includes MAC address, IPMI, OpenStack type (controller, compute, OpenDaylight controller) and other information. At this point Khaleesi makes a REST API call to Foreman to instruct it to provision the hardware.
+``deploy.sh`` brings up a CentOS 7 Vagrant VM, provided by VirtualBox. The VM then executes an
+Ansible project called Khaleesi in order to install Foreman and QuickStack. Once the
+Foreman/QuickStack VM is up, Foreman will be configured with the nodes' information. This includes
+MAC address, IPMI, OpenStack type (controller, compute, OpenDaylight controller) and other
+information.
+At this point Khaleesi makes a REST API call to Foreman to instruct it to provision the hardware.
-Foreman will then reboot the nodes via IPMI. The nodes should already be set to PXE boot first off the admin interface. Foreman will then allow the nodes to PXE and install CentOS 7 as well as Puppet. Foreman/QuickStack VM server runs a Puppet Master and the nodes query this master to get their appropriate OPNFV configuration. The nodes will then reboot one more time and once back up, will DHCP on their private, public and storage NICs to gain IP addresses. The nodes will now check in via Puppet and start installing OPNFV.
+Foreman will then reboot the nodes via IPMI. The nodes should already be set to PXE boot first off
+the admin interface. Foreman will then allow the nodes to PXE and install CentOS 7 as well as
+Puppet. The Foreman/QuickStack VM runs a Puppet Master, and the nodes query this master to get their
+appropriate OPNFV configuration. The nodes will then reboot one more time and, once back up, will
+DHCP on their private, public and storage NICs to gain IP addresses. The nodes will now check in via
+Puppet and start installing OPNFV.
-Khaleesi will wait until these nodes are fully provisioned and then return a success or failure based on the outcome of the Puppet application.
+Khaleesi will wait until these nodes are fully provisioned and then return a success or failure based
+on the outcome of the Puppet application.
Installation High-Level Overview - VM Deployment
================================================
-The VM nodes deployment operates almost the same way as the bare metal deployment with a few differences. ``deploy.sh`` still installs Foreman/QuickStack VM the exact same way, however the part of the Khaleesi Ansible playbook which IPMI reboots/PXE boots the servers is ignored. Instead, ``deploy.sh`` brings up N number more Vagrant VMs (where N is 3 control nodes + n compute). These VMs already come up with CentOS 7 so instead of re-provisioning the entire VM, ``deploy.sh`` initiates a small Bash script that will signal to Foreman that those nodes are built and install/configure Puppet on them.
+The VM nodes deployment operates almost the same way as the bare metal deployment, with a few
+differences. ``deploy.sh`` still installs the Foreman/QuickStack VM the exact same way; however, the
+part of the Khaleesi Ansible playbook which IPMI reboots/PXE boots the servers is ignored. Instead,
+``deploy.sh`` brings up N more Vagrant VMs (where N is 3 control nodes + n compute nodes). These VMs
+already come up with CentOS 7, so instead of re-provisioning the entire VM, ``deploy.sh`` initiates a
+small Bash script that signals to Foreman that those nodes are built, and installs/configures Puppet
+on them.
To Foreman these nodes look like they have just been built, and they register the same way as bare metal nodes.
+VM deployment will automatically use the default gateway interface on the host for all of the VMs'
+internet access, by bridging the VMs' public-network NICs to it. The other networks (admin, private
+and storage) will all be created as internal VirtualBox networks. Therefore only a single interface
+on the host is needed for VM deployment.
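+
+If you want to see which host interfaces VirtualBox can bridge to (an optional aside; the
+``VBoxManage`` CLI ships with the required VirtualBox package), run:
+
+    ``VBoxManage list bridgedifs``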
+
Installation Guide - Bare Metal Deployment
==========================================
-This section goes step-by-step on how to correctly install and provision the OPNFV target system to bare metal nodes.
+This section goes step-by-step on how to correctly install and provision the OPNFV target system to
+bare metal nodes.
Install Bare Metal Jumphost
---------------------------
-1. If your Jumphost does not have CentOS 7 already on it, or you would like to do a fresh install, then download the Foreman/QuickStack bootable ISO <http://artifacts.opnfv.org/arno.2015.1.0/foreman/arno.2015.1.0.foreman.iso> here.
+1. If your Jumphost does not have CentOS 7 already on it, or you would like to do a fresh install,
+ then download the Foreman/QuickStack bootable ISO
+ `here <http://artifacts.opnfv.org/arno.2015.2.0/foreman/arno.2015.2.0.foreman.iso>`_. If you
+   already have a CentOS 7 install that you would like to use, then go to step 3.
2. Boot the ISO off of a USB or other installation media and walk through installing OPNFV CentOS 7.
-3. After OS is installed login to your Jumphost as root.
+3. After the OS is installed, log in to your Jumphost as root. If ``/root/genesis`` does not exist,
+   then ``git clone -b arno.2015.2.0 https://gerrit.opnfv.org/gerrit/genesis /root/genesis``
-4. Configure IP addresses on 3-4 interfaces that you have selected as your admin, private, public, and storage (optional) networks.
+4. Configure IP addresses on 3-4 interfaces that you have selected as your admin, private, public,
+ and storage (optional) networks.
5. Configure the IP gateway to the Internet, preferably on the public interface.
@@ -174,15 +241,26 @@ Install Bare Metal Jumphost
Creating an Inventory File
--------------------------
-You now need to take the MAC address/IPMI info gathered in section `Execution Requirements (Bare Metal Only)`_ and create the YAML inventory (also known as configuration) file for ``deploy.sh``.
+You now need to take the MAC address/IPMI info gathered in section
+`Execution Requirements (Bare Metal Only)`_ and create the YAML inventory (also known as
+configuration) file for ``deploy.sh``.
-1. Copy the ``opnfv_ksgen_settings.yml`` file from ``/root/bgs_vagrant/`` to another directory and rename it to be what you want EX: ``/root/my_ksgen_settings.yml``
+1. Copy the ``opnfv_ksgen_settings.yml`` file (for HA) or ``opnfv_ksgen_settings_no_HA.yml`` from
+   ``/root/genesis/foreman/ci/`` to another directory and rename it to whatever you want, for
+   example: ``/root/my_ksgen_settings.yml``
-2. Edit the file in your favorite editor. There is a lot of information in this file, but you really only need to be concerned with the "nodes:" dictionary.
+2. Edit the file in your favorite editor. There is a lot of information in this file, but you
+ really only need to be concerned with the "nodes:" dictionary.
-3. The nodes dictionary contains each bare metal host you want to deploy. You can have 1 or more compute nodes and must have 3 controller nodes (these are already defined for you). It is optional at this point to add more compute nodes into the dictionary. You must use a different name, hostname, short_name and dictionary keyname for each node.
+3. The nodes dictionary contains each bare metal host you want to deploy. You can have 1 or more
+   compute nodes and must have 3 controller nodes (these are already defined for you) if ha_flag is
+   set to true. If ha_flag is set to false, please only define 1 controller node. It is optional at
+   this point to add more compute nodes into the dictionary. You must use a different name,
+   hostname, short_name and dictionary keyname for each node.
-4. Once you have decided on your node definitions you now need to modify the MAC address/IPMI info dependent on your hardware. Edit the following values for each node:
+4. Once you have decided on your node definitions, you now need to modify the MAC address/IPMI info
+   to match your hardware. Edit the following values for each node (a sample node entry is sketched
+   after this list):
- ``mac_address``: change to MAC address of that node's admin NIC (defaults to 1st NIC)
- ``bmc_ip``: change to IP Address of BMC (out-of-band)/IPMI IP
@@ -194,56 +272,79 @@ You now need to take the MAC address/IPMI info gathered in section `Execution Re
- ``private_mac`` - change to MAC address of node's private NIC (default to 2nd NIC)
-6. Save your changes.
+6. You may also define a unique domain name by editing the ``domain_name`` global parameter.
+
+7. Save your changes.
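+
+As a rough illustration (all values are placeholders, and any key not named in steps 3-5 above is an
+assumption about the settings file layout), an edited compute node entry might look like::
+
+    nodes:
+      compute:
+        name: oscompute11.opnfv.com
+        hostname: oscompute11.opnfv.com
+        short_name: oscompute11
+        mac_address: "10:23:45:67:89:AB"   # admin NIC MAC (1st NIC) of this node
+        bmc_ip: 172.30.8.74                # out-of-band/IPMI IP of the BMC
+        bmc_user: root                     # assumed IPMI credential keys
+        bmc_pass: root
+
+Controller entries additionally carry the ``private_mac`` key described in step 5.
+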
Running ``deploy.sh``
---------------------
-You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/tmp/`` directory to store its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will be running out of ``/tmp/bgs_vagrant``.
+You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/var/opt/opnfv/`` directory to store
+its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will be running out of
+``/var/opt/opnfv/foreman_vm/``.
-It is also recommended that you power off your nodes before running ``deploy.sh`` If there are DHCP servers or other network services that are on those nodes it may conflict with the installation.
+It is also recommended that you power off your nodes before running ``deploy.sh``. If there are DHCP
+servers or other network services on those nodes, they may conflict with the installation.
Follow the steps below to execute:
-1. ``cd /root/bgs_vagrant``
+1. ``cd /root/genesis/foreman/ci/``
-2. ``./deploy.sh -base_config </root/my_ksgen_settings.yml>``
+2. ``./deploy.sh -base_config /root/my_ksgen_settings.yml``
-3. It will take about 20-25 minutes to install Foreman/QuickStack VM. If something goes wrong during this part of the process, it is most likely a problem with the setup of your Jumphost. You will also notice different outputs in your shell. When you see messages that say "TASK:" or "PLAY:" this is Khalessi running and installing Foreman/QuickStack inside of your VM or deploying your nodes. Look for "PLAY [Deploy Nodes]" as a sign that Foreman/QuickStack is finished installing and now your nodes are being rebuilt.
+**Note: This assumes the default detection of at least 3 VLANs/interfaces configured on your
+jumphost, with interface assignment defaulting to NIC order (1st Admin, 2nd Private, 3rd Public). If
+you wish to use a single interface for a bare metal install, see the help output for
+"-single_baremetal_nic". If you would like to specify the NIC mapping to logical networks, see the
+help output for "-admin_nic", "-private_nic", "-public_nic" and "-storage_nic".**
-4. Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV. When complete you will see "Finished: SUCCESS"
+3. It will take about 20-25 minutes to install Foreman/QuickStack VM. If something goes wrong during
+ this part of the process, it is most likely a problem with the setup of your Jumphost. You will
+ also notice different outputs in your shell. When you see messages that say "TASK:" or "PLAY:"
+   this is Khaleesi running and installing Foreman/QuickStack inside of your VM or deploying your
+ nodes. Look for "PLAY [Deploy Nodes]" as a sign that Foreman/QuickStack is finished installing
+ and now your nodes are being rebuilt.
-.. _setup_verify:
+4. Your nodes will take 40-60 minutes to re-install CentOS 7 and install/configure OPNFV. When
+   complete you will see "Finished: SUCCESS".
Verifying the Setup
-------------------
-Now that the installer has finished it is a good idea to check and make sure things are working correctly. To access your Foreman/QuickStack VM:
-
-1. ``cd /tmp/bgs_vagrant``
+Now that the installer has finished, it is a good idea to check and make sure things are working
+correctly. To access your Foreman/QuickStack VM:
-2. ``vagrant ssh`` (password is "vagrant")
+1. As root: ``cd /var/opt/opnfv/foreman_vm/``
-3. You are now in the VM and can check the status of Foreman service, etc. For example: ``systemctl status foreman``
+2. ``vagrant ssh`` (no password is required)
-4. Type "exit" and leave the Vagrant VM. Now execute: ``cat /tmp/bgs_vagrant/opnfv_ksgen_settings.yml | grep foreman_url``
+3. You are now in the VM and can check the status of Foreman service, etc. For example:
+ ``systemctl status foreman``
-5. This is your Foreman URL on your public interface. You can go to your web browser, ``http://<foreman_ip>``, login will be "admin"/"octopus". This way you can look around in Foreman and check that your hosts are in a good state, etc.
+4. Type "exit" and leave the Vagrant VM. Now execute:
+ ``cat /var/opt/opnfv/foreman_vm/opnfv_ksgen_settings.yml | grep foreman_url``
-6. In Foreman GUI, you can now go to Infrastructure -> Global Parameters. This is a list of all the variables being handed to Puppet for configuring OPNFV. Look for ``horizon_public_vip``. This is your IP address to Horizon GUI.
+5. This is your Foreman URL on your public interface. You can go to your web browser,
+ ``http://<foreman_ip>``, login will be "admin"/"octopus". This way you can look around in
+ Foreman and check that your hosts are in a good state, etc.
- **Note: You can find out more about how to ues Foreman by going to http://www.theforeman.org/ or by watching a walkthrough video here: https://bluejeans.com/s/89gb/**
+6. In Foreman GUI, you can now go to Infrastructure -> Global Parameters. This is a list of all the
+ variables being handed to Puppet for configuring OPNFV. Look for ``horizon_public_vip``. This is
+ your IP address to Horizon GUI.
-7. Now go to your web browser and insert the Horizon public VIP. The login will be "admin"/"octopus".
+**Note: You can find out more about how to use Foreman by going to http://www.theforeman.org/ or
+by watching a walkthrough video here: https://bluejeans.com/s/89gb/**
-8. You are now able to follow the `OpenStack Verification <openstack_verify_>`_ section.
+7. Now go to your web browser and insert the Horizon public VIP. The login will be
+ "admin"/"octopus".
-.. _openstack_verify:
+8. You are now able to follow the `OpenStack Verification`_ section.
OpenStack Verification
----------------------
-Now that you have Horizon access, let's make sure OpenStack the OPNFV target system are working correctly:
+Now that you have Horizon access, let's make sure OpenStack and the OPNFV target system are working
+correctly:
1. In Horizon, click Project -> Compute -> Volumes, Create Volume
@@ -251,7 +352,8 @@ Now that you have Horizon access, let's make sure OpenStack the OPNFV target sys
3. Now in the left pane, click Compute -> Images, click Create Image
-4. Insert a name "cirros", Insert an Image Location ``http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img``
+4. Insert a name "cirros", Insert an Image Location
+ ``http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img``
5. Select format "QCOW2", select Public, then hit Create Image
@@ -263,75 +365,244 @@ Now that you have Horizon access, let's make sure OpenStack the OPNFV target sys
9. Enter ``10.0.0.5,10.0.0.9`` under Allocation Pools, then hit Create
-10. Now go to Project -> Compute -> Instances, click Launch Instance
+**Note: You may also want to expand this pool by giving a larger range, or you can simply hit
+Create without entering anything, and the entire subnet range will be used for DHCP.**
+
+10. Go to Project -> Network -> Routers
+
+11. Click "provider_router". Then select "Add Interface"
+
+12. From the pop up menu, select test_subnet in the "Subnet" field. Press "Add interface"
+
+13. Verify your Network Topology looks correct in Project -> Network -> Network Topology
-11. Enter Instance Name "cirros1", select Instance Boot Source "Boot from image", and then select Image Name "cirros"
+14. Now go to Project -> Compute -> Instances, click Launch Instance
-12. Click Launch, status should show "Spawning" while it is being built
+15. Enter Instance Name "cirros1", select Instance Boot Source "Boot from image", and then select
+ Image Name "cirros"
-13. You can now repeat steps 11 and 12, but create a "cirros2" named instance
+16. Click Launch, status should show "Spawning" while it is being built
-14. Once both instances are up you can see their IP addresses on the Instances page. Click the Instance Name of cirros1.
+17. You can now repeat steps 15 and 16, but create a "cirros2" named instance
-15. Now click the "Console" tab and login as "cirros"/"cubswin" :)
+18. Once both instances are up you can see their IP addresses on the Instances page. Click the
+ Instance Name of cirros1.
-16. Verify you can ping the IP address of cirros2
+19. Now click the "Console" tab and login as "cirros"/"cubswin:)"
+
+20. Verify you can ping the IP address of cirros2
+
+21. Continue to the next steps to provide external network access to cirros1.
+
+22. Go to Project -> Compute -> Instances. From the drop down menu under "Actions" select
+ "Associate Floating IP"
+
+23. Press the "+" symbol next to the "IP Address" field. Select "Allocate IP" in the new pop up.
+
+24. You should now see an external IP address filled into the "IP Address" field. Click
+ "Associate".
+
+25. Now from your external network you should be able to ping/ssh to the floating IP address.
Congratulations, you have successfully installed OPNFV!
+OpenStack CLI Verification
+--------------------------
+
+This section is for users who do not have web access or prefer to use the command line rather
+than a web browser to validate the OpenStack installation. Do not run this if you have
+already completed the OpenStack verification, since this uses the same names.
+
+1. Install the OpenStack CLI tools or log in to one of the compute or control servers.
+
+2. Find the IP of the keystone public VIP. As root:
+
+ cat /var/opt/opnfv/foreman_vm/opnfv_ksgen_settings.yml | \
+ grep keystone_public_vip
+
+3. Set the environment variables. Substitute the keystone public VIP for <VIP> below.
+
+ | export OS_AUTH_URL=http://<VIP>:5000/v2.0
+ | export OS_TENANT_NAME="admin"
+ | export OS_USERNAME="admin"
+ | export OS_PASSWORD="octopus"
+
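+**Note: As a quick sanity check you can verify these credentials before continuing (assuming the
+Juno-era keystone CLI is installed) by running** ``keystone tenant-list``.
+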
+4. Load the CirrOS image into glance.
+
+ glance image-create --copy-from \
+ http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img \
+ --disk-format qcow2 --container-format bare --name 'CirrOS'
+
+5. Verify the image is downloaded. The status will be "active" when the download completes.
+
+ ``glance image-show CirrOS``
+
+6. Create a private tenant network.
+
+ ``neutron net-create test_network``
+
+7. Verify the network has been created by running the command below.
+
+ ``neutron net-show test_network``
+
+8. Create a subnet for the tenant network.
+
+ ``neutron subnet-create test_network --name test_subnet --dns-nameserver 8.8.8.8 10.0.0.0/24``
+
+9. Verify the subnet was created.
+
+ ``neutron subnet-show test_subnet``
+
+10. Add an interface from the test_subnet to the provider router.
+
+ ``neutron router-interface-add provider_router test_subnet``
+
+11. Verify the interface was added.
+
+ ``neutron router-port-list``
+
+12. Deploy a VM.
+
+ ``nova boot --flavor 1 --image CirrOS cirros1``
+
+13. Wait for the VM to complete booting. This can be checked by viewing the console log until a
+    login prompt appears.
+
+ ``nova console-log cirros1``
+
+14. Get the local IP of the VM.
+
+ ``nova show cirros1 | grep test_network``
+
+15. Get the port ID for the IP from the previous command, substituting that IP for ``10.0.0.2`` in
+    the command below. The port ID is the first series of numbers and letters in the output.
+
+ ``neutron port-list | grep 10.0.0.2 | awk ' { print $2 } '``
+
+16. Assign a floating IP to the VM. Substitute the port ID from the previous command for
+    <PORT_ID>.
+
+ ``neutron floatingip-create --port-id <PORT_ID> provider_network``
+
+17. Log into the VM. Substitute the floating_ip_address displayed in the output of the above
+    command for <FLOATING_IP>.
+
+ ``ssh cirros@<FLOATING_IP>``
+
+18. Logout and create a second VM.
+
+ ``nova boot --flavor 1 --image CirrOS cirros2``
+
+19. Get the IP for cirros2.
+
+ ``nova show cirros2 | grep test_network``
+
+20. Redo step 17 to log back into cirros1 and ping cirros2. Replace <CIRROS2> with the IP from the
+    previous step.
+
+ ``ping <CIRROS2>``
+
Installation Guide - VM Deployment
==================================
-This section goes step-by-step on how to correctly install and provision the OPNFV target system to VM nodes.
+This section goes step-by-step on how to correctly install and provision the OPNFV target system
+to VM nodes.
Install Jumphost
----------------
-Follow the instructions in the `Install Bare Metal Jumphost`_ section.
+Follow the instructions in the `Install Bare Metal Jumphost`_ section, except that you only need 1
+network interface on the host system with internet connectivity.
+
+Creating an Inventory File
+--------------------------
+
+Creating an inventory file is optional for virtual deployments. Since the nodes are virtual, you
+are welcome to use the provided opnfv_ksgen_settings files. You may also elect to customize your
+deployment. Those options include modifying the domain name of your deployment as well as
+allocating specific resources per node.
+
+Modifying VM resources is necessary for bigger virtual deployments in order to run more nova
+instances. To modify these resources, you can edit each of the following node parameters in the
+inventory file (a brief sketch follows this list):
+
+1. memory - set in KiB
+
+2. cpus - number of vcpus to allocate to this VM
+
+3. disk - size in GB (cannot be less than 40)
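+
+As a sketch (values are placeholders and the surrounding node key is an assumption about the file
+layout), a resized node entry might carry::
+
+    controller1:
+      memory: 4194304   # RAM in KiB; 4194304 KiB = 4 GiB
+      cpus: 4           # number of vcpus to allocate to this VM
+      disk: 40          # disk size in GB; cannot be less than 40
+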
Running ``deploy.sh``
----------------------------
+---------------------
-You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/tmp/`` directory to store its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will run out of ``/tmp/bgs_vagrant``. Your compute and subsequent controller nodes will run in:
+You are now ready to deploy OPNFV! ``deploy.sh`` will use your ``/var/opt/opnfv/`` directory to store
+its Vagrant VMs. Your Foreman/QuickStack Vagrant VM will run out of ``/var/opt/opnfv/foreman_vm/``.
+Your compute and subsequent controller nodes will run in:
-- ``/tmp/compute``
-- ``/tmp/controller1``
-- ``/tmp/controller2``
-- ``/tmp/controller3``
+- ``/var/opt/opnfv/compute``
+- ``/var/opt/opnfv/controller1``
+- ``/var/opt/opnfv/controller2``
+- ``/var/opt/opnfv/controller3``
-Each VM will be brought up and bridged to your Jumphost NICs. ``deploy.sh`` will first bring up your Foreman/QuickStack Vagrant VM and afterwards it will bring up each of the nodes listed above, in order.
+Each VM will be brought up and bridged to your Jumphost NIC for the public network. ``deploy.sh``
+will first bring up your Foreman/QuickStack Vagrant VM and afterwards it will bring up each of the
+nodes listed above, in order, controllers first.
Follow the steps below to execute:
-1. ``cd /root/bgs_vagrant``
+1. ``cd /root/genesis/foreman/ci/``
+
+2. ``./deploy.sh -virtual -static_ip_range <your_range>``, where <your_range> is a range of at
+   least 20 IP addresses (for non-HA you only need 5) that are usable on your public subnet.
+   ``Ex: -static_ip_range 192.168.1.101,192.168.1.120``
-2. ``./deploy.sh -virtual``
+**Note: You may also wish to use other options, like manually selecting the NIC to be used on your
+host, etc. Please use "deploy.sh -h" to see a full list of options available.**
-3. It will take about 20-25 minutes to install Foreman/QuickStack VM. If something goes wrong during this part of the process, it is most likely a problem with the setup of your Jumphost. You will also notice different outputs in your shell. When you see messages that say "TASK:" or "PLAY:" this is Khalessi running and installing Foreman/QuickStack inside of your VM or deploying your nodes. When you see "Foreman is up!", that means deploy will now move on to bringing up your other nodes.
+3. It will take about 20-25 minutes to install Foreman/QuickStack VM. If something goes wrong during
+ this part of the process, it is most likely a problem with the setup of your Jumphost. You will
+ also notice different outputs in your shell. When you see messages that say "TASK:" or "PLAY:"
+   this is Khaleesi running and installing Foreman/QuickStack inside of your VM or deploying your
+ nodes. When you see "Foreman is up!", that means deploy will now move on to bringing up your
+ other nodes.
-4. ``deploy.sh`` will now bring up your other nodes, look for logging messages like "Starting Vagrant Node <node name>", "<node name> VM is up!" These are indicators of how far along in the process you are. ``deploy.sh`` will start each Vagrant VM, then run provisioning scripts to inform Foreman they are built and initiate Puppet.
+4. ``deploy.sh`` will now bring up your other nodes. Look for logging messages like "Starting Vagrant
+ Node <node name>", "<node name> VM is up!" These are indicators of how far along in the process
+ you are. ``deploy.sh`` will start each Vagrant VM, then run provisioning scripts to inform
+ Foreman they are built and initiate Puppet.
-5. The speed at which nodes are provisioned is totally dependent on your Jumphost server specs. When complete you will see "All VMs are UP!"
+5. The speed at which nodes are provisioned is totally dependent on your Jumphost server specs. When
+ complete you will see "All VMs are UP!"
+
+6. The deploy will then print out the URL for your Foreman server as well as the URL to access
+   Horizon.
Verifying the Setup - VMs
-------------------------
-Follow the instructions in the `Verifying the Setup <setup_verify_>`_ section.
+Follow the instructions in the `Verifying the Setup`_ section.
-Also, for VM deployment you are able to easily access your nodes by going to ``/tmp/<node name>`` and then ``vagrant ssh`` (password is "vagrant"). You can use this to go to a controller and check OpenStack services, OpenDaylight, etc.
+Also, for VM deployment you are able to easily access your nodes by going to
+``/var/opt/opnfv/<node name>`` and then ``vagrant ssh`` (password is "vagrant"). You can use this to
+go to a controller and check OpenStack services, OpenDaylight, etc.
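+
+For example (the exact unit names are assumptions based on typical RDO Juno packaging, not something
+this guide guarantees), from a controller node you might run:
+
+    ``systemctl status openstack-nova-api``
+
+    ``systemctl status neutron-server``
+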
OpenStack Verification - VMs
----------------------------
-Follow the steps in `OpenStack Verification <openstack_verify_>`_ section.
+Follow the steps in the `OpenStack Verification`_ section.
Frequently Asked Questions
==========================
+Please see the `Arno FAQ <https://wiki.opnfv.org/releases/arno/faq>`_.
+
License
=======
-All Foreman/QuickStack and "common" entities are protected by the `Apache 2.0 License <http://www.apache.org/licenses/>`_.
+All Foreman/QuickStack and "common" entities are protected by the
+`Apache 2.0 License <http://www.apache.org/licenses/>`_.
References
==========
@@ -353,7 +624,15 @@ OpenStack
OpenDaylight
------------
-`OpenDaylight artifacts <http://www.opendaylight.org/software/downloads>`_
+Upstream OpenDaylight provides `a number of packaging and deployment options
+<https://wiki.opendaylight.org/view/Deployment>`_ meant for consumption by downstream projects like
+OPNFV.
+
+Currently, OPNFV Foreman uses `OpenDaylight's Puppet module
+<https://github.com/dfarrell07/puppet-opendaylight>`_, which in turn depends on `OpenDaylight's RPM
+<https://github.com/opendaylight/integration-packaging/tree/master/rpm>`_ hosted on the `CentOS
+Community Build System
+<http://cbs.centos.org/repos/nfv7-opendaylight-2-candidate/x86_64/os/Packages/>`_.
Foreman
-------
@@ -361,11 +640,10 @@ Foreman
`Foreman documentation <http://theforeman.org/documentation.html>`_
:Authors: Tim Rozet (trozet@redhat.com)
-:Version: 0.0.3
+:Version: 0.2.0
**Documentation tracking**
Revision: _sha1_
Build date: _date_
-
diff --git a/foreman/docs/src/release-notes.rst b/foreman/docs/src/release-notes.rst
index f9fcb37e5..613f56181 100644
--- a/foreman/docs/src/release-notes.rst
+++ b/foreman/docs/src/release-notes.rst
@@ -1,6 +1,6 @@
-===========================================================================================
-OPNFV Release Note for the Arno release of OPNFV when using Foreman as a deployment tool
-===========================================================================================
+=============================================================================================
+OPNFV Release Notes for the Arno SR1 release of OPNFV when using Foreman as a deployment tool
+=============================================================================================
.. contents:: Table of Contents
@@ -10,12 +10,14 @@ OPNFV Release Note for the Arno release of OPNFV when using Foreman as a deploy
Abstract
========
-This document provides the release notes for Arno release with the Foreman/QuickStack deployment toolchain.
+This document provides the release notes for Arno SR1 release with the Foreman/QuickStack deployment
+toolchain.
License
=======
-All Foreman/QuickStack and "common" entities are protected by the Apache License ( http://www.apache.org/licenses/ )
+All Foreman/QuickStack and "common" entities are protected by the Apache License
+( http://www.apache.org/licenses/ )
Version history
@@ -34,18 +36,31 @@ Version history
| 2015-06-03 | 0.1.2 | Tim Rozet | Minor Edits |
| | | | |
+--------------------+--------------------+--------------------+--------------------+
+| 2015-09-10 | 0.2.0 | Tim Rozet | Updated for SR1 |
+| | | | |
++--------------------+--------------------+--------------------+--------------------+
+| 2015-09-25 | 0.2.1 | Randy Levensalor | Added Workaround |
+| | | | for DHCP issue |
++--------------------+--------------------+--------------------+--------------------+
+
Important notes
===============
-This is the initial OPNFV Arno release that implements the deploy stage of the OPNFV CI pipeline.
+This is the OPNFV Arno SR1 release that implements the deploy stage of the OPNFV CI pipeline.
-Carefully follow the installation-instructions which guide a user on how to deploy OPNFV using Foreman/QuickStack installer.
+Carefully follow the installation-instructions which guide a user on how to deploy OPNFV using
+Foreman/QuickStack installer.
Summary
=======
-Arno release with the Foreman/QuickStack deployment toolchain will establish an OPNFV target system on a Pharos compliant lab infrastructure. The current definition of an OPNFV target system is and OpenStack Juno version combined with OpenDaylight version: Helium. The system is deployed with OpenStack High Availability (HA) for most OpenStack services. OpenDaylight is deployed in non-HA form as HA is not availble for Arno release. Ceph storage is used as Cinder backend, and is the only supported storage for Arno. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller node.
+Arno release with the Foreman/QuickStack deployment toolchain will establish an OPNFV target system on
+a Pharos compliant lab infrastructure. The current definition of an OPNFV target system is an
+OpenStack Juno version combined with OpenDaylight version: Helium. The system is deployed with
+OpenStack High Availability (HA) for most OpenStack services. OpenDaylight is deployed in non-HA form
+as HA is not available for the Arno SR1 release. Ceph storage is used as the Cinder backend, and is
+the only supported storage for Arno. Ceph is set up as 3 OSDs and 3 Monitors, one OSD+Mon per
+Controller node.
- Documentation is built by Jenkins
- .iso image is built by Jenkins
@@ -58,16 +73,16 @@ Release Data
| **Project** | genesis |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | genesis/arno.2015.1.0 |
+| **Repo/tag** | genesis/arno.2015.2.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | arno.2015.1.0 |
+| **Release designation** | arno.2015.2.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | 2015-06-04 |
+| **Release date** | 2015-09-23 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Arno release |
+| **Purpose of the delivery** | OPNFV Arno SR1 release |
| | |
+--------------------------------------+--------------------------------------+
@@ -76,7 +91,8 @@ Version change
Module version changes
~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of the Arno release with the Foreman/QuickStack deployment toolchain. It is based on following upstream versions:
+This is the Service Release 1 version of the Arno release with the Foreman/QuickStack deployment
+toolchain. It is based on the following upstream versions:
- OpenStack (Juno release)
@@ -87,10 +103,11 @@ This is the first tracked version of the Arno release with the Foreman/QuickStac
Document version changes
~~~~~~~~~~~~~~~~~~~~~~~~
-This is the first tracked version of Arno release with the Foreman/QuickStack deployment toolchain. The following documentation is provided with this release:
+This is the SR1 version of the Arno release with the Foreman/QuickStack deployment toolchain. The
+following documentation is provided with this release:
-- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0 (this document)
+- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 0.2.0
+- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 0.2.0 (this document)
Feature additions
~~~~~~~~~~~~~~~~~
@@ -99,8 +116,27 @@ Feature additions
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: BGS-4 | OPNFV base system install |
-| | using Foreman/Quickstack. |
+| JIRA: BGS-73 | Changes Virtual deployments to |
+| | only require 1 interface, and adds |
+|                                      | accessibility in China               |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-75 | Adds ability to specify number of |
+| | floating IPs |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-3 | clean now removes all VMs |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-4 | Adds ability to specify NICs to |
+| | bridge to on the jumphost |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-86 | Adds ability to specify domain name |
+| | for deployment |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-1 | Adds ability to specify VM resources |
+| | such as disk size, memory, vcpus |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-33 | Adds ability to use single interface |
+| | for baremetal installs |
+--------------------------------------+--------------------------------------+
Bug corrections
@@ -112,9 +148,60 @@ Bug corrections
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
+| JIRA: BGS-65 | Fixes external network bridge and |
+| | increases neutron quota limits |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-74 | Fixes verification of vbox drivers |
+| | |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-59 | Adds ODL Deployment stack docs to |
+| | Foreman Guide |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-60 | Migrates github bgs_vagrant project |
+| | into Genesis |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-89 | Fixes public allocation IP |
| | |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-71 | Adds check to ensure subnets are the |
+| | minimum size required |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-78 | Fixes Foreman clean to not hang and |
+| | now also removes libvirt |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-7 | Adds check to make sure 3 control |
+| | nodes are set when HA is enabled |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-68 | Adds check to make sure baremetal |
+| | nodes are powered off when deploying |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-14 | Fixes Vagrant base box to be opnfv |
| | |
+--------------------------------------+--------------------------------------+
+| JIRA: APEX-8 | Fixes puppet modules to come from |
+| | the Genesis repo |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-17 | Fixes clean to kill vagrant processes|
+| | correctly |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-2 | Removes default vagrant route from |
+| | virtual nodes |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-9 | Fixes external network to be created |
+| | by the services tenant |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-10 | Disables DHCP on external neutron |
+| | network |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-19 | Adds check to ensure provided arg |
+| | static_ip_range is correct |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-12 | Fixes horizon IP URL for non-HA |
+| | deployments |
++--------------------------------------+--------------------------------------+
+| JIRA: BGS-84 | Set default route to public |
+| | gateway |
++--------------------------------------+--------------------------------------+
Deliverables
------------
@@ -122,12 +209,12 @@ Deliverables
Software deliverables
~~~~~~~~~~~~~~~~~~~~~
Foreman/QuickStack@OPNFV .iso file
-deploy.sh - Automatically deploys Target OPNFV System to Bare Metal
+deploy.sh - Automatically deploys Target OPNFV System to Bare Metal or VMs
Documentation deliverables
~~~~~~~~~~~~~~~~~~~~~~~~~~
-- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0
-- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 1.0.0 (this document)
+- OPNFV Installation instructions for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 0.2.0
+- OPNFV Release Notes for the Arno release with the Foreman/QuickStack deployment toolchain - ver. 0.2.0 (this document)
Known Limitations, Issues and Workarounds
=========================================
@@ -153,27 +240,39 @@ Known issues
| **JIRA REFERENCE** | **SLOGAN** |
| | |
+--------------------------------------+--------------------------------------+
-| JIRA: BGS-13 | bridge br-ex is not auto configured |
-| | by puppet |
+| JIRA: APEX-13 | Keystone Config: bind host is wrong |
+| | for admin user |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-38 | Neutron fails to provide DHCP address|
+| | to instance |
+--------------------------------------+--------------------------------------+
Workarounds
-----------
+JIRA: APEX-38 - Neutron fails to provide DHCP address to instance
+
+1. Find the controller that is running the DHCP service. ssh to oscontroller[1-3] and run the
+   command below until it returns a namespace that starts with "qdhcp".
+
+ ``ip netns | grep qdhcp``
+
+2. Restart the neutron server and the neutron DHCP service.
+
+ ``systemctl restart neutron-server``
+
+ ``systemctl restart neutron-dhcp-agent``
+
+3. Restart the interface on the VM or restart the VM.
Test Result
===========
-The Arno release with the Foreman/QuickStack deployment toolchain has undergone QA test runs with the following results:
-
-+--------------------------------------+--------------------------------------+
-| **TEST-SUITE** | **Results:** |
-| | |
-+--------------------------------------+--------------------------------------+
-| **-** | **-** |
-+--------------------------------------+--------------------------------------+
+The Arno release with the Foreman/QuickStack deployment toolchain has undergone QA test runs with the
+following results:
+https://wiki.opnfv.org/arno_sr1_result_page?rev=1443626728
References
==========