Diffstat (limited to 'foreman/ci')
-rw-r--r--  foreman/ci/Vagrantfile                     |    9
-rwxr-xr-x  foreman/ci/bootstrap.sh                    |    5
-rwxr-xr-x  foreman/ci/clean.sh                        |  217
-rwxr-xr-x  foreman/ci/deploy.sh                       | 1437
-rw-r--r--  foreman/ci/opnfv_ksgen_settings.yml        |    5
-rw-r--r--  foreman/ci/opnfv_ksgen_settings_no_HA.yml  |  264
-rw-r--r--  foreman/ci/reload_playbook.yml             |    1
-rwxr-xr-x  foreman/ci/vm_nodes_provision.sh           |   50
8 files changed, 1453 insertions(+), 535 deletions(-)
diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile index 100e12d..5550976 100644 --- a/foreman/ci/Vagrantfile +++ b/foreman/ci/Vagrantfile @@ -12,7 +12,7 @@ Vagrant.configure(2) do |config| # Every Vagrant development environment requires a box. You can search for # boxes at https://atlas.hashicorp.com/search. - config.vm.box = "chef/centos-7.0" + config.vm.box = "opnfv/centos-7.0" # Disable automatic box update checking. If you disable this, then # boxes will only be checked for updates when the user runs @@ -41,6 +41,9 @@ Vagrant.configure(2) do |config| default_gw = "" nat_flag = false + # Disable dhcp flag + disable_dhcp_flag = false + # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third @@ -90,4 +93,8 @@ Vagrant.configure(2) do |config| config.vm.provision :shell, path: "nat_setup.sh" end config.vm.provision :shell, path: "bootstrap.sh" + if disable_dhcp_flag + config.vm.provision :shell, :inline => "systemctl stop dhcpd" + config.vm.provision :shell, :inline => "systemctl disable dhcpd" + end end diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh index 4bc22ed..c98f00e 100755 --- a/foreman/ci/bootstrap.sh +++ b/foreman/ci/bootstrap.sh @@ -25,8 +25,7 @@ green=`tput setaf 2` yum install -y epel-release-7* # Install other required packages -# Major version is pinned to force some consistency for Arno -if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then +if ! yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2 exit 1 fi @@ -36,7 +35,7 @@ cd /opt echo "Cloning khaleesi to /opt" if [ ! -d khaleesi ]; then - if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then + if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2 exit 1 fi diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh index f61ac93..345864b 100755 --- a/foreman/ci/clean.sh +++ b/foreman/ci/clean.sh @@ -3,22 +3,23 @@ #Clean script to uninstall provisioning server for Foreman/QuickStack #author: Tim Rozet (trozet@redhat.com) # -#Uses Vagrant and VirtualBox +#Removes Libvirt, KVM, Vagrant, VirtualBox # -#Destroys Vagrant VM running in /tmp/bgs_vagrant +#Destroys Vagrant VMs running in $vm_dir/ #Shuts down all nodes found in Khaleesi settings -#Removes hypervisor kernel modules (VirtualBox) +#Removes hypervisor kernel modules (VirtualBox & KVM/Libvirt) ##VARS reset=`tput sgr0` blue=`tput setaf 4` red=`tput setaf 1` green=`tput setaf 2` +vm_dir=/var/opt/opnfv ##END VARS ##FUNCTIONS display_usage() { - echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n" + echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n" echo -e "\nUsage:\n$0 [arguments] \n" echo -e "\n -no_parse : No variable parsing into config. Flag. \n" echo -e "\n -base_config : Full path of ksgen settings file to parse. Required. Will provide BMC info to shutdown hosts. 
Example: -base_config /opt/myinventory.yml \n" @@ -31,7 +32,7 @@ if [[ ( $1 == "--help") || $1 == "-h" ]]; then exit 0 fi -echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n" +echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n" echo "Use -h to display help" sleep 2 @@ -50,54 +51,55 @@ do esac done - -# Install ipmitool -# Major version is pinned to force some consistency for Arno -if ! yum list installed | grep -i ipmitool; then - if ! yum -y install ipmitool-1*; then - echo "${red}Unable to install ipmitool!${reset}" - exit 1 - fi -else - echo "${blue}Skipping ipmitool as it is already installed!${reset}" -fi - -###find all the bmc IPs and number of nodes -node_counter=0 -output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'` -for line in ${output} ; do - bmc_ip[$node_counter]=$line - ((node_counter++)) -done - -max_nodes=$((node_counter-1)) - -###find bmc_users per node -node_counter=0 -output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'` -for line in ${output} ; do - bmc_user[$node_counter]=$line - ((node_counter++)) -done - -###find bmc_pass per node -node_counter=0 -output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` -for line in ${output} ; do - bmc_pass[$node_counter]=$line - ((node_counter++)) -done - -for mynode in `seq 0 $max_nodes`; do - echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}" - if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then - echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}" +if [ ! -z "$base_config" ]; then + # Install ipmitool + # Major version is pinned to force some consistency for Arno + if ! yum list installed | grep -i ipmitool; then + if ! yum -y install ipmitool-1*; then + echo "${red}Unable to install ipmitool!${reset}" + exit 1 + fi else - echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}" - exit 1 + echo "${blue}Skipping ipmitool as it is already installed!${reset}" fi -done + ###find all the bmc IPs and number of nodes + node_counter=0 + output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'` + for line in ${output} ; do + bmc_ip[$node_counter]=$line + ((node_counter++)) + done + + max_nodes=$((node_counter-1)) + + ###find bmc_users per node + node_counter=0 + output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'` + for line in ${output} ; do + bmc_user[$node_counter]=$line + ((node_counter++)) + done + + ###find bmc_pass per node + node_counter=0 + output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` + for line in ${output} ; do + bmc_pass[$node_counter]=$line + ((node_counter++)) + done + for mynode in `seq 0 $max_nodes`; do + echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}" + if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then + echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}" + else + echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}" + exit 1 + fi + done +else + echo "${blue}Skipping Baremetal node poweroff as base_config was not provided${reset}" +fi ###check to see if vbox is installed vboxpkg=`rpm -qa | grep VirtualBox` if [ $? 
-eq 0 ]; then @@ -106,39 +108,120 @@ else skip_vagrant=1 fi +###legacy VM location check +###remove me later +if [ -d /tmp/bgs_vagrant ]; then + cd /tmp/bgs_vagrant + vagrant destroy -f + rm -rf /tmp/bgs_vagrant +fi + ###destroy vagrant if [ $skip_vagrant -eq 0 ]; then - cd /tmp/bgs_vagrant - if vagrant destroy -f; then - echo "${blue}Successfully destroyed Foreman VM ${reset}" + if [ -d $vm_dir ]; then + ##all vm directories + for vm in $( ls $vm_dir ); do + cd $vm_dir/$vm + if vagrant destroy -f; then + echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}" + else + echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}" + killall vagrant + echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" + if ps axf | grep vagrant | grep -v 'grep'; then + echo "${red}Vagrant process still exists after kill...exiting ${reset}" + exit 1 + else + echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}" + fi + fi + + ##Vagrant boxes appear as VboxHeadless processes + ##try to gracefully destroy the VBox VM if it still exists + if vboxmanage list runningvms | grep $vm; then + echo "${red} $vm VBoxHeadless process still exists...Removing${reset}" + vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g') + vboxmanage controlvm $vbox_id poweroff + if vboxmanage unregistervm --delete $vbox_id; then + echo "${blue}$vm VM is successfully deleted! ${reset}" + else + echo "${red} Unable to delete VM $vm ...Exiting ${reset}" + exit 1 + fi + else + echo "${blue}$vm VM is successfully deleted! ${reset}" + fi + done else - echo "${red}Unable to destroy Foreman VM ${reset}" - echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" - if ps axf | grep vagrant; then - echo "${red}Vagrant VM still exists...exiting ${reset}" - exit 1 - else - echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}" - fi + echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}" fi + echo "${blue}Checking for any remaining virtual box processes...${reset}" ###kill virtualbox - echo "${blue}Killing VirtualBox ${reset}" - killall virtualbox - killall VBoxHeadless + if ps axf | grep virtualbox | grep -v 'grep'; then + echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}" + killall virtualbox + fi + + ###kill any leftover VMs (brute force) + if ps axf | grep VBoxHeadless | grep -v 'grep'; then + echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}" + killall VBoxHeadless + fi ###remove virtualbox - echo "${blue}Removing VirtualBox ${reset}" + echo "${blue}Removing VirtualBox... 
${reset}" yum -y remove $vboxpkg else - echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}" + echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}" fi +###remove working vm directory +echo "${blue}Removing working VM directory: $vm_dir ${reset}" +rm -rf $vm_dir + +###check to see if libvirt is installed +echo "${blue}Checking if libvirt/KVM is installed" +if rpm -qa | grep -iE 'libvirt|kvm'; then + echo "${blue}Libvirt/KVM is installed${reset}" + echo "${blue}Checking for any QEMU/KVM VMs...${reset}" + vm_count=0 + while read -r line; do ((vm_count++)); done < <(virsh list --all | sed 1,2d | head -n -1) + if [ $vm_count -gt 0 ]; then + echo "${blue}VMs Found: $vm_count${reset}" + vm_runnning=0 + while read -r line; do ((vm_running++)); done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running) + echo "${blue}Powering off $vm_running VM(s)${reset}" + while read -r vm; do + if ! virsh destroy $vm; then + echo "${red}WARNING: Unable to power off VM ${vm}${reset}" + else + echo "${blue}VM $vm powered off!${reset}" + fi + done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running | sed 's/^[ \t]*//' | awk '{print $2}') + echo "${blue}Destroying libvirt VMs...${reset}" + while read -r vm; do + if ! virsh undefine --remove-all-storage $vm; then + echo "${red}ERROR: Unable to remove the VM ${vm}${reset}" + exit 1 + else + echo "${blue}VM $vm removed!${reset}" + fi + done < <(virsh list --all | sed 1,2d | head -n -1| awk '{print $2}') + else + echo "${blue}No VMs found for removal" + fi + echo "${blue}Removing libvirt and kvm packages" + yum -y remove libvirt-* + yum -y remove *qemu* +else + echo "${blue}libvirt/KVM is not installed${reset}" +fi ###remove kernel modules echo "${blue}Removing kernel modules ${reset}" -for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do +for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do if ! rmmod $kernel_mod; then if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then echo "${blue} $kernel_mod is not currently loaded! ${reset}" diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 86f03a7..6771da0 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -25,6 +25,12 @@ red=`tput setaf 1` green=`tput setaf 2` declare -A interface_arr +declare -A controllers_ip_arr +declare -A admin_ip_arr +declare -A public_ip_arr + +vm_dir=/var/opt/opnfv +script=`realpath $0` ##END VARS ##FUNCTIONS @@ -35,6 +41,36 @@ display_usage() { echo -e "\n -no_parse : No variable parsing into config. Flag. \n" echo -e "\n -base_config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: -base_config /opt/myinventory.yml \n" echo -e "\n -virtual : Node virtualization instead of baremetal. Flag. \n" + echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n" + echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n" + echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n" + echo -e "\n -floating_ip_count : number of IP address from the public range to be used for floating IP. Default is 20.\n" + echo -e "\n -admin_nic : Baremetal NIC for the admin network. 
Required if other "nic" arguments are used. \ +Not applicable with -virtual. Example: -admin_nic em1" + echo -e "\n -private_nic : Baremetal NIC for the private network. Required if other "nic" arguments are used. \ +Not applicable with -virtual. Example: -private_nic em2" + echo -e "\n -public_nic : Baremetal NIC for the public network. Required if other "nic" arguments are used. \ +Can also be used with -virtual. Example: -public_nic em3" + echo -e "\n -storage_nic : Baremetal NIC for the storage network. Optional. Not applicable with -virtual. \ +Private NIC will be used for storage if not specified. Example: -storage_nic em4" +} + +##verify vm dir exists +##params: none +function verify_vm_dir { + if [ -d "$vm_dir" ]; then + echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists. Environment not clean. Please use clean.sh. Exiting${reset}\n\n" + exit 1 + else + mkdir -p $vm_dir + fi + + chmod 700 $vm_dir + + if [ ! -d $vm_dir ]; then + echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir Exiting${reset}\n\n" + exit -1 + fi } ##find ip of interface @@ -51,6 +87,41 @@ function find_subnet { printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))" } +##verify subnet has at least n IPs +##params: subnet mask, n IPs +function verify_subnet_size { + IFS=. read -r i1 i2 i3 i4 <<< "$1" + num_ips_required=$2 + + ##this function assumes you would never need more than 254 + ##we check here to make sure + if [ "$num_ips_required" -ge 254 ]; then + echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n" + return 1 + fi + + ##we just return if 3rd octet is not 255 + ##because we know the subnet is big enough + if [ "$i3" -ne 255 ]; then + return 0 + elif [ $((254-$i4)) -ge "$num_ips_required" ]; then + return 0 + else + echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n" + return 1 + fi +} + +##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask +## Warning: This function only works for IPv4 at the moment. +##params: ip, netmask +function find_last_ip_subnet { + IFS=. read -r i1 i2 i3 i4 <<< "$1" + IFS=. read -r m1 m2 m3 m4 <<< "$2" + IFS=. read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))" + printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))" +} + ##increments subnet by a value ##params: ip, value ##assumes low value @@ -87,6 +158,19 @@ function next_ip { echo $baseaddr.$lsv } +##subtracts a value from an IP address +##params: last ip, ip_count +##assumes ip_count is less than the last octect of the address +subtract_ip() { + IFS=. 
read -r i1 i2 i3 i4 <<< "$1" + ip_count=$2 + if [ $i4 -lt $ip_count ]; then + echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n" + exit 1 + fi + printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))" +} + ##removes the network interface config from Vagrantfile ##params: interface ##assumes you are in the directory of Vagrantfile @@ -149,19 +233,21 @@ parse_yaml() { }' } -##END FUNCTIONS - -if [[ ( $1 == "--help") || $1 == "-h" ]]; then +##translates the command line paramaters into variables +##params: $@ the entire command line is passed +##usage: parse_cmd_line() "$@" +parse_cmdline() { + if [[ ( $1 == "--help") || $1 == "-h" ]]; then display_usage exit 0 -fi + fi -echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n" -echo "Use -h to display help" -sleep 2 + echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n" + echo "Use -h to display help" + sleep 2 -while [ "`echo $1 | cut -c1`" = "-" ] -do + while [ "`echo $1 | cut -c1`" = "-" ] + do echo $1 case "$1" in -base_config) @@ -176,35 +262,137 @@ do virtual="TRUE" shift 1 ;; + -enable_virtual_dhcp) + enable_virtual_dhcp="TRUE" + shift 1 + ;; + -static_ip_range) + static_ip_range=$2 + shift 2 + ;; + -ping_site) + ping_site=$2 + shift 2 + ;; + -floating_ip_count) + floating_ip_count=$2 + shift 2 + ;; + -admin_nic) + admin_nic=$2 + shift 2 + nic_arg_flag=1 + ;; + -private_nic) + private_nic=$2 + shift 2 + nic_arg_flag=1 + ;; + -public_nic) + public_nic=$2 + shift 2 + nic_arg_flag=1 + ;; + -storage_nic) + storage_nic=$2 + shift 2 + nic_arg_flag=1 + ;; *) display_usage exit 1 ;; -esac -done + esac + done + + if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then + echo -e "\n\n${red}ERROR: Incorrect Usage. Static IP range cannot be set when using DHCP!. Exiting${reset}\n\n" + exit 1 + fi + + if [ -z "$virtual" ]; then + if [ ! -z "$enable_virtual_dhcp" ]; then + echo -e "\n\n${red}ERROR: Incorrect Usage. enable_virtual_dhcp can only be set when using -virtual!. Exiting${reset}\n\n" + exit 1 + elif [ ! -z "$static_ip_range" ]; then + echo -e "\n\n${red}ERROR: Incorrect Usage. static_ip_range can only be set when using -virtual!. Exiting${reset}\n\n" + exit 1 + fi + fi + + if [ -z "$floating_ip_count" ]; then + floating_ip_count=20 + fi + + ##Validate nic args + if [[ $nic_arg_flag -eq 1 ]]; then + if [ -z "$virtual" ]; then + for nic_type in admin_nic private_nic public_nic; do + eval "nic_value=\$$nic_type" + if [ -z "$nic_value" ]; then + echo "${red}$nic_type is empty or not defined. Required when other nic args are given!${reset}" + exit 1 + fi + interface_ip=$(find_ip $nic_value) + if [ ! "$interface_ip" ]; then + echo "${red}$nic_value does not have an IP address! Exiting... ${reset}" + exit 1 + fi + done + else + ##if virtual only public_nic should be specified + for nic_type in admin_nic private_nic storage_nic; do + eval "nic_value=\$$nic_type" + if [ ! -z "$nic_value" ]; then + echo "${red}$nic_type is not a valid argument using -virtual. Please only specify public_nic!${reset}" + exit 1 + fi + done + + interface_ip=$(find_ip $public_nic) + if [ ! "$interface_ip" ]; then + echo "${red}Public NIC: $public_nic does not have an IP address! Exiting... 
${reset}" + exit 1 + fi + fi + fi +} ##disable selinux -/sbin/setenforce 0 - -# Install EPEL repo for access to many other yum repos -# Major version is pinned to force some consistency for Arno -yum install -y epel-release-7* - -# Install other required packages -# Major versions are pinned to force some consistency for Arno -if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then - printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2 - exit 1 -fi - -##install VirtualBox repo -if cat /etc/*release | grep -i "Fedora release"; then - vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch -else - vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch -fi - -cat > /etc/yum.repos.d/virtualbox.repo << EOM +##params: none +##usage: disable_selinux() +disable_selinux() { + /sbin/setenforce 0 +} + +##Install the EPEL repository and additional packages +##params: none +##usage: install_EPEL() +install_EPEL() { + # Install EPEL repo for access to many other yum repos + # Major version is pinned to force some consistency for Arno + yum install -y epel-release-7* + + # Install other required packages + # Major versions are pinned to force some consistency for Arno + if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then + printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2 + exit 1 + fi +} + +##Download and install virtual box +##params: none +##usage: install_vbox() +install_vbox() { + ##install VirtualBox repo + if cat /etc/*release | grep -i "Fedora release"; then + vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch + else + vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch + fi + + cat > /etc/yum.repos.d/virtualbox.repo << EOM [virtualbox] name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox baseurl=$vboxurl @@ -215,365 +403,321 @@ skip_if_unavailable = 1 keepcache = 0 EOM -##install VirtualBox -if ! yum list installed | grep -i virtualbox; then - if ! yum -y install VirtualBox-4.3; then - printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2 - exit 1 + ##install VirtualBox + if ! yum list installed | grep -i virtualbox; then + if ! yum -y install VirtualBox-4.3; then + printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2 + exit 1 + fi fi -fi -##install kmod-VirtualBox -if ! lsmod | grep vboxdrv; then - if ! sudo /etc/init.d/vboxdrv setup; then - printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 - exit 1 + ##install kmod-VirtualBox + if ! lsmod | grep vboxdrv; then + sudo /etc/init.d/vboxdrv setup + if ! lsmod | grep vboxdrv; then + printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed' fi -else - printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed' -fi +} -##install Ansible -if ! yum list installed | grep -i ansible; then - if ! yum -y install ansible-1*; then - printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2 - exit 1 +##install Ansible using yum +##params: none +##usage: install_ansible() +install_ansible() { + if ! yum list installed | grep -i ansible; then + if ! 
yum -y install ansible-1*; then + printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2 + exit 1 + fi fi -fi +} -##install Vagrant -if ! rpm -qa | grep vagrant; then - if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then - printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2 - exit 1 +##install Vagrant RPM directly with the bintray.com site +##params: none +##usage: install_vagrant() +install_vagrant() { + if ! rpm -qa | grep vagrant; then + if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then + printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.' fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.' -fi -##add centos 7 box to vagrant -if ! vagrant box list | grep chef/centos-7.0; then - if ! vagrant box add chef/centos-7.0 --provider virtualbox; then - printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 - exit 1 + ##add centos 7 box to vagrant + if ! vagrant box list | grep opnfv/centos-7.0; then + if ! vagrant box add opnfv/centos-7.0 --provider virtualbox; then + printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.' fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.' -fi -##install workaround for centos7 -if ! vagrant plugin list | grep vagrant-centos7_fix; then - if ! vagrant plugin install vagrant-centos7_fix; then - printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2 + ##install workaround for centos7 + if ! vagrant plugin list | grep vagrant-centos7_fix; then + if ! vagrant plugin install vagrant-centos7_fix; then + printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.' fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.' -fi +} -cd /tmp/ ##remove bgs vagrant incase it wasn't cleaned up -rm -rf /tmp/bgs_vagrant - -##clone bgs vagrant -##will change this to be opnfv repo when commit is done -if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then - printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 - exit 1 -fi - -cd bgs_vagrant - -echo "${blue}Detecting network configuration...${reset}" -##detect host 1 or 3 interface configuration -#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` -output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` - -if [ ! "$output" ]; then - printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 - exit 1 -fi - -##find number of interfaces with ip and substitute in VagrantFile -if_counter=0 -for interface in ${output}; do - - if [ "$if_counter" -ge 4 ]; then - break - fi - interface_ip=$(find_ip $interface) - if [ ! "$interface_ip" ]; then - continue - fi - new_ip=$(next_usable_ip $interface_ip) - if [ ! 
"$new_ip" ]; then - continue - fi - interface_arr[$interface]=$if_counter - interface_ip_arr[$if_counter]=$new_ip - subnet_mask=$(find_netmask $interface) - if [ "$if_counter" -eq 1 ]; then - private_subnet_mask=$subnet_mask - private_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 2 ]; then - public_subnet_mask=$subnet_mask - public_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 3 ]; then - storage_subnet_mask=$subnet_mask - fi - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile - ((if_counter++)) -done - -##now remove interface config in Vagrantfile for 1 node -##if 1, 3, or 4 interfaces set deployment type -##if 2 interfaces remove 2nd interface and set deployment type -if [ "$if_counter" == 1 ]; then - deployment_type="single_network" - remove_vagrant_network eth_replace1 - remove_vagrant_network eth_replace2 - remove_vagrant_network eth_replace3 -elif [ "$if_counter" == 2 ]; then - deployment_type="single_network" - second_interface=`echo $output | awk '{print $2}'` - remove_vagrant_network $second_interface - remove_vagrant_network eth_replace2 -elif [ "$if_counter" == 3 ]; then - deployment_type="three_network" - remove_vagrant_network eth_replace3 -else - deployment_type="multi_network" -fi - -echo "${blue}Network detected: ${deployment_type}! ${reset}" - -if route | grep default; then - echo "${blue}Default Gateway Detected ${reset}" - host_default_gw=$(ip route | grep default | awk '{print $3}') - echo "${blue}Default Gateway: $host_default_gw ${reset}" - default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}') - case "${interface_arr[$default_gw_interface]}" in - 0) - echo "${blue}Default Gateway Detected on Admin Interface!${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile - node_default_gw=$host_default_gw - ;; - 1) - echo "${red}Default Gateway Detected on Private Interface!${reset}" - echo "${red}Private subnet should be private and not have Internet access!${reset}" - exit 1 - ;; - 2) - echo "${blue}Default Gateway Detected on Public Interface!${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile - echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}" - sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile - echo "${blue}Setting node gateway to be VM Admin IP${reset}" - node_default_gw=${interface_ip_arr[0]} - public_gateway=$default_gw - ;; - 3) - echo "${red}Default Gateway Detected on Storage Interface!${reset}" - echo "${red}Storage subnet should be private and not have Internet access!${reset}" - exit 1 - ;; - *) - echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}" - exit 1 - ;; - esac -else - #assumes 24 bit mask - defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3` - firstip=.1 - defaultgw=$defaultgw$firstip - echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile - node_default_gw=$defaultgw -fi - -if [ $base_config ]; then - if ! 
cp -f $base_config opnfv_ksgen_settings.yml; then - echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" - exit 1 - fi -fi - -if [ $no_parse ]; then -echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" - -else - -echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}" -##Edit the ksgen settings appropriately -##ksgen settings will be stored in /vagrant on the vagrant machine -##if single node deployment all the variables will have the same ip -##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7 - -sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml +##params: none +##usage: clean_tmp() +clean_tmp() { + rm -rf $vm_dir/foreman_vm +} -##replace private interface parameter -##private interface will be of hosts, so we need to know the provisioned host interface name -##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts -##replace IP for parameters with next IP that will be given to controller -if [ "$deployment_type" == "single_network" ]; then - ##we also need to assign IP addresses to nodes - ##for single node, foreman is managing the single network, so we can't reserve them - ##not supporting single network anymore for now - echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}" - exit 0 +##clone genesis and move to node vm dir +##params: destination directory +##usage: clone_bgs /tmp/myvm/ +clone_bgs() { + script_dir="`dirname "$script"`" + cp -fr $script_dir/ $1 + cp -fr $script_dir/../../common/puppet-opnfv $1 +} -elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then +##validates the network settings and update VagrantFile with network settings +##params: none +##usage: configure_network() +configure_network() { + cd $vm_dir/foreman_vm + + ##if nic_arg_flag is set, then we don't figure out + ##NICs dynamically + if [[ $nic_arg_flag -eq 1 ]]; then + echo "${blue}Static Network Interfaces Defined. Updating Vagrantfile...${reset}" + if [ $virtual ]; then + nic_list="$public_nic" + elif [ -z "$storage_nic" ]; then + echo "${blue}storage_nic not defined, will combine storage into private VLAN ${reset}" + nic_list="$admin_nic $private_nic $public_nic" + else + nic_list="$admin_nic $private_nic $public_nic $storage_nic" + fi + nic_array=( $nic_list ) + output=$nic_list + else + echo "${blue}Detecting network configuration...${reset}" + ##detect host 1 or 3 interface configuration + #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` + output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f10` + fi - if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml + if [ ! "$output" ]; then + printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 + exit 1 fi - sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml + ##virtual we only find 1 interface + if [ $virtual ]; then + if [ ! 
-z "${nic_array[0]}" ]; then + echo "${blue}Public Interface specified: ${nic_array[0]}${reset}" + this_default_gw_interface=${nic_array[0]} + else + ##find interface with default gateway + this_default_gw=$(ip route | grep default | awk '{print $3}') + echo "${blue}Default Gateway: $this_default_gw ${reset}" + this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}') + fi - ##get ip addresses for private network on controllers to make dhcp entries - ##required for controllers_ip_array global param - next_private_ip=${interface_ip_arr[1]} - type=_private - for node in controller1 controller2 controller3; do - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 - exit 1 + ##find interface IP, make sure its valid + interface_ip=$(find_ip $this_default_gw_interface) + if [ ! "$interface_ip" ]; then + echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}" + exit 1 fi - sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml - controller_ip_array=$controller_ip_array$next_private_ip, - done - ##replace global param for contollers_ip_array - controller_ip_array=${controller_ip_array%?} - sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml + ##set variable info + if [ ! -z "$static_ip_range" ]; then + new_ip=$(echo $static_ip_range | cut -d , -f1) + else + new_ip=$(next_usable_ip $interface_ip) + if [ ! "$new_ip" ]; then + echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}" + exit 1 + fi + fi + interface=$this_default_gw_interface + public_interface=$interface + interface_arr[$interface]=2 + interface_ip_arr[2]=$new_ip + subnet_mask=$(find_netmask $interface) + public_subnet_mask=$subnet_mask + public_short_subnet_mask=$(find_short_netmask $interface) - ##now replace all the VIP variables. admin//private can be the same IP - ##we have to use IP's here that won't be allocated to hosts at provisioning time - ##therefore we increment the ip by 10 to make sure we have a safe buffer - next_private_ip=$(increment_ip $next_private_ip 10) + if ! verify_subnet_size $public_subnet_mask 25; then + echo "${red} Not enough IPs in public subnet: $interface_ip_arr[2] ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi - grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 + ##set that interface to be public + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile + if_counter=1 + else + ##find number of interfaces with ip and substitute in VagrantFile + if_counter=0 + for interface in ${output}; do + + if [ "$if_counter" -ge 4 ]; then + break + fi + interface_ip=$(find_ip $interface) + if [ ! "$interface_ip" ]; then + continue + fi + new_ip=$(next_usable_ip $interface_ip) + if [ ! 
"$new_ip" ]; then + continue + fi + interface_arr[$interface]=$if_counter + interface_ip_arr[$if_counter]=$new_ip + subnet_mask=$(find_netmask $interface) + if [ "$if_counter" -eq 0 ]; then + admin_subnet_mask=$subnet_mask + if ! verify_subnet_size $admin_subnet_mask 5; then + echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}. Need at least 5 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi + + elif [ "$if_counter" -eq 1 ]; then + private_subnet_mask=$subnet_mask + private_short_subnet_mask=$(find_short_netmask $interface) + + if ! verify_subnet_size $private_subnet_mask 15; then + echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}. Need at least 15 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi + elif [ "$if_counter" -eq 2 ]; then + public_subnet_mask=$subnet_mask + public_short_subnet_mask=$(find_short_netmask $interface) + + if ! verify_subnet_size $public_subnet_mask 25; then + echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi + elif [ "$if_counter" -eq 3 ]; then + storage_subnet_mask=$subnet_mask + + if ! verify_subnet_size $storage_subnet_mask 10; then + echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}. Need at least 10 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi + else + echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}" + exit 1 + fi + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile + ((if_counter++)) + done + fi + ##now remove interface config in Vagrantfile for 1 node + ##if 1, 3, or 4 interfaces set deployment type + ##if 2 interfaces remove 2nd interface and set deployment type + if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then + if [ $virtual ]; then + deployment_type="single_network" + echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}" + private_internal_ip=155.1.2.2 + admin_internal_ip=156.1.2.2 + private_subnet_mask=255.255.255.0 + private_short_subnet_mask=/24 + interface_ip_arr[1]=$private_internal_ip + interface_ip_arr[0]=$admin_internal_ip + admin_subnet_mask=255.255.255.0 + admin_short_subnet_mask=/24 + sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile + sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile + remove_vagrant_network eth_replace3 + deployment_type=three_network + else + echo "${blue}Single network or 2 network detected for baremetal deployment. This is unsupported! Exiting. 
${reset}" exit 1 fi - done + elif [ "$if_counter" == 3 ]; then + deployment_type="three_network" + remove_vagrant_network eth_replace3 + else + deployment_type="multi_network" + fi - ##replace foreman site - next_public_ip=${interface_ip_arr[2]} - sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml - ##replace public vips - next_public_ip=$(increment_ip $next_public_ip 10) - grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml - next_public_ip=$(next_usable_ip $next_public_ip) - if [ ! "$next_public_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 - exit 1 + echo "${blue}Network detected: ${deployment_type}! ${reset}" + + if [ $virtual ]; then + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*disable_dhcp_flag =.*$/ disable_dhcp_flag = true/' Vagrantfile + if [ $static_ip_range ]; then + ##verify static range is at least 20 IPs + static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1) + static_ip_range_end=$(echo $static_ip_range | cut -d , -f2) + ##verify range is at least 20 ips + ##assumes less than 255 range pool + begin_octet=$(echo $static_ip_range_begin | cut -d . -f4) + end_octet=$(echo $static_ip_range_end | cut -d . -f4) + ip_count=$((end_octet-begin_octet+1)) + if [ "$ip_count" -lt 20 ]; then + echo "${red}Static range is less than 20 ips: ${ip_count}, exiting ${reset}" + exit 1 + else + echo "${blue}Static IP range is size $ip_count ${reset}" + fi + fi fi - done + fi - ##replace public_network param - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml - ##replace private_network param - private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) - sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml - ##replace storage_network - if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + if route | grep default; then + echo "${blue}Default Gateway Detected ${reset}" + host_default_gw=$(ip route | grep default | awk '{print $3}') + echo "${blue}Default Gateway: $host_default_gw ${reset}" + default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}') + case "${interface_arr[$default_gw_interface]}" in + 0) + echo "${blue}Default Gateway Detected on Admin Interface!${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile + node_default_gw=$host_default_gw + ;; + 1) + echo "${red}Default Gateway Detected on Private Interface!${reset}" + echo "${red}Private subnet should be private and not have Internet access!${reset}" + exit 1 + ;; + 2) + echo "${blue}Default Gateway Detected on Public Interface!${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile + echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}" + sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile + echo "${blue}Setting node gateway to be VM Admin IP${reset}" + node_default_gw=${interface_ip_arr[0]} + public_gateway=$default_gw + ;; + 3) + echo "${red}Default Gateway Detected on Storage Interface!${reset}" + echo "${red}Storage subnet should be private and not have Internet access!${reset}" + exit 1 + ;; + *) + echo "${red}Unable to 
determine which interface default gateway is on..Exiting!${reset}" + exit 1 + ;; + esac else - next_storage_ip=${interface_ip_arr[3]} - storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) - sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml - fi - - ##replace public_subnet param - public_subnet=$public_subnet'\'$public_short_subnet_mask - sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml - ##replace private_subnet param - private_subnet=$private_subnet'\'$private_short_subnet_mask - sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml - - ##replace public_dns param to be foreman server - sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml - - ##replace public_gateway - if [ -z "$public_gateway" ]; then - ##if unset then we assume its the first IP in the public subnet - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - public_gateway=$(increment_subnet $public_subnet 1) - fi - sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml - - ##we have to define an allocation range of the public subnet to give - ##to neutron to use as floating IPs - ##we should control this subnet, so this range should work .150-200 - ##but generally this is a bad idea and we are assuming at least a /24 subnet here - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - public_allocation_start=$(increment_subnet $public_subnet 150) - public_allocation_end=$(increment_subnet $public_subnet 200) - - sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml - sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml - -else - printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2 - exit 1 -fi - -echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}" - -fi - -if [ $virtual ]; then - echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}" - sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh -fi - -echo "${blue}Starting Vagrant! ${reset}" - -##stand up vagrant -if ! vagrant up; then - printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2 - exit 1 -else - echo "${blue}Foreman VM is up! ${reset}" -fi - -if [ $virtual ]; then - -##Bring up VM nodes -echo "${blue}Setting VMs up... ${reset}" -nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` -##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first -##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have -##3 static controllers -compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` -controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` -nodes=${controller_nodes}${compute_nodes} - -for node in ${nodes}; do - cd /tmp - - ##remove VM nodes incase it wasn't cleaned up - rm -rf /tmp/$node - - ##clone bgs vagrant - ##will change this to be opnfv repo when commit is done - if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then - printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 - exit 1 + #assumes 24 bit mask + defaultgw=`echo ${interface_ip_arr[0]} | cut -d. 
-f1-3` + firstip=.1 + defaultgw=$defaultgw$firstip + echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile + node_default_gw=$defaultgw fi - cd $node - if [ $base_config ]; then if ! cp -f $base_config opnfv_ksgen_settings.yml; then echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" @@ -581,114 +725,503 @@ for node in ${nodes}; do fi fi - ##parse yaml into variables - eval $(parse_yaml opnfv_ksgen_settings.yml "config_") - ##find node type - node_type=config_nodes_${node}_type - node_type=$(eval echo \$$node_type) - - ##find number of interfaces with ip and substitute in VagrantFile - output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` - - if [ ! "$output" ]; then - printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 - exit 1 + nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` + controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` + echo "${blue}Controller nodes found in settings: ${controller_nodes}${reset}" + my_controller_array=( $controller_nodes ) + num_control_nodes=${#my_controller_array[@]} + if [ "$num_control_nodes" -ne 3 ]; then + if cat opnfv_ksgen_settings.yml | grep ha_flag | grep true; then + echo "${red}Error: You must define exactly 3 control nodes when HA flag is true!${reset}" + exit 1 + fi + else + echo "${blue}Number of Controller nodes detected: ${num_control_nodes}${reset}" fi + if [ $no_parse ]; then + echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" - if_counter=0 - for interface in ${output}; do - - if [ "$if_counter" -ge 4 ]; then - break - fi - interface_ip=$(find_ip $interface) - if [ ! "$interface_ip" ]; then - continue - fi - case "${if_counter}" in - 0) - mac_string=config_nodes_${node}_mac_address - mac_addr=$(eval echo \$$mac_string) - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - if [ $mac_addr == "" ]; then - echo "${red} Unable to find mac_address for $node! ${reset}" - exit 1 - fi - ;; - 1) - if [ "$node_type" == "controller" ]; then - mac_string=config_nodes_${node}_private_mac - mac_addr=$(eval echo \$$mac_string) - if [ $mac_addr == "" ]; then - echo "${red} Unable to find private_mac for $node! 
${reset}" - exit 1 - fi - else - ##generate random mac - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - fi - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - *) - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - esac - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile - ((if_counter++)) - done - - ##now remove interface config in Vagrantfile for 1 node - ##if 1, 3, or 4 interfaces set deployment type - ##if 2 interfaces remove 2nd interface and set deployment type - if [ "$if_counter" == 1 ]; then - deployment_type="single_network" - remove_vagrant_network eth_replace1 - remove_vagrant_network eth_replace2 - remove_vagrant_network eth_replace3 - elif [ "$if_counter" == 2 ]; then - deployment_type="single_network" - second_interface=`echo $output | awk '{print $2}'` - remove_vagrant_network $second_interface - remove_vagrant_network eth_replace2 - elif [ "$if_counter" == 3 ]; then - deployment_type="three_network" - remove_vagrant_network eth_replace3 else - deployment_type="multi_network" - fi - ##modify provisioning to do puppet install, config, and foreman check-in - ##substitute host_name and dns_server in the provisioning script - host_string=config_nodes_${node}_hostname - host_name=$(eval echo \$$host_string) - sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh - ##dns server should be the foreman server - sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh + echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}" + ##Edit the ksgen settings appropriately + ##ksgen settings will be stored in /vagrant on the vagrant machine + ##if single node deployment all the variables will have the same ip + ##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7 + + sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml + + ##replace private interface parameter + ##private interface will be of hosts, so we need to know the provisioned host interface name + ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts + ##replace IP for parameters with next IP that will be given to controller + if [ "$deployment_type" == "single_network" ]; then + ##we also need to assign IP addresses to nodes + ##for single node, foreman is managing the single network, so we can't reserve them + ##not supporting single network anymore for now + echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}" + exit 0 + + elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then + + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml + fi + + sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml + + ##get ip addresses for private network on controllers to make dhcp entries + ##required for controllers_ip_array global param + next_private_ip=${interface_ip_arr[1]} + type=_private + control_count=0 + for node in controller1 controller2 controller3; do + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! 
"$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 + exit 1 + fi + sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml + controller_ip_array=$controller_ip_array$next_private_ip, + controllers_ip_arr[$control_count]=$next_private_ip + ((control_count++)) + done + + next_public_ip=${interface_ip_arr[2]} + foreman_ip=$next_public_ip + + ##if no dhcp, find all the Admin IPs for nodes in advance + if [ $virtual ]; then + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml + nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` + compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` + controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` + nodes=${controller_nodes}${compute_nodes} + next_admin_ip=${interface_ip_arr[0]} + type=_admin + for node in ${nodes}; do + next_admin_ip=$(next_ip $next_admin_ip) + if [ ! "$next_admin_ip" ]; then + echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}" + exit 1 + else + admin_ip_arr[$node]=$next_admin_ip + sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml + fi + done + + ##allocate node public IPs + for node in ${nodes}; do + next_public_ip=$(next_usable_ip $next_public_ip) + if [ ! "$next_public_ip" ]; then + echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}" + exit 1 + else + public_ip_arr[$node]=$next_public_ip + fi + done + fi + fi + ##replace global param for controllers_ip_array + controller_ip_array=${controller_ip_array%?} + sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml + + ##now replace all the VIP variables. admin//private can be the same IP + ##we have to use IP's here that won't be allocated to hosts at provisioning time + ##therefore we increment the ip by 10 to make sure we have a safe buffer + next_private_ip=$(increment_ip $next_private_ip 10) + + private_output=$(grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml) + if [ ! -z "$private_output" ]; then + while read -r line; do + sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 + exit 1 + fi + done <<< "$private_output" + fi + + ##replace odl_control_ip (non-HA only) + odl_control_ip=${controllers_ip_arr[0]} + sed -i 's/^.*odl_control_ip:.*$/ odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml + + ##replace controller_ip (non-HA only) + sed -i 's/^.*controller_ip:.*$/ controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml + + ##replace foreman site + sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml + ##replace public vips + ##no need to do this if no dhcp + if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then + next_public_ip=$(next_usable_ip $next_public_ip) + else + next_public_ip=$(increment_ip $next_public_ip 10) + fi + + public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml) + if [ ! 
-z "$public_output" ]; then + while read -r line; do + if echo $line | grep horizon_public_vip; then + horizon_public_vip=$next_public_ip + fi + sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml + next_public_ip=$(next_usable_ip $next_public_ip) + if [ ! "$next_public_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 + exit 1 + fi + done <<< "$public_output" + fi + + ##replace public_network param + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml + ##replace private_network param + private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) + sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + ##replace storage_network + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + else + next_storage_ip=${interface_ip_arr[3]} + storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) + sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml + fi + + ##replace public_subnet param + public_subnet=$public_subnet'\'$public_short_subnet_mask + sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml + ##replace private_subnet param + private_subnet=$private_subnet'\'$private_short_subnet_mask + sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml + + ##replace public_dns param to be foreman server + sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml + + ##replace public_gateway + if [ -z "$public_gateway" ]; then + ##if unset then we assume its the first IP in the public subnet + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + public_gateway=$(increment_subnet $public_subnet 1) + fi + sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml + + ##we have to define an allocation range of the public subnet to give + ##to neutron to use as floating IPs + ##if static ip range, then we take the difference of the end range and current ip + ## to be the allocation pool + ##if not static ip, we will use the last 20 IP from the subnet + ## note that this is not a really good idea because the subnet must be at least a /27 for this to work... + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + if [ ! -z "$static_ip_range" ]; then + begin_octet=$(echo $next_public_ip | cut -d . -f4) + end_octet=$(echo $static_ip_range_end | cut -d . -f4) + ip_diff=$((end_octet-begin_octet)) + if [ $ip_diff -le 0 ]; then + echo "${red}ip range left for floating range is less than or equal to 0! 
$ip_diff ${reset}"
+      exit 1
+    else
+      public_allocation_start=$(next_ip $next_public_ip)
+      public_allocation_end=$static_ip_range_end
+    fi
+  else
+    last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask)
+    public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count )
+    public_allocation_end=${last_ip_subnet}
+  fi
+  echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}"
+
+  sed -i 's/^.*public_allocation_start:.*$/  public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml
+  sed -i 's/^.*public_allocation_end:.*$/  public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml
+
+  else
+    printf '%s\n' "deploy.sh: Unknown network type: $deployment_type" >&2
+    exit 1
+  fi
-  ## remove bootstrap and NAT provisioning
-  sed -i '/nat_setup.sh/d' Vagrantfile
-  sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile
+  echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}"
-  ## modify default_gw to be node_default_gw
-  sed -i 's/^.*default_gw =.*$/  default_gw = '\""$node_default_gw"\"'/' Vagrantfile
+  fi
+}
-  ## modify VM memory to be 4gig
-  sed -i 's/^.*vb.memory =.*$/  vb.memory = 4096/' Vagrantfile
+##Configure bootstrap.sh to use the virtual Khaleesi playbook
+##params: none
+##usage: configure_virtual()
+configure_virtual() {
+  if [ $virtual ]; then
+    echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}"
+    sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh
+  fi
+}
-  echo "${blue}Starting Vagrant Node $node! ${reset}"
+##Starts Foreman VM with Vagrant
+##params: none
+##usage: start_foreman()
+start_foreman() {
+  echo "${blue}Starting Vagrant! ${reset}"
   ##stand up vagrant
   if ! vagrant up; then
-    echo "${red} Unable to start $node ${reset}"
+    printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2
     exit 1
   else
-    echo "${blue} $node VM is up! ${reset}"
+    echo "${blue}Foreman VM is up! ${reset}"
   fi
+}
-done
+##start the VM if this is a virtual installation
+##this function does nothing if baremetal servers are being used
+##params: none
+##usage: start_virtual_nodes()
+start_virtual_nodes() {
+  if [ $virtual ]; then
+
+    ##Bring up VM nodes
+    echo "${blue}Setting VMs up... ${reset}"
+    nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^  [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'`
+    ##due to an ODL Helium bug where OVS connects to ODL too early, the controllers need to install first
+    ##this fix assumes more than I would like it to, but for now it should be OK as we always have
+    ##3 static controllers
+    compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "`
+    controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "`
+    nodes=${controller_nodes}${compute_nodes}
+    controller_count=0
+    compute_wait_completed=false
+
+    for node in ${nodes}; do
+
+      ##remove VM node directories in case they weren't cleaned up
+      rm -rf $vm_dir/$node
+      rm -rf /tmp/genesis/
+
+      ##clone genesis and move into node folder
+      clone_bgs $vm_dir/$node
+
+      cd $vm_dir/$node
+
+      if [ $base_config ]; then
+        if ! cp -f $base_config opnfv_ksgen_settings.yml; then
+          echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}"
+          exit 1
+        fi
+      fi
+
+      ##parse yaml into variables
+      eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
+      ##find node type
+      node_type=config_nodes_${node}_type
+      node_type=$(eval echo \$$node_type)
+
+      ##trozet test: make compute nodes wait for the controllers to finish installing
+      if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then
+        echo "${blue}Waiting 1400 seconds (~23 minutes) for Control nodes to install before continuing with Compute nodes..."
+        compute_wait_completed=true
+        sleep 1400
+      fi
+
+      ## Add Admin interface
+      mac_string=config_nodes_${node}_mac_address
+      mac_addr=$(eval echo \$$mac_string)
+      mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+      if [ -z "$mac_addr" ]; then
+        echo "${red} Unable to find mac_address for $node! ${reset}"
+        exit 1
+      fi
+      this_admin_ip=${admin_ip_arr[$node]}
+      sed -i 's/^.*eth_replace0.*$/  config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile
+
+      ## Add private interface
+      if [ "$node_type" == "controller" ]; then
+        mac_string=config_nodes_${node}_private_mac
+        mac_addr=$(eval echo \$$mac_string)
+        if [ -z "$mac_addr" ]; then
+          echo "${red} Unable to find private_mac for $node! ${reset}"
+          exit 1
+        fi
+      else
+        ##generate random mac
+        mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"')
+      fi
+      mac_addr=$(echo $mac_addr | sed 's/:\|-//g')
+      if [ "$node_type" == "controller" ]; then
+        new_node_ip=${controllers_ip_arr[$controller_count]}
+        if [ ! "$new_node_ip" ]; then
+          echo "${red}ERROR: Empty node ip for controller $controller_count ${reset}"
+          exit 1
+        fi
+        ((controller_count++))
+      else
+        next_private_ip=$(next_ip $next_private_ip)
+        if [ ! "$next_private_ip" ]; then
+          echo "${red}ERROR: Could not find private ip for $node ${reset}"
+          exit 1
+        fi
+        new_node_ip=$next_private_ip
+      fi
+      sed -i 's/^.*eth_replace1.*$/  config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile
+      ##replace host_ip in vm_nodes_provision with private ip
+      sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh
+      ##replace ping site
+      if [ ! -z "$ping_site" ]; then
-z "$ping_site" ]; then + sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh + fi + + ##find public ip info and add public interface + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + this_public_ip=${public_ip_arr[$node]} + + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile + else + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile + fi + remove_vagrant_network eth_replace3 + + ##modify provisioning to do puppet install, config, and foreman check-in + ##substitute host_name and dns_server in the provisioning script + host_string=config_nodes_${node}_hostname + host_name=$(eval echo \$$host_string) + sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh + ##dns server should be the foreman server + sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh + ## remove bootstrap and NAT provisioning + sed -i '/nat_setup.sh/d' Vagrantfile + sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile + ## modify default_gw to be node_default_gw + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile + ## modify VM memory to be 4gig + ##if node type is controller + if [ "$node_type" == "controller" ]; then + sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile + fi + echo "${blue}Starting Vagrant Node $node! ${reset}" + ##stand up vagrant + if ! vagrant up; then + echo "${red} Unable to start $node ${reset}" + exit 1 + else + echo "${blue} $node VM is up! ${reset}" + fi + done + echo "${blue} All VMs are UP! ${reset}" + echo "${blue} Waiting for puppet to complete on the nodes... ${reset}" + ##check puppet is complete + ##ssh into foreman server, run check to verify puppet is complete + pushd $vm_dir/foreman_vm + if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then + echo "${red} Failed to validate puppet completion on nodes ${reset}" + exit 1 + else + echo "{$blue} Puppet complete on all nodes! ${reset}" + fi + popd + ##add routes back to nodes + for node in ${nodes}; do + pushd $vm_dir/$node + if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then + echo "${blue} Adding public route back to $node! ${reset}" + vagrant ssh -c "route add default gw $this_default_gw" + vagrant ssh -c "route delete default gw 10.0.2.2" + fi + popd + done + if [ ! -z "$horizon_public_vip" ]; then + echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}" + else + echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${odl_control_ip} ${reset}" + fi + fi +} - echo "${blue} All VMs are UP! ${reset}" +##check to make sure nodes are powered off +##this function does nothing if virtual +##params: none +##usage: check_baremetal_nodes() +check_baremetal_nodes() { + if [ $virtual ]; then + echo "${blue}Skipping Baremetal node power status check as deployment is virtual ${reset}" + else + echo "${blue}Checking Baremetal nodes power state... ${reset}" + if [ ! 
-z "$base_config" ]; then + # Install ipmitool + # Major version is pinned to force some consistency for Arno + if ! yum list installed | grep -i ipmitool; then + echo "${blue}Installing ipmitool...${reset}" + if ! yum -y install ipmitool-1*; then + echo "${red}Failed to install ipmitool!${reset}" + exit 1 + fi + fi + + ###find all the bmc IPs and number of nodes + node_counter=0 + output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'` + for line in ${output} ; do + bmc_ip[$node_counter]=$line + ((node_counter++)) + done + + max_nodes=$((node_counter-1)) + + ###find bmc_users per node + node_counter=0 + output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'` + for line in ${output} ; do + bmc_user[$node_counter]=$line + ((node_counter++)) + done + + ###find bmc_pass per node + node_counter=0 + output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` + for line in ${output} ; do + bmc_pass[$node_counter]=$line + ((node_counter++)) + done + + for mynode in `seq 0 $max_nodes`; do + echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}" + ipmi_output=`ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis status \ + | grep "System Power" | cut -d ':' -f2 | tr -d [:blank:]` + if [ "$ipmi_output" == "on" ]; then + echo "${red}Error: Node is powered on: ${bmc_ip[$mynode]} ${reset}" + echo "${red}Please run clean.sh before running deploy! ${reset}" + exit 1 + elif [ "$ipmi_output" == "off" ]; then + echo "${blue}Node: ${bmc_ip[$mynode]} is powered off${reset}" + else + echo "${red}Warning: Unable to detect node power state: ${bmc_ip[$mynode]} ${reset}" + fi + done + else + echo "${red}base_config was not provided for a baremetal install! Exiting${reset}" + exit 1 + fi + fi +} + +##END FUNCTIONS + +main() { + parse_cmdline "$@" + disable_selinux + check_baremetal_nodes + install_EPEL + install_vbox + install_ansible + install_vagrant + clean_tmp + verify_vm_dir + clone_bgs $vm_dir/foreman_vm + configure_network + configure_virtual + start_foreman + start_virtual_nodes +} -fi +main "$@" diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml index 21840dd..b41a41b 100644 --- a/foreman/ci/opnfv_ksgen_settings.yml +++ b/foreman/ci/opnfv_ksgen_settings.yml @@ -44,6 +44,7 @@ global_params: deployment_type: network_type: multi_network default_gw: +no_dhcp: false foreman: seed_values: - { name: heat_cfn, oldvalue: true, newvalue: false } @@ -110,6 +111,7 @@ nodes: bmc_mac: "10:23:45:67:88:AB" bmc_user: root bmc_pass: root + admin_ip: compute_admin ansible_ssh_pass: "Op3nStack" admin_password: "" groups: @@ -130,6 +132,7 @@ nodes: bmc_mac: "10:23:45:67:88:AC" bmc_user: root bmc_pass: root + admin_ip: controller1_admin private_ip: controller1_private private_mac: "10:23:45:67:87:AC" ansible_ssh_pass: "Op3nStack" @@ -152,6 +155,7 @@ nodes: bmc_mac: "10:23:45:67:88:AD" bmc_user: root bmc_pass: root + admin_ip: controller2_admin private_ip: controller2_private private_mac: "10:23:45:67:87:AD" ansible_ssh_pass: "Op3nStack" @@ -174,6 +178,7 @@ nodes: bmc_mac: "10:23:45:67:88:AE" bmc_user: root bmc_pass: root + admin_ip: controller3_admin private_ip: controller3_private private_mac: "10:23:45:67:87:AE" ansible_ssh_pass: "Op3nStack" diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml new file mode 100644 index 0000000..79db257 --- /dev/null +++ b/foreman/ci/opnfv_ksgen_settings_no_HA.yml @@ -0,0 +1,264 @@ +global_params: + 
admin_email: opnfv@opnfv.com + ha_flag: "false" + odl_flag: "true" + odl_control_ip: + private_network: + storage_network: + public_network: + private_subnet: + deployment_type: + controller_ip: +network_type: multi_network +default_gw: +no_dhcp: false +foreman: + seed_values: + - { name: heat_cfn, oldvalue: true, newvalue: false } +workaround_puppet_version_lock: false +opm_branch: master +installer: + name: puppet + short_name: pupt + network: + auto_assign_floating_ip: false + variant: + short_name: m2vx + plugin: + name: neutron +workaround_openstack_packstack_rpm: false +tempest: + repo: + Fedora: + '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/ + '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/ + RedHat: + '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/ + use_virtual_env: false + public_allocation_end: 10.2.84.71 + skip: + files: null + tests: null + public_allocation_start: 10.2.84.51 + physnet: physnet1 + use_custom_repo: false + public_subnet_cidr: 10.2.84.0/24 + public_subnet_gateway: 10.2.84.1 + additional_default_settings: + - section: compute + option: flavor_ref + value: 1 + cirros_image_file: cirros-0.3.1-x86_64-disk.img + setup_method: tempest/rpm + test_name: all + rdo: + version: juno + rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + rpm: + version: 20141201 + dir: ~{{ nodes.tempest.remote_user }}/tempest-dir +tmp: + node_prefix: '{{ node.prefix | reject("none") | join("-") }}-' + anchors: + - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + - http://repos.fedorapeople.org/repos/openstack/openstack-juno/ +opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git +workaround_vif_plugging: false +openstack_packstack_rpm: http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm +nodes: + compute: + name: oscompute11.opnfv.com + hostname: oscompute11.opnfv.com + short_name: oscompute11 + type: compute + host_type: baremetal + hostgroup: Compute + mac_address: "10:23:45:67:89:AB" + bmc_ip: 10.4.17.2 + bmc_mac: "10:23:45:67:88:AB" + bmc_user: root + bmc_pass: root + admin_ip: compute_admin + ansible_ssh_pass: "Op3nStack" + admin_password: "" + groups: + - compute + - foreman_nodes + - puppet + - rdo + - neutron + controller1: + name: oscontroller1.opnfv.com + hostname: oscontroller1.opnfv.com + short_name: oscontroller1 + type: controller + host_type: baremetal + hostgroup: Controller_Network_ODL + mac_address: "10:23:45:67:89:AC" + bmc_ip: 10.4.17.3 + bmc_mac: "10:23:45:67:88:AC" + bmc_user: root + bmc_pass: root + private_ip: controller1_private + admin_ip: controller1_admin + private_mac: "10:23:45:67:87:AC" + ansible_ssh_pass: "Op3nStack" + admin_password: "octopus" + groups: + - controller + - foreman_nodes + - puppet + - rdo + - neutron +workaround_mysql_centos7: true +distro: + name: centos + centos: + '7.0': + repos: [] + short_name: c + short_version: 70 + version: '7.0' + rhel: + '7.0': + kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/ + repos: + - section: rhel7-server-rpms + name: Packages for RHEL 7 - $basearch + baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/ + gpgcheck: 0 + - section: rhel-7-server-update-rpms + name: Update Packages for Enterprise Linux 7 - $basearch + baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/ + gpgcheck: 0 + - section: 
rhel-7-server-optional-rpms + name: Optional Packages for Enterprise Linux 7 - $basearch + baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/ + gpgcheck: 0 + - section: rhel-7-server-extras-rpms + name: Optional Packages for Enterprise Linux 7 - $basearch + baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/ + gpgcheck: 0 + '6.5': + kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/ + repos: + - section: rhel6.5-server-rpms + name: Packages for RHEL 6.5 - $basearch + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server + gpgcheck: 0 + - section: rhel-6.5-server-update-rpms + name: Update Packages for Enterprise Linux 6.5 - $basearch + baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/ + gpgcheck: 0 + - section: rhel-6.5-server-optional-rpms + name: Optional Packages for Enterprise Linux 6.5 - $basearch + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os + gpgcheck: 0 + - section: rhel6.5-server-rpms-32bit + name: Packages for RHEL 6.5 - i386 + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server + gpgcheck: 0 + enabled: 1 + - section: rhel-6.5-server-update-rpms-32bit + name: Update Packages for Enterprise Linux 6.5 - i686 + baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/ + gpgcheck: 0 + enabled: 1 + - section: rhel-6.5-server-optional-rpms-32bit + name: Optional Packages for Enterprise Linux 6.5 - i386 + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os + gpgcheck: 0 + enabled: 1 + subscription: + username: REPLACE_ME + password: HWj8TE28Qi0eP2c + pool: 8a85f9823e3d5e43013e3ddd4e2a0977 + config: + selinux: permissive + ntp_server: 0.pool.ntp.org + dns_servers: + - 10.4.1.1 + - 10.4.0.2 + reboot_delay: 1 + initial_boot_timeout: 180 +node: + prefix: + - rdo + - pupt + - ffqiotcxz1 + - null +product: + repo_type: production + name: rdo + short_name: rdo + rpm: + CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + short_version: ju + repo: + production: + CentOS: + 7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7 + '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6 + '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7 + Fedora: + '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20 + '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21 + RedHat: + '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6 + '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6 + '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7 + version: juno + config: + enable_epel: y + short_repo: prod +tester: + name: tempest +distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' ' +job: + verbosity: 1 + archive: + - '{{ tempest.dir }}/etc/tempest.conf' + - '{{ tempest.dir }}/etc/tempest.conf.sample' + - '{{ tempest.dir }}/*.log' + - '{{ tempest.dir }}/*.xml' + - /root/ + - /var/log/ + - /etc/nova + - /etc/ceilometer + - /etc/cinder + - /etc/glance + - /etc/keystone + - /etc/neutron + - /etc/ntp + - /etc/puppet + - /etc/qpid + 
- /etc/qpidd.conf + - /root + - /etc/yum.repos.d + - /etc/yum.repos.d +topology: + name: multinode + short_name: mt +workaround_neutron_ovs_udev_loop: true +workaround_glance_table_utf8: false +verbosity: + debug: 0 + info: 1 + warning: 2 + warn: 2 + errors: 3 +provisioner: + username: admin + network: + type: nova + name: external + skip: skip_provision + foreman_url: https://10.2.84.2/api/v2/ + password: octopus + type: foreman +workaround_nova_compute_fix: false +workarounds: + enabled: true + diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml index 9e3d053..9b3a4d4 100644 --- a/foreman/ci/reload_playbook.yml +++ b/foreman/ci/reload_playbook.yml @@ -14,3 +14,4 @@ delay=60 timeout=180 sudo: false + - pause: minutes=1 diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh index d0bba64..e64c0ad 100755 --- a/foreman/ci/vm_nodes_provision.sh +++ b/foreman/ci/vm_nodes_provision.sh @@ -18,6 +18,7 @@ green=`tput setaf 2` host_name=REPLACE dns_server=REPLACE +host_ip=REPLACE ##END VARS ##set hostname @@ -31,27 +32,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then systemctl restart NetworkManager fi -if ! ping www.google.com -c 5; then +##modify /etc/resolv.conf to point to foreman +echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}" +cat > /etc/resolv.conf << EOF +search ci.com opnfv.com +nameserver $dns_server +nameserver 8.8.8.8 + +EOF + +##modify /etc/hosts to add own IP for rabbitmq workaround +host_short_name=`echo $host_name | cut -d . -f 1` +echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}" +cat > /etc/hosts << EOF +$host_ip $host_short_name $host_name +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +EOF + +if ! ping www.google.com -c 5; then echo "${red} No internet connection, check your route and DNS setup ${reset}" exit 1 fi -# Install EPEL repo for access to many other yum repos -# Major version is pinned to force some consistency for Arno -yum install -y epel-release-7* +##install EPEL +if ! yum repolist | grep "epel/"; then + if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then + printf '%s\n' 'vm_provision_nodes.sh: Unable to configure EPEL repo' >&2 + exit 1 + fi +else + printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.' +fi -# Update device-mapper-libs, needed for libvirtd on compute nodes -# Major version is pinned to force some consistency for Arno -if ! yum -y upgrade device-mapper-libs-1*; then +##install device-mapper-libs +##needed for libvirtd on compute nodes +if ! yum -y upgrade device-mapper-libs; then echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}" fi -# Install other required packages -# Major version is pinned to force some consistency for Arno echo "${blue} Installing Puppet ${reset}" -if ! yum install -y puppet-3*; then - printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2 - exit 1 +##install puppet +if ! yum list installed | grep -i puppet; then + if ! yum -y install puppet; then + printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2 + exit 1 + fi fi echo "${blue} Configuring puppet ${reset}" |
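
Note on the deploy.sh changes above: the floating-IP allocation range computed at the end of configure_network() relies on IP-arithmetic helpers (next_ip, find_subnet, find_last_ip_subnet, subtract_ip) that are defined earlier in deploy.sh and are not part of this diff. The sketch below is only an illustration of the arithmetic those helpers are assumed to perform, not their actual implementations; it can be run standalone to sanity-check an allocation range.

#!/usr/bin/env bash
# Illustrative stand-ins only: deploy.sh ships its own next_ip/find_subnet/
# find_last_ip_subnet/subtract_ip helpers. These approximations assume a
# dotted-decimal netmask and bash integer arithmetic.

ip_to_int() { local a b c d; IFS=. read -r a b c d <<< "$1"; echo $(( (a<<24) + (b<<16) + (c<<8) + d )); }
int_to_ip() { local i=$1; echo "$(( (i>>24) & 255 )).$(( (i>>16) & 255 )).$(( (i>>8) & 255 )).$(( i & 255 ))"; }

next_ip()     { int_to_ip $(( $(ip_to_int "$1") + 1 )); }
subtract_ip() { int_to_ip $(( $(ip_to_int "$1") - $2 )); }
find_subnet() { int_to_ip $(( $(ip_to_int "$1") & $(ip_to_int "$2") )); }
find_last_ip_subnet() {
  # (network | ~mask) is the broadcast address, so step back one for the last usable IP
  int_to_ip $(( ( $(ip_to_int "$1") & $(ip_to_int "$2") | ~$(ip_to_int "$2") & 0xFFFFFFFF ) - 1 ))
}

# Non-static case from the diff: hand Neutron the last $floating_ip_count IPs of the subnet
next_public_ip=10.2.84.25          # example values only
public_subnet_mask=255.255.255.0
floating_ip_count=20
last_ip_subnet=$(find_last_ip_subnet "$next_public_ip" "$public_subnet_mask")
public_allocation_start=$(subtract_ip "$last_ip_subnet" "$floating_ip_count")
public_allocation_end=$last_ip_subnet
echo "Neutron Floating IP range: $public_allocation_start to $public_allocation_end"
# -> Neutron Floating IP range: 10.2.84.234 to 10.2.84.254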
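
The per-node lookups in start_virtual_nodes() (node_type, mac_address, hostname) depend on parse_yaml flattening opnfv_ksgen_settings.yml into config_-prefixed shell variables and then dereferencing them with eval. A minimal sketch of that lookup pattern follows; the sample values mirror the settings file in this change, and bash's ${!var} indirection is shown as an eval-free equivalent rather than what deploy.sh actually uses.

#!/usr/bin/env bash
# Sketch of the indirect per-node lookup; the "config_" prefix matches what
# deploy.sh passes to parse_yaml, the sample values mirror the settings file.

# Pretend result of: eval $(parse_yaml opnfv_ksgen_settings.yml "config_")
config_nodes_controller1_type="controller"
config_nodes_controller1_mac_address="10:23:45:67:89:AC"
config_nodes_compute_type="compute"

node="controller1"

# deploy.sh does:  node_type=config_nodes_${node}_type; node_type=$(eval echo \$$node_type)
# ${!var} performs the same dereference without eval:
var="config_nodes_${node}_type"
node_type="${!var}"

var="config_nodes_${node}_mac_address"
mac_addr="${!var}"
mac_addr="${mac_addr//[:-]/}"    # same normalisation as: sed 's/:\|-//g'

echo "node=$node type=$node_type mac=$mac_addr"
# -> node=controller1 type=controller mac=1023456789AC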
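
The sed one-liner that pulls node names out of the ksgen settings (used twice in the deploy.sh changes above) is fairly opaque: it accumulates everything from the nodes: key up to the first workaround* key, keeps only the two-space-indented child keys, and strips the indentation and trailing colon. A self-contained rehearsal against a toy settings fragment (the file name and contents here are made up for the example):

#!/usr/bin/env bash
# Rehearses the node-name extraction from deploy.sh against a toy YAML file.

cat > /tmp/toy_settings.yml << 'EOF'
nodes:
  compute:
    type: compute
  controller1:
    type: controller
  controller2:
    type: controller
workaround_mysql_centos7: true
EOF

# Collect the block from "nodes:" up to the first "workaround" line, keep the
# two-space-indented keys, then drop whitespace and the trailing colon.
nodes=$(sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' /tmp/toy_settings.yml |
        sed -n '/^  [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g')

# Controllers first: works around OVS connecting to ODL (Helium) too early.
compute_nodes=$(echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " ")
controller_nodes=$(echo $nodes | tr " " "\n" | grep controller | tr "\n" " ")
echo "install order: ${controller_nodes}${compute_nodes}"
# -> install order: controller1 controller2 compute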