From be75ed95cc956e1ef634d3878148701c21d15b5a Mon Sep 17 00:00:00 2001 From: randyl Date: Wed, 17 Jun 2015 15:23:45 -0600 Subject: Fixing verification of vbox drivers JIRA: BGS-74 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the Foreman install, if the vboxdrv module is not loaded, the deploy.sh script attempts to load the driver by running ‘/etc/init.d/vboxdrv setup’ and checks the return code for success. However, /etc/init.d/vboxdrv will return 0 as long as a valid parameter was passed on the command line. In the case of a failure, a failure message will be logged but the return code will still be 0. For instance, if the kvm module is already loaded, the VirtualBox driver will never install. deploy.sh will now check for the VirtualBox kernel module with lsmod after the setup script is run, and will exit if vboxdrv is not loaded. Change-Id: I702819cbf28afb08e0035e08918390af85c07674 Signed-off-by: randyl --- foreman/ci/deploy.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 86f03a7..31d41d2 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -225,7 +225,8 @@ fi ##install kmod-VirtualBox if ! lsmod | grep vboxdrv; then - if ! sudo /etc/init.d/vboxdrv setup; then + sudo /etc/init.d/vboxdrv setup + if ! lsmod | grep vboxdrv; then printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 exit 1 fi -- cgit 1.2.3-korg From 3fea98bc7bd7f0f411c2be1be7665a569ec8fb0a Mon Sep 17 00:00:00 2001 From: randyl Date: Wed, 24 Jun 2015 12:55:53 -0600 Subject: Moved 80% of Foreman deploy.sh into functions To help with readability and troubleshooting, the roughly 80% of the script that was not in a function was moved into functions based on logical blocks. No functional or order-of-execution changes were made to the script. Since most of the file and the other Foreman CI scripts use a 2-space indent, that was also applied uniformly across the file.
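A minimal sketch of the structure this refactor produces (the function names match those in the diff below; the bodies here are placeholders only):

  #!/bin/bash
  # deploy.sh after the refactor: each logical block lives in its own function
  # and main() calls them in the original order of execution.

  parse_cmdline() {
    # translate the command line parameters into variables (placeholder body)
    :
  }

  install_vbox() {
    # install the VirtualBox repo, package and kernel module (placeholder body)
    :
  }

  main() {
    parse_cmdline "$@"
    install_vbox
  }

  main "$@"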
Change-Id: I83b96e231d484813025f6aa900cf2ffc92e94397 Signed-off-by: randyl --- foreman/ci/deploy.sh | 944 +++++++++++++++++++++++++++------------------------ 1 file changed, 509 insertions(+), 435 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 31d41d2..46ba80e 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -149,19 +149,21 @@ parse_yaml() { }' } -##END FUNCTIONS - -if [[ ( $1 == "--help") || $1 == "-h" ]]; then +##translates the command line paramaters into variables +##params: $@ the entire command line is passed +##usage: parse_cmd_line() "$@" +parse_cmdline() { + if [[ ( $1 == "--help") || $1 == "-h" ]]; then display_usage exit 0 -fi + fi -echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n" -echo "Use -h to display help" -sleep 2 + echo -e "\n\n${blue}This script is used to deploy Foreman/QuickStack Installer and Provision OPNFV Target System${reset}\n\n" + echo "Use -h to display help" + sleep 2 -while [ "`echo $1 | cut -c1`" = "-" ] -do + while [ "`echo $1 | cut -c1`" = "-" ] + do echo $1 case "$1" in -base_config) @@ -180,31 +182,45 @@ do display_usage exit 1 ;; -esac -done + esac + done +} ##disable selinux -/sbin/setenforce 0 - -# Install EPEL repo for access to many other yum repos -# Major version is pinned to force some consistency for Arno -yum install -y epel-release-7* - -# Install other required packages -# Major versions are pinned to force some consistency for Arno -if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then - printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2 - exit 1 -fi - -##install VirtualBox repo -if cat /etc/*release | grep -i "Fedora release"; then - vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch -else - vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch -fi - -cat > /etc/yum.repos.d/virtualbox.repo << EOM +##params: none +##usage: disable_selinux() +disable_selinux() { + /sbin/setenforce 0 +} + +##Install the EPEL repository and additional packages +##params: none +##usage: install_EPEL() +install_EPEL() { + # Install EPEL repo for access to many other yum repos + # Major version is pinned to force some consistency for Arno + yum install -y epel-release-7* + + # Install other required packages + # Major versions are pinned to force some consistency for Arno + if ! yum install -y binutils-2* gcc-4* make-3* patch-2* libgomp-4* glibc-headers-2* glibc-devel-2* kernel-headers-3* kernel-devel-3* dkms-2* psmisc-22*; then + printf '%s\n' 'deploy.sh: Unable to install depdency packages' >&2 + exit 1 + fi +} + +##Download and install virtual box +##params: none +##usage: install_vbox() +install_vbox() { + ##install VirtualBox repo + if cat /etc/*release | grep -i "Fedora release"; then + vboxurl=http://download.virtualbox.org/virtualbox/rpm/fedora/\$releasever/\$basearch + else + vboxurl=http://download.virtualbox.org/virtualbox/rpm/el/\$releasever/\$basearch + fi + + cat > /etc/yum.repos.d/virtualbox.repo << EOM [virtualbox] name=Oracle Linux / RHEL / CentOS-\$releasever / \$basearch - VirtualBox baseurl=$vboxurl @@ -215,380 +231,101 @@ skip_if_unavailable = 1 keepcache = 0 EOM -##install VirtualBox -if ! yum list installed | grep -i virtualbox; then - if ! 
yum -y install VirtualBox-4.3; then - printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2 - exit 1 + ##install VirtualBox + if ! yum list installed | grep -i virtualbox; then + if ! yum -y install VirtualBox-4.3; then + printf '%s\n' 'deploy.sh: Unable to install virtualbox package' >&2 + exit 1 + fi fi -fi -##install kmod-VirtualBox -if ! lsmod | grep vboxdrv; then - sudo /etc/init.d/vboxdrv setup + ##install kmod-VirtualBox if ! lsmod | grep vboxdrv; then - printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 - exit 1 - fi -else - printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed' -fi - -##install Ansible -if ! yum list installed | grep -i ansible; then - if ! yum -y install ansible-1*; then - printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2 - exit 1 + sudo /etc/init.d/vboxdrv setup + if ! lsmod | grep vboxdrv; then + printf '%s\n' 'deploy.sh: Unable to install kernel module for virtualbox' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping kernel module for virtualbox. Already Installed' fi -fi +} -##install Vagrant -if ! rpm -qa | grep vagrant; then - if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then - printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2 - exit 1 - fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.' -fi - -##add centos 7 box to vagrant -if ! vagrant box list | grep chef/centos-7.0; then - if ! vagrant box add chef/centos-7.0 --provider virtualbox; then - printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 - exit 1 - fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.' -fi - -##install workaround for centos7 -if ! vagrant plugin list | grep vagrant-centos7_fix; then - if ! vagrant plugin install vagrant-centos7_fix; then - printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2 +##install Ansible using yum +##params: none +##usage: install_anible() +install_ansible() { + if ! yum list installed | grep -i ansible; then + if ! yum -y install ansible-1*; then + printf '%s\n' 'deploy.sh: Unable to install Ansible package' >&2 + exit 1 + fi fi -else - printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.' -fi - -cd /tmp/ - -##remove bgs vagrant incase it wasn't cleaned up -rm -rf /tmp/bgs_vagrant - -##clone bgs vagrant -##will change this to be opnfv repo when commit is done -if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then - printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 - exit 1 -fi - -cd bgs_vagrant - -echo "${blue}Detecting network configuration...${reset}" -##detect host 1 or 3 interface configuration -#output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` -output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` - -if [ ! "$output" ]; then - printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 - exit 1 -fi - -##find number of interfaces with ip and substitute in VagrantFile -if_counter=0 -for interface in ${output}; do +} - if [ "$if_counter" -ge 4 ]; then - break - fi - interface_ip=$(find_ip $interface) - if [ ! "$interface_ip" ]; then - continue - fi - new_ip=$(next_usable_ip $interface_ip) - if [ ! 
"$new_ip" ]; then - continue - fi - interface_arr[$interface]=$if_counter - interface_ip_arr[$if_counter]=$new_ip - subnet_mask=$(find_netmask $interface) - if [ "$if_counter" -eq 1 ]; then - private_subnet_mask=$subnet_mask - private_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 2 ]; then - public_subnet_mask=$subnet_mask - public_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 3 ]; then - storage_subnet_mask=$subnet_mask - fi - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile - ((if_counter++)) -done - -##now remove interface config in Vagrantfile for 1 node -##if 1, 3, or 4 interfaces set deployment type -##if 2 interfaces remove 2nd interface and set deployment type -if [ "$if_counter" == 1 ]; then - deployment_type="single_network" - remove_vagrant_network eth_replace1 - remove_vagrant_network eth_replace2 - remove_vagrant_network eth_replace3 -elif [ "$if_counter" == 2 ]; then - deployment_type="single_network" - second_interface=`echo $output | awk '{print $2}'` - remove_vagrant_network $second_interface - remove_vagrant_network eth_replace2 -elif [ "$if_counter" == 3 ]; then - deployment_type="three_network" - remove_vagrant_network eth_replace3 -else - deployment_type="multi_network" -fi - -echo "${blue}Network detected: ${deployment_type}! ${reset}" - -if route | grep default; then - echo "${blue}Default Gateway Detected ${reset}" - host_default_gw=$(ip route | grep default | awk '{print $3}') - echo "${blue}Default Gateway: $host_default_gw ${reset}" - default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}') - case "${interface_arr[$default_gw_interface]}" in - 0) - echo "${blue}Default Gateway Detected on Admin Interface!${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile - node_default_gw=$host_default_gw - ;; - 1) - echo "${red}Default Gateway Detected on Private Interface!${reset}" - echo "${red}Private subnet should be private and not have Internet access!${reset}" - exit 1 - ;; - 2) - echo "${blue}Default Gateway Detected on Public Interface!${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile - echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}" - sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile - echo "${blue}Setting node gateway to be VM Admin IP${reset}" - node_default_gw=${interface_ip_arr[0]} - public_gateway=$default_gw - ;; - 3) - echo "${red}Default Gateway Detected on Storage Interface!${reset}" - echo "${red}Storage subnet should be private and not have Internet access!${reset}" - exit 1 - ;; - *) - echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}" - exit 1 - ;; - esac -else - #assumes 24 bit mask - defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3` - firstip=.1 - defaultgw=$defaultgw$firstip - echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}" - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile - node_default_gw=$defaultgw -fi - -if [ $base_config ]; then - if ! cp -f $base_config opnfv_ksgen_settings.yml; then - echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" - exit 1 +##install Vagrant RPM directly with the bintray.com site +##params: none +##usage: install_vagrant() +install_vagrant() { + if ! 
rpm -qa | grep vagrant; then + if ! rpm -Uvh https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.rpm; then + printf '%s\n' 'deploy.sh: Unable to install vagrant package' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant install as it is already installed.' fi -fi - -if [ $no_parse ]; then -echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" - -else - -echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}" -##Edit the ksgen settings appropriately -##ksgen settings will be stored in /vagrant on the vagrant machine -##if single node deployment all the variables will have the same ip -##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7 -sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml - -##replace private interface parameter -##private interface will be of hosts, so we need to know the provisioned host interface name -##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts -##replace IP for parameters with next IP that will be given to controller -if [ "$deployment_type" == "single_network" ]; then - ##we also need to assign IP addresses to nodes - ##for single node, foreman is managing the single network, so we can't reserve them - ##not supporting single network anymore for now - echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. ${reset}" - exit 0 - -elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then - - if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml + ##add centos 7 box to vagrant + if ! vagrant box list | grep chef/centos-7.0; then + if ! vagrant box add chef/centos-7.0 --provider virtualbox; then + printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 + exit 1 + fi + else + printf '%s\n' 'deploy.sh: Skipping Vagrant box add as centos-7.0 is already installed.' fi - sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml - - ##get ip addresses for private network on controllers to make dhcp entries - ##required for controllers_ip_array global param - next_private_ip=${interface_ip_arr[1]} - type=_private - for node in controller1 controller2 controller3; do - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 - exit 1 + ##install workaround for centos7 + if ! vagrant plugin list | grep vagrant-centos7_fix; then + if ! vagrant plugin install vagrant-centos7_fix; then + printf '%s\n' 'deploy.sh: Warning: unable to install vagrant centos7 workaround' >&2 fi - sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml - controller_ip_array=$controller_ip_array$next_private_ip, - done + else + printf '%s\n' 'deploy.sh: Skipping Vagrant plugin as centos7 workaround is already installed.' + fi +} - ##replace global param for contollers_ip_array - controller_ip_array=${controller_ip_array%?} - sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml - - ##now replace all the VIP variables. 
admin//private can be the same IP - ##we have to use IP's here that won't be allocated to hosts at provisioning time - ##therefore we increment the ip by 10 to make sure we have a safe buffer - next_private_ip=$(increment_ip $next_private_ip 10) - - grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 - exit 1 - fi - done - ##replace foreman site - next_public_ip=${interface_ip_arr[2]} - sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml - ##replace public vips - next_public_ip=$(increment_ip $next_public_ip 10) - grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml - next_public_ip=$(next_usable_ip $next_public_ip) - if [ ! "$next_public_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 - exit 1 - fi - done +##remove bgs vagrant incase it wasn't cleaned up +##params: none +##usage: clean_tmp() +clean_tmp() { + rm -rf /tmp/bgs_vagrant +} - ##replace public_network param - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml - ##replace private_network param - private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) - sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml - ##replace storage_network - if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml - else - next_storage_ip=${interface_ip_arr[3]} - storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) - sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml - fi +##clone bgs vagrant version 1.0 using git +##params: none +##usage: clone_bgs +clone_bgs() { + cd /tmp/ - ##replace public_subnet param - public_subnet=$public_subnet'\'$public_short_subnet_mask - sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml - ##replace private_subnet param - private_subnet=$private_subnet'\'$private_short_subnet_mask - sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml - - ##replace public_dns param to be foreman server - sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml - - ##replace public_gateway - if [ -z "$public_gateway" ]; then - ##if unset then we assume its the first IP in the public subnet - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - public_gateway=$(increment_subnet $public_subnet 1) - fi - sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml - - ##we have to define an allocation range of the public subnet to give - ##to neutron to use as floating IPs - ##we should control this subnet, so this range should work .150-200 - ##but generally this is a bad idea and we are assuming at least a /24 subnet here - public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - 
public_allocation_start=$(increment_subnet $public_subnet 150) - public_allocation_end=$(increment_subnet $public_subnet 200) - - sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml - sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml - -else - printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2 - exit 1 -fi - -echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}" - -fi - -if [ $virtual ]; then - echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}" - sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh -fi - -echo "${blue}Starting Vagrant! ${reset}" - -##stand up vagrant -if ! vagrant up; then - printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2 - exit 1 -else - echo "${blue}Foreman VM is up! ${reset}" -fi - -if [ $virtual ]; then - -##Bring up VM nodes -echo "${blue}Setting VMs up... ${reset}" -nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` -##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first -##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have -##3 static controllers -compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` -controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` -nodes=${controller_nodes}${compute_nodes} - -for node in ${nodes}; do - cd /tmp - - ##remove VM nodes incase it wasn't cleaned up - rm -rf /tmp/$node - - ##clone bgs vagrant ##will change this to be opnfv repo when commit is done - if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then + if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 exit 1 fi +} - cd $node - - if [ $base_config ]; then - if ! cp -f $base_config opnfv_ksgen_settings.yml; then - echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" - exit 1 - fi - fi - - ##parse yaml into variables - eval $(parse_yaml opnfv_ksgen_settings.yml "config_") - ##find node type - node_type=config_nodes_${node}_type - node_type=$(eval echo \$$node_type) +##validates the netork settings and update VagrantFile with network settings +##params: none +##usage: configure_network() +configure_network() { + cd /tmp/bgs_vagrant - ##find number of interfaces with ip and substitute in VagrantFile + echo "${blue}Detecting network configuration...${reset}" + ##detect host 1 or 3 interface configuration + #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` if [ ! "$output" ]; then @@ -596,7 +333,7 @@ for node in ${nodes}; do exit 1 fi - + ##find number of interfaces with ip and substitute in VagrantFile if_counter=0 for interface in ${output}; do @@ -607,36 +344,25 @@ for node in ${nodes}; do if [ ! "$interface_ip" ]; then continue fi - case "${if_counter}" in - 0) - mac_string=config_nodes_${node}_mac_address - mac_addr=$(eval echo \$$mac_string) - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - if [ $mac_addr == "" ]; then - echo "${red} Unable to find mac_address for $node! 
${reset}" - exit 1 - fi - ;; - 1) - if [ "$node_type" == "controller" ]; then - mac_string=config_nodes_${node}_private_mac - mac_addr=$(eval echo \$$mac_string) - if [ $mac_addr == "" ]; then - echo "${red} Unable to find private_mac for $node! ${reset}" - exit 1 - fi - else - ##generate random mac - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - fi - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - *) - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - esac - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile + new_ip=$(next_usable_ip $interface_ip) + if [ ! "$new_ip" ]; then + continue + fi + interface_arr[$interface]=$if_counter + interface_ip_arr[$if_counter]=$new_ip + subnet_mask=$(find_netmask $interface) + if [ "$if_counter" -eq 1 ]; then + private_subnet_mask=$subnet_mask + private_short_subnet_mask=$(find_short_netmask $interface) + fi + if [ "$if_counter" -eq 2 ]; then + public_subnet_mask=$subnet_mask + public_short_subnet_mask=$(find_short_netmask $interface) + fi + if [ "$if_counter" -eq 3 ]; then + storage_subnet_mask=$subnet_mask + fi + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile ((if_counter++)) done @@ -660,36 +386,384 @@ for node in ${nodes}; do deployment_type="multi_network" fi - ##modify provisioning to do puppet install, config, and foreman check-in - ##substitute host_name and dns_server in the provisioning script - host_string=config_nodes_${node}_hostname - host_name=$(eval echo \$$host_string) - sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh - ##dns server should be the foreman server - sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh + echo "${blue}Network detected: ${deployment_type}! 
${reset}" + + if route | grep default; then + echo "${blue}Default Gateway Detected ${reset}" + host_default_gw=$(ip route | grep default | awk '{print $3}') + echo "${blue}Default Gateway: $host_default_gw ${reset}" + default_gw_interface=$(ip route get $host_default_gw | awk '{print $3}') + case "${interface_arr[$default_gw_interface]}" in + 0) + echo "${blue}Default Gateway Detected on Admin Interface!${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile + node_default_gw=$host_default_gw + ;; + 1) + echo "${red}Default Gateway Detected on Private Interface!${reset}" + echo "${red}Private subnet should be private and not have Internet access!${reset}" + exit 1 + ;; + 2) + echo "${blue}Default Gateway Detected on Public Interface!${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$host_default_gw"\"'/' Vagrantfile + echo "${blue}Will setup NAT from Admin -> Public Network on VM!${reset}" + sed -i 's/^.*nat_flag =.*$/ nat_flag = true/' Vagrantfile + echo "${blue}Setting node gateway to be VM Admin IP${reset}" + node_default_gw=${interface_ip_arr[0]} + public_gateway=$default_gw + ;; + 3) + echo "${red}Default Gateway Detected on Storage Interface!${reset}" + echo "${red}Storage subnet should be private and not have Internet access!${reset}" + exit 1 + ;; + *) + echo "${red}Unable to determine which interface default gateway is on..Exiting!${reset}" + exit 1 + ;; + esac + else + #assumes 24 bit mask + defaultgw=`echo ${interface_ip_arr[0]} | cut -d. -f1-3` + firstip=.1 + defaultgw=$defaultgw$firstip + echo "${blue}Unable to find default gateway. Assuming it is $defaultgw ${reset}" + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$defaultgw"\"'/' Vagrantfile + node_default_gw=$defaultgw + fi + + if [ $base_config ]; then + if ! cp -f $base_config opnfv_ksgen_settings.yml; then + echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" + exit 1 + fi + fi + + if [ $no_parse ]; then + echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" + + else + + echo "${blue}Gathering network parameters for Target System...this may take a few minutes${reset}" + ##Edit the ksgen settings appropriately + ##ksgen settings will be stored in /vagrant on the vagrant machine + ##if single node deployment all the variables will have the same ip + ##interface names will be enp0s3, enp0s8, enp0s9 in chef/centos7 + + sed -i 's/^.*default_gw:.*$/default_gw:'" $node_default_gw"'/' opnfv_ksgen_settings.yml + + ##replace private interface parameter + ##private interface will be of hosts, so we need to know the provisioned host interface name + ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts + ##replace IP for parameters with next IP that will be given to controller + if [ "$deployment_type" == "single_network" ]; then + ##we also need to assign IP addresses to nodes + ##for single node, foreman is managing the single network, so we can't reserve them + ##not supporting single network anymore for now + echo "{blue}Single Network type is unsupported right now. Please check your interface configuration. Exiting. 
${reset}" + exit 0 + + elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then + + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml + fi + + sed -i 's/^.*deployment_type:.*$/ deployment_type: '"$deployment_type"'/' opnfv_ksgen_settings.yml + + ##get ip addresses for private network on controllers to make dhcp entries + ##required for controllers_ip_array global param + next_private_ip=${interface_ip_arr[1]} + type=_private + for node in controller1 controller2 controller3; do + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 + exit 1 + fi + sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml + controller_ip_array=$controller_ip_array$next_private_ip, + done + + ##replace global param for contollers_ip_array + controller_ip_array=${controller_ip_array%?} + sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml + + ##now replace all the VIP variables. admin//private can be the same IP + ##we have to use IP's here that won't be allocated to hosts at provisioning time + ##therefore we increment the ip by 10 to make sure we have a safe buffer + next_private_ip=$(increment_ip $next_private_ip 10) + + grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do + sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 + exit 1 + fi + done + + ##replace foreman site + next_public_ip=${interface_ip_arr[2]} + sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml + ##replace public vips + next_public_ip=$(increment_ip $next_public_ip 10) + grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do + sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml + next_public_ip=$(next_usable_ip $next_public_ip) + if [ ! 
"$next_public_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 + exit 1 + fi + done + + ##replace public_network param + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + sed -i 's/^.*public_network:.*$/ public_network:'" $public_subnet"'/' opnfv_ksgen_settings.yml + ##replace private_network param + private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) + sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + ##replace storage_network + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + else + next_storage_ip=${interface_ip_arr[3]} + storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) + sed -i 's/^.*storage_network:.*$/ storage_network:'" $storage_subnet"'/' opnfv_ksgen_settings.yml + fi + + ##replace public_subnet param + public_subnet=$public_subnet'\'$public_short_subnet_mask + sed -i 's/^.*public_subnet:.*$/ public_subnet:'" $public_subnet"'/' opnfv_ksgen_settings.yml + ##replace private_subnet param + private_subnet=$private_subnet'\'$private_short_subnet_mask + sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml + + ##replace public_dns param to be foreman server + sed -i 's/^.*public_dns:.*$/ public_dns: '${interface_ip_arr[2]}'/' opnfv_ksgen_settings.yml + + ##replace public_gateway + if [ -z "$public_gateway" ]; then + ##if unset then we assume its the first IP in the public subnet + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + public_gateway=$(increment_subnet $public_subnet 1) + fi + sed -i 's/^.*public_gateway:.*$/ public_gateway:'" $public_gateway"'/' opnfv_ksgen_settings.yml + + ##we have to define an allocation range of the public subnet to give + ##to neutron to use as floating IPs + ##we should control this subnet, so this range should work .150-200 + ##but generally this is a bad idea and we are assuming at least a /24 subnet here + public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) + public_allocation_start=$(increment_subnet $public_subnet 150) + public_allocation_end=$(increment_subnet $public_subnet 200) + + sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml + sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml + + else + printf '%s\n' 'deploy.sh: Unknown network type: $deployment_type' >&2 + exit 1 + fi - ## remove bootstrap and NAT provisioning - sed -i '/nat_setup.sh/d' Vagrantfile - sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile + echo "${blue}Parameters Complete. Settings have been set for Foreman. ${reset}" - ## modify default_gw to be node_default_gw - sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile + fi +} - ## modify VM memory to be 4gig - sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile +##Configure bootstrap.sh to use the virtual Khaleesi playbook +##params: none +##usage: configure_virtual() +configure_virtual() { + if [ $virtual ]; then + echo "${blue} Virtual flag detected, setting Khaleesi playbook to be opnfv-vm.yml ${reset}" + sed -i 's/opnfv.yml/opnfv-vm.yml/' bootstrap.sh + fi +} - echo "${blue}Starting Vagrant Node $node! 
${reset}" +##Starts for forement VM with Vagrant +##params: none +##usage: start_vagrant() +start_foreman() { + echo "${blue}Starting Vagrant! ${reset}" ##stand up vagrant if ! vagrant up; then - echo "${red} Unable to start $node ${reset}" + printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2 exit 1 else - echo "${blue} $node VM is up! ${reset}" + echo "${blue}Foreman VM is up! ${reset}" fi +} -done +##start the VM if this is a virtual installaion +##this function does nothing if baremetal servers are being used +##params: none +##usage: start_virtual_nodes() +start_virutal_nodes() { + if [ $virtual ]; then + + ##Bring up VM nodes + echo "${blue}Setting VMs up... ${reset}" + nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` + ##due to ODL Helium bug of OVS connecting to ODL too early, we need controllers to install first + ##this is fix kind of assumes more than I would like to, but for now it should be OK as we always have + ##3 static controllers + compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` + controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` + nodes=${controller_nodes}${compute_nodes} + + for node in ${nodes}; do + cd /tmp + + ##remove VM nodes incase it wasn't cleaned up + rm -rf /tmp/$node + + ##clone bgs vagrant + ##will change this to be opnfv repo when commit is done + if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then + printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 + exit 1 + fi + + cd $node + + if [ $base_config ]; then + if ! cp -f $base_config opnfv_ksgen_settings.yml; then + echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" + exit 1 + fi + fi + + ##parse yaml into variables + eval $(parse_yaml opnfv_ksgen_settings.yml "config_") + ##find node type + node_type=config_nodes_${node}_type + node_type=$(eval echo \$$node_type) + + ##find number of interfaces with ip and substitute in VagrantFile + output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` + + if [ ! "$output" ]; then + printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 + exit 1 + fi + + + if_counter=0 + for interface in ${output}; do + + if [ "$if_counter" -ge 4 ]; then + break + fi + interface_ip=$(find_ip $interface) + if [ ! "$interface_ip" ]; then + continue + fi + case "${if_counter}" in + 0) + mac_string=config_nodes_${node}_mac_address + mac_addr=$(eval echo \$$mac_string) + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + if [ $mac_addr == "" ]; then + echo "${red} Unable to find mac_address for $node! ${reset}" + exit 1 + fi + ;; + 1) + if [ "$node_type" == "controller" ]; then + mac_string=config_nodes_${node}_private_mac + mac_addr=$(eval echo \$$mac_string) + if [ $mac_addr == "" ]; then + echo "${red} Unable to find private_mac for $node! 
${reset}" + exit 1 + fi + else + ##generate random mac + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + fi + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + ;; + *) + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + ;; + esac + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile + ((if_counter++)) + done + + ##now remove interface config in Vagrantfile for 1 node + ##if 1, 3, or 4 interfaces set deployment type + ##if 2 interfaces remove 2nd interface and set deployment type + if [ "$if_counter" == 1 ]; then + deployment_type="single_network" + remove_vagrant_network eth_replace1 + remove_vagrant_network eth_replace2 + remove_vagrant_network eth_replace3 + elif [ "$if_counter" == 2 ]; then + deployment_type="single_network" + second_interface=`echo $output | awk '{print $2}'` + remove_vagrant_network $second_interface + remove_vagrant_network eth_replace2 + elif [ "$if_counter" == 3 ]; then + deployment_type="three_network" + remove_vagrant_network eth_replace3 + else + deployment_type="multi_network" + fi + + ##modify provisioning to do puppet install, config, and foreman check-in + ##substitute host_name and dns_server in the provisioning script + host_string=config_nodes_${node}_hostname + host_name=$(eval echo \$$host_string) + sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh + ##dns server should be the foreman server + sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh + + ## remove bootstrap and NAT provisioning + sed -i '/nat_setup.sh/d' Vagrantfile + sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile + + ## modify default_gw to be node_default_gw + sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile + + ## modify VM memory to be 4gig + sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile + + echo "${blue}Starting Vagrant Node $node! ${reset}" + + ##stand up vagrant + if ! vagrant up; then + echo "${red} Unable to start $node ${reset}" + exit 1 + else + echo "${blue} $node VM is up! ${reset}" + fi + + done + + echo "${blue} All VMs are UP! ${reset}" + + fi +} - echo "${blue} All VMs are UP! ${reset}" +##END FUNCTIONS + +main() { + parse_cmdline "$@" + disable_selinux + install_EPEL + install_vbox + install_ansible + install_vagrant + clean_tmp + clone_bgs + configure_network + configure_virtual + start_foreman + start_virutal_nodes +} -fi +main "$@" -- cgit 1.2.3-korg From 9ff324952808fa85b6c897b496b332ef76f92441 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 26 Jun 2015 11:49:42 -0400 Subject: Fixes networking related constraints for Virtual deployments Patchset fixes the following: - Virtual deployment no longer requires 3 networks, does a lookup of the public interface on the server to bridge to, and then uses internal VirtualBox networks for the other admin and private networks - enable_virtual_dhcp option now allows enabling of Foreman DHCP server. This new default behavior is to use static IP address in virtual deployments. This is more useful in environments where disabling DHCP on the public network is not an option. - static_ip_range argument used to define which IP block to use out of the public network for the deployment. Valid only in virtual deployment. 
- puppet verification for virtual deployments added - horizon and foreman URLs will now be printed at the end of deployment - fixes and updates to non-HA deployments - ping_site argument now allows users to specify the site to verify VM internet connectivity. Default is google.com which is unreachable in China JIRA: BGS-73 Change-Id: I63b4c91477591d2f9436b5e6f59a2f2ee021d7d7 Signed-off-by: Tim Rozet --- foreman/ci/Vagrantfile | 7 + foreman/ci/deploy.sh | 430 +++++++++++++++++++++++------- foreman/ci/opnfv_ksgen_settings.yml | 5 + foreman/ci/opnfv_ksgen_settings_no_HA.yml | 264 ++++++++++++++++++ foreman/ci/vm_nodes_provision.sh | 50 +++- 5 files changed, 654 insertions(+), 102 deletions(-) create mode 100644 foreman/ci/opnfv_ksgen_settings_no_HA.yml (limited to 'foreman/ci') diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile index 100e12d..a01da70 100644 --- a/foreman/ci/Vagrantfile +++ b/foreman/ci/Vagrantfile @@ -41,6 +41,9 @@ Vagrant.configure(2) do |config| default_gw = "" nat_flag = false + # Disable dhcp flag + disable_dhcp_flag = false + # Share an additional folder to the guest VM. The first argument is # the path on the host to the actual folder. The second argument is # the path on the guest to mount the folder. And the optional third @@ -90,4 +93,8 @@ Vagrant.configure(2) do |config| config.vm.provision :shell, path: "nat_setup.sh" end config.vm.provision :shell, path: "bootstrap.sh" + if disable_dhcp_flag + config.vm.provision :shell, :inline => "systemctl stop dhcpd" + config.vm.provision :shell, :inline => "systemctl disable dhcpd" + end end diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 46ba80e..2ccb64b 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -25,6 +25,9 @@ red=`tput setaf 1` green=`tput setaf 2` declare -A interface_arr +declare -A controllers_ip_arr +declare -A admin_ip_arr +declare -A public_ip_arr ##END VARS ##FUNCTIONS @@ -35,6 +38,9 @@ display_usage() { echo -e "\n -no_parse : No variable parsing into config. Flag. \n" echo -e "\n -base_config : Full path of settings file to parse. Optional. Will provide a new base settings file rather than the default. Example: -base_config /opt/myinventory.yml \n" echo -e "\n -virtual : Node virtualization instead of baremetal. Flag. \n" + echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n" + echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n" + echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n" } ##find ip of interface @@ -178,12 +184,39 @@ parse_cmdline() { virtual="TRUE" shift 1 ;; + -enable_virtual_dhcp) + enable_virtual_dhcp="TRUE" + shift 1 + ;; + -static_ip_range) + static_ip_range=$2 + shift 2 + ;; + -ping_site) + ping_site=$2 + shift 2 + ;; *) display_usage exit 1 ;; esac done + + if [ ! -z "$enable_virtual_dhcp" ] && [ ! -z "$static_ip_range" ]; then + echo -e "\n\n${red}ERROR: Incorrect Usage. Static IP range cannot be set when using DHCP!. Exiting${reset}\n\n" + exit 1 + fi + + if [ -z "$virtual" ]; then + if [ ! -z "$enable_virtual_dhcp" ]; then + echo -e "\n\n${red}ERROR: Incorrect Usage. enable_virtual_dhcp can only be set when using -virtual!. Exiting${reset}\n\n" + exit 1 + elif [ ! -z "$static_ip_range" ]; then + echo -e "\n\n${red}ERROR: Incorrect Usage. 
static_ip_range can only be set when using -virtual!. Exiting${reset}\n\n" + exit 1 + fi + fi } ##disable selinux @@ -253,7 +286,7 @@ EOM ##install Ansible using yum ##params: none -##usage: install_anible() +##usage: install_ansible() install_ansible() { if ! yum list installed | grep -i ansible; then if ! yum -y install ansible-1*; then @@ -317,7 +350,7 @@ clone_bgs() { fi } -##validates the netork settings and update VagrantFile with network settings +##validates the network settings and update VagrantFile with network settings ##params: none ##usage: configure_network() configure_network() { @@ -333,52 +366,101 @@ configure_network() { exit 1 fi - ##find number of interfaces with ip and substitute in VagrantFile - if_counter=0 - for interface in ${output}; do + ##virtual we only find 1 interface + if [ $virtual ]; then + ##find interface with default gateway + this_default_gw=$(ip route | grep default | awk '{print $3}') + echo "${blue}Default Gateway: $this_default_gw ${reset}" + this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}') - if [ "$if_counter" -ge 4 ]; then - break - fi - interface_ip=$(find_ip $interface) + ##find interface IP, make sure its valid + interface_ip=$(find_ip $this_default_gw_interface) if [ ! "$interface_ip" ]; then - continue + echo "${red}Interface ${this_default_gw_interface} does not have an IP: $interface_ip ! Exiting ${reset}" + exit 1 fi - new_ip=$(next_usable_ip $interface_ip) - if [ ! "$new_ip" ]; then - continue + + ##set variable info + if [ ! -z "$static_ip_range" ]; then + new_ip=$(echo $static_ip_range | cut -d , -f1) + else + new_ip=$(next_usable_ip $interface_ip) + if [ ! "$new_ip" ]; then + echo "${red} Cannot find next IP on interface ${this_default_gw_interface} new_ip: $new_ip ! Exiting ${reset}" + exit 1 + fi fi - interface_arr[$interface]=$if_counter - interface_ip_arr[$if_counter]=$new_ip + interface=$this_default_gw_interface + public_interface=$interface + interface_arr[$interface]=2 + interface_ip_arr[2]=$new_ip subnet_mask=$(find_netmask $interface) - if [ "$if_counter" -eq 1 ]; then - private_subnet_mask=$subnet_mask - private_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 2 ]; then - public_subnet_mask=$subnet_mask - public_short_subnet_mask=$(find_short_netmask $interface) - fi - if [ "$if_counter" -eq 3 ]; then - storage_subnet_mask=$subnet_mask - fi - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile - ((if_counter++)) - done + public_subnet_mask=$subnet_mask + public_short_subnet_mask=$(find_short_netmask $interface) + ##set that interface to be public + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile + if_counter=1 + else + ##find number of interfaces with ip and substitute in VagrantFile + if_counter=0 + for interface in ${output}; do + + if [ "$if_counter" -ge 4 ]; then + break + fi + interface_ip=$(find_ip $interface) + if [ ! "$interface_ip" ]; then + continue + fi + new_ip=$(next_usable_ip $interface_ip) + if [ ! 
"$new_ip" ]; then + continue + fi + interface_arr[$interface]=$if_counter + interface_ip_arr[$if_counter]=$new_ip + subnet_mask=$(find_netmask $interface) + if [ "$if_counter" -eq 0 ]; then + admin_subnet_mask=$subnet_mask + elif [ "$if_counter" -eq 1 ]; then + private_subnet_mask=$subnet_mask + private_short_subnet_mask=$(find_short_netmask $interface) + elif [ "$if_counter" -eq 2 ]; then + public_subnet_mask=$subnet_mask + public_short_subnet_mask=$(find_short_netmask $interface) + elif [ "$if_counter" -eq 3 ]; then + storage_subnet_mask=$subnet_mask + else + echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}" + exit 1 + fi + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile + ((if_counter++)) + done + fi ##now remove interface config in Vagrantfile for 1 node ##if 1, 3, or 4 interfaces set deployment type ##if 2 interfaces remove 2nd interface and set deployment type - if [ "$if_counter" == 1 ]; then - deployment_type="single_network" - remove_vagrant_network eth_replace1 - remove_vagrant_network eth_replace2 - remove_vagrant_network eth_replace3 - elif [ "$if_counter" == 2 ]; then - deployment_type="single_network" - second_interface=`echo $output | awk '{print $2}'` - remove_vagrant_network $second_interface - remove_vagrant_network eth_replace2 + if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then + if [ $virtual ]; then + deployment_type="single_network" + echo "${blue}Single network detected for Virtual deployment...converting to three_network with internal networks! ${reset}" + private_internal_ip=155.1.2.2 + admin_internal_ip=156.1.2.2 + private_subnet_mask=255.255.255.0 + private_short_subnet_mask=/24 + interface_ip_arr[1]=$private_internal_ip + interface_ip_arr[0]=$admin_internal_ip + admin_subnet_mask=255.255.255.0 + admin_short_subnet_mask=/24 + sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", ip: '\""$private_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile + sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$admin_internal_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile + remove_vagrant_network eth_replace3 + deployment_type=three_network + else + echo "${blue}Single network or 2 network detected for baremetal deployment. This is unsupported! Exiting. ${reset}" + exit 1 + fi elif [ "$if_counter" == 3 ]; then deployment_type="three_network" remove_vagrant_network eth_replace3 @@ -388,6 +470,28 @@ configure_network() { echo "${blue}Network detected: ${deployment_type}! ${reset}" + if [ $virtual ]; then + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*disable_dhcp_flag =.*$/ disable_dhcp_flag = true/' Vagrantfile + if [ $static_ip_range ]; then + ##verify static range is at least 20 IPs + static_ip_range_begin=$(echo $static_ip_range | cut -d , -f1) + static_ip_range_end=$(echo $static_ip_range | cut -d , -f2) + ##verify range is at least 20 ips + ##assumes less than 255 range pool + begin_octet=$(echo $static_ip_range_begin | cut -d . -f4) + end_octet=$(echo $static_ip_range_end | cut -d . 
-f4) + ip_count=$((end_octet-begin_octet+1)) + if [ "$ip_count" -lt 20 ]; then + echo "${red}Static range is less than 20 ips: ${ip_count}, exiting ${reset}" + exit 1 + else + echo "${blue}Static IP range is size $ip_count ${reset}" + fi + fi + fi + fi + if route | grep default; then echo "${blue}Default Gateway Detected ${reset}" host_default_gw=$(ip route | grep default | awk '{print $3}') @@ -476,17 +580,56 @@ configure_network() { ##required for controllers_ip_array global param next_private_ip=${interface_ip_arr[1]} type=_private + control_count=0 for node in controller1 controller2 controller3; do next_private_ip=$(next_usable_ip $next_private_ip) if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 - exit 1 + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for control nodes' >&2 + exit 1 fi sed -i 's/'"$node$type"'/'"$next_private_ip"'/g' opnfv_ksgen_settings.yml controller_ip_array=$controller_ip_array$next_private_ip, + controllers_ip_arr[$control_count]=$next_private_ip + ((control_count++)) done - ##replace global param for contollers_ip_array + next_public_ip=${interface_ip_arr[2]} + foreman_ip=$next_public_ip + + ##if no dhcp, find all the Admin IPs for nodes in advance + if [ $virtual ]; then + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*no_dhcp:.*$/no_dhcp: true/' opnfv_ksgen_settings.yml + nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^ [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` + compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` + controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` + nodes=${controller_nodes}${compute_nodes} + next_admin_ip=${interface_ip_arr[0]} + type=_admin + for node in ${nodes}; do + next_admin_ip=$(next_ip $next_admin_ip) + if [ ! "$next_admin_ip" ]; then + echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}" + exit 1 + else + admin_ip_arr[$node]=$next_admin_ip + sed -i 's/'"$node$type"'/'"$next_admin_ip"'/g' opnfv_ksgen_settings.yml + fi + done + + ##allocate node public IPs + for node in ${nodes}; do + next_public_ip=$(next_usable_ip $next_public_ip) + if [ ! "$next_public_ip" ]; then + echo "${red} Unable to find an unused IP in admin_network for $node ! ${reset}" + exit 1 + else + public_ip_arr[$node]=$next_public_ip + fi + done + fi + fi + ##replace global param for controllers_ip_array controller_ip_array=${controller_ip_array%?} sed -i 's/^.*controllers_ip_array:.*$/ controllers_ip_array: '"$controller_ip_array"'/' opnfv_ksgen_settings.yml @@ -495,28 +638,49 @@ configure_network() { ##therefore we increment the ip by 10 to make sure we have a safe buffer next_private_ip=$(increment_ip $next_private_ip 10) - grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml - next_private_ip=$(next_usable_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 - exit 1 + private_output=$(grep -E '*private_vip|loadbalancer_vip|db_vip|amqp_vip|*admin_vip' opnfv_ksgen_settings.yml) + if [ ! 
-z "$private_output" ]; then + while read -r line; do + sed -i 's/^.*'"$line"'.*$/ '"$line $next_private_ip"'/' opnfv_ksgen_settings.yml + next_private_ip=$(next_usable_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for private network for vip replacement' >&2 + exit 1 fi - done + done <<< "$private_output" + fi + + ##replace odl_control_ip (non-HA only) + odl_control_ip=${controllers_ip_arr[0]} + sed -i 's/^.*odl_control_ip:.*$/ odl_control_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml + + ##replace controller_ip (non-HA only) + sed -i 's/^.*controller_ip:.*$/ controller_ip: '"$odl_control_ip"'/' opnfv_ksgen_settings.yml ##replace foreman site - next_public_ip=${interface_ip_arr[2]} - sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml + sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml ##replace public vips - next_public_ip=$(increment_ip $next_public_ip 10) - grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do - sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml + ##no need to do this if virtual and no dhcp + if [ ! -z "$enable_virtual_dhcp" ]; then + next_public_ip=$(increment_ip $next_public_ip 10) + else next_public_ip=$(next_usable_ip $next_public_ip) - if [ ! "$next_public_ip" ]; then - printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 - exit 1 - fi - done + fi + + public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml) + if [ ! -z "$public_output" ]; then + while read -r line; do + if echo $line | grep horizon_public_vip; then + horizon_public_vip=$next_public_ip + fi + sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml + next_public_ip=$(next_usable_ip $next_public_ip) + if [ ! "$next_public_ip" ]; then + printf '%s\n' 'deploy.sh: Unable to find next ip for public network for vip replcement' >&2 + exit 1 + fi + done <<< "$public_output" + fi ##replace public_network param public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) @@ -555,9 +719,27 @@ configure_network() { ##to neutron to use as floating IPs ##we should control this subnet, so this range should work .150-200 ##but generally this is a bad idea and we are assuming at least a /24 subnet here + ##if static ip range, then we take the difference of the end range and current ip + ## to be the allocation pool public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) - public_allocation_start=$(increment_subnet $public_subnet 150) - public_allocation_end=$(increment_subnet $public_subnet 200) + if [ ! -z "$static_ip_range" ]; then + begin_octet=$(echo $next_public_ip | cut -d . -f4) + end_octet=$(echo $static_ip_range_end | cut -d . -f4) + ip_diff=$((end_octet-begin_octet)) + if [ $ip_diff -le 0 ]; then + echo "${red}ip range left for floating range is less than or equal to 0! $ipdiff ${reset}" + exit 1 + else + public_allocation_start=$(next_ip $next_public_ip) + public_allocation_end=$static_ip_range_end + echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" + fi + else + public_allocation_start=$(increment_subnet $public_subnet 150) + public_allocation_end=$(increment_subnet $public_subnet 200) + echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" + echo "${blue}Foreman VM is up! 
${reset}" + fi sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml @@ -582,7 +764,7 @@ configure_virtual() { fi } -##Starts for forement VM with Vagrant +##Starts Foreman VM with Vagrant ##params: none ##usage: start_vagrant() start_foreman() { @@ -597,11 +779,11 @@ start_foreman() { fi } -##start the VM if this is a virtual installaion +##start the VM if this is a virtual installation ##this function does nothing if baremetal servers are being used ##params: none ##usage: start_virtual_nodes() -start_virutal_nodes() { +start_virtual_nodes() { if [ $virtual ]; then ##Bring up VM nodes @@ -613,6 +795,8 @@ start_virutal_nodes() { compute_nodes=`echo $nodes | tr " " "\n" | grep -v controller | tr "\n" " "` controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` nodes=${controller_nodes}${compute_nodes} + controller_count=0 + compute_wait_completed=false for node in ${nodes}; do cd /tmp @@ -622,7 +806,7 @@ start_virutal_nodes() { ##clone bgs vagrant ##will change this to be opnfv repo when commit is done - if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git $node; then + if ! git clone https://github.com/trozet/bgs_vagrant.git $node; then printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 exit 1 fi @@ -631,7 +815,7 @@ start_virutal_nodes() { if [ $base_config ]; then if ! cp -f $base_config opnfv_ksgen_settings.yml; then - echo "{red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" + echo "${red}ERROR: Unable to copy $base_config to opnfv_ksgen_settings.yml${reset}" exit 1 fi fi @@ -642,6 +826,13 @@ start_virutal_nodes() { node_type=config_nodes_${node}_type node_type=$(eval echo \$$node_type) + ##trozet test make compute nodes wait 20 minutes + if [ "$compute_wait_completed" = false ] && [ "$node_type" != "controller" ]; then + echo "${blue}Waiting 20 minutes for Control nodes to install before continuing with Compute nodes..." 
+ compute_wait_completed=true + sleep 1400 + fi + ##find number of interfaces with ip and substitute in VagrantFile output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` @@ -650,11 +841,14 @@ start_virutal_nodes() { exit 1 fi - if_counter=0 for interface in ${output}; do - if [ "$if_counter" -ge 4 ]; then + if [ -z "$enable_virtual_dhcp" ]; then + if [ "$if_counter" -ge 1 ]; then + break + fi + elif [ "$if_counter" -ge 4 ]; then break fi interface_ip=$(find_ip $interface) @@ -690,30 +884,66 @@ start_virutal_nodes() { mac_addr=$(echo $mac_addr | sed 's/:\|-//g') ;; esac - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", bridge: '\'"$interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile - ((if_counter++)) + this_admin_ip=${admin_ip_arr[$node]} + sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile + ((if_counter++)) done - ##now remove interface config in Vagrantfile for 1 node ##if 1, 3, or 4 interfaces set deployment type ##if 2 interfaces remove 2nd interface and set deployment type - if [ "$if_counter" == 1 ]; then + if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then deployment_type="single_network" - remove_vagrant_network eth_replace1 - remove_vagrant_network eth_replace2 + if [ "$node_type" == "controller" ]; then + mac_string=config_nodes_${node}_private_mac + mac_addr=$(eval echo \$$mac_string) + if [ $mac_addr == "" ]; then + echo "${red} Unable to find private_mac for $node! ${reset}" + exit 1 + fi + else + ##generate random mac + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + fi + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + if [ "$node_type" == "controller" ]; then + new_node_ip=${controllers_ip_arr[$controller_count]} + if [ ! "$new_node_ip" ]; then + echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}" + exit 1 + fi + ((controller_count++)) + else + next_private_ip=$(next_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + echo "{red}ERROR: Could not find private ip for $node ${reset}" + exit 1 + fi + new_node_ip=$next_private_ip + fi + sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile + ##replace host_ip in vm_nodes_provision with private ip + sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh + ##replace ping site + if [ ! 
-z "$ping_site" ]; then + sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh + fi + ##find public ip info + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + this_public_ip=${public_ip_arr[$node]} + + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile + else + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile + fi remove_vagrant_network eth_replace3 - elif [ "$if_counter" == 2 ]; then - deployment_type="single_network" - second_interface=`echo $output | awk '{print $2}'` - remove_vagrant_network $second_interface - remove_vagrant_network eth_replace2 elif [ "$if_counter" == 3 ]; then deployment_type="three_network" remove_vagrant_network eth_replace3 else deployment_type="multi_network" fi - ##modify provisioning to do puppet install, config, and foreman check-in ##substitute host_name and dns_server in the provisioning script host_string=config_nodes_${node}_hostname @@ -721,19 +951,17 @@ start_virutal_nodes() { sed -i 's/^host_name=REPLACE/host_name='$host_name'/' vm_nodes_provision.sh ##dns server should be the foreman server sed -i 's/^dns_server=REPLACE/dns_server='${interface_ip_arr[0]}'/' vm_nodes_provision.sh - ## remove bootstrap and NAT provisioning sed -i '/nat_setup.sh/d' Vagrantfile sed -i 's/bootstrap.sh/vm_nodes_provision.sh/' Vagrantfile - ## modify default_gw to be node_default_gw sed -i 's/^.*default_gw =.*$/ default_gw = '\""$node_default_gw"\"'/' Vagrantfile - ## modify VM memory to be 4gig - sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile - + ##if node type is controller + if [ "$node_type" == "controller" ]; then + sed -i 's/^.*vb.memory =.*$/ vb.memory = 4096/' Vagrantfile + fi echo "${blue}Starting Vagrant Node $node! ${reset}" - ##stand up vagrant if ! vagrant up; then echo "${red} Unable to start $node ${reset}" @@ -741,11 +969,33 @@ start_virutal_nodes() { else echo "${blue} $node VM is up! ${reset}" fi - done - echo "${blue} All VMs are UP! ${reset}" - + echo "${blue} Waiting for puppet to complete on the nodes... ${reset}" + ##check puppet is complete + ##ssh into foreman server, run check to verify puppet is complete + pushd /tmp/bgs_vagrant + if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then + echo "${red} Failed to validate puppet completion on nodes ${reset}" + exit 1 + else + echo "{$blue} Puppet complete on all nodes! ${reset}" + fi + popd + ##add routes back to nodes + for node in ${nodes}; do + pushd /tmp/$node + if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then + echo "${blue} Adding public route back to $node! ${reset}" + vagrant ssh -c "route add default gw $this_default_gw" + fi + popd + done + if [ ! -z "$horizon_public_vip" ]; then + echo "${blue} Virtual deployment SUCCESS!! Foreman URL: http://${foreman_ip}, Horizon URL: http://${horizon_public_vip} ${reset}" + else + echo "${blue} Virtual deployment SUCCESS!! 
Foreman URL: http://${foreman_ip}, Horizon URL: http://${odl_control_ip} ${reset}" + fi fi } @@ -763,7 +1013,7 @@ main() { configure_network configure_virtual start_foreman - start_virutal_nodes + start_virtual_nodes } main "$@" diff --git a/foreman/ci/opnfv_ksgen_settings.yml b/foreman/ci/opnfv_ksgen_settings.yml index 21840dd..b41a41b 100644 --- a/foreman/ci/opnfv_ksgen_settings.yml +++ b/foreman/ci/opnfv_ksgen_settings.yml @@ -44,6 +44,7 @@ global_params: deployment_type: network_type: multi_network default_gw: +no_dhcp: false foreman: seed_values: - { name: heat_cfn, oldvalue: true, newvalue: false } @@ -110,6 +111,7 @@ nodes: bmc_mac: "10:23:45:67:88:AB" bmc_user: root bmc_pass: root + admin_ip: compute_admin ansible_ssh_pass: "Op3nStack" admin_password: "" groups: @@ -130,6 +132,7 @@ nodes: bmc_mac: "10:23:45:67:88:AC" bmc_user: root bmc_pass: root + admin_ip: controller1_admin private_ip: controller1_private private_mac: "10:23:45:67:87:AC" ansible_ssh_pass: "Op3nStack" @@ -152,6 +155,7 @@ nodes: bmc_mac: "10:23:45:67:88:AD" bmc_user: root bmc_pass: root + admin_ip: controller2_admin private_ip: controller2_private private_mac: "10:23:45:67:87:AD" ansible_ssh_pass: "Op3nStack" @@ -174,6 +178,7 @@ nodes: bmc_mac: "10:23:45:67:88:AE" bmc_user: root bmc_pass: root + admin_ip: controller3_admin private_ip: controller3_private private_mac: "10:23:45:67:87:AE" ansible_ssh_pass: "Op3nStack" diff --git a/foreman/ci/opnfv_ksgen_settings_no_HA.yml b/foreman/ci/opnfv_ksgen_settings_no_HA.yml new file mode 100644 index 0000000..79db257 --- /dev/null +++ b/foreman/ci/opnfv_ksgen_settings_no_HA.yml @@ -0,0 +1,264 @@ +global_params: + admin_email: opnfv@opnfv.com + ha_flag: "false" + odl_flag: "true" + odl_control_ip: + private_network: + storage_network: + public_network: + private_subnet: + deployment_type: + controller_ip: +network_type: multi_network +default_gw: +no_dhcp: false +foreman: + seed_values: + - { name: heat_cfn, oldvalue: true, newvalue: false } +workaround_puppet_version_lock: false +opm_branch: master +installer: + name: puppet + short_name: pupt + network: + auto_assign_floating_ip: false + variant: + short_name: m2vx + plugin: + name: neutron +workaround_openstack_packstack_rpm: false +tempest: + repo: + Fedora: + '19': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-19/ + '20': http://REPLACE_ME/~REPLACE_ME/openstack-tempest-icehouse/fedora-20/ + RedHat: + '7.0': https://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7/ + use_virtual_env: false + public_allocation_end: 10.2.84.71 + skip: + files: null + tests: null + public_allocation_start: 10.2.84.51 + physnet: physnet1 + use_custom_repo: false + public_subnet_cidr: 10.2.84.0/24 + public_subnet_gateway: 10.2.84.1 + additional_default_settings: + - section: compute + option: flavor_ref + value: 1 + cirros_image_file: cirros-0.3.1-x86_64-disk.img + setup_method: tempest/rpm + test_name: all + rdo: + version: juno + rpm: http://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + rpm: + version: 20141201 + dir: ~{{ nodes.tempest.remote_user }}/tempest-dir +tmp: + node_prefix: '{{ node.prefix | reject("none") | join("-") }}-' + anchors: + - https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + - http://repos.fedorapeople.org/repos/openstack/openstack-juno/ +opm_repo: https://github.com/redhat-openstack/openstack-puppet-modules.git +workaround_vif_plugging: false +openstack_packstack_rpm: 
http://REPLACE_ME/brewroot/packages/openstack-puppet-modules/2013.2/9.el6ost/noarch/openstack-puppet-modules-2013.2-9.el6ost.noarch.rpm +nodes: + compute: + name: oscompute11.opnfv.com + hostname: oscompute11.opnfv.com + short_name: oscompute11 + type: compute + host_type: baremetal + hostgroup: Compute + mac_address: "10:23:45:67:89:AB" + bmc_ip: 10.4.17.2 + bmc_mac: "10:23:45:67:88:AB" + bmc_user: root + bmc_pass: root + admin_ip: compute_admin + ansible_ssh_pass: "Op3nStack" + admin_password: "" + groups: + - compute + - foreman_nodes + - puppet + - rdo + - neutron + controller1: + name: oscontroller1.opnfv.com + hostname: oscontroller1.opnfv.com + short_name: oscontroller1 + type: controller + host_type: baremetal + hostgroup: Controller_Network_ODL + mac_address: "10:23:45:67:89:AC" + bmc_ip: 10.4.17.3 + bmc_mac: "10:23:45:67:88:AC" + bmc_user: root + bmc_pass: root + private_ip: controller1_private + admin_ip: controller1_admin + private_mac: "10:23:45:67:87:AC" + ansible_ssh_pass: "Op3nStack" + admin_password: "octopus" + groups: + - controller + - foreman_nodes + - puppet + - rdo + - neutron +workaround_mysql_centos7: true +distro: + name: centos + centos: + '7.0': + repos: [] + short_name: c + short_version: 70 + version: '7.0' + rhel: + '7.0': + kickstart_url: http://REPLACE_ME/released/RHEL-7/7.0/Server/x86_64/os/ + repos: + - section: rhel7-server-rpms + name: Packages for RHEL 7 - $basearch + baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0/x86_64/ + gpgcheck: 0 + - section: rhel-7-server-update-rpms + name: Update Packages for Enterprise Linux 7 - $basearch + baseurl: http://REPLACE_ME/rel-eng/repos/rhel-7.0-z/x86_64/ + gpgcheck: 0 + - section: rhel-7-server-optional-rpms + name: Optional Packages for Enterprise Linux 7 - $basearch + baseurl: http://REPLACE_ME/released/RHEL-7/7.0/Server-optional/x86_64/os/ + gpgcheck: 0 + - section: rhel-7-server-extras-rpms + name: Optional Packages for Enterprise Linux 7 - $basearch + baseurl: http://REPLACE_ME/rel-eng/EXTRAS-7.0-RHEL-7-20140610.0/compose/Server/x86_64/os/ + gpgcheck: 0 + '6.5': + kickstart_url: http://REPLACE_ME/released/RHEL-6/6.5/Server/x86_64/os/ + repos: + - section: rhel6.5-server-rpms + name: Packages for RHEL 6.5 - $basearch + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/$basearch/os/Server + gpgcheck: 0 + - section: rhel-6.5-server-update-rpms + name: Update Packages for Enterprise Linux 6.5 - $basearch + baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/$basearch/ + gpgcheck: 0 + - section: rhel-6.5-server-optional-rpms + name: Optional Packages for Enterprise Linux 6.5 - $basearch + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/$basearch/os + gpgcheck: 0 + - section: rhel6.5-server-rpms-32bit + name: Packages for RHEL 6.5 - i386 + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/i386/os/Server + gpgcheck: 0 + enabled: 1 + - section: rhel-6.5-server-update-rpms-32bit + name: Update Packages for Enterprise Linux 6.5 - i686 + baseurl: http://REPLACE_ME.REPLACE_ME/rel-eng/repos/RHEL-6.5-Z/i686/ + gpgcheck: 0 + enabled: 1 + - section: rhel-6.5-server-optional-rpms-32bit + name: Optional Packages for Enterprise Linux 6.5 - i386 + baseurl: http://REPLACE_ME.REPLACE_ME/released/RHEL-6/6.5/Server/optional/i386/os + gpgcheck: 0 + enabled: 1 + subscription: + username: REPLACE_ME + password: HWj8TE28Qi0eP2c + pool: 8a85f9823e3d5e43013e3ddd4e2a0977 + config: + selinux: permissive + ntp_server: 0.pool.ntp.org + dns_servers: + - 10.4.1.1 + - 10.4.0.2 
+ reboot_delay: 1 + initial_boot_timeout: 180 +node: + prefix: + - rdo + - pupt + - ffqiotcxz1 + - null +product: + repo_type: production + name: rdo + short_name: rdo + rpm: + CentOS: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + Fedora: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + RedHat: https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm + short_version: ju + repo: + production: + CentOS: + 7.0.1406: http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7 + '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6 + '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7 + Fedora: + '20': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-20 + '21': http://repos.fedorapeople.org/repos/openstack/openstack-juno/fedora-21 + RedHat: + '6.6': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6 + '6.5': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-6 + '7.0': http://repos.fedorapeople.org/repos/openstack/openstack-juno/epel-7 + version: juno + config: + enable_epel: y + short_repo: prod +tester: + name: tempest +distro_reboot_options: '--no-wall '' Reboot is triggered by Ansible'' ' +job: + verbosity: 1 + archive: + - '{{ tempest.dir }}/etc/tempest.conf' + - '{{ tempest.dir }}/etc/tempest.conf.sample' + - '{{ tempest.dir }}/*.log' + - '{{ tempest.dir }}/*.xml' + - /root/ + - /var/log/ + - /etc/nova + - /etc/ceilometer + - /etc/cinder + - /etc/glance + - /etc/keystone + - /etc/neutron + - /etc/ntp + - /etc/puppet + - /etc/qpid + - /etc/qpidd.conf + - /root + - /etc/yum.repos.d + - /etc/yum.repos.d +topology: + name: multinode + short_name: mt +workaround_neutron_ovs_udev_loop: true +workaround_glance_table_utf8: false +verbosity: + debug: 0 + info: 1 + warning: 2 + warn: 2 + errors: 3 +provisioner: + username: admin + network: + type: nova + name: external + skip: skip_provision + foreman_url: https://10.2.84.2/api/v2/ + password: octopus + type: foreman +workaround_nova_compute_fix: false +workarounds: + enabled: true + diff --git a/foreman/ci/vm_nodes_provision.sh b/foreman/ci/vm_nodes_provision.sh index d0bba64..e64c0ad 100755 --- a/foreman/ci/vm_nodes_provision.sh +++ b/foreman/ci/vm_nodes_provision.sh @@ -18,6 +18,7 @@ green=`tput setaf 2` host_name=REPLACE dns_server=REPLACE +host_ip=REPLACE ##END VARS ##set hostname @@ -31,27 +32,52 @@ if ! grep 'PEERDNS=no' /etc/sysconfig/network-scripts/ifcfg-enp0s3; then systemctl restart NetworkManager fi -if ! ping www.google.com -c 5; then +##modify /etc/resolv.conf to point to foreman +echo "${blue} Configuring resolv.conf with DNS: $dns_server ${reset}" +cat > /etc/resolv.conf << EOF +search ci.com opnfv.com +nameserver $dns_server +nameserver 8.8.8.8 + +EOF + +##modify /etc/hosts to add own IP for rabbitmq workaround +host_short_name=`echo $host_name | cut -d . -f 1` +echo "${blue} Configuring hosts with: $host_name $host_ip ${reset}" +cat > /etc/hosts << EOF +$host_ip $host_short_name $host_name +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 +EOF + +if ! 
ping www.google.com -c 5; then echo "${red} No internet connection, check your route and DNS setup ${reset}" exit 1 fi -# Install EPEL repo for access to many other yum repos -# Major version is pinned to force some consistency for Arno -yum install -y epel-release-7* +##install EPEL +if ! yum repolist | grep "epel/"; then + if ! rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm; then + printf '%s\n' 'vm_provision_nodes.sh: Unable to configure EPEL repo' >&2 + exit 1 + fi +else + printf '%s\n' 'vm_nodes_provision.sh: Skipping EPEL repo as it is already configured.' +fi -# Update device-mapper-libs, needed for libvirtd on compute nodes -# Major version is pinned to force some consistency for Arno -if ! yum -y upgrade device-mapper-libs-1*; then +##install device-mapper-libs +##needed for libvirtd on compute nodes +if ! yum -y upgrade device-mapper-libs; then echo "${red} WARN: Unable to upgrade device-mapper-libs...nova-compute may not function ${reset}" fi -# Install other required packages -# Major version is pinned to force some consistency for Arno echo "${blue} Installing Puppet ${reset}" -if ! yum install -y puppet-3*; then - printf '%s\n' 'vm_nodes_provision.sh: failed to install required packages' >&2 - exit 1 +##install puppet +if ! yum list installed | grep -i puppet; then + if ! yum -y install puppet; then + printf '%s\n' 'vm_nodes_provision.sh: Unable to install puppet package' >&2 + exit 1 + fi fi echo "${blue} Configuring puppet ${reset}" -- cgit 1.2.3-korg From 75577b79e416103921fa58cf705c710e2a48f775 Mon Sep 17 00:00:00 2001 From: arnaudmorin Date: Tue, 7 Jul 2015 15:04:34 +0200 Subject: Add a 1 minute pause after ansible playbook For the LF pods, it seems that the ansible reload playbook is necessary, in our pod, we also need to wait 1 minute before trying to run shell provisionner in Vagrant VM. This patch adds a 1 minute pause into the ansible playground in order to be sure that the machine will be accessible. This avoid "SSH connection unexpectedly closed" issue. Change-Id: Iab2b47decb0120e8359a175b1f16ccbd1036e91f JIRA: BGS-85 Signed-off-by: arnaudmorin --- foreman/ci/reload_playbook.yml | 1 + 1 file changed, 1 insertion(+) (limited to 'foreman/ci') diff --git a/foreman/ci/reload_playbook.yml b/foreman/ci/reload_playbook.yml index 9e3d053..9b3a4d4 100644 --- a/foreman/ci/reload_playbook.yml +++ b/foreman/ci/reload_playbook.yml @@ -14,3 +14,4 @@ delay=60 timeout=180 sudo: false + - pause: minutes=1 -- cgit 1.2.3-korg From d2024f1dcb9297cc776e07e0d853c988c162cece Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Thu, 9 Jul 2015 14:07:42 -0400 Subject: Removes version pinning to khaleesi and other utils No longer needed post Arno Change-Id: I24ba94e48658372034e334d413f044f407f2cb72 Signed-off-by: Tim Rozet --- foreman/ci/bootstrap.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/bootstrap.sh b/foreman/ci/bootstrap.sh index 4bc22ed..c98f00e 100755 --- a/foreman/ci/bootstrap.sh +++ b/foreman/ci/bootstrap.sh @@ -25,8 +25,7 @@ green=`tput setaf 2` yum install -y epel-release-7* # Install other required packages -# Major version is pinned to force some consistency for Arno -if ! yum -y install python-pip-1* python-virtualenv-1* gcc-4* git-1* sshpass-1* ansible-1* python-requests-1*; then +if ! 
yum -y install python-pip python-virtualenv gcc git sshpass ansible python-requests; then printf '%s\n' 'bootstrap.sh: failed to install required packages' >&2 exit 1 fi @@ -36,7 +35,7 @@ cd /opt echo "Cloning khaleesi to /opt" if [ ! -d khaleesi ]; then - if ! git clone -b v1.0 https://github.com/trozet/khaleesi.git; then + if ! git clone -b opnfv https://github.com/trozet/khaleesi.git; then printf '%s\n' 'bootstrap.sh: Unable to git clone khaleesi' >&2 exit 1 fi -- cgit 1.2.3-korg From 1c6f0e2a85d30d1619e078eed61f1a01e6550e25 Mon Sep 17 00:00:00 2001 From: arnaudmorin Date: Tue, 7 Jul 2015 15:34:04 +0200 Subject: Take care of the subnet on public subnet This patch will take care of the subnet on public range. Instead of having a stoned /24, we can use now use at least /27 subnet (because foreman will try to use 20 IPs for public floating ip pool) This is not the best way to do that, but it's better than the current way. It also add a parameter to set the number of floating IP we want to use from the public subnet in provider network. Change-Id: I467f2a4098d2da3c6f666453cead64e18d0c655c JIRA: BGS-75 Signed-off-by: arnaudmorin --- foreman/ci/deploy.sh | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 2ccb64b..edad2a4 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -41,6 +41,7 @@ display_usage() { echo -e "\n -enable_virtual_dhcp : Run dhcp server instead of using static IPs. Use this with -virtual only. \n" echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n" echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n" + echo -e "\n -floating_ip_count : number of IP address from the public range to be used for floating IP. Default is 20.\n" } ##find ip of interface @@ -57,6 +58,16 @@ function find_subnet { printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))" } +##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask +## Warning: This function only works for IPv4 at the moment. +##params: ip, netmask +function find_last_ip_subnet { + IFS=. read -r i1 i2 i3 i4 <<< "$1" + IFS=. read -r m1 m2 m3 m4 <<< "$2" + IFS=. 
read -r s1 s2 s3 s4 <<< "$((i1 & m1)).$((i2 & m2)).$((i3 & m3)).$((i4 & m4))" + printf "%d.%d.%d.%d\n" "$((255 - $m1 + $s1))" "$((255 - $m2 + $s2))" "$((255 - $m3 + $s3))" "$((255 - $m4 + $s4 - 1))" +} + ##increments subnet by a value ##params: ip, value ##assumes low value @@ -196,6 +207,10 @@ parse_cmdline() { ping_site=$2 shift 2 ;; + -floating_ip_count) + floating_ip_count=$2 + shift 2 + ;; *) display_usage exit 1 @@ -217,6 +232,10 @@ parse_cmdline() { exit 1 fi fi + + if [ -z "$floating_ip_count" ]; then + floating_ip_count=20 + fi } ##disable selinux @@ -717,10 +736,10 @@ configure_network() { ##we have to define an allocation range of the public subnet to give ##to neutron to use as floating IPs - ##we should control this subnet, so this range should work .150-200 - ##but generally this is a bad idea and we are assuming at least a /24 subnet here ##if static ip range, then we take the difference of the end range and current ip ## to be the allocation pool + ##if not static ip, we will use the last 20 IP from the subnet + ## note that this is not a really good idea because the subnet must be at least a /27 for this to work... public_subnet=$(find_subnet $next_public_ip $public_subnet_mask) if [ ! -z "$static_ip_range" ]; then begin_octet=$(echo $next_public_ip | cut -d . -f4) @@ -735,8 +754,9 @@ configure_network() { echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" fi else - public_allocation_start=$(increment_subnet $public_subnet 150) - public_allocation_end=$(increment_subnet $public_subnet 200) + last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask) + public_allocation_start=$(increment_subnet $public_subnet $(( $last_ip_subnet - $floating_ip_count )) ) + public_allocation_end=$(increment_subnet $public_subnet $(( $last_ip_subnet )) ) echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" echo "${blue}Foreman VM is up! ${reset}" fi -- cgit 1.2.3-korg From 0174fdf6cbb16394eb8f57d8b421341b5f39bf36 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 13 Jul 2015 16:28:53 -0400 Subject: Migrates from github bgs_vagrant project to genesis No longer relies on bgs_vagrant github project. Now Foreman VM is created under /var/opt/opnfv/foreman_vm, instead of /tmp/bgs_vagrant for better naming convention and file location. Other VM nodes are also placed in /var/opt/opnfv/. 
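As a minimal sketch of what the new layout means in practice, assuming only that Vagrant is on the jumphost's PATH, checking where the Foreman VM now lives could look like this (the /var/opt/opnfv path comes from this commit message; the status check and messages are illustrative):

  #!/bin/bash
  # Sketch: confirm the Foreman VM's new home after this change.
  # vm_dir is taken from the commit message; everything else is assumed.
  vm_dir=/var/opt/opnfv

  if [ -d "$vm_dir/foreman_vm" ]; then
    cd "$vm_dir/foreman_vm"
    # vagrant status reports the state of the VM defined by the Vagrantfile here
    vagrant status
  else
    echo "No Foreman VM directory under $vm_dir, environment looks clean"
  fi
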
JIRA: BGS-60 Change-Id: I009d907ad777750168b822ab86cd11515e28cdd7 Signed-off-by: Tim Rozet --- foreman/ci/clean.sh | 16 +++++++++++++-- foreman/ci/deploy.sh | 58 ++++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 57 insertions(+), 17 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh index f61ac93..05c35fc 100755 --- a/foreman/ci/clean.sh +++ b/foreman/ci/clean.sh @@ -5,7 +5,7 @@ # #Uses Vagrant and VirtualBox # -#Destroys Vagrant VM running in /tmp/bgs_vagrant +#Destroys Vagrant VM running in $vm_dir/foreman_vm #Shuts down all nodes found in Khaleesi settings #Removes hypervisor kernel modules (VirtualBox) @@ -14,6 +14,8 @@ reset=`tput sgr0` blue=`tput setaf 4` red=`tput setaf 1` green=`tput setaf 2` + +vm_dir=/var/opt/opnfv ##END VARS ##FUNCTIONS @@ -106,9 +108,17 @@ else skip_vagrant=1 fi +###legacy VM location check +###remove me later +if [ -d /tmp/bgs_vagrant ]; then + cd /tmp/bgs_vagrant + vagrant destroy -f + rm -rf /tmp/bgs_vagrant +fi + ###destroy vagrant if [ $skip_vagrant -eq 0 ]; then - cd /tmp/bgs_vagrant + cd $vm_dir/foreman_vm if vagrant destroy -f; then echo "${blue}Successfully destroyed Foreman VM ${reset}" else @@ -135,6 +145,8 @@ else echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}" fi +###remove working vm directory +rm -rf $vm_dir ###remove kernel modules echo "${blue}Removing kernel modules ${reset}" diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index edad2a4..46a09f5 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -28,6 +28,8 @@ declare -A interface_arr declare -A controllers_ip_arr declare -A admin_ip_arr declare -A public_ip_arr + +vm_dir=/var/opt/opnfv ##END VARS ##FUNCTIONS @@ -44,6 +46,24 @@ display_usage() { echo -e "\n -floating_ip_count : number of IP address from the public range to be used for floating IP. Default is 20.\n" } +##verify vm dir exists +##params: none +function verify_vm_dir { + if [ -d "$vm_dir" ]; then + echo -e "\n\n${red}ERROR: VM Directory: $vm_dir already exists. Environment not clean. Please use clean.sh. Exiting${reset}\n\n" + exit 1 + else + mkdir -p $vm_dir + fi + + chmod 700 $vm_dir + + if [ ! -d $vm_dir ]; then + echo -e "\n\n${red}ERROR: Unable to create VM Directory: $vm_dir Exiting${reset}\n\n" + exit -1 + fi +} + ##find ip of interface ##params: interface name function find_ip { @@ -353,27 +373,31 @@ install_vagrant() { ##params: none ##usage: clean_tmp() clean_tmp() { - rm -rf /tmp/bgs_vagrant + rm -rf $vm_dir/foreman_vm } -##clone bgs vagrant version 1.0 using git +##clone genesis and move to node vm dir ##params: none ##usage: clone_bgs clone_bgs() { cd /tmp/ + rm -rf /tmp/genesis/ - ##will change this to be opnfv repo when commit is done - if ! git clone -b v1.0 https://github.com/trozet/bgs_vagrant.git; then - printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 + ##clone artifacts and move into foreman_vm dir + if ! git clone https://gerrit.opnfv.org/gerrit/genesis; then + printf '%s\n' 'deploy.sh: Unable to clone genesis repo' >&2 exit 1 fi + + mv -f /tmp/genesis/foreman/ci $vm_dir/foreman_vm + rm -rf /tmp/genesis/ } ##validates the network settings and update VagrantFile with network settings ##params: none ##usage: configure_network() configure_network() { - cd /tmp/bgs_vagrant + cd $vm_dir/foreman_vm echo "${blue}Detecting network configuration...${reset}" ##detect host 1 or 3 interface configuration @@ -792,7 +816,7 @@ start_foreman() { ##stand up vagrant if ! 
vagrant up; then - printf '%s\n' 'deploy.sh: Unable to start vagrant' >&2 + printf '%s\n' 'deploy.sh: Unable to complete Foreman VM install' >&2 exit 1 else echo "${blue}Foreman VM is up! ${reset}" @@ -819,19 +843,22 @@ start_virtual_nodes() { compute_wait_completed=false for node in ${nodes}; do - cd /tmp + cd /tmp/ ##remove VM nodes incase it wasn't cleaned up - rm -rf /tmp/$node + rm -rf $vm_dir/$node + rm -rf /tmp/genesis/ - ##clone bgs vagrant - ##will change this to be opnfv repo when commit is done - if ! git clone https://github.com/trozet/bgs_vagrant.git $node; then + ##clone genesis and move into node folder + if ! git clone https://gerrit.opnfv.org/gerrit/genesis; then printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 exit 1 fi - cd $node + mv -f /tmp/genesis/foreman/ci $vm_dir/$node + rm -rf /tmp/genesis/ + + cd $vm_dir/$node if [ $base_config ]; then if ! cp -f $base_config opnfv_ksgen_settings.yml; then @@ -994,7 +1021,7 @@ start_virtual_nodes() { echo "${blue} Waiting for puppet to complete on the nodes... ${reset}" ##check puppet is complete ##ssh into foreman server, run check to verify puppet is complete - pushd /tmp/bgs_vagrant + pushd $vm_dir/foreman_vm if ! vagrant ssh -c "/opt/khaleesi/run.sh --no-logs --use /vagrant/opnfv_ksgen_settings.yml /opt/khaleesi/playbooks/validate_opnfv-vm.yml"; then echo "${red} Failed to validate puppet completion on nodes ${reset}" exit 1 @@ -1004,7 +1031,7 @@ start_virtual_nodes() { popd ##add routes back to nodes for node in ${nodes}; do - pushd /tmp/$node + pushd $vm_dir/$node if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then echo "${blue} Adding public route back to $node! ${reset}" vagrant ssh -c "route add default gw $this_default_gw" @@ -1029,6 +1056,7 @@ main() { install_ansible install_vagrant clean_tmp + verify_vm_dir clone_bgs configure_network configure_virtual -- cgit 1.2.3-korg From 3c085628e74362805ec5aebe7a2c0c0c85a4ee25 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 14 Jul 2015 17:41:06 -0400 Subject: Fixes clean bug where $vm_dir is assumed to exist JIRA: BGS-60 Change-Id: Ida9458cf638eca81e6a3d67941203c605d9e4a8a Signed-off-by: Tim Rozet --- foreman/ci/clean.sh | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh index 05c35fc..0dc34ed 100755 --- a/foreman/ci/clean.sh +++ b/foreman/ci/clean.sh @@ -118,17 +118,19 @@ fi ###destroy vagrant if [ $skip_vagrant -eq 0 ]; then - cd $vm_dir/foreman_vm - if vagrant destroy -f; then - echo "${blue}Successfully destroyed Foreman VM ${reset}" - else - echo "${red}Unable to destroy Foreman VM ${reset}" - echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" - if ps axf | grep vagrant; then - echo "${red}Vagrant VM still exists...exiting ${reset}" - exit 1 + if [ -d $vm_dir/foreman_vm ]; then + cd $vm_dir/foreman_vm + if vagrant destroy -f; then + echo "${blue}Successfully destroyed Foreman VM ${reset}" else - echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}" + echo "${red}Unable to destroy Foreman VM ${reset}" + echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" + if ps axf | grep vagrant; then + echo "${red}Vagrant VM still exists...exiting ${reset}" + exit 1 + else + echo "${blue}Vagrant process doesn't exist. Moving on... 
${reset}" + fi fi fi -- cgit 1.2.3-korg From ebbc5db9c4758d714d71fd240c447052ee01abd2 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 14 Jul 2015 21:49:13 -0400 Subject: Fixes syntax error with git clone for genesis JIRA: BGS-60 Change-Id: I23a004d41ff8fd37a0ddcae1b4831a333000cae8 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 46a09f5..720dc75 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -384,7 +384,7 @@ clone_bgs() { rm -rf /tmp/genesis/ ##clone artifacts and move into foreman_vm dir - if ! git clone https://gerrit.opnfv.org/gerrit/genesis; then + if ! git clone https://gerrit.opnfv.org/gerrit/genesis.git; then printf '%s\n' 'deploy.sh: Unable to clone genesis repo' >&2 exit 1 fi @@ -850,7 +850,7 @@ start_virtual_nodes() { rm -rf /tmp/genesis/ ##clone genesis and move into node folder - if ! git clone https://gerrit.opnfv.org/gerrit/genesis; then + if ! git clone https://gerrit.opnfv.org/gerrit/genesis.git; then printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 exit 1 fi -- cgit 1.2.3-korg From 1a0c8ab300fdc4865683586ead5d803925d6bf93 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 14 Jul 2015 22:19:38 -0400 Subject: Fixes issue where git clone fails Looks like for some reason GIT_SSL_NO_VERIFY=true is required for the git clone to work in LF lab. I guess git cannot recognize the SSL cert. However, this same git clone command works on another non-LF server that is using the same exact git version without the need for this fix. JIRA: BGS-60 Change-Id: I2a71902b3fe01026644ada29bef67c3db3cda711 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 720dc75..a607350 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -384,7 +384,7 @@ clone_bgs() { rm -rf /tmp/genesis/ ##clone artifacts and move into foreman_vm dir - if ! git clone https://gerrit.opnfv.org/gerrit/genesis.git; then + if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then printf '%s\n' 'deploy.sh: Unable to clone genesis repo' >&2 exit 1 fi @@ -850,7 +850,7 @@ start_virtual_nodes() { rm -rf /tmp/genesis/ ##clone genesis and move into node folder - if ! git clone https://gerrit.opnfv.org/gerrit/genesis.git; then + if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 exit 1 fi -- cgit 1.2.3-korg From c621b81549551823ecd1447bb18aa854bb90d8f1 Mon Sep 17 00:00:00 2001 From: randyl Date: Wed, 15 Jul 2015 19:30:34 -0600 Subject: Fixed public IP allocation JIRA: BGS-89 For bare metal deployments, the list IP of the last IP of the subnet and 20 IPs before are reservered for neutron public IPs. A new function to subtract a number from an IP was added. Really fixed the logic for skipping 10 IP for VIPs was flipped to treat no_dhcp with static IPs as a special case. Removed a false print statement that the foreman VM is running. Consolidated two identical print messages with the public IP range. 
Change-Id: I189b9440dafd98e49d9a147515810e44b97ab256 Signed-off-by: randyl --- foreman/ci/deploy.sh | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a607350..d027779 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -124,6 +124,19 @@ function next_ip { echo $baseaddr.$lsv } +##subtracts a value from an IP address +##params: last ip, ip_count +##assumes ip_count is less than the last octect of the address +subtract_ip() { + IFS=. read -r i1 i2 i3 i4 <<< "$1" + ip_count=$2 + if [ $i4 -lt $ip_count ]; then + echo -e "\n\n${red}ERROR: Can't subtract $ip_count from IP address $1 Exiting${reset}\n\n" + exit 1 + fi + printf "%d.%d.%d.%d\n" "$i1" "$i2" "$i3" "$((i4 - $ip_count ))" +} + ##removes the network interface config from Vagrantfile ##params: interface ##assumes you are in the directory of Vagrantfile @@ -703,11 +716,11 @@ configure_network() { ##replace foreman site sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$foreman_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml ##replace public vips - ##no need to do this if virtual and no dhcp - if [ ! -z "$enable_virtual_dhcp" ]; then - next_public_ip=$(increment_ip $next_public_ip 10) - else + ##no need to do this if no dhcp + if [[ -z "$enable_virtual_dhcp" && ! -z "$virtual" ]]; then next_public_ip=$(next_usable_ip $next_public_ip) + else + next_public_ip=$(increment_ip $next_public_ip 10) fi public_output=$(grep -E '*public_vip' opnfv_ksgen_settings.yml) @@ -775,15 +788,13 @@ configure_network() { else public_allocation_start=$(next_ip $next_public_ip) public_allocation_end=$static_ip_range_end - echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" fi else last_ip_subnet=$(find_last_ip_subnet $next_public_ip $public_subnet_mask) - public_allocation_start=$(increment_subnet $public_subnet $(( $last_ip_subnet - $floating_ip_count )) ) - public_allocation_end=$(increment_subnet $public_subnet $(( $last_ip_subnet )) ) - echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" - echo "${blue}Foreman VM is up! ${reset}" + public_allocation_start=$(subtract_ip $last_ip_subnet $floating_ip_count ) + public_allocation_end=${last_ip_subnet} fi + echo "${blue}Neutron Floating IP range: $public_allocation_start to $public_allocation_end ${reset}" sed -i 's/^.*public_allocation_start:.*$/ public_allocation_start:'" $public_allocation_start"'/' opnfv_ksgen_settings.yml sed -i 's/^.*public_allocation_end:.*$/ public_allocation_end:'" $public_allocation_end"'/' opnfv_ksgen_settings.yml -- cgit 1.2.3-korg From fdd726d803bae3d05051852eaf0e0c3349348bda Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 20 Jul 2015 17:41:20 -0400 Subject: Fixes clean to remove all VMs and be more effective Now all VMs in the vm_dir (/opt/var/opnfv) will be destroyed and removed. Also, virtual box VMs that are leftover even if Vagrant is stuck will attempted to be gracefully removed first, before forcefully destroying the process. The clean should work for both baremetal and virtual deployments. 
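A minimal sketch of that graceful-then-forceful teardown for a single leftover VM, assuming a standard VirtualBox install and treating the VM name pattern as a placeholder, could look like this:

  #!/bin/bash
  # Sketch: gracefully remove one leftover VirtualBox VM before resorting to killall.
  # $1 is whatever name or pattern the node VM was registered under (placeholder).
  vm_pattern=${1:?usage: $0 <vm-name-pattern>}

  if vboxmanage list runningvms | grep -q "$vm_pattern"; then
    vbox_id=$(vboxmanage list runningvms | grep "$vm_pattern" | awk '{print $1}' | sed 's/"//g')
    vboxmanage controlvm "$vbox_id" poweroff        # stop it cleanly first
    sleep 5                                         # give the poweroff a moment to settle
    vboxmanage unregistervm --delete "$vbox_id"     # same form the clean script uses below
  else
    echo "No running VirtualBox VM matches $vm_pattern"
  fi
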
JIRA: APEX-3 Change-Id: Ib31cf1210651e4413ec8fe918ff91be63f4ad6d4 Signed-off-by: Tim Rozet --- foreman/ci/clean.sh | 69 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 50 insertions(+), 19 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh index 0dc34ed..1a16efd 100755 --- a/foreman/ci/clean.sh +++ b/foreman/ci/clean.sh @@ -5,7 +5,7 @@ # #Uses Vagrant and VirtualBox # -#Destroys Vagrant VM running in $vm_dir/foreman_vm +#Destroys Vagrant VMs running in $vm_dir/ #Shuts down all nodes found in Khaleesi settings #Removes hypervisor kernel modules (VirtualBox) @@ -87,7 +87,7 @@ node_counter=0 output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` for line in ${output} ; do bmc_pass[$node_counter]=$line - ((node_counter++)) + ((node_counter++)) done for mynode in `seq 0 $max_nodes`; do @@ -118,36 +118,67 @@ fi ###destroy vagrant if [ $skip_vagrant -eq 0 ]; then - if [ -d $vm_dir/foreman_vm ]; then - cd $vm_dir/foreman_vm - if vagrant destroy -f; then - echo "${blue}Successfully destroyed Foreman VM ${reset}" - else - echo "${red}Unable to destroy Foreman VM ${reset}" - echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" - if ps axf | grep vagrant; then - echo "${red}Vagrant VM still exists...exiting ${reset}" - exit 1 + if [ -d $vm_dir ]; then + ##all vm directories + for vm in $( ls $vm_dir ); do + cd $vm_dir/$vm + if vagrant destroy -f; then + echo "${blue}Successfully destroyed $vm Vagrant VM ${reset}" else - echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}" + echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}" + killall vagrant + echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" + if ps axf | grep vagrant; then + echo "${red}Vagrant process still exists after kill...exiting ${reset}" + exit 1 + else + echo "${blue}Vagrant process doesn't exist. Moving on... ${reset}" + fi fi - fi + + ##Vagrant boxes appear as VboxHeadless processes + ##try to gracefully destroy the VBox VM if it still exists + if vboxmanage list runningvms | grep $vm; then + echo "${red} $vm VBoxHeadless process still exists...Removing${reset}" + vbox_id=$(vboxmanage list runningvms | grep $vm | awk '{print $1}' | sed 's/"//g') + vboxmanage controlvm $vbox_id poweroff + if vboxmanage unregistervm --delete $vbox_id; then + echo "${blue}$vm VM is successfully deleted! ${reset}" + else + echo "${red} Unable to delete VM $vm ...Exiting ${reset}" + exit 1 + fi + else + echo "${blue}$vm VM is successfully deleted! ${reset}" + fi + done + else + echo "${blue}${vm_dir} doesn't exist, no VMs in OPNFV directory to destroy! ${reset}" fi + echo "${blue}Checking for any remaining virtual box processes...${reset}" ###kill virtualbox - echo "${blue}Killing VirtualBox ${reset}" - killall virtualbox - killall VBoxHeadless + if ps axf | grep virtualbox; then + echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}" + killall virtualbox + fi + + ###kill any leftover VMs (brute force) + if ps axf | grep VBoxHeadless; then + echo "${blue}VBoxHeadless processes are still running. Killing any remaining VBoxHeadless processes...${reset}" + killall VBoxHeadless + fi ###remove virtualbox - echo "${blue}Removing VirtualBox ${reset}" + echo "${blue}Removing VirtualBox... 
${reset}" yum -y remove $vboxpkg else - echo "${blue}Skipping Vagrant destroy + Vbox Removal as VirtualBox package is already removed ${reset}" + echo "${blue}Skipping Vagrant destroy + VBox Removal as VirtualBox package is already removed ${reset}" fi ###remove working vm directory +echo "${blue}Removing working VM directory: $vm_dir ${reset}" rm -rf $vm_dir ###remove kernel modules -- cgit 1.2.3-korg From 427941840ef4b0023fc51fd0b2e018fc06444541 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 20 Jul 2015 15:49:53 -0400 Subject: Adds check to make sure subnets are a minimum size per network Verifies that subnets are at least these sizes for deploy: - admin: 5 IPs - private: 10 IPs - public: 25 IPs - storage: 10 IPs JIRA: BGS-71 Change-Id: I0a6c373c9a8737fa9f0c2712851616dc301968f3 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index d027779..a05b3de 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -78,6 +78,31 @@ function find_subnet { printf "%d.%d.%d.%d\n" "$((i1 & m1))" "$((i2 & m2))" "$((i3 & m3))" "$((i4 & m4))" } +##verify subnet has at least n IPs +##params: subnet mask, n IPs +function verify_subnet_size { + IFS=. read -r i1 i2 i3 i4 <<< "$1" + num_ips_required=$2 + + ##this function assumes you would never need more than 254 + ##we check here to make sure + if [ "$num_ips_required" -ge 254 ]; then + echo -e "\n\n${red}ERROR: allocating more than 254 IPs is unsupported...Exiting${reset}\n\n" + return 1 + fi + + ##we just return if 3rd octet is not 255 + ##because we know the subnet is big enough + if [ "$i3" -ne 255 ]; then + return 0 + elif [ $((254-$i4)) -ge "$num_ips_required" ]; then + return 0 + else + echo -e "\n\n${red}ERROR: Subnet is too small${reset}\n\n" + return 1 + fi +} + ##finds last usable ip (broadcast minus 1) of a subnet from an IP and netmask ## Warning: This function only works for IPv4 at the moment. ##params: ip, netmask @@ -454,6 +479,11 @@ configure_network() { public_subnet_mask=$subnet_mask public_short_subnet_mask=$(find_short_netmask $interface) + if ! verify_subnet_size $public_subnet_mask 25; then + echo "${red} Not enough IPs in public subnet: $interface_ip_arr[2] ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi + ##set that interface to be public sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile if_counter=1 @@ -478,14 +508,34 @@ configure_network() { subnet_mask=$(find_netmask $interface) if [ "$if_counter" -eq 0 ]; then admin_subnet_mask=$subnet_mask + if ! verify_subnet_size $admin_subnet_mask 5; then + echo "${red} Not enough IPs in admin subnet: ${interface_ip_arr[$if_counter]} ${admin_subnet_mask}. Need at least 5 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi + elif [ "$if_counter" -eq 1 ]; then private_subnet_mask=$subnet_mask private_short_subnet_mask=$(find_short_netmask $interface) + + if ! verify_subnet_size $private_subnet_mask 15; then + echo "${red} Not enough IPs in private subnet: ${interface_ip_arr[$if_counter]} ${private_subnet_mask}. Need at least 15 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi elif [ "$if_counter" -eq 2 ]; then public_subnet_mask=$subnet_mask public_short_subnet_mask=$(find_short_netmask $interface) + + if ! 
verify_subnet_size $public_subnet_mask 25; then + echo "${red} Not enough IPs in public subnet: ${interface_ip_arr[$if_counter]} ${public_subnet_mask}. Need at least 25 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi elif [ "$if_counter" -eq 3 ]; then storage_subnet_mask=$subnet_mask + + if ! verify_subnet_size $storage_subnet_mask 10; then + echo "${red} Not enough IPs in storage subnet: ${interface_ip_arr[$if_counter]} ${storage_subnet_mask}. Need at least 10 IPs. Please resize subnet! Exiting ${reset}" + exit 1 + fi else echo "${red}ERROR: interface counter outside valid range of 0 to 3: $if_counter ! ${reset}" exit 1 -- cgit 1.2.3-korg From 1facde0bf0d51035cfed819e7680145969c6174d Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Fri, 24 Jul 2015 12:35:19 -0400 Subject: Fixes IP/MAC of inventory for lab reconfig JIRA: OCTO-109 Change-Id: I272a1f9f12454ff71a8a1915ab7b712474b84832 Signed-off-by: Tim Rozet --- foreman/ci/inventory/lf_pod2_ksgen_settings.yml | 36 ++++++++++++------------- 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml index 72935c9..2c146a0 100644 --- a/foreman/ci/inventory/lf_pod2_ksgen_settings.yml +++ b/foreman/ci/inventory/lf_pod2_ksgen_settings.yml @@ -105,9 +105,9 @@ nodes: type: compute host_type: baremetal hostgroup: Compute - mac_address: "00:25:b5:a0:00:5e" - bmc_ip: 172.30.8.74 - bmc_mac: "74:a2:e6:a4:14:9c" + mac_address: "00:25:B5:A0:00:2A" + bmc_ip: 172.30.8.75 + bmc_mac: "a8:9d:21:c9:8b:56" bmc_user: admin bmc_pass: octopus ansible_ssh_pass: "Op3nStack" @@ -125,9 +125,9 @@ nodes: type: compute host_type: baremetal hostgroup: Compute - mac_address: "00:25:b5:a0:00:3e" - bmc_ip: 172.30.8.73 - bmc_mac: "a8:9d:21:a0:15:9c" + mac_address: "00:25:B5:A0:00:3A" + bmc_ip: 172.30.8.65 + bmc_mac: "a8:9d:21:c9:4d:26" bmc_user: admin bmc_pass: octopus ansible_ssh_pass: "Op3nStack" @@ -145,13 +145,13 @@ nodes: type: controller host_type: baremetal hostgroup: Controller_Network_ODL - mac_address: "00:25:b5:a0:00:af" - bmc_ip: 172.30.8.66 - bmc_mac: "a8:9d:21:c9:8b:56" + mac_address: "00:25:B5:A0:00:4A" + bmc_ip: 172.30.8.74 + bmc_mac: "a8:9d:21:c9:3a:92" bmc_user: admin bmc_pass: octopus private_ip: controller1_private - private_mac: "00:25:b5:b0:00:1f" + private_mac: "00:25:B5:A0:00:4B" ansible_ssh_pass: "Op3nStack" admin_password: "octopus" groups: @@ -167,13 +167,13 @@ nodes: type: controller host_type: baremetal hostgroup: Controller_Network - mac_address: "00:25:b5:a0:00:9e" - bmc_ip: 172.30.8.75 - bmc_mac: "a8:9d:21:c9:4d:26" + mac_address: "00:25:B5:A0:00:5A" + bmc_ip: 172.30.8.73 + bmc_mac: "74:a2:e6:a4:14:9c" bmc_user: admin bmc_pass: octopus private_ip: controller2_private - private_mac: "00:25:b5:b0:00:de" + private_mac: "00:25:B5:A0:00:5B" ansible_ssh_pass: "Op3nStack" admin_password: "octopus" groups: @@ -189,13 +189,13 @@ nodes: type: controller host_type: baremetal hostgroup: Controller_Network - mac_address: "00:25:b5:a0:00:7e" - bmc_ip: 172.30.8.65 - bmc_mac: "a8:9d:21:c9:3a:92" + mac_address: "00:25:B5:A0:00:6A" + bmc_ip: 172.30.8.72 + bmc_mac: "a8:9d:21:a0:15:9c" bmc_user: admin bmc_pass: octopus private_ip: controller3_private - private_mac: "00:25:b5:b0:00:be" + private_mac: "00:25:B5:A0:00:6B" ansible_ssh_pass: "Op3nStack" admin_password: "octopus" groups: -- cgit 1.2.3-korg From 107b6ec6f371480329d657d09fcf4710f32759ce Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 24 Aug 2015 13:01:09 -0400 Subject: 
Fixes Foreman clean to not hang and removes libvirt Fixes clean so that it does not hang when base_config is not provided. Also now removes libvirt libraries which conflict with VirtualBox required for deploy. JIRA: BGS-78 Change-Id: I7c71ca207f260eafb1fb97ace764b9d288fc258a Signed-off-by: Tim Rozet --- foreman/ci/clean.sh | 140 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 89 insertions(+), 51 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh index 1a16efd..1bd1713 100755 --- a/foreman/ci/clean.sh +++ b/foreman/ci/clean.sh @@ -3,24 +3,23 @@ #Clean script to uninstall provisioning server for Foreman/QuickStack #author: Tim Rozet (trozet@redhat.com) # -#Uses Vagrant and VirtualBox +#Removes Libvirt, KVM, Vagrant, VirtualBox # #Destroys Vagrant VMs running in $vm_dir/ #Shuts down all nodes found in Khaleesi settings -#Removes hypervisor kernel modules (VirtualBox) +#Removes hypervisor kernel modules (VirtualBox & KVM/Libvirt) ##VARS reset=`tput sgr0` blue=`tput setaf 4` red=`tput setaf 1` green=`tput setaf 2` - vm_dir=/var/opt/opnfv ##END VARS ##FUNCTIONS display_usage() { - echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n" + echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n" echo -e "\nUsage:\n$0 [arguments] \n" echo -e "\n -no_parse : No variable parsing into config. Flag. \n" echo -e "\n -base_config : Full path of ksgen settings file to parse. Required. Will provide BMC info to shutdown hosts. Example: -base_config /opt/myinventory.yml \n" @@ -33,7 +32,7 @@ if [[ ( $1 == "--help") || $1 == "-h" ]]; then exit 0 fi -echo -e "\n\n${blue}This script is used to uninstall Foreman/QuickStack Installer and Clean OPNFV Target System${reset}\n\n" +echo -e "\n\n${blue}This script is used to uninstall and clean the OPNFV Target System${reset}\n\n" echo "Use -h to display help" sleep 2 @@ -52,54 +51,55 @@ do esac done - -# Install ipmitool -# Major version is pinned to force some consistency for Arno -if ! yum list installed | grep -i ipmitool; then - if ! yum -y install ipmitool-1*; then - echo "${red}Unable to install ipmitool!${reset}" - exit 1 - fi -else - echo "${blue}Skipping ipmitool as it is already installed!${reset}" -fi - -###find all the bmc IPs and number of nodes -node_counter=0 -output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'` -for line in ${output} ; do - bmc_ip[$node_counter]=$line - ((node_counter++)) -done - -max_nodes=$((node_counter-1)) - -###find bmc_users per node -node_counter=0 -output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'` -for line in ${output} ; do - bmc_user[$node_counter]=$line - ((node_counter++)) -done - -###find bmc_pass per node -node_counter=0 -output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` -for line in ${output} ; do - bmc_pass[$node_counter]=$line - ((node_counter++)) -done - -for mynode in `seq 0 $max_nodes`; do - echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}" - if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then - echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}" +if [ ! -z "$base_config" ]; then + # Install ipmitool + # Major version is pinned to force some consistency for Arno + if ! yum list installed | grep -i ipmitool; then + if ! 
yum -y install ipmitool-1*; then + echo "${red}Unable to install ipmitool!${reset}" + exit 1 + fi else - echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}" - exit 1 + echo "${blue}Skipping ipmitool as it is already installed!${reset}" fi -done + ###find all the bmc IPs and number of nodes + node_counter=0 + output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'` + for line in ${output} ; do + bmc_ip[$node_counter]=$line + ((node_counter++)) + done + + max_nodes=$((node_counter-1)) + + ###find bmc_users per node + node_counter=0 + output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'` + for line in ${output} ; do + bmc_user[$node_counter]=$line + ((node_counter++)) + done + + ###find bmc_pass per node + node_counter=0 + output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` + for line in ${output} ; do + bmc_pass[$node_counter]=$line + ((node_counter++)) + done + for mynode in `seq 0 $max_nodes`; do + echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}" + if ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis power off; then + echo "${blue}Node: $mynode, ${bmc_ip[$mynode]} powered off!${reset}" + else + echo "${red}Error: Unable to power off $mynode, ${bmc_ip[$mynode]} ${reset}" + exit 1 + fi + done +else + echo "${blue}Skipping Baremetal node poweroff as base_config was not provided${reset}" +fi ###check to see if vbox is installed vboxpkg=`rpm -qa | grep VirtualBox` if [ $? -eq 0 ]; then @@ -181,9 +181,47 @@ fi echo "${blue}Removing working VM directory: $vm_dir ${reset}" rm -rf $vm_dir +###check to see if libvirt is installed +echo "${blue}Checking if libvirt/KVM is installed" +if rpm -qa | grep -iE 'libvirt|kvm'; then + echo "${blue}Libvirt/KVM is installed${reset}" + echo "${blue}Checking for any QEMU/KVM VMs...${reset}" + vm_count=0 + while read -r line; do ((vm_count++)); done < <(virsh list --all | sed 1,2d | head -n -1) + if [ $vm_count -gt 0 ]; then + echo "${blue}VMs Found: $vm_count${reset}" + vm_runnning=0 + while read -r line; do ((vm_running++)); done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running) + echo "${blue}Powering off $vm_running VM(s)${reset}" + while read -r vm; do + if ! virsh destroy $vm; then + echo "${red}WARNING: Unable to power off VM ${vm}${reset}" + else + echo "${blue}VM $vm powered off!${reset}" + fi + done < <(virsh list --all | sed 1,2d | head -n -1| grep -i running | sed 's/^[ \t]*//' | awk '{print $2}') + echo "${blue}Destroying libvirt VMs...${reset}" + while read -r vm; do + if ! virsh undefine --remove-all-storage $vm; then + echo "${red}ERROR: Unable to remove the VM ${vm}${reset}" + exit 1 + else + echo "${blue}VM $vm removed!${reset}" + fi + done < <(virsh list --all | sed 1,2d | head -n -1| awk '{print $2}') + else + echo "${blue}No VMs found for removal" + fi + echo "${blue}Removing libvirt and kvm packages" + yum -y remove libvirt-* + yum -y remove *qemu* +else + echo "${blue}libvirt/KVM is not installed${reset}" +fi + ###remove kernel modules echo "${blue}Removing kernel modules ${reset}" -for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv; do +for kernel_mod in vboxnetadp vboxnetflt vboxpci vboxdrv kvm_intel kvm; do if ! rmmod $kernel_mod; then if rmmod $kernel_mod 2>&1 | grep -i 'not currently loaded'; then echo "${blue} $kernel_mod is not currently loaded! 
${reset}" -- cgit 1.2.3-korg From be1ddda8fc823d8a3032d107e242dd3ba91b89a1 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 26 Aug 2015 15:03:30 -0400 Subject: Adds check to ensure 3 control nodes are defined with HA A new check makes sure that 3 controller nodes are defined in base_config when ha_flag is set to true. JIRA: APEX-7 Change-Id: I7f49dec82704d8c9cbbcf17eb004b0adede406b3 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a05b3de..5746e10 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -650,6 +650,20 @@ configure_network() { fi fi +  nodes=`sed -nr '/nodes:/{:start /workaround/!{N;b start};//p}' opnfv_ksgen_settings.yml | sed -n '/^  [A-Za-z0-9]\+:$/p' | sed 's/\s*//g' | sed 's/://g'` +  controller_nodes=`echo $nodes | tr " " "\n" | grep controller | tr "\n" " "` +  echo "${blue}Controller nodes found in settings: ${controller_nodes}${reset}" +  my_controller_array=( $controller_nodes ) +  num_control_nodes=${#my_controller_array[@]} +  if [ "$num_control_nodes" -ne 3 ]; then +    if cat opnfv_ksgen_settings.yml | grep ha_flag | grep true; then +      echo "${red}Error: You must define exactly 3 control nodes when HA flag is true!${reset}" +      exit 1 +    fi +  else +    echo "${blue}Number of Controller nodes detected: ${num_control_nodes}${reset}" +  fi + if [ $no_parse ]; then echo "${blue}Skipping parsing variables into settings file as no_parse flag is set${reset}" -- cgit 1.2.3-korg From 1903ca60ff23ab90b2ff8801ed655779225024b5 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 26 Aug 2015 18:18:59 -0400 Subject: Adds ability to specify NICs to bridge on the jumphost This patch adds new functionality to be able to specify the physical interfaces on the jumphost to bridge to (rather than having them figured out dynamically). The patch introduces new args for each nic: - admin_nic - private_nic - public_nic - storage_nic The public_nic is the only arg to be used when using -virtual. For baremetal deployments, admin_nic, private_nic, and public_nic must either all be specified or all be omitted. The patch also cleans up the virtual node logic, which was working but was more complex than necessary. JIRA: APEX-4 Change-Id: Ic0a2e1d8ce2b7464441e3e9041faf123b86ca7e6 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 248 +++++++++++++++++++++++++++++---------------------- 1 file changed, 139 insertions(+), 109 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a05b3de..862077e 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -44,6 +44,14 @@ display_usage() { echo -e "\n -static_ip_range : static IP range to define when using virtual and when dhcp is not being used (default), must at least a 20 IP block. Format: '192.168.1.1,192.168.1.20' \n" echo -e "\n -ping_site : site to use to verify IP connectivity from the VM when -virtual is used. Format: -ping_site www.blah.com \n" echo -e "\n -floating_ip_count : number of IP address from the public range to be used for floating IP. Default is 20.\n" +  echo -e "\n -admin_nic : Baremetal NIC for the admin network. Required if other "nic" arguments are used. \ +Not applicable with -virtual. Example: -admin_nic em1" +  echo -e "\n -private_nic : Baremetal NIC for the private network. Required if other "nic" arguments are used. \ +Not applicable with -virtual. Example: -private_nic em2" +  echo -e "\n -public_nic : Baremetal NIC for the public network. 
Required if other "nic" arguments are used. \ +Can also be used with -virtual. Example: -public_nic em3" + echo -e "\n -storage_nic : Baremetal NIC for the storage network. Optional. Not applicable with -virtual. \ +Private NIC will be used for storage if not specified. Example: -storage_nic em4" } ##verify vm dir exists @@ -269,6 +277,26 @@ parse_cmdline() { floating_ip_count=$2 shift 2 ;; + -admin_nic) + admin_nic=$2 + shift 2 + nic_arg_flag=1 + ;; + -private_nic) + private_nic=$2 + shift 2 + nic_arg_flag=1 + ;; + -public_nic) + public_nic=$2 + shift 2 + nic_arg_flag=1 + ;; + -storage_nic) + storage_nic=$2 + shift 2 + nic_arg_flag=1 + ;; *) display_usage exit 1 @@ -294,6 +322,39 @@ parse_cmdline() { if [ -z "$floating_ip_count" ]; then floating_ip_count=20 fi + + ##Validate nic args + if [ $nic_arg_flag -eq 1 ]; then + if [ -z "$virtual" ]; then + for nic_type in admin_nic private_nic public_nic; do + eval "nic_value=\$$nic_type" + if [ -z "$nic_value" ]; then + echo "${red}$nic_type is empty or not defined. Required when other nic args are given!${reset}" + exit 1 + fi + interface_ip=$(find_ip $nic_value) + if [ ! "$interface_ip" ]; then + echo "${red}$nic_value does not have an IP address! Exiting... ${reset}" + exit 1 + fi + done + else + ##if virtual only public_nic should be specified + for nic_type in admin_nic private_nic storage_nic; do + eval "nic_value=\$$nic_type" + if [ ! -z "$nic_value" ]; then + echo "${red}$nic_type is not a valid argument using -virtual. Please only specify public_nic!${reset}" + exit 1 + fi + done + + interface_ip=$(find_ip $public_nic) + if [ ! "$interface_ip" ]; then + echo "${red}Public NIC: $public_nic does not have an IP address! Exiting... ${reset}" + exit 1 + fi + fi + fi } ##disable selinux @@ -437,10 +498,26 @@ clone_bgs() { configure_network() { cd $vm_dir/foreman_vm - echo "${blue}Detecting network configuration...${reset}" - ##detect host 1 or 3 interface configuration - #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` - output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` + ##if nic_arg_flag is set, then we don't figure out + ##NICs dynamically + if [ $nic_arg_flag -eq 1 ]; then + echo "${blue}Static Network Interfaces Defined. Updating Vagrantfile...${reset}" + if [ $virtual ]; then + nic_list="$public_nic" + elif [ -z "$storage_nic" ]; then + echo "${blue}storage_nic not defined, will combine storage into private VLAN ${reset}" + nic_list="$admin_nic $private_nic $public_nic" + else + nic_list="$admin_nic $private_nic $public_nic $storage_nic" + fi + nic_array=( $nic_list ) + output=$nic_list + else + echo "${blue}Detecting network configuration...${reset}" + ##detect host 1 or 3 interface configuration + #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` + output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` + fi if [ ! "$output" ]; then printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 @@ -449,10 +526,15 @@ configure_network() { ##virtual we only find 1 interface if [ $virtual ]; then - ##find interface with default gateway - this_default_gw=$(ip route | grep default | awk '{print $3}') - echo "${blue}Default Gateway: $this_default_gw ${reset}" - this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}') + if [ ! 
-z "${nic_array[0]}" ]; then + echo "${blue}Public Interface specified: ${nic_array[0]}${reset}" + this_default_gw_interface=${nic_array[0]} + else + ##find interface with default gateway + this_default_gw=$(ip route | grep default | awk '{print $3}') + echo "${blue}Default Gateway: $this_default_gw ${reset}" + this_default_gw_interface=$(ip route get $this_default_gw | awk '{print $3}') + fi ##find interface IP, make sure its valid interface_ip=$(find_ip $this_default_gw_interface) @@ -941,117 +1023,65 @@ start_virtual_nodes() { sleep 1400 fi - ##find number of interfaces with ip and substitute in VagrantFile - output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` - - if [ ! "$output" ]; then - printf '%s\n' 'deploy.sh: Unable to detect interfaces to bridge to' >&2 + ## Add Admin interface + mac_string=config_nodes_${node}_mac_address + mac_addr=$(eval echo \$$mac_string) + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + if [ $mac_addr == "" ]; then + echo "${red} Unable to find mac_address for $node! ${reset}" exit 1 fi + this_admin_ip=${admin_ip_arr[$node]} + sed -i 's/^.*eth_replace0.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile - if_counter=0 - for interface in ${output}; do - - if [ -z "$enable_virtual_dhcp" ]; then - if [ "$if_counter" -ge 1 ]; then - break - fi - elif [ "$if_counter" -ge 4 ]; then - break - fi - interface_ip=$(find_ip $interface) - if [ ! "$interface_ip" ]; then - continue - fi - case "${if_counter}" in - 0) - mac_string=config_nodes_${node}_mac_address - mac_addr=$(eval echo \$$mac_string) - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - if [ $mac_addr == "" ]; then - echo "${red} Unable to find mac_address for $node! ${reset}" - exit 1 - fi - ;; - 1) - if [ "$node_type" == "controller" ]; then - mac_string=config_nodes_${node}_private_mac - mac_addr=$(eval echo \$$mac_string) - if [ $mac_addr == "" ]; then - echo "${red} Unable to find private_mac for $node! ${reset}" - exit 1 - fi - else - ##generate random mac - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - fi - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - *) - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - ;; - esac - this_admin_ip=${admin_ip_arr[$node]} - sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "private_network", virtualbox__intnet: "my_admin_network", ip: '\""$this_admin_ip"\"', netmask: '\""$admin_subnet_mask"\"', :mac => '\""$mac_addr"\"'/' Vagrantfile - ((if_counter++)) - done - ##now remove interface config in Vagrantfile for 1 node - ##if 1, 3, or 4 interfaces set deployment type - ##if 2 interfaces remove 2nd interface and set deployment type - if [[ "$if_counter" == 1 || "$if_counter" == 2 ]]; then - deployment_type="single_network" - if [ "$node_type" == "controller" ]; then - mac_string=config_nodes_${node}_private_mac - mac_addr=$(eval echo \$$mac_string) - if [ $mac_addr == "" ]; then - echo "${red} Unable to find private_mac for $node! 
${reset}" - exit 1 - fi - else - ##generate random mac - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - fi - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - if [ "$node_type" == "controller" ]; then - new_node_ip=${controllers_ip_arr[$controller_count]} - if [ ! "$new_node_ip" ]; then - echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}" - exit 1 - fi - ((controller_count++)) - else - next_private_ip=$(next_ip $next_private_ip) - if [ ! "$next_private_ip" ]; then - echo "{red}ERROR: Could not find private ip for $node ${reset}" + ## Add private interface + if [ "$node_type" == "controller" ]; then + mac_string=config_nodes_${node}_private_mac + mac_addr=$(eval echo \$$mac_string) + if [ $mac_addr == "" ]; then + echo "${red} Unable to find private_mac for $node! ${reset}" exit 1 fi - new_node_ip=$next_private_ip + else + ##generate random mac + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + fi + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + if [ "$node_type" == "controller" ]; then + new_node_ip=${controllers_ip_arr[$controller_count]} + if [ ! "$new_node_ip" ]; then + echo "{red}ERROR: Empty node ip for controller $controller_count ${reset}" + exit 1 fi - sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile - ##replace host_ip in vm_nodes_provision with private ip - sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh - ##replace ping site - if [ ! -z "$ping_site" ]; then - sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh + ((controller_count++)) + else + next_private_ip=$(next_ip $next_private_ip) + if [ ! "$next_private_ip" ]; then + echo "{red}ERROR: Could not find private ip for $node ${reset}" + exit 1 fi - ##find public ip info - mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') - mac_addr=$(echo $mac_addr | sed 's/:\|-//g') - this_public_ip=${public_ip_arr[$node]} + new_node_ip=$next_private_ip + fi + sed -i 's/^.*eth_replace1.*$/ config.vm.network "private_network", virtualbox__intnet: "my_private_network", :mac => '\""$mac_addr"\"', ip: '\""$new_node_ip"\"', netmask: '\""$private_subnet_mask"\"'/' Vagrantfile + ##replace host_ip in vm_nodes_provision with private ip + sed -i 's/^host_ip=REPLACE/host_ip='$new_node_ip'/' vm_nodes_provision.sh + ##replace ping site + if [ ! 
-z "$ping_site" ]; then + sed -i 's/www.google.com/'$ping_site'/' vm_nodes_provision.sh + fi - if [ -z "$enable_virtual_dhcp" ]; then - sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile - else - sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile - fi - remove_vagrant_network eth_replace3 - elif [ "$if_counter" == 3 ]; then - deployment_type="three_network" - remove_vagrant_network eth_replace3 + ##find public ip info and add public interface + mac_addr=$(echo -n 00-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"') + mac_addr=$(echo $mac_addr | sed 's/:\|-//g') + this_public_ip=${public_ip_arr[$node]} + + if [ -z "$enable_virtual_dhcp" ]; then + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"', ip: '\""$this_public_ip"\"', netmask: '\""$public_subnet_mask"\"'/' Vagrantfile else - deployment_type="multi_network" + sed -i 's/^.*eth_replace2.*$/ config.vm.network "public_network", bridge: '\'"$public_interface"\'', :mac => '\""$mac_addr"\"'/' Vagrantfile fi + remove_vagrant_network eth_replace3 + ##modify provisioning to do puppet install, config, and foreman check-in ##substitute host_name and dns_server in the provisioning script host_string=config_nodes_${node}_hostname -- cgit 1.2.3-korg From 8228547ade02168c3c687ab2611f6065c2641766 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 26 Aug 2015 14:22:15 -0400 Subject: Adds baremetal node power check before attempting deploy Ensures nodes are shutoff if a baremetal deployment, before attempting to deploy. Previous behavior is deploy will run up to Foreman pxeboot, then fail. We can save a lot of time by detecting this up front. Nodes must be shutoff before deploying. JIRA: BGS-68 Change-Id: Ie585e12a5271bc1baac60f6256f85e5cb1b1baeb Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a05b3de..344356c 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -1107,11 +1107,79 @@ start_virtual_nodes() { fi } +##check to make sure nodes are powered off +##this function does nothing if virtual +##params: none +##usage: check_baremetal_nodes() +check_baremetal_nodes() { + if [ $virtual ]; then + echo "${blue}Skipping Baremetal node power status check as deployment is virtual ${reset}" + else + echo "${blue}Checking Baremetal nodes power state... ${reset}" + if [ ! -z "$base_config" ]; then + # Install ipmitool + # Major version is pinned to force some consistency for Arno + if ! yum list installed | grep -i ipmitool; then + echo "${blue}Installing ipmitool...${reset}" + if ! 
yum -y install ipmitool-1*; then + echo "${red}Failed to install ipmitool!${reset}" + exit 1 + fi + fi + + ###find all the bmc IPs and number of nodes + node_counter=0 + output=`grep bmc_ip $base_config | grep -Eo '[0-9]+.[0-9]+.[0-9]+.[0-9]+'` + for line in ${output} ; do + bmc_ip[$node_counter]=$line + ((node_counter++)) + done + + max_nodes=$((node_counter-1)) + + ###find bmc_users per node + node_counter=0 + output=`grep bmc_user $base_config | sed 's/\s*bmc_user:\s*//'` + for line in ${output} ; do + bmc_user[$node_counter]=$line + ((node_counter++)) + done + + ###find bmc_pass per node + node_counter=0 + output=`grep bmc_pass $base_config | sed 's/\s*bmc_pass:\s*//'` + for line in ${output} ; do + bmc_pass[$node_counter]=$line + ((node_counter++)) + done + + for mynode in `seq 0 $max_nodes`; do + echo "${blue}Node: ${bmc_ip[$mynode]} ${bmc_user[$mynode]} ${bmc_pass[$mynode]} ${reset}" + ipmi_output=`ipmitool -I lanplus -P ${bmc_pass[$mynode]} -U ${bmc_user[$mynode]} -H ${bmc_ip[$mynode]} chassis status \ + | grep "System Power" | cut -d ':' -f2 | tr -d [:blank:]` + if [ "$ipmi_output" == "on" ]; then + echo "${red}Error: Node is powered on: ${bmc_ip[$mynode]} ${reset}" + echo "${red}Please run clean.sh before running deploy! ${reset}" + exit 1 + elif [ "$ipmi_output" == "off" ]; then + echo "${blue}Node: ${bmc_ip[$mynode]} is powered off${reset}" + else + echo "${red}Warning: Unable to detect node power state: ${bmc_ip[$mynode]} ${reset}" + fi + done + else + echo "${red}base_config was not provided for a baremetal install! Exiting${reset}" + exit 1 + fi + fi +} + ##END FUNCTIONS main() { parse_cmdline "$@" disable_selinux + check_baremetal_nodes install_EPEL install_vbox install_ansible -- cgit 1.2.3-korg From 290b60674ce471abe898c4f444e4ed9562456477 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Thu, 27 Aug 2015 16:30:54 -0400 Subject: Fixes vagrant base box to be opnfv Chef removed the centos7 basebox from Atlas. A new opnfv base box was added. This patch includes the modifications necessary to point to that new box. JIRA: APEX-14 Change-Id: I7e74726c692f21583d0d70f9dd7558665dfb5d99 Signed-off-by: Tim Rozet --- foreman/ci/Vagrantfile | 2 +- foreman/ci/deploy.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/Vagrantfile b/foreman/ci/Vagrantfile index a01da70..5550976 100644 --- a/foreman/ci/Vagrantfile +++ b/foreman/ci/Vagrantfile @@ -12,7 +12,7 @@ Vagrant.configure(2) do |config| # Every Vagrant development environment requires a box. You can search for # boxes at https://atlas.hashicorp.com/search. - config.vm.box = "chef/centos-7.0" + config.vm.box = "opnfv/centos-7.0" # Disable automatic box update checking. If you disable this, then # boxes will only be checked for updates when the user runs diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a05b3de..405e286 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -387,8 +387,8 @@ install_vagrant() { fi ##add centos 7 box to vagrant - if ! vagrant box list | grep chef/centos-7.0; then - if ! vagrant box add chef/centos-7.0 --provider virtualbox; then + if ! vagrant box list | grep opnfv/centos-7.0; then + if ! 
vagrant box add opnfv/centos-7.0 --provider virtualbox; then printf '%s\n' 'deploy.sh: Unable to download centos7 box for Vagrant' >&2 exit 1 fi -- cgit 1.2.3-korg From 24f395d3bf5b6bf3c123a152cae044d156b7a7d9 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Wed, 26 Aug 2015 16:00:40 -0400 Subject: Fixes bug with interface detection This patch addresses the detection of the physical NICs to bridge to. There was a bug where the regexp would accidentally ignore some NICs, when the purpose of the expression was only to ignore virtual NICs. Rather than ignore NICs by their name, this patch ensures that NICs are ignored if they are actually virtual types. Also fixes a syntax issue with checking for nic_arg_flag. JIRA: APEX-11 Change-Id: Ibd965c454faab1d751e3ce0b260eabc7fbe7c720 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a978e38..59254d1 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -324,7 +324,7 @@ parse_cmdline() { fi ##Validate nic args -  if [ $nic_arg_flag -eq 1 ]; then +  if [[ $nic_arg_flag -eq 1 ]]; then if [ -z "$virtual" ]; then for nic_type in admin_nic private_nic public_nic; do eval "nic_value=\$$nic_type" @@ -500,7 +500,7 @@ configure_network() { ##if nic_arg_flag is set, then we don't figure out ##NICs dynamically -  if [ $nic_arg_flag -eq 1 ]; then +  if [[ $nic_arg_flag -eq 1 ]]; then echo "${blue}Static Network Interfaces Defined. Updating Vagrantfile...${reset}" if [ $virtual ]; then nic_list="$public_nic" @@ -516,7 +516,7 @@ configure_network() { echo "${blue}Detecting network configuration...${reset}" ##detect host 1 or 3 interface configuration #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` -    output=`ifconfig | grep -E "^[a-zA-Z0-9]+:"| grep -Ev "lo|tun|virbr|vboxnet" | awk '{print $1}' | sed 's/://'` +    output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f9` fi if [ ! "$output" ]; then -- cgit 1.2.3-korg From 8ce84b06eb628e3b1a2145a3bfda75ca12082edb Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Mon, 31 Aug 2015 10:36:27 -0400 Subject: Fixes puppet modules to come from Genesis repo This patch deprecates the use of the puppet-trystack repo and migrates the puppet modules over to Genesis. deploy.sh now copies the modules from the current Genesis repo into foreman VM's /vagrant to be used during install (rather than recloning). 
Associated changes in Khaleesi/QuickStack: - Khaleesi: commit 31b912778847f295d1459a71e5c41c808977c91d - Astapor: commit ac4be11e348d7bcff1e1aa28c96056e18dba5291 JIRA: APEX-8 Change-Id: I322ac797d1d687f17434e07e6775acef8961bdef Signed-off-by: Tim Rozet --- common/puppet-opnfv/manifests/compute.pp | 43 +++++++----- .../puppet-opnfv/manifests/controller_networker.pp | 79 +++++++++++----------- .../manifests/external_net_presetup.pp | 2 +- common/puppet-opnfv/manifests/templates/br_ex.erb | 10 +++ foreman/ci/deploy.sh | 29 +++----- 5 files changed, 85 insertions(+), 78 deletions(-) create mode 100644 common/puppet-opnfv/manifests/templates/br_ex.erb (limited to 'foreman/ci') diff --git a/common/puppet-opnfv/manifests/compute.pp b/common/puppet-opnfv/manifests/compute.pp index 0b81757..2fed241 100644 --- a/common/puppet-opnfv/manifests/compute.pp +++ b/common/puppet-opnfv/manifests/compute.pp @@ -51,11 +51,11 @@ class opnfv::compute { if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password } ##HA Global params - if $ha_flag { + if $ha_flag and str2bool($ha_flag) { if $private_network == '' { fail('private_network is empty') } if !$keystone_private_vip { fail('keystone_private_vip is empty') } if !$glance_private_vip { fail('glance_private_vip is empty') } - if !$nova_private_vip { fail('nova_private_vip is empty') } + if !$nova_public_vip { fail('nova_public_vip is empty') } if !$nova_db_password { $nova_db_password = $single_password } if !$nova_user_password { $nova_user_password = $single_password } if !$controllers_ip_array { fail('controllers_ip_array is empty') } @@ -78,19 +78,30 @@ class opnfv::compute { } else { ##non HA params - if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') } - if !$private_ip { fail('private_ip is empty') } - $keystone_private_vip = $private_ip - $glance_private_vip = $private_ip - $nova_private_vip = $private_ip - $neutron_private_vip = $private_ip - if !$nova_db_password { fail('nova_db_password is empty') } - if !$nova_user_password { fail('nova_user_password is empty') } - if !$odl_control_ip { $odl_control_ip = $private_ip } - if !$mysql_ip { $mysql_ip = $private_ip } - if !$amqp_ip { $amqp_ip = $private_ip } - if !$amqp_username { $amqp_username = 'guest' } - if !$amqp_password { $amqp_password = 'guest' } + ##Mandatory + if $private_network == '' { fail('private_network is empty') } + if ($odl_flag != '') and str2bool($odl_flag) { + if $odl_control_ip == '' { fail('odl_control_ip is empty') } + } + if $controller_ip == '' { fail('controller_ip is empty') } + + ##Optional + ##Find private interface + $ovs_tunnel_if = get_nic_from_network("$private_network") + ##Find private ip + $private_ip = get_ip_from_nic("$ovs_tunnel_if") + + $keystone_private_vip = $controller_ip + $glance_private_vip = $controller_ip + $nova_public_vip = $controller_ip + $neutron_private_vip = $controller_ip + + if !$nova_db_password { $nova_db_password = $single_password } + if !$nova_user_password { $nova_user_password = $single_password } + if !$mysql_ip { $mysql_ip = $controller_ip } + if !$amqp_ip { $amqp_ip = $controller_ip } + if !$amqp_username { $amqp_username = $single_username } + if !$amqp_password { $amqp_password = $single_password } if !$ceph_mon_host { $ceph_mon_host= ["$private_ip"] } if !$ceph_mon_initial_members { $ceph_mon_initial_members = ["$::hostname"] } } @@ -103,7 +114,7 @@ class opnfv::compute { libvirt_inject_password => 'false', libvirt_inject_key => 'false', libvirt_images_type => 'rbd', - nova_host => $nova_private_vip, + 
nova_host => $nova_public_vip, nova_db_password => $nova_db_password, nova_user_password => $nova_user_password, private_network => '', diff --git a/common/puppet-opnfv/manifests/controller_networker.pp b/common/puppet-opnfv/manifests/controller_networker.pp index b148ec8..60cae34 100644 --- a/common/puppet-opnfv/manifests/controller_networker.pp +++ b/common/puppet-opnfv/manifests/controller_networker.pp @@ -329,50 +329,47 @@ class opnfv::controller_networker { } } else { - if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') } - if $public_ip == '' { fail('public_ip is empty') } - if $private_ip == '' { fail('private_ip is empty') } - - if $odl_control_ip == '' { $odl_control_ip = $private_ip } - - if $mysql_ip == '' { fail('mysql_ip is empty') } - if $mysql_root_password == '' { fail('mysql_root_password is empty') } - if $amqp_ip == '' { fail('amqp_ip is empty') } - - if $memcache_ip == '' { fail('memcache_ip is empty') } - if $neutron_ip == '' { fail('neutron_ip is empty') } - - if $keystone_db_password == '' { fail('keystone_db_password is empty') } - - if $horizon_secret_key == '' { fail('horizon_secret_key is empty') } - - if $nova_user_password == '' { fail('nova_user_password is empty') } - if $nova_db_password == '' { fail('nova_db_password is empty') } - - if $cinder_user_password == '' { fail('cinder_user_password is empty') } - if $cinder_db_password == '' { fail('cinder_db_password is empty') } - - if $glance_user_password == '' { fail('glance_user_password is empty') } - if $glance_db_password == '' { fail('glance_db_password is empty') } - - if $neutron_user_password == '' { fail('neutron_user_password is empty') } - if $neutron_db_password == '' { fail('neutron_db_password is empty') } - if $neutron_metadata_shared_secret == '' { fail('neutron_metadata_shared_secret is empty') } - - if $ceilometer_user_password == '' { fail('ceilometer_user_password is empty') } - if $ceilometer_metering_secret == '' { fail('ceilometer_user_password is empty') } - - if $heat_user_password == '' { fail('heat_user_password is empty') } - if $heat_db_password == '' { fail('heat_db_password is empty') } - if $heat_auth_encrypt_key == '' { fail('heat_auth_encrypt_key is empty') } - - if $swift_user_password == '' { fail('swift_user_password is empty') } - if $swift_shared_secret == '' { fail('swift_shared_secret is empty') } - if $swift_admin_password == '' { fail('swift_admin_password is empty') } + ##Mandatory Non-HA parameters + if $private_network == '' { fail('private_network is empty') } + if $public_network == '' { fail('public_network is empty') } + ##Optional Non-HA parameters if !$amqp_username { $amqp_username = $single_username } if !$amqp_password { $amqp_password = $single_password } + if !$mysql_root_password { $mysql_root_password = $single_password } + if !$keystone_db_password { $keystone_db_password = $single_password } + if !$horizon_secret_key { $horizon_secret_key = $single_password } + if !$nova_db_password { $nova_db_password = $single_password } + if !$nova_user_password { $nova_user_password = $single_password } + if !$cinder_db_password { $cinder_db_password = $single_password } + if !$cinder_user_password { $cinder_user_password = $single_password } + if !$glance_db_password { $glance_db_password = $single_password } + if !$glance_user_password { $glance_user_password = $single_password } + if !$neutron_db_password { $neutron_db_password = $single_password } + if !$neutron_user_password { $neutron_user_password = $single_password } + if 
!$neutron_metadata_shared_secret { $neutron_metadata_shared_secret = $single_password } + if !$ceilometer_user_password { $ceilometer_user_password = $single_password } + if !$ceilometer_metering_secret { $ceilometer_metering_secret = $single_password } + if !$heat_user_password { $heat_user_password = $single_password } + if !$heat_db_password { $heat_db_password = $single_password } + if !$heat_auth_encryption_key { $heat_auth_encryption_key = 'octopus1octopus1' } + if !$swift_user_password { $swift_user_password = $single_password } + if !$swift_shared_secret { $swift_shared_secret = $single_password } + if !$swift_admin_password { $swift_admin_password = $single_password } + ##Find private interface + $ovs_tunnel_if = get_nic_from_network("$private_network") + ##Find private ip + $private_ip = get_ip_from_nic("$ovs_tunnel_if") + #Find public NIC + $public_nic = get_nic_from_network("$public_network") + $public_ip = get_ip_from_nic("$public_nic") + + if !$mysql_ip { $mysql_ip = $private_ip } + if !$amqp_ip { $amqp_ip = $private_ip } + if !$memcache_ip { $memcache_ip = $private_ip } + if !$neutron_ip { $neutron_ip = $private_ip } + if !$odl_control_ip { $odl_control_ip = $private_ip } class { "quickstack::neutron::controller_networker": admin_email => $admin_email, @@ -427,6 +424,8 @@ class opnfv::controller_networker { horizon_cert => $quickstack::params::horizon_cert, horizon_key => $quickstack::params::horizon_key, + keystonerc => true, + ml2_mechanism_drivers => $ml2_mech_drivers, #neutron => true, diff --git a/common/puppet-opnfv/manifests/external_net_presetup.pp b/common/puppet-opnfv/manifests/external_net_presetup.pp index b7c7c5f..96038c0 100644 --- a/common/puppet-opnfv/manifests/external_net_presetup.pp +++ b/common/puppet-opnfv/manifests/external_net_presetup.pp @@ -85,7 +85,7 @@ class opnfv::external_net_presetup { owner => 'root', group => 'root', mode => '0644', - content => template('trystack/br_ex.erb'), + content => template('opnfv/br_ex.erb'), before => Class["quickstack::pacemaker::params"], } -> diff --git a/common/puppet-opnfv/manifests/templates/br_ex.erb b/common/puppet-opnfv/manifests/templates/br_ex.erb new file mode 100644 index 0000000..6c0e7e7 --- /dev/null +++ b/common/puppet-opnfv/manifests/templates/br_ex.erb @@ -0,0 +1,10 @@ +DEVICE=br-ex +DEVICETYPE=ovs +IPADDR=<%= @public_nic_ip %> +NETMASK=<%= @public_nic_netmask %> +GATEWAY=<%= @public_gateway %> +BOOTPROTO=static +ONBOOT=yes +TYPE=OVSBridge +PROMISC=yes +PEERDNS=no diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index a05b3de..13ed641 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -415,20 +415,13 @@ clean_tmp() { } ##clone genesis and move to node vm dir -##params: none -##usage: clone_bgs +##params: destination directory +##usage: clone_bgs /tmp/myvm/ clone_bgs() { - cd /tmp/ - rm -rf /tmp/genesis/ - - ##clone artifacts and move into foreman_vm dir - if ! GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then - printf '%s\n' 'deploy.sh: Unable to clone genesis repo' >&2 - exit 1 - fi - - mv -f /tmp/genesis/foreman/ci $vm_dir/foreman_vm - rm -rf /tmp/genesis/ + script=`realpath $0` + script_dir="`dirname "$script"`" + cp -fr $script_dir/ $1 + cp -fr $script_dir/../../common/puppet-opnfv $1 } ##validates the network settings and update VagrantFile with network settings @@ -911,13 +904,7 @@ start_virtual_nodes() { rm -rf /tmp/genesis/ ##clone genesis and move into node folder - if ! 
GIT_SSL_NO_VERIFY=true git clone https://gerrit.opnfv.org/gerrit/genesis.git; then - printf '%s\n' 'deploy.sh: Unable to clone vagrant repo' >&2 - exit 1 - fi - - mv -f /tmp/genesis/foreman/ci $vm_dir/$node - rm -rf /tmp/genesis/ + clone_bgs $vm_dir/$node cd $vm_dir/$node @@ -1118,7 +1105,7 @@ main() { install_vagrant clean_tmp verify_vm_dir - clone_bgs + clone_bgs $vm_dir/foreman_vm configure_network configure_virtual start_foreman -- cgit 1.2.3-korg From 41125e53d5ca5e727e326078420cd900b9b48076 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 1 Sep 2015 13:22:35 -0400 Subject: Removes default vagrant route from virtual nodes Removes default gateway route to 10.0.2.2 after adding public default route JIRA: APEX-2 Change-Id: I7ba23e916f3995f39ee1677007ae5c1612cb3cc9 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 1 + 1 file changed, 1 insertion(+) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 9c1447b..46d4a96 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -1127,6 +1127,7 @@ start_virtual_nodes() { if ! vagrant ssh -c "route | grep default | grep $this_default_gw"; then echo "${blue} Adding public route back to $node! ${reset}" vagrant ssh -c "route add default gw $this_default_gw" + vagrant ssh -c "route delete default gw 10.0.2.2" fi popd done -- cgit 1.2.3-korg From 18ea7aaaaf32eb7027008196f2c784cd156fb5d9 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Tue, 1 Sep 2015 16:41:28 -0400 Subject: Fixes issue with deploy copying from /tmp and clean bailing early deploy.sh was failing in user deployments because the necessary puppet modules and vagrant files were being copied accidentally from /tmp and not the script directory. This patch fixes that issue. clean.sh was failing checks to see if vagrant or virtualbox was still running, due to a bug with checking "ps" and using grep. This patch resolves that. JIRA: APEX-16,APEX-17 Change-Id: I1faa6fc134c0308acb2e7b14be30f7cd3c99d109 Signed-off-by: Tim Rozet --- foreman/ci/clean.sh | 6 +++--- foreman/ci/deploy.sh | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'foreman/ci') diff --git a/foreman/ci/clean.sh b/foreman/ci/clean.sh index 1bd1713..345864b 100755 --- a/foreman/ci/clean.sh +++ b/foreman/ci/clean.sh @@ -128,7 +128,7 @@ if [ $skip_vagrant -eq 0 ]; then echo "${red}Unable to destroy $vm Vagrant VM! Attempting to killall vagrant if process is hung ${reset}" killall vagrant echo "${blue}Checking if vagrant was already destroyed and no process is active...${reset}" - if ps axf | grep vagrant; then + if ps axf | grep vagrant | grep -v 'grep'; then echo "${red}Vagrant process still exists after kill...exiting ${reset}" exit 1 else @@ -158,13 +158,13 @@ if [ $skip_vagrant -eq 0 ]; then echo "${blue}Checking for any remaining virtual box processes...${reset}" ###kill virtualbox - if ps axf | grep virtualbox; then + if ps axf | grep virtualbox | grep -v 'grep'; then echo "${blue}virtualbox processes are still running. Killing any remaining VirtualBox processes...${reset}" killall virtualbox fi ###kill any leftover VMs (brute force) - if ps axf | grep VBoxHeadless; then + if ps axf | grep VBoxHeadless | grep -v 'grep'; then echo "${blue}VBoxHeadless processes are still running. 
Killing any remaining VBoxHeadless processes...${reset}" killall VBoxHeadless fi diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 9c1447b..50a9dcf 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -30,6 +30,7 @@ declare -A admin_ip_arr declare -A public_ip_arr vm_dir=/var/opt/opnfv +script=`realpath $0` ##END VARS ##FUNCTIONS @@ -479,7 +480,6 @@ clean_tmp() { ##params: destination directory ##usage: clone_bgs /tmp/myvm/ clone_bgs() { - script=`realpath $0` script_dir="`dirname "$script"`" cp -fr $script_dir/ $1 cp -fr $script_dir/../../common/puppet-opnfv $1 @@ -993,7 +993,6 @@ start_virtual_nodes() { compute_wait_completed=false for node in ${nodes}; do - cd /tmp/ ##remove VM nodes incase it wasn't cleaned up rm -rf $vm_dir/$node -- cgit 1.2.3-korg From 591a1c4327c0793b4a15443f9e72b79ffbeaa226 Mon Sep 17 00:00:00 2001 From: Tim Rozet Date: Thu, 3 Sep 2015 15:44:30 -0400 Subject: Fixes bug in looking for interfaces Issue where the wrong field was being used for finding interfaces. JIRA: APEX-11 Change-Id: I94635837c73acd0d271dbc9d0474dc5ba688ff10 Signed-off-by: Tim Rozet --- foreman/ci/deploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'foreman/ci') diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index dcead8b..6771da0 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -509,7 +509,7 @@ configure_network() { echo "${blue}Detecting network configuration...${reset}" ##detect host 1 or 3 interface configuration #output=`ip link show | grep -E "^[0-9]" | grep -Ev ": lo|tun|virbr|vboxnet" | awk '{print $2}' | sed 's/://'` - output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f9` + output=`/bin/ls -l /sys/class/net | tail -n +2 | grep -v virtual | cut -d " " -f10` fi if [ ! "$output" ]; then -- cgit 1.2.3-korg
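
The two interface-detection patches above take the NIC name from a fixed space-delimited field of "ls -l /sys/class/net" output (first -f9, then -f10), so the field index has to track the exact column spacing of the listing. As a rough illustration of a field-position-independent alternative (a sketch only, not part of deploy.sh, and assuming the usual sysfs layout in which virtual devices such as lo, tun*, virbr* and vboxnet* resolve under /sys/devices/virtual/), the same list can be built by following each /sys/class/net symlink:

#!/bin/bash
# Sketch: list physical (non-virtual) interfaces without parsing ls -l columns.
# find_physical_nics is a hypothetical helper, not a function in deploy.sh.
find_physical_nics() {
  local path
  for path in /sys/class/net/*; do
    # Virtual devices resolve under /sys/devices/virtual/ when the symlink is followed
    if [[ "$(readlink -f "$path")" != */devices/virtual/* ]]; then
      basename "$path"   # print just the interface name, e.g. em1
    fi
  done
}

# Example usage, mirroring how deploy.sh consumes $output:
output=$(find_physical_nics)
echo "$output"

Because the interface name comes from the path itself rather than from a cut field, no index needs to be adjusted if the listing's column widths change.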