author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>   2018-08-06 13:54:33 +0000
committer  Gerrit Code Review <gerrit@opnfv.org>              2018-08-06 13:54:33 +0000
commit     7fe07e31a3aca294b4093ba238ce648fa4cb0f38 (patch)
tree       8701980292f88affa7e1f50d662b5eac790c13fa
parent     c52837f7d28b7c0f231be2b0215a7035b26ae302 (diff)
parent     827d8e0ea6f083f3b2082c8906a41258ed52f51a (diff)
Merge "[lib.sh] Reset virtual nodes after MaaS install"
-rwxr-xr-x  ci/deploy.sh            |  5 +++++
-rwxr-xr-x  mcp/config/states/maas  |  2 +-
-rw-r--r--  mcp/scripts/lib.sh      | 17 +++++++++++++++++
3 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 10b639e3c..40176073d 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -313,6 +313,11 @@ else
wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
/root/fuel/mcp/config/states/${state}"
+ if [ "${state}" = 'maas' ]; then
+ # For hybrid PODs (virtual + baremetal nodes), the virtual nodes
+ # should be reset to force a DHCP request from MaaS DHCP
+ reset_vms "${virtual_nodes[@]}"
+ fi
done
fi
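
Both this hunk and the maas state change below rely on the wait_for retry helper defined in mcp/scripts/lib.sh, which is not part of this diff. The following is a minimal, hypothetical sketch of such a helper, assuming the first argument is a retry count (fractional values like 5.0 truncated to their integer part) and the rest is the command string to retry; the real implementation may differ:

# retry_cmd: hypothetical stand-in for the wait_for helper from lib.sh.
# Usage: retry_cmd <attempts> "<command string>"
function retry_cmd {
  local attempts=${1%.*}; shift   # accept '5' or '5.0', keep the integer part
  local cmdstr=$*
  local i
  for i in $(seq 1 "${attempts}"); do
    if eval "${cmdstr}"; then
      return 0                    # command succeeded, stop retrying
    fi
    echo "[retry_cmd] attempt ${i}/${attempts} failed, retrying in 10s"
    sleep 10
  done
  echo "[retry_cmd] giving up after ${attempts} attempts: ${cmdstr}" >&2
  return 1
}

Under that assumption, raising the first argument from 5.0 to 10.0 (as the maas state change below does) simply doubles the number of ping attempts before the deploy gives up on a baremetal node.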
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index ec2458234..f321b7160 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -101,6 +101,6 @@ salt -C 'mas01*' pillar.item\
maas:region:admin:password
# Check all baremetal nodes are available
-wait_for 5.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
+wait_for 10.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
wait_for 10.0 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index c9c1bbd0a..9c12bff64 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -452,6 +452,23 @@ function update_mcpcontrol_network {
"<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live --config
}
+function reset_vms {
+ local vnodes=("$@")
+ local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+
+ # reset non-infrastructure vms, wait for them to come back online
+ for node in "${vnodes[@]}"; do
+ if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+ virsh reset "${node}"
+ fi
+ done
+ for node in "${vnodes[@]}"; do
+ if [[ ! "${node}" =~ (cfg01|mas01) ]]; then
+ wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
+ fi
+ done
+}
+
function start_vms {
local vnodes=("$@")
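
For context, a hedged usage sketch of the new reset_vms helper follows. The node names, SSH_OPTS and SSH_SALT values below are hypothetical placeholders; in practice ci/deploy.sh derives them from the POD configuration before calling the helper:

#!/bin/bash
# Hypothetical driver script; assumes it runs from the fuel repo root so
# that mcp/scripts/lib.sh (which defines reset_vms and wait_for) is found.
source mcp/scripts/lib.sh

# Placeholder SSH settings; the real values are set by ci/deploy.sh.
export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
export SSH_SALT="ubuntu@10.20.0.2"

# cfg01 and mas01 are infrastructure VMs and are skipped inside reset_vms;
# only the remaining nodes get a 'virsh reset' followed by a wait for
# 'saltutil.sync_all' to confirm they rejoined the Salt master.
virtual_nodes=('cfg01' 'mas01' 'ctl01' 'cmp001')
reset_vms "${virtual_nodes[@]}"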