author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-09-08 23:03:56 +0200
committer  Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-10-14 01:14:54 +0000
commit     ea64dc22ea171d45ae60aff682fcadf9d525889b (patch)
tree       4e098d0370f8b6fb43b4ecda31af6ce4642ef98f /mcp
parent     139aacf390d38b60f4fe4fd618a0bcf6b54b35cf (diff)
states: Split virtual_control_plane from maas

We should eventually also support baremetal deploys without a
virtualized control plane (VCP), so decouple MaaS provisioning from
VCP provisioning.

While at it, move "wait_for" bash function from maas state to
common library file, lib.sh.

Change-Id: I32c33135655cb6aceae901a5f92b51265a8c84b4
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
(cherry picked from commit ebd7067904b36ae32566da63e433365948a1f973)
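The practical effect on the state scripts is visible throughout the diff below: instead of each state defining its own helpers inline, they source lib.sh and call the shared wait_for. A minimal sketch of the resulting pattern (the relative path matches the tree layout in this change):

    #!/bin/bash
    set -x
    # Shared helpers (wait_for, etc.) now live in mcp/scripts/lib.sh
    source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"

    # Retry the quoted command up to 10 times, sleeping 10s between attempts
    wait_for 10 "salt -C 'kvm*' saltutil.sync_all"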
Diffstat (limited to 'mcp')

-rw-r--r--  mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml |  1
-rw-r--r--  mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml       |  1
-rw-r--r--  mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml   |  1
-rwxr-xr-x  mcp/config/states/maas                                   | 50
-rwxr-xr-x  mcp/config/states/openstack_ha                           |  4
-rwxr-xr-x  mcp/config/states/virtual_control_plane                  | 41
-rw-r--r--  mcp/scripts/lib.sh                                       | 33

7 files changed, 71 insertions(+), 60 deletions(-)
diff --git a/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
index ca5b6a24d..881359b24 100644
--- a/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
+++ b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
@@ -3,6 +3,7 @@ cluster:
domain: baremetal-mcp-ocata-ovs-ha.local
states:
- maas
+ - virtual_control_plane
- openstack_ha
- networks
virtual:
diff --git a/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml
index 6eb1e33ce..8b937835c 100644
--- a/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml
+++ b/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml
@@ -3,6 +3,7 @@ cluster:
domain: baremetal-mcp-ocata-ovs-dpdk-ha.local
states:
- maas
+ - virtual_control_plane
- dpdk
- openstack_ha
- neutron_compute
diff --git a/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml b/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml
index 893b6797b..b48738562 100644
--- a/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml
+++ b/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml
@@ -3,6 +3,7 @@ cluster:
domain: baremetal-mcp-ocata-odl-ha.local
states:
- maas
+ - virtual_control_plane
- opendaylight
- openstack_ha
- networks
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index 67ef6d559..df11d5b13 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -1,18 +1,8 @@
#!/bin/bash
+# shellcheck disable=SC1090
set -x
-function wait_for() {
- local total_attempts=$1; shift
- local cmdstr=$*
- local sleep_time=10
- echo "[NOTE] Waiting for cmd to return success: ${cmdstr}"
- # shellcheck disable=SC2034
- for attempt in $(seq "${total_attempts}"); do
- # shellcheck disable=SC2015
- eval "${cmdstr}" && break || true
- echo -n '.'; sleep "${sleep_time}"
- done
-}
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
# Wait for MaaS commissioning/deploy to finish, retry on failure
function maas_fixup() {
@@ -80,40 +70,4 @@ salt -C 'mas01*' pillar.item\
maas:region:admin:username \
maas:region:admin:password
-# KVM, compute node prereqs (libvirt first), VCP deployment
salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
-
-salt -C 'kvm*' pkg.install bridge-utils
-salt -C 'kvm*' state.apply linux.network
-salt -C 'kvm*' system.reboot
-wait_for 90 "! salt 'kvm*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'"
-
-salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
-
-salt -C 'kvm*' state.sls libvirt
-
-salt -C '* and not cfg01* and not mas01*' state.apply salt
-salt -C 'kvm*' saltutil.sync_all
-wait_for 10 "! salt -C 'kvm*' state.sls salt.control | " \
- "tee /dev/stderr | fgrep -q 'Not connected'"
-
-vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \
- awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}')
-
-# Check all vcp nodes are available
-rc=1
-while [ $rc -ne 0 ]; do
- rc=0
- for node in $vcp_nodes; do
- salt "$node" test.ping 2>/dev/null || { rc=$?; break; };
- done
- sleep 5
-done
-
-wait_for 10 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
-wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt"
-wait_for 10 "! salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp | " \
- "tee /dev/stderr | fgrep -q 'Not connected'"
-
-wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \
- $(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")"
diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha
index 78da80a3f..e11135f43 100755
--- a/mcp/config/states/openstack_ha
+++ b/mcp/config/states/openstack_ha
@@ -57,5 +57,5 @@ salt -I 'nginx:server' state.sls nginx
clstr_vip_addr=$(salt -C 'I@nginx:server and *01*' --out=yaml \
pillar.get _param:cluster_vip_address | awk '{print $2; exit}')
salt -C 'I@nginx:server and *01*' cp.push \
- /etc/ssl/certs/${clstr_vip_addr}-with-chain.crt upload_path='certs/os_cacert'
-cd /etc/ssl/certs && ln -s /var/cache/salt/master/minions/prx01.*/files/certs/os_cacert
+ "/etc/ssl/certs/${clstr_vip_addr}-with-chain.crt" upload_path='certs/os_cacert'
+cd /etc/ssl/certs && ln -sf /var/cache/salt/master/minions/prx01.*/files/certs/os_cacert
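Besides quoting "${clstr_vip_addr}" against word splitting, the switch from ln -s to ln -sf makes the certificate symlink idempotent, so re-running the openstack_ha state no longer aborts on an existing link. The difference, illustrated standalone (paths here are illustrative only):

    ln -s /tmp/ca.crt /etc/ssl/certs/os_cacert   # first run: link created
    ln -s /tmp/ca.crt /etc/ssl/certs/os_cacert   # re-run: fails with "File exists"
    ln -sf /tmp/ca.crt /etc/ssl/certs/os_cacert  # -f replaces the existing link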
diff --git a/mcp/config/states/virtual_control_plane b/mcp/config/states/virtual_control_plane
new file mode 100755
index 000000000..69f26c34e
--- /dev/null
+++ b/mcp/config/states/virtual_control_plane
@@ -0,0 +1,41 @@
+#!/bin/bash
+# shellcheck disable=SC1090
+set -x
+
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
+
+# KVM, compute node prereqs (libvirt first), VCP deployment
+salt -C 'kvm*' pkg.install bridge-utils
+salt -C 'kvm*' state.apply linux.network
+salt -C 'kvm*' system.reboot
+wait_for 90 "! salt 'kvm*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'"
+
+salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
+
+salt -C 'kvm*' state.sls libvirt
+
+salt -C '* and not cfg01* and not mas01*' state.apply salt
+salt -C 'kvm*' saltutil.sync_all
+wait_for 10 "! salt -C 'kvm*' state.sls salt.control | " \
+ "tee /dev/stderr | fgrep -q 'Not connected'"
+
+vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \
+ awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}')
+
+# Check all vcp nodes are available
+rc=1
+while [ $rc -ne 0 ]; do
+ rc=0
+ for node in $vcp_nodes; do
+ salt "$node" test.ping 2>/dev/null || { rc=$?; break; };
+ done
+ sleep 5
+done
+
+wait_for 10 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
+wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt"
+wait_for 10 "! salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp | " \
+ "tee /dev/stderr | fgrep -q 'Not connected'"
+
+wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \
+ $(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")"
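Two idioms recur in this new state. The compound target 'E@^(?!cfg01|mas01|kvm|cmp00).*' uses a PCRE negative lookahead on minion IDs to address everything except the foundation, MaaS, KVM and cmp00* nodes, i.e. the freshly spawned VCP VMs. And the negated pipeline handed to wait_for only succeeds once salt stops reporting 'Not connected' for any targeted minion, with tee /dev/stderr keeping the salt output visible while fgrep scans it. Reduced to its core (the 'web*' target is hypothetical):

    # Exit 0 only when no targeted minion reports "Not connected";
    # wait_for re-evaluates the whole string until that holds.
    wait_for 20 "! salt 'web*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'"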
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index fcc5d76ac..748c6dde1 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -3,7 +3,7 @@
# Library of shell functions
#
-generate_ssh_key() {
+function generate_ssh_key {
# shellcheck disable=SC2155
local mcp_ssh_key=$(basename "${SSH_KEY}")
local user=${USER}
@@ -20,7 +20,7 @@ generate_ssh_key() {
sudo install -D -o "${user}" -m 0600 "${mcp_ssh_key}" "${SSH_KEY}"
}
-get_base_image() {
+function get_base_image {
local base_image=$1
local image_dir=$2
@@ -28,7 +28,7 @@ get_base_image() {
wget -P "${image_dir}" -N "${base_image}"
}
-cleanup_vms() {
+function cleanup_vms {
# clean up existing nodes
for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do
virsh destroy "${node}"
@@ -40,7 +40,7 @@ cleanup_vms() {
done
}
-prepare_vms() {
+function prepare_vms {
local -n vnodes=$1
local base_image=$2
local image_dir=$3
@@ -60,7 +60,7 @@ prepare_vms() {
done
}
-create_networks() {
+function create_networks {
local -n vnode_networks=$1
# create required networks, including constant "mcpcontrol"
# FIXME(alav): since we renamed "pxe" to "mcpcontrol", we need to make sure
@@ -80,7 +80,7 @@ create_networks() {
done
}
-create_vms() {
+function create_vms {
local -n vnodes=$1
local -n vnodes_ram=$2
local -n vnodes_vcpus=$3
@@ -119,7 +119,7 @@ create_vms() {
done
}
-update_mcpcontrol_network() {
+function update_mcpcontrol_network {
# set static ip address for salt master node, MaaS node
# shellcheck disable=SC2155
local cmac=$(virsh domiflist cfg01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
@@ -131,7 +131,7 @@ update_mcpcontrol_network() {
"<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live
}
-start_vms() {
+function start_vms {
local -n vnodes=$1
# start vms
@@ -141,7 +141,7 @@ start_vms() {
done
}
-check_connection() {
+function check_connection {
local total_attempts=60
local sleep_time=5
local attempt=1
@@ -163,7 +163,7 @@ check_connection() {
set -e
}
-parse_yaml() {
+function parse_yaml {
local prefix=$2
local s
local w
@@ -183,3 +183,16 @@ parse_yaml() {
}
}' | sed 's/_=/+=/g'
}
+
+function wait_for {
+ local total_attempts=$1; shift
+ local cmdstr=$*
+ local sleep_time=10
+ echo "[NOTE] Waiting for cmd to return success: ${cmdstr}"
+ # shellcheck disable=SC2034
+ for attempt in $(seq "${total_attempts}"); do
+ # shellcheck disable=SC2015
+ eval "${cmdstr}" && break || true
+ echo -n '.'; sleep "${sleep_time}"
+ done
+}
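Since wait_for evaluates its command string with eval on every attempt, callers can hand it arbitrary pipelines; anything that must expand at retry time rather than at call time needs single quotes or escaping. A hypothetical usage sketch (the health-check URL is made up):

    source mcp/scripts/lib.sh
    # 15 attempts, 10s apart: wait until the endpoint answers with HTTP 2xx
    wait_for 15 "curl -sf http://health.example.local/status"

One design note: as written, the loop does not propagate failure after exhausting its attempts (the final sleep's zero exit status is what the function returns), so callers needing a hard stop must re-check the awaited condition themselves.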