diff options
author | Alexandru Avadanii <Alexandru.Avadanii@enea.com> | 2017-09-08 23:03:56 +0200 |
---|---|---|
committer | Alexandru Avadanii <Alexandru.Avadanii@enea.com> | 2017-10-14 01:14:54 +0000 |
commit | ea64dc22ea171d45ae60aff682fcadf9d525889b (patch) | |
tree | 4e098d0370f8b6fb43b4ecda31af6ce4642ef98f /mcp/config | |
parent | 139aacf390d38b60f4fe4fd618a0bcf6b54b35cf (diff) |
states: Split virtual_control_plane from maas
We should eventually also support baremetal deploys without a
virtualized control plane (VCP), so decouple MaaS provisioning
from VCP provisioning.
While at it, move the "wait_for" bash function from the maas state to the
common library file, lib.sh.
Change-Id: I32c33135655cb6aceae901a5f92b51265a8c84b4
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
(cherry picked from commit ebd7067904b36ae32566da63e433365948a1f973)
Diffstat (limited to 'mcp/config')
-rw-r--r-- | mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml | 1 | ||||
-rw-r--r-- | mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml | 1 | ||||
-rw-r--r-- | mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml | 1 | ||||
-rwxr-xr-x | mcp/config/states/maas | 50 | ||||
-rwxr-xr-x | mcp/config/states/openstack_ha | 4 | ||||
-rwxr-xr-x | mcp/config/states/virtual_control_plane | 41 |
6 files changed, 48 insertions, 50 deletions
diff --git a/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml index ca5b6a24d..881359b24 100644 --- a/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml +++ b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml @@ -3,6 +3,7 @@ cluster: domain: baremetal-mcp-ocata-ovs-ha.local states: - maas + - virtual_control_plane - openstack_ha - networks virtual: diff --git a/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml index 6eb1e33ce..8b937835c 100644 --- a/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml +++ b/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml @@ -3,6 +3,7 @@ cluster: domain: baremetal-mcp-ocata-ovs-dpdk-ha.local states: - maas + - virtual_control_plane - dpdk - openstack_ha - neutron_compute diff --git a/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml b/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml index 893b6797b..b48738562 100644 --- a/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml +++ b/mcp/config/scenario/baremetal/os-odl-nofeature-ha.yaml @@ -3,6 +3,7 @@ cluster: domain: baremetal-mcp-ocata-odl-ha.local states: - maas + - virtual_control_plane - opendaylight - openstack_ha - networks diff --git a/mcp/config/states/maas b/mcp/config/states/maas index 67ef6d559..df11d5b13 100755 --- a/mcp/config/states/maas +++ b/mcp/config/states/maas @@ -1,18 +1,8 @@ #!/bin/bash +# shellcheck disable=SC1090 set -x -function wait_for() { - local total_attempts=$1; shift - local cmdstr=$* - local sleep_time=10 - echo "[NOTE] Waiting for cmd to return success: ${cmdstr}" - # shellcheck disable=SC2034 - for attempt in $(seq "${total_attempts}"); do - # shellcheck disable=SC2015 - eval "${cmdstr}" && break || true - echo -n '.'; sleep "${sleep_time}" - done -} +source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh" # Wait for MaaS commissioning/deploy to finish, retry on failure function maas_fixup() { @@ 
-80,40 +70,4 @@ salt -C 'mas01*' pillar.item\ maas:region:admin:username \ maas:region:admin:password -# KVM, compute node prereqs (libvirt first), VCP deployment salt -C '* and not cfg01* and not mas01*' saltutil.sync_all - -salt -C 'kvm*' pkg.install bridge-utils -salt -C 'kvm*' state.apply linux.network -salt -C 'kvm*' system.reboot -wait_for 90 "! salt 'kvm*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" - -salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp - -salt -C 'kvm*' state.sls libvirt - -salt -C '* and not cfg01* and not mas01*' state.apply salt -salt -C 'kvm*' saltutil.sync_all -wait_for 10 "! salt -C 'kvm*' state.sls salt.control | " \ - "tee /dev/stderr | fgrep -q 'Not connected'" - -vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \ - awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}') - -# Check all vcp nodes are available -rc=1 -while [ $rc -ne 0 ]; do - rc=0 - for node in $vcp_nodes; do - salt "$node" test.ping 2>/dev/null || { rc=$?; break; }; - done - sleep 5 -done - -wait_for 10 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all" -wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt" -wait_for 10 "! 
salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp | " \ - "tee /dev/stderr | fgrep -q 'Not connected'" - -wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \ - $(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")" diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha index 78da80a3f..e11135f43 100755 --- a/mcp/config/states/openstack_ha +++ b/mcp/config/states/openstack_ha @@ -57,5 +57,5 @@ salt -I 'nginx:server' state.sls nginx clstr_vip_addr=$(salt -C 'I@nginx:server and *01*' --out=yaml \ pillar.get _param:cluster_vip_address | awk '{print $2; exit}') salt -C 'I@nginx:server and *01*' cp.push \ - /etc/ssl/certs/${clstr_vip_addr}-with-chain.crt upload_path='certs/os_cacert' -cd /etc/ssl/certs && ln -s /var/cache/salt/master/minions/prx01.*/files/certs/os_cacert + "/etc/ssl/certs/${clstr_vip_addr}-with-chain.crt" upload_path='certs/os_cacert' +cd /etc/ssl/certs && ln -sf /var/cache/salt/master/minions/prx01.*/files/certs/os_cacert diff --git a/mcp/config/states/virtual_control_plane b/mcp/config/states/virtual_control_plane new file mode 100755 index 000000000..69f26c34e --- /dev/null +++ b/mcp/config/states/virtual_control_plane @@ -0,0 +1,41 @@ +#!/bin/bash +# shellcheck disable=SC1090 +set -x + +source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh" + +# KVM, compute node prereqs (libvirt first), VCP deployment +salt -C 'kvm*' pkg.install bridge-utils +salt -C 'kvm*' state.apply linux.network +salt -C 'kvm*' system.reboot +wait_for 90 "! salt 'kvm*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'" + +salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp + +salt -C 'kvm*' state.sls libvirt + +salt -C '* and not cfg01* and not mas01*' state.apply salt +salt -C 'kvm*' saltutil.sync_all +wait_for 10 "! 
salt -C 'kvm*' state.sls salt.control | " \ + "tee /dev/stderr | fgrep -q 'Not connected'" + +vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \ + awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}') + +# Check all vcp nodes are available +rc=1 +while [ $rc -ne 0 ]; do + rc=0 + for node in $vcp_nodes; do + salt "$node" test.ping 2>/dev/null || { rc=$?; break; }; + done + sleep 5 +done + +wait_for 10 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all" +wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt" +wait_for 10 "! salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp | " \ + "tee /dev/stderr | fgrep -q 'Not connected'" + +wait_for 10 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \ + $(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")" |