From 58af9a94ef78bbcf3f0593d4170d32ebce721455 Mon Sep 17 00:00:00 2001
From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
Date: Thu, 7 Feb 2019 19:51:04 +0100
Subject: [baremetal] Containerize MaaS

- replace the mas01 VM with a Docker container;
- drop the virsh-managed `mcpcontrol` network, including the special
  handling previously required for it across all scripts;
- drop infrastructure VM handling from the scripts; the only VMs we
  still manage are cluster VMs for virtual and/or hybrid deployments;
- drop the SSH server from mas01;
- stop running the linux state on mas01, as all prerequisites are now
  handled during the Docker build or via entrypoint.sh; for
  completeness, we still keep the pillar data in sync with the actual
  mas01 configuration, so running the state manually would still work;
- make port 5240 available on the jumpserver for MaaS dashboard access
  (see the sketch below);
- docs: update diagrams and text to reflect these changes;
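
For reference, the containerized mas01 is expected to come up roughly
as in the minimal sketch below; the image name and volume mount are
assumptions for illustration, not the exact values used by the deploy
scripts:

    # Hypothetical invocation; the real one is driven by the deploy
    # scripts. `--publish` is what makes the MaaS dashboard (port 5240)
    # reachable on the jumpserver.
    docker run --detach --name mas01 \
        --publish 5240:5240 \
        --volume /var/lib/maas:/var/lib/maas \
        opnfv/fuel-maas

With the SSH server gone, shell access to mas01 goes through
`docker exec -it mas01 bash` instead, and since pillar data is kept in
sync, `salt -C 'mas01*' state.apply linux` would still converge if run
by hand.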

Change-Id: I6d9424995e9a90c530fd7577edf401d552bab929
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
---
 mcp/config/scenario/defaults.yaml.j2 |  7 -------
 mcp/config/states/maas               |  4 +++-
 mcp/config/states/virtual_init       | 13 ++++++-------
 3 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/mcp/config/scenario/defaults.yaml.j2 b/mcp/config/scenario/defaults.yaml.j2
index 73799c5bb..4c6a86f03 100644
--- a/mcp/config/scenario/defaults.yaml.j2
+++ b/mcp/config/scenario/defaults.yaml.j2
@@ -12,12 +12,6 @@ x86_64:
   default:
     vcpus: 2
     ram: 4096
-    virtual: &arch_default_virtual_nodes_infra
-      nodes:
-        infra:
-{%- if nm.cluster.has_baremetal_nodes %}
-          - mas01
-{%- endif %}
     cluster: &arch_default_cluster_states
       states:
         - virtual_init
@@ -49,7 +43,6 @@ aarch64:
   default:
     vcpus: 6
     ram: 4096
-    virtual: *arch_default_virtual_nodes_infra
     cluster: *arch_default_cluster_states
   common:
     apt:
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index 47f66a451..28ef4cae0 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -17,6 +17,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
 bm_nodes=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
            awk '/^\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
 
+wait_for 60.0 "salt --out yaml -C 'mas01*' service.status maas-fixup | fgrep -q 'false'"
+
 # Optionally destroy MaaS machines from a previous run
 if [ "${ERASE_ENV}" -gt 1 ]; then
   cleanup_uefi
@@ -26,7 +28,7 @@ if [ "${ERASE_ENV}" -gt 1 ]; then
 fi
 
 # MaaS rack/region controller, node commissioning
-wait_for 10.0 "salt -C 'mas01*' state.apply linux,salt,openssh,ntp,iptables"
+wait_for 10.0 "salt -C 'mas01*' state.apply salt,iptables"
 salt -C 'mas01*' state.apply maas.cluster
 
 wait_for 10 "salt -C 'mas01*' state.apply maas.region"
diff --git a/mcp/config/states/virtual_init b/mcp/config/states/virtual_init
index 46d880471..e8837571b 100755
--- a/mcp/config/states/virtual_init
+++ b/mcp/config/states/virtual_init
@@ -17,7 +17,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/xdf_data.sh"
 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
 # shellcheck disable=SC2154,SC2086,SC2116
 LOCAL_VIRT_NODES=$(echo ${virtual_nodes[*]}) # unquoted to filter space
-[[ ! "${LOCAL_VIRT_NODES}" =~ mas01 ]] || LOCAL_VIRT_NODES='mas01'
+[[ ! "${cluster_states[*]}" =~ maas ]] || LOCAL_VIRT_NODES='mas01'
 NODE_MASK="${LOCAL_VIRT_NODES// /|}"
 
 wait_for 5.0 "salt-call state.sls reclass,linux.network,salt.minion \
@@ -28,13 +28,12 @@ wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.refresh_pillar"
 
 # Init specific to VMs on FN (all for virtual, mas for baremetal)
 wait_for 3.0 "(for n in ${LOCAL_VIRT_NODES}; do salt -C \${n}.* test.ping || exit; done)"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.sync_all"
+[[ ! "${NODE_MASK}" =~ mas01 ]] || exit 0
+
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux"
 
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux.system,linux.storage"
-wait_for 2.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux.network"
 salt -C "E@^(${NODE_MASK}).*" system.reboot
 wait_for 90.0 "salt -C 'E@^(${NODE_MASK}).*' test.ping"
 wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' pkg.upgrade refresh=False dist_upgrade=True"
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.sync_all"
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.apply salt"
-
-wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls ntp"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.apply salt,ntp"
-- 