Diffstat (limited to 'mcp/config')
-rw-r--r--  mcp/config/scenario/baremetal/defaults.yaml               |  6
-rw-r--r--  mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml  | 15
-rwxr-xr-x  mcp/config/states/maas                                    | 69
-rwxr-xr-x  mcp/config/states/openstack_ha                            | 12
4 files changed, 102 insertions(+), 0 deletions(-)
diff --git a/mcp/config/scenario/baremetal/defaults.yaml b/mcp/config/scenario/baremetal/defaults.yaml
new file mode 100644
index 000000000..b841e88c9
--- /dev/null
+++ b/mcp/config/scenario/baremetal/defaults.yaml
@@ -0,0 +1,6 @@
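+# Defaults for virtual nodes: base_image is the cloud image the VMs boot from;
+# per-node settings in the scenario file override the 'default' sizing below.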
+base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+virtual:
+  default:
+    vcpus: 2
+    ram: 4096
+
diff --git a/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
new file mode 100644
index 000000000..1118d28f6
--- /dev/null
+++ b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
@@ -0,0 +1,15 @@
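+# HA baremetal scenario definition; the cluster:states entries map to the
+# scripts under mcp/config/states and are presumably applied in the listed order.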
+cluster:
+  domain: baremetal-mcp-ocata-ovs-ha.local
+  states:
+    - maas
+    - openstack_ha
+    - openstack
+    - neutron_compute
+    - networks
+virtual:
+  nodes:
+    - cfg01
+    - mas01
+  mas01:
+    vcpus: 4
+    ram: 16384
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
new file mode 100755
index 000000000..0c016170c
--- /dev/null
+++ b/mcp/config/states/maas
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Retry a command every ${sleep_time} seconds, up to ${total_attempts} times;
+# returns non-zero if the command never succeeds
+function wait_for() {
+    local cmdstr=$*
+    local total_attempts=360
+    local sleep_time=10
+    local attempt=1
+    echo "[NOTE] Waiting for cmd to return success: ${cmdstr}"
+    while ((attempt <= total_attempts)); do
+        eval "${cmdstr}" && return 0
+        echo -n '.'; sleep "${sleep_time}"
+        ((attempt+=1))
+    done
+    echo "[ERROR] Command did not return success: ${cmdstr}"
+    return 1
+}
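+# e.g.: wait_for "salt -C 'mas01*' test.ping"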
+
+# MaaS rack/region controller, node commissioning
+salt -C 'mas01*' cmd.run "add-apt-repository ppa:maas/stable"
+
+salt -C 'mas01*' state.apply linux,salt,openssh,ntp
+salt -C 'mas01*' state.apply linux.network.interface
+salt -C 'mas01*' state.apply maas.pxe_nat
+salt -C 'mas01*' state.apply maas.cluster
+salt -C 'cfg01*' cmd.run \
+ "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.3}"
+
+wait_for "salt -C 'mas01*' state.apply maas.region"
+
+salt -C 'mas01*' state.apply maas.machines
+# TODO: relax cond, as this is not re-entrant (e.g. nodes already deployed)
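+# NOTE: the 'Ready: 5' / 'Deployed: 5' checks below assume 5 baremetal
+# machines are defined via maas.machines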
+wait_for "salt 'mas01*' --out yaml state.apply maas.machines.status | " \
+ "tee /dev/stderr | fgrep -q 'Ready: 5'"
+
+# MaaS node deployment
+salt -C 'mas01*' state.apply maas.machines.deploy
+wait_for "salt 'mas01*' --out yaml state.apply maas.machines.status | " \
+ "tee /dev/stderr | fgrep -q 'Deployed: 5'"
+
+salt -C 'mas01*' pillar.item \
+    maas:region:admin:username \
+    maas:region:admin:password
+
+# KVM, compute node prereqs (libvirt first), VCP deployment
+salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
+
+salt -C 'kvm*' pkg.install bridge-utils
+salt -C 'kvm*' state.apply linux.network
+salt -C 'kvm*' system.reboot
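+# wait until no minion reports 'Not connected' (the kvm nodes are rebooting)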
+wait_for "! salt '*' test.ping | tee /dev/stderr | fgrep -q 'Not connected'"
+
+salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
+
+salt -C 'kvm*' state.sls libvirt
+
+salt -C '* and not cfg01* and not mas01*' state.apply salt
+salt -C 'kvm*' state.sls salt.control
+
+vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | awk '/\s+\w+:$/ {gsub(/:$/, "*"); print $1}')
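+# vcp_nodes now holds one glob per VCP minion ID (e.g. 'ctl01*'; actual names
+# come from the salt:control:cluster:internal:node pillar)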
+
+# Check all vcp nodes are available
+rc=1
+while [ ${rc} -ne 0 ]; do
+    rc=0
+    for node in ${vcp_nodes}; do
+        salt "${node}" test.ping 2>/dev/null 1>&2 || { rc=$?; break; }
+    done
+    [ ${rc} -eq 0 ] || sleep 5  # avoid hammering the master while VMs boot
+done
+
+salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
+salt -C '* and not cfg01* and not mas01*' state.apply salt
+salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha
new file mode 100755
index 000000000..293273594
--- /dev/null
+++ b/mcp/config/states/openstack_ha
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+# -b 1: apply the state to one minion at a time
+salt -I 'keepalived:cluster' state.sls keepalived -b 1
+
+# the rabbitmq state may need to be applied twice before the cluster assembles
+salt -I 'rabbitmq:server' state.sls rabbitmq
+salt -I 'rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+
+salt -I 'glusterfs:server' state.sls glusterfs.server.service
+salt -I 'glusterfs:server' state.sls glusterfs.server.setup -b 1
+salt -I 'glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
+
+salt -I 'galera:master' state.sls galera
+salt -I 'galera:slave' state.sls galera
+salt -I 'galera:master' mysql.status | grep -A1 wsrep_cluster_size
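+# wsrep_cluster_size should equal the number of galera cluster members
+# (typically 3 in an HA deployment)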