author     Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-08-01 22:18:41 +0200
committer  Alexandru Avadanii <Alexandru.Avadanii@enea.com>    2017-08-17 02:59:30 +0200
commit     5039d069265df15ed3d8e41f7a1c7f9457a9d58a (patch)
tree       18a9160f72be9a01ef0008e3aa9912e18262057d /mcp/config
parent     9720ddf955b76d678a08dc7ea53684400c659ce3 (diff)
Bring in baremetal support
- ci/deploy.sh: fail if default scenario file is missing;
- start by copying reclass/classes/cluster/virtual-mcp-ocata-ovs as
  classes/cluster/baremetal-mcp-ocata-ovs;
- add new state (maas) that will handle MaaS configuration;
- Split PXE network in two for baremetal:
  * rename old "pxe" virtual network to "mcpcontrol", make it
    non-configurable and identical for baremetal/virtual deploys;
  * new "pxebr" bridge is dedicated for MaaS fabric network, which
    comes with its own DHCP, TFTP etc.;
- Drop hardcoded PXE gateway & static IP for MaaS node, since
  "mcpcontrol" remains a NAT-ed virtual network, with its own DHCP;
- Keep internet access available on first interfaces for cfg01/mas01;
- Align MaaS IP addrs (all x.y.z.3), add public IP for easy debug via
  MaaS dashboard;
- Add static IP in new network segment (192.168.11.3/24) on MaaS
  node's PXE interface;
- Set MaaS PXE interface MTU 1500 (weird network errors with jumbo);
- MaaS node: Add NAT iptables traffic forward from "mcpcontrol" to
  "pxebr" interfaces;
- MaaS: Add hardcoded lf-pod2 machine info (fixed indentation in v6);
- Switch our targeted scenario to HA:
  * scenario: s/os-nosdn-nofeature-noha/os-nosdn-nofeature-ha/
- maas region: Use mcp.rsa.pub from ~ubuntu/.ssh/authorized_keys;
- add route for 192.168.11.0/24 via mas01 on cfg01;
- fix race condition on kvm nodes network setup:
  * add "noifupdown" support in salt formula for linux.network;
  * keep primary eth/br-mgmt unconfigured till reboot;

TODO:
- Read all this info from PDF (Pod Descriptor File) later;
- investigate leftover references to eno2, eth3;
- add public network interfaces config, IPs;
- improve wait conditions for MaaS commission/deploy;
- report upstream breakage in system.single;

Change-Id: Ie8dd584b140991d2bd992acdfe47f5644bf51409
Signed-off-by: Michael Polenchuk <mpolenchuk@mirantis.com>
Signed-off-by: Guillermo Herrero <Guillermo.Herrero@enea.com>
Signed-off-by: Charalampos Kominos <Charalampos.Kominos@enea.com>
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
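Two of the items above are easiest to see as plain shell: the NAT/forwarding between "mcpcontrol" and "pxebr" on the MaaS node, and the static route added on cfg01. The sketch below is illustrative only; the interface names (ens3 for "mcpcontrol", ens4 for "pxebr") are assumptions, and the actual rules are applied by the maas.pxe_nat Salt state and the route command in mcp/config/states/maas.

    # Illustrative sketch of the MaaS node NAT forward (interface names
    # are assumptions; the real rules come from the maas.pxe_nat state):
    echo 1 > /proc/sys/net/ipv4/ip_forward
    iptables -t nat -A POSTROUTING -o ens3 -j MASQUERADE
    iptables -A FORWARD -i ens4 -o ens3 -j ACCEPT
    iptables -A FORWARD -i ens3 -o ens4 -m state --state RELATED,ESTABLISHED -j ACCEPT

    # Static route on cfg01 towards the new PXE segment, via mas01
    # (same command as in mcp/config/states/maas):
    route add -net 192.168.11.0/24 gw 192.168.10.3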
Diffstat (limited to 'mcp/config')
-rw-r--r--   mcp/config/scenario/baremetal/defaults.yaml                |  6
-rw-r--r--   mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml   | 15
-rwxr-xr-x   mcp/config/states/maas                                     | 56
-rwxr-xr-x   mcp/config/states/openstack_ha                             | 12
4 files changed, 89 insertions(+), 0 deletions(-)
diff --git a/mcp/config/scenario/baremetal/defaults.yaml b/mcp/config/scenario/baremetal/defaults.yaml
new file mode 100644
index 000000000..b841e88c9
--- /dev/null
+++ b/mcp/config/scenario/baremetal/defaults.yaml
@@ -0,0 +1,6 @@
+base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+virtual:
+ default:
+ vcpus: 2
+ ram: 4096
+
diff --git a/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
new file mode 100644
index 000000000..1118d28f6
--- /dev/null
+++ b/mcp/config/scenario/baremetal/os-nosdn-nofeature-ha.yaml
@@ -0,0 +1,15 @@
+cluster:
+ domain: baremetal-mcp-ocata-ovs-ha.local
+ states:
+ - maas
+ - openstack_ha
+ - openstack
+ - neutron_compute
+ - networks
+virtual:
+ nodes:
+ - cfg01
+ - mas01
+ mas01:
+ vcpus: 4
+ ram: 16384
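The cluster:states list in this scenario file is what sequences the deployment: each entry names an executable under mcp/config/states/ that is applied in order from the Salt master. A minimal sketch of such a driver loop follows; the loop itself (and its placement in ci/deploy.sh) is an assumption, only the state names are taken from the file above.

    # Hypothetical driver loop; state names come from the scenario file,
    # the orchestration shown here is assumed, not part of this diff:
    for state in maas openstack_ha openstack neutron_compute networks; do
        echo "[NOTE] Applying cluster state: ${state}"
        bash "./mcp/config/states/${state}" || exit 1
    done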
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
new file mode 100755
index 000000000..8b599105d
--- /dev/null
+++ b/mcp/config/states/maas
@@ -0,0 +1,56 @@
+#!/bin/bash
+function wait_for() {
+ local cmdstr=$@
+ local total_attempts=360
+ local sleep_time=10
+ local attempt=1
+ echo "[NOTE] Waiting for cmd to return success: ${cmdstr}"
+ while ((attempt <= total_attempts)); do
+ eval "${cmdstr}" && break || true
+ echo -n '.'; sleep "${sleep_time}"
+ ((attempt+=1))
+ done
+}
+
+# MaaS rack/region controller, node commissioning
+salt -C 'mas01*' cmd.run "add-apt-repository ppa:maas/stable"
+
+salt -C 'mas01*' state.apply linux,salt,openssh,ntp
+salt -C 'mas01*' state.apply linux.network.interface
+salt -C 'mas01*' state.apply maas.pxe_nat
+salt -C 'mas01*' state.apply maas.cluster
+salt -C 'cfg01*' cmd.run \
+ "route add -net 192.168.11.0/24 gw ${MAAS_IP:-192.168.10.3}"
+
+wait_for "salt -C 'mas01*' state.apply maas.region"
+
+salt -C 'mas01*' state.apply maas.machines
+# TODO: relax cond, as this is not re-entrant (e.g. nodes already deployed)
+wait_for "salt 'mas01*' --out yaml state.apply maas.machines.status | " \
+ "fgrep -q 'Ready: 5'"
+
+# MaaS node deployment
+salt -C 'mas01*' state.apply maas.machines.deploy
+wait_for "salt 'mas01*' --out yaml state.apply maas.machines.status | " \
+ "fgrep -q 'Deployed: 5'"
+
+salt -C 'mas01*' pillar.item\
+ maas:region:admin:username \
+ maas:region:admin:password
+
+# KVM, compute node prereqs (libvirt first), VCP deployment
+salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
+salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
+
+salt -C 'kvm*' state.sls libvirt
+
+salt -C '* and not cfg01* and not mas01*' system.reboot
+wait_for "! salt '*' test.ping | fgrep -q 'Not connected'"
+
+salt -C '* and not cfg01* and not mas01*' state.apply salt
+
+salt -C 'kvm*' state.sls salt.control
+
+salt -C '* and not cfg01* and not mas01*' saltutil.sync_all
+salt -C '* and not cfg01* and not mas01*' state.apply salt
+salt -C '* and not cfg01* and not mas01*' state.apply linux,ntp
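The pillar.item call above only prints the MaaS admin credentials. For manual debugging on mas01, those credentials can also be used against the MaaS CLI directly; a hedged sketch, where the profile name "opnfv", the local API endpoint and the JSON field being counted are all assumptions, not part of this patch:

    # Hypothetical manual check on mas01 (profile name and endpoint assumed):
    APIKEY=$(sudo maas apikey --username=opnfv)
    maas login opnfv http://localhost:5240/MAAS/api/2.0/ "${APIKEY}"
    maas opnfv machines read | grep -c '"status_name": "Deployed"'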
diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha
new file mode 100755
index 000000000..293273594
--- /dev/null
+++ b/mcp/config/states/openstack_ha
@@ -0,0 +1,12 @@
+salt -I 'keepalived:cluster' state.sls keepalived -b 1
+
+salt -I 'rabbitmq:server' state.sls rabbitmq # maybe twice
+salt -I 'rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+
+salt -I 'glusterfs:server' state.sls glusterfs.server.service
+salt -I 'glusterfs:server' state.sls glusterfs.server.setup -b 1
+salt -I 'glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
+
+salt -I 'galera:master' state.sls galera
+salt -I 'galera:slave' state.sls galera
+salt -I 'galera:master' mysql.status | grep -A1 wsrep_cluster_size
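The checks at the end of this state only print cluster health; they do not assert it. A hedged post-deploy sanity check, assuming a three-node control plane (the node count is an assumption, not stated in this patch):

    # A healthy Galera cluster reports wsrep_cluster_size equal to the
    # number of database nodes (assumed 3 for this HA scenario):
    salt -I 'galera:master' mysql.status | grep -A1 wsrep_cluster_size
    # All controllers should appear under running_nodes:
    salt -I 'rabbitmq:server' cmd.run "rabbitmqctl cluster_status" | grep -A2 running_nodes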