---
# The purpose of this file is to define the drydock Region, which in turn drives
# the MaaS region.
schema: 'drydock/Region/v1'
metadata:
  schema: 'metadata/Document/v1'
  # NEWSITE-CHANGEME: Replace with the site name
  name: intel-pod15
  layeringDefinition:
    abstract: false
    layer: site
  storagePolicy: cleartext
  substitutions:
    - dest:
        path: .repositories.main_archive
      src:
        schema: pegleg/SoftwareVersions/v1
        name: software-versions
        path: .packages.repositories.main_archive
    # NEWSITE-CHANGEME: Substitutions of Deckhand SSH public keys into the
    # list of authorized keys which MaaS will register for the built-in
    # "ubuntu" account during the PXE process. Create a substitution rule
    # for each SSH key that should have access to the "ubuntu" account
    # (useful for troubleshooting problems before UAM or UAM-lite is
    # operational). SSH keys are stored as secrets in
    # site/intel-pod15/secrets; a commented sketch of one such secret
    # document follows the substitution list below.
    - dest:
        # Add/replace the item in the list
        path: .authorized_keys[0]
      src:
        schema: deckhand/PublicKey/v1
        # This should match the "name" metadata of the SSH key which will
        # be substituted, located in the site/intel-pod15/secrets folder.
        name: sridhar_ssh_public_key
        path: .
    - dest:
        # Increment the list index
        path: .authorized_keys[1]
      src:
        schema: deckhand/PublicKey/v1
        # Your SSH key
        name: mfix_ssh_public_key
        path: .
    - dest:
        # Increment the list index
        path: .authorized_keys[2]
      src:
        schema: deckhand/PublicKey/v1
        # Your SSH key
        name: cedric_ssh_public_key
        path: .
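    # For reference, a minimal sketch of one such secret document (assumed
    # layout following the deckhand/PublicKey/v1 schema; the key value is
    # illustrative only), stored as a file under site/intel-pod15/secrets:
    #
    #   ---
    #   schema: deckhand/PublicKey/v1
    #   metadata:
    #     schema: metadata/Document/v1
    #     name: sridhar_ssh_public_key
    #     layeringDefinition:
    #       abstract: false
    #       layer: site
    #     storagePolicy: cleartext
    #   data: ssh-rsa AAAAB3NzaC1yc2E... user@host
    #   ...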
data:
  tag_definitions: []
  # This is the list of SSH keys which MaaS will register for the built-in
  # "ubuntu" account during the PXE process. This list is populated by
  # substitution, so the same SSH keys do not need to be repeated in multiple
  # manifests.
  authorized_keys: []
  repositories:
    remove_unlisted: true
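  # The .repositories.main_archive key is populated by the first
  # substitution rule above. A minimal sketch of the source document's
  # relevant portion (assumed layout; the mirror URL is illustrative):
  #
  #   schema: pegleg/SoftwareVersions/v1
  #   metadata:
  #     name: software-versions
  #     ...
  #   data:
  #     packages:
  #       repositories:
  #         main_archive: http://archive.ubuntu.com/ubuntu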
...
ass="si">${fcnodes}; do salt -C 'mas01*' state.apply maas.machines.delete \ pillar="{'system_id': '${node_system_id}'}" sleep 10 done for node_system_id in ${ftnodes}; do salt -C 'mas01*' state.apply maas.machines.override_failed_testing \ pillar="{'system_id': '${node_system_id}'}" sleep 10 done if [ -n "${fcnodes}" ] || [ -n "${ftnodes}" ]; then salt -C 'mas01*' state.apply maas.machines return 1 fi local fdnodes=$(echo "${statusout}" | \ grep -Pzo 'status: (Failed deployment|Allocated)\n\s+system_id: \K.+\n') local rnodes=$(echo "${statusout}" | \ grep -Pzo 'status: Ready\n\s+system_id: \K.+\n') for node_system_id in ${fdnodes}; do salt -C 'mas01*' state.apply maas.machines.mark_broken_fixed \ pillar="{'system_id': '${node_system_id}'}" sleep 10 done if [ -n "${fdnodes}" ] || [ -n "${rnodes}" ]; then for node_system_id in ${fdnodes} ${rnodes}; do # For now, we allocate 30GB (fixed) for / on cmp nodes local node_hostname=$(echo "${statusout}" | \ grep -Pzo 'hostname: \K.+(?=\n.+\n\s+system_id: '"${node_system_id}"')') if [[ "${node_hostname}" =~ ^cmp ]]; then salt -C 'mas01*' state.apply maas.machines.set_storage_layout \ pillar="{'system_id': '${node_system_id}', 'lv_size': '32212254720'}" sleep 10 fi done salt -C 'mas01*' state.apply maas.machines.deploy return 1 fi return 0 } # Optionally destroy MaaS machines from a previous run if [ "${ERASE_ENV}" -gt 1 ]; then set +e; dnodes=$(salt 'mas01*' --out yaml state.apply maas.machines.status | \ grep -Pzo '\s+system_id: \K.+\n'); set -e cleanup_uefi for node_system_id in ${dnodes}; do salt -C 'mas01*' state.apply maas.machines.delete \ pillar="{'system_id': '${node_system_id}'}" sleep 10 done fi # MaaS rack/region controller, node commissioning salt -C 'mas01*' state.apply linux,salt,openssh,ntp salt -C 'mas01*' state.apply maas.pxe_nat salt -C 'mas01*' state.apply maas.cluster wait_for 10 "salt -C 'mas01*' state.apply maas.region" if [ -n "${bm_nodes}" ]; then salt -C 'mas01*' state.apply maas.machines fi # cleanup outdated salt keys sleep 30 salt-key --out yaml | awk '!/^(minions|- cfg01|- mas01)/ {print $2}' | \ xargs --no-run-if-empty -I{} salt-key -yd {} # MaaS node deployment wait_for 10 maas_fixup salt -C 'mas01*' pillar.item\ maas:region:admin:username \ maas:region:admin:password # Check all baremetal nodes are available wait_for 5.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)" wait_for 10.0 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"