path: root/mcp/scripts/lib.sh
#
# Library of shell functions
#
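# NOTE: callers are expected to set SSH_KEY, SSH_OPTS and SALT_MASTER
# before using these functions
#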

generate_ssh_key() {
  # generate the SSH key pair used to reach the virtual nodes (skipped if
  # ${SSH_KEY} already exists), then install a copy in /tmp owned by the
  # invoking (non-root) user
  local user=${SUDO_USER:-$USER}

  [ -f "$SSH_KEY" ] || ssh-keygen -f "${SSH_KEY}" -N ''
  install -o "${user}" -m 0600 "${SSH_KEY}" /tmp/
}

get_base_image() {
  # fetch the base cloud image into /tmp (wget -nc skips an existing copy)
  # and make sure the local images/ directory exists for later use
  local base_image=$1

  mkdir -p images
  wget -P /tmp -nc "${base_image}"
}

cleanup_vms() {
  # clean up existing nodes
  for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do
    virsh destroy "${node}"
  done
  for node in $(virsh list --name --all | grep -P '\w{3}\d{2}'); do
    virsh undefine --nvram "${node}"
  done
}

prepare_vms() {
  local -n vnodes=$1
  local base_image=$2

  cleanup_vms
  get_base_image "${base_image}"
  envsubst < user-data.template > user-data.sh

  for node in "${vnodes[@]}"; do
    # create/prepare images
    ./create-config-drive.sh -k "${SSH_KEY}.pub" -u user-data.sh -h "${node}" "images/mcp_${node}.iso"
    cp "/tmp/${base_image##*/}" "images/mcp_${node}.qcow2"
    qemu-img resize "images/mcp_${node}.qcow2" 100G
  done
}

create_networks() {
  # create required networks
  for net in pxe mgmt internal public; do
    if virsh net-info "${net}" >/dev/null 2>&1; then
      virsh net-destroy "${net}"
      virsh net-undefine "${net}"
    fi
    virsh net-define "net_${net}.xml"
    virsh net-autostart "${net}"
    virsh net-start "${net}"
  done
}

create_vms() {
  local -n vnodes=$1
  local -n vnodes_ram=$2
  local -n vnodes_vcpus=$3

  # create vms with specified options
  for node in "${vnodes[@]}"; do
    virt-install --name "${node}" \
      --ram "${vnodes_ram[$node]}" --vcpus "${vnodes_vcpus[$node]}" \
      --cpu host-passthrough --accelerate \
      --network network:pxe,model=virtio \
      --network network:mgmt,model=virtio \
      --network network:internal,model=virtio \
      --network network:public,model=virtio \
      --disk "path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native" \
      --os-type linux --os-variant none \
      --boot hd --vnc --console pty --autostart --noreboot \
      --disk "path=$(pwd)/images/mcp_${node}.iso,device=cdrom" \
      --noautoconsole
  done
}

update_pxe_network() {
  # set static ip address for salt master node
  virsh net-update pxe add ip-dhcp-host \
  "<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
}

start_vms() {
  local -n vnodes=$1

  # start vms, sleeping a random 1-5 seconds between each to stagger boot
  for node in "${vnodes[@]}"; do
    virsh start "${node}"
    sleep $((RANDOM % 5 + 1))
  done
}

check_connection() {
  local total_attempts=60
  local sleep_time=5
  local attempt=1

  set +e
  echo '[INFO] Attempting to get into Salt master ...'

  # wait until ssh on Salt master is available
  # NOTE: SSH_OPTS is left unquoted on purpose so multiple options expand
  while ((attempt <= total_attempts)); do
    ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" uptime
    case $? in
      0) echo "${attempt}> Success"; break ;;
      *) echo "${attempt}/${total_attempts}> SSH server not ready yet, waiting ${sleep_time} seconds ..." ;;
    esac
    sleep "${sleep_time}"
    ((attempt+=1))
  done
  set -e
}

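# parse a simple YAML file ($1) into shell variable assignments, prefixing
# each variable name with $2 and joining nested keys with underscores;
# list entries are appended to a bash array named after their parent key
# (via the trailing 's/_=/+=/g')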
parse_yaml() {
  local prefix=$2
  local s
  local w
  local fs
  s='[[:space:]]*'
  w='[a-zA-Z0-9_]*'
  fs="$(echo @|tr @ '\034')"
  sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
      -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
  awk -F"$fs" '{
  indent = length($1)/2;
  vname[indent] = $2;
  for (i in vname) {if (i > indent) {delete vname[i]}}
      if (length($3) > 0) {
          vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
          printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3);
      }
  }' | sed 's/_=/+=/g'
}
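
# Illustrative call order (a sketch only; this library does not invoke
# anything by itself, and the array/variable names below are hypothetical
# placeholders for whatever the caller defines, e.g. via parse_yaml):
#
#   source lib.sh
#   generate_ssh_key
#   prepare_vms        virtual_nodes "${BASE_IMAGE}"
#   create_networks
#   create_vms         virtual_nodes virtual_nodes_ram virtual_nodes_vcpus
#   update_pxe_network
#   start_vms          virtual_nodes
#   check_connection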