#!/bin/bash
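#
# mcp/reclass/scripts/infra.sh
#
# Brings up the MCP virtual infrastructure as libvirt/KVM guests on the local
# host: the Salt master (cfg01), three controllers (ctl01-03), a gateway
# (gtw01) and a compute node (cmp01).
#
# SSH_KEY, SSH_OPTS and SALT_MASTER are used but never set here, so they are
# expected to be exported by the caller.
#
# Example invocation (illustrative only; the values below are assumptions,
# not defaults defined by this script):
#   export SSH_KEY=/tmp/mcp.rsa
#   export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
#   export SALT_MASTER=192.168.10.100
#   ./infra.sh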

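# Ubuntu Xenial cloud image used as the base disk for every node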
BASE_IMAGE=https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
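# node name -> RAM in MiB; every node also gets 2 vCPUs (see virt-install below)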
declare -A NODES=( [cfg01]=4096 [ctl01]=8192 [ctl02]=8192 [ctl03]=8192 [gtw01]=2048 [cmp01]=6144 )

# install required host packages
apt-get install -y mkisofs curl virtinst cpu-checker qemu-kvm

# generate ssh key
[ -f $SSH_KEY ] || ssh-keygen -f $SSH_KEY -N ''
cp $SSH_KEY /tmp/

# prepare the local images/ directory and fetch the base cloud image
# (cached in /tmp; -nc skips the download if it is already present)
mkdir -p images
wget -P /tmp -nc $BASE_IMAGE

# generate cloud-init user data
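# (envsubst substitutes the values of exported environment variables
#  referenced in user-data.template)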
envsubst < user-data.template > user-data.sh

for node in "${!NODES[@]}"; do
  # clean up existing nodes
  if virsh dominfo $node >/dev/null 2>&1; then
    [ "$(virsh domstate $node)" == 'running' ] && virsh destroy $node
    virsh undefine $node
  fi

  # create/prepare images
  [ -f images/mcp_${node}.iso ] || ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
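  # reset the node disk from the pristine base image; ${BASE_IMAGE##*/}
  # strips the URL down to the bare file name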
  cp /tmp/${BASE_IMAGE##*/} images/mcp_${node}.qcow2
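  # grow the virtual disk to 100G; the root filesystem is expected to be
  # expanded on first boot by cloud-init (growpart)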
  qemu-img resize images/mcp_${node}.qcow2 100G
done

# create required networks
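# each network is defined from a local net_<name>.xml file; any existing
# definition is torn down first so it is recreated cleanly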
for net in pxe mgmt internal public; do
  if virsh net-info $net >/dev/null 2>&1; then
    virsh net-destroy ${net}
    virsh net-undefine ${net}
  fi
  virsh net-define net_${net}.xml
  virsh net-autostart ${net}
  virsh net-start ${net}
done

# create vms with specified options
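# each VM gets 2 vCPUs, the RAM size from NODES, four virtio NICs
# (pxe, mgmt, internal, public), its qcow2 disk and the per-node
# config-drive ISO attached as a cdrom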
for node in "${!NODES[@]}"; do
  virt-install --name ${node} --ram ${NODES[$node]} --vcpus=2 --cpu host --accelerate \
  --network network:pxe,model=virtio \
  --network network:mgmt,model=virtio \
  --network network:internal,model=virtio \
  --network network:public,model=virtio \
  --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
  --os-type linux --os-variant none \
  --boot hd --vnc --console pty --autostart --noreboot \
  --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
done

# set static ip address for salt master node
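# the cfg01 MAC on the pxe network is read from column 5 of
# 'virsh domiflist cfg01'; --live applies the DHCP reservation to the
# running network only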
virsh net-update pxe add ip-dhcp-host \
"<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live

# start vms
for node in "${!NODES[@]}"; do
  virsh start ${node}
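  # random 1-5 second pause to stagger the guest startups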
  sleep $((RANDOM % 5 + 1))
done

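# retry budget for the SSH check below: 60 attempts, 5 seconds apart (~5 minutes)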
CONNECTION_ATTEMPTS=60
SLEEP=5

# wait until ssh on Salt master is available
echo "Attempting to ssh to Salt master ..."
ATTEMPT=1

while (($ATTEMPT <= $CONNECTION_ATTEMPTS)); do
  ssh $SSH_OPTS ubuntu@$SALT_MASTER uptime
  case $? in
    (0) echo "${ATTEMPT}> Success"; break ;;
    (*) echo "${ATTEMPT}/${CONNECTION_ATTEMPTS}> SSH server not ready yet, waiting for ${SLEEP} seconds ..." ;;
  esac
  sleep $SLEEP
  ((ATTEMPT+=1))
done