author     Michael Polenchuk <mpolenchuk@mirantis.com>  2017-05-24 13:52:02 +0400
committer  Michael Polenchuk <mpolenchuk@mirantis.com>  2017-05-25 14:38:56 +0400
commit     27d968bc35ef4f622acd171ada778a4e0f4c76fb (patch)
tree       5f780e69aa1969ac499e5442162629984521ca1d /mcp/reclass
parent     a6daf4ece3f05600ad66fea55c5220d07a71cef1 (diff)
[mcp] Bring in deployment scripts
Change-Id: I7d2af958e447a5892f7cd1f6c6fb8616951e2ff3
Signed-off-by: Michael Polenchuk <mpolenchuk@mirantis.com>
Diffstat (limited to 'mcp/reclass')
-rwxr-xr-x  mcp/reclass/scripts/create-config-drive.sh  102
-rwxr-xr-x  mcp/reclass/scripts/infra.sh                  75
-rw-r--r--  mcp/reclass/scripts/net_internal.xml           4
-rw-r--r--  mcp/reclass/scripts/net_mgmt.xml               4
-rw-r--r--  mcp/reclass/scripts/net_public.xml             6
-rw-r--r--  mcp/reclass/scripts/net_pxe.xml               10
-rwxr-xr-x  mcp/reclass/scripts/openstack.sh              53
-rwxr-xr-x  mcp/reclass/scripts/salt.sh                   20
-rwxr-xr-x  mcp/reclass/scripts/user-data.sh              10
9 files changed, 284 insertions, 0 deletions
diff --git a/mcp/reclass/scripts/create-config-drive.sh b/mcp/reclass/scripts/create-config-drive.sh
new file mode 100755
index 000000000..cf87150fc
--- /dev/null
+++ b/mcp/reclass/scripts/create-config-drive.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+# This will generate an OpenStack-style config drive image suitable for
+# use with cloud-init. You may optionally pass in an ssh public key
+# (using the -k/--ssh-key option), a user-data blob (using the
+# -u/--user-data option), a vendor-data file (-v/--vendor-data) and a
+# hostname (-h/--hostname).
+
+usage () {
+ echo "usage: ${0##*/}: [--ssh-key <pubkey>] [--vendor-data <file>] [--user-data <file>] [--hostname <hostname>] <imagename>"
+}
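+
+# example (as invoked by infra.sh):
+#   ./create-config-drive.sh -k mcp.rsa.pub -u user-data.sh -h cfg01 images/mcp_cfg01.iso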
+
+ARGS=$(getopt \
+ -o k:u:v:h: \
+ --long help,hostname:,ssh-key:,user-data:,vendor-data: -n ${0##*/} \
+ -- "$@")
+
+if [ $? -ne 0 ]; then
+ usage >&2
+ exit 2
+fi
+
+eval set -- "$ARGS"
+
+while :; do
+ case "$1" in
+ --help)
+ usage
+ exit 0
+ ;;
+ -k|--ssh-key)
+ ssh_key="$2"
+ shift 2
+ ;;
+ -u|--user-data)
+ user_data="$2"
+ shift 2
+ ;;
+ -v|--vendor-data)
+ vendor_data="$2"
+ shift 2
+ ;;
+ -h|--hostname)
+ hostname="$2"
+ shift 2
+ ;;
+ --) shift
+ break
+ ;;
+ esac
+done
+
+config_image=$1
+shift
+
+if [ "$ssh_key" ] && [ -f "$ssh_key" ]; then
+ echo "adding pubkey from $ssh_key"
+ ssh_key_data=$(cat "$ssh_key")
+fi
+
+uuid=$(uuidgen)
+if ! [ "$hostname" ]; then
+ hostname="$uuid"
+fi
+
+trap 'rm -rf $config_dir' EXIT
+config_dir=$(mktemp -t -d configXXXXXX)
+
+if [ "$user_data" ] && [ -f "$user_data" ]; then
+ echo "adding user data from $user_data"
+ cp $user_data $config_dir/user-data
+else
+ touch $config_dir/user-data
+fi
+
+if [ "$vendor_data" ] && [ -f "$vendor_data" ]; then
+ echo "adding vendor data from $vendor_data"
+ cp $vendor_data $config_dir/vendor-data
+fi
+
+cat > $config_dir/meta-data <<-EOF
+instance-id: $uuid
+hostname: $hostname
+local-hostname: $hostname
+EOF
+
+if [ "$ssh_key_data" ]; then
+ cat >> $config_dir/meta-data <<-EOF
+ public-keys:
+ - |
+ $ssh_key_data
+ EOF
+fi
+
+#PS1="debug> " bash --norc
+
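+# the image is labeled 'cidata' so cloud-init's nocloud datasource will pick it up at boot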
+echo "generating configuration image at $config_image"
+if ! mkisofs -o $config_image -V cidata -r -J --quiet $config_dir; then
+ echo "ERROR: failed to create $config_image" >&2
+ exit 1
+fi
+chmod a+r $config_image
+
diff --git a/mcp/reclass/scripts/infra.sh b/mcp/reclass/scripts/infra.sh
new file mode 100755
index 000000000..182d90692
--- /dev/null
+++ b/mcp/reclass/scripts/infra.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+SSH_KEY=mcp.rsa
+SALT_MASTER=192.168.10.100
+BASE_IMAGE=https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
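+# node name -> RAM in MiB; each vm also gets 2 vcpus and a 100G disk (see virt-install below)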
+declare -A NODES=( [cfg01]=4096 [ctl01]=6144 [ctl02]=6144 [ctl03]=6144 [gtw01]=2048 [cmp01]=2048 )
+
+[ -f $SSH_KEY ] || ssh-keygen -f $SSH_KEY -N ''
+
+# get base image
+mkdir -p images
+wget -nc $BASE_IMAGE
+
+for node in "${!NODES[@]}"; do
+ # clean up existing nodes
+ if [ "$(virsh domstate $node 2>/dev/null)" == 'running' ]; then
+ virsh destroy $node
+ virsh undefine $node
+ fi
+
+ # create/prepare images
+ [ -f images/mcp_${node}.iso ] || ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
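+ # ${BASE_IMAGE/*\/} keeps just the image file name (the url path is stripped)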
+ cp ${BASE_IMAGE/*\/} images/mcp_${node}.qcow2
+ qemu-img resize images/mcp_${node}.qcow2 100G
+done
+
+# create required networks
+for net in pxe mgmt internal public; do
+ if virsh net-info $net >/dev/null 2>&1; then
+ virsh net-destroy ${net}
+ virsh net-undefine ${net}
+ fi
+ virsh net-define net_${net}.xml
+ virsh net-autostart ${net}
+ virsh net-start ${net}
+done
+
+# create vms with specified options
+for node in "${!NODES[@]}"; do
+ virt-install --name ${node} --ram ${NODES[$node]} --vcpus=2 --cpu host --accelerate \
+ --network network:pxe,model=virtio \
+ --network network:mgmt,model=virtio \
+ --network network:internal,model=virtio \
+ --network network:public,model=virtio \
+ --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
+ --boot hd --vnc --console pty --autostart --noreboot \
+ --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
+done
+
+# set static ip address for salt master node
+virsh net-update pxe add ip-dhcp-host \
+"<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
+
+# start vms
+for node in "${!NODES[@]}"; do
+ virsh start ${node}
+ sleep $((RANDOM % 5 + 1))
+done
+
+CONNECTION_ATTEMPTS=20
+SLEEP=15
+
+# wait until ssh on Salt master is available
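+# (up to CONNECTION_ATTEMPTS x SLEEP seconds, i.e. ~5 minutes, before giving up)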
+echo "Attempting to ssh to Salt master ..."
+ATTEMPT=1
+
+while (($ATTEMPT <= $CONNECTION_ATTEMPTS)); do
+ ssh -i ${SSH_KEY} ubuntu@$SALT_MASTER uptime
+ case $? in
+ (0) echo "${ATTEMPT}> Success"; break ;;
+ (*) echo "${ATTEMPT}/${CONNECTION_ATTEMPTS}> ssh server not ready yet, waiting for ${SLEEP} seconds ..." ;;
+ esac
+ sleep $SLEEP
+ ((ATTEMPT+=1))
+done
diff --git a/mcp/reclass/scripts/net_internal.xml b/mcp/reclass/scripts/net_internal.xml
new file mode 100644
index 000000000..a9abece70
--- /dev/null
+++ b/mcp/reclass/scripts/net_internal.xml
@@ -0,0 +1,4 @@
+<network>
+ <name>internal</name>
+ <bridge name="internal"/>
+</network>
diff --git a/mcp/reclass/scripts/net_mgmt.xml b/mcp/reclass/scripts/net_mgmt.xml
new file mode 100644
index 000000000..fde2a23fa
--- /dev/null
+++ b/mcp/reclass/scripts/net_mgmt.xml
@@ -0,0 +1,4 @@
+<network>
+ <name>mgmt</name>
+ <bridge name="mgmt"/>
+</network>
diff --git a/mcp/reclass/scripts/net_public.xml b/mcp/reclass/scripts/net_public.xml
new file mode 100644
index 000000000..61650d5a6
--- /dev/null
+++ b/mcp/reclass/scripts/net_public.xml
@@ -0,0 +1,6 @@
+<network>
+ <name>public</name>
+ <bridge name="public"/>
+ <forward mode="nat"/>
+ <ip address="10.16.0.1" netmask="255.255.255.0" />
+</network>
diff --git a/mcp/reclass/scripts/net_pxe.xml b/mcp/reclass/scripts/net_pxe.xml
new file mode 100644
index 000000000..92eaa6b52
--- /dev/null
+++ b/mcp/reclass/scripts/net_pxe.xml
@@ -0,0 +1,10 @@
+<network>
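+ <!-- pxe/admin network; infra.sh later pins cfg01 (salt master) to 192.168.10.100 via a static dhcp host entry -->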
+ <name>pxe</name>
+ <bridge name="pxe"/>
+ <forward mode="nat"/>
+ <ip address="192.168.10.1" netmask="255.255.255.0">
+ <dhcp>
+ <range start="192.168.10.100" end="192.168.10.254"/>
+ </dhcp>
+ </ip>
+</network>
diff --git a/mcp/reclass/scripts/openstack.sh b/mcp/reclass/scripts/openstack.sh
new file mode 100755
index 000000000..b757e8ecf
--- /dev/null
+++ b/mcp/reclass/scripts/openstack.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Deploy OpenStack
+#
+
+ssh -i mcp.rsa ubuntu@192.168.10.100 bash -s << OPENSTACK_INSTALL_END
+ sudo -i
+
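+ # '-C' targets minions by compound matcher (I@... matches on pillar data); '-b 1' batches one node at a time
+ # states below are applied in dependency order, from base os and ha up to the openstack services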
+ salt-call state.apply salt
+ salt '*' state.apply salt
+
+ salt -C 'I@salt:master' state.sls linux
+ salt -C '* and not cfg01*' state.sls linux
+
+ salt '*' state.sls ntp
+
+ salt -C 'I@keepalived:cluster' state.sls keepalived -b 1
+
+ salt -C 'I@rabbitmq:server' state.sls rabbitmq
+ salt -C 'I@rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
+
+ salt -C 'I@glusterfs:server' state.sls glusterfs.server.service
+ salt -C 'I@glusterfs:server' state.sls glusterfs.server.setup -b 1
+ salt -C 'I@glusterfs:server' cmd.run "gluster peer status; gluster volume status" -b 1
+
+ salt -C 'I@galera:master' state.sls galera
+ salt -C 'I@galera:slave' state.sls galera
+ salt -C 'I@galera:master' mysql.status | grep -A1 wsrep_cluster_size
+
+ salt -C 'I@haproxy:proxy' state.sls haproxy
+ salt -C 'I@memcached:server' state.sls memcached
+
+ salt -C 'I@keystone:server' state.sls keystone.server -b 1
+ salt -C 'I@keystone:server' cmd.run "service apache2 restart"
+ salt -C 'I@keystone:client' state.sls keystone.client
+ salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; openstack user list"
+
+ salt -C 'I@glance:server' state.sls glance -b 1
+ salt -C 'I@nova:controller' state.sls nova -b 1
+ salt -C 'I@heat:server' state.sls heat -b 1
+ salt -C 'I@cinder:controller' state.sls cinder -b 1
+
+ salt -C 'I@neutron:server' state.sls neutron -b 1
+ salt -C 'I@neutron:gateway' state.sls neutron
+
+ salt -C 'I@nova:compute' state.sls nova
+ salt -C 'I@neutron:compute' state.sls neutron
+
+ salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; nova service-list"
+ salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; neutron agent-list"
+ salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; heat stack-list"
+ salt -C 'I@keystone:server' cmd.run ". /root/keystonercv3; cinder list"
+OPENSTACK_INSTALL_END
diff --git a/mcp/reclass/scripts/salt.sh b/mcp/reclass/scripts/salt.sh
new file mode 100755
index 000000000..c202ab574
--- /dev/null
+++ b/mcp/reclass/scripts/salt.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Deploy Salt Master
+#
+
+# ssh to cfg01
+ssh -i mcp.rsa ubuntu@192.168.10.100 bash -s << SALT_INSTALL_END
+ sudo -i
+
+ apt-get update
+ apt-get install -y git curl subversion
+
+ svn export --force https://github.com/salt-formulas/salt-formulas/trunk/deploy/scripts /srv/salt/scripts
+ git clone --depth=1 https://git.opnfv.org/fuel
+ ln -s fuel/mcp/reclass /srv/salt/reclass
+
+ cd /srv/salt/scripts
+ MASTER_HOSTNAME=cfg01.virtual-mcp-ocata-ovs.local ./salt-master-init.sh
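+ # accept all pending minion keys (-A) without prompting (-y)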
+ salt-key -Ay
+SALT_INSTALL_END
diff --git a/mcp/reclass/scripts/user-data.sh b/mcp/reclass/scripts/user-data.sh
new file mode 100755
index 000000000..2b9b6845c
--- /dev/null
+++ b/mcp/reclass/scripts/user-data.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+wget -O - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -
+echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/latest xenial main" > /etc/apt/sources.list.d/salt.list
+apt-get update
+apt-get install -y salt-minion
+rm /etc/salt/minion_id
+rm -f /etc/salt/pki/minion/minion_master.pub
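+# register the minion under its fqdn and point it at the salt master (cfg01, 192.168.10.100)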
+echo "id: $(hostname).virtual-mcp-ocata-ovs.local" > /etc/salt/minion
+echo "master: 192.168.10.100" >> /etc/salt/minion
+service salt-minion restart