-rwxr-xr-x  ci/deploy.sh                                                                                  57
-rw-r--r--  mcp/config/defaults.yaml                                                                       6
-rw-r--r--  mcp/config/os-nosdn-nofeature-noha.yaml                                                       14
-rw-r--r--  mcp/config/os-nosdn-ovs-noha.yaml                                                             18
-rw-r--r--  mcp/config/os-odl_l2-nofeature-noha.yaml                                                      17
-rwxr-xr-x  mcp/reclass/scripts/infra.sh                                                                  82
-rwxr-xr-x  mcp/scripts/create-config-drive.sh (renamed from mcp/reclass/scripts/create-config-drive.sh)   4
-rwxr-xr-x  mcp/scripts/dpdk.sh (renamed from mcp/reclass/scripts/dpdk.sh)                                 2
-rw-r--r--  mcp/scripts/lib.sh                                                                           128
-rw-r--r--  mcp/scripts/net_internal.xml (renamed from mcp/reclass/scripts/net_internal.xml)               0
-rw-r--r--  mcp/scripts/net_mgmt.xml (renamed from mcp/reclass/scripts/net_mgmt.xml)                       0
-rw-r--r--  mcp/scripts/net_public.xml (renamed from mcp/reclass/scripts/net_public.xml)                   0
-rw-r--r--  mcp/scripts/net_pxe.xml (renamed from mcp/reclass/scripts/net_pxe.xml)                         0
-rwxr-xr-x  mcp/scripts/openstack.sh (renamed from mcp/reclass/scripts/openstack.sh)                       2
-rwxr-xr-x  mcp/scripts/salt.sh (renamed from mcp/reclass/scripts/salt.sh)                                 2
-rw-r--r--  mcp/scripts/user-data.template (renamed from mcp/reclass/scripts/user-data.template)           0
16 files changed, 223 insertions, 109 deletions
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 8389bc0c8..77030a44d 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -1,7 +1,7 @@
#!/bin/bash
set -ex
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2017 Ericsson AB, Mirantis Inc. and others.
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -111,7 +111,7 @@ clean() {
# BEGIN of shorthand variables for internal use
#
SCRIPT_PATH=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
-DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../mcp/reclass/scripts; pwd)
+DEPLOY_DIR=$(cd ${SCRIPT_PATH}/../mcp/scripts; pwd)
PXE_BRIDGE=''
NO_HEALTH_CHECK=''
USE_EXISTING_FUEL=''
@@ -217,17 +217,6 @@ if [[ $EUID -ne 0 ]]; then
exit 1
fi
-if [ -z $BASE_CONFIG_URI ] || [ -z $TARGET_LAB ] || \
- [ -z $TARGET_POD ] || [ -z $DEPLOY_SCENARIO ] || \
- [ -z $ISO ]; then
- echo "Arguments not according to new argument style"
- echo "Trying old-style compatibility mode"
- pushd ${DEPLOY_DIR} > /dev/null
- python deploy.py "$@"
- popd > /dev/null
- exit 0
-fi
-
# Enable the automatic exit trap
trap do_exit SIGINT SIGTERM EXIT
@@ -240,23 +229,47 @@ pushd ${DEPLOY_DIR} > /dev/null
# Prepare the deploy config files based on lab/pod information, deployment
# scenario, etc.
-# Set cluster domain
-case $DEPLOY_SCENARIO in
- *dpdk*) CLUSTER_DOMAIN=virtual-mcp-ocata-ovs-dpdk.local ;;
- *) CLUSTER_DOMAIN=virtual-mcp-ocata-ovs.local ;;
-esac
+# Install required packages
+[ -n "$(command -v apt-get)" ] && apt-get install -y mkisofs curl virtinst cpu-checker qemu-kvm
+[ -n "$(command -v yum)" ] && yum install -y genisoimage curl virt-install qemu-kvm
+
+# Check scenario file existence
+if [[ ! -f ../config/${DEPLOY_SCENARIO}.yaml ]]; then
+ echo "[WARN] ${DEPLOY_SCENARIO}.yaml not found, setting simplest scenario"
+ DEPLOY_SCENARIO='os-nosdn-nofeature-noha'
+fi
+
+# Get required infra deployment data
+source lib.sh
+eval $(parse_yaml ../config/defaults.yaml)
+eval $(parse_yaml ../config/${DEPLOY_SCENARIO}.yaml)
+
+declare -A virtual_nodes_ram
+for node in "${virtual_nodes[@]}"; do
+ virtual_custom_ram="virtual_${node}_ram"
+ virtual_nodes_ram[$node]=${!virtual_custom_ram:-$virtual_default_ram}
+done
-export CLUSTER_DOMAIN
+export CLUSTER_DOMAIN=$cluster_domain
export SSH_KEY=${SSH_KEY:-mcp.rsa}
export SALT_MASTER=${SALT_MASTER_IP:-192.168.10.100}
export SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ${SSH_KEY}"
-./infra.sh
+# Infra setup
+generate_ssh_key
+prepare_vms virtual_nodes $base_image
+create_networks
+create_vms virtual_nodes virtual_nodes_ram
+update_pxe_network
+start_vms virtual_nodes
+check_connection
+
+# Openstack cluster setup
./salt.sh
./openstack.sh
-# enable dpdk on computes
-[[ "$DEPLOY_SCENARIO" =~ dpdk ]] && ./dpdk.sh
+# Enable dpdk on computes
+[[ "$DEPLOY_SCENARIO" =~ (ovs|dpdk) ]] && ./dpdk.sh
## Disable Fuel deployment engine
#
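Note: the RAM map built in the deploy.sh hunk above relies on bash indirect expansion with a fallback to the scenario default. A minimal standalone sketch of that pattern, using made-up values in place of the parse_yaml output:

# Stand-ins for variables that parse_yaml would normally provide
virtual_default_ram=4096
virtual_ctl01_ram=14336                    # only ctl01 has an override
virtual_nodes=(cfg01 ctl01)

declare -A virtual_nodes_ram
for node in "${virtual_nodes[@]}"; do
    virtual_custom_ram="virtual_${node}_ram"
    # ${!virtual_custom_ram} expands the variable *named* by $virtual_custom_ram;
    # :- supplies the default when that variable is unset or empty
    virtual_nodes_ram[$node]=${!virtual_custom_ram:-$virtual_default_ram}
done
# Result: virtual_nodes_ram[cfg01]=4096, virtual_nodes_ram[ctl01]=14336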
diff --git a/mcp/config/defaults.yaml b/mcp/config/defaults.yaml
new file mode 100644
index 000000000..b841e88c9
--- /dev/null
+++ b/mcp/config/defaults.yaml
@@ -0,0 +1,6 @@
+base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+virtual:
+ default:
+ vcpus: 2
+ ram: 4096
+
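Note: when deploy.sh runs eval $(parse_yaml ../config/defaults.yaml) (parse_yaml is added in mcp/scripts/lib.sh further down), this file should expand to shell variables roughly like:

base_image=("https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img")
virtual_default_vcpus=("2")
virtual_default_ram=("4096")

so $base_image and $virtual_default_ram in deploy.sh resolve to the values defined here.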
diff --git a/mcp/config/os-nosdn-nofeature-noha.yaml b/mcp/config/os-nosdn-nofeature-noha.yaml
new file mode 100644
index 000000000..526ea5763
--- /dev/null
+++ b/mcp/config/os-nosdn-nofeature-noha.yaml
@@ -0,0 +1,14 @@
+cluster:
+ domain: virtual-mcp-ocata-ovs.local
+virtual:
+ nodes:
+ - cfg01
+ - ctl01
+ - cmp01
+ - cmp02
+ - gtw01
+ ctl01:
+ vcpus: 4
+ ram: 14336
+ gtw01:
+ ram: 2048
diff --git a/mcp/config/os-nosdn-ovs-noha.yaml b/mcp/config/os-nosdn-ovs-noha.yaml
new file mode 100644
index 000000000..ef35d72d3
--- /dev/null
+++ b/mcp/config/os-nosdn-ovs-noha.yaml
@@ -0,0 +1,18 @@
+cluster:
+ domain: virtual-mcp-ocata-ovs-dpdk.local
+virtual:
+ nodes:
+ - cfg01
+ - ctl01
+ - cmp01
+ - cmp02
+ - gtw01
+ ctl01:
+ vcpus: 4
+ ram: 14336
+ gtw01:
+ ram: 2048
+ cmp01:
+ ram: 6144
+ cmp02:
+ ram: 6144
diff --git a/mcp/config/os-odl_l2-nofeature-noha.yaml b/mcp/config/os-odl_l2-nofeature-noha.yaml
new file mode 100644
index 000000000..d9810791b
--- /dev/null
+++ b/mcp/config/os-odl_l2-nofeature-noha.yaml
@@ -0,0 +1,17 @@
+cluster:
+ domain: virtual-mcp-ocata-odl.local
+virtual:
+ nodes:
+ - cfg01
+ - ctl01
+ - cmp01
+ - gtw01
+ - odl01
+ ctl01:
+ vcpus: 4
+ ram: 14336
+ gtw01:
+ ram: 2048
+ odl01:
+ vcpus: 4
+ ram: 5120
diff --git a/mcp/reclass/scripts/infra.sh b/mcp/reclass/scripts/infra.sh
deleted file mode 100755
index 72ad5aa5a..000000000
--- a/mcp/reclass/scripts/infra.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-BASE_IMAGE=https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
-declare -A NODES=( [cfg01]=4096 [ctl01]=14336 [gtw01]=2048 [cmp01]=6144 [cmp02]=6144 )
-
-# get required packages
-apt-get install -y mkisofs curl virtinst cpu-checker qemu-kvm
-
-# generate ssh key
-[ -f $SSH_KEY ] || ssh-keygen -f $SSH_KEY -N ''
-install -o jenkins -m 0600 ${SSH_KEY} /tmp/
-
-# get base image
-mkdir -p images
-wget -P /tmp -nc $BASE_IMAGE
-
-# generate cloud-init user data
-envsubst < user-data.template > user-data.sh
-
-for node in "${!NODES[@]}"; do
- # clean up existing nodes
- if [ "$(virsh domstate $node 2>/dev/null)" == 'running' ]; then
- virsh destroy $node
- virsh undefine $node
- fi
-
- # create/prepare images
- [ -f images/mcp_${node}.iso ] || ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
- cp /tmp/${BASE_IMAGE/*\/} images/mcp_${node}.qcow2
- qemu-img resize images/mcp_${node}.qcow2 100G
-done
-
-# create required networks
-for net in pxe mgmt internal public; do
- if virsh net-info $net >/dev/null 2>&1; then
- virsh net-destroy ${net}
- virsh net-undefine ${net}
- fi
- virsh net-define net_${net}.xml
- virsh net-autostart ${net}
- virsh net-start ${net}
-done
-
-# create vms with specified options
-for node in "${!NODES[@]}"; do
- virt-install --name ${node} --ram ${NODES[$node]} --vcpus=2 --cpu host --accelerate \
- --network network:pxe,model=virtio \
- --network network:mgmt,model=virtio \
- --network network:internal,model=virtio \
- --network network:public,model=virtio \
- --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
- --os-type linux --os-variant none \
- --boot hd --vnc --console pty --autostart --noreboot \
- --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
-done
-
-# set static ip address for salt master node
-virsh net-update pxe add ip-dhcp-host \
-"<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
-
-# start vms
-for node in "${!NODES[@]}"; do
- virsh start ${node}
- sleep $[RANDOM%5+1]
-done
-
-CONNECTION_ATTEMPTS=60
-SLEEP=5
-
-# wait until ssh on Salt master is available
-echo "Attempting to ssh to Salt master ..."
-ATTEMPT=1
-
-while (($ATTEMPT <= $CONNECTION_ATTEMPTS)); do
- ssh $SSH_OPTS ubuntu@$SALT_MASTER uptime
- case $? in
- (0) echo "${ATTEMPT}> Success"; break ;;
- (*) echo "${ATTEMPT}/${CONNECTION_ATTEMPTS}> ssh server ain't ready yet, waiting for ${SLEEP} seconds ..." ;;
- esac
- sleep $SLEEP
- ((ATTEMPT+=1))
-done
diff --git a/mcp/reclass/scripts/create-config-drive.sh b/mcp/scripts/create-config-drive.sh
index cf87150fc..df3f72f1f 100755
--- a/mcp/reclass/scripts/create-config-drive.sh
+++ b/mcp/scripts/create-config-drive.sh
@@ -67,14 +67,14 @@ config_dir=$(mktemp -t -d configXXXXXX)
if [ "$user_data" ] && [ -f "$user_data" ]; then
echo "adding user data from $user_data"
- cp $user_data $config_dir/user-data
+ cp ${user_data} ${config_dir}/user-data
else
touch $config_dir/user-data
fi
if [ "$vendor_data" ] && [ -f "$vendor_data" ]; then
echo "adding vendor data from $vendor_data"
- cp $vendor_data $config_dir/vendor-data
+ cp ${vendor_data} ${config_dir}/vendor-data
fi
cat > $config_dir/meta-data <<-EOF
diff --git a/mcp/reclass/scripts/dpdk.sh b/mcp/scripts/dpdk.sh
index 4e4cd0b2a..faa4390d3 100755
--- a/mcp/reclass/scripts/dpdk.sh
+++ b/mcp/scripts/dpdk.sh
@@ -3,7 +3,7 @@
# Enable DPDK on compute nodes
#
-ssh $SSH_OPTS ubuntu@$SALT_MASTER bash -s << DPDK_INSTALL_END
+ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << DPDK_INSTALL_END
sudo -i
salt -C 'I@nova:compute' system.reboot
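Note: dpdk.sh, openstack.sh and salt.sh all use the same remote-execution idiom: the heredoc is fed to ssh's stdin, so its body runs in a bash started on the Salt master, and the leading sudo -i hands the remaining input to a root login shell. A minimal sketch of the idiom with placeholder options and address (not necessarily the project's exact values):

SSH_OPTS='-o StrictHostKeyChecking=no'
SALT_MASTER=192.168.10.100                 # placeholder address
ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << REMOTE_END
  sudo -i
  # everything from here on is read and executed by the root shell started by sudo -i
  hostname
  id -un
REMOTE_END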
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
new file mode 100644
index 000000000..50f441a78
--- /dev/null
+++ b/mcp/scripts/lib.sh
@@ -0,0 +1,128 @@
+#
+# Library of shell functions
+#
+
+generate_ssh_key() {
+ [ -f "$SSH_KEY" ] || ssh-keygen -f ${SSH_KEY} -N ''
+ install -o jenkins -m 0600 ${SSH_KEY} /tmp/
+}
+
+get_base_image() {
+ local base_image=$1
+
+ mkdir -p images
+ wget -P /tmp -nc $base_image
+}
+
+cleanup_vms() {
+ # clean up existing nodes
+ for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do
+ virsh destroy $node
+ virsh undefine $node
+ done
+}
+
+prepare_vms() {
+ local -n vnodes=$1
+ local base_image=$2
+
+ cleanup_vms
+ get_base_image $base_image
+ envsubst < user-data.template > user-data.sh
+
+ for node in "${vnodes[@]}"; do
+ # create/prepare images
+ ./create-config-drive.sh -k ${SSH_KEY}.pub -u user-data.sh -h ${node} images/mcp_${node}.iso
+ cp /tmp/${base_image/*\/} images/mcp_${node}.qcow2
+ qemu-img resize images/mcp_${node}.qcow2 100G
+ done
+}
+
+create_networks() {
+ # create required networks
+ for net in pxe mgmt internal public; do
+ if virsh net-info $net >/dev/null 2>&1; then
+ virsh net-destroy ${net}
+ virsh net-undefine ${net}
+ fi
+ virsh net-define net_${net}.xml
+ virsh net-autostart ${net}
+ virsh net-start ${net}
+ done
+}
+
+create_vms() {
+ local -n vnodes=$1
+ local -n vnodes_ram=$2
+
+ # create vms with specified options
+ for node in "${vnodes[@]}"; do
+ virt-install --name ${node} --ram ${vnodes_ram[$node]} --vcpus=2 --cpu host --accelerate \
+ --network network:pxe,model=virtio \
+ --network network:mgmt,model=virtio \
+ --network network:internal,model=virtio \
+ --network network:public,model=virtio \
+ --disk path=$(pwd)/images/mcp_${node}.qcow2,format=qcow2,bus=virtio,cache=none,io=native \
+ --os-type linux --os-variant none \
+ --boot hd --vnc --console pty --autostart --noreboot \
+ --disk path=$(pwd)/images/mcp_${node}.iso,device=cdrom
+ done
+}
+
+update_pxe_network() {
+ # set static ip address for salt master node
+ virsh net-update pxe add ip-dhcp-host \
+ "<host mac='$(virsh domiflist cfg01 | awk '/pxe/ {print $5}')' name='cfg01' ip='$SALT_MASTER'/>" --live
+}
+
+start_vms() {
+ local -n vnodes=$1
+
+ # start vms
+ for node in "${vnodes[@]}"; do
+ virsh start ${node}
+ sleep $[RANDOM%5+1]
+ done
+}
+
+check_connection() {
+ local total_attempts=60
+ local sleep_time=5
+ local attempt=1
+
+ set +e
+ echo '[INFO] Attempting to get into Salt master ...'
+
+ # wait until ssh on Salt master is available
+ while (($attempt <= $total_attempts)); do
+ ssh -i ${SSH_KEY} ubuntu@${SALT_MASTER} uptime
+ case $? in
+ 0) echo "${attempt}> Success"; break ;;
+ *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;;
+ esac
+ sleep $sleep_time
+ ((attempt+=1))
+ done
+ set -e
+}
+
+parse_yaml() {
+ local prefix=$2
+ local s
+ local w
+ local fs
+ s='[[:space:]]*'
+ w='[a-zA-Z0-9_]*'
+ fs="$(echo @|tr @ '\034')"
+ sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
+ -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
+ awk -F"$fs" '{
+ indent = length($1)/2;
+ vname[indent] = $2;
+ for (i in vname) {if (i > indent) {delete vname[i]}}
+ if (length($3) > 0) {
+ vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
+ printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3);
+ }
+ }' | sed 's/_=/+=/g'
+}
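Note: for the scenario file os-nosdn-nofeature-noha.yaml above, eval $(parse_yaml ...) should therefore produce roughly the following assignments. Nested keys are flattened with underscores, and list items (whose generated names end in a trailing underscore) are turned into array appends by the final sed 's/_=/+=/g':

cluster_domain=("virtual-mcp-ocata-ovs.local")
virtual_nodes+=("cfg01")
virtual_nodes+=("ctl01")
virtual_nodes+=("cmp01")
virtual_nodes+=("cmp02")
virtual_nodes+=("gtw01")
virtual_ctl01_vcpus=("4")
virtual_ctl01_ram=("14336")
virtual_gtw01_ram=("2048")

This is what lets deploy.sh iterate over ${virtual_nodes[@]} and look up the per-node virtual_<node>_ram overrides.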
diff --git a/mcp/reclass/scripts/net_internal.xml b/mcp/scripts/net_internal.xml
index a9abece70..a9abece70 100644
--- a/mcp/reclass/scripts/net_internal.xml
+++ b/mcp/scripts/net_internal.xml
diff --git a/mcp/reclass/scripts/net_mgmt.xml b/mcp/scripts/net_mgmt.xml
index 0ba613152..0ba613152 100644
--- a/mcp/reclass/scripts/net_mgmt.xml
+++ b/mcp/scripts/net_mgmt.xml
diff --git a/mcp/reclass/scripts/net_public.xml b/mcp/scripts/net_public.xml
index 61650d5a6..61650d5a6 100644
--- a/mcp/reclass/scripts/net_public.xml
+++ b/mcp/scripts/net_public.xml
diff --git a/mcp/reclass/scripts/net_pxe.xml b/mcp/scripts/net_pxe.xml
index 92eaa6b52..92eaa6b52 100644
--- a/mcp/reclass/scripts/net_pxe.xml
+++ b/mcp/scripts/net_pxe.xml
diff --git a/mcp/reclass/scripts/openstack.sh b/mcp/scripts/openstack.sh
index 9e636ddaa..88db83d49 100755
--- a/mcp/reclass/scripts/openstack.sh
+++ b/mcp/scripts/openstack.sh
@@ -3,7 +3,7 @@
# Deploy Openstack
#
-ssh $SSH_OPTS ubuntu@$SALT_MASTER bash -s << OPENSTACK_INSTALL_END
+ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << OPENSTACK_INSTALL_END
sudo -i
salt-call state.apply salt
diff --git a/mcp/reclass/scripts/salt.sh b/mcp/scripts/salt.sh
index 3b6fa9900..56a6fb3b8 100755
--- a/mcp/reclass/scripts/salt.sh
+++ b/mcp/scripts/salt.sh
@@ -4,7 +4,7 @@
#
# ssh to cfg01
-ssh $SSH_OPTS ubuntu@$SALT_MASTER bash -s << SALT_INSTALL_END
+ssh ${SSH_OPTS} ubuntu@${SALT_MASTER} bash -s << SALT_INSTALL_END
sudo -i
echo -n 'Checking out cloud-init has finished running ...'
diff --git a/mcp/reclass/scripts/user-data.template b/mcp/scripts/user-data.template
index 811a58c70..811a58c70 100644
--- a/mcp/reclass/scripts/user-data.template
+++ b/mcp/scripts/user-data.template