From f7e63a63d68a31e13d31d561b9f3a68de89595d7 Mon Sep 17 00:00:00 2001
From: Bryan Sullivan
Date: Mon, 27 Nov 2017 16:48:02 -0800
Subject: Run actions in parallel to reduce overall deploy time.

JIRA: MODELS-2

Change-Id: I9af9dbfe25cf149c3ec31a4180669f37f7426c09
Signed-off-by: Bryan Sullivan
---
 tools/kubernetes/demo_deploy.sh | 12 ++++++++++--
 tools/kubernetes/k8s-cluster.sh | 42 ++++++++++++++++++++----------------
 2 files changed, 36 insertions(+), 18 deletions(-)

diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 0c581b8..1a63212 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -39,6 +39,14 @@
 #. <disk>: disk (e.g. sda, sdb) or folder (e.g. "/ceph")
 #. <extras>: optional name of script for extra setup functions as needed
 
+function run() {
+  start=$((`date +%s`/60))
+  $1
+  end=$((`date +%s`/60))
+  runtime=$((end-start))
+  log "step \"$1\" duration = $runtime minutes"
+}
+
 function run_master() {
   start=$((`date +%s`/60))
   ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
@@ -116,7 +124,7 @@ run_master "bash cloudify/k8s-cloudify.sh prereqs"
 run_master "bash cloudify/k8s-cloudify.sh setup"
 
 echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
-bash ~/models/tools/cloudify/k8s-cloudify.sh demo start
+run "bash $HOME/models/tools/cloudify/k8s-cloudify.sh demo start"
 
 echo; echo "$0 $(date): Setting up VES"
 # not re-cloned if existing - allows patch testing locally
@@ -131,7 +139,7 @@ ves_grafana_auth=admin:admin
 export ves_grafana_auth
 ves_kafka_hostname=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master hostname)
 export ves_kafka_hostname
-bash ~/ves/tools/demo_deploy.sh $k8s_key $k8s_master "$k8s_workers" cloudify
+run "bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_master \"$k8s_workers\""
 
 echo; echo "$0 $(date): All done!"
 export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index edbe93c..1700a6a 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -140,15 +140,8 @@ function setup_k8s_master() {
   # Updated to deploy Calico 2.6 per the create-cluster-kubeadm guide above
   # sudo kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
   sudo kubectl apply -f https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
-}
-
-function setup_k8s_workers() {
-  workers="$1"
-  export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
-  log "Installing workers at $1 with joincmd: $k8s_joincmd"
-
-  setup_prereqs
+  log "Wait for kubedns to be Running"
   kubedns=$(kubectl get pods --all-namespaces | grep kube-dns | awk '{print $4}')
   while [[ "$kubedns" != "Running" ]]; do
     log "kube-dns status is $kubedns. 
 Waiting 60 seconds for it to be 'Running'"
@@ -156,6 +149,12 @@ function setup_k8s_workers() {
     kubedns=$(kubectl get pods --all-namespaces | grep kube-dns | awk '{print $4}')
   done
   log "kube-dns status is $kubedns"
+}
+
+function setup_k8s_workers() {
+  workers="$1"
+  export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
+  log "Installing workers at $1 with joincmd: $k8s_joincmd"
 
   for worker in $workers; do
     log "Install worker at $worker"
@@ -166,16 +165,27 @@ function setup_k8s_workers() {
     scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
       ubuntu@$worker:/home/ubuntu/.
     ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
-      ubuntu@$worker bash /tmp/prereqs.sh worker
-    # Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet
-    # is not empty" per https://github.com/kubernetes/kubeadm/issues/1
-    ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
-      ubuntu@$worker sudo kubeadm reset
-    ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
-      ubuntu@$worker sudo $k8s_joincmd
+      ubuntu@$worker <<EOF > /dev/null 2>&1 &
+bash /tmp/prereqs.sh worker
+# Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet
+# is not empty" per https://github.com/kubernetes/kubeadm/issues/1
+sudo kubeadm reset
+sudo $k8s_joincmd
+EOF
   done
 
-  log "Cluster is ready when all nodes in 'kubectl get nodes' show as 'Ready'."
+  for worker in $workers; do
+    host=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker hostname)
+    log "checking node $host"
+    status=$(kubectl get nodes | awk "/$host/ {print \$2}")
+    while [[ "$status" != "Ready" ]]; do
+      log "node $host is \"$status\", waiting 10 seconds for it to be 'Ready'."
+      status=$(kubectl get nodes | awk "/$host/ {print \$2}")
+      sleep 10
+    done
+    log "node $host is 'Ready'."
+  done
+  log "Cluster is ready (all nodes in 'kubectl get nodes' show as 'Ready')."
 }
 
 function setup_ceph() {
-- 
cgit 1.2.3-korg
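
Note: the new run() wrapper in demo_deploy.sh times each step in whole minutes (epoch seconds divided by 60, so both timestamps truncate). A minimal self-contained sketch of the same pattern; the log() stub here is a hypothetical stand-in for the deploy scripts' own logger:

#!/bin/bash
# Hypothetical stand-in for the deploy scripts' log() helper
log() { echo "$(date): $1"; }

run() {
  start=$((`date +%s`/60))
  $1                        # unquoted expansion word-splits the string into a command
  end=$((`date +%s`/60))
  runtime=$((end-start))
  log "step \"$1\" duration = $runtime minutes"
}

run "sleep 90"   # reports 1 or 2 minutes, since durations are truncated to whole minutes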
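Note: the main speedup is in setup_k8s_workers(): the three per-worker ssh calls are collapsed into a single ssh session fed by a heredoc, and that session is backgrounded with '&' so every worker runs prereqs, 'kubeadm reset', and the join concurrently. A minimal sketch of the fan-out pattern, with hypothetical addresses and join command, using 'wait' to block until all background jobs exit:

#!/bin/bash
workers="10.0.0.11 10.0.0.12"        # hypothetical worker addresses
joincmd="kubeadm join --token ..."   # hypothetical; the script greps the real one from /tmp/kubeadm.out
for worker in $workers; do
  # The heredoc body runs on the remote host; $joincmd expands locally first.
  # '&' backgrounds each ssh session so the workers proceed in parallel.
  ssh -o StrictHostKeyChecking=no ubuntu@$worker <<EOF > /dev/null 2>&1 &
sudo kubeadm reset
sudo $joincmd
EOF
done
wait   # block until every backgrounded ssh session has exited

The patch itself skips 'wait' and instead polls 'kubectl get nodes', which confirms cluster-level readiness rather than merely that the ssh sessions exited.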
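Note: the readiness loop first asks each worker for its hostname because 'kubectl get nodes' lists nodes by hostname, not by the IP addresses the script holds. A standalone sketch of the same poll for one hypothetical node name:

#!/bin/bash
node=worker1   # hypothetical name, as shown in the first column of 'kubectl get nodes'
status=$(kubectl get nodes | awk "/$node/ {print \$2}")   # STATUS column of the matching row
while [[ "$status" != "Ready" ]]; do
  echo "node $node is '$status'; waiting 10 seconds"
  sleep 10
  status=$(kubectl get nodes | awk "/$node/ {print \$2}")
done
echo "node $node is Ready"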