author    Bryan Sullivan <bryan.sullivan@att.com>  2017-11-01 22:28:15 -0700
committer Bryan Sullivan <bryan.sullivan@att.com>  2017-11-01 22:28:15 -0700
commit    5370e971211cf35c844988646404acbca2e33201 (patch)
tree      6a31ceea780f8619b2ce6fa1ce3c0869866c0aca
parent    14cad79fc39fafa942f07f8b4c5c671c33b7a427 (diff)
Improve logging. Remove extra ceph test step.
JIRA: MODELS-23
Change-Id: Idd377ee35ae7b90e10c95b4b41e13bfd533b30e0
Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
-rw-r--r--  tools/cloudify/k8s-cloudify.sh        38
-rw-r--r--  tools/docker/docker-cluster.sh        30
-rw-r--r--  tools/kubernetes/ceph-baremetal.sh    54
-rw-r--r--  tools/kubernetes/ceph-helm.sh         18
-rw-r--r--  tools/kubernetes/k8s-cluster.sh       51
-rw-r--r--  tools/maas/deploy.sh                  14
-rw-r--r--  tools/prometheus/prometheus-tools.sh  42
-rw-r--r--  tools/rancher/rancher-cluster.sh     100
8 files changed, 194 insertions, 153 deletions
diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index 61245f6..d40f235 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -26,22 +26,28 @@
#. $ ssh -x ubuntu@<k8s-master> bash cloudify/k8s-cloudify.sh [setup|clean]
#. Status: this is a work in progress, under test.
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
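Bash's caller 0 builtin prints "<line> <function> <file>", so the new helper prefixes each message with the calling function and line number. A sketch of a call and its output (line number and date are illustrative):

  log "Setup Cloudify-CLI"   # called from setup() at line 56
  # setup:56 (Thu Nov  2 05:28:15 UTC 2017) Setup Cloudify-CLI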
function prereqs() {
- echo "${FUNCNAME[0]}: Install prerequisites"
+ log "Install prerequisites"
sudo apt-get install -y virtinst qemu-kvm libguestfs-tools virtualenv git python-pip
- echo "${FUNCNAME[0]}: Setup $USER for kvm use"
+ log "Setup $USER for kvm use"
# Per http://libguestfs.org/guestfs-faq.1.html
# workaround for virt-customize warning: libguestfs: warning: current user is not a member of the KVM group (group ID 121). This user cannot access /dev/kvm, so libguestfs may run very slowly. It is recommended that you 'chmod 0666 /dev/kvm' or add the current user to the KVM group (you might need to log out and log in again).
# Also see: https://help.ubuntu.com/community/KVM/Installation
# also to avoid permission denied errors in guestfish, from http://manpages.ubuntu.com/manpages/zesty/man1/guestfs-faq.1.html
sudo usermod -a -G kvm $USER
sudo chmod 0644 /boot/vmlinuz*
- echo "${FUNCNAME[0]}: Clone repo"
+ log "Clone repo"
}
function setup () {
cd ~/cloudify
- echo "${FUNCNAME[0]}: Setup Cloudify-CLI"
+ log "Setup Cloudify-CLI"
# Per http://docs.getcloudify.org/4.1.0/installation/bootstrapping/#installing-cloudify-manager-in-an-offline-environment
wget -q http://repository.cloudifysource.org/cloudify/17.9.21/community-release/cloudify-cli-community-17.9.21.deb
# Installs into /opt/cfy/
@@ -50,7 +56,7 @@ function setup () {
virtualenv ~/cloudify/env
source ~/cloudify/env/bin/activate
- echo "${FUNCNAME[0]}: Setup Cloudify-Manager"
+ log "Setup Cloudify-Manager"
# to start over
# sudo virsh destroy cloudify-manager; sudo virsh undefine cloudify-manager
wget -q http://repository.cloudifysource.org/cloudify/17.9.21/community-release/cloudify-manager-community-17.9.21.qcow2
@@ -60,20 +66,20 @@ function setup () {
VM_IP=""
n=0
while [[ "x$VM_IP" == "x" ]]; do
- echo "${FUNCNAME[0]}: $n minutes so far; waiting 60 seconds for cloudify-manager IP to be assigned"
+ log "$n minutes so far; waiting 60 seconds for cloudify-manager IP to be assigned"
sleep 60
((n++))
VM_MAC=$(virsh domiflist cloudify-manager | grep default | grep -Eo "([0-9a-f]+:){5}[0-9a-f]+")
VM_IP=$(/usr/sbin/arp -e | grep ${VM_MAC} | awk {'print $1'})
done
- echo "${FUNCNAME[0]}: cloudify-manager IP=$VM_IP"
+ log "cloudify-manager IP=$VM_IP"
while ! cfy profiles use $VM_IP -u admin -p admin -t default_tenant ; do
- echo "${FUNCNAME[0]}: waiting 60 seconds for cloudify-manager API to be active"
+ log "waiting 60 seconds for cloudify-manager API to be active"
sleep 60
done
cfy status
- echo "${FUNCNAME[0]}: Install Cloudify Kubernetes Plugin"
+ log "Install Cloudify Kubernetes Plugin"
# Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
# Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
pip install kubernetes wagon
@@ -84,7 +90,7 @@ function setup () {
# For Cloudify-Manager per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/blob/master/examples/persistent-volumes-blueprint.yaml
cfy plugins upload cloudify_kubernetes_plugin-1.2.1-py27-none-linux_x86_64-centos-Core.wgn
- echo "${FUNCNAME[0]}: Create secrets for kubernetes as referenced in blueprints"
+ log "Create secrets for kubernetes as referenced in blueprints"
cfy secrets create -s $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $1}') kubernetes_master_ip
cfy secrets create -s $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $2}') kubernetes_master_port
cfy secrets create -s $(grep 'certificate-authority-data: ' ~/.kube/config | awk -F ' ' '{print $2}') kubernetes_certificate_authority_data
@@ -92,8 +98,8 @@ function setup () {
cfy secrets create -s $(grep 'client-key-data: ' ~/.kube/config | awk -F ' ' '{print $2}') kubernetes-admin_client_key_data
cfy secrets list
- echo "${FUNCNAME[0]}: Cloudify CLI config is at ~/.cloudify/config.yaml"
- echo "${FUNCNAME[0]}: Cloudify CLI log is at ~/.cloudify/logs/cli.log"
+ log "Cloudify CLI config is at ~/.cloudify/config.yaml"
+ log "Cloudify CLI log is at ~/.cloudify/logs/cli.log"
}
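The secrets above are scraped out of ~/.kube/config; the grep/awk parsing assumes a kubeadm-style kubeconfig shaped roughly like this (all values are placeholders):

  apiVersion: v1
  clusters:
  - cluster:
      certificate-authority-data: LS0tLS1CRUdJTk...
      server: https://10.0.0.1:6443
  users:
  - name: kubernetes-admin
    user:
      client-certificate-data: LS0tLS1CRUdJTk...
      client-key-data: LS0tLS1CRUdJTk...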
function demo() {
@@ -113,20 +119,20 @@ function demo() {
cfy executions start install -d k8s-hello-world
pod_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.podIP}' nginx)
while [[ "x$pod_ip" == "x" ]]; do
- echo "${FUNCNAME[0]}: nginx pod IP is not yet assigned, waiting 10 seconds"
+ log "nginx pod IP is not yet assigned, waiting 10 seconds"
sleep 10
pod_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.podIP}' nginx)
done
while ! curl http://$pod_ip ; do
- echo "${FUNCNAME[0]}: nginx pod is not yet responding at http://$pod_ip, waiting 10 seconds"
+ log "nginx pod is not yet responding at http://$pod_ip, waiting 10 seconds"
sleep 10
done
- echo "${FUNCNAME[0]}: nginx pod is active at http://$pod_ip"
+ log "nginx pod is active at http://$pod_ip"
curl http://$pod_ip
}
function clean () {
- echo "${FUNCNAME[0]}: Cleanup cloudify"
+ log "Cleanup cloudify"
# TODO
}
diff --git a/tools/docker/docker-cluster.sh b/tools/docker/docker-cluster.sh
index 7404e4a..ce2701d 100644
--- a/tools/docker/docker-cluster.sh
+++ b/tools/docker/docker-cluster.sh
@@ -37,6 +37,12 @@
#. By default, cleans the entire cluster.
#.
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
# Setup master and worker hosts
function setup() {
# Per https://docs.docker.com/engine/swarm/swarm-tutorial/
@@ -83,27 +89,27 @@ sudo docker swarm init --advertise-addr $master
EOF
if ! curl http://$master:4243/version ; then
- echo "${FUNCNAME[0]}: docker API failed to initialize"
+ log "docker API failed to initialize"
exit 1
fi
# Per https://docs.docker.com/engine/swarm/swarm-tutorial/add-nodes/
token=$(ssh -o StrictHostKeyChecking=no -x ubuntu@$master sudo docker swarm join-token worker | grep docker)
for worker in $workers; do
- echo "${FUNCNAME[0]}: setting up worker at $worker"
+ log "setting up worker at $worker"
scp -o StrictHostKeyChecking=no /tmp/prereqs.sh ubuntu@$worker:/home/ubuntu/.
ssh -x -o StrictHostKeyChecking=no ubuntu@$worker bash /home/ubuntu/prereqs.sh
ssh -x -o StrictHostKeyChecking=no ubuntu@$worker sudo $token
done
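The join-token output grepped into $token above is the standard swarm join command, e.g. (token and manager address are placeholders):

  docker swarm join --token SWMTKN-1-<token> 10.0.0.1:2377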
- echo "${FUNCNAME[0]}: testing service creation"
+ log "testing service creation"
reps=1; for a in $workers; do ((reps++)); done
create_service nginx $reps
}
function create_service() {
- echo "${FUNCNAME[0]}: creating service $1 with $2 replicas"
+ log "creating service $1 with $2 replicas"
# sudo docker service create -p 80:80 --replicas $reps --name nginx nginx
# per https://docs.docker.com/engine/api/v1.27/
source /tmp/env.sh
@@ -112,11 +118,11 @@ function create_service() {
match="Welcome to nginx!"
;;
*)
- echo "${FUNCNAME[0]}: service $1 not setup for use with this script"
+ log "service $1 not setup for use with this script"
esac
if ! curl -X POST http://$master:4243/services/create -d @$1.json ; then
- echo "${FUNCNAME[0]}: service creation failed"
+ log "service creation failed"
exit 1
fi
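The @$1.json body posted above follows the Docker API v1.27 ServiceSpec schema; a minimal sketch of what the nginx case might contain (an assumption about the generated file; image, ports, and replica count are illustrative):

  cat <<EOF >nginx.json
  {
    "Name": "nginx",
    "TaskTemplate": { "ContainerSpec": { "Image": "nginx" } },
    "Mode": { "Replicated": { "Replicas": 2 } },
    "EndpointSpec": {
      "Ports": [ { "Protocol": "tcp", "TargetPort": 80, "PublishedPort": 80 } ]
    }
  }
  EOF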
@@ -124,7 +130,7 @@ function create_service() {
}
function check_service() {
- echo "${FUNCNAME[0]}: checking service state for $1 with match string $2"
+ log "checking service state for $1 with match string $2"
source /tmp/env.sh
service=$1
match="$2"
@@ -139,7 +145,7 @@ function check_service() {
for node in $nodes; do
not=""
while ! curl -s -o /tmp/resp http://$node:$port ; do
- echo "${FUNCNAME[0]}: service is not yet active, waiting 10 seconds"
+ log "service is not yet active, waiting 10 seconds"
sleep 10
done
curl -s -o /tmp/resp http://$node:$port
@@ -155,7 +161,7 @@ function check_service() {
}
function delete_service() {
- echo "${FUNCNAME[0]}: deleting service $1"
+ log "deleting service $1"
source /tmp/env.sh
service=$1
services=$(curl http://$master:4243/services)
@@ -165,9 +171,9 @@ function delete_service() {
if [[ $(echo $services | jq -r ".[$n].Spec.Name") == $service ]]; then
id=$(echo $services | jq -r ".[$n].ID")
if ! curl -X DELETE http://$master:4243/services/$id ; then
- echo "${FUNCNAME[0]}: failed to delete service $1"
+ log "failed to delete service $1"
else
- echo "${FUNCNAME[0]}: deleted service $1"
+ log "deleted service $1"
fi
break
fi
@@ -205,7 +211,7 @@ case "$1" in
end=`date +%s`
runtime=$((end-start))
runtime=$((runtime/60))
- echo "${FUNCNAME[0]}: Demo duration = $runtime minutes"
+ log "Demo duration = $runtime minutes"
;;
create)
create_service "$2" $3
diff --git a/tools/kubernetes/ceph-baremetal.sh b/tools/kubernetes/ceph-baremetal.sh
index dcad340..d806178 100644
--- a/tools/kubernetes/ceph-baremetal.sh
+++ b/tools/kubernetes/ceph-baremetal.sh
@@ -31,20 +31,26 @@
#. Status: work in progress, incomplete
#
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
function setup_ceph() {
node_ips=$1
cluster_net=$2
public_net=$3
ceph_dev=$4
- echo "${FUNCNAME[0]}: Deploying ceph-mon on localhost $HOSTNAME"
- echo "${FUNCNAME[0]}: Deploying ceph-osd on nodes $node_ips"
- echo "${FUNCNAME[0]}: Setting cluster-network=$cluster_net and public-network=$public_net"
+ log "Deploying ceph-mon on localhost $HOSTNAME"
+ log "Deploying ceph-osd on nodes $node_ips"
+ log "Setting cluster-network=$cluster_net and public-network=$public_net"
mon_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
all_nodes="$mon_ip $node_ips"
# Also caches the server fingerprints so ceph-deploy does not prompt the user
# Note this loop may be partially redundant with the ceph-deploy steps below
for node_ip in $all_nodes; do
- echo "${FUNCNAME[0]}: Install ntp and ceph on $node_ip"
+ log "Install ntp and ceph on $node_ip"
ssh -x -o StrictHostKeyChecking=no ubuntu@$node_ip <<EOF
sudo timedatectl set-ntp no
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
@@ -56,11 +62,11 @@ EOF
# per http://docs.ceph.com/docs/master/start/quick-ceph-deploy/
# also https://upcommons.upc.edu/bitstream/handle/2117/101816/Degree_Thesis_Nabil_El_Alami.pdf
- echo "${FUNCNAME[0]}: Create ceph config folder ~/ceph-cluster"
+ log "Create ceph config folder ~/ceph-cluster"
mkdir ~/ceph-cluster
cd ~/ceph-cluster
- echo "${FUNCNAME[0]}: Create new cluster with $HOSTNAME as initial ceph-mon node"
+ log "Create new cluster with $HOSTNAME as initial ceph-mon node"
ceph-deploy new --cluster-network $cluster_net --public-network $public_net --no-ssh-copykey $HOSTNAME
# Update conf per recommendations of http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
cat <<EOF >>ceph.conf
@@ -69,16 +75,16 @@ osd max object namespace len = 64
EOF
cat ceph.conf
- echo "${FUNCNAME[0]}: Deploy ceph packages on other nodes"
+ log "Deploy ceph packages on other nodes"
ceph-deploy install $mon_ip $node_ips
- echo "${FUNCNAME[0]}: Deploy the initial monitor and gather the keys"
+ log "Deploy the initial monitor and gather the keys"
ceph-deploy mon create-initial
if [[ "x$ceph_dev" == "x" ]]; then
n=1
for node_ip in $node_ips; do
- echo "${FUNCNAME[0]}: Prepare ceph OSD on node $node_ip"
+ log "Prepare ceph OSD on node $node_ip"
echo "$node_ip ceph-osd$n" | sudo tee -a /etc/hosts
# Using ceph-osd$n here avoids need for manual acceptance of the new server hash
ssh -x -o StrictHostKeyChecking=no ubuntu@ceph-osd$n <<EOF
@@ -90,17 +96,17 @@ EOF
((n++))
done
else
- echo "${FUNCNAME[0]}: Deploy OSDs"
+ log "Deploy OSDs"
for node_ip in $node_ips; do
- echo "${FUNCNAME[0]}: Create ceph osd on $node_ip using $ceph_dev"
+ log "Create ceph osd on $node_ip using $ceph_dev"
ceph-deploy osd create $node_ip:$ceph_dev
done
fi
- echo "${FUNCNAME[0]}: Copy the config file and admin key to the admin node and OSD nodes"
+ log "Copy the config file and admin key to the admin node and OSD nodes"
ceph-deploy admin $mon_ip $node_ips
- echo "${FUNCNAME[0]}: Check the cluster health"
+ log "Check the cluster health"
sudo ceph health
sudo ceph -s
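When the deploy succeeds, the health check should settle at HEALTH_OK, e.g. (abbreviated, illustrative output):

  $ sudo ceph health
  HEALTH_OK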
@@ -108,22 +114,22 @@ EOF
# rbd is not included in default kube-controller-manager... use attcomdev version
sudo sed -i -- 's~gcr.io/google_containers/kube-controller-manager-amd64:.*~quay.io/attcomdev/kube-controller-manager:v1.7.3~' /etc/kubernetes/manifests/kube-controller-manager.yaml
if [[ $(sudo grep -c attcomdev/kube-controller-manager /etc/kubernetes/manifests/kube-controller-manager.yaml) == 0 ]]; then
- echo "${FUNCNAME[0]}: Problem patching /etc/kubernetes/manifests/kube-controller-manager.yaml... script update needed"
+ log "Problem patching /etc/kubernetes/manifests/kube-controller-manager.yaml... script update needed"
exit 1
fi
mgr=$(kubectl get pods --all-namespaces | grep kube-controller-manager | awk '{print $4}')
while [[ "$mgr" != "Running" ]]; do
- echo "${FUNCNAME[0]}: kube-controller-manager status is $mgr. Waiting 60 seconds for it to be 'Running'"
+ log "kube-controller-manager status is $mgr. Waiting 60 seconds for it to be 'Running'"
sleep 60
mgr=$(kubectl get pods --all-namespaces | grep kube-controller-manager | awk '{print $4}')
done
- echo "${FUNCNAME[0]}: kube-controller-manager status is $mgr"
+ log "kube-controller-manager status is $mgr"
- echo "${FUNCNAME[0]}: Create Ceph admin secret"
+ log "Create Ceph admin secret"
admin_key=$(sudo ceph auth get-key client.admin)
kubectl create secret generic ceph-secret-admin --from-literal=key="$admin_key" --namespace=kube-system --type=kubernetes.io/rbd
- echo "${FUNCNAME[0]}: Create rdb storageClass 'general'"
+ log "Create rdb storageClass 'general'"
cat <<EOF >/tmp/ceph-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
@@ -143,21 +149,21 @@ EOF
sudo chown -R ubuntu:ubuntu ~/.kube/*
kubectl create -f /tmp/ceph-sc.yaml
- echo "${FUNCNAME[0]}: Create storage pool 'kube'"
+ log "Create storage pool 'kube'"
# https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
sudo ceph osd pool create kube 32 32
- echo "${FUNCNAME[0]}: Authorize client 'kube' access to pool 'kube'"
+ log "Authorize client 'kube' access to pool 'kube'"
sudo ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
- echo "${FUNCNAME[0]}: Create ceph-secret-user secret in namespace 'default'"
+ log "Create ceph-secret-user secret in namespace 'default'"
kube_key=$(sudo ceph auth get-key client.kube)
kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" --namespace=default --type=kubernetes.io/rbd
# A similar secret must be created in other namespaces that intend to access the ceph pool
# Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
- echo "${FUNCNAME[0]}: Create andtest a persistentVolumeClaim"
+ log "Create andtest a persistentVolumeClaim"
cat <<EOF >/tmp/ceph-pvc.yaml
{
"kind": "PersistentVolumeClaim",
@@ -182,11 +188,11 @@ EOF
EOF
kubectl create -f /tmp/ceph-pvc.yaml
while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
- echo "${FUNCNAME[0]}: Waiting for pvc claim1 to be 'Bound'"
+ log "Waiting for pvc claim1 to be 'Bound'"
kubectl describe pvc
sleep 10
done
- echo "${FUNCNAME[0]}: pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
+ log "pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
kubectl get pvc
kubectl delete pvc claim1
kubectl describe pods
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index 4660881..280c045 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -32,7 +32,9 @@
#
function log() {
- echo "${FUNCNAME[0]} $(date): $1"
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
}
function setup_ceph() {
@@ -40,6 +42,10 @@ function setup_ceph() {
private_net=$2
public_net=$3
dev=$4
+
+ log "Install ceph prerequisites"
+ sudo apt-get -y install ceph ceph-common
+
# per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm
log "Clone netarbiter"
git clone https://github.com/att/netarbiter.git
@@ -94,7 +100,7 @@ nameserver $kubedns
search ceph.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
EOF
-sudo apt install -y ceph
+sudo apt install -y ceph ceph-common
sudo ceph-disk zap /dev/$dev
EOG
log "Run ceph-osd at $node"
@@ -144,14 +150,8 @@ EOG
log "pvc ceph-test successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' ceph-test)"
kubectl describe pvc
- log "Attach the pvc to a job and check if the job is successful (i.e., 1)"
+ log "Attach the pvc to a job"
kubectl create -f tests/ceph/job.yaml
- status=$(kubectl get jobs ceph-test-job -n default -o json | jq -r '.status.succeeded')
- if [[ "$status" != "1" ]]; then
- log "pvc attachment was not successful:"
- kubectl get jobs ceph-test-job -n default -o json
- exit 1
- fi
log "Verify that the test job was successful"
pod=$(kubectl get pods --namespace default | awk "/ceph-test/{print \$1}")
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index 1ef17e2..9072442 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -44,8 +44,14 @@
#. Status: work in progress, incomplete
#
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
function setup_prereqs() {
- echo "${FUNCNAME[0]}: Create prerequisite setup script"
+ log "Create prerequisite setup script"
cat <<'EOG' >/tmp/prereqs.sh
#!/bin/bash
# Basic server pre-reqs
@@ -70,15 +76,14 @@ EOF
sudo apt-get update
# Next command is to workaround bug resulting in "PersistentVolumeClaim is not bound" for pod startup (remain in Pending)
# TODO: reverify if this is still an issue in the final working script
-sudo apt-get -y install ceph ceph-common
sudo apt-get -y install --allow-downgrades kubectl=${KUBE_VERSION}-00 kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00
-# Needed for ceph setup etc
+# Needed for API output parsing
sudo apt-get -y install jq
EOG
}
function setup_k8s_master() {
- echo "${FUNCNAME[0]}: Setting up kubernetes master"
+ log "Setting up kubernetes master"
setup_prereqs
# Install master
@@ -89,35 +94,35 @@ function setup_k8s_master() {
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 >>/tmp/kubeadm.out
cat /tmp/kubeadm.out
export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
- echo "${FUNCNAME[0]}: Cluster join command for manual use if needed: $k8s_joincmd"
+ log "Cluster join command for manual use if needed: $k8s_joincmd"
# Start cluster
- echo "${FUNCNAME[0]}: Start the cluster"
+ log "Start the cluster"
mkdir -p $HOME/.kube
sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Deploy pod network
- echo "${FUNCNAME[0]}: Deploy calico as CNI"
+ log "Deploy calico as CNI"
sudo kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
}
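For reference, the join command grepped out of /tmp/kubeadm.out has the kubeadm-1.7-era form (token and master address are placeholders):

  kubeadm join --token 8c2350.f55343444a6ffc46 10.0.0.1:6443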
function setup_k8s_agents() {
agents="$1"
export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
- echo "${FUNCNAME[0]}: Installing agents at $1 with joincmd: $k8s_joincmd"
+ log "Installing agents at $1 with joincmd: $k8s_joincmd"
setup_prereqs
kubedns=$(kubectl get pods --all-namespaces | grep kube-dns | awk '{print $4}')
while [[ "$kubedns" != "Running" ]]; do
- echo "${FUNCNAME[0]}: kube-dns status is $kubedns. Waiting 60 seconds for it to be 'Running'"
+ log "kube-dns status is $kubedns. Waiting 60 seconds for it to be 'Running'"
sleep 60
kubedns=$(kubectl get pods --all-namespaces | grep kube-dns | awk '{print $4}')
done
- echo "${FUNCNAME[0]}: kube-dns status is $kubedns"
+ log "kube-dns status is $kubedns"
for agent in $agents; do
- echo "${FUNCNAME[0]}: Install agent at $agent"
+ log "Install agent at $agent"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/prereqs.sh ubuntu@$agent:/tmp/prereqs.sh
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$agent bash /tmp/prereqs.sh agent
# Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet is not empty" per https://github.com/kubernetes/kubeadm/issues/1
@@ -125,30 +130,30 @@ function setup_k8s_agents() {
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$agent sudo $k8s_joincmd
done
- echo "${FUNCNAME[0]}: Cluster is ready when all nodes in the output of 'kubectl get nodes' show as 'Ready'."
+ log "Cluster is ready when all nodes in the output of 'kubectl get nodes' show as 'Ready'."
}
function wait_for_service() {
- echo "${FUNCNAME[0]}: Waiting for service $1 to be available"
+ log "Waiting for service $1 to be available"
pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- echo "${FUNCNAME[0]}: Service $1 is at pod $pod"
+ log "Service $1 is at pod $pod"
ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
while [[ "$ready" != "true" ]]; do
- echo "${FUNCNAME[0]}: $1 container is not yet ready... waiting 10 seconds"
+ log "$1 container is not yet ready... waiting 10 seconds"
sleep 10
# TODO: figure out why transient pods sometimes mess up this logic, thus need to re-get the pods
pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
done
- echo "${FUNCNAME[0]}: pod $pod container status is $ready"
+ log "pod $pod container status is $ready"
host_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.hostIP}' $pod)
port=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services $1)
- echo "${FUNCNAME[0]}: pod $pod container is at host $host_ip and port $port"
+ log "pod $pod container is at host $host_ip and port $port"
while ! curl http://$host_ip:$port ; do
- echo "${FUNCNAME[0]}: $1 service is not yet responding... waiting 10 seconds"
+ log "$1 service is not yet responding... waiting 10 seconds"
sleep 10
done
- echo "${FUNCNAME[0]}: $1 is available at http://$host_ip:$port"
+ log "$1 is available at http://$host_ip:$port"
}
function demo_chart() {
@@ -211,7 +216,7 @@ function demo_chart() {
wait_for_service oc-owncloud
;;
*)
- echo "${FUNCNAME[0]}: demo not implemented for $1"
+ log "demo not implemented for $1"
esac
# extra useful commands
# kubectl describe pvc
@@ -225,7 +230,7 @@ function demo_chart() {
}
function setup_helm() {
- echo "${FUNCNAME[0]}: Setup helm"
+ log "Setup helm"
# Install Helm
cd ~
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
@@ -242,11 +247,11 @@ function setup_helm() {
# Wait till tiller is running
tiller_deploy=$(kubectl get pods --all-namespaces | grep tiller-deploy | awk '{print $4}')
while [[ "$tiller_deploy" != "Running" ]]; do
- echo "${FUNCNAME[0]}: tiller-deploy status is $tiller_deploy. Waiting 60 seconds for it to be 'Running'"
+ log "tiller-deploy status is $tiller_deploy. Waiting 60 seconds for it to be 'Running'"
sleep 60
tiller_deploy=$(kubectl get pods --all-namespaces | grep tiller-deploy | awk '{print $4}')
done
- echo "${FUNCNAME[0]}: tiller-deploy status is $tiller_deploy"
+ log "tiller-deploy status is $tiller_deploy"
# Install services via helm charts from https://kubeapps.com/charts
# e.g. helm install stable/dokuwiki
diff --git a/tools/maas/deploy.sh b/tools/maas/deploy.sh
index 1c0880f..55984da 100644
--- a/tools/maas/deploy.sh
+++ b/tools/maas/deploy.sh
@@ -26,20 +26,26 @@
#. <hosts>: space separated list of hostnames managed by MAAS
#. <extras>: optional name of script for extra setup functions as needed
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
function wait_node_status() {
status=$(maas opnfv machines read hostname=$1 | jq -r ".[0].status_name")
while [[ "x$status" != "x$2" ]]; do
- echo "$0 $(date): $1 status is $status ... waiting for it to be $2"
+ log "$1 status is $status ... waiting for it to be $2"
sleep 30
status=$(maas opnfv machines read hostname=$1 | jq -r ".[0].status_name")
done
- echo "$0 $(date): $1 status is $status"
+ log "$1 status is $status"
}
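Typical MAAS 2.x status_name values the loop waits through are Commissioning, Ready, Allocated, Deploying, and Deployed; a sketch of a call:

  wait_node_status node1 Deployed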
function release_nodes() {
nodes=$1
for node in $nodes; do
- echo "$0 $(date): Releasing node $node"
+ log "Releasing node $node"
id=$(maas opnfv machines read hostname=$node | jq -r '.[0].system_id')
maas opnfv machines release machines=$id
done
@@ -48,7 +54,7 @@ function release_nodes() {
function deploy_nodes() {
nodes=$1
for node in $nodes; do
- echo "$0 $(date): Deploying node $node"
+ log "Deploying node $node"
id=$(maas opnfv machines read hostname=$node | jq -r '.[0].system_id')
maas opnfv machines allocate system_id=$id
maas opnfv machine deploy $id
diff --git a/tools/prometheus/prometheus-tools.sh b/tools/prometheus/prometheus-tools.sh
index ebf9eca..072156a 100644
--- a/tools/prometheus/prometheus-tools.sh
+++ b/tools/prometheus/prometheus-tools.sh
@@ -40,6 +40,12 @@
# https://github.com/prometheus/haproxy_exporter
# https://github.com/prometheus/collectd_exporter
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
# Use this to trigger fail() at the right places
# if [ "$RESULT" == "Test Failed!" ]; then fail "message"; fi
function fail() {
@@ -49,11 +55,11 @@ function fail() {
function setup_prometheus() {
# Prerequisites
- echo "${FUNCNAME[0]}: Setting up prometheus master and agents"
+ log "Setting up prometheus master and agents"
sudo apt install -y golang-go jq
# Install Prometheus server
- echo "${FUNCNAME[0]}: Setting up prometheus master"
+ log "Setting up prometheus master"
if [[ -d ~/prometheus ]]; then rm -rf ~/prometheus; fi
mkdir ~/prometheus
cd ~/prometheus
@@ -92,7 +98,7 @@ EOF
nohup ./prometheus --config.file=prometheus.yml > /dev/null 2>&1 &
# Browse to http://host_ip:9090
- echo "${FUNCNAME[0]}: Installing exporters"
+ log "Installing exporters"
# Install exporters
# https://github.com/prometheus/node_exporter
cd ~/prometheus
@@ -104,7 +110,7 @@ EOF
# The scp and ssh actions below assume you have key-based access enabled to the nodes
for node in $nodes; do
- echo "${FUNCNAME[0]}: Setup agent at $node"
+ log "Setup agent at $node"
scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
node_exporter-0.14.0.linux-amd64/node_exporter ubuntu@$node:/home/ubuntu/node_exporter
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
@@ -117,33 +123,33 @@ EOF
host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
while ! curl -o /tmp/up http://$host_ip:9090/api/v1/query?query=up ; do
- echo "${FUNCNAME[0]}: Prometheus API is not yet responding... waiting 10 seconds"
+ log "Prometheus API is not yet responding... waiting 10 seconds"
sleep 10
done
exp=$(jq '.data.result|length' /tmp/up)
- echo "${FUNCNAME[0]}: $exp exporters are up"
+ log "$exp exporters are up"
while [[ $exp -gt 0 ]]; do
((exp--))
eip=$(jq -r ".data.result[$exp].metric.instance" /tmp/up)
job=$(jq -r ".data.result[$exp].metric.job" /tmp/up)
- echo "${FUNCNAME[0]}: $job at $eip"
+ log "$job at $eip"
done
- echo "${FUNCNAME[0]}: Prometheus dashboard is available at http://$host_ip:9090"
+ log "Prometheus dashboard is available at http://$host_ip:9090"
echo "Prometheus dashboard is available at http://$host_ip:9090" >>/tmp/summary
}
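The jq parsing above assumes the standard Prometheus query-API envelope; an abbreviated /api/v1/query?query=up response looks like (instance, job, and timestamp are placeholders):

  {"status":"success","data":{"resultType":"vector","result":[
    {"metric":{"__name__":"up","instance":"10.0.0.2:9100","job":"node"},
     "value":[1509600000.0,"1"]}]}}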
function connect_grafana() {
- echo "${FUNCNAME[0]}: Setup Grafana datasources and dashboards"
+ log "Setup Grafana datasources and dashboards"
prometheus_ip=$1
grafana_ip=$2
while ! curl -X POST http://admin:admin@$grafana_ip:3000/api/login/ping ; do
- echo "${FUNCNAME[0]}: Grafana API is not yet responding... waiting 10 seconds"
+ log "Grafana API is not yet responding... waiting 10 seconds"
sleep 10
done
- echo "${FUNCNAME[0]}: Setup Prometheus datasource for Grafana"
+ log "Setup Prometheus datasource for Grafana"
cd ~/prometheus/
cat >datasources.json <<EOF
{"name":"Prometheus", "type":"prometheus", "access":"proxy", \
@@ -157,9 +163,9 @@ EOF
if [[ "$(jq -r '.message' /tmp/json)" != "Datasource added" ]]; then
fail "Datasource creation failed"
fi
- echo "${FUNCNAME[0]}: Prometheus datasource for Grafana added"
+ log "Prometheus datasource for Grafana added"
- echo "${FUNCNAME[0]}: Import Grafana dashboards"
+ log "Import Grafana dashboards"
# Setup Prometheus dashboards
# https://grafana.com/dashboards?dataSource=prometheus
# To add additional dashboards, browse the URL above and import the dashboard via the id displayed for the dashboard
@@ -170,9 +176,9 @@ EOF
for board in $boards; do
curl -X POST -u admin:admin -H "Accept: application/json" -H "Content-type: application/json" -d @${board} http://$grafana_ip:3000/api/dashboards/db
done
- echo "${FUNCNAME[0]}: Grafana dashboards are available at http://$host_ip:3000 (login as admin/admin)"
+ log "Grafana dashboards are available at http://$host_ip:3000 (login as admin/admin)"
echo "Grafana dashboards are available at http://$host_ip:3000 (login as admin/admin)" >>/tmp/summary
- echo "${FUNCNAME[0]}: Grafana API is available at http://admin:admin@$host_ip:3000/api/v1/query?query=<string>"
+ log "Grafana API is available at http://admin:admin@$host_ip:3000/api/v1/query?query=<string>"
echo "Grafana API is available at http://admin:admin@$host_ip:3000/api/v1/query?query=<string>" >>/tmp/summary
}
@@ -182,14 +188,14 @@ function run_and_connect_grafana() {
sudo docker run -d -p 3000:3000 --name grafana grafana/grafana
status=$(sudo docker inspect grafana | jq -r '.[0].State.Status')
while [[ "x$status" != "xrunning" ]]; do
- echo "${FUNCNAME[0]}: Grafana container state is ($status)"
+ log "Grafana container state is ($status)"
sleep 10
status=$(sudo docker inspect grafana | jq -r '.[0].State.Status')
done
- echo "${FUNCNAME[0]}: Grafana container state is $status"
+ log "Grafana container state is $status"
connect_grafana $host_ip $host_ip
- echo "${FUNCNAME[0]}: connect_grafana complete"
+ log "connect_grafana complete"
}
nodes=$2
diff --git a/tools/rancher/rancher-cluster.sh b/tools/rancher/rancher-cluster.sh
index ea8b16d..129042f 100644
--- a/tools/rancher/rancher-cluster.sh
+++ b/tools/rancher/rancher-cluster.sh
@@ -40,11 +40,17 @@
#. See below for function-specific usage
#.
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
# Install master
function setup_master() {
docker_installed=$(dpkg-query -W --showformat='${Status}\n' docker-ce | grep -c "install ok")
if [[ $docker_installed == 0 ]]; then
- echo "${FUNCNAME[0]}: installing and starting docker"
+ log "installing and starting docker"
# Per https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
sudo apt-get remove -y docker docker-engine docker.io
sudo apt-get update
@@ -64,23 +70,23 @@ function setup_master() {
sudo apt-get update
sudo apt-get install -y docker-ce
- echo "${FUNCNAME[0]}: installing jq"
+ log "installing jq"
sudo apt-get install -y jq
fi
- echo "${FUNCNAME[0]}: installing rancher server (master)"
+ log "installing rancher server (master)"
sudo docker run -d --restart=unless-stopped -p 8080:8080 --name rancher rancher/server
- echo "${FUNCNAME[0]}: wait until server is up at http://$1:8080"
+ log "wait until server is up at http://$1:8080"
delay=0
id=$(wget -qO- http://$1:8080/v2-beta/projects/ | jq -r '.data[0].id')
while [[ "$id" == "" ]]; do
- echo "${FUNCNAME[0]}: rancher server is not yet up, checking again in 10 seconds"
+ log "rancher server is not yet up, checking again in 10 seconds"
sleep 10
let delay=$delay+10
id=$(wget -qO- http://$1:8080/v2-beta/projects/ | jq -r '.data[0].id')
done
- echo "${FUNCNAME[0]}: rancher server is up after $delay seconds"
+ log "rancher server is up after $delay seconds"
rm -rf ~/rancher
mkdir ~/rancher
@@ -89,21 +95,21 @@ function setup_master() {
# Install rancher CLI tools
# Usage example: install_cli_tools 172.16.0.2
function install_cli_tools() {
- echo "${FUNCNAME[0]}: installing rancher CLI tools for master $1"
+ log "installing rancher CLI tools for master $1"
cd ~
- echo "${FUNCNAME[0]}: install Rancher CLI"
+ log "install Rancher CLI"
rm -rf rancher-v0.6.3
wget -q https://releases.rancher.com/cli/v0.6.3/rancher-linux-amd64-v0.6.3.tar.gz
gzip -d -f rancher-linux-amd64-v0.6.3.tar.gz
tar -xvf rancher-linux-amd64-v0.6.3.tar
sudo mv rancher-v0.6.3/rancher /usr/bin/rancher
- echo "${FUNCNAME[0]}: install Rancher Compose"
+ log "install Rancher Compose"
rm -rf rancher-compose-v0.12.5
wget -q https://releases.rancher.com/compose/v0.12.5/rancher-compose-linux-amd64-v0.12.5.tar.gz
gzip -d -f rancher-compose-linux-amd64-v0.12.5.tar.gz
tar -xvf rancher-compose-linux-amd64-v0.12.5.tar
sudo mv rancher-compose-v0.12.5/rancher-compose /usr/bin/rancher-compose
- echo "${FUNCNAME[0]}: setup Rancher CLI environment"
+ log "setup Rancher CLI environment"
# CLI setup http://rancher.com/docs/rancher/v1.6/en/cli/
# Under the UI "API" select "Add account API key" and name it. Export the keys:
# The following scripted approach assumes you have 1 project/environment (Default)
@@ -123,7 +129,7 @@ $RANCHER_SECRET_KEY
EOF
master=$(rancher config --print | jq -r '.url' | cut -d '/' -f 3)
- echo "${FUNCNAME[0]}: Create registration token"
+ log "Create registration token"
# added sleep to allow server time to be ready to create registration tokens (otherwise error is returned)
sleep 5
curl -s -o /tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
@@ -132,22 +138,22 @@ EOF
curl -s -o /tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
done
id=$(jq -r ".id" /tmp/token)
- echo "${FUNCNAME[0]}: registration token id=$id"
+ log "registration token id=$id"
- echo "${FUNCNAME[0]}: wait until registration command is created"
+ log "wait until registration command is created"
command=$(curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' http://$master/v1/registrationtokens/$id | jq -r '.command')
while [[ "$command" == "null" ]]; do
- echo "${FUNCNAME[0]}: registration command is not yet created, checking again in 10 seconds"
+ log "registration command is not yet created, checking again in 10 seconds"
sleep 10
command=$(curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' http://$master/v1/registrationtokens/$id | jq -r '.command')
done
export RANCHER_REGISTER_COMMAND="$command"
-# echo "${FUNCNAME[0]}: activate rancher debug"
+# log "activate rancher debug"
# export RANCHER_CLIENT_DEBUG=true
- echo "${FUNCNAME[0]}: Install docker-compose for syntax checks"
+ log "Install docker-compose for syntax checks"
sudo apt install -y docker-compose
cd ~/rancher
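The captured RANCHER_REGISTER_COMMAND is typically of the Rancher 1.6 agent form (agent version, server address, and token are placeholders):

  sudo docker run --rm --privileged \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /var/lib/rancher:/var/lib/rancher \
    rancher/agent:v1.2.6 http://10.0.0.1:8080/v1/scripts/<registration-token>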
@@ -156,43 +162,43 @@ EOF
# Start an agent host
# Usage example: start_host Default 172.16.0.7
function setup_agent() {
- echo "${FUNCNAME[0]}: SSH to host $2 in env $1 and execute registration command"
+ log "SSH to host $2 in env $1 and execute registration command"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$2 "sudo apt-get install -y docker.io; sudo service docker start"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$2 $RANCHER_REGISTER_COMMAND
- echo "${FUNCNAME[0]}: wait until agent $2 is active"
+ log "wait until agent $2 is active"
delay=0
id=$(rancher hosts | awk "/$2/{print \$1}")
while [[ "$id" == "" ]]; do
- echo "${FUNCNAME[0]}: agent $2 is not yet created, checking again in 10 seconds"
+ log "agent $2 is not yet created, checking again in 10 seconds"
sleep 10
let delay=$delay+10
id=$(rancher hosts | awk "/$2/{print \$1}")
done
- echo "${FUNCNAME[0]}: agent $2 id=$id"
+ log "agent $2 id=$id"
state=$(rancher inspect $id | jq -r '.state')
while [[ "$state" != "active" ]]; do
- echo "${FUNCNAME[0]}: host $2 state is $state, checking again in 10 seconds"
+ log "host $2 state is $state, checking again in 10 seconds"
sleep 10
let delay=$delay+10
state=$(rancher inspect $id | jq -r '.state')
done
- echo "${FUNCNAME[0]}: agent $2 state is $state after $delay seconds"
+ log "agent $2 state is $state after $delay seconds"
}
# Delete an agent host
# Usage example: stop_agent 172.16.0.7
function stop_agent() {
- echo "${FUNCNAME[0]}: deleting host $1"
+ log "deleting host $1"
rancher rm --stop $(rancher hosts | awk "/$1/{print \$1}")
}
# Test service at access points
# Usage example: check_service nginx/nginx http "Welcome to nginx!"
function check_service() {
- echo "${FUNCNAME[0]}: checking service state for $1 over $2 with match string $3"
+ log "checking service state for $1 over $2 with match string $3"
service=$1
scheme=$2
match="$3"
@@ -217,7 +223,7 @@ function wait_till_healthy() {
tries=$2
let delay=$tries*10
- echo "${FUNCNAME[0]}: waiting for service $service to be ready in $delay seconds"
+ log "waiting for service $service to be ready in $delay seconds"
id=$(rancher ps | grep " $service " | awk "{print \$1}")
health=$(rancher inspect $id | jq -r ".healthState")
state=$(rancher inspect $id | jq -r ".state")
@@ -233,7 +239,7 @@ function wait_till_healthy() {
# Usage example: start_simple_service nginx nginx:latest 8081:80 3
# Usage example: start_simple_service dokuwiki ununseptium/dokuwiki-docker 8082:80 2
function start_simple_service() {
- echo "${FUNCNAME[0]}: starting service $1 with image $2, ports $3, and scale $4"
+ log "starting service $1 with image $2, ports $3, and scale $4"
service=$1
image=$2
# port is either a single (unexposed) port, or a source:target pair (source
@@ -241,10 +247,10 @@ function start_simple_service() {
ports=$3
scale=$4
- echo "${FUNCNAME[0]}: creating service folder ~/rancher/$service"
+ log "creating service folder ~/rancher/$service"
mkdir ~/rancher/$service
cd ~/rancher/$service
- echo "${FUNCNAME[0]}: creating docker-compose.yml"
+ log "creating docker-compose.yml"
# Define service via docker-compose.yml
cat <<EOF >docker-compose.yml
version: '2'
@@ -255,10 +261,10 @@ services:
- "$ports"
EOF
- echo "${FUNCNAME[0]}: syntax checking docker-compose.yml"
+ log "syntax checking docker-compose.yml"
docker-compose -f docker-compose.yml config
- echo "${FUNCNAME[0]}: creating rancher-compose.yml"
+ log "creating rancher-compose.yml"
cat <<EOF >rancher-compose.yml
version: '2'
services:
@@ -267,7 +273,7 @@ services:
scale: $scale
EOF
- echo "${FUNCNAME[0]}: starting service $service"
+ log "starting service $service"
rancher up -s $service -d
wait_till_healthy "$service/$service" 6
@@ -278,13 +284,13 @@ EOF
# Usage example: lb_service nginx 8000 8081
# Usage example: lb_service dokuwiki 8001 8082
function lb_service() {
- echo "${FUNCNAME[0]}: adding load balancer port $2 to service $1, port $3"
+ log "adding load balancer port $2 to service $1, port $3"
service=$1
lbport=$2
port=$3
cd ~/rancher/$service
- echo "${FUNCNAME[0]}: creating docker-compose-lb.yml"
+ log "creating docker-compose-lb.yml"
# Define lb service via docker-compose.yml
cat <<EOF >docker-compose-lb.yml
version: '2'
@@ -295,10 +301,10 @@ services:
image: rancher/lb-service-haproxy:latest
EOF
- echo "${FUNCNAME[0]}: syntax checking docker-compose-lb.yml"
+ log "syntax checking docker-compose-lb.yml"
docker-compose -f docker-compose-lb.yml config
- echo "${FUNCNAME[0]}: creating rancher-compose-lb.yml"
+ log "creating rancher-compose-lb.yml"
cat <<EOF >rancher-compose-lb.yml
version: '2'
services:
@@ -317,7 +323,7 @@ services:
response_timeout: 2000
EOF
- echo "${FUNCNAME[0]}: starting service lb"
+ log "starting service lb"
rancher up -s $service -d --file docker-compose-lb.yml --rancher-file rancher-compose-lb.yml
wait_till_healthy "$service/lb" 6
@@ -327,7 +333,7 @@ EOF
# Change scale of a service
# Usage example: scale_service nginx 1
function scale_service() {
- echo "${FUNCNAME[0]}: scaling service $1 to $2 instances"
+ log "scaling service $1 to $2 instances"
id=$(rancher ps | grep " $1 " | awk '{print $1}')
rancher scale $id=$2
@@ -348,20 +354,20 @@ function public_endpoint() {
id=$(rancher ps | grep " $1 " | awk "{print \$1}")
ip=$(rancher inspect $id | jq -r ".publicEndpoints[0].ipAddress")
port=$(rancher inspect $id | jq -r ".publicEndpoints[0].port")
- echo "${FUNCNAME[0]}: $1 is accessible at http://$ip:$port"
+ log "$1 is accessible at http://$ip:$port"
}
# Stop a stack
# Usage example: stop_stack nginx
function stop_stack() {
- echo "${FUNCNAME[0]}: stopping stack $1"
+ log "stopping stack $1"
rancher stop $(rancher stacks | awk "/$1/{print \$1}")
}
# Start a stopped stack
# Usage example: start_stack nginx
function start_stack() {
- echo "${FUNCNAME[0]}: starting stack $1"
+ log "starting stack $1"
rancher start $(rancher stacks | awk "/$1/{print \$1}")
wait_till_healthy $1 6
}
@@ -370,7 +376,7 @@ function start_stack() {
# Usage example: delete_stack dokuwiki
function delete_stack() {
id=$(rancher stacks | grep "$1" | awk "{print \$1}")
- echo "${FUNCNAME[0]}: deleting stack $1 with id $id"
+ log "deleting stack $1 with id $id"
rancher rm --stop $id
}
@@ -378,24 +384,24 @@ function delete_stack() {
# Usage example: delete_service nginx/lb
function delete_service() {
id=$(rancher ps | grep "$1" | awk "{print \$1}")
- echo "${FUNCNAME[0]}: deleting service $1 with id $id"
+ log "deleting service $1 with id $id"
rancher rm --stop $id
}
# Start a complex service, i.e. with yaml file customizations
# Usage example: start_complex_service grafana 3000:3000 1
function start_complex_service() {
- echo "${FUNCNAME[0]}: starting service $1 at ports $2, and scale $3"
+ log "starting service $1 at ports $2, and scale $3"
service=$1
# port is either a single (unexposed) port, or a source:target pair (source
# is the external port)
ports=$2
scale=$3
- echo "${FUNCNAME[0]}: creating service folder ~/rancher/$service"
+ log "creating service folder ~/rancher/$service"
mkdir ~/rancher/$service
cd ~/rancher/$service
- echo "${FUNCNAME[0]}: creating docker-compose.yml"
+ log "creating docker-compose.yml"
# Define service via docker-compose.yml
case "$service" in
grafana)
@@ -414,7 +420,7 @@ EOF
*)
esac
- echo "${FUNCNAME[0]}: starting service $service"
+ log "starting service $service"
rancher up -s $service -d
wait_till_healthy "$service/$service" 6
@@ -455,7 +461,7 @@ function demo() {
end=`date +%s`
runtime=$((end-start))
runtime=$((runtime/60))
- echo "${FUNCNAME[0]}: Demo duration = $runtime minutes"
+ log "Demo duration = $runtime minutes"
}
# Automate the installation