author     2017-10-31 07:20:28 -0700
committer  2017-10-31 07:20:28 -0700
commit     63533a0058f9e313dbd818fceb5551813f0a8950 (patch)
tree       2ee24c53d5d4c61263f615d9910b620a093099f6
parent     4518364b4ce515884eadf476ae6db021210bd7f0 (diff)
Add log function; fix line endings
JIRA: MODELS-23
Change-Id: Ie464181659db2d229dc83b9877dea2a64d6bb06b
Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
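For reference, the headline change replaces the repeated inline echo "${FUNCNAME[0]}: ..." calls with a shared log helper. A minimal standalone sketch of the new pattern (the helper, caller, and message are taken from the patch below; the output shown is illustrative):

#!/bin/bash
# Sketch of the log() helper added by this commit. Note a bash subtlety:
# inside the function body, ${FUNCNAME[0]} expands to "log" itself; the
# name of the calling function would be ${FUNCNAME[1]}.
function log() {
  echo "${FUNCNAME[0]} $(date): $1"
}

function setup_ceph() {
  log "Clone netarbiter"
}

# Prints a line such as:
#   log Tue Oct 31 07:20:28 PDT 2017: Clone netarbiter
setup_ceph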
-rw-r--r--  tools/kubernetes/ceph-helm.sh   | 61
-rw-r--r--  tools/kubernetes/demo_deploy.sh |  6
-rw-r--r--  tools/kubernetes/k8s-cluster.sh |  3
-rw-r--r--  tools/maas/deploy.sh            |  8
4 files changed, 42 insertions(+), 36 deletions(-)
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index 534fd86..73a32b0 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -31,20 +31,24 @@
 #. Status: work in progress, incomplete
 #
 
+function log() {
+  echo "${FUNCNAME[0]} $(date): $1"
+}
+
 function setup_ceph() {
   nodes=$1
   private_net=$2
   public_net=$3
   dev=$4
   # per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm
-  echo "${FUNCNAME[0]}: Clone netarbiter"
+  log "Clone netarbiter"
   git clone https://github.com/att/netarbiter.git
   cd netarbiter/sds/ceph-docker/examples/helm
 
-  echo "${FUNCNAME[0]}: Prepare a ceph namespace in your K8s cluster"
+  log "Prepare a ceph namespace in your K8s cluster"
   ./prep-ceph-ns.sh
 
-  echo "${FUNCNAME[0]}: Run ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
+  log "Run ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
   # Pre-req per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm#notes
   kubedns=$(kubectl get service -o json --namespace kube-system kube-dns | \
     jq -r '.spec.clusterIP')
@@ -57,32 +61,32 @@ EOF
 
   ./helm-install-ceph.sh cephtest $private_net $public_net
 
-  echo "${FUNCNAME[0]}: Check the pod status of ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
+  log "Check the pod status of ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
   services="rbd-provisioner ceph-mon-0 ceph-mgr ceph-mon-check"
   for service in $services; do
     pod=$(kubectl get pods --namespace ceph | awk "/$service/{print \$1}")
     status=$(kubectl get pods --namespace ceph $pod -o json | jq -r '.status.phase')
     while [[ "x$status" != "xRunning" ]]; do
-      echo "${FUNCNAME[0]}: $pod status is \"$status\". Waiting 10 seconds for it to be 'Running'"
+      log "$pod status is \"$status\". Waiting 10 seconds for it to be 'Running'"
       sleep 10
       status=$(kubectl get pods --namespace ceph $pod -o json | jq -r '.status.phase')
     done
   done
   kubectl get pods --namespace ceph
 
-  echo "${FUNCNAME[0]}: Check ceph health status"
+  log "Check ceph health status"
   status=$(kubectl -n ceph exec -it ceph-mon-0 -- ceph -s | awk "/health:/{print \$2}")
   while [[ "x$status" != "xHEALTH_OK" ]]; do
-    echo "${FUNCNAME[0]}: ceph status is \"$status\". Waiting 10 seconds for it to be 'HEALTH_OK'"
+    log "ceph status is \"$status\". Waiting 10 seconds for it to be 'HEALTH_OK'"
     kubectl -n ceph exec -it ceph-mon-0 -- ceph -s
     sleep 10
     status=$(kubectl -n ceph exec -it ceph-mon-0 -- ceph -s | awk "/health:/{print \$2}")
   done
-  echo "${FUNCNAME[0]}: ceph status is 'HEALTH_OK'"
+  log "ceph status is 'HEALTH_OK'"
   kubectl -n ceph exec -it ceph-mon-0 -- ceph -s
 
   for node in $nodes; do
-    echo "${FUNCNAME[0]}: setup resolv.conf for $node"
+    log "install ceph, setup resolv.conf, zap disk for $node"
     ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
       ubuntu@$node <<EOG
 cat <<EOF | sudo tee /etc/resolv.conf
@@ -90,11 +94,10 @@ nameserver $kubedns
 search ceph.svc.cluster.local svc.cluster.local cluster.local
 options ndots:5
 EOF
+sudo apt install -y ceph
+sudo ceph-disk zap /dev/$dev
 EOG
-    echo "${FUNCNAME[0]}: Zap disk $dev at $node"
-    ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
-      ubuntu@$node sudo ceph-disk zap /dev/$dev
-    echo "${FUNCNAME[0]}: Run ceph-osd at $node"
+    log "Run ceph-osd at $node"
     name=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
       ubuntu@$node hostname)
     ./helm-install-ceph-osd.sh $name /dev/$dev
@@ -104,66 +107,66 @@ EOG
     name=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
       ubuntu@$node hostname)
     pod=$(kubectl get pods --namespace ceph | awk "/$name/{print \$1}")
-    echo "${FUNCNAME[0]}: verify ceph-osd is Running at node $name"
+    log "verify ceph-osd is Running at node $name"
     status=$(kubectl get pods --namespace ceph $pod | awk "/$pod/ {print \$3}")
     while [[ "x$status" != "xRunning" ]]; do
-      echo "${FUNCNAME[0]}: $pod status is $status. Waiting 10 seconds for it to be Running."
+      log "$pod status is $status. Waiting 10 seconds for it to be Running."
       sleep 10
       status=$(kubectl get pods --namespace ceph $pod | awk "/$pod/ {print \$3}")
       kubectl get pods --namespace ceph
     done
   done
 
-  echo "${FUNCNAME[0]}: WORKAROUND take ownership of .kube"
+  log "WORKAROUND take ownership of .kube"
   # TODO: find out why this is needed
   sudo chown -R ubuntu:ubuntu ~/.kube/*
 
-  echo "${FUNCNAME[0]}: Activate Ceph for namespace 'default'"
+  log "Activate Ceph for namespace 'default'"
   ./activate-namespace.sh default
 
-  echo "${FUNCNAME[0]}: Relax access control rules"
+  log "Relax access control rules"
   kubectl replace -f relax-rbac-k8s1.7.yaml
 
-  echo "${FUNCNAME[0]}: Setup complete, running smoke tests"
-  echo "${FUNCNAME[0]}: Create a pool from a ceph-mon pod (e.g., ceph-mon-0)"
+  log "Setup complete, running smoke tests"
+  log "Create a pool from a ceph-mon pod (e.g., ceph-mon-0)"
   kubectl -n ceph exec -it ceph-mon-0 -- ceph osd pool create rbd 100 100
 
-  echo "${FUNCNAME[0]}: Create a pvc and check if the pvc status is Bound"
+  log "Create a pvc and check if the pvc status is Bound"
   kubectl create -f tests/ceph/pvc.yaml
   status=$(kubectl get pvc ceph-test -o json | jq -r '.status.phase')
   while [[ "$status" != "Bound" ]]; do
-    echo "${FUNCNAME[0]}: pvc status is $status, waiting 10 seconds for it to be Bound"
+    log "pvc status is $status, waiting 10 seconds for it to be Bound"
     sleep 10
     status=$(kubectl get pvc ceph-test -o json | jq -r '.status.phase')
   done
-  echo "${FUNCNAME[0]}: pvc ceph-test successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' ceph-test)"
+  log "pvc ceph-test successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' ceph-test)"
   kubectl describe pvc
 
-  echo "${FUNCNAME[0]}: Attach the pvc to a job and check if the job is successful (i.e., 1)"
+  log "Attach the pvc to a job and check if the job is successful (i.e., 1)"
   kubectl create -f tests/ceph/job.yaml
   status=$(kubectl get jobs ceph-secret-generator -n ceph -o json | jq -r '.status.succeeded')
   if [[ "$status" != "1" ]]; then
-    echo "${FUNCNAME[0]}: pvc attachment was not successful:"
+    log "pvc attachment was not successful:"
     kubectl get jobs ceph-secret-generator -n ceph -o json
     exit 1
   fi
 
-  echo "${FUNCNAME[0]}: Verify that the test job was successful"
+  log "Verify that the test job was successful"
   pod=$(kubectl get pods --namespace default | awk "/ceph-test/{print \$1}")
   active=$(kubectl get jobs --namespace default -o json ceph-test-job | jq -r '.status.active')
   while [[ $active > 0 ]]; do
-    echo "${FUNCNAME[0]}: test job is still running, waiting 10 seconds for it to complete"
+    log "test job is still running, waiting 10 seconds for it to complete"
     kubectl describe pods --namespace default $pod | awk '/Events:/{y=1;next}y'
     sleep 10
     active=$(kubectl get jobs --namespace default -o json ceph-test-job | jq -r '.status.active')
   done
-  echo "${FUNCNAME[0]}: test job succeeded"
+  log "test job succeeded"
 
   kubectl delete jobs ceph-secret-generator -n ceph
   kubectl delete pvc ceph-test
-  echo "${FUNCNAME[0]}: Ceph setup complete!"
+  log "Ceph setup complete!"
 }
 
 if [[ "$1" != "" ]]; then
diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 8d5cbd2..7489622 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -56,7 +56,7 @@ eval `ssh-agent`
 ssh-add $key
 if [[ "x$extras" != "x" ]]; then source $extras; fi
 scp -o StrictHostKeyChecking=no $key ubuntu@$master:/home/ubuntu/$key
-echo "Setting up kubernetes..."
+echo "$0 $(date): Setting up kubernetes..."
 scp -r -o StrictHostKeyChecking=no ~/models/tools/kubernetes/* \
   ubuntu@$master:/home/ubuntu/.
 ssh -x -o StrictHostKeyChecking=no ubuntu@$master <<EOF
@@ -78,7 +78,7 @@ ssh-add $key
 cd models/tools/prometheus
 bash prometheus-tools.sh all "$workers"
 EOF
-echo "Setting up cloudify..."
+echo "$0 $(date): Setting up cloudify..."
 scp -r -o StrictHostKeyChecking=no ~/models/tools/cloudify \
   ubuntu@$master:/home/ubuntu/.
 ssh -x -o StrictHostKeyChecking=no ubuntu@$master \
@@ -88,7 +88,7 @@ ssh -x -o StrictHostKeyChecking=no ubuntu@$master \
 ssh -x -o StrictHostKeyChecking=no ubuntu@$master \
   bash cloudify/k8s-cloudify.sh demo
-echo "All done!"
+echo "$0 $(date): All done!"
 export NODE_PORT=$(ssh -x -o StrictHostKeyChecking=no ubuntu@$master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
 export NODE_IP=$(ssh -x -o StrictHostKeyChecking=no ubuntu@$master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
 echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index 858eca3..1ef17e2 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -38,6 +38,9 @@
 #. $ bash k8s-cluster.sh all "<nodes>" <cluster-net> <public-net> <ceph-mode> [ceph_dev]
 #.   Runs all the steps above
 #.
+#. When deployment is complete, the k8s API will be available at the master
+#. node, e.g. via: curl -k https://<master-ip>:6443/api/v1
+#.
 #. Status: work in progress, incomplete
 #
diff --git a/tools/maas/deploy.sh b/tools/maas/deploy.sh
index 18373cc..1c0880f 100644
--- a/tools/maas/deploy.sh
+++ b/tools/maas/deploy.sh
@@ -29,17 +29,17 @@ function wait_node_status() {
   status=$(maas opnfv machines read hostname=$1 | jq -r ".[0].status_name")
   while [[ "x$status" != "x$2" ]]; do
-    echo "$1 status is $status ... waiting for it to be $2"
+    echo "$0 $(date): $1 status is $status ... waiting for it to be $2"
     sleep 30
     status=$(maas opnfv machines read hostname=$1 | jq -r ".[0].status_name")
   done
-  echo "$1 status is $status"
+  echo "$0 $(date): $1 status is $status"
 }
 
 function release_nodes() {
   nodes=$1
   for node in $nodes; do
-    echo "Releasing node $node"
+    echo "$0 $(date): Releasing node $node"
     id=$(maas opnfv machines read hostname=$node | jq -r '.[0].system_id')
     maas opnfv machines release machines=$id
   done
@@ -48,7 +48,7 @@ function release_nodes() {
 function deploy_nodes() {
   nodes=$1
   for node in $nodes; do
-    echo "Deploying node $node"
+    echo "$0 $(date): Deploying node $node"
     id=$(maas opnfv machines read hostname=$node | jq -r '.[0].system_id')
     maas opnfv machines allocate system_id=$id
     maas opnfv machine deploy $id
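The usage note added to k8s-cluster.sh above points at the kube-apiserver's secure port. A minimal sketch of that check, with <master-ip> as a placeholder for the master node's address (depending on the cluster's RBAC settings, an unauthenticated request may return a 401/403 JSON reply rather than the resource list):

# -k skips TLS verification, since the cluster's CA certificate is
# typically not in the local trust store.
curl -k https://<master-ip>:6443/api/v1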