author     Bryan Sullivan <bryan.sullivan@att.com>  2017-10-31 07:20:28 -0700
committer  Bryan Sullivan <bryan.sullivan@att.com>  2017-10-31 07:20:28 -0700
commit     63533a0058f9e313dbd818fceb5551813f0a8950 (patch)
tree       2ee24c53d5d4c61263f615d9910b620a093099f6 /tools/kubernetes/ceph-helm.sh
parent     4518364b4ce515884eadf476ae6db021210bd7f0 (diff)
Add log function; fix line endings
JIRA: MODELS-23
Change-Id: Ie464181659db2d229dc83b9877dea2a64d6bb06b
Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
Diffstat (limited to 'tools/kubernetes/ceph-helm.sh')
-rw-r--r--  tools/kubernetes/ceph-helm.sh  61
1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index 534fd86..73a32b0 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -31,20 +31,24 @@
#. Status: work in progress, incomplete
#
+function log() {
+ echo "${FUNCNAME[0]} $(date): $1"
+}
+
function setup_ceph() {
nodes=$1
private_net=$2
public_net=$3
dev=$4
# per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm
- echo "${FUNCNAME[0]}: Clone netarbiter"
+ log "Clone netarbiter"
git clone https://github.com/att/netarbiter.git
cd netarbiter/sds/ceph-docker/examples/helm
- echo "${FUNCNAME[0]}: Prepare a ceph namespace in your K8s cluster"
+ log "Prepare a ceph namespace in your K8s cluster"
./prep-ceph-ns.sh
- echo "${FUNCNAME[0]}: Run ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
+ log "Run ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
# Pre-req per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm#notes
kubedns=$(kubectl get service -o json --namespace kube-system kube-dns | \
jq -r '.spec.clusterIP')
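Note on the log() helper above: bash's FUNCNAME array indexes the call stack, so inside log() index 0 is always "log" itself; the caller's name, which the replaced echo lines printed, lives at index 1. A minimal standalone sketch of that behavior:

    #!/bin/bash
    # Demo of FUNCNAME indexing inside a logging helper: FUNCNAME[0] is the
    # current function ("log"), FUNCNAME[1] is whoever called it.
    function log() {
      echo "${FUNCNAME[1]} $(date): $1"
    }
    function setup_ceph() {
      log "Clone netarbiter"   # prints: setup_ceph <date>: Clone netarbiter
    }
    setup_ceph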
@@ -57,32 +61,32 @@ EOF
./helm-install-ceph.sh cephtest $private_net $public_net
- echo "${FUNCNAME[0]}: Check the pod status of ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
+ log "Check the pod status of ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
services="rbd-provisioner ceph-mon-0 ceph-mgr ceph-mon-check"
for service in $services; do
pod=$(kubectl get pods --namespace ceph | awk "/$service/{print \$1}")
status=$(kubectl get pods --namespace ceph $pod -o json | jq -r '.status.phase')
while [[ "x$status" != "xRunning" ]]; do
- echo "${FUNCNAME[0]}: $pod status is \"$status\". Waiting 10 seconds for it to be 'Running'"
+ log "$pod status is \"$status\". Waiting 10 seconds for it to be 'Running'"
sleep 10
status=$(kubectl get pods --namespace ceph $pod -o json | jq -r '.status.phase')
done
done
kubectl get pods --namespace ceph
- echo "${FUNCNAME[0]}: Check ceph health status"
+ log "Check ceph health status"
status=$(kubectl -n ceph exec -it ceph-mon-0 -- ceph -s | awk "/health:/{print \$2}")
while [[ "x$status" != "xHEALTH_OK" ]]; do
- echo "${FUNCNAME[0]}: ceph status is \"$status\". Waiting 10 seconds for it to be 'HEALTH_OK'"
+ log "ceph status is \"$status\". Waiting 10 seconds for it to be 'HEALTH_OK'"
kubectl -n ceph exec -it ceph-mon-0 -- ceph -s
sleep 10
status=$(kubectl -n ceph exec -it ceph-mon-0 -- ceph -s | awk "/health:/{print \$2}")
done
- echo "${FUNCNAME[0]}: ceph status is 'HEALTH_OK'"
+ log "ceph status is 'HEALTH_OK'"
kubectl -n ceph exec -it ceph-mon-0 -- ceph -s
for node in $nodes; do
- echo "${FUNCNAME[0]}: setup resolv.conf for $node"
+ log "install ceph, setup resolv.conf, zap disk for $node"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
ubuntu@$node <<EOG
cat <<EOF | sudo tee /etc/resolv.conf
@@ -90,11 +94,10 @@ nameserver $kubedns
search ceph.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
EOF
+sudo apt install -y ceph
+sudo ceph-disk zap /dev/$dev
EOG
- echo "${FUNCNAME[0]}: Zap disk $dev at $node"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$node sudo ceph-disk zap /dev/$dev
- echo "${FUNCNAME[0]}: Run ceph-osd at $node"
+ log "Run ceph-osd at $node"
name=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
ubuntu@$node hostname)
./helm-install-ceph-osd.sh $name /dev/$dev
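The hunk above merges the per-node disk zap into the existing resolv.conf heredoc session and adds the ceph package install. Because the EOG delimiter is unquoted, $kubedns and $dev are expanded by the local shell before the text is sent, while the inner EOF heredoc is interpreted by the remote shell. A minimal sketch of the pattern, with $node standing in for one entry of $nodes:

    # One ssh session, several privileged remote steps; $dev expands locally
    # before transmission because the EOG delimiter below is unquoted.
    ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
      ubuntu@$node <<EOG
    sudo apt install -y ceph
    sudo ceph-disk zap /dev/$dev
    EOG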
@@ -104,66 +107,66 @@ EOG
name=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
ubuntu@$node hostname)
pod=$(kubectl get pods --namespace ceph | awk "/$name/{print \$1}")
- echo "${FUNCNAME[0]}: verify ceph-osd is Running at node $name"
+ log "verify ceph-osd is Running at node $name"
status=$(kubectl get pods --namespace ceph $pod | awk "/$pod/ {print \$3}")
while [[ "x$status" != "xRunning" ]]; do
- echo "${FUNCNAME[0]}: $pod status is $status. Waiting 10 seconds for it to be Running."
+ log "$pod status is $status. Waiting 10 seconds for it to be Running."
sleep 10
status=$(kubectl get pods --namespace ceph $pod | awk "/$pod/ {print \$3}")
kubectl get pods --namespace ceph
done
done
- echo "${FUNCNAME[0]}: WORKAROUND take ownership of .kube"
+ log "WORKAROUND take ownership of .kube"
# TODO: find out why this is needed
sudo chown -R ubuntu:ubuntu ~/.kube/*
- echo "${FUNCNAME[0]}: Activate Ceph for namespace 'default'"
+ log "Activate Ceph for namespace 'default'"
./activate-namespace.sh default
- echo "${FUNCNAME[0]}: Relax access control rules"
+ log "Relax access control rules"
kubectl replace -f relax-rbac-k8s1.7.yaml
- echo "${FUNCNAME[0]}: Setup complete, running smoke tests"
- echo "${FUNCNAME[0]}: Create a pool from a ceph-mon pod (e.g., ceph-mon-0)"
+ log "Setup complete, running smoke tests"
+ log "Create a pool from a ceph-mon pod (e.g., ceph-mon-0)"
kubectl -n ceph exec -it ceph-mon-0 -- ceph osd pool create rbd 100 100
- echo "${FUNCNAME[0]}: Create a pvc and check if the pvc status is Bound"
+ log "Create a pvc and check if the pvc status is Bound"
kubectl create -f tests/ceph/pvc.yaml
status=$(kubectl get pvc ceph-test -o json | jq -r '.status.phase')
while [[ "$status" != "Bound" ]]; do
- echo "${FUNCNAME[0]}: pvc status is $status, waiting 10 seconds for it to be Bound"
+ log "pvc status is $status, waiting 10 seconds for it to be Bound"
sleep 10
status=$(kubectl get pvc ceph-test -o json | jq -r '.status.phase')
done
- echo "${FUNCNAME[0]}: pvc ceph-test successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' ceph-test)"
+ log "pvc ceph-test successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' ceph-test)"
kubectl describe pvc
- echo "${FUNCNAME[0]}: Attach the pvc to a job and check if the job is successful (i.e., 1)"
+ log "Attach the pvc to a job and check if the job is successful (i.e., 1)"
kubectl create -f tests/ceph/job.yaml
status=$(kubectl get jobs ceph-secret-generator -n ceph -o json | jq -r '.status.succeeded')
if [[ "$status" != "1" ]]; then
- echo "${FUNCNAME[0]}: pvc attachment was not successful:"
+ log "pvc attachment was not successful:"
kubectl get jobs ceph-secret-generator -n ceph -o json
exit 1
fi
- echo "${FUNCNAME[0]}: Verify that the test job was successful"
+ log "Verify that the test job was successful"
pod=$(kubectl get pods --namespace default | awk "/ceph-test/{print \$1}")
active=$(kubectl get jobs --namespace default -o json ceph-test-job | jq -r '.status.active')
while [[ $active > 0 ]]; do
- echo "${FUNCNAME[0]}: test job is still running, waiting 10 seconds for it to complete"
+ log "test job is still running, waiting 10 seconds for it to complete"
kubectl describe pods --namespace default $pod | awk '/Events:/{y=1;next}y'
sleep 10
active=$(kubectl get jobs --namespace default -o json ceph-test-job | jq -r '.status.active')
done
- echo "${FUNCNAME[0]}: test job succeeded"
+ log "test job succeeded"
kubectl delete jobs ceph-secret-generator -n ceph
kubectl delete pvc ceph-test
- echo "${FUNCNAME[0]}: Ceph setup complete!"
+ log "Ceph setup complete!"
}
if [[ "$1" != "" ]]; then