Diffstat (limited to 'tools/kubernetes/ceph-baremetal.sh')
-rw-r--r--  tools/kubernetes/ceph-baremetal.sh  54
1 file changed, 30 insertions(+), 24 deletions(-)
diff --git a/tools/kubernetes/ceph-baremetal.sh b/tools/kubernetes/ceph-baremetal.sh
index dcad340..d806178 100644
--- a/tools/kubernetes/ceph-baremetal.sh
+++ b/tools/kubernetes/ceph-baremetal.sh
@@ -31,20 +31,26 @@
#. Status: work in progress, incomplete
#
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
function setup_ceph() {
node_ips=$1
cluster_net=$2
public_net=$3
ceph_dev=$4
- echo "${FUNCNAME[0]}: Deploying ceph-mon on localhost $HOSTNAME"
- echo "${FUNCNAME[0]}: Deploying ceph-osd on nodes $node_ips"
- echo "${FUNCNAME[0]}: Setting cluster-network=$cluster_net and public-network=$public_net"
+ log "Deploying ceph-mon on localhost $HOSTNAME"
+ log "Deploying ceph-osd on nodes $node_ips"
+ log "Setting cluster-network=$cluster_net and public-network=$public_net"
mon_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
all_nodes="$mon_ip $node_ips"
# Also caches the server fingerprints so ceph-deploy does not prompt the user
# Note this loop may be partially redundant with the ceph-deploy steps below
for node_ip in $all_nodes; do
- echo "${FUNCNAME[0]}: Install ntp and ceph on $node_ip"
+ log "Install ntp and ceph on $node_ip"
ssh -x -o StrictHostKeyChecking=no ubuntu@$node_ip <<EOF
sudo timedatectl set-ntp no
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
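
The log() helper added above invokes the bash builtin `caller 0` twice, once per field. Since `caller 0` prints "LINE FUNCTION FILE" on a single line, one invocation can populate both variables; a minimal equivalent sketch, not part of this commit:

# caller 0 emits "LINE FUNCTION FILE" for the current call frame,
# so a single read can capture both the line number and function name
function log() {
  read -r l f _ <<<"$(caller 0)"
  echo "$f:$l ($(date)) $1"
}
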
@@ -56,11 +62,11 @@ EOF
# per http://docs.ceph.com/docs/master/start/quick-ceph-deploy/
# also https://upcommons.upc.edu/bitstream/handle/2117/101816/Degree_Thesis_Nabil_El_Alami.pdf
- echo "${FUNCNAME[0]}: Create ceph config folder ~/ceph-cluster"
+ log "Create ceph config folder ~/ceph-cluster"
mkdir ~/ceph-cluster
cd ~/ceph-cluster
- echo "${FUNCNAME[0]}: Create new cluster with $HOSTNAME as initial ceph-mon node"
+ log "Create new cluster with $HOSTNAME as initial ceph-mon node"
ceph-deploy new --cluster-network $cluster_net --public-network $public_net --no-ssh-copykey $HOSTNAME
# Update conf per recommendations of http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
cat <<EOF >>ceph.conf
@@ -69,16 +75,16 @@ osd max object namespace len = 64
EOF
cat ceph.conf
- echo "${FUNCNAME[0]}: Deploy ceph packages on other nodes"
+ log "Deploy ceph packages on other nodes"
ceph-deploy install $mon_ip $node_ips
- echo "${FUNCNAME[0]}: Deploy the initial monitor and gather the keys"
+ log "Deploy the initial monitor and gather the keys"
ceph-deploy mon create-initial
if [[ "x$ceph_dev" == "x" ]]; then
n=1
for node_ip in $node_ips; do
- echo "${FUNCNAME[0]}: Prepare ceph OSD on node $node_ip"
+ log "Prepare ceph OSD on node $node_ip"
echo "$node_ip ceph-osd$n" | sudo tee -a /etc/hosts
# Using ceph-osd$n here avoids need for manual acceptance of the new server hash
ssh -x -o StrictHostKeyChecking=no ubuntu@ceph-osd$n <<EOF
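
The remote setup block for each ceph-osd$n node is truncated by the diff context here. For orientation only: with jewel-era ceph-deploy, the quick-deploy guide cited above builds directory-backed OSDs roughly as in the sketch below; the data path is an assumption, not taken from this script.

# Hypothetical sketch per the jewel quick-start guide (path is assumed):
# create a data directory on the aliased node, then prepare and activate it
ssh -x ubuntu@ceph-osd$n "sudo mkdir -p /var/local/osd$n"
ceph-deploy osd prepare ceph-osd$n:/var/local/osd$n
ceph-deploy osd activate ceph-osd$n:/var/local/osd$n
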
@@ -90,17 +96,17 @@ EOF
((n++))
done
else
- echo "${FUNCNAME[0]}: Deploy OSDs"
+ log "Deploy OSDs"
for node_ip in $node_ips; do
- echo "${FUNCNAME[0]}: Create ceph osd on $node_ip using $ceph_dev"
+ log "Create ceph osd on $node_ip using $ceph_dev"
ceph-deploy osd create $node_ip:$ceph_dev
done
fi
- echo "${FUNCNAME[0]}: Copy the config file and admin key to the admin node and OSD nodes"
+ log "Copy the config file and admin key to the admin node and OSD nodes"
ceph-deploy admin $mon_ip $node_ips
- echo "${FUNCNAME[0]}: Check the cluster health"
+ log "Check the cluster health"
sudo ceph health
sudo ceph -s
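
`ceph health` prints HEALTH_OK, HEALTH_WARN, or HEALTH_ERR, but the script only displays the result. A stricter gate, in the same style as the wait loops used later in this script, would look like the following sketch, not part of this commit:

# Poll until the cluster reports HEALTH_OK, mirroring the script's wait loops
until sudo ceph health | grep -q HEALTH_OK; do
  log "ceph health is '$(sudo ceph health)'. Waiting 10 seconds for HEALTH_OK"
  sleep 10
done
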
@@ -108,22 +114,22 @@ EOF
# rbd is not included in default kube-controller-manager... use attcomdev version
sudo sed -i -- 's~gcr.io/google_containers/kube-controller-manager-amd64:.*~quay.io/attcomdev/kube-controller-manager:v1.7.3~' /etc/kubernetes/manifests/kube-controller-manager.yaml
if [[ $(sudo grep -c attcomdev/kube-controller-manager /etc/kubernetes/manifests/kube-controller-manager.yaml) == 0 ]]; then
- echo "${FUNCNAME[0]}: Problem patching /etc/kubernetes/manifests/kube-controller-manager.yaml... script update needed"
+ log "Problem patching /etc/kubernetes/manifests/kube-controller-manager.yaml... script update needed"
exit 1
fi
mgr=$(kubectl get pods --all-namespaces | grep kube-controller-manager | awk '{print $4}')
while [[ "$mgr" != "Running" ]]; do
- echo "${FUNCNAME[0]}: kube-controller-manager status is $mgr. Waiting 60 seconds for it to be 'Running'"
+ log "kube-controller-manager status is $mgr. Waiting 60 seconds for it to be 'Running'"
sleep 60
mgr=$(kubectl get pods --all-namespaces | grep kube-controller-manager | awk '{print $4}')
done
- echo "${FUNCNAME[0]}: kube-controller-manager status is $mgr"
+ log "kube-controller-manager status is $mgr"
- echo "${FUNCNAME[0]}: Create Ceph admin secret"
+ log "Create Ceph admin secret"
admin_key=$(sudo ceph auth get-key client.admin)
kubectl create secret generic ceph-secret-admin --from-literal=key="$admin_key" --namespace=kube-system --type=kubernetes.io/rbd
- echo "${FUNCNAME[0]}: Create rdb storageClass 'general'"
+ log "Create rdb storageClass 'general'"
cat <<EOF >/tmp/ceph-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
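
The /tmp/ceph-sc.yaml heredoc is truncated by the diff context after `kind: StorageClass`. Based on the kubernetes.io/rbd provisioner parameters documented in the persistent-volume-provisioning README cited below, and on the secret and pool names this script creates, the manifest plausibly resembles this sketch; the monitor address and port are assumptions:

cat <<EOF >/tmp/ceph-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: general
provisioner: kubernetes.io/rbd
parameters:
  monitors: $mon_ip:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: kube-system
  pool: kube
  userId: kube
  userSecretName: ceph-secret-user
EOF
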
@@ -143,21 +149,21 @@ EOF
sudo chown -R ubuntu:ubuntu ~/.kube/*
kubectl create -f /tmp/ceph-sc.yaml
- echo "${FUNCNAME[0]}: Create storage pool 'kube'"
+ log "Create storage pool 'kube'"
# https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
sudo ceph osd pool create kube 32 32
- echo "${FUNCNAME[0]}: Authorize client 'kube' access to pool 'kube'"
+ log "Authorize client 'kube' access to pool 'kube'"
sudo ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
- echo "${FUNCNAME[0]}: Create ceph-secret-user secret in namespace 'default'"
+ log "Create ceph-secret-user secret in namespace 'default'"
kube_key=$(sudo ceph auth get-key client.kube)
kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" --namespace=default --type=kubernetes.io/rbd
# A similar secret must be created in other namespaces that intend to access the ceph pool
# Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
- echo "${FUNCNAME[0]}: Create andtest a persistentVolumeClaim"
+ log "Create andtest a persistentVolumeClaim"
cat <<EOF >/tmp/ceph-pvc.yaml
{
"kind": "PersistentVolumeClaim",
@@ -182,11 +188,11 @@ EOF
EOF
kubectl create -f /tmp/ceph-pvc.yaml
while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
- echo "${FUNCNAME[0]}: Waiting for pvc claim1 to be 'Bound'"
+ log "Waiting for pvc claim1 to be 'Bound'"
kubectl describe pvc
sleep 10
done
- echo "${FUNCNAME[0]}: pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
+ log "pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
kubectl get pvc
kubectl delete pvc claim1
kubectl describe pods
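
The /tmp/ceph-pvc.yaml heredoc is likewise shown only up to its opening lines. A minimal claim matching the names used here ('claim1', storageClass 'general') would look roughly like the sketch below; the access mode, size, and the beta storage-class annotation (the form used by the README cited above) are assumptions:

cat <<EOF >/tmp/ceph-pvc.yaml
{
  "kind": "PersistentVolumeClaim",
  "apiVersion": "v1",
  "metadata": {
    "name": "claim1",
    "annotations": {
      "volume.beta.kubernetes.io/storage-class": "general"
    }
  },
  "spec": {
    "accessModes": ["ReadWriteOnce"],
    "resources": {
      "requests": { "storage": "1Gi" }
    }
  }
}
EOF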