author    Bryan Sullivan <bryan.sullivan@att.com>    2017-10-20 08:20:56 -0700
committer Bryan Sullivan <bryan.sullivan@att.com>    2017-10-20 08:20:56 -0700
commit    0daff9fc896967633f32f73cb4377b922cc5dc0c (patch)
tree      2f1d7593ded98167e66884e02e429a7f973ef545
parent    1e33ff4b43994ea0bd2d3155ac6f298242573898 (diff)
Fix issues with resolv.conf and ceph-disk calls
JIRA: MODELS-23
Add preliminary test steps

Change-Id: Ib22c13b880cd143a500315c86bbac7c2a8b3e526
Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
-rw-r--r-- tools/kubernetes/ceph-helm.sh | 73
1 file changed, 71 insertions(+), 2 deletions(-)
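For context, both fixes in this patch are small shell corrections: the resolv.conf heredoc has to be fed through cat (echo <<EOF discards the heredoc body, so nothing useful reaches tee), and ceph-disk zap needs a full device path rather than the bare device name. A minimal sketch of the corrected calls follows; the values of kubedns, dev and the node address are illustrative assumptions, not taken from this patch:

# Sketch only; kubedns, dev and the node IP below are illustrative assumptions.
kubedns=10.96.0.10        # example kube-dns service IP
dev=sdb                   # example spare disk on the storage node

# cat streams the heredoc body into tee; 'echo <<EOF' would only emit a newline.
cat <<EOF | sudo tee -a /etc/resolv.conf
nameserver $kubedns
search ceph.svc.cluster.local svc.cluster.local cluster.local
EOF

# ceph-disk expects a device path, so pass /dev/$dev rather than the bare name.
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
  ubuntu@192.0.2.10 sudo ceph-disk zap /dev/$dev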
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index 9820677..0028423 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -51,19 +51,23 @@ function setup_ceph() {
for node in $nodes; do
echo "${FUNCNAME[0]}: setup resolv.conf for $node"
- echo <<EOF | sudo tee -a /etc/resolv.conf
+ cat <<EOF | sudo tee -a /etc/resolv.conf
nameserver $kubedns
search ceph.svc.cluster.local svc.cluster.local cluster.local
EOF
echo "${FUNCNAME[0]}: Zap disk $dev at $node"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$node sudo ceph-disk zap $dev
+ ubuntu@$node sudo ceph-disk zap /dev/$dev
echo "${FUNCNAME[0]}: Run ceph-osd at $node"
name=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
ubuntu@$node hostname)
./helm-install-ceph-osd.sh $name /dev/$dev
done
+ echo "${FUNCNAME[0]}: WORKAROUND take ownership of .kube"
+ # TODO: find out why this is needed
+ sudo chown -R ubuntu:ubuntu ~/.kube/*
+
echo "${FUNCNAME[0]}: Activate Ceph for namespace 'default'"
./activate-namespace.sh default
@@ -71,6 +75,71 @@ EOF
kubectl replace -f relax-rbac-k8s1.7.yaml
# TODO: verification tests
+
+ echo "${FUNCNAME[0]}: Create rdb storageClass 'slow'"
+ cat <<EOF >/tmp/ceph-sc.yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: slow
+provisioner: kubernetes.io/rbd
+parameters:
+ monitors: $mon_ip:6789
+ adminId: admin
+ adminSecretName: ceph-secret-admin
+ adminSecretNamespace: "kube-system"
+ pool: kube
+ userId: kube
+ userSecretName: ceph-secret-user
+EOF
+
+ echo "${FUNCNAME[0]}: Create storage pool 'kube'"
+ # https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
+ sudo ceph osd pool create kube 32 32
+
+ echo "${FUNCNAME[0]}: Authorize client 'kube' access to pool 'kube'"
+ sudo ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
+
+ echo "${FUNCNAME[0]}: Create ceph-secret-user secret in namespace 'default'"
+ kube_key=$(sudo ceph auth get-key client.kube)
+ kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" --namespace=default --type=kubernetes.io/rbd
+ # A similar secret must be created in other namespaces that intend to access the ceph pool
+
+ # Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
+
+ echo "${FUNCNAME[0]}: Create andtest a persistentVolumeClaim"
+ cat <<EOF >/tmp/ceph-pvc.yaml
+{
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "claim1",
+ "annotations": {
+ "volume.beta.kubernetes.io/storage-class": "slow"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "3Gi"
+ }
+ }
+ }
+}
+EOF
+ kubectl create -f /tmp/ceph-pvc.yaml
+ while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
+ echo "${FUNCNAME[0]}: Waiting for pvc claim1 to be 'Bound'"
+ kubectl describe pvc
+ sleep 10
+ done
+ echo "${FUNCNAME[0]}: pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
+ kubectl get pvc
+ kubectl delete pvc claim1
+ kubectl describe pods
}
if [[ "$1" != "" ]]; then