From 4518364b4ce515884eadf476ae6db021210bd7f0 Mon Sep 17 00:00:00 2001
From: Bryan Sullivan
Date: Sat, 28 Oct 2017 12:02:38 -0700
Subject: Clean trailing spaces; ceph-helm.sh further work

JIRA: MODELS-23

Change-Id: I6f9ef087219ad2f6a736d1027efda2eaf16abcff
Signed-off-by: Bryan Sullivan
---
 tools/cloudify/k8s-cloudify.sh     |  10 +--
 tools/kubernetes/ceph-baremetal.sh |   6 +-
 tools/kubernetes/ceph-helm.sh      | 156 ++++++++++++++++++++-----------------
 tools/kubernetes/k8s-cluster.sh    |  20 ++---
 4 files changed, 102 insertions(+), 90 deletions(-)

diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index fe85697..61245f6 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -14,12 +14,12 @@
 # limitations under the License.
 #
 #. What this is: Setup script for Cloudify use with Kubernetes.
-#. Prerequisites: 
+#. Prerequisites:
 #. - Kubernetes cluster installed per k8s-cluster.sh (in this repo)
 #. Usage:
 #. From a server with access to the kubernetes master node:
 #. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ scp -r ~/models/tools/cloudify ubuntu@<k8s-master>:/home/ubuntu/. 
+#. $ scp -r ~/models/tools/cloudify ubuntu@<k8s-master>:/home/ubuntu/.
 #.   <k8s-master>: IP or hostname of kubernetes master server
 #. $ ssh -x ubuntu@<k8s-master> cloudify/k8s-cloudify.sh prereqs
 #. prereqs: installs prerequisites and configures ubuntu user for kvm use
@@ -31,7 +31,7 @@ function prereqs() {
   sudo apt-get install -y virtinst qemu-kvm libguestfs-tools virtualenv git python-pip
   echo "${FUNCNAME[0]}: Setup $USER for kvm use"
   # Per http://libguestfs.org/guestfs-faq.1.html
-  # workaround for virt-customize warning: libguestfs: warning: current user is not a member of the KVM group (group ID 121). This user cannot access /dev/kvm, so libguestfs may run very slowly. It is recommended that you 'chmod 0666 /dev/kvm' or add the current user to the KVM group (you might need to log out and log in again). 
+  # workaround for virt-customize warning: libguestfs: warning: current user is not a member of the KVM group (group ID 121). This user cannot access /dev/kvm, so libguestfs may run very slowly. It is recommended that you 'chmod 0666 /dev/kvm' or add the current user to the KVM group (you might need to log out and log in again).
   # Also see: https://help.ubuntu.com/community/KVM/Installation
   # also to avoid permission denied errors in guestfish, from http://manpages.ubuntu.com/manpages/zesty/man1/guestfs-faq.1.html
   sudo usermod -a -G kvm $USER
@@ -72,7 +72,7 @@ function setup () {
     sleep 60
   done
   cfy status
-  
+
   echo "${FUNCNAME[0]}: Install Cloudify Kubernetes Plugin"
   # Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
   # Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
@@ -84,7 +84,7 @@
   # For Cloudify-Manager per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/blob/master/examples/persistent-volumes-blueprint.yaml
   cfy plugins upload cloudify_kubernetes_plugin-1.2.1-py27-none-linux_x86_64-centos-Core.wgn
 
-  echo "${FUNCNAME[0]}: Create secrets for kubernetes as referenced in blueprints" 
+  echo "${FUNCNAME[0]}: Create secrets for kubernetes as referenced in blueprints"
   cfy secrets create -s $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $1}') kubernetes_master_ip
   cfy secrets create -s $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $2}') kubernetes_master_port
   cfy secrets create -s $(grep 'certificate-authority-data: ' ~/.kube/config | awk -F ' ' '{print $2}') kubernetes_certificate_authority_data
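Note: the three cfy secrets above are populated by parsing ~/.kube/config with grep/awk. A minimal sketch of that extraction, runnable against a throwaway sample kubeconfig (the sample path and server address are illustrative assumptions, not values from this patch):

# Recreate the parsing behind the kubernetes_master_ip/_port secrets.
# /tmp/sample-kubeconfig and 10.0.0.1:6443 are made-up values for illustration.
cat <<'EOF' >/tmp/sample-kubeconfig
apiVersion: v1
clusters:
- cluster:
    server: https://10.0.0.1:6443
EOF
master_ip=$(grep server /tmp/sample-kubeconfig | awk -F '/' '{print $3}' | awk -F ':' '{print $1}')
master_port=$(grep server /tmp/sample-kubeconfig | awk -F '/' '{print $3}' | awk -F ':' '{print $2}')
echo "kubernetes_master_ip=$master_ip kubernetes_master_port=$master_port"
# prints: kubernetes_master_ip=10.0.0.1 kubernetes_master_port=6443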
diff --git a/tools/kubernetes/ceph-baremetal.sh b/tools/kubernetes/ceph-baremetal.sh
index 998d136..dcad340 100644
--- a/tools/kubernetes/ceph-baremetal.sh
+++ b/tools/kubernetes/ceph-baremetal.sh
@@ -123,12 +123,12 @@ EOF
   admin_key=$(sudo ceph auth get-key client.admin)
   kubectl create secret generic ceph-secret-admin --from-literal=key="$admin_key" --namespace=kube-system --type=kubernetes.io/rbd
 
-  echo "${FUNCNAME[0]}: Create rdb storageClass 'slow'"
+  echo "${FUNCNAME[0]}: Create rbd storageClass 'general'"
   cat <<EOF >/tmp/ceph-sc.yaml
 apiVersion: storage.k8s.io/v1
 kind: StorageClass
 metadata:
-  name: slow
+  name: general
 provisioner: kubernetes.io/rbd
 parameters:
   monitors: $mon_ip:6789
@@ -165,7 +165,7 @@
   "metadata": {
     "name": "claim1",
     "annotations": {
-      "volume.beta.kubernetes.io/storage-class": "slow"
+      "volume.beta.kubernetes.io/storage-class": "general"
     }
   },
   "spec": {
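The storageClass rename from 'slow' to 'general' above changes how claims select the class. A minimal sketch of a claim bound to the renamed class, using the same beta annotation style as the claim1 example in this script (the claim name and size are illustrative assumptions):

# Hypothetical claim against the 'general' rbd StorageClass created above.
cat <<EOF | kubectl create -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: demo-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "general"
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
# Should report 'Bound' once the rbd provisioner allocates an image
kubectl get pvc demo-claim -o jsonpath='{.status.phase}'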
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index 3635c83..534fd86 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -39,13 +39,13 @@ function setup_ceph() {
   # per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm
   echo "${FUNCNAME[0]}: Clone netarbiter"
   git clone https://github.com/att/netarbiter.git
-
-  echo "${FUNCNAME[0]}: Create a .kube/config secret so that a K8s job could run kubectl inside the container"
   cd netarbiter/sds/ceph-docker/examples/helm
-  kubectl create namespace ceph
-  ./create-secret-kube-config.sh ceph
-  ./helm-install-ceph.sh cephtest $private_net $public_net
+  echo "${FUNCNAME[0]}: Prepare a ceph namespace in your K8s cluster"
+  ./prep-ceph-ns.sh
+
+  echo "${FUNCNAME[0]}: Run ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
+  # Pre-req per https://github.com/att/netarbiter/tree/master/sds/ceph-docker/examples/helm#notes
   kubedns=$(kubectl get service -o json --namespace kube-system kube-dns | \
     jq -r '.spec.clusterIP')
@@ -55,13 +55,39 @@
 search ceph.svc.cluster.local svc.cluster.local cluster.local
 options ndots:5
 EOF
+  ./helm-install-ceph.sh cephtest $private_net $public_net
+
+  echo "${FUNCNAME[0]}: Check the pod status of ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
+  services="rbd-provisioner ceph-mon-0 ceph-mgr ceph-mon-check"
+  for service in $services; do
+    pod=$(kubectl get pods --namespace ceph | awk "/$service/{print \$1}")
+    status=$(kubectl get pods --namespace ceph $pod -o json | jq -r '.status.phase')
+    while [[ "x$status" != "xRunning" ]]; do
+      echo "${FUNCNAME[0]}: $pod status is \"$status\". Waiting 10 seconds for it to be 'Running'"
+      sleep 10
+      status=$(kubectl get pods --namespace ceph $pod -o json | jq -r '.status.phase')
+    done
+  done
+  kubectl get pods --namespace ceph
+
+  echo "${FUNCNAME[0]}: Check ceph health status"
+  status=$(kubectl -n ceph exec -it ceph-mon-0 -- ceph -s | awk "/health:/{print \$2}")
+  while [[ "x$status" != "xHEALTH_OK" ]]; do
+    echo "${FUNCNAME[0]}: ceph status is \"$status\". Waiting 10 seconds for it to be 'HEALTH_OK'"
+    kubectl -n ceph exec -it ceph-mon-0 -- ceph -s
+    sleep 10
+    status=$(kubectl -n ceph exec -it ceph-mon-0 -- ceph -s | awk "/health:/{print \$2}")
+  done
+  echo "${FUNCNAME[0]}: ceph status is 'HEALTH_OK'"
+  kubectl -n ceph exec -it ceph-mon-0 -- ceph -s
+
   for node in $nodes; do
     echo "${FUNCNAME[0]}: setup resolv.conf for $node"
     ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
       ubuntu@$node <<EOG
 cat <<EOF | sudo tee /etc/resolv.conf
 nameserver $kubedns
 search ceph.svc.cluster.local svc.cluster.local cluster.local
 options ndots:5
 EOF
 EOG
   done
 
-  cat <<EOF >/tmp/ceph-sc.yaml
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: slow
-provisioner: kubernetes.io/rbd
-parameters:
-  monitors: $mon_ip:6789
-  adminId: admin
-  adminSecretName: ceph-secret-admin
-  adminSecretNamespace: "kube-system"
-  pool: kube
-  userId: kube
-  userSecretName: ceph-secret-user
-EOF
+  echo "${FUNCNAME[0]}: Setup complete, running smoke tests"
+  echo "${FUNCNAME[0]}: Create a pool from a ceph-mon pod (e.g., ceph-mon-0)"
 
-  kubectl create -f /tmp/ceph-sc.yaml
-
-  echo "${FUNCNAME[0]}: Create storage pool 'kube'"
-  # https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
-  sudo ceph osd pool create kube 32 32
-
-  echo "${FUNCNAME[0]}: Authorize client 'kube' access to pool 'kube'"
-  sudo ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
-
-  echo "${FUNCNAME[0]}: Create ceph-secret-user secret in namespace 'default'"
-  kube_key=$(sudo ceph auth get-key client.kube)
-  kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" --namespace=default --type=kubernetes.io/rbd
-  # A similar secret must be created in other namespaces that intend to access the ceph pool
-
-  # Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
-
-  echo "${FUNCNAME[0]}: Create andtest a persistentVolumeClaim"
-  cat <<EOF >/tmp/ceph-pvc.yaml
-{
-  "kind": "PersistentVolumeClaim",
-  "apiVersion": "v1",
-  "metadata": {
-    "name": "claim1",
-    "annotations": {
-      "volume.beta.kubernetes.io/storage-class": "slow"
-    }
-  },
-  "spec": {
-    "accessModes": [
-      "ReadWriteOnce"
-    ],
-    "resources": {
-      "requests": {
-        "storage": "3Gi"
-      }
-    }
-  }
-}
-EOF
-  kubectl create -f /tmp/ceph-pvc.yaml
-  while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
-    echo "${FUNCNAME[0]}: Waiting for pvc claim1 to be 'Bound'"
-    kubectl describe pvc
+  kubectl -n ceph exec -it ceph-mon-0 -- ceph osd pool create rbd 100 100
+
+  echo "${FUNCNAME[0]}: Create a pvc and check if the pvc status is Bound"
+
+  kubectl create -f tests/ceph/pvc.yaml
+  status=$(kubectl get pvc ceph-test -o json | jq -r '.status.phase')
+  while [[ "$status" != "Bound" ]]; do
+    echo "${FUNCNAME[0]}: pvc status is $status, waiting 10 seconds for it to be Bound"
     sleep 10
+    status=$(kubectl get pvc ceph-test -o json | jq -r '.status.phase')
   done
-  echo "${FUNCNAME[0]}: pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
-  kubectl get pvc
-  kubectl delete pvc claim1
-  kubectl describe pods
+  echo "${FUNCNAME[0]}: pvc ceph-test successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' ceph-test)"
+  kubectl describe pvc
+
+  echo "${FUNCNAME[0]}: Attach the pvc to a job and check if the job is successful (i.e., 1)"
+  kubectl create -f tests/ceph/job.yaml
+  status=$(kubectl get jobs ceph-secret-generator -n ceph -o json | jq -r '.status.succeeded')
+  if [[ "$status" != "1" ]]; then
+    echo "${FUNCNAME[0]}: pvc attachment was not successful:"
+    kubectl get jobs ceph-secret-generator -n ceph -o json
+    exit 1
+  fi
+
+  echo "${FUNCNAME[0]}: Verify that the test job was successful"
+  pod=$(kubectl get pods --namespace default | awk "/ceph-test/{print \$1}")
+  active=$(kubectl get jobs --namespace default -o json ceph-test-job | jq -r '.status.active')
+  while [[ $active > 0 ]]; do
+    echo "${FUNCNAME[0]}: test job is still running, waiting 10 seconds for it to complete"
+    kubectl describe pods --namespace default $pod | awk '/Events:/{y=1;next}y'
+    sleep 10
+    active=$(kubectl get jobs --namespace default -o json ceph-test-job | jq -r '.status.active')
+  done
+  echo "${FUNCNAME[0]}: test job succeeded"
+
+  kubectl delete jobs ceph-secret-generator -n ceph
+  kubectl delete pvc ceph-test
+  echo "${FUNCNAME[0]}: Ceph setup complete!"
 }
 
 if [[ "$1" != "" ]]; then
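The mon/mgr pod checks, ceph health check, pvc check, and job check added above all repeat one poll-until-state loop. A reusable sketch of that pattern (wait_for_state is a hypothetical helper, not a function in this repo):

# Hypothetical helper generalizing the poll loops in setup_ceph().
# $1: command printing the current state; $2: the state to wait for.
function wait_for_state() {
  local state=$($1)
  while [[ "$state" != "$2" ]]; do
    echo "state is \"$state\"; waiting 10 seconds for it to be '$2'"
    sleep 10
    state=$($1)
  done
}
# e.g. the pvc check above becomes:
# wait_for_state "kubectl get pvc ceph-test -o jsonpath={.status.phase}" Bound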
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index a5cc07d..858eca3 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -162,13 +162,13 @@ function demo_chart() {
       # LoadBalancer is N/A for baremetal (public cloud only) - use NodePort
       sed -i -- 's/LoadBalancer/NodePort/g' ./mediawiki/values.yaml
       # Select the storageClass created in the ceph setup step
-      sed -i -- 's/# storageClass:/storageClass: "slow"/g' ./mediawiki/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./mediawiki/charts/mariadb/values.yaml
+      sed -i -- 's/# storageClass:/storageClass: "general"/g' ./mediawiki/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./mediawiki/charts/mariadb/values.yaml
       helm install --name mw -f ./mediawiki/values.yaml ./mediawiki
       wait_for_service mw-mediawiki
       ;;
     dokuwiki)
-      sed -i -- 's/# storageClass:/storageClass: "slow"/g' ./dokuwiki/values.yaml
+      sed -i -- 's/# storageClass:/storageClass: "general"/g' ./dokuwiki/values.yaml
       sed -i -- 's/LoadBalancer/NodePort/g' ./dokuwiki/values.yaml
       helm install --name dw -f ./dokuwiki/values.yaml ./dokuwiki
       wait_for_service dw-dokuwiki
@@ -179,8 +179,8 @@ function demo_chart() {
       mkdir ./wordpress/charts
       cp -r ./mariadb ./wordpress/charts
       sed -i -- 's/LoadBalancer/NodePort/g' ./wordpress/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./wordpress/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./wordpress/charts/mariadb/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./wordpress/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./wordpress/charts/mariadb/values.yaml
       helm install --name wp -f ./wordpress/values.yaml ./wordpress
       wait_for_service wp-wordpress
       ;;
@@ -191,9 +191,9 @@ function demo_chart() {
       cp -r ./mariadb ./redmine/charts
       cp -r ./postgresql ./redmine/charts
       sed -i -- 's/LoadBalancer/NodePort/g' ./redmine/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./redmine/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./redmine/charts/mariadb/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./redmine/charts/postgresql/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/charts/mariadb/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/charts/postgresql/values.yaml
       helm install --name rdm -f ./redmine/values.yaml ./redmine
       wait_for_service rdm-redmine
       ;;
@@ -202,8 +202,8 @@ function demo_chart() {
       mkdir ./owncloud/charts
       cp -r ./mariadb ./owncloud/charts
       sed -i -- 's/LoadBalancer/NodePort/g' ./owncloud/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./owncloud/values.yaml
-      sed -i -- 's/# storageClass: "-"/storageClass: "slow"/g' ./owncloud/charts/mariadb/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./owncloud/values.yaml
+      sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./owncloud/charts/mariadb/values.yaml
       helm install --name oc -f ./owncloud/values.yaml ./owncloud
       wait_for_service oc-owncloud
       ;;
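For reference, the sed pairs in demo_chart() make two edits per chart: switch the service type to NodePort and point the commented-out storageClass at 'general'. A small sketch of their effect on a made-up values.yaml fragment (the file content below is an assumption for illustration):

# Demonstrate the two sed edits on a sample fragment.
cat <<EOF >/tmp/values.yaml
serviceType: LoadBalancer
persistence:
  # storageClass: "-"
EOF
sed -i -- 's/LoadBalancer/NodePort/g' /tmp/values.yaml
sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' /tmp/values.yaml
cat /tmp/values.yaml
# serviceType: NodePort
# persistence:
#   storageClass: "general"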
-- 
cgit 1.2.3-korg