From 6d5fa696c841d9c032c58f0ac0501a9d1d9e8498 Mon Sep 17 00:00:00 2001
From: Bryan Sullivan
Date: Wed, 18 Oct 2017 12:13:25 -0700
Subject: Factor out ceph setup in prep for ceph-helm implementation.

Change-Id: I121204e90550ccb6dcfc0b084ace6230a8d6b8f5
Signed-off-by: Bryan Sullivan
---
 tools/kubernetes/ceph-baremetal.sh | 199 +++++++++++++++++++++++++++++++++++++
 tools/kubernetes/ceph-helm.sh      |  32 ++++++
 tools/kubernetes/k8s-cluster.sh    | 178 +++------------------------------
 3 files changed, 244 insertions(+), 165 deletions(-)
 create mode 100644 tools/kubernetes/ceph-baremetal.sh
 create mode 100644 tools/kubernetes/ceph-helm.sh

diff --git a/tools/kubernetes/ceph-baremetal.sh b/tools/kubernetes/ceph-baremetal.sh
new file mode 100644
index 0000000..55d2a7f
--- /dev/null
+++ b/tools/kubernetes/ceph-baremetal.sh
@@ -0,0 +1,199 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: script to setup a Ceph-based SDS (Software Defined Storage)
+#. service for a kubernetes cluster, directly on the master and worker nodes.
+#. Prerequisites:
+#. - Ubuntu xenial server for master and agent nodes
+#. - key-based auth setup for ssh/scp between master and agent nodes
+#. - 192.168.0.0/16 should not be used on your server network interface subnets
+#. Usage:
+# Intended to be called from k8s-cluster.sh in this folder. To run directly:
+#. $ bash ceph-baremetal.sh "<nodes>" <cluster-net> <public-net> [ceph_dev]
+#.   nodes: space-separated list of ceph node IPs
+#.   cluster-net: CIDR of ceph cluster network e.g. 10.0.0.1/24
+#.   public-net: CIDR of public network
+#.   ceph_dev: disk to use for ceph. ***MUST NOT BE USED FOR ANY OTHER PURPOSE***
+#.             if not provided, ceph data will be stored on osd nodes in /ceph
+#.
+#. Status: work in progress, incomplete
+#
+
+function setup_ceph() {
+  node_ips=$1
+  cluster_net=$2
+  public_net=$3
+  ceph_dev=$4
+  echo "${FUNCNAME[0]}: Deploying ceph-mon on localhost $HOSTNAME"
+  echo "${FUNCNAME[0]}: Deploying ceph-osd on nodes $node_ips"
+  echo "${FUNCNAME[0]}: Setting cluster-network=$cluster_net and public-network=$public_net"
+  mon_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+  all_nodes="$mon_ip $node_ips"
+  # Also caches the server fingerprints so ceph-deploy does not prompt the user
+  # Note this loop may be partially redundant with the ceph-deploy steps below
+  for node_ip in $all_nodes; do
+    echo "${FUNCNAME[0]}: Install ntp and ceph on $node_ip"
+    ssh -x -o StrictHostKeyChecking=no ubuntu@$node_ip <<EOF
+sudo apt-get update
+sudo apt-get install -y ntp ceph ceph-deploy
+EOF
+  done
+
+  echo "${FUNCNAME[0]}: Create the initial ceph cluster config in ~/ceph-cluster"
+  mkdir -p ~/ceph-cluster
+  cd ~/ceph-cluster
+  ceph-deploy new --cluster-network $cluster_net --public-network $public_net $HOSTNAME
+  # Defaults for max object name/namespace length can exceed ext4 limits; reduce them
+  cat <<EOF >>ceph.conf
+osd max object name len = 256
+osd max object namespace len = 64
+EOF
+  cat ceph.conf
+
+  echo "${FUNCNAME[0]}: Deploy ceph packages on other nodes"
+  ceph-deploy install $mon_ip $node_ips
+
+  echo "${FUNCNAME[0]}: Deploy the initial monitor and gather the keys"
+  ceph-deploy mon create-initial
+
+  if [[ "x$ceph_dev" == "x" ]]; then
+    n=1
+    for node_ip in $node_ips; do
+      echo "${FUNCNAME[0]}: Prepare ceph OSD on node $node_ip"
+      echo "$node_ip ceph-osd$n" | sudo tee -a /etc/hosts
+      # Using ceph-osd$n here avoids need for manual acceptance of the new server hash
+      ssh -x -o StrictHostKeyChecking=no ubuntu@ceph-osd$n <<EOF
+sudo mkdir -p /ceph
+sudo chown ceph:ceph /ceph
+EOF
+      ceph-deploy osd prepare ceph-osd$n:/ceph
+      ceph-deploy osd activate ceph-osd$n:/ceph
+      ((n++))
+    done
+  else
+    n=1
+    for node_ip in $node_ips; do
+      echo "${FUNCNAME[0]}: Prepare ceph OSD on node $node_ip at $ceph_dev"
+      echo "$node_ip ceph-osd$n" | sudo tee -a /etc/hosts
+      ceph-deploy osd create ceph-osd$n:$ceph_dev
+      ((n++))
+    done
+  fi
+
+  echo "${FUNCNAME[0]}: Copy the config file and admin key to the admin and OSD nodes"
+  ceph-deploy admin $mon_ip $node_ips
+
+  echo "${FUNCNAME[0]}: Create ceph-secret-admin secret in namespace 'kube-system'"
+  admin_key=$(sudo ceph auth get-key client.admin)
+  kubectl create secret generic ceph-secret-admin --from-literal=key="$admin_key" --namespace=kube-system --type=kubernetes.io/rbd
+
+  echo "${FUNCNAME[0]}: Create rbd storageClass 'slow'"
+  cat <<EOF >/tmp/ceph-sc.yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: slow
+provisioner: kubernetes.io/rbd
+parameters:
+  monitors: $mon_ip:6789
+  adminId: admin
+  adminSecretName: ceph-secret-admin
+  adminSecretNamespace: "kube-system"
+  pool: kube
+  userId: kube
+  userSecretName: ceph-secret-user
+EOF
+  # TODO: find out where in the above ~/.kube folders became owned by root
+  sudo chown -R ubuntu:ubuntu ~/.kube/*
+  kubectl create -f /tmp/ceph-sc.yaml
+
+  echo "${FUNCNAME[0]}: Create storage pool 'kube'"
+  # https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
+  sudo ceph osd pool create kube 32 32
+
+  echo "${FUNCNAME[0]}: Authorize client 'kube' access to pool 'kube'"
+  sudo ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
+
+  echo "${FUNCNAME[0]}: Create ceph-secret-user secret in namespace 'default'"
+  kube_key=$(sudo ceph auth get-key client.kube)
+  kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" --namespace=default --type=kubernetes.io/rbd
+  # A similar secret must be created in other namespaces that intend to access the ceph pool
+
+  # Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
+
+  echo "${FUNCNAME[0]}: Create and test a persistentVolumeClaim"
+  cat <<EOF >/tmp/ceph-pvc.yaml
+{
+  "kind": "PersistentVolumeClaim",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "claim1",
+    "annotations": {
+      "volume.beta.kubernetes.io/storage-class": "slow"
+    }
+  },
+  "spec": {
+    "accessModes": [
+      "ReadWriteOnce"
+    ],
+    "resources": {
+      "requests": {
+        "storage": "3Gi"
+      }
+    }
+  }
+}
+EOF
+  kubectl create -f /tmp/ceph-pvc.yaml
+  while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
+    echo "${FUNCNAME[0]}: Waiting for pvc claim1 to be 'Bound'"
+    kubectl describe pvc
+    sleep 10
+  done
+  echo "${FUNCNAME[0]}: pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
+  kubectl get pvc
+  kubectl delete pvc claim1
+  kubectl describe pods
+}
+
+if [[ "$1" != "" ]]; then
+  setup_ceph "$1" $2 $3 $4
+else
+  grep '#. ' $0
+fi
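The comment above notes that other namespaces need their own copy of the user secret before they can provision volumes from the pool. A minimal sketch of that step, assuming an application namespace named "myapp" (the name is illustrative, not part of this patch):

    kube_key=$(sudo ceph auth get-key client.kube)
    kubectl create namespace myapp
    # Same key as the 'default' namespace secret, just published where the app's PVCs will live
    kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" \
      --namespace=myapp --type=kubernetes.io/rbd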
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
new file mode 100644
index 0000000..96a310c
--- /dev/null
+++ b/tools/kubernetes/ceph-helm.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: script to setup a Ceph-based SDS (Software Defined Storage)
+#. service for a kubernetes cluster, using Helm as deployment tool.
+#. Prerequisites:
+#. - Ubuntu xenial server for master and agent nodes
+#. - key-based auth setup for ssh/scp between master and agent nodes
+#. - 192.168.0.0/16 should not be used on your server network interface subnets
+#. Usage:
+# Intended to be called from k8s-cluster.sh in this folder. To run directly:
+#. $ bash ceph-helm.sh "<nodes>" <cluster-net> <public-net> [ceph_dev]
+#.   nodes: space-separated list of ceph node IPs
+#.   cluster-net: CIDR of ceph cluster network e.g. 10.0.0.1/24
+#.   public-net: CIDR of public network
+#.   ceph_dev: disk to use for ceph. ***MUST NOT BE USED FOR ANY OTHER PURPOSE***
+#.             if not provided, ceph data will be stored on osd nodes in /ceph
+#.
+#. Status: work in progress, incomplete
+#
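ceph-helm.sh is only a header stub at this point. For orientation, the upstream ceph/ceph-helm workflow it is expected to wrap looked roughly like the following at the time; the chart repo, make target, namespace, and node labels are that project's conventions (and ceph-overrides.yaml must be prepared first with the mon/osd network settings), not anything this patch defines:

    # Serve charts from the local helm repo, then build the ceph chart
    helm serve &
    helm repo add local http://localhost:8879/charts
    git clone https://github.com/ceph/ceph-helm
    cd ceph-helm/ceph
    make
    # Deploy ceph into its own namespace; label mon/mgr and OSD nodes
    kubectl create namespace ceph
    kubectl label node $HOSTNAME ceph-mon=enabled ceph-mgr=enabled
    kubectl label node <osd-node> ceph-osd=enabled ceph-osd-device-dev-sdb=enabled
    helm install --name=ceph local/ceph --namespace=ceph -f ceph-overrides.yaml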
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index 3d896bf..015e217 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -24,17 +24,18 @@
 #. $ bash k8s-cluster.sh master
 #. $ bash k8s-cluster.sh agents "<nodes>"
 #.   nodes: space-separated list of ceph node IPs
-#. $ bash k8s-cluster.sh ceph "<nodes>" <cluster-net> <public-net> [ceph_dev]
+#. $ bash k8s-cluster.sh ceph "<nodes>" <cluster-net> <public-net> <ceph-mode> [ceph_dev]
 #.   nodes: space-separated list of ceph node IPs
 #.   cluster-net: CIDR of ceph cluster network e.g. 10.0.0.1/24
 #.   public-net: CIDR of public network
+#.   ceph-mode: "helm" or "baremetal"
 #.   ceph_dev: disk to use for ceph. ***MUST NOT BE USED FOR ANY OTHER PURPOSE***
 #.             if not provided, ceph data will be stored on osd nodes in /ceph
 #. $ bash k8s-cluster.sh helm
 #.   Setup helm as app kubernetes orchestration tool
 #. $ bash k8s-cluster.sh demo
 #.   Install helm charts for mediawiki and dokuwiki
-#. $ bash k8s-cluster.sh all "<nodes>" <cluster-net> <public-net> [ceph_dev]
+#. $ bash k8s-cluster.sh all "<nodes>" <cluster-net> <public-net> <ceph-mode> [ceph_dev]
 #.   Runs all the steps above
 #.
 #. Status: work in progress, incomplete
@@ -122,167 +123,6 @@ function setup_k8s_agents() {
   echo "${FUNCNAME[0]}: Cluster is ready when all nodes in the output of 'kubectl get nodes' show as 'Ready'."
 }
 
-function setup_ceph() {
-  node_ips=$1
-  cluster_net=$2
-  public_net=$3
-  ceph_dev=$4
-  echo "${FUNCNAME[0]}: Deploying ceph-mon on localhost $HOSTNAME"
-  echo "${FUNCNAME[0]}: Deploying ceph-osd on nodes $node_ips"
-  echo "${FUNCNAME[0]}: Setting cluster-network=$cluster_net and public-network=$public_net"
-  mon_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
-  all_nodes="$mon_ip $node_ips"
-  # Also caches the server fingerprints so ceph-deploy does not prompt the user
-  # Note this loop may be partially redundant with the ceph-deploy steps below
-  for node_ip in $all_nodes; do
-    echo "${FUNCNAME[0]}: Install ntp and ceph on $node_ip"
-    ssh -x -o StrictHostKeyChecking=no ubuntu@$node_ip <<EOF
-sudo apt-get update
-sudo apt-get install -y ntp ceph ceph-deploy
-EOF
-  done
-
-  echo "${FUNCNAME[0]}: Create the initial ceph cluster config in ~/ceph-cluster"
-  mkdir -p ~/ceph-cluster
-  cd ~/ceph-cluster
-  ceph-deploy new --cluster-network $cluster_net --public-network $public_net $HOSTNAME
-  # Defaults for max object name/namespace length can exceed ext4 limits; reduce them
-  cat <<EOF >>ceph.conf
-osd max object name len = 256
-osd max object namespace len = 64
-EOF
-  cat ceph.conf
-
-  echo "${FUNCNAME[0]}: Deploy ceph packages on other nodes"
-  ceph-deploy install $mon_ip $node_ips
-
-  echo "${FUNCNAME[0]}: Deploy the initial monitor and gather the keys"
-  ceph-deploy mon create-initial
-
-  if [[ "x$ceph_dev" == "x" ]]; then
-    n=1
-    for node_ip in $node_ips; do
-      echo "${FUNCNAME[0]}: Prepare ceph OSD on node $node_ip"
-      echo "$node_ip ceph-osd$n" | sudo tee -a /etc/hosts
-      # Using ceph-osd$n here avoids need for manual acceptance of the new server hash
-      ssh -x -o StrictHostKeyChecking=no ubuntu@ceph-osd$n <<EOF
-sudo mkdir -p /ceph
-sudo chown ceph:ceph /ceph
-EOF
-      ceph-deploy osd prepare ceph-osd$n:/ceph
-      ceph-deploy osd activate ceph-osd$n:/ceph
-      ((n++))
-    done
-  else
-    n=1
-    for node_ip in $node_ips; do
-      echo "${FUNCNAME[0]}: Prepare ceph OSD on node $node_ip at $ceph_dev"
-      echo "$node_ip ceph-osd$n" | sudo tee -a /etc/hosts
-      ceph-deploy osd create ceph-osd$n:$ceph_dev
-      ((n++))
-    done
-  fi
-
-  echo "${FUNCNAME[0]}: Copy the config file and admin key to the admin and OSD nodes"
-  ceph-deploy admin $mon_ip $node_ips
-
-  echo "${FUNCNAME[0]}: Create ceph-secret-admin secret in namespace 'kube-system'"
-  admin_key=$(sudo ceph auth get-key client.admin)
-  kubectl create secret generic ceph-secret-admin --from-literal=key="$admin_key" --namespace=kube-system --type=kubernetes.io/rbd
-
-  echo "${FUNCNAME[0]}: Create rbd storageClass 'slow'"
-  cat <<EOF >/tmp/ceph-sc.yaml
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  name: slow
-provisioner: kubernetes.io/rbd
-parameters:
-  monitors: $mon_ip:6789
-  adminId: admin
-  adminSecretName: ceph-secret-admin
-  adminSecretNamespace: "kube-system"
-  pool: kube
-  userId: kube
-  userSecretName: ceph-secret-user
-EOF
-  # TODO: find out where in the above ~/.kube folders became owned by root
-  sudo chown -R ubuntu:ubuntu ~/.kube/*
-  kubectl create -f /tmp/ceph-sc.yaml
-
-  echo "${FUNCNAME[0]}: Create storage pool 'kube'"
-  # https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
-  sudo ceph osd pool create kube 32 32
-
-  echo "${FUNCNAME[0]}: Authorize client 'kube' access to pool 'kube'"
-  sudo ceph auth get-or-create client.kube mon 'allow r' osd 'allow rwx pool=kube'
-
-  echo "${FUNCNAME[0]}: Create ceph-secret-user secret in namespace 'default'"
-  kube_key=$(sudo ceph auth get-key client.kube)
-  kubectl create secret generic ceph-secret-user --from-literal=key="$kube_key" --namespace=default --type=kubernetes.io/rbd
-  # A similar secret must be created in other namespaces that intend to access the ceph pool
-
-  # Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
-
-  echo "${FUNCNAME[0]}: Create and test a persistentVolumeClaim"
-  cat <<EOF >/tmp/ceph-pvc.yaml
-{
-  "kind": "PersistentVolumeClaim",
-  "apiVersion": "v1",
-  "metadata": {
-    "name": "claim1",
-    "annotations": {
-      "volume.beta.kubernetes.io/storage-class": "slow"
-    }
-  },
-  "spec": {
-    "accessModes": [
-      "ReadWriteOnce"
-    ],
-    "resources": {
-      "requests": {
-        "storage": "3Gi"
-      }
-    }
-  }
-}
-EOF
-  kubectl create -f /tmp/ceph-pvc.yaml
-  while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
-    echo "${FUNCNAME[0]}: Waiting for pvc claim1 to be 'Bound'"
-    kubectl describe pvc
-    sleep 10
-  done
-  echo "${FUNCNAME[0]}: pvc claim1 successfully bound to $(kubectl get pvc -o jsonpath='{.spec.volumeName}' claim1)"
-  kubectl get pvc
-  kubectl delete pvc claim1
-  kubectl describe pods
-}
-
 function wait_for_service() {
   echo "${FUNCNAME[0]}: Waiting for service $1 to be available"
   pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
@@ -406,6 +246,14 @@ function setup_helm() {
   # e.g. helm install stable/dokuwiki
 }
 
+function setup_ceph() {
+  if [[ "$4" == "helm" ]]; then
+    source ./ceph-helm.sh "$1" $2 $3 $5
+  else
+    source ./ceph-baremetal.sh "$1" $2 $3 $5
+  fi
+}
+
 export WORK_DIR=$(pwd)
 case "$1" in
   master)
@@ -415,7 +263,7 @@ case "$1" in
     setup_k8s_agents "$2"
     ;;
   ceph)
-    setup_ceph "$2" $3 $4 $5
+    setup_ceph "$2" $3 $4 $5 $6
     ;;
   helm)
     setup_helm
@@ -426,7 +274,7 @@ case "$1" in
   all)
     setup_k8s_master
     setup_k8s_agents "$2"
-    setup_ceph "$2" $3 $4 $5
+    setup_ceph "$2" $3 $4 $5 $6
     setup_helm
     demo_chart dokuwiki
     ;;
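For reference, example invocations of the refactored entry point with the new ceph-mode argument (the node IPs, CIDRs, and OSD disk below are illustrative):

    # Ceph deployed directly on the nodes (baremetal mode), OSD data on /dev/sdb
    bash k8s-cluster.sh ceph "10.0.0.2 10.0.0.3" 10.0.0.0/24 10.0.0.0/24 baremetal /dev/sdb
    # Full run: master, agents, ceph via the (in-progress) helm mode, helm, demo charts
    bash k8s-cluster.sh all "10.0.0.2 10.0.0.3" 10.0.0.0/24 10.0.0.0/24 helm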