-rw-r--r--  docs/images/models-k8s.png                                 bin 53813 -> 107735 bytes
-rw-r--r--  tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml    17
-rw-r--r--  tools/cloudify/k8s-cloudify.sh                              33
-rw-r--r--  tools/kubernetes/README.md                                  24
-rw-r--r--  tools/kubernetes/ceph-helm.sh                                2
-rw-r--r--  tools/kubernetes/demo_deploy.sh                            176
-rw-r--r--  tools/kubernetes/helm-tools.sh                             203
-rw-r--r--  tools/kubernetes/k8s-cluster.sh                            275
-rw-r--r--  tools/prometheus/prometheus-tools.sh                         3
9 files changed, 477 insertions(+), 256 deletions(-)
diff --git a/docs/images/models-k8s.png b/docs/images/models-k8s.png
index c54bcdb..107e2bb 100644
--- a/docs/images/models-k8s.png
+++ b/docs/images/models-k8s.png
Binary files differ
diff --git a/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml b/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml
index bdbba8c..54c0b45 100644
--- a/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml
+++ b/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml
@@ -1,4 +1,21 @@
tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify demo blueprint for running nginx under kubernetes.
+#
# Based upon
# https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/blob/master/examples/simple-blueprint-defined-resource.yaml
# https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/blob/1.2.0/examples/example-blueprint.yaml
diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index bff85e4..cf6de93 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -9,23 +9,25 @@
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#. What this is: Setup script for Cloudify use with Kubernetes.
#. Prerequisites:
-#. - Kubernetes cluster installed per k8s-cluster.sh (in this repo)
+#. - OPNFV Models repo cloned into ~/models, i.e.
+#. git clone https://gerrit.opnfv.org/gerrit/models ~/models
+#. - Kubernetes cluster installed per tools/kubernetes/demo_deploy.sh and
+#. environment setup file ~/models/tools/k8s_env.sh as setup by demo_deploy.sh
#. Usage:
#. From a server with access to the kubernetes master node:
-#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ scp -r ~/models/tools/cloudify ubuntu@<k8s-master>:/home/ubuntu/.
+#. $ cd ~/models/tools/cloudify
+#. $ scp -r ~/models/tools/* ubuntu@<k8s-master>:/home/ubuntu/.
#. <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x ubuntu@<k8s-master> cloudify/k8s-cloudify.sh prereqs
#. prereqs: installs prerequisites and configures ubuntu user for kvm use
#. $ ssh -x ubuntu@<k8s-master> bash cloudify/k8s-cloudify.sh setup
#. setup: installs cloudify CLI and Manager
-#. $ bash k8s-cloudify.sh demo <start|stop> <k8s-master>
+#. $ bash k8s-cloudify.sh demo <start|stop>
#. demo: control demo blueprint
#. start|stop: start or stop the demo
#. <k8s-master>: IP or hostname of kubernetes master server
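Taken together, the revised usage above amounts to a session like the following, run from a host with access to the k8s master (illustrative sketch only; 10.10.10.10 stands in for the master address, and the demo commands read the real address from ~/k8s_env.sh as written by demo_deploy.sh):

    cd ~/models/tools/cloudify
    scp -r ~/models/tools/* ubuntu@10.10.10.10:/home/ubuntu/.
    ssh -x ubuntu@10.10.10.10 cloudify/k8s-cloudify.sh prereqs
    ssh -x ubuntu@10.10.10.10 bash cloudify/k8s-cloudify.sh setup
    bash k8s-cloudify.sh demo start    # deploys the k8s-hello-world (nginx) blueprint
    bash k8s-cloudify.sh demo stop     # uninstalls it again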
@@ -51,8 +53,7 @@ function fail() {
function log() {
f=$(caller 0 | awk '{print $2}')
l=$(caller 0 | awk '{print $1}')
- echo ""
- echo "$f:$l ($(date)) $1"
+ echo; echo "$f:$l ($(date)) $1"
}
function prereqs() {
@@ -141,6 +142,7 @@ function setup () {
HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
# Forward host port 80 to VM
+  log "Set up iptables to forward $HOST_IP port 80 to Cloudify Manager VM at $VM_IP"
sudo iptables -t nat -I PREROUTING -p tcp -d $HOST_IP --dport 80 -j DNAT --to-destination $VM_IP:80
sudo iptables -I FORWARD -m state -d $VM_IP/32 --state NEW,RELATED,ESTABLISHED -j ACCEPT
sudo iptables -t nat -A POSTROUTING -j MASQUERADE
@@ -159,7 +161,6 @@ function setup () {
function service_port() {
name=$1
- manager_ip=$2
tries=6
port="null"
while [[ "$port" == "null" && $tries -gt 0 ]]; do
@@ -177,7 +178,7 @@ function service_port() {
sleep 10
((tries--))
done
- if [[ "$port" == "" ]]; then
+ if [[ "$port" == "null" ]]; then
jq -r '.items' /tmp/json
fail "node_port not found for service"
fi
@@ -186,7 +187,6 @@ function service_port() {
function start() {
name=$1
bp=$2
- manager_ip=$3
log "start app $name with blueprint $bp"
log "copy kube config from k8s master for insertion into blueprint"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
@@ -235,7 +235,7 @@ function start() {
function stop() {
name=$1
bp=$2
- manager_ip=$3
+
# TODO: fix the need for this workaround
log "try to first cancel all current executions"
curl -s -u admin:admin --header 'Tenant: default_tenant' \
@@ -282,6 +282,7 @@ function stop() {
-d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
-o /tmp/json http://$manager_ip/api/v3.1/executions
id=$(jq -r ".id" /tmp/json)
+ log "uninstall execution id = $id"
status=""
tries=1
while [[ "$status" != "terminated" && $tries -lt 10 ]]; do
@@ -354,7 +355,6 @@ function demo() {
# echo "master-port: $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $2}')" >>~/cloudify/blueprints/k8s-hello-world/inputs.yaml
# echo "file_content:" >>~/cloudify/blueprints/k8s-hello-world/inputs.yaml
# sed 's/^/ /' ~/.kube/config | tee -a ~/cloudify/blueprints/k8s-hello-world/inputs.yaml
- manager_ip=$2
cd ~/models/tools/cloudify/blueprints
if [[ "$1" == "start" ]]; then
@@ -376,6 +376,9 @@ function clean () {
}
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+source ~/k8s_env.sh
+manager_ip=$k8s_master
+
case "$1" in
"prereqs")
prereqs
@@ -387,13 +390,13 @@ case "$1" in
demo $2 $3
;;
"start")
- start $2 $3 $4
+ start $2 $3
;;
"stop")
- stop $2 $3 $4
+ stop $2 $3
;;
"port")
- service_port $2 $3
+ service_port $2
;;
"clean")
clean
diff --git a/tools/kubernetes/README.md b/tools/kubernetes/README.md
index b8c81f2..fa6ca5c 100644
--- a/tools/kubernetes/README.md
+++ b/tools/kubernetes/README.md
@@ -1,17 +1,7 @@
-This folder contains scripts etc to setup a kubernetes cluster with the following type of environment and components:
-* hardware
- * 2 or more bare metal servers
- * two connected networks (public and private): may work if just a single network
- * one or more disks on each server: ceph-osd can be setup on an unused disk, or a folder (/ceph) on the host OS disk
-* kubernetes
- * single master (admin) node
- * other cluster nodes
-* ceph: ceph-mon on admin, ceph-osd on other nodes
-* helm on admin node
-* demo helm charts, cloned from https://github.com/kubernetes/charts and modified/tested to work on this cluster
-
-See comments in [setup script](k8s-cluster.sh) for more info.
-
-This is a work in progress!
-
-![Resulting Cluster](/docs/images/models-k8s.png?raw=true "Resulting Cluster")
+This folder contains scripts etc to setup a kubernetes cluster with the following type of environment and components:
+* hardware
+ * 2 or more bare metal servers
+ * two connected networks (public and private): may work if just a single network
+ * one or more disks on each server: ceph-osd can be setup on an unused disk, or a folder (/ceph) on the host OS disk
+* Kubernetes
+ * single k8s master (admin) node
+ * other cluster (k8s worker) nodes
+* Ceph: backend for persistent volume claims (PVCs) for the k8s cluster, deployed using Helm charts from https://github.com/att/netarbiter
+* Helm on k8s master (used for initial cluster deployment only)
+* demo helm charts for Helm install verification etc, cloned from https://github.com/kubernetes/charts and modified/tested to work on this cluster
+* Prometheus: server on the k8s master, exporters on the k8s workers
+* Cloudify CLI and Cloudify Manager with Kubernetes plugin (https://github.com/cloudify-incubator/cloudify-kubernetes-plugin)
+* OPNFV VES Collector and Agent
+* OPNFV Barometer collectd plugin with libvirt and kafka support
+* As many components as possible above will be deployed using k8s charts, managed either through Helm or Cloudify
+A larger goal of this work is to demonstrate hybrid cloud deployment as indicated by the presence of OpenStack nodes in the diagram below.
+See comments in [setup script](k8s-cluster.sh) and the other scripts in this folder for more info.
+This is a work in progress!
+![Resulting Cluster](/docs/images/models-k8s.png?raw=true "Resulting Cluster")
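Once demo_deploy.sh (changed below) has run to completion, the components listed in this README can be spot-checked from the deployment host along these lines (a hedged sketch; <master> stands for the k8s master address and is not a literal hostname):

    ssh -x ubuntu@<master> kubectl get nodes                  # all nodes should report 'Ready'
    ssh -x ubuntu@<master> kubectl get pods --all-namespaces  # kube-dns, tiller, ceph and demo pods
    curl -k https://<master>:6443/api/v1                      # kubernetes API
    curl http://<master>:9090                                 # Prometheus UI
    curl -u admin:admin --header 'Tenant: default_tenant' http://<master>/api/v3.1/status   # Cloudify Manager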
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index ea5dccf..a1be588 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -65,7 +65,7 @@ search ceph.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
EOF
- ./helm-install-ceph.sh cephtest $private_net $public_net
+ ./helm-install-ceph.sh cephtest $public_net $private_net
log "Check the pod status of ceph-mon, ceph-mgr, ceph-mon-check, and rbd-provisioner"
services="rbd-provisioner ceph-mon-0 ceph-mgr ceph-mon-check"
diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 187caa2..dba500b 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -16,12 +16,10 @@
#. What this is: Complete scripted deployment of an experimental kubernetes-based
#. cloud-native application platform. When complete, kubernetes and the following
#. will be installed:
-#. - helm and dokuwiki as a demo helm cart based application
+#. - helm and dokuwiki as a demo helm chart based application
#. - prometheus + grafana for cluster monitoring/stats
#. - cloudify + kubernetes plugin and a demo hello world (nginx) app installed
-#. will be setup with:
-#. Prometheus dashboard: http://<master_public_ip>:9090
-#. Grafana dashboard: http://<master_public_ip>:3000
+#. - OPNFV VES as an ONAP-compatible monitoring platform
#.
#. Prerequisites:
#. - Ubuntu server for kubernetes cluster nodes (master and worker nodes)
@@ -35,68 +33,134 @@
#. <key>: name of private key for cluster node ssh (in current folder)
#. <hosts>: space separated list of hostnames managed by MAAS
#. <master>: IP of cluster master node
-#. <workers>: space separated list of agent node IPs
+#. <workers>: space separated list of worker node IPs
#. <pub-net>: CIDR formatted public network
#. <priv-net>: CIDR formatted private network (may be same as pub-net)
#. <ceph-mode>: "helm" or "baremetal"
#. <ceph-dev>: disk (e.g. sda, sdb) or folder (e.g. "/ceph")
#. <extras>: optional name of script for extra setup functions as needed
+#.
+#. See tools/demo_deploy.sh in the OPNFV VES repo for additional environment
+#. variables (mandatory/optional) for VES
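As an illustration of the parameter list above, a full deployment might be launched roughly as follows (all values are hypothetical placeholders):

    # Hypothetical values, in the documented order:
    # <key> <hosts> <master> <workers> <pub-net> <priv-net> <ceph-mode> <ceph-dev> [<extras>]
    bash ~/models/tools/kubernetes/demo_deploy.sh k8s_key "node1 node2 node3" \
      10.10.10.11 "10.10.10.12 10.10.10.13" 10.10.10.0/24 10.10.20.0/24 \
      helm sdb extras.sh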
-key=$1
-nodes="$2"
-master=$3
-workers="$4"
-priv_net=$5
-pub_net=$6
-ceph_mode=$7
-ceph_dev=$8
-extras=$9
+function run() {
+ start=$((`date +%s`/60))
+ $1
+ step_end "$1"
+}
-source ~/models/tools/maas/deploy.sh $1 "$2" $9
-eval `ssh-agent`
-ssh-add $key
-if [[ "x$extras" != "x" ]]; then source $extras; fi
-scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $key ubuntu@$master:/home/ubuntu/$key
-echo "$0 $(date): Setting up kubernetes..."
-scp -r -o StrictHostKeyChecking=no ~/models/tools/kubernetes/* \
- ubuntu@$master:/home/ubuntu/.
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master <<EOF
+function step_end() {
+ end=$((`date +%s`/60))
+ runtime=$((end-start))
+ log "step \"$1\" duration = $runtime minutes"
+}
+
+function run_master() {
+ start=$((`date +%s`/60))
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$k8s_master <<EOF
exec ssh-agent bash
-ssh-add $key
-bash k8s-cluster.sh all "$workers" $priv_net $pub_net $ceph_mode $ceph_dev
+ssh-add $k8s_key
+$1
EOF
-# TODO: Figure this out... Have to break the setup into two steps as something
-# causes the ssh session to end before the prometheus setup, if both scripts
-# are in the same ssh session
-echo "Setting up Prometheus..."
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master mkdir -p \
- /home/ubuntu/models/tools/prometheus
-scp -r -o StrictHostKeyChecking=no ~/models/tools/prometheus/* \
- ubuntu@$master:/home/ubuntu/models/tools/prometheus
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master <<EOF
-exec ssh-agent bash
-ssh-add $key
-cd models/tools/prometheus
-bash prometheus-tools.sh all "$workers"
+ step_end "$1"
+}
+
+extras=$9
+
+cat <<EOF >~/k8s_env.sh
+k8s_key=$1
+k8s_nodes="$2"
+k8s_master=$3
+k8s_workers="$4"
+k8s_priv_net=$5
+k8s_pub_net=$6
+k8s_ceph_mode=$7
+k8s_ceph_dev=$8
+export k8s_key
+export k8s_nodes
+export k8s_master
+export k8s_workers
+export k8s_priv_net
+export k8s_pub_net
+export k8s_ceph_mode
+export k8s_ceph_dev
EOF
-echo "$0 $(date): Setting up cloudify..."
+source ~/k8s_env.sh
+env | grep k8s_
+
+source ~/models/tools/maas/deploy.sh $k8s_key "$k8s_nodes" $extras
+eval `ssh-agent`
+ssh-add $k8s_key
+scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
+ ubuntu@$k8s_master:/home/ubuntu/$k8s_key
+scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
+ ubuntu@$k8s_master:/home/ubuntu/.
+
+echo; echo "$0 $(date): Setting up kubernetes master..."
+scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/models/tools/kubernetes/* ubuntu@$k8s_master:/home/ubuntu/.
+run_master "bash k8s-cluster.sh master"
+
+echo; echo "$0 $(date): Setting up kubernetes workers..."
+run_master "bash k8s-cluster.sh workers \"$k8s_workers\""
+
+echo; echo "$0 $(date): Setting up helm..."
+run_master "bash k8s-cluster.sh helm"
+
+echo; echo "$0 $(date): Verifying kubernetes+helm install..."
+run_master "bash k8s-cluster.sh demo start nginx"
+run_master "bash k8s-cluster.sh demo stop nginx"
+
+echo; echo "$0 $(date): Setting up ceph-helm"
+run_master "bash k8s-cluster.sh ceph \"$k8s_workers\" $k8s_priv_net $k8s_pub_net $k8s_ceph_mode $k8s_ceph_dev"
+
+echo; echo "$0 $(date): Verifying kubernetes+helm+ceph install..."
+run_master "bash k8s-cluster.sh demo start dokuwiki"
+
+echo; echo "Setting up Prometheus..."
+scp -r -o StrictHostKeyChecking=no ~/models/tools/prometheus/* \
+ ubuntu@$k8s_master:/home/ubuntu/.
+run_master "bash prometheus-tools.sh all \"$k8s_workers\""
+
+echo; echo "$0 $(date): Setting up cloudify..."
scp -r -o StrictHostKeyChecking=no ~/models/tools/cloudify \
- ubuntu@$master:/home/ubuntu/.
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master \
- bash cloudify/k8s-cloudify.sh prereqs
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master \
- bash cloudify/k8s-cloudify.sh setup
-source ~/models/tools/cloudify/k8s-cloudify.sh demo start $master
-
-echo "$0 $(date): All done!"
-export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
-export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
+ ubuntu@$k8s_master:/home/ubuntu/.
+run_master "bash cloudify/k8s-cloudify.sh prereqs"
+run_master "bash cloudify/k8s-cloudify.sh setup"
+
+echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
+run "bash $HOME/models/tools/cloudify/k8s-cloudify.sh demo start"
+
+echo; echo "$0 $(date): Setting up VES"
+# not re-cloned if existing - allows patch testing locally
+if [[ ! -d ~/ves ]]; then
+ git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
+fi
+ves_influxdb_host=$k8s_master:8086
+export ves_influxdb_host
+ves_grafana_host=$k8s_master:3000
+export ves_grafana_host
+ves_grafana_auth=admin:admin
+export ves_grafana_auth
+ves_kafka_hostname=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master hostname)
+export ves_kafka_hostname
+ves_loglevel=$ves_loglevel
+export ves_loglevel
+# Can't pass quoted strings (e.g. "$k8s_workers") through the run() wrapper, so time this step directly
+start=$((`date +%s`/60))
+bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_master "$k8s_workers"
+step_end "bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_master \"$k8s_workers\""
+
+echo; echo "$0 $(date): All done!"
+export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
+export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
# TODO update Cloudify demo app to have public exposed service address
-port=$( bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx $master)
-echo "Cloudify-deployed demo app nginx is available at http://$master:$port"
-echo "Prometheus UI is available at http://$master:9090"
-echo "Grafana dashboards are available at http://$master:3000 (login as admin/admin)"
-echo "Grafana API is available at http://admin:admin@$master:3000/api/v1/query?query=<string>"
-echo "Kubernetes API is available at https://$master:6443/api/v1/"
-echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$master/api/v3.1/status"
+port=$( bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx $k8s_master)
+echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+echo "Prometheus UI is available at http://$k8s_master:9090"
+echo "Grafana dashboards are available at http://$ves_grafana_host (login as $ves_grafana_auth)"
+echo "Grafana API is available at http://$ves_grafana_auth@$ves_influx_host/api/v1/query?query=<string>"
+echo "Kubernetes API is available at https://$k8s_master:6443/api/v1/"
+echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
diff --git a/tools/kubernetes/helm-tools.sh b/tools/kubernetes/helm-tools.sh
new file mode 100644
index 0000000..e528a15
--- /dev/null
+++ b/tools/kubernetes/helm-tools.sh
@@ -0,0 +1,203 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: script to setup Helm as kubernetes chart manager, and to deploy
+#. demo apps.
+#. Prerequisites:
+#. - Kubernetes cluster deployed using k8s-cluster.sh (demo charts supported
+#. leverage the ceph SDS storage classes setup by k8s-cluster.sh)
+#. Usage:
+# Intended to be called from k8s-cluster.sh. To run directly:
+#. $ bash helm-tools.sh setup
+#. $ bash helm-tools.sh <start|stop> <chart>
+#. start|stop: start or stop the demo app
+#. chart: name of helm chart; currently implemented charts include nginx,
+#. mediawiki, dokuwiki, wordpress, redmine
+#. For info see https://github.com/kubernetes/charts/tree/master/stable
+#.
+#. Status: work in progress, incomplete
+#
+
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo "$f:$l ($(date)) $1"
+}
+
+function setup_helm() {
+ log "Setup helm"
+ # Install Helm
+ cd ~
+ curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
+ chmod 700 get_helm.sh
+ ./get_helm.sh
+ helm init
+ nohup helm serve > /dev/null 2>&1 &
+ helm repo update
+ # TODO: Workaround for bug https://github.com/kubernetes/helm/issues/2224
+ # For testing use only!
+ kubectl create clusterrolebinding permissive-binding \
+ --clusterrole=cluster-admin --user=admin --user=kubelet \
+ --group=system:serviceaccounts;
+ # TODO: workaround for tiller FailedScheduling (No nodes are available that
+ # match all of the following predicates:: PodToleratesNodeTaints (1).)
+ # kubectl taint nodes $HOSTNAME node-role.kubernetes.io/master:NoSchedule-
+ # Wait till tiller is running
+ tiller_deploy=$(kubectl get pods --all-namespaces | grep tiller-deploy | awk '{print $4}')
+ while [[ "$tiller_deploy" != "Running" ]]; do
+ log "tiller-deploy status is $tiller_deploy. Waiting 60 seconds for it to be 'Running'"
+ sleep 60
+ tiller_deploy=$(kubectl get pods --all-namespaces | grep tiller-deploy | awk '{print $4}')
+ done
+ log "tiller-deploy status is $tiller_deploy"
+
+ # Install services via helm charts from https://kubeapps.com/charts
+ # e.g. helm install stable/dokuwiki
+}
+
+function wait_for_service() {
+ log "Waiting for service $1 to be available"
+ pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
+ log "Service $1 is at pod $pod"
+ ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
+ while [[ "$ready" != "true" ]]; do
+ log "pod $1 is not yet ready... waiting 10 seconds"
+ sleep 10
+ # TODO: figure out why transient pods sometimes mess up this logic, thus need to re-get the pods
+ pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
+ ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
+ done
+ log "pod $pod is ready"
+ host_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.hostIP}' $pod)
+ port=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services $1)
+ log "$pod pod is running on assigned node $host_ip"
+ log "$1 service is assigned node_port $port"
+ log "verify $1 service is accessible via all workers at node_port $port"
+ nodes=$(kubectl get nodes | awk '/Ready/ {print $1}')
+ for node in $nodes; do
+ ip=$(kubectl describe nodes $node | awk '/InternalIP/ { print $2}')
+ while ! curl http://$ip:$port ; do
+ log "$1 service is not yet responding at worker $node IP $ip... waiting 10 seconds"
+ sleep 10
+ done
+ log "$1 service is accessible at worker $node at http://$ip:$port"
+ done
+}
+
+function start_chart() {
+ rm -rf /tmp/git/charts
+ git clone https://github.com/kubernetes/charts.git /tmp/git/charts
+ cd /tmp/git/charts/stable
+ case "$1" in
+ nginx)
+ rm -rf /tmp/git/helm
+ git clone https://github.com/kubernetes/helm.git /tmp/git/helm
+ cd /tmp/git/helm/docs/examples
+ sed -i -- 's/type: ClusterIP/type: NodePort/' ./nginx/values.yaml
+ helm install --name nx -f ./nginx/values.yaml ./nginx
+ wait_for_service nx-nginx
+ ;;
+ mediawiki)
+ mkdir ./mediawiki/charts
+ cp -r ./mariadb ./mediawiki/charts
+ # LoadBalancer is N/A for baremetal (public cloud only) - use NodePort
+ sed -i -- 's/LoadBalancer/NodePort/g' ./mediawiki/values.yaml
+ # Select the storageClass created in the ceph setup step
+ sed -i -- 's/# storageClass:/storageClass: "general"/g' ./mediawiki/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./mediawiki/charts/mariadb/values.yaml
+ helm install --name mw -f ./mediawiki/values.yaml ./mediawiki
+ wait_for_service mw-mediawiki
+ ;;
+ dokuwiki)
+ sed -i -- 's/# storageClass:/storageClass: "general"/g' ./dokuwiki/values.yaml
+ sed -i -- 's/LoadBalancer/NodePort/g' ./dokuwiki/values.yaml
+ helm install --name dw -f ./dokuwiki/values.yaml ./dokuwiki
+ wait_for_service dw-dokuwiki
+ ;;
+ wordpress)
+ mkdir ./wordpress/charts
+ cp -r ./mariadb ./wordpress/charts
+ sed -i -- 's/LoadBalancer/NodePort/g' ./wordpress/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./wordpress/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./wordpress/charts/mariadb/values.yaml
+ helm install --name wp -f ./wordpress/values.yaml ./wordpress
+ wait_for_service wp-wordpress
+ ;;
+ redmine)
+ mkdir ./redmine/charts
+ cp -r ./mariadb ./redmine/charts
+ cp -r ./postgresql ./redmine/charts
+ sed -i -- 's/LoadBalancer/NodePort/g' ./redmine/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/charts/mariadb/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/charts/postgresql/values.yaml
+ helm install --name rdm -f ./redmine/values.yaml ./redmine
+ wait_for_service rdm-redmine
+ ;;
+ owncloud)
+ # NOT YET WORKING: needs resolvable hostname for service
+ mkdir ./owncloud/charts
+ cp -r ./mariadb ./owncloud/charts
+ sed -i -- 's/LoadBalancer/NodePort/g' ./owncloud/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./owncloud/values.yaml
+ sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./owncloud/charts/mariadb/values.yaml
+ helm install --name oc -f ./owncloud/values.yaml ./owncloud
+ wait_for_service oc-owncloud
+ ;;
+ *)
+ log "demo not implemented for $1"
+ esac
+# extra useful commands
+# kubectl describe pvc
+# kubectl get pvc
+# kubectl describe pods
+# kubectl get pods --namespace default
+# kubectl get pods --all-namespaces
+# kubectl get svc --namespace default dw-dokuwiki
+# kubectl describe svc --namespace default dw-dokuwiki
+# kubectl describe pods --namespace default dw-dokuwiki
+}
+
+function stop_chart() {
+ log "stop chart $1"
+ service=$(kubectl get services --namespace default | awk "/$1/ {print \$1}")
+ kubectl delete services --namespace default $service
+ secret=$(kubectl get secrets --namespace default | awk "/$1/ {print \$1}")
+ kubectl delete secrets --namespace default $secret
+ pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
+ kubectl delete pods --namespace default $pod
+ release=$(echo $service | cut -d '-' -f 1)
+ helm del --purge $release
+ job=$(kubectl get jobs --namespace default | awk "/$1/ {print \$1}")
+ kubectl delete jobs --namespace default $job
+}
+
+export WORK_DIR=$(pwd)
+case "$1" in
+ setup)
+ setup_helm
+ ;;
+ start)
+ start_chart $2
+ ;;
+ stop)
+ stop_chart $2
+ ;;
+ clean)
+ # TODO
+ ;;
+ *)
+ if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then grep '#. ' $0; fi
+esac
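Per the usage header of this new script, helm-tools.sh can also be driven directly on the k8s master once k8s-cluster.sh has prepared the cluster (and, for the persistent-volume charts, the ceph storage class), for example:

    bash helm-tools.sh setup             # install helm/tiller and wait for tiller-deploy to be Running
    bash helm-tools.sh start dokuwiki    # install the chart and wait for its NodePort service on every worker
    bash helm-tools.sh stop dokuwiki     # delete the service/pods/secrets/jobs and purge the helm release
    # other charts currently wired up: nginx, mediawiki, wordpress, redmine (owncloud not yet working)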
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index f55c1b9..1700a6a 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -15,15 +15,18 @@
#
#. What this is: script to setup a kubernetes cluster with calico as cni
#. Prerequisites:
-#. - Ubuntu xenial server for master and agent nodes
-#. - key-based auth setup for ssh/scp between master and agent nodes
+#. - Ubuntu xenial server for master and worker nodes
+#. - key-based auth setup for ssh/scp between master and worker nodes
#. - 192.168.0.0/16 should not be used on your server network interface subnets
#. Usage:
#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
#. $ cd ~/models/tools/kubernetes
#. $ bash k8s-cluster.sh master
-#. $ bash k8s-cluster.sh agents "<nodes>"
+#. $ bash k8s-cluster.sh workers "<nodes>"
#. nodes: space-separated list of worker node IPs
+#. $ bash k8s-cluster.sh helm
+#. Setup helm as kubernetes app management tool. Note this is a
+#. prerequisite for selecting "helm" ceph-mode as described below.
#. $ bash k8s-cluster.sh ceph "<nodes>" <cluster-net> <public-net> <ceph-mode> [ceph_dev]
#. nodes: space-separated list of ceph node IPs
#. cluster-net: CIDR of ceph cluster network e.g. 10.0.0.1/24
@@ -31,12 +34,10 @@
#. ceph-mode: "helm" or "baremetal"
#. ceph_dev: disk to use for ceph. ***MUST NOT BE USED FOR ANY OTHER PURPOSE***
#. if not provided, ceph data will be stored on osd nodes in /ceph
-#. $ bash k8s-cluster.sh helm
-#. Setup helm as app kubernetes orchestration tool
-#. $ bash k8s-cluster.sh demo
-#. Install helm charts for mediawiki and dokuwiki
#. $ bash k8s-cluster.sh all "<nodes>" <cluster-net> <public-net> <ceph-mode> [ceph_dev]
-#. Runs all the steps above
+#. Runs all the steps above, including starting the dokuwiki demo app.
+#. $ bash k8s-cluster.sh demo <start|stop> <chart>
+#. Start or stop demo helm charts. See helm-tools.sh for chart options.
#.
#. When deployment is complete, the k8s API will be available at the master
#. node, e.g. via: curl -k https://<master-ip>:6443/api/v1
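Strung together, the per-step usage above is the manual equivalent of what demo_deploy.sh now drives remotely; the worker IPs, networks and ceph device below are hypothetical:

    git clone https://gerrit.opnfv.org/gerrit/models ~/models
    cd ~/models/tools/kubernetes
    bash k8s-cluster.sh master
    bash k8s-cluster.sh workers "10.10.10.12 10.10.10.13"
    bash k8s-cluster.sh helm
    bash k8s-cluster.sh ceph "10.10.10.12 10.10.10.13" 10.10.20.0/24 10.10.10.0/24 helm sdb
    bash k8s-cluster.sh demo start dokuwiki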
@@ -44,10 +45,15 @@
#. Status: work in progress, incomplete
#
+function fail() {
+ log $1
+ exit 1
+}
+
function log() {
f=$(caller 0 | awk '{print $2}')
l=$(caller 0 | awk '{print $1}')
- echo "$f:$l ($(date)) $1"
+ echo; echo "$f:$l ($(date)) $1"
}
function setup_prereqs() {
@@ -55,16 +61,22 @@ function setup_prereqs() {
cat <<'EOG' >/tmp/prereqs.sh
#!/bin/bash
# Basic server pre-reqs
-sudo apt-get -y remove kubectl kubelet kubeadm
+echo; echo "prereqs.sh: ($(date)) Basic prerequisites"
sudo apt-get update
sudo apt-get upgrade -y
-# Set hostname on agent nodes
-if [[ "$1" == "agent" ]]; then
- echo $(ip route get 8.8.8.8 | awk '{print $NF; exit}') $HOSTNAME | sudo tee -a /etc/hosts
+if [[ $(grep -c $HOSTNAME /etc/hosts) -eq 0 ]]; then
+ echo; echo "prereqs.sh: ($(date)) Add $HOSTNAME to /etc/hosts"
+ echo "$(ip route get 8.8.8.8 | awk '{print $NF; exit}') $HOSTNAME" \
+ | sudo tee -a /etc/hosts
fi
-# Install docker 1.12 (default for xenial is 1.12.6)
+echo; echo "prereqs.sh: ($(date)) Install latest docker"
sudo apt-get install -y docker.io
-sudo service docker start
+# Alternate for 1.12.6
+#sudo apt-get install -y libltdl7
+#wget https://packages.docker.com/1.12/apt/repo/pool/main/d/docker-engine/docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb
+#sudo dpkg -i docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb
+sudo service docker restart
+echo; echo "prereqs.sh: ($(date)) Get k8s packages"
export KUBE_VERSION=1.7.5
# per https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
# Install kubelet, kubeadm, kubectl per https://kubernetes.io/docs/setup/independent/install-kubeadm/
@@ -74,11 +86,32 @@ cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
-# Next command is to workaround bug resulting in "PersistentVolumeClaim is not bound" for pod startup (remain in Pending)
-# TODO: reverify if this is still an issue in the final working script
-sudo apt-get -y install --allow-downgrades kubectl=${KUBE_VERSION}-00 kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00
-# Needed for API output parsing
+echo; echo "prereqs.sh: ($(date)) Install kubectl, kubelet, kubeadm"
+sudo apt-get -y install --allow-downgrades kubectl=${KUBE_VERSION}-00 \
+ kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00
+echo; echo "prereqs.sh: ($(date)) Install jq for API output parsing"
sudo apt-get -y install jq
+echo; echo "prereqs.sh: ($(date)) Set firewall rules"
+# Per https://kubernetes.io/docs/setup/independent/install-kubeadm/
+if [[ "$(sudo ufw status)" == "Status: active" ]]; then
+ if [[ "$1" == "master" ]]; then
+ sudo ufw allow 6443/tcp
+ sudo ufw allow 2379:2380/tcp
+ sudo ufw allow 10250/tcp
+ sudo ufw allow 10251/tcp
+ sudo ufw allow 10252/tcp
+ sudo ufw allow 10255/tcp
+ else
+ sudo ufw allow 10250/tcp
+ sudo ufw allow 10255/tcp
+ sudo ufw allow 30000:32767/tcp
+ fi
+fi
+# TODO: fix need for this workaround: disable firewall since the commands
+# above do not appear to open the needed ports, even if ufw is inactive
+# (symptom: nodeport requests fail unless sent from within the cluster or
+# to the node IP where the pod is assigned) issue discovered ~11/16/17
+sudo ufw disable
EOG
}
@@ -90,7 +123,8 @@ function setup_k8s_master() {
bash /tmp/prereqs.sh master
# per https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
# If the following command fails, run "kubeadm reset" before trying again
- # --pod-network-cidr=192.168.0.0/16 is required for calico; this should not conflict with your server network interface subnets
+ # --pod-network-cidr=192.168.0.0/16 is required for calico; this should not
+ # conflict with your server network interface subnets
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 >>/tmp/kubeadm.out
cat /tmp/kubeadm.out
export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
@@ -103,16 +137,11 @@ function setup_k8s_master() {
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Deploy pod network
log "Deploy calico as CNI"
- sudo kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
-}
-
-function setup_k8s_agents() {
- agents="$1"
- export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
- log "Installing agents at $1 with joincmd: $k8s_joincmd"
-
- setup_prereqs
+ # Updated to deploy Calico 2.6 per the create-cluster-kubeadm guide above
+ # sudo kubectl apply -f http://docs.projectcalico.org/v2.4/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
+ sudo kubectl apply -f https://docs.projectcalico.org/v2.6/getting-started/kubernetes/installation/hosted/kubeadm/1.6/calico.yaml
+ log "Wait for kubedns to be Running"
kubedns=$(kubectl get pods --all-namespaces | grep kube-dns | awk '{print $4}')
while [[ "$kubedns" != "Running" ]]; do
log "kube-dns status is $kubedns. Waiting 60 seconds for it to be 'Running'"
@@ -120,141 +149,43 @@ function setup_k8s_agents() {
kubedns=$(kubectl get pods --all-namespaces | grep kube-dns | awk '{print $4}')
done
log "kube-dns status is $kubedns"
-
- for agent in $agents; do
- log "Install agent at $agent"
- scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/prereqs.sh ubuntu@$agent:/tmp/prereqs.sh
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$agent bash /tmp/prereqs.sh agent
- # Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet is not empty" per https://github.com/kubernetes/kubeadm/issues/1
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$agent sudo kubeadm reset
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$agent sudo $k8s_joincmd
- done
-
- log "Cluster is ready when all nodes in the output of 'kubectl get nodes' show as 'Ready'."
}
-function wait_for_service() {
- log "Waiting for service $1 to be available"
- pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- log "Service $1 is at pod $pod"
- ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
- while [[ "$ready" != "true" ]]; do
- log "$1 container is not yet ready... waiting 10 seconds"
- sleep 10
- # TODO: figure out why transient pods sometimes mess up this logic, thus need to re-get the pods
- pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
- done
- log "pod $pod container status is $ready"
- host_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.hostIP}' $pod)
- port=$(kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services $1)
- log "pod $pod container is at host $host_ip and port $port"
- while ! curl http://$host_ip:$port ; do
- log "$1 service is not yet responding... waiting 10 seconds"
- sleep 10
+function setup_k8s_workers() {
+ workers="$1"
+ export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
+ log "Installing workers at $1 with joincmd: $k8s_joincmd"
+
+ for worker in $workers; do
+ log "Install worker at $worker"
+ if ! scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ /tmp/prereqs.sh ubuntu@$worker:/tmp/prereqs.sh ; then
+ fail "Failed copying setup files to $worker"
+ fi
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
+ ubuntu@$worker:/home/ubuntu/.
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$worker <<EOF > /dev/null 2>&1 &
+bash /tmp/prereqs.sh worker
+# Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet
+# is not empty" per https://github.com/kubernetes/kubeadm/issues/1
+sudo kubeadm reset
+sudo $k8s_joincmd
+EOF
done
- log "$1 is available at http://$host_ip:$port"
-}
-
-function demo_chart() {
- cd ~
- rm -rf charts
- git clone https://github.com/kubernetes/charts.git
- cd charts/stable
- case "$1" in
- mediawiki)
- # NOT YET WORKING
- # mariadb: Readiness probe failed: mysqladmin: connect to server at 'localhost' failed
- mkdir ./mediawiki/charts
- cp -r ./mariadb ./mediawiki/charts
- # LoadBalancer is N/A for baremetal (public cloud only) - use NodePort
- sed -i -- 's/LoadBalancer/NodePort/g' ./mediawiki/values.yaml
- # Select the storageClass created in the ceph setup step
- sed -i -- 's/# storageClass:/storageClass: "general"/g' ./mediawiki/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./mediawiki/charts/mariadb/values.yaml
- helm install --name mw -f ./mediawiki/values.yaml ./mediawiki
- wait_for_service mw-mediawiki
- ;;
- dokuwiki)
- sed -i -- 's/# storageClass:/storageClass: "general"/g' ./dokuwiki/values.yaml
- sed -i -- 's/LoadBalancer/NodePort/g' ./dokuwiki/values.yaml
- helm install --name dw -f ./dokuwiki/values.yaml ./dokuwiki
- wait_for_service dw-dokuwiki
- ;;
- wordpress)
- # NOT YET WORKING
- # mariadb: Readiness probe failed: mysqladmin: connect to server at 'localhost' failed
- mkdir ./wordpress/charts
- cp -r ./mariadb ./wordpress/charts
- sed -i -- 's/LoadBalancer/NodePort/g' ./wordpress/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./wordpress/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./wordpress/charts/mariadb/values.yaml
- helm install --name wp -f ./wordpress/values.yaml ./wordpress
- wait_for_service wp-wordpress
- ;;
- redmine)
- # NOT YET WORKING
- # mariadb: Readiness probe failed: mysqladmin: connect to server at 'localhost' failed
- mkdir ./redmine/charts
- cp -r ./mariadb ./redmine/charts
- cp -r ./postgresql ./redmine/charts
- sed -i -- 's/LoadBalancer/NodePort/g' ./redmine/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/charts/mariadb/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./redmine/charts/postgresql/values.yaml
- helm install --name rdm -f ./redmine/values.yaml ./redmine
- wait_for_service rdm-redmine
- ;;
- owncloud)
- # NOT YET WORKING: needs resolvable hostname for service
- mkdir ./owncloud/charts
- cp -r ./mariadb ./owncloud/charts
- sed -i -- 's/LoadBalancer/NodePort/g' ./owncloud/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./owncloud/values.yaml
- sed -i -- 's/# storageClass: "-"/storageClass: "general"/g' ./owncloud/charts/mariadb/values.yaml
- helm install --name oc -f ./owncloud/values.yaml ./owncloud
- wait_for_service oc-owncloud
- ;;
- *)
- log "demo not implemented for $1"
- esac
-# extra useful commands
-# kubectl describe pvc
-# kubectl get pvc
-# kubectl describe pods
-# kubectl get pods --namespace default
-# kubectl get pods --all-namespaces
-# kubectl get svc --namespace default dw-dokuwiki
-# kubectl describe svc --namespace default dw-dokuwiki
-# kubectl describe pods --namespace default dw-dokuwiki
-}
-function setup_helm() {
- log "Setup helm"
- # Install Helm
- cd ~
- curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
- chmod 700 get_helm.sh
- ./get_helm.sh
- helm init
- nohup helm serve > /dev/null 2>&1 &
- helm repo update
- # TODO: Workaround for bug https://github.com/kubernetes/helm/issues/2224
- # For testing use only!
- kubectl create clusterrolebinding permissive-binding --clusterrole=cluster-admin --user=admin --user=kubelet --group=system:serviceaccounts;
- # TODO: workaround for tiller FailedScheduling (No nodes are available that match all of the following predicates:: PodToleratesNodeTaints (1).)
- # kubectl taint nodes $HOSTNAME node-role.kubernetes.io/master:NoSchedule-
- # Wait till tiller is running
- tiller_deploy=$(kubectl get pods --all-namespaces | grep tiller-deploy | awk '{print $4}')
- while [[ "$tiller_deploy" != "Running" ]]; do
- log "tiller-deploy status is $tiller_deploy. Waiting 60 seconds for it to be 'Running'"
- sleep 60
- tiller_deploy=$(kubectl get pods --all-namespaces | grep tiller-deploy | awk '{print $4}')
+ for worker in $workers; do
+ host=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker hostname)
+ log "checking node $host"
+ status=$(kubectl get nodes | awk "/$host/ {print \$2}")
+ while [[ "$status" != "Ready" ]]; do
+ log "node $host is \"$status\", waiting 10 seconds for it to be 'Ready'."
+ status=$(kubectl get nodes | awk "/$host/ {print \$2}")
+ sleep 10
+ done
+ log "node $host is 'Ready'."
done
- log "tiller-deploy status is $tiller_deploy"
-
- # Install services via helm charts from https://kubeapps.com/charts
- # e.g. helm install stable/dokuwiki
+ log "Cluster is ready (all nodes in 'kubectl get nodes' show as 'Ready')."
}
function setup_ceph() {
@@ -265,29 +196,41 @@ function setup_ceph() {
fi
}
+workers="$2"
+privnet=$3
+pubnet=$4
+ceph_mode=$5
+ceph_dev=$6
+
export WORK_DIR=$(pwd)
case "$1" in
master)
setup_k8s_master
;;
- agents)
- setup_k8s_agents "$2"
+ workers)
+ setup_k8s_workers "$2"
;;
ceph)
setup_ceph "$2" $3 $4 $5 $6
;;
helm)
- setup_helm
+ bash ./helm-tools.sh setup
;;
demo)
- demo_chart $2
+ if [[ "$2" == "start" ]]; then
+ bash ./helm-tools.sh start $3
+ else
+ bash ./helm-tools.sh stop $3
+ fi
;;
all)
setup_k8s_master
- setup_k8s_agents "$2"
- setup_helm
+ setup_k8s_workers "$2"
+ bash ./helm-tools.sh setup
+ bash ./helm-tools.sh start nginx
+ bash ./helm-tools.sh stop nginx
setup_ceph "$2" $3 $4 $5 $6
- demo_chart dokuwiki
+ bash ./helm-tools.sh start dokuwiki
;;
clean)
# TODO
diff --git a/tools/prometheus/prometheus-tools.sh b/tools/prometheus/prometheus-tools.sh
index f713f01..04a2623 100644
--- a/tools/prometheus/prometheus-tools.sh
+++ b/tools/prometheus/prometheus-tools.sh
@@ -171,7 +171,7 @@ EOF
# To add additional dashboards, browse the URL above and import the dashboard via the id displayed for the dashboard
# Select the home icon (upper left), Dashboards / Import, enter the id, select load, and select the Prometheus datasource
- cd ~/models/tools/prometheus/dashboards
+ cd $WORK_DIR/dashboards
boards=$(ls)
for board in $boards; do
curl -X POST -u admin:admin -H "Accept: application/json" -H "Content-type: application/json" -d @${board} http://$grafana_ip:3000/api/dashboards/db
@@ -198,6 +198,7 @@ function run_and_connect_grafana() {
log "connect_grafana complete"
}
+export WORK_DIR=$(pwd)
nodes=$2
case "$1" in
setup)