summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--tools/kubernetes/README.md4
-rw-r--r--tools/kubernetes/demo_deploy.sh114
-rw-r--r--tools/kubernetes/helm-tools.sh25
-rw-r--r--tools/kubernetes/k8s-cluster.sh66
-rw-r--r--tools/prometheus/prometheus-tools.sh16
5 files changed, 136 insertions, 89 deletions
diff --git a/tools/kubernetes/README.md b/tools/kubernetes/README.md
index ebddc11..55a54f8 100644
--- a/tools/kubernetes/README.md
+++ b/tools/kubernetes/README.md
@@ -42,7 +42,7 @@ Here is an overview of the deployment process, which if desired can be completed
* deploy VES agent (OPNFV Barometer "VES Application")
* on each worker, deploy OPNFV Barometer collectd plugin
* [/tools/prometheus/prometheus-tools.sh](/tools/prometheus/prometheus-tools.sh): setup prometheus server and exporters on all nodes
- * [/tests/k8s-cloudify-clearwater.sh](/tests/k8s-cloudify-clearwater.sh): deploy clearwater-docker and run clearwater-live-test
+ * [/tests/k8s-cloudify-clearwater.sh](/tests/k8s-cloudify-clearwater.sh): deploy clearwater-docker and run clearwater-live-test
* note: kubectl is currently used to deploy the clearwater-docker charts; use of cloudify-kubernetes for this is coming soon.
* when done, these demo elements are available, as described in the script output
* Helm-deployed demo app dokuwiki
@@ -51,7 +51,7 @@ Here is an overview of the deployment process, which if desired can be completed
* Grafana dashboards and API
* Kubernetes API
* Cloudify API
- * Clearwater-docker
+ * Clearwater-docker
See comments in the [overall demo deploy script](demo_deploy.sh), the [k8s setup script](k8s-cluster.sh), and the other scripts for more info.
diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 2a3f8f2..ebb4dd2 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -18,17 +18,21 @@
#. will be installed:
#. - helm and dokuwiki as a demo helm chart based application
#. - prometheus + grafana for cluster monitoring/stats
+#. And optionally, the following will be installed:
#. - cloudify + kubernetes plugin and a demo hello world (nginx) app installed
#. - OPNFV VES as an ONAP-compatible monitoring platform
+#. - Clearwater-docker as an example complex VNF
#.
#. Prerequisites:
#. - MAAS server as cluster admin for k8s master/worker nodes.
#. - Password-less ssh key provided for node setup
#. - hostname of kubernetes master setup in DNS or /etc/hosts
#. Usage: on the MAAS server
-#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ bash ~/models/tools/kubernetes/demo_deploy.sh "<hosts>" <os> <key>
-#. <master> "<workers>" <pub-net> <priv-net> <ceph-mode> "<ceph-dev>" [<extras>]
+#. $ git clone https://gerrit.opnfv.org/gerrit/models models
+#. $ git clone https://gerrit.opnfv.org/gerrit/ves ves
+#. $ bash models/tools/kubernetes/demo_deploy.sh "<hosts>" <os> <key>
+#. <master> "<workers>" <pub-net> <priv-net> <ceph-mode> "<ceph-dev>"
+#. <base|all> [<extras>]
#. <hosts>: space separated list of hostnames managed by MAAS
#. <os>: OS to deploy, one of "ubuntu" (Xenial) or "centos" (Centos 7)
#. <key>: name of private key for cluster node ssh (in current folder)
@@ -40,6 +44,7 @@
#. <ceph-mode>: "helm" or "baremetal"
#. <ceph-dev>: space-separated list of disks (e.g. sda, sdb) to use on each
#. worker, or folder (e.g. "/ceph")
+#. <base|all>: deploy k8s base services, or (for all) add Cloudify, VES, Clearwater
#. <extras>: optional name of script for extra setup functions as needed
#.
#. See tools/demo_deploy.sh in the OPNFV VES repo for additional environment
@@ -64,6 +69,7 @@ function step_end() {
}
function run_master() {
+ trap 'fail' ERR
start=$((`date +%s`/60))
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
$k8s_user@$k8s_master <<EOF
@@ -76,14 +82,15 @@ EOF
deploy_start=$((`date +%s`/60))
-extras=${10}
+deploy=${10}
+extras=${11}
if [[ "$4" != "$5" ]]; then
k8s_master_hostname=$(echo "$1" | cut -d ' ' -f 1)
else
k8s_master_hostname=$1
fi
-cat <<EOF >~/k8s_env.sh
+cat <<EOF >k8s_env.sh
#!/bin/bash
k8s_nodes="$1"
k8s_user=$2
@@ -106,24 +113,27 @@ export k8s_pub_net
export k8s_ceph_mode
export k8s_ceph_dev
EOF
-source ~/k8s_env.sh
+source k8s_env.sh
env | grep k8s_
echo; echo "$0 $(date): Deploying base OS for master and worker nodes..."
start=$((`date +%s`/60))
-source ~/models/tools/maas/deploy.sh $k8s_user $k8s_key "$k8s_nodes" $extras
-step_end "source ~/models/tools/maas/deploy.sh $k8s_user $k8s_key \"$k8s_nodes\" $extras"
+source models/tools/maas/deploy.sh $k8s_user $k8s_key "$k8s_nodes" $extras
+step_end "source models/tools/maas/deploy.sh $k8s_user $k8s_key \"$k8s_nodes\" $extras"
eval `ssh-agent`
ssh-add $k8s_key
-scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
- $k8s_user@$k8s_master:/home/$k8s_user/$k8s_key
+while ! scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
+ $k8s_user@$k8s_master:/home/$k8s_user/$k8s_key ; do
+ echo; echo "$0 $(date): server is not yet ready for ssh; waiting 10 secs"
+ sleep 10
+done
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/k8s_env.sh $k8s_user@$k8s_master:/home/$k8s_user/k8s_env.sh
+ k8s_env.sh $k8s_user@$k8s_master:/home/$k8s_user/k8s_env.sh
echo; echo "$0 $(date): Setting up kubernetes master..."
scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/models/tools/kubernetes/* $k8s_user@$k8s_master:/home/$k8s_user/.
+ models/tools/kubernetes/* $k8s_user@$k8s_master:/home/$k8s_user/.
run_master "bash k8s-cluster.sh master"
if [[ "$k8s_master" != "$k8s_workers" ]]; then
@@ -151,57 +161,63 @@ else
echo; echo "$0 $(date): Skipping ceph (not yet working for AIO deployment)"
fi
-echo; echo "$0 $(date): Setting up cloudify..."
-scp -r -o StrictHostKeyChecking=no ~/models/tools/cloudify \
- $k8s_user@$k8s_master:/home/$k8s_user/.
-run_master "bash cloudify/k8s-cloudify.sh prereqs"
-run_master "bash cloudify/k8s-cloudify.sh setup"
-
-echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
-run "bash $HOME/models/tools/cloudify/k8s-cloudify.sh demo start"
-
-echo; echo "$0 $(date): Setting up VES..."
-# not re-cloned if existing - allows patch testing locally
-if [[ ! -d ~/ves ]]; then
- echo; echo "$0 $(date): Cloning VES..."
- git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
-fi
-# Can't pass quoted strings in commands
-start=$((`date +%s`/60))
-bash $HOME/ves/tools/demo_deploy.sh $k8s_user $k8s_master cloudify
-step_end "bash $HOME/ves/tools/demo_deploy.sh $k8s_user $k8s_master cloudify"
-
echo; echo "Setting up Prometheus..."
-scp -r -o StrictHostKeyChecking=no ~/models/tools/prometheus/* \
+scp -r -o StrictHostKeyChecking=no models/tools/prometheus/* \
$k8s_user@$k8s_master:/home/$k8s_user/.
run_master "bash prometheus-tools.sh setup prometheus helm"
-run_master "bash prometheus-tools.sh setup grafana helm $k8s_master:3000"
+run_master "bash prometheus-tools.sh setup grafana helm"
+
+if [[ "$deploy" == "all" ]]; then
+ echo; echo "$0 $(date): Setting up cloudify..."
+ scp -r -o StrictHostKeyChecking=no models/tools/cloudify \
+ $k8s_user@$k8s_master:/home/$k8s_user/.
+ run_master "bash cloudify/k8s-cloudify.sh prereqs"
+ run_master "bash cloudify/k8s-cloudify.sh setup"
+
+ echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
+ run "bash $HOME/models/tools/cloudify/k8s-cloudify.sh demo start"
+
+ echo; echo "$0 $(date): Setting up VES..."
+ # not re-cloned if existing - allows patch testing locally
+ if [[ ! -d ves ]]; then
+ echo; echo "$0 $(date): Cloning VES..."
+ git clone https://gerrit.opnfv.org/gerrit/ves ves
+ fi
+ # Can't pass quoted strings in commands
+ start=$((`date +%s`/60))
+ bash $HOME/ves/tools/demo_deploy.sh $k8s_user $k8s_master cloudify
+ step_end "bash $HOME/ves/tools/demo_deploy.sh $k8s_user $k8s_master cloudify"
+
+ echo; echo "Installing clearwater-docker..."
+ run "bash $HOME/models/tests/k8s-cloudify-clearwater.sh start $k8s_master blsaws latest"
-echo; echo "Installing clearwater-docker..."
-run "bash $HOME/models/tests/k8s-cloudify-clearwater.sh start $k8s_master blsaws latest"
+ echo; echo "Waiting 5 minutes for clearwater IMS to be fully ready..."
+ sleep 300
-echo; echo "Waiting 5 minutes for clearwater IMS to be fully ready..."
-sleep 300
+ echo; echo "Run clearwater-live-test..."
+ run "bash $HOME/models/tests/k8s-cloudify-clearwater.sh test $k8s_master"
+fi
-echo; echo "Run clearwater-live-test..."
-run "bash $HOME/models/tests/k8s-cloudify-clearwater.sh test $k8s_master"
-
echo; echo "$0 $(date): All done!"
deploy_end=$((`date +%s`/60))
runtime=$((deploy_end-deploy_start))
log "Deploy \"$1\" duration = $runtime minutes"
-source ~/ves/tools/ves_env.sh
-#echo "Prometheus UI is available at http://$k8s_master:30990"
-echo "InfluxDB API is available at http://$ves_influxdb_host:$ves_influxdb_port/query&db=veseventsdb&q=<string>"
-echo "Grafana dashboards are available at http://$ves_grafana_host:$ves_grafana_port (login as $ves_grafana_auth)"
-echo "Grafana API is available at http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/v1/query?query=<string>"
echo "Kubernetes API is available at https://$k8s_master:6443/api/v1/"
-echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
-port=$(bash ~/models/tools/cloudify/k8s-cloudify.sh nodePort nginx)
-echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+echo "Prometheus UI is available at http://$k8s_master:30990"
if [[ "$k8s_master" != "$k8s_workers" ]]; then
export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
fi
+
+if [[ "$deploy" == "all" ]]; then
+ source ves/tools/ves_env.sh
+ echo "InfluxDB API is available at http://$ves_influxdb_host:$ves_influxdb_port/query&db=veseventsdb&q=<string>"
+ echo "Grafana dashboards are available at http://$ves_grafana_host:$ves_grafana_port (login as $ves_grafana_auth)"
+ echo "Grafana API is available at http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/v1/query?query=<string>"
+ echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
+ port=$(bash models/tools/cloudify/k8s-cloudify.sh nodePort nginx)
+ echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+fi
+
diff --git a/tools/kubernetes/helm-tools.sh b/tools/kubernetes/helm-tools.sh
index a28b340..fff9a4d 100644
--- a/tools/kubernetes/helm-tools.sh
+++ b/tools/kubernetes/helm-tools.sh
@@ -39,13 +39,16 @@ function log() {
function setup_helm() {
log "Setup helm"
# Install Helm
+ # per https://github.com/kubernetes/helm/blob/master/docs/install.md
cd ~
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
+ log "Initialize helm"
helm init
- nohup helm serve > /dev/null 2>&1 &
- helm repo update
+# nohup helm serve > /dev/null 2>&1 &
+# log "Run helm repo update"
+# helm repo update
# TODO: Workaround for bug https://github.com/kubernetes/helm/issues/2224
# For testing use only!
kubectl create clusterrolebinding permissive-binding \
@@ -69,15 +72,17 @@ function setup_helm() {
function wait_for_service() {
log "Waiting for service $1 to be available"
- pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- log "Service $1 is at pod $pod"
- ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
- while [[ "$ready" != "true" ]]; do
- log "pod $1 is not yet ready... waiting 10 seconds"
+ pods=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
+ log "Service $1 is at pod(s) $pods"
+ ready="false"
+ while [[ "$ready" != "true" ]] ; do
+ log "Waiting 10 seconds to check pod status"
sleep 10
- # TODO: figure out why transient pods sometimes mess up this logic, thus need to re-get the pods
- pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
+ for pod in $pods ; do
+ rdy=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
+ log "pod $pod is ready: $rdy"
+ if [[ "$rdy" == "true" ]]; then ready="true"; fi
+ done
done
log "pod $pod is ready"
host_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.hostIP}' $pod)
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index e9293f6..9ff75fe 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -46,18 +46,21 @@
#. Status: work in progress, incomplete
#
-trap 'fail' ERR
+# TODO: Debug why some commands below will trigger fail incorrectly
+# trap 'fail' ERR
-function fail() {
- log $1
- exit 1
-}
+# function fail() {
+# log $1
+# exit 1
+# }
function log() {
f=$(caller 0 | awk '{print $2}')
l=$(caller 0 | awk '{print $1}')
echo; echo "$f:$l ($(date)) $1"
- kubectl get pods --all-namespaces
+ if [[ "$kubectl_status" == "ready" ]]; then
+ kubectl get pods --all-namespaces
+ fi
}
function setup_prereqs() {
@@ -85,15 +88,27 @@ if [[ "$dist" == "ubuntu" ]]; then
wait_dpkg; sudo apt-get update
wait_dpkg; sudo apt-get upgrade -y
- echo; echo "prereqs.sh: ($(date)) Install latest docker"
- wait_dpkg; sudo apt-get install -y docker.io
- # Alternate for 1.12.6
- #sudo apt-get install -y libltdl7
- #wget https://packages.docker.com/1.12/apt/repo/pool/main/d/docker-engine/docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb
- #sudo dpkg -i docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb
- sudo service docker restart
+
+ dce=$(dpkg -l | grep -c docker-ce)
+ if [[ $dce -eq 0 ]]; then
+ echo; echo "prereqs.sh: ($(date)) Install latest docker-ce"
+ # Per https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
+ sudo apt-get remove -y docker docker-engine docker.io docker-ce
+ sudo apt-get update
+ sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ sudo add-apt-repository "deb [arch=amd64] \
+ https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ sudo apt-get update
+ sudo apt-get install -y docker-ce
+ fi
+
echo; echo "prereqs.sh: ($(date)) Get k8s packages"
- export KUBE_VERSION=1.7.5
+ export KUBE_VERSION=1.10.0
# per https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
# Install kubelet, kubeadm, kubectl per https://kubernetes.io/docs/setup/independent/install-kubeadm/
sudo apt-get update && sudo apt-get install -y apt-transport-https
@@ -179,8 +194,15 @@ function setup_k8s_master() {
log "Reset kubeadm in case pre-existing cluster"
sudo kubeadm reset
# Start cluster
- log "Start the cluster"
+ log "Workaround issue '/etc/kubernetes/manifests is not empty'"
mkdir ~/tmp
+ # workaround for [preflight] Some fatal errors occurred:
+ # /etc/kubernetes/manifests is not empty
+ sudo rm -rf /etc/kubernetes/manifests/*
+ log "Disable swap to workaround k8s incompatibility with swap"
+ # per https://github.com/kubernetes/kubeadm/issues/610
+ sudo swapoff -a
+ log "Start the cluster"
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 >>~/tmp/kubeadm.out
cat ~/tmp/kubeadm.out
export k8s_joincmd=$(grep "kubeadm join" ~/tmp/kubeadm.out)
@@ -188,6 +210,7 @@ function setup_k8s_master() {
mkdir -p $HOME/.kube
sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
+ export KUBECONFIG=$HOME/.kube/config
# Deploy pod network
log "Deploy calico as CNI"
# Updated to deploy Calico 2.6 per the create-cluster-kubeadm guide above
@@ -198,7 +221,7 @@ function setup_k8s_master() {
# Failure to wait for all calico pods to be running can cause the first worker
# to be incompletely setup. Symptom is that node_ports cannot be routed
# via that node (no response - incoming SYN packets are dropped).
- log "Wait for calico pods to be Running"
+ log "Wait for all calico pods to be Created"
# calico-etcd, calico-kube-controllers, calico-node
pods=$(kubectl get pods --namespace kube-system | grep -c calico)
while [[ $pods -lt 3 ]]; do
@@ -207,23 +230,24 @@ function setup_k8s_master() {
pods=$(kubectl get pods --namespace kube-system | grep -c calico)
done
- pods=$(kubectl get pods --all-namespaces | awk '/calico/ {print $2}')
+ log "Wait for all calico pods to be Running"
+ pods=$(kubectl get pods --namespace kube-system | awk '/calico/ {print $1}')
for pod in $pods; do
- status=$(kubectl get pods --all-namespaces | awk "/$pod/ {print \$4}")
+ status=$(kubectl get pods --namespace kube-system | awk "/$pod/ {print \$3}")
while [[ "$status" != "Running" ]]; do
log "$pod status is $status. Waiting 10 seconds"
sleep 10
- status=$(kubectl get pods --all-namespaces | awk "/$pod/ {print \$4}")
+ status=$(kubectl get pods --namespace kube-system | awk "/$pod/ {print \$3}")
done
log "$pod status is $status"
done
log "Wait for kubedns to be Running"
- kubedns=$(kubectl get pods --all-namespaces | awk '/kube-dns/ {print $4}')
+ kubedns=$(kubectl get pods --namespace kube-system | awk '/kube-dns/ {print $3}')
while [[ "$kubedns" != "Running" ]]; do
log "kube-dns status is $kubedns. Waiting 60 seconds"
sleep 60
- kubedns=$(kubectl get pods --all-namespaces | awk '/kube-dns/ {print $4}')
+ kubedns=$(kubectl get pods --namespace kube-system | awk '/kube-dns/ {print $3}')
done
log "kube-dns status is $kubedns"
diff --git a/tools/prometheus/prometheus-tools.sh b/tools/prometheus/prometheus-tools.sh
index 8463231..05526f6 100644
--- a/tools/prometheus/prometheus-tools.sh
+++ b/tools/prometheus/prometheus-tools.sh
@@ -29,11 +29,11 @@
#. helm: setup/clean via helm
#. agents: for docker-based setup, a quoted, space-separated list of agent nodes
#. note: node running this script must have ssh-key enabled access to agents
-#. $ bash prometheus-tools.sh <setup|clean> grafana <docker|helm> [URI] [creds]
+#. $ bash prometheus-tools.sh <setup|clean> grafana <docker|helm> [server] [creds]
#. grafana: setup/clean grafana
#. docker: setup/clean via docker
#. helm: setup/clean via helm
-#. URI: optional URI of grafana server to use
+#. server: optional host:port of grafana server to use
#. creds: optional grafana credentials (default: admin:admin)
#
@@ -60,6 +60,7 @@ function fail() {
}
function setup_prometheus() {
+ trap 'fail' ERR
log "Setup prometheus"
log "Setup prerequisites"
if [[ "$dist" == "ubuntu" ]]; then
@@ -126,6 +127,7 @@ EOF
}
function setup_grafana() {
+ trap 'fail' ERR
host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
if [[ "$grafana" == "" ]]; then
if [[ "$how" == "docker" ]]; then
@@ -138,10 +140,10 @@ function setup_grafana() {
log "Setup grafana via Helm"
#TODO: add --set server.persistentVolume.storageClass=general
helm install --name gf stable/grafana \
- --set server.service.nodePort=30330 \
- --set server.service.type=NodePort \
- --set server.adminPassword=admin \
- --set server.persistentVolume.enabled=false
+ --set service.nodePort=30330 \
+ --set service.type=NodePort \
+ --set adminPassword=admin \
+ --set persistentVolume.enabled=false
fi
grafana=$host_ip:30330
fi
@@ -264,4 +266,4 @@ case "$1" in
;;
*)
grep '#. ' $0
-esac \ No newline at end of file
+esac