summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorBryan Sullivan <bryan.sullivan@att.com>2017-11-26 22:58:15 -0800
committerBryan Sullivan <bryan.sullivan@att.com>2017-11-26 22:58:15 -0800
commitc467bafdce46993f7edcf2db4cce75b6cbe448ae (patch)
tree2857ee31c2142621abf17c8d4388b7ff31c69777
parent87f74be0cfe0de03bff32b67423f23fcf332e9e1 (diff)
Further cleanup for testing with VES
JIRA:MODELS-2 Change-Id: I79914716620ded1e76f15a3c7cd26ad84fa90e10 Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
-rw-r--r--tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml17
-rw-r--r--tools/cloudify/k8s-cloudify.sh26
-rw-r--r--tools/kubernetes/demo_deploy.sh100
-rw-r--r--tools/kubernetes/k8s-cluster.sh28
4 files changed, 110 insertions, 61 deletions
diff --git a/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml b/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml
index bdbba8c..54c0b45 100644
--- a/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml
+++ b/tools/cloudify/blueprints/k8s-hello-world/blueprint.yaml
@@ -1,4 +1,21 @@
tosca_definitions_version: cloudify_dsl_1_3
+#
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Cloudify demo blueprint for running nginx under kubernetes.
+#
# Based upon
# https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/blob/master/examples/simple-blueprint-defined-resource.yaml
# https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/blob/1.2.0/examples/example-blueprint.yaml
diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index 8f52f88..cf6de93 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -14,18 +14,20 @@
#
#. What this is: Setup script for Cloudify use with Kubernetes.
#. Prerequisites:
-#. - Kubernetes cluster installed per k8s-cluster.sh (in this repo)
+#. - OPNFV Models repo cloned into ~/models, i.e.
+#. git clone https://gerrit.opnfv.org/gerrit/models ~/models
+#. - Kubernetes cluster installed per tools/kubernetes/demo_deploy.sh and
+#. environment setup file ~/models/tools/k8s_env.sh as setup by demo_deploy.sh
#. Usage:
#. From a server with access to the kubernetes master node:
-#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ cd models/tools/cloudify
-#. $ scp -r ~/models/tools/cloudify ubuntu@<k8s-master>:/home/ubuntu/.
+#. $ cd ~/models/tools/cloudify
+#. $ scp -r ~/models/tools/* ubuntu@<k8s-master>:/home/ubuntu/.
#. <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x ubuntu@<k8s-master> cloudify/k8s-cloudify.sh prereqs
#. prereqs: installs prerequisites and configures ubuntu user for kvm use
#. $ ssh -x ubuntu@<k8s-master> bash cloudify/k8s-cloudify.sh setup
#. setup: installs cloudify CLI and Manager
-#. $ bash k8s-cloudify.sh demo <start|stop> <k8s-master>
+#. $ bash k8s-cloudify.sh demo <start|stop>
#. demo: control demo blueprint
#. start|stop: start or stop the demo
#. <k8s-master>: IP or hostname of kubernetes master server
@@ -159,7 +161,6 @@ function setup () {
function service_port() {
name=$1
- manager_ip=$2
tries=6
port="null"
while [[ "$port" == "null" && $tries -gt 0 ]]; do
@@ -186,7 +187,6 @@ function service_port() {
function start() {
name=$1
bp=$2
- manager_ip=$3
log "start app $name with blueprint $bp"
log "copy kube config from k8s master for insertion into blueprint"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
@@ -235,7 +235,7 @@ function start() {
function stop() {
name=$1
bp=$2
- manager_ip=$3
+
# TODO: fix the need for this workaround
log "try to first cancel all current executions"
curl -s -u admin:admin --header 'Tenant: default_tenant' \
@@ -355,7 +355,6 @@ function demo() {
# echo "master-port: $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $2}')" >>~/cloudify/blueprints/k8s-hello-world/inputs.yaml
# echo "file_content:" >>~/cloudify/blueprints/k8s-hello-world/inputs.yaml
# sed 's/^/ /' ~/.kube/config | tee -a ~/cloudify/blueprints/k8s-hello-world/inputs.yaml
- manager_ip=$2
cd ~/models/tools/cloudify/blueprints
if [[ "$1" == "start" ]]; then
@@ -377,6 +376,9 @@ function clean () {
}
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+source ~/k8s_env.sh
+manager_ip=$k8s_master
+
case "$1" in
"prereqs")
prereqs
@@ -388,13 +390,13 @@ case "$1" in
demo $2 $3
;;
"start")
- start $2 $3 $4
+ start $2 $3
;;
"stop")
- stop $2 $3 $4
+ stop $2 $3
;;
"port")
- service_port $2 $3
+ service_port $2
;;
"clean")
clean
diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 4777c4b..0c581b8 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -39,37 +39,57 @@
#. <ceph-dev>: disk (e.g. sda, sdb) or folder (e.g. "/ceph")
#. <extras>: optional name of script for extra setup functions as needed
-key=$1
-nodes="$2"
-master=$3
-workers="$4"
-priv_net=$5
-pub_net=$6
-ceph_mode=$7
-ceph_dev=$8
-extras=$9
-
function run_master() {
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$master <<EOF
+ start=$((`date +%s`/60))
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$k8s_master <<EOF
exec ssh-agent bash
-ssh-add $key
+ssh-add $k8s_key
$1
EOF
+ end=$((`date +%s`/60))
+ runtime=$((end-start))
+ log "step \"$1\" duration = $runtime minutes"
}
-source ~/models/tools/maas/deploy.sh $1 "$2" $9
+extras=$9
+
+cat <<EOF >~/k8s_env.sh
+k8s_key=$1
+k8s_nodes="$2"
+k8s_master=$3
+k8s_workers="$4"
+k8s_priv_net=$5
+k8s_pub_net=$6
+k8s_ceph_mode=$7
+k8s_ceph_dev=$8
+export k8s_key
+export k8s_nodes
+export k8s_master
+export k8s_workers
+export k8s_priv_net
+export k8s_pub_net
+export k8s_ceph_mode
+export k8s_ceph_dev
+EOF
+source ~/k8s_env.sh
+env | grep k8s_
+
+source ~/models/tools/maas/deploy.sh $k8s_key "$k8s_nodes" $extras
eval `ssh-agent`
-ssh-add $key
-scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $key ubuntu@$master:/home/ubuntu/$key
+ssh-add $k8s_key
+scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
+ ubuntu@$k8s_master:/home/ubuntu/$k8s_key
+scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
+ ubuntu@$k8s_master:/home/ubuntu/.
echo; echo "$0 $(date): Setting up kubernetes master..."
-scp -r -o StrictHostKeyChecking=no ~/models/tools/kubernetes/* \
- ubuntu@$master:/home/ubuntu/.
+scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/models/tools/kubernetes/* ubuntu@$k8s_master:/home/ubuntu/.
run_master "bash k8s-cluster.sh master"
echo; echo "$0 $(date): Setting up kubernetes workers..."
-run_master "bash k8s-cluster.sh workers \"$workers\""
+run_master "bash k8s-cluster.sh workers \"$k8s_workers\""
echo; echo "$0 $(date): Setting up helm..."
run_master "bash k8s-cluster.sh helm"
@@ -79,49 +99,49 @@ run_master "bash k8s-cluster.sh demo start nginx"
run_master "bash k8s-cluster.sh demo stop nginx"
echo; echo "$0 $(date): Setting up ceph-helm"
-run_master "bash k8s-cluster.sh ceph \"$workers\" $priv_net $pub_net $ceph_mode $ceph_dev"
+run_master "bash k8s-cluster.sh ceph \"$k8s_workers\" $k8s_priv_net $k8s_pub_net $k8s_ceph_mode $k8s_ceph_dev"
echo; echo "$0 $(date): Verifying kubernetes+helm+ceph install..."
run_master "bash k8s-cluster.sh demo start dokuwiki"
echo; echo "Setting up Prometheus..."
scp -r -o StrictHostKeyChecking=no ~/models/tools/prometheus/* \
- ubuntu@$master:/home/ubuntu/.
-run_master "bash prometheus-tools.sh all \"$workers\""
+ ubuntu@$k8s_master:/home/ubuntu/.
+run_master "bash prometheus-tools.sh all \"$k8s_workers\""
echo; echo "$0 $(date): Setting up cloudify..."
scp -r -o StrictHostKeyChecking=no ~/models/tools/cloudify \
- ubuntu@$master:/home/ubuntu/.
+ ubuntu@$k8s_master:/home/ubuntu/.
run_master "bash cloudify/k8s-cloudify.sh prereqs"
run_master "bash cloudify/k8s-cloudify.sh setup"
echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
-bash ~/models/tools/cloudify/k8s-cloudify.sh demo start $master
+bash ~/models/tools/cloudify/k8s-cloudify.sh demo start
-echo; echo "$0 $(date): Setting up VES master node"
+echo; echo "$0 $(date): Setting up VES"
+# not re-cloned if existing - allows patch testing locally
if [[ ! -d ~/ves ]]; then
git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
fi
-ves_grafana_host=$master:3000
-ves_grafana_auth=admin:admin
+ves_influxdb_host=$k8s_master:8086
+export ves_influxdb_host
+ves_grafana_host=$k8s_master:3000
export ves_grafana_host
+ves_grafana_auth=admin:admin
export ves_grafana_auth
-bash ~/ves/tools/demo_deploy.sh master $master $key
-
-echo; echo "$0 $(date): Setting up collectd for VES events from worker nodes"
-for worker in $workers; do
-bash ~/ves/tools/demo_deploy.sh worker $worker $key
-done
+ves_kafka_hostname=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master hostname)
+export ves_kafka_hostname
+bash ~/ves/tools/demo_deploy.sh $k8s_key $k8s_master "$k8s_workers" cloudify
echo; echo "$0 $(date): All done!"
-export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
-export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
+export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
+export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$k8s_master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
# TODO update Cloudify demo app to have public exposed service address
-port=$( bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx $master)
-echo "Cloudify-deployed demo app nginx is available at http://$master:$port"
-echo "Prometheus UI is available at http://$master:9090"
+port=$( bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx $k8s_master)
+echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+echo "Prometheus UI is available at http://$k8s_master:9090"
echo "Grafana dashboards are available at http://$ves_grafana_host (login as $ves_grafana_auth)"
echo "Grafana API is available at http://$ves_grafana_auth@$ves_influxdb_host/api/v1/query?query=<string>"
-echo "Kubernetes API is available at https://$master:6443/api/v1/"
-echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$master/api/v3.1/status"
+echo "Kubernetes API is available at https://$k8s_master:6443/api/v1/"
+echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index 0d351ba..edbe93c 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -66,7 +66,8 @@ sudo apt-get update
sudo apt-get upgrade -y
if [[ $(grep -c $HOSTNAME /etc/hosts) -eq 0 ]]; then
echo; echo "prereqs.sh: ($(date)) Add $HOSTNAME to /etc/hosts"
- echo "$(ip route get 8.8.8.8 | awk '{print $NF; exit}') $HOSTNAME" | sudo tee -a /etc/hosts
+ echo "$(ip route get 8.8.8.8 | awk '{print $NF; exit}') $HOSTNAME" \
+ | sudo tee -a /etc/hosts
fi
echo; echo "prereqs.sh: ($(date)) Install latest docker"
sudo apt-get install -y docker.io
@@ -86,7 +87,8 @@ deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
echo; echo "prereqs.sh: ($(date)) Install kubectl, kubelet, kubeadm"
-sudo apt-get -y install --allow-downgrades kubectl=${KUBE_VERSION}-00 kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00
+sudo apt-get -y install --allow-downgrades kubectl=${KUBE_VERSION}-00 \
+ kubelet=${KUBE_VERSION}-00 kubeadm=${KUBE_VERSION}-00
echo; echo "prereqs.sh: ($(date)) Install jq for API output parsing"
sudo apt-get -y install jq
echo; echo "prereqs.sh: ($(date)) Set firewall rules"
@@ -121,7 +123,8 @@ function setup_k8s_master() {
bash /tmp/prereqs.sh master
# per https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
# If the following command fails, run "kubeadm reset" before trying again
- # --pod-network-cidr=192.168.0.0/16 is required for calico; this should not conflict with your server network interface subnets
+ # --pod-network-cidr=192.168.0.0/16 is required for calico; this should not
+ # conflict with your server network interface subnets
sudo kubeadm init --pod-network-cidr=192.168.0.0/16 >>/tmp/kubeadm.out
cat /tmp/kubeadm.out
export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
@@ -156,16 +159,23 @@ function setup_k8s_workers() {
for worker in $workers; do
log "Install worker at $worker"
- if ! scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/prereqs.sh ubuntu@$worker:/tmp/prereqs.sh ; then
+ if ! scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ /tmp/prereqs.sh ubuntu@$worker:/tmp/prereqs.sh ; then
fail "Failed copying setup files to $worker"
fi
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker bash /tmp/prereqs.sh worker
- # Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet is not empty" per https://github.com/kubernetes/kubeadm/issues/1
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker sudo kubeadm reset
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker sudo $k8s_joincmd
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
+ ubuntu@$worker:/home/ubuntu/.
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$worker bash /tmp/prereqs.sh worker
+ # Workaround for "[preflight] Some fatal errors occurred: /var/lib/kubelet
+ # is not empty" per https://github.com/kubernetes/kubeadm/issues/1
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$worker sudo kubeadm reset
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$worker sudo $k8s_joincmd
done
- log "Cluster is ready when all nodes in the output of 'kubectl get nodes' show as 'Ready'."
+ log "Cluster is ready when all nodes in 'kubectl get nodes' show as 'Ready'."
}
function setup_ceph() {