-rw-r--r--  build/clearwater-docker.sh                               |  75
-rw-r--r--  build/clearwater-live-test.sh                            |  76
-rw-r--r--  build/tacker.sh                                          |   8
-rw-r--r--  docs/images/models-k8s.png                               | bin 107735 -> 168289 bytes
-rw-r--r--  docs/images/models_demo_flow.svg                         | 112
-rw-r--r--  docs/source/models_demo_flow.txt                         | 112
-rwxr-xr-x  tests/OpenWRT-clean.sh                                   |  93
-rwxr-xr-x  tests/OpenWRT.sh                                         | 189
-rw-r--r--  tests/blueprints/tosca-vnfd-3node-tacker/blueprint.yaml  |  16
-rw-r--r--  tests/k8s-cloudify-clearwater.sh                         | 233
-rw-r--r--  tests/utils/ansible-setup.sh                             | 173
-rw-r--r--  tests/utils/apex_wol_workaround.sh                       | 107
-rw-r--r--  tests/utils/cloudify-clean.sh                            |  16
-rw-r--r--  tests/utils/cloudify-setup.sh                            |  59
-rw-r--r--  tests/utils/osclient.sh                                  | 156
-rw-r--r--  tests/vHello_Cloudify.sh                                 |  32
-rw-r--r--  tests/vLamp_Ansible.sh                                   | 236
-rw-r--r--  tools/README.md                                          |  49
-rw-r--r--  tools/anteater-exceptions.yaml                           |  73
-rwxr-xr-x  tools/anteater.sh                                        |  41
-rw-r--r--  tools/cloudify/k8s-cloudify-clearwater.sh                | 115
-rw-r--r--  tools/cloudify/k8s-cloudify.sh                           | 504
-rw-r--r--  tools/docker/docker-cluster.sh                           |  24
-rw-r--r--  tools/kubernetes/README.md                               | 117
-rw-r--r--  tools/kubernetes/ceph-baremetal.sh                       |   8
-rw-r--r--  tools/kubernetes/ceph-helm.sh                            |   2
-rw-r--r--  tools/kubernetes/demo_deploy.sh                          | 138
-rw-r--r--  tools/kubernetes/helm-tools.sh                           |  25
-rw-r--r--  tools/kubernetes/k8s-cluster.sh                          |  75
-rw-r--r--  tools/maas/deploy.sh                                     |   2
-rw-r--r--  tools/prometheus/README.md                               |   6
-rw-r--r--  tools/prometheus/dashboards/Docker_Host_and_Container_Overview-1503539411705.json | 2
-rw-r--r--  tools/prometheus/prometheus-tools.sh                     | 221
-rw-r--r--  tools/rancher/rancher-cluster.sh                         |  16
34 files changed, 1580 insertions(+), 1531 deletions(-)
diff --git a/build/clearwater-docker.sh b/build/clearwater-docker.sh
index a8228ca..a0c8b79 100644
--- a/build/clearwater-docker.sh
+++ b/build/clearwater-docker.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,44 +18,73 @@
#.
#. Prerequisites:
#. Docker hub user logged on so images can be pushed to docker hub, i.e. via
-#. $ docker login -u <hub-user>
+#. $ docker login -u <hub_user>
#.
#. Usage:
-#. bash clearwater-docker.sh <hub-user>
-#. hub-user: username for dockerhub
+#. bash clearwater-docker.sh <hub_user> <tag> [--no-cache]
+#. hub_user: username for dockerhub
+#. tag: tag to apply to the built images
+#. --no-cache: build clean
#.
#. Status: this is a work in progress, under test.
+trap 'fail' ERR
+
+fail() {
+ log "Build Failed!"
+ exit 1
+}
+
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo ""
+ echo "$f:$l ($(date)) $1"
+}
+
+hub_user=$1
+tag=$2
+cache="$3"
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+export WORK_DIR=$(pwd)
-echo; echo "$0 $(date): Update package repos"
+log "Update package repos"
if [ "$dist" == "Ubuntu" ]; then
sudo apt-get update
else
sudo yum update -y
fi
-echo; echo "$0 $(date): Starting VES agent build process"
-if [[ -d /tmp/clearwater-docker ]]; then rm -rf /tmp/clearwater-docker; fi
+if [[ "$cache" == "--no-cache" ]]; then
+ log "Purge old images"
+ images=$(sudo docker images "clearwater-*" | awk '/clearwater/ {print $1":"$2}')
+ for image in $images ; do sudo docker image rm $image; done
+fi
+
+log "Starting clearwater-docker build process"
+if [[ -d ~/tmp/clearwater-docker ]]; then rm -rf ~/tmp/clearwater-docker; fi
-echo; echo "$0 $(date): Cloning clearwater-docker repo to /tmp/clearwater-docker"
- git clone https://github.com/Metaswitch/clearwater-docker.git \
- /tmp/clearwater-docker
+log "Cloning clearwater-docker repo to ~/tmp/clearwater-docker"
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git \
+ ~/tmp/clearwater-docker
-echo; echo "$0 $(date): Building the images"
-cd /tmp/clearwater-docker
+log "Building the images"
+cd ~/tmp/clearwater-docker
vnfc="base astaire cassandra chronos bono ellis homer homestead homestead-prov ralf sprout"
for i in $vnfc ; do
- sudo docker build -t clearwater/$i $i
-done
-
-echo; echo "$0 $(date): push images to docker hub"
-for i in $vnfc ; do
- echo; echo "$0 $(date): Tagging the image as $1/clearwater-$i:latest"
- id=$(sudo docker images | grep clearwater/$i | awk '{print $3}')
+ log "Building $i"
+ if [[ "$i" != "base" ]]; then
+ log "Reference $hub_user/clearwater-base:$tag"
+ sed -i -- "s~FROM clearwater/base~FROM $hub_user/clearwater-base:$tag~" \
+ $i/Dockerfile
+ fi
+ sudo docker build $cache -t clearwater-$i $i
+ log "Tagging the image as $hub_user/clearwater-$i:$tag"
+ id=$(sudo docker images | grep "clearwater-$i " | awk '{print $3}')
id=$(echo $id | cut -d ' ' -f 1)
- sudo docker tag $id $1/clearwater-$i:latest
-
- echo; echo "$0 $(date): Pushing the image to dockerhub as $1/clearwater-$i"
- sudo docker push $1/clearwater-$i
+ sudo docker tag $id $hub_user/clearwater-$i:$tag
+ log "Pushing the image to dockerhub as $hub_user/clearwater-$i"
+ sudo docker push $hub_user/clearwater-$i
done
+
+cd $WORK_DIR
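
For context, a minimal sketch of how the reworked script would be invoked per its usage header; "myhubuser" and "1.0" are example values, not defaults taken from the script:

  docker login -u myhubuser    # prerequisite: logged-on docker hub user
  bash clearwater-docker.sh myhubuser 1.0 --no-cache
  # each VNFC image is built, tagged myhubuser/clearwater-<vnfc>:1.0,
  # and pushed to docker hub; log lines print as
  # <caller>:<line> (<timestamp>) <message>, via bash's caller builtin
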
diff --git a/build/clearwater-live-test.sh b/build/clearwater-live-test.sh
new file mode 100644
index 0000000..d49ece4
--- /dev/null
+++ b/build/clearwater-live-test.sh
@@ -0,0 +1,76 @@
+#!/bin/bash
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Build script for the github clearwater-live-test project
+#. https://github.com/Metaswitch/clearwater-live-test
+#.
+#. Prerequisites:
+#. Docker hub user logged on so images can be pushed to docker hub, i.e. via
+#. $ docker login -u <hub_user>
+#.
+#. Usage:
+#. bash clearwater-live-test.sh <hub_user> <tag> [--no-cache]
+#. hub_user: username for dockerhub
+#. tag: tag to apply to the built images
+#. --no-cache: build clean
+#.
+#. Status: this is a work in progress, under test.
+
+trap 'fail' ERR
+
+fail() {
+ log "Build Failed!"
+ exit 1
+}
+
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo ""
+ echo "$f:$l ($(date)) $1"
+}
+
+hub_user=$1
+tag=$2
+cache="$3"
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+export WORK_DIR=$(pwd)
+
+log "Update package repos"
+if [ "$dist" == "Ubuntu" ]; then
+ sudo apt-get update
+else
+ sudo yum update -y
+fi
+
+log "Starting clearwater-live-test build process"
+if [[ -d ~/tmp/clearwater-live-test ]]; then rm -rf ~/tmp/clearwater-live-test; fi
+
+log "Cloning clearwater-live-test repo to ~/tmp/clearwater-live-test"
+git clone --recursive https://github.com/Metaswitch/clearwater-live-test.git \
+ ~/tmp/clearwater-live-test
+cd ~/tmp/clearwater-live-test
+
+log "Building the image"
+sudo docker build $cache -t clearwater/clearwater-live-test .
+
+log "Tagging the image as $hub_user/clearwater-live-test:$tag"
+id=$(sudo docker images | grep "clearwater-live-test " | awk '{print $3}')
+id=$(echo $id | cut -d ' ' -f 1)
+sudo docker tag $id $hub_user/clearwater-live-test:$tag
+
+log "Pushing the image to dockerhub as $hub_user/clearwater-live-test"
+sudo docker push $hub_user/clearwater-live-test
+
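
As a quick sanity check that the push succeeded, the image can be pulled back from docker hub (a sketch; "myhubuser" and "1.0" are example values):

  sudo docker pull myhubuser/clearwater-live-test:1.0
  sudo docker images myhubuser/clearwater-live-test  # should list tag 1.0
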
diff --git a/build/tacker.sh b/build/tacker.sh
index a416fc4..d29a1c8 100644
--- a/build/tacker.sh
+++ b/build/tacker.sh
@@ -46,13 +46,13 @@ else
sudo yum update -y
fi
-if [[ ! -d /tmp/models ]]; then
- echo; echo "$0 $(date): Cloning models repo to /tmp/models"
- git clone https://gerrit.opnfv.org/gerrit/models /tmp/models
+if [[ ! -d ~/tmp/models ]]; then
+ echo; echo "$0 $(date): Cloning models repo to ~/tmp/models"
+ git clone https://gerrit.opnfv.org/gerrit/models ~/tmp/models
fi
echo; echo "$0 $(date): Starting Tacker build process"
-cd /tmp/models/build/tacker
+cd ~/tmp/models/build/tacker
sed -i -- "s/<branch>/$branch/g" Dockerfile
sudo docker build -t tacker .
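
The sed call above fills the <branch> placeholder in the Dockerfile before the build; a self-contained illustration of the same idiom (the file path and branch value here are hypothetical):

  branch=master                                   # hypothetical value
  printf 'ARG REPO_BRANCH=<branch>\n' > /tmp/Dockerfile.example
  sed -i -- "s/<branch>/$branch/g" /tmp/Dockerfile.example
  cat /tmp/Dockerfile.example                     # ARG REPO_BRANCH=master
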
diff --git a/docs/images/models-k8s.png b/docs/images/models-k8s.png
index 107e2bb..221442c 100644
--- a/docs/images/models-k8s.png
+++ b/docs/images/models-k8s.png
Binary files differ
diff --git a/docs/images/models_demo_flow.svg b/docs/images/models_demo_flow.svg
new file mode 100644
index 0000000..3a61dc3
--- /dev/null
+++ b/docs/images/models_demo_flow.svg
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="utf-8" standalone="no"?><!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd"><svg xmlns="http://www.w3.org/2000/svg" width="2024" height="3877" xmlns:xlink="http://www.w3.org/1999/xlink"><source><![CDATA[Title: High-level flow for the OPNFV Models+VES demo deployment.
+# (c) 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# To generate the flow, paste the content of this file into the editor at
+# https://bramp.github.io/js-sequence-diagrams/, and select Theme "Simple"
+
+participant Admin Server as admin
+participant k8s master as k8s_master
+participant k8s worker as k8s_worker
+participant Cloudify Manager as cloudify
+participant k8s pods as k8s_pod
+participant Barometer as barometer
+participant Kafka\nZookeeper as kafka
+participant VES Agent as agent
+participant VES Collector as collector
+participant InfluxDB as influxdb
+participant Grafana as grafana
+Note over admin, grafana: For brevity, action/api responses are not shown (flow continuation indicates success).
+admin->k8s_master: deploy host OS
+admin->k8s_worker: deploy host OS
+admin->k8s_master: (SSH) install k8s master
+note over k8s_master: install k8s and\nkube-system pods
+admin->k8s_master: (SSH) install k8s workers
+k8s_master->k8s_worker: (SSH) install k8s worker
+k8s_worker->k8s_master: (api) register as worker
+admin->k8s_master: (SSH) install helm
+admin->k8s_master: (SSH) test helm chart (nginx)
+k8s_master->k8s_worker: (k8s) start nginx pod
+note over k8s_pod: (nginx pod)
+k8s_worker->k8s_pod: create pod
+k8s_master->k8s_pod: GET http://(nginx-service)
+k8s_master->k8s_worker: (k8s) stop nginx pod
+k8s_worker->k8s_pod: delete pod
+admin->k8s_master: (SSH) deploy ceph-docker\nhelm chart
+k8s_master->k8s_worker: (k8s) install ceph-docker
+admin->k8s_master: (SSH) test helm chart with\n ceph PVC (dokuwiki)
+k8s_master->k8s_worker: (k8s) start dokuwiki pod
+note over k8s_pod: (dokuwiki pod)
+k8s_worker->k8s_pod: create pod
+k8s_master->k8s_pod: GET http://(dokuwiki-service)
+note over k8s_worker: (k8s worker)
+admin->k8s_master: (SSH) install cloudify manager
+note over k8s_master: install cloudify CLI
+note over cloudify: (on k8s master)
+k8s_master->cloudify: create cloudify manager VM
+k8s_master->cloudify: (cfy cli) install cloudify k8s plugin
+admin->cloudify: (api) upload/deploy cloudify-k8s chart (nginx)
+cloudify->k8s_master: (api) deploy nginx chart
+k8s_master->k8s_worker: (k8s) start nginx pod
+note over k8s_pod: (nginx pod)
+k8s_worker->k8s_pod: create pod
+admin->k8s_pod: GET http://(nginx-service)
+admin->cloudify: (api) upload/deploy ves-influxdb chart
+cloudify->k8s_master: (api) deploy ves-influxdb chart
+k8s_master->k8s_worker: (k8s) start ves-influxdb pod
+k8s_worker->influxdb: create pod (type=ClusterIP,port=8086)
+admin->influxdb: (api) create veseventsdb
+admin->cloudify: (api) upload/deploy grafana chart
+cloudify->k8s_master: (api) deploy grafana chart
+k8s_master->k8s_worker: (k8s) start grafana pod
+k8s_worker->grafana: create pod (type=NodePort,port=30300)
+admin->grafana: create VESEvents datasource
+note over grafana: (repeat on\nupdate schedule)
+grafana->influxdb: (api)\nquery veseventsdb
+admin->grafana: create VES dashboard
+note over grafana: display stats\non dashboard
+admin->cloudify: (api) upload/deploy ves-collector chart
+cloudify->k8s_master: (api) deploy ves-collector chart
+k8s_master->k8s_worker: (k8s) start ves-collector pod
+k8s_worker->collector: create pod (type=ClusterIP,port=3001)
+admin->cloudify: (api) upload/deploy ves-zookeeper chart
+cloudify->k8s_master: (api) deploy ves-zookeeper chart
+k8s_master->k8s_worker: (k8s) start ves-zookeeper pod
+note over kafka: (zookeeper pod)
+k8s_worker->kafka: create pod (type=ClusterIP,port=2181)
+admin->cloudify: (api) upload/deploy ves-kafka chart
+cloudify->k8s_master: (api) deploy ves-kafka chart
+k8s_master->k8s_worker: (k8s) start ves-kafka pod
+note over kafka: (kafka pod)
+k8s_worker->kafka: create pod (type=NodePort,port=30992)
+note over kafka: NodePort required\nfor barometer
+admin->cloudify: (api) upload/deploy ves-agent chart
+cloudify->k8s_master: (api) deploy ves-agent chart
+k8s_master->k8s_worker: (k8s) start ves-agent pod
+k8s_worker->agent: create pod
+admin->k8s_master: (SSH) install barometer container
+note over barometer: (on k8s master)
+k8s_master->barometer: create barometer container
+barometer->kafka: (api) register\n"collectd" topic
+note over kafka: configure topic\nvia zookeeper
+admin->k8s_worker: (SSH) install barometer container
+note over barometer: (on k8s worker)
+k8s_worker->barometer: create barometer container
+barometer->kafka: (api) register\n"collectd" topic
+note over kafka: (api) configure topic\nvia zookeeper
+note over barometer: (on k8s master\nand workers,\nper schedule)
+barometer->kafka: (api) publish\ncollectd stats
+kafka->agent: (api)\ncollectd stats
+agent->collector: (api) VES event
+collector->influxdb: (api)\nveseventsdb entry]]></source><desc>High-level flow for the OPNFV Models+VES demo deployment.</desc>
+[generated SVG markup elided: the remainder of this file is the js-sequence-diagrams rendering of the CDATA source above (actor boxes, lifelines, signals, and notes as positioned <g> elements); regenerate it by pasting the source into the editor referenced in the header]
\ No newline at end of file
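
Two of the admin steps in this flow map directly to REST calls; a hedged sketch against the ports shown in the diagram ($influxdb_ip and $grafana_ip are placeholders, and the datasource payload is illustrative rather than the demo's exact body):

  # create the veseventsdb database in InfluxDB (ClusterIP port 8086)
  curl -X POST "http://$influxdb_ip:8086/query" \
    --data-urlencode "q=CREATE DATABASE veseventsdb"
  # register it as a Grafana datasource (NodePort 30300); admin:admin is
  # Grafana's default credential and may differ in a real deployment
  curl -X POST -u admin:admin -H "Content-Type: application/json" \
    "http://$grafana_ip:30300/api/datasources" -d \
    '{"name":"VESEvents","type":"influxdb","access":"proxy","url":"http://'"$influxdb_ip"':8086","database":"veseventsdb"}'
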
diff --git a/docs/source/models_demo_flow.txt b/docs/source/models_demo_flow.txt
new file mode 100644
index 0000000..69f3600
--- /dev/null
+++ b/docs/source/models_demo_flow.txt
@@ -0,0 +1,112 @@
+Title: High-level flow for the OPNFV Models+VES demo deployment.
+# (c) 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# To generate the flow, browse to https://bramp.github.io/js-sequence-diagrams/,
+# select Theme "Simple", and paste the content of this file into the editor.
+
+participant Admin Server as admin
+participant k8s master as k8s_master
+participant k8s worker as k8s_worker
+participant Cloudify Manager as cloudify
+participant k8s pods as k8s_pod
+participant Barometer as barometer
+participant Kafka\nZookeeper as kafka
+participant VES Agent as agent
+participant VES Collector as collector
+participant InfluxDB as influxdb
+participant Grafana as grafana
+Note over admin, grafana: For brevity, action/api responses are not shown (flow continuation indicates success).
+admin->k8s_master: deploy host OS
+admin->k8s_worker: deploy host OS
+admin->k8s_master: (SSH) install k8s master
+note over k8s_master: install k8s and\nkube-system pods
+admin->k8s_master: (SSH) install k8s workers
+k8s_master->k8s_worker: (SSH) install k8s worker
+k8s_worker->k8s_master: (api) register as worker
+admin->k8s_master: (SSH) install helm
+admin->k8s_master: (SSH) test helm chart (nginx)
+k8s_master->k8s_worker: (k8s) start nginx pod
+note over k8s_pod: (nginx pod)
+k8s_worker->k8s_pod: create pod
+k8s_master->k8s_pod: GET http://(nginx-service)
+k8s_master->k8s_worker: (k8s) stop nginx pod
+k8s_worker->k8s_pod: delete pod
+admin->k8s_master: (SSH) deploy ceph-docker\nhelm chart
+k8s_master->k8s_worker: (k8s) install ceph-docker
+admin->k8s_master: (SSH) test helm chart with\n ceph PVC (dokuwiki)
+k8s_master->k8s_worker: (k8s) start dokuwiki pod
+note over k8s_pod: (dokuwiki pod)
+k8s_worker->k8s_pod: create pod
+k8s_master->k8s_pod: GET http://(dokuwiki-service)
+note over k8s_worker: (k8s worker)
+admin->k8s_master: (SSH) install cloudify manager
+note over k8s_master: install cloudify CLI
+note over cloudify: (on k8s master)
+k8s_master->cloudify: create cloudify manager VM
+k8s_master->cloudify: (cfy cli) install cloudify k8s plugin
+admin->cloudify: (api) upload/deploy cloudify-k8s chart (nginx)
+cloudify->k8s_master: (api) deploy nginx chart
+k8s_master->k8s_worker: (k8s) start nginx pod
+note over k8s_pod: (nginx pod)
+k8s_worker->k8s_pod: create pod
+admin->k8s_pod: GET http://(nginx-service)
+admin->cloudify: (api) upload/deploy ves-influxdb chart
+cloudify->k8s_master: (api) deploy ves-influxdb chart
+k8s_master->k8s_worker: (k8s) start ves-influxdb pod
+k8s_worker->influxdb: create pod (type=ClusterIP,port=8086)
+admin->influxdb: (api) create veseventsdb
+admin->cloudify: (api) upload/deploy grafana chart
+cloudify->k8s_master: (api) deploy grafana chart
+k8s_master->k8s_worker: (k8s) start grafana pod
+k8s_worker->grafana: create pod (type=NodePort,port=30300)
+admin->grafana: create VESEvents datasource
+note over grafana: (repeat on\nupdate schedule)
+grafana->influxdb: (api)\nquery veseventsdb
+admin->grafana: create VES dashboard
+note over grafana: display stats\non dashboard
+admin->cloudify: (api) upload/deploy ves-collector chart
+cloudify->k8s_master: (api) deploy ves-collector chart
+k8s_master->k8s_worker: (k8s) start ves-collector pod
+k8s_worker->collector: create pod (type=ClusterIP,port=3001)
+admin->cloudify: (api) upload/deploy ves-zookeeper chart
+cloudify->k8s_master: (api) deploy ves-zookeeper chart
+k8s_master->k8s_worker: (k8s) start ves-zookeeper pod
+note over kafka: (zookeeper pod)
+k8s_worker->kafka: create pod (type=ClusterIP,port=2181)
+admin->cloudify: (api) upload/deploy ves-kafka chart
+cloudify->k8s_master: (api) deploy ves-kafka chart
+k8s_master->k8s_worker: (k8s) start ves-kafka pod
+note over kafka: (kafka pod)
+k8s_worker->kafka: create pod (type=NodePort,port=30992)
+note over kafka: NodePort required\nfor barometer
+admin->cloudify: (api) upload/deploy ves-agent chart
+cloudify->k8s_master: (api) deploy ves-agent chart
+k8s_master->k8s_worker: (k8s) start ves-agent pod
+k8s_worker->agent: create pod
+admin->k8s_master: (SSH) install barometer container
+note over barometer: (on k8s master)
+k8s_master->barometer: create barometer container
+barometer->kafka: (api) register\n"collectd" topic
+note over kafka: configure topic\nvia zookeeper
+admin->k8s_worker: (SSH) install barometer container
+note over barometer: (on k8s worker)
+k8s_worker->barometer: create barometer container
+barometer->kafka: (api) register\n"collectd" topic
+note over kafka: configure topic\nvia zookeeper
+note over barometer: (on k8s master\nand workers,\nper schedule)
+barometer->kafka: (api) publish\ncollectd stats
+kafka->agent: (api)\ncollectd stats
+agent->collector: (api) VES event
+collector->influxdb: (api)\nveseventsdb entry \ No newline at end of file
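For reference, the "create veseventsdb" and "create VESEvents datasource" steps
in the flow above map to plain HTTP calls. A minimal sketch, assuming the
InfluxDB 1.x query API on the ClusterIP port 8086 and the Grafana HTTP API on
NodePort 30300 with default admin:admin credentials (both IPs are placeholders):

    influx_ip=<influxdb-service-ip>
    grafana_ip=<k8s-node-ip>
    # create the veseventsdb database
    curl -X POST "http://$influx_ip:8086/query" \
      --data-urlencode "q=CREATE DATABASE veseventsdb"
    # register it as a Grafana datasource named VESEvents
    curl -X POST -H "Content-Type: application/json" \
      -d '{"name":"VESEvents","type":"influxdb","access":"proxy",
           "url":"http://'$influx_ip':8086","database":"veseventsdb"}' \
      "http://admin:admin@$grafana_ip:30300/api/datasources"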
diff --git a/tests/OpenWRT-clean.sh b/tests/OpenWRT-clean.sh
deleted file mode 100755
index 957e5ac..0000000
--- a/tests/OpenWRT-clean.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/bash
-# Copyright 2015-2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# What this is: Cleanup script for a basic test to validate an OPNFV install.
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# $ source ~/git/copper/tests/adhoc/OpenWRT-clean.sh [OpenWRT]
-# OpenWRT: clean OpenWRT resources only (leave public and internal networks)
-
-wget https://git.opnfv.org/cgit/copper/plain/components/congress/install/bash/setenv.sh -O ~/setenv.sh
-source ~/setenv.sh
-
-echo "$0: Delete OpenWRT instance"
-instance=$(nova list | awk "/ OpenWRT / { print \$2 }")
-if [ "$instance" != "" ]; then nova delete $instance; fi
-
-echo "$0: Wait for OpenWRT to terminate"
-COUNTER=5
-RESULT="Wait!"
-until [[ $COUNTER -eq 0 || $RESULT == "Go!" ]]; do
- OpenWRT_id=$(openstack server list | awk "/ cirros1 / { print \$4 }")
- if [[ -z "$OpenWRT_id" ]]; then RESULT="Go!"; fi
- let COUNTER-=1
- sleep 5
-done
-
-echo "$0: Delete 'OpenWRT' security group"
-sg=$(neutron security-group-list | awk "/ OpenWRT / { print \$2 }")
-neutron security-group-delete $sg
-
-echo "$0: Delete floating ip"
-# FLOATING_IP_ID was saved by OpenWRT.sh
-source /tmp/OpenWRT_VARS.sh
-rm /tmp/OpenWRT_VARS.sh
-neutron floatingip-delete $FLOATING_IP_ID
-
-echo "$0: Delete OpenWRT key pair"
-nova keypair-delete OpenWRT
-rm /tmp/OpenWRT
-
-echo "$0: Delete neutron port with fixed_ip 192.168.1.1"
-port=$(neutron port-list | awk "/192.168.1.1/ { print \$2 }")
-if [ "$port" != "" ]; then neutron port-delete $port; fi
-
-echo "$0: Delete OpenWRT subnet"
-neutron subnet-delete OpenWRT
-
-echo "$0: Delete OpenWRT network"
-neutron net-delete OpenWRT
-
-if [[ "$1" == "OpenWRT" ]]; then exit 0; fi
-
-echo "$0: Get 'public_router' ID"
-router=$(neutron router-list | awk "/ public_router / { print \$2 }")
-
-echo "$0: Get internal port ID with subnet 10.0.0.1 on 'public_router'"
-internal_interface=$(neutron router-port-list $router | grep 10.0.0.1 | awk '{print $2}')
-
-echo "$0: If found, delete the port with subnet 10.0.0.1 on 'public_router'"
-if [ "$internal_interface" != "" ]; then neutron router-interface-delete $router port=$internal_interface; fi
-
-echo "$0: Delete remaining neutron ports on subnet 10.0.0.0"
-pid=($(neutron port-list | grep 10.0.0 | awk "/10.0.0/ { print \$2 }")); for id in ${pid[@]}; do neutron port-delete ${id}; done
-
-echo "$0: Clear the router gateway"
-neutron router-gateway-clear public_router
-
-echo "$0: Delete the router"
-neutron router-delete public_router
-
-echo "$0: Delete internal subnet"
-neutron subnet-delete internal
-
-echo "$0: Delete internal network"
-neutron net-delete internal
-
-
-
diff --git a/tests/OpenWRT.sh b/tests/OpenWRT.sh
deleted file mode 100755
index 2857d77..0000000
--- a/tests/OpenWRT.sh
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/bin/bash
-# Copyright 2015-2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: A basic test to validate an OPNFV install. Creates an image,
-# using the OpenWRT project and a private network over which OpenWRT will
-# allocate addresses etc.
-#
-# Status: this is a work in progress, under test. Automated ping test to the
-# internet and between VMs has not yet been implemented.
-#
-# Prerequisites:
-# python-openstackclient >=3.2.0
-#
-# How to use:
-# $ bash ~/git/copper/tests/adhoc/OpenWRT.sh
-# After test, cleanup with
-# $ bash ~/git/copper/tests/adhoc/OpenWRT-clean.sh
-
-trap 'fail' ERR
-
-pass() {
- echo "$0: Hooray!"
- set +x #echo off
- exit 0
-}
-
-# Use this to trigger fail() at the right places
-# if [ "$RESULT" == "Test Failed!" ]; then fail; fi
-fail() {
- echo "$0: Test Failed!"
- set +x
- exit 1
-}
-
-# Find external network if any, and details
-function get_external_net () {
- network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
- for id in ${network_ids[@]}; do
- [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id}
- done
- if [[ $ext_net_id ]]; then
- EXTERNAL_NETWORK_NAME=$(openstack network show $ext_net_id | awk "/ name / { print \$4 }")
- EXTERNAL_SUBNET_ID=$(openstack network show $EXTERNAL_NETWORK_NAME | awk "/ subnets / { print \$4 }")
- else
- echo "$0: External network not found"
- echo "$0: Create external network"
- neutron net-create public --router:external
- EXTERNAL_NETWORK_NAME="public"
- echo "$0: Create external subnet"
- neutron subnet-create public 192.168.10.0/24 --name public --enable_dhcp=False --allocation_pool start=192.168.10.6,end=192.168.10.49 --gateway 192.168.10.1
- EXTERNAL_SUBNET_ID=$(openstack subnet show public | awk "/ id / { print \$4 }")
- fi
-}
-
-wget https://git.opnfv.org/cgit/copper/plain/components/congress/install/bash/setenv.sh -O ~/setenv.sh
-source ~/setenv.sh
-
-echo "$0: create OpenWRT image"
-image=$(openstack image list | awk "/ OpenWRT / { print \$2 }")
-if [ -z $image ]; then glance --os-image-api-version 1 image-create --name OpenWRT --disk-format qcow2 --location http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img --container-format bare
-fi
-
-get_external_net
-
-echo "$0: Create floating IP for external subnet"
-FLOATING_IP_ID=$(neutron floatingip-create $EXTERNAL_NETWORK_NAME | awk "/ id / { print \$4 }")
-FLOATING_IP=$(neutron floatingip-show $FLOATING_IP_ID | awk "/ floating_ip_address / { print \$4 }" | cut -d - -f 1)
-# Save ID to pass to cleanup script
-echo "FLOATING_IP_ID=$FLOATING_IP_ID" >/tmp/OpenWRT_VARS.sh
-
-INTERNAL_NET_ID=$(neutron net-list | awk "/ internal / { print \$2 }")
-if [[ -z $INTERNAL_NET_ID ]]; then
- echo "$0: Create internal network"
- neutron net-create internal
-
- echo "$0: Create internal subnet"
- neutron subnet-create internal 10.0.0.0/24 --name internal --gateway 10.0.0.1 --enable-dhcp --allocation-pool start=10.0.0.2,end=10.0.0.254 --dns-nameserver 8.8.8.8
-fi
-
-if [[ -z $(neutron router-list | awk "/ public_router / { print \$2 }") ]]; then
- echo "$0: Create public_router"
- neutron router-create public_router
-
- echo "$0: Create public_router gateway"
- neutron router-gateway-set public_router $EXTERNAL_NETWORK_NAME
-
- echo "$0: Add router interface for internal network"
- neutron router-interface-add public_router subnet=internal
-fi
-
-echo "$0: Create OpenWRT network"
-neutron net-create OpenWRT
-wrt_net_id=$(neutron net-list | awk "/ OpenWRT / { print \$2 }")
-
-echo "$0: Create OpenWRT subnet"
-neutron subnet-create OpenWRT 192.168.1.0/24 --disable-dhcp --name OpenWRT --gateway 192.168.1.1
-
-echo "$0: Create OpenWRT security group"
-neutron security-group-create OpenWRT
-
-echo "$0: Add rules to OpenWRT security group"
-neutron security-group-rule-create --direction ingress --protocol=TCP --remote-ip-prefix 0.0.0.0/0 --port-range-min=22 --port-range-max=22 OpenWRT
-neutron security-group-rule-create --direction ingress --protocol=TCP --remote-ip-prefix 0.0.0.0/0 --port-range-min=80 --port-range-max=80 OpenWRT
-neutron security-group-rule-create --direction ingress --protocol=ICMP --remote-ip-prefix 0.0.0.0/0 OpenWRT
-neutron security-group-rule-create --direction egress --protocol=TCP --remote-ip-prefix 0.0.0.0/0 --port-range-min=22 --port-range-max=22 OpenWRT
-neutron security-group-rule-create --direction egress --protocol=ICMP --remote-ip-prefix 0.0.0.0/0 OpenWRT
-
-echo "$0: Create Nova key pair"
-ssh-keygen -f "$HOME/.ssh/known_hosts" -R 192.168.1.1
-nova keypair-add OpenWRT > /tmp/OpenWRT
-chmod 600 /tmp/OpenWRT
-
-echo "$0: Create OpenWRT port for LAN"
-LAN_PORT_ID=$(neutron port-create OpenWRT --fixed-ip ip_address=192.168.1.1 | awk "/ id / { print \$4 }")
-
-echo "$0: Create OpenWRT port for WAN"
-WAN_PORT_ID=$(neutron port-create internal | awk "/ id / { print \$4 }")
-# The following does not work with a single-NIC compute node
-# EXT_PORT_ID=$(neutron port-create $EXTERNAL_NETWORK_NAME | awk "/ id / { print \$4 }")
-
-echo "$0: Boot OpenWRT with internal net port"
-openstack server create --flavor m1.tiny --image OpenWRT --nic port-id=$WAN_PORT_ID --security-group OpenWRT --security-group default --key-name OpenWRT OpenWRT
-
-echo "$0: Add OpenWRT security group (should have been done thru the server create command but...)"
-openstack server add security group OpenWRT OpenWRT
-
-# failed with: either net-id or port-id should be specified but not both
-# openstack server create --flavor m1.tiny --image OpenWRT --nic net-id=$wrt_net_id,v4-fixed-ip=192.168.1.1 --nic net-id=$INTERNAL_NET_ID --security-group OpenWRT --key-name OpenWRT OpenWRT
-# openstack server create --flavor m1.tiny --image OpenWRT --nic v4-fixed-ip=192.168.1.1 --nic net-id=$INTERNAL_NET_ID --security-group OpenWRT --key-name OpenWRT OpenWRT
-
-echo "$0: Wait for OpenWRT to go ACTIVE"
-COUNTER=12
-RESULT="Test Failed!"
-until [[ $COUNTER -eq 0 || $RESULT == "Test Success!" ]]; do
- status=$(openstack server show OpenWRT | awk "/ status / { print \$4 }")
- if [[ "$status" == "ACTIVE" ]]; then RESULT="Test Success!"; fi
- let COUNTER-=1
- sleep 5
-done
-if [ "$RESULT" == "Test Failed!" ]; then fail; fi
-
-echo "$0: Associate floating IP to OpenWRT external port"
-neutron floatingip-associate $FLOATING_IP_ID $WAN_PORT_ID
-
-echo "$0: Attach eth1 to OpenWRT internal port"
-nova interface-attach --port-id $LAN_PORT_ID OpenWRT
-
-echo "$0: Boot cirros1 with internal net port"
-openstack server create --flavor m1.tiny --image cirros-0.3.3-x86_64 --nic net-id=$INTERNAL_NET_ID --security-group OpenWRT --security-group default --key-name OpenWRT cirros1
-
-echo "$0: Wait for cirros1 to go ACTIVE"
-COUNTER=12
-RESULT="Test Failed!"
-until [[ $COUNTER -eq 0 || $RESULT == "Test Success!" ]]; do
- status=$(openstack server show cirros1 | awk "/ status / { print \$4 }")
- if [[ "$status" == "ACTIVE" ]]; then RESULT="Test Success!"; fi
- let COUNTER-=1
- sleep 5
-done
-if [ "$RESULT" == "Test Failed!" ]; then fail; fi
-
-echo "$0: Create floating IP for external subnet"
-FLOATING_IP_ID=$(neutron floatingip-create $EXTERNAL_NETWORK_NAME | awk "/ id / { print \$4 }")
-FLOATING_IP=$(neutron floatingip-show $FLOATING_IP_ID | awk "/ floating_ip_address / { print \$4 }" | cut -d - -f 1)
-
-echo "$0: Associate floating IP to cirros1 internal port"
-nova floating-ip-associate cirros1 $FLOATING_IP
-
-echo "$0: Create cirros1 port for OpenWRT net"
-INT_PORT_ID=$(neutron port-create OpenWRT --fixed-ip ip_address=192.168.1.2 | awk "/ id / { print \$4 }")
-
-echo "$0: Attach eth1 to cirros1 internal port"
-nova interface-attach --port-id $INT_PORT_ID cirros1
-
-
-
-pass
diff --git a/tests/blueprints/tosca-vnfd-3node-tacker/blueprint.yaml b/tests/blueprints/tosca-vnfd-3node-tacker/blueprint.yaml
index b3063d2..464d321 100644
--- a/tests/blueprints/tosca-vnfd-3node-tacker/blueprint.yaml
+++ b/tests/blueprints/tosca-vnfd-3node-tacker/blueprint.yaml
@@ -31,8 +31,8 @@ topology_template:
<pubkey>
EOM
sudo mount /dev/sr0 /mnt/
- mkdir /tmp/www
- cd /tmp/www
+ mkdir -p ~/tmp/www
+ cd ~/tmp/www
mkdir html
cat >Dockerfile <<EOM
FROM nginx
@@ -62,7 +62,7 @@ topology_template:
</div>
</body></html>
EOM
- wget -O /tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
+ wget -O ~/tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://apt.dockerproject.org/gpg | sudo apt-key add -
sudo apt-key update
@@ -123,8 +123,8 @@ topology_template:
<pubkey>
EOM
sudo mount /dev/sr0 /mnt/
- mkdir /tmp/www
- cd /tmp/www
+ mkdir -p ~/tmp/www
+ cd ~/tmp/www
mkdir html
cat >Dockerfile <<EOM
FROM nginx
@@ -154,7 +154,7 @@ topology_template:
</div>
</body></html>
EOM
- wget -O /tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
+ wget -O ~/tmp/www/html/favicon.ico https://git.opnfv.org/models/plain/tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico
sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://apt.dockerproject.org/gpg | sudo apt-key add -
sudo apt-key update
@@ -213,7 +213,7 @@ topology_template:
cat << EOM >/home/ubuntu/.ssh/authorized_keys
<pubkey>
EOM
- cat << EOF >/tmp/setup.sh
+ cat << EOF >~/tmp/setup.sh
echo "1" | sudo tee /proc/sys/net/ipv4/ip_forward
sudo sysctl net.ipv4.ip_forward=1
sudo iptables -t nat -A PREROUTING -p tcp --dport 80 -m state \\
@@ -224,7 +224,7 @@ topology_template:
-j DNAT --to-destination <vdu2_ip>:80
sudo iptables -t nat -A POSTROUTING -j MASQUERADE
EOF
- bash /tmp/setup.sh
+ bash ~/tmp/setup.sh
config: |
param0: key1
param1: key2
diff --git a/tests/k8s-cloudify-clearwater.sh b/tests/k8s-cloudify-clearwater.sh
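The setup.sh heredoc moved above enables IPv4 forwarding on VDU3 and DNATs
port 80 to the two web-server VDUs. A quick check of the result (a sketch;
<vdu3_ip> is a placeholder for the VDU3 floating address):

    sudo sysctl net.ipv4.ip_forward           # expect: net.ipv4.ip_forward = 1
    sudo iptables -t nat -L PREROUTING -n -v  # expect the two DNAT rules for port 80
    # test from outside the VM (PREROUTING does not apply to locally generated traffic):
    curl -I http://<vdu3_ip>/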
new file mode 100644
index 0000000..133691f
--- /dev/null
+++ b/tests/k8s-cloudify-clearwater.sh
@@ -0,0 +1,233 @@
+#!/bin/bash
+# Copyright 2017 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#. What this is: Setup script for clearwater-docker as deployed by Cloudify
+#. with Kubernetes. See https://github.com/Metaswitch/clearwater-docker
+#. for more info.
+#.
+#. Prerequisites:
+#. - Kubernetes cluster installed per k8s-cluster.sh (in this repo)
+#. - user (running this script) added to the "docker" group
+#. - clearwater-docker images created and uploaded to docker hub under the
+#. <hub-user> account as <hub-user>/clearwater-<vnfc> where vnfc is the name
+#. of the specific containers as built by build/clearwater-docker.sh
+#.
+#. Usage:
+#. From a server with access to the kubernetes master node:
+#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
+#. $ cd ~/models/tools/cloudify/
+#. $ bash k8s-cloudify-clearwater.sh start <k8s_master_hostname> <image_path> <image_tag>
+#. k8s_master_hostname: hostname of the k8s master node
+#. image_path: "image path" for images (e.g. user on docker hub)
+#. image_tag: "image tag" for images e.g. latest, test, stable
+#. $ bash k8s-cloudify-clearwater.sh stop <k8s_master_hostname>
+#. k8s_master_hostname: hostname of the k8s master node
+#. $ bash k8s-cloudify-clearwater.sh test <k8s_master_hostname>
+#. k8s_master_hostname: hostname of the k8s master node
+#.
+#. Status: this is a work in progress, under test.
+
+function fail() {
+ log "$1"
+ exit 1
+}
+
+function log() {
+ f=$(caller 0 | awk '{print $2}')
+ l=$(caller 0 | awk '{print $1}')
+ echo ""
+ echo "$f:$l ($(date)) $1"
+}
+
+function build_local() {
+ log "deploy local docker registry on k8s master"
+ # Per https://docs.docker.com/registry/deploying/
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ubuntu@$k8s_master sudo docker run -d -p 5000:5000 --restart=always --name \
+ registry registry:2
+
+ # per https://github.com/Metaswitch/clearwater-docker
+ log "clone clearwater-docker"
+ cd ~
+ if [[ ! -d ~/clearwater-docker ]]; then
+ git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+ fi
+
+ log "build docker images"
+ cd clearwater-docker
+ vnfc="base astaire cassandra chronos bono ellis homer homestead homestead-prov ralf sprout"
+ for i in $vnfc ; do
+ docker build -t clearwater/$i $i
+ done
+
+ # workaround for https://www.bountysource.com/issues/37326551-server-gave-http-response-to-https-client-error
+  # Both of the following changes may not be needed, but both are applied to be safe.
+ if [[ "$dist" == "ubuntu" ]]; then
+ check=$(grep -c $k8s_master /etc/default/docker)
+ if [[ $check -eq 0 ]]; then
+ echo "DOCKER_OPTS=\"--insecure-registry $k8s_master:5000\"" | sudo tee -a /etc/default/docker
+ sudo systemctl daemon-reload
+ sudo service docker restart
+ fi
+ fi
+ check=$(grep -c insecure-registry /lib/systemd/system/docker.service)
+ if [[ $check -eq 0 ]]; then
+ sudo sed -i -- "s~ExecStart=/usr/bin/dockerd -H fd://~ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry $k8s_master:5000~" /lib/systemd/system/docker.service
+ sudo systemctl daemon-reload
+ sudo service docker restart
+ fi
+
+ log "deploy local docker registry on k8s master"
+ # Per https://docs.docker.com/registry/deploying/
+ # sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
+
+ log "push images to local docker repo on k8s master"
+ for i in $vnfc ; do
+ docker tag clearwater/$i:latest $k8s_master:5000/clearwater/$i:latest
+ docker push $k8s_master:5000/clearwater/$i:latest
+ done
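+
+  # Optional sanity check (assumption: Docker Registry HTTP API v2 reachable
+  # on port 5000); should list the pushed clearwater/* repositories:
+  #   curl http://$k8s_master:5000/v2/_catalog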
+}
+
+function start() {
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<EOF
+echo "create configmap"
+kubectl create configmap env-vars --from-literal=ZONE=default.svc.cluster.local --from-literal=ADDITIONAL_SHARED_CONFIG=log_level=5
+
+echo "clone clearwater-docker"
+git clone --recursive https://github.com/Metaswitch/clearwater-docker.git
+cd clearwater-docker/kubernetes
+
+echo "generate k8s config with --image_path=$1 --image_tag=$2"
+./k8s-gencfg --image_path=$1 --image_tag=$2
+
+echo "prefix clearwater- to image names"
+sed -i -- "s~$1/~$1/clearwater-~" *.yaml
+
+echo "change ellis-svc to NodePort"
+sed -i -- "s/clusterIP: None/type: NodePort/" ellis-svc.yaml
+sed -i -- "/port: 80/a\ \ \ \ nodePort: 30880" ellis-svc.yaml
+
+echo "deploying"
+kubectl apply -f ../kubernetes
+EOF
+
+ log "workaround bug in homestead-prov"
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+hpod=$(kubectl get pods --namespace default | grep -v homestead-prov | awk '/homestead/ {print $1}')
+status=$(kubectl get pods -o json --namespace default $hpod | jq -r '.status.phase')
+while [[ "$status" != "Running" ]]; do
+ echo "homestead is $status ... waiting 10 seconds"
+ sleep 10
+ status=$(kubectl get pods -o json --namespace default $hpod | jq -r '.status.phase')
+done
+mkdir -p ~/tmp
+kubectl cp $hpod:/usr/share/clearwater/bin/clearwater-socket-factory-sig-wrapper ~/tmp/clearwater-socket-factory-sig-wrapper -c homestead
+kubectl cp $hpod:/usr/share/clearwater/bin/clearwater-socket-factory-mgmt-wrapper ~/tmp/clearwater-socket-factory-mgmt-wrapper -c homestead
+kubectl delete deployment --namespace default homestead-prov
+kubectl delete service --namespace default homestead-prov
+
+hppod=$(kubectl get pods --namespace default | awk '/homestead-prov/ {print $1}')
+while [[ "$hppod" != "" ]] ; do
+ echo "waiting 10 seconds for homestead-prov to be deleted..."
+ sleep 10
+ hppod=$(kubectl get pods --namespace default | awk '/homestead-prov/ {print $1}')
+done
+
+echo "Redeploying homestead-prov..."
+cd clearwater-docker/kubernetes
+kubectl apply -f homestead-prov-depl.yaml
+kubectl apply -f homestead-prov-svc.yaml
+
+hppod="null"
+while [[ "$hppod" == "null" ]] ; do
+ echo "homestead-prov pod is not yet created... waiting 10 seconds"
+ sleep 10
+ hppod=$(kubectl get pods --namespace default | awk '/homestead-prov/ {print $1}')
+done
+
+status=$(kubectl get pods -o json --namespace default $hppod | jq -r '.status.phase')
+while [[ "$status" != "Running" ]]; do
+ echo; echo "$hppod is $status ... waiting 10 seconds"
+ sleep 10
+ status=$(kubectl get pods -o json --namespace default $hppod | jq -r '.status.phase')
+done
+
+kubectl cp ~/tmp/clearwater-socket-factory-sig-wrapper $hppod:/usr/share/clearwater/bin/clearwater-socket-factory-sig-wrapper -c homestead-prov
+kubectl cp ~/tmp/clearwater-socket-factory-mgmt-wrapper $hppod:/usr/share/clearwater/bin/clearwater-socket-factory-mgmt-wrapper -c homestead-prov
+EOF
+}
+
+function run_test() {
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOG'
+cat <<EOF >~/clearwater-live-test.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: clearwater-live-test
+ namespace: default
+spec:
+ containers:
+ - name: clearwater-live-test
+ image: blsaws/clearwater-live-test:stable
+ command:
+ - sleep
+ - "3600"
+ imagePullPolicy: IfNotPresent
+ restartPolicy: Always
+EOF
+kubectl create -f ~/clearwater-live-test.yaml
+status=$(kubectl get pods -o json --namespace default clearwater-live-test | jq -r '.status.phase')
+while [[ "$status" != "Running" ]]; do
+ echo; echo "clearwater-live-test is $status ... waiting 10 seconds"
+ sleep 10
+ status=$(kubectl get pods -o json --namespace default clearwater-live-test | jq -r '.status.phase')
+done
+kubectl exec -t --namespace default clearwater-live-test rake test[default.svc.cluster.local] SIGNUP_CODE=secret PROXY=bono.default.svc.cluster.local
+kubectl delete pods --namespace default clearwater-live-test
+EOG
+}
+
+function stop() {
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master <<'EOF'
+deps="astaire bono chronos ellis etcd homer homestead homestead-prov ralf sprout cassandra"
+for dep in $deps ; do
+ echo "deleting deployment $dep"
+ kubectl delete deployment --namespace default $dep
+ kubectl delete service --namespace default $dep
+done
+kubectl delete configmap env-vars
+rm -rf clearwater-docker
+EOF
+}
+
+dist=$(grep -m 1 '^ID=' /etc/os-release | awk -F '=' '{print $2}')
+source ~/k8s_env_$2.sh
+
+case "$1" in
+ "start")
+ start $3 $4
+ ;;
+ "test")
+ run_test
+ ;;
+ "stop")
+ stop
+ ;;
+ *)
+ grep '#. ' $0
+esac
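
A quick way to verify a "start" run, sketched with placeholder hostnames: the
ellis-svc edit above exposes the Ellis web UI on NodePort 30880.

    ssh ubuntu@<k8s_master_hostname> kubectl get pods --namespace default
    # once all clearwater-* pods are Running:
    curl -I http://<any_k8s_node_ip>:30880/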
+
diff --git a/tests/utils/ansible-setup.sh b/tests/utils/ansible-setup.sh
deleted file mode 100644
index 67e4d59..0000000
--- a/tests/utils/ansible-setup.sh
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/bin/bash
-# Copyright 2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: Setup script for Ansible in an Ubuntu Xenial docker container.
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# $ bash ansible-setup.sh [init|setup|clean]
-# init: Initialize docker container
-# setup: Setup of Ansible in the docker container
-# clean: Clean
-
-pass() {
- echo "$0: Hooray!"
- set +x #echo off
- exit 0
-}
-
-fail() {
- echo "$0: Failed!"
- set +x
- exit 1
-}
-
-function setenv () {
-if [ "$dist" == "Ubuntu" ]; then
- echo "$0: Ubuntu-based install"
- echo "$0: Create the environment file"
- KEYSTONE_HOST=$(juju status --format=short | awk "/keystone\/0/ { print \$3 }")
- cat <<EOF >/tmp/ansible/admin-openrc.sh
-export CONGRESS_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
-export HORIZON_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
-export KEYSTONE_HOST=$KEYSTONE_HOST
-export CEILOMETER_HOST=$(juju status --format=short | awk "/ceilometer\/0/ { print \$3 }")
-export CINDER_HOST=$(juju status --format=short | awk "/cinder\/0/ { print \$3 }")
-export GLANCE_HOST=$(juju status --format=short | awk "/glance\/0/ { print \$3 }")
-export NEUTRON_HOST=$(juju status --format=short | awk "/neutron-api\/0/ { print \$3 }")
-export NOVA_HOST=$(juju status --format=short | awk "/nova-cloud-controller\/0/ { print \$3 }")
-export OS_USERNAME=admin
-export OS_PASSWORD=openstack
-export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://$KEYSTONE_HOST:5000/v2.0
-export OS_REGION_NAME=RegionOne
-EOF
-else
- # Centos
- echo "$0: Centos-based install"
- echo "$0: Setup undercloud environment so we can get overcloud Controller server address"
- source ~/stackrc
- echo "$0: Get address of Controller node"
- export CONTROLLER_HOST1=$(openstack server list | awk "/overcloud-controller-0/ { print \$8 }" | sed 's/ctlplane=//g')
- echo "$0: Create the environment file"
- cat <<EOF >/tmp/ansible/admin-openrc.sh
-export HORIZON_HOST=$CONTROLLER_HOST1
-export CONGRESS_HOST=$CONTROLLER_HOST1
-export KEYSTONE_HOST=$CONTROLLER_HOST1
-export CEILOMETER_HOST=$CONTROLLER_HOST1
-export CINDER_HOST=$CONTROLLER_HOST1
-export GLANCE_HOST=$CONTROLLER_HOST1
-export NEUTRON_HOST=$CONTROLLER_HOST1
-export NOVA_HOST=$CONTROLLER_HOST1
-EOF
- cat ~/overcloudrc >>/tmp/ansible/admin-openrc.sh
- source ~/overcloudrc
- export OS_REGION_NAME=$(openstack endpoint list | awk "/ nova / { print \$4 }")
-  # sed command below is a workaround for a bug - region shows up twice for some reason
- cat <<EOF | sed '$d' >>/tmp/ansible/admin-openrc.sh
-export OS_REGION_NAME=$OS_REGION_NAME
-EOF
-fi
-source /tmp/ansible/admin-openrc.sh
-}
-
-function create_container () {
- echo "$0: Creating docker container for Ansible installation"
- # STEP 1: Create the Ansible container and launch it
- echo "$0: Copy this script to /tmp/ansible"
- mkdir /tmp/ansible
- cp $0 /tmp/ansible/.
- chmod 755 /tmp/ansible/*.sh
-
- echo "$0: Setup admin-openrc.sh"
- setenv
-
- echo "$0: Setup container"
- if [ "$dist" == "Ubuntu" ]; then
- # xenial is needed for python 3.5
- sudo docker pull ubuntu:xenial
- sudo service docker start
- sudo docker run -it -d -v /tmp/ansible/:/tmp/ansible --name ansible ubuntu:xenial /bin/bash
- else
- # Centos
- echo "Centos-based install"
- sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
-[dockerrepo]
-name=Docker Repository
-baseurl=https://yum.dockerproject.org/repo/main/centos/7/
-enabled=1
-gpgcheck=1
-gpgkey=https://yum.dockerproject.org/gpg
-EOF
- sudo yum install -y docker-engine
- # xenial is needed for python 3.5
- sudo service docker start
- sudo docker pull ubuntu:xenial
- sudo docker run -i -t -d -v /tmp/ansible/:/tmp/ansible --name ansible ubuntu:xenial /bin/bash
- fi
-}
-
-function setup () {
- echo "$0: Installing Ansible"
- # STEP 2: Install Ansible in the container
- # Per http://docs.ansible.com/ansible/intro_installation.html
- echo "$0: Install dependencies - OS specific"
- apt-get update
- apt-get install -y python
- apt-get install -y python-dev
- apt-get install -y python-pip
- apt-get install -y wget
- apt-get install -y openssh-server
- apt-get install -y git
- apt-get install -y apg
- apt-get install -y libffi-dev
- apt-get install -y libssl-dev
-
- echo "$0: Install Ansible and Shade"
- pip install --upgrade ansible
- pip install --upgrade shade
-
- echo "$0: Create key pair for interacting with servers via Ansible"
- ssh-keygen -t rsa -N "" -f /tmp/ansible/ansible -C ubuntu@ansible
- chmod 600 /tmp/ansible/ansible
-}
-
-function clean () {
- sudo docker stop $(sudo docker ps -a | awk "/ansible/ { print \$1 }")
- sudo docker rm -v $(sudo docker ps -a | awk "/ansible/ { print \$1 }")
-}
-
-dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
-case "$1" in
- "init")
- create_container
- pass
- ;;
- "setup")
- setup
- pass
- ;;
- "clean")
- clean
- pass
- ;;
- *)
- echo "usage: bash Ansible-setup.sh [init|setup|clean]"
- echo "init: Initialize docker container"
- echo "setup: Setup of Ansible in the docker container"
- echo "clean: remove Ansible"
- fail
-esac
diff --git a/tests/utils/apex_wol_workaround.sh b/tests/utils/apex_wol_workaround.sh
deleted file mode 100644
index 0be3fa2..0000000
--- a/tests/utils/apex_wol_workaround.sh
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/bin/bash
-# Copyright 2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: Workaround for issues with the undercloud support for
-# PXE booting WOL devices. The TFTP RRQ "undionly.kpxe" events (through
-# which the booted host requests the PXE boot image) are not making it
-# to the undercloud. So two workarounds are implemented: a tftp server
-# for undionly.kpxe is installed on the jumphost, and manual invocation
-# of the WOL is invoked (as the undercloud was never issuing the WOL
-# packets).
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# As root on the jumphost, start this script as soon as deploy is started.
-# It will wait for the Ironic log to get created, then watch for the following
-# key log entries and take action on them directly or through notifying the user
-# to take action as needed (e.g. power-off the node).
-# 2016-10-07 23:26:10.597 17686 INFO ironic.drivers.modules.wol [req-ec2f0a60-5f90-4706-a2b3-b7217193166d - - - - -] Reboot called for node 2baf581d-aa47-481e-a28e-304e0959b871. Wake-On-Lan does not fully support this operation. Trying to power on the node.
-# 2016-10-07 23:56:29.876 17686 INFO ironic.drivers.modules.wol [req-92128326-889c-47a8-94ee-2fec77c2de44 - - - - -] Power off called for node 579967bd-1e4d-4212-bf9b-1716a1cd4cfa. Wake-On-Lan does not support this operation. Manual intervention required to perform this action.
-# 2016-10-08 23:57:17.008 17691 WARNING ironic.drivers.modules.agent_base_vendor [req-44232e37-c38a-4099-8d81-871700e4dc2a - - - - -] Failed to soft power off node 165841ec-e8d2-4592-8f15-55742899fff5 in at least 30 seconds. Error: RetryError[Attempts: 7, Value: power on]
-#
-# $ bash apex_wol_workaround.sh
-
-echo "$0: Install tftp server"
-yum install tftp tftp-server xinetd
-cat >/etc/xinetd.d/tftp <<EOF
-# default: off
-# description: The tftp server serves files using the trivial file transfer
-# protocol. The tftp protocol is often used to boot diskless
-# workstations, download configuration files to network-aware printers,
-# and to start the installation process for some operating systems.
-service tftp
-{
- socket_type = dgram
- protocol = udp
- wait = yes
- user = root
- server = /usr/sbin/in.tftpd
- server_args = -c -s /var/lib/tftpboot
- disable = no
- per_source = 11
- cps = 100 2
- flags = IPv4
-}
-EOF
-chmod 777 /var/lib/tftpboot
-iptables -I INPUT -p udp --dport 69 -j ACCEPT
-systemctl enable xinetd.service
-systemctl restart xinetd.service
-curl http://boot.ipxe.org/undionly.kpxe > /var/lib/tftpboot/undionly.kpxe
-
-UNDERCLOUD_MAC=$(virsh domiflist undercloud | grep default | grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
-while [[ -z $UNDERCLOUD_MAC ]]; do
- echo "$0: Waiting 10 seconds for undercloud to be created"
- sleep 10
- UNDERCLOUD_MAC=$(virsh domiflist undercloud | grep default | grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
-done
-
-UNDERCLOUD_IP=$(/usr/sbin/arp -e | grep ${UNDERCLOUD_MAC} | awk {'print $1'})
-while [[ -z $UNDERCLOUD_IP ]]; do
- echo "$0: Waiting 10 seconds for undercloud IP to be assigned"
- sleep 10
- UNDERCLOUD_IP=$(/usr/sbin/arp -e | grep ${UNDERCLOUD_MAC} | awk {'print $1'})
-done
-
-ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$UNDERCLOUD_IP <<EOF
-while [[ ! -f /var/log/ironic/ironic-conductor.log ]]; do
- echo "$0: Waiting 10 seconds for ironic-conductor.log to be created"
- sleep 10
-done
-
-source stackrc
-mkfifo /tmp/myfifo
-tail -f /var/log/ironic/ironic-conductor.log | grep -e "Reboot called for node" -e "Failed to soft power off node" > /tmp/myfifo &
-while read line
-do
- if [[ $(echo "$line" | grep "Reboot called for node") ]]; then
- IRONIC_NODE_ID=$(echo "$line" | sed -e 's/^.*node //' | awk '{print $1}' | sed -e 's/.$//g')
- SERVER_ID=$(ironic node-show $IRONIC_NODE_ID | awk "/ instance_uuid / { print \$4 }")
- SERVER_NAME=$(openstack server show $SERVER_ID | awk "/ name / { print \$4 }")
- echo "$0: Waking $SERVER_NAME"
- if [[ $SERVER_NAME == "overcloud-controller-0" ]]; then sudo ether-wake B8:AE:ED:76:FB:C4
- else sudo ether-wake B8:AE:ED:76:F9:FF
- fi
- fi
-
- if [[ $(echo "$line" | grep "Failed to soft power off node") ]]; then
- IRONIC_NODE_ID=$(echo "$line" | sed -e 's/^.*node //' | awk '{print $1}' | sed -e 's/.$//g')
- SERVER_ID=$(ironic node-show $IRONIC_NODE_ID | awk "/ instance_uuid / { print \$4 }")
- SERVER_NAME=$(openstack server show $SERVER_ID | awk "/ name / { print \$4 }")
- echo "$0: *** POWER OFF $SERVER_NAME NOW! ***"
- fi
-done </var/log/ironic/ironic-conductor.log
-EOF
diff --git a/tests/utils/cloudify-clean.sh b/tests/utils/cloudify-clean.sh
index d7e6aa9..cae416b 100644
--- a/tests/utils/cloudify-clean.sh
+++ b/tests/utils/cloudify-clean.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2015-2016 AT&T Intellectual Property, Inc
+# Copyright 2015-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,7 +19,7 @@
# Status: this is a work in progress, under test.
#
# Prerequisites:
-# $ bash /tmp/cloudify/cloudify-setup.sh
+# $ bash cloudify-setup.sh
#
# How to use:
# $ bash cloudify-clean.sh
@@ -31,12 +31,12 @@
# flip=($(neutron floatingip-list|grep -v "+"|grep -v id|awk '{print $2}')); for id in ${flip[@]}; do neutron floatingip-delete ${id}; done
function setenv () {
-mkdir -p /tmp/cloudify
+mkdir -p ~/tmp/cloudify
if [ "$dist" == "Ubuntu" ]; then
echo "cloudify-clean.sh: Ubuntu-based install"
echo "cloudify-clean.sh: Create the environment file"
KEYSTONE_HOST=$(juju status --format=short | awk "/keystone\/0/ { print \$3 }")
- cat <<EOF >/tmp/cloudify/admin-openrc.sh
+ cat <<EOF >~/tmp/cloudify/admin-openrc.sh
export CONGRESS_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
export HORIZON_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
export KEYSTONE_HOST=$KEYSTONE_HOST
@@ -59,7 +59,7 @@ else
echo "cloudify-clean.sh: Get address of Controller node"
export CONTROLLER_HOST1=$(openstack server list | awk "/overcloud-controller-0/ { print \$8 }" | sed 's/ctlplane=//g')
echo "cloudify-clean.sh: Create the environment file"
- cat <<EOF >/tmp/cloudify/admin-openrc.sh
+ cat <<EOF >~/tmp/cloudify/admin-openrc.sh
export CONGRESS_HOST=$CONTROLLER_HOST1
export KEYSTONE_HOST=$CONTROLLER_HOST1
export CEILOMETER_HOST=$CONTROLLER_HOST1
@@ -68,16 +68,16 @@ export GLANCE_HOST=$CONTROLLER_HOST1
export NEUTRON_HOST=$CONTROLLER_HOST1
export NOVA_HOST=$CONTROLLER_HOST1
EOF
- cat ~/overcloudrc >>/tmp/cloudify/admin-openrc.sh
+ cat ~/overcloudrc >>~/tmp/cloudify/admin-openrc.sh
source ~/overcloudrc
export OS_REGION_NAME=$(openstack endpoint list | awk "/ nova / { print \$4 }")
 # sed command below is a workaround for a bug - region shows up twice for some reason
- cat <<EOF | sed '$d' >>/tmp/cloudify/admin-openrc.sh
+ cat <<EOF | sed '$d' >>~/tmp/cloudify/admin-openrc.sh
export OS_REGION_NAME=$OS_REGION_NAME
EOF
fi
-source /tmp/cloudify/admin-openrc.sh
+source ~/tmp/cloudify/admin-openrc.sh
}
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
diff --git a/tests/utils/cloudify-setup.sh b/tests/utils/cloudify-setup.sh
index 70761c7..6942353 100644
--- a/tests/utils/cloudify-setup.sh
+++ b/tests/utils/cloudify-setup.sh
@@ -43,7 +43,7 @@ if [ "$dist" == "Ubuntu" ]; then
echo "$0: Ubuntu-based install"
echo "$0: Create the environment file"
KEYSTONE_HOST=$(juju status --format=short | awk "/keystone\/0/ { print \$3 }")
- cat <<EOF >/tmp/cloudify/admin-openrc.sh
+ cat <<EOF >~/tmp/cloudify/admin-openrc.sh
export CONGRESS_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
export HORIZON_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
export KEYSTONE_HOST=$KEYSTONE_HOST
@@ -66,7 +66,7 @@ else
echo "$0: Get address of Controller node"
export CONTROLLER_HOST1=$(openstack server list | awk "/overcloud-controller-0/ { print \$8 }" | sed 's/ctlplane=//g')
echo "$0: Create the environment file"
- cat <<EOF >/tmp/cloudify/admin-openrc.sh
+ cat <<EOF >~/tmp/cloudify/admin-openrc.sh
export CONGRESS_HOST=$CONTROLLER_HOST1
export KEYSTONE_HOST=$CONTROLLER_HOST1
export CEILOMETER_HOST=$CONTROLLER_HOST1
@@ -75,15 +75,15 @@ export GLANCE_HOST=$CONTROLLER_HOST1
export NEUTRON_HOST=$CONTROLLER_HOST1
export NOVA_HOST=$CONTROLLER_HOST1
EOF
- cat ~/overcloudrc >>/tmp/cloudify/admin-openrc.sh
+ cat ~/overcloudrc >>~/tmp/cloudify/admin-openrc.sh
source ~/overcloudrc
export OS_REGION_NAME=$(openstack endpoint list | awk "/ nova / { print \$4 }")
 # sed command below is a workaround for a bug - region shows up twice for some reason
- cat <<EOF | sed '$d' >>/tmp/cloudify/admin-openrc.sh
+ cat <<EOF | sed '$d' >>~/tmp/cloudify/admin-openrc.sh
export OS_REGION_NAME=$OS_REGION_NAME
EOF
fi
-source /tmp/cloudify/admin-openrc.sh
+source ~/tmp/cloudify/admin-openrc.sh
}
function get_external_net () {
@@ -91,7 +91,7 @@ function get_external_net () {
for id in ${network_ids[@]}; do
[[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id}
done
- if [[ $ext_net_id ]]; then
+ if [[ $ext_net_id ]]; then
EXTERNAL_NETWORK_NAME=$(openstack network show $ext_net_id | awk "/ name / { print \$4 }")
EXTERNAL_SUBNET_ID=$(openstack network show $EXTERNAL_NETWORK_NAME | awk "/ subnets / { print \$4 }")
else
@@ -102,10 +102,10 @@ function get_external_net () {
function create_container () {
# STEP 1: Create the container and launch it
- echo "$0: Copy this script to /tmp/cloudify"
- mkdir /tmp/cloudify
- cp $0 /tmp/cloudify/.
- chmod 755 /tmp/cloudify/*.sh
+ echo "$0: Copy this script to ~/tmp/cloudify"
+ mkdir ~/tmp/cloudify
+ cp $0 ~/tmp/cloudify/.
+ chmod 755 ~/tmp/cloudify/*.sh
echo "$0: Setup admin-openrc.sh"
setenv
@@ -115,7 +115,7 @@ function create_container () {
sudo docker pull ubuntu:xenial
sudo service docker start
# sudo docker run -it -v ~/git/joid/ci/cloud/admin-openrc.sh:/root/admin-openrc.sh -v ~/cloudify/cloudify-setup.sh:/root/cloudify-setup.sh ubuntu:xenial /bin/bash
- sudo docker run -it -d -v /tmp/cloudify/:/tmp/cloudify --name cloudify ubuntu:xenial /bin/bash
+ sudo docker run -it -d -v ~/tmp/cloudify/:/home/ubuntu/cloudify --name cloudify ubuntu:xenial /bin/bash
exit 0
else
# Centos
@@ -126,14 +126,14 @@ name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7/
enabled=1
gpgcheck=1
-gpgkey=https://yum.dockerproject.org/gpg
+gpgkey=https://yum.dockerproject.org/gpg
EOF
sudo yum install -y docker-engine
# xenial is needed for python 3.5
sudo docker pull ubuntu:xenial
sudo service docker start
# sudo docker run -it -v ~/git/joid/ci/cloud/admin-openrc.sh:/root/admin-openrc.sh -v ~/cloudify/cloudify-setup.sh:/root/cloudify-setup.sh ubuntu:xenial /bin/bash
- sudo docker run -i -t -d -v /tmp/cloudify/:/tmp/cloudify ubuntu:xenial /bin/bash
+ sudo docker run -i -t -d -v ~/tmp/cloudify/:/home/ubuntu/cloudify ubuntu:xenial /bin/bash
fi
}
@@ -147,7 +147,7 @@ function setup () {
apt-get install -y wget
apt-get install -y openssh-server
apt-get install -y git
- # apt-get install -y apg git gcc python-dev libxml2 libxslt1-dev libzip-dev
+ # apt-get install -y apg git gcc python-dev libxml2 libxslt1-dev libzip-dev
# pip install --upgrade pip virtualenv setuptools pbr tox
fi
@@ -161,8 +161,8 @@ function setup () {
pip install --upgrade python-neutronclient
echo "$0: cleanup any previous install attempt"
- if [ -d "~/cloudify" ]; then rm -rf ~/cloudify; fi
- if [ -d "~/cloudify-manager" ]; then rm -rf ~/cloudify-manager; fi
+ if [ -d "~/cloudify" ]; then rm -rf ~/cloudify; fi
+ if [ -d "~/cloudify-manager" ]; then rm -rf ~/cloudify-manager; fi
rm ~/get-cloudify.py
echo "$0: Create virtualenv"
@@ -177,7 +177,7 @@ function setup () {
cfy init
echo "$0: Setup admin-openrc.sh"
- source /tmp/cloudify/admin-openrc.sh
+ source ~/tmp/cloudify/admin-openrc.sh
get_external_net
@@ -215,7 +215,7 @@ function setup () {
echo "$0: Setup image_id"
# CentOS-7-x86_64-GenericCloud.qcow2 failed to be routable (?), so changed to 1607 version
image=$(openstack image list | awk "/ CentOS-7-x86_64-GenericCloud-1607 / { print \$2 }")
- if [ -z $image ]; then
+ if [ -z $image ]; then
glance --os-image-api-version 1 image-create --name CentOS-7-x86_64-GenericCloud-1607 --disk-format qcow2 --location http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1607.qcow2 --container-format bare
fi
image=$(openstack image list | awk "/ CentOS-7-x86_64-GenericCloud-1607 / { print \$2 }")
@@ -239,8 +239,8 @@ function setup () {
# See https://cloudifysource.atlassian.net/browse/CFY-5050
cfy ssh -c "sudo yum install -y gcc gcc-c++ python-devel"
- # Note setup_test_environment is not needed since the Manager sets up the
- # needed networks etc
+ # Note setup_test_environment is not needed since the Manager sets up the
+ # needed networks etc
else
echo "$0: Prepare the Cloudify CLI prerequisites and data"
@@ -248,29 +248,29 @@ function setup () {
if [ $(neutron net-list | awk "/ vnf_mgmt / { print \$2 }") ]; then
echo "$0: vnf_mgmt network exists"
else
- neutron net-create vnf_mgmt
+ neutron net-create vnf_mgmt
echo "$0: Create management subnet"
neutron subnet-create vnf_mgmt 10.0.0.0/24 --name vnf_mgmt --gateway 10.0.0.1 --enable-dhcp --allocation-pool start=10.0.0.2,end=10.0.0.254 --dns-nameserver 8.8.8.8
fi
setup_test_environment
-
+
echo "$0: Install Cloudify OpenStack Plugin"
# pip install https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/1.4.zip
- cd /tmp/cloudify
- if [ -d "cloudify-openstack-plugin" ]; then rm -rf cloudify-openstack-plugin; fi
+ cd ~/tmp/cloudify
+ if [ -d "cloudify-openstack-plugin" ]; then rm -rf cloudify-openstack-plugin; fi
git clone https://github.com/cloudify-cosmo/cloudify-openstack-plugin.git
git checkout 1.4
echo "$0: Patch plugin.yaml to reference management network"
- sed -i -- ":a;N;\$!ba;s/management_network_name:\n default: ''/management_network_name:\n default: 'vnf_mgmt'/" /tmp/cloudify/cloudify-openstack-plugin/plugin.yaml
+ sed -i -- ":a;N;\$!ba;s/management_network_name:\n default: ''/management_network_name:\n default: 'vnf_mgmt'/" ~/tmp/cloudify/cloudify-openstack-plugin/plugin.yaml
cd cloudify-openstack-plugin
python setup.py build
# Use "pip install ." as "python setup.py install" does not install dependencies - resulted in an error as cloudify-openstack-plugin requires novaclient 2.26, the setup.py command installed novaclient 2.29
pip install .
echo "$0: Install Cloudify Fabric (SSH) Plugin"
- cd /tmp/cloudify
- if [ -d "cloudify-fabric-plugin" ]; then rm -rf cloudify-fabric-plugin; fi
+ cd ~/tmp/cloudify
+ if [ -d "cloudify-fabric-plugin" ]; then rm -rf cloudify-fabric-plugin; fi
git clone https://github.com/cloudify-cosmo/cloudify-fabric-plugin.git
cd cloudify-fabric-plugin
git checkout 1.4
@@ -282,8 +282,8 @@ function setup () {
clean () {
if [ "$1" == "cloudify-cli" ]; then
- source /tmp/cloudify/admin-openrc.sh
- if [[ -z "$(openstack user list|grep tacker)" ]]; then
+ source ~/tmp/cloudify/admin-openrc.sh
+ if [[ -z "$(openstack user list|grep tacker)" ]]; then
neutron router-gateway-clear vnf_mgmt_router
pid=($(neutron router-port-list vnf_mgmt_router|grep -v name|awk '{print $2}')); for id in ${pid[@]}; do neutron router-interface-delete vnf_mgmt_router vnf_mgmt; done
neutron router-delete vnf_mgmt_router
@@ -338,4 +338,3 @@ case "$2" in
echo "clean: Clean"
exit 1
esac
-
diff --git a/tests/utils/osclient.sh b/tests/utils/osclient.sh
index eab5b90..839aa29 100644
--- a/tests/utils/osclient.sh
+++ b/tests/utils/osclient.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,58 +13,56 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# What this is: Setup script for OpenStack Clients (OSC) running in
-# an Unbuntu Xenial docker container. You can use this script to isolate the
-# OSC from your host, so that the OSC and related install pre-reqs do not
-# pollute your host environment. You can then then modify your tests scripts on
-# your host and run them using the OSC container rather than moving the test
-# scripts to DevStack or an OpenStack installation (see below). You can also
-# attach to the OSC container. Enter "sudo docker attach osclient" then hit enter
-# twice and you will be in the container as root (there are no other users).
-# Once in the container, you can "source /tmp/osclient/admin-openrc.sh" and use
-# any OSC commands you want.
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# 1) Obtain the credential script for your OpenStack installation by logging
-# into the OpenStack Dashboard and downloading the OpenStack RD file from
-# Project -> Access & Security -> API Access
-# 2) Edit the *-openrc.sh file:
-# * remove the following lines:
-# echo "Please enter your OpenStack Password for project $OS_TENANT_NAME as user $OS_USERNAME: "
-# read -sr OS_PASSWORD_INPUT
-# * replace $OS_PASSWORD_INPUT with the password
-# 3) execute this command: $ bash osclient.sh setup <path to credential script> [branch]
-# * setup: install the OpenStack CLI clients in a container on the host.
-# * <path to credential script> location of the *-openrc.sh file you edited in step 2
-# * branch: git repo branch to install (e.g. stable/newton) OPTIONAL; if you want the master branch,
-# do not include this parameter
-# * Example:
-# If the admin-openrc.sh file is in the same directory as osclient.sh and you want to use stable/newton:
-# $ bash osclient.sh setup admin-openrc.sh stable/newton
-# If the admin-openrc.sh file is in a different directory and you want to use master:
-# $ bash osclient.sh setup ~/Downloads/admin-openrc.sh
-#
-# Once the Docker container has been created and is running, you can run your scripts
-# $ bash osclient.sh run <command>
-# * run: run a command in the container
-# * <command>: command to run, in quotes e.g.
-# bash osclient.sh run 'openstack service list'
-# bash osclient.sh run 'bash mytest.sh'
-# To run tests in the container:
-# 1) Copy the tests to the shared folder for the container (/tmp/osclient)
-# 2) Run your tests; for example, if you want to run Copper tests:
-# $ bash ~/git/models/tests/utils/osclient.sh run "bash /tmp/osclient/copper/tests/network_bridging.sh"
-# $ bash ~/git/models/tests/utils/osclient.sh run "bash /tmp/osclient/copper/tests/network_bridging-clean.sh"
-# 3) Due to a (?) Docker quirk, you need to remove and re-copy the tests each time you change them, e.g. as you edit the tests during development
-# $ rm -rf /tmp/osclient/copper/tests/; cp -R ~/git/copper/tests/ /tmp/osclient/copper/tests/
-#
-# To stop and then remove the Docker container
-# $ bash osclient.sh clean
-# * clean: remove the osclient container and shared folder
-# Note: you may have to run as sudo in order to delete the files in /tmp/osclient
-
+#. What this is: Setup script for OpenStack Clients (OSC) running in
+#. an Ubuntu Xenial docker container. You can use this script to isolate the
+#. OSC from your host, so that the OSC and related install pre-reqs do not
+#. pollute your host environment. You can then modify your test scripts on
+#. your host and run them using the OSC container rather than moving the test
+#. scripts to DevStack or an OpenStack installation (see below). You can also
+#. attach to the OSC container: enter "sudo docker attach osclient", then hit
+#. enter twice and you will be in the container as root (there are no other
+#. users). Once in the container, you can "source ~/admin-openrc.sh" and use
+#. any OSC commands you want.
+#.
+#. Status: this is a work in progress, under test.
+#.
+#. How to use:
+#. 1) Obtain the credential script for your OpenStack installation by logging
+#. into the OpenStack Dashboard and downloading the OpenStack RD file from
+#. Project -> Access & Security -> API Access
+#. 2) Edit the *-openrc.sh file:
+#. * remove the following lines:
+#. echo "Please enter your OpenStack Password for project $OS_TENANT_NAME as user $OS_USERNAME: "
+#. read -sr OS_PASSWORD_INPUT
+#. * replace $OS_PASSWORD_INPUT with the password
+#. 3) execute this command: $ bash osclient.sh setup <path to credential script> [branch]
+#. * setup: install the OpenStack CLI clients in a container on the host.
+#. * <path to credential script> location of the *-openrc.sh file you edited in step 2
+#. * branch: git repo branch to install (e.g. stable/newton) OPTIONAL; if you want the master branch,
+#. do not include this parameter
+#. * Example:
+#. If the admin-openrc.sh file is in the same directory as osclient.sh and you want to use stable/newton:
+#. $ bash osclient.sh setup admin-openrc.sh stable/newton
+#. If the admin-openrc.sh file is in a different directory and you want to use master:
+#. $ bash osclient.sh setup ~/Downloads/admin-openrc.sh
+#.
+#. Once the Docker container has been created and is running, you can run your scripts:
+#. $ bash osclient.sh run <command>
+#. * run: run a command in the container
+#. * <command>: command to run, in quotes e.g.
+#. bash osclient.sh run 'openstack service list'
+#. bash osclient.sh run 'bash mytest.sh'
+#. To run tests in the container:
+#. 1) Copy the tests to the shared folder for the container (/home/ubuntu)
+#. 2) Run your tests; for example, if you want to run Copper tests:
+#. $ bash ~/git/models/tests/utils/osclient.sh run "bash copper/tests/network_bridging.sh"
+#. $ bash ~/git/models/tests/utils/osclient.sh run "bash copper/tests/network_bridging-clean.sh"
+#. 3) Due to a (?) Docker quirk, you need to remove and re-copy the tests each time you change them, e.g. as you edit the tests during development
+#. $ rm -rf ~/tmp/osclient/copper/tests/; cp -R ~/git/copper/tests/ ~/tmp/osclient/copper/tests/
+#.
+#. To stop and then remove the Docker container:
+#. $ bash osclient.sh clean
+#. * clean: remove the osclient container and shared folder
trap 'fail' ERR
@@ -103,10 +101,10 @@ function create_container() {
# xenial is needed for python 3.5
sudo docker pull ubuntu:xenial
sudo service docker start
- sudo docker run -i -t -d -v /tmp/osclient/:/tmp/osclient --name osclient \
+ sudo docker run -i -t -d -v ~/tmp/osclient/:/home/ubuntu/ --name osclient \
ubuntu:xenial /bin/bash
- sudo docker exec osclient /bin/bash /tmp/osclient/osclient-setup.sh \
- setup /tmp/osclient/admin-openrc.sh $branch
+ sudo docker exec osclient /bin/bash osclient-setup.sh \
+ setup admin-openrc.sh $branch
else
# Centos
echo "Centos-based install"
@@ -122,10 +120,10 @@ EOF
# xenial is needed for python 3.5
sudo service docker start
sudo docker pull ubuntu:xenial
- sudo docker run -i -t -d -v /tmp/osclient/:/tmp/osclient --name osclient \
+ sudo docker run -i -t -d -v ~/tmp/osclient/:/home/ubuntu/ --name osclient \
ubuntu:xenial /bin/bash
- sudo docker exec osclient /bin/bash /tmp/osclient/osclient-setup.sh setup \
- /tmp/osclient/admin-openrc.sh $branch
+ sudo docker exec osclient /bin/bash osclient-setup.sh setup \
+ admin-openrc.sh $branch
fi
}
@@ -150,7 +148,7 @@ function setup () {
apt-get install -y libffi-dev
apt-get install -y libssl-dev
- cd /tmp/osclient
+ cd ~
echo "$0: $(date) Upgrage pip"
pip install --upgrade pip
@@ -181,47 +179,29 @@ case "$1" in
setup $openrc $branch
else
echo "$0: $(date) Setup shared virtual folder and save $1 script there"
- if [[ ! -d /tmp/osclient ]]; then mkdir /tmp/osclient; fi
- cp $0 /tmp/osclient/osclient-setup.sh
- cp $openrc /tmp/osclient/admin-openrc.sh
- chmod 755 /tmp/osclient/*.sh
+ if [[ ! -d ~/tmp/osclient ]]; then mkdir ~/tmp/osclient; fi
+ cp $0 ~/tmp/osclient/osclient-setup.sh
+ cp $openrc ~/tmp/osclient/admin-openrc.sh
+ chmod 755 ~/tmp/osclient/*.sh
create_container
fi
pass
;;
run)
- cat >/tmp/osclient/command.sh <<EOF
-source /tmp/osclient/admin-openrc.sh
+ cat >~/tmp/osclient/command.sh <<EOF
+source admin-openrc.sh
$2
exit
EOF
- sudo docker exec osclient /bin/bash /tmp/osclient/command.sh "$0"
+ sudo docker exec osclient /bin/bash command.sh "$0"
;;
clean)
sudo docker stop osclient
sudo docker rm -v osclient
- rm -rf /tmp/osclient
+ rm -rf ~/tmp/osclient
pass
;;
*)
-echo " $ bash osclient.sh setup|run|clean (see detailed parameters below)"
-echo " setup: install the OpenStack CLI clients in a container on the host."
-echo " $ bash osclient.sh setup <path to credential script> [branch]"
-echo " <path to credential script>: OpenStack CLI env setup script (e.g."
-echo " admin-openrc.sh), obtained from the OpenStack Dashboard via"
-echo " Project->Access->Security->API. It's also recommended that you set the"
-echo " OpenStack password explicitly in that script rather than take the"
-echo " default which will prompt you on every command you pass to the container."
-echo " For example, if the admin-openrc.sh file is in the same directory as "
-echo " osclient.sh and you want to use stable/newton:"
-echo " $ bash osclient.sh setup admin-openrc.sh stable/newton"
-echo " branch: git repo branch to install (e.g. stable/newton)"
-echo " run: run a command in the container"
-echo " $ bash osclient.sh run <command>"
-echo " <command>: command to run, in quotes e.g."
-echo " bash osclient.sh run 'openstack service list'"
-echo " bash osclient.sh run 'bash mytest.sh'"
-echo " clean: remove the osclient container and shared folder"
-echo " $ bash osclient.sh clean"
-fail
+ grep '#. ' $0
+ ;;
esac
diff --git a/tests/vHello_Cloudify.sh b/tests/vHello_Cloudify.sh
index e3ae81b..e366050 100644
--- a/tests/vHello_Cloudify.sh
+++ b/tests/vHello_Cloudify.sh
@@ -77,18 +77,18 @@ select_manager() {
}
setup() {
- echo "$0: Setup temp test folder /tmp/cloudify and copy this script there"
- mkdir /tmp/cloudify
- chmod 777 /tmp/cloudify/
- cp $0 /tmp/cloudify/.
- chmod 755 /tmp/cloudify/*.sh
+ echo "$0: Setup temp test folder ~/tmp/cloudify and copy this script there"
+ mkdir -p ~/tmp/cloudify
+ chmod 777 ~/tmp/cloudify/
+ cp $0 ~/tmp/cloudify/.
+ chmod 755 ~/tmp/cloudify/*.sh
echo "$0: cloudify-setup part 1"
bash utils/cloudify-setup.sh $1 init
echo "$0: cloudify-setup part 2"
CONTAINER=$(sudo docker ps -l | awk "/cloudify/ { print \$1 }")
- sudo docker exec $CONTAINER /bin/bash /tmp/cloudify/cloudify-setup.sh $1 setup
+ sudo docker exec $CONTAINER /bin/bash cloudify-setup.sh $1 setup
if [ $? -eq 1 ]; then fail; fi
pass
}
@@ -98,9 +98,9 @@ start() {
source ~/cloudify/venv/bin/activate
echo "$0: reset blueprints folder"
- if [[ -d /tmp/cloudify/blueprints ]]; then rm -rf /tmp/cloudify/blueprints; fi
- mkdir -p /tmp/cloudify/blueprints
- cd /tmp/cloudify/blueprints
+ if [[ -d ~/blueprints ]]; then rm -rf ~/blueprints; fi
+ mkdir ~/blueprints
+ cd ~/blueprints
echo "$0: clone cloudify-hello-world-example"
if [[ "$1" == "cloudify-manager" ]]; then
@@ -112,10 +112,10 @@ start() {
cd cloudify-cli-hello-world-example
fi
- cd /tmp/cloudify/blueprints
+ cd ~/blueprints
echo "$0: setup OpenStack CLI environment"
- source /tmp/cloudify/admin-openrc.sh
+ source ~/admin-openrc.sh
echo "$0: Setup trusty-server glance image if needed"
if [[ -z $(openstack image list | awk "/ trusty-server / { print \$2 }") ]]; then glance --os-image-api-version 1 image-create --name trusty-server --disk-format qcow2 --location https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img --container-format bare; fi
@@ -124,7 +124,7 @@ start() {
if [[ "$1" == "cloudify-manager" ]]; then
echo "$0: create Cloudify Manager blueprint inputs file"
# Set host image per Cloudify agent compatibility: http://docs.getcloudify.org/3.4.0/agents/overview/
- cd /tmp/cloudify/blueprints
+ cd ~/blueprints
cat <<EOF >vHello-inputs.yaml
image: trusty-server
flavor: m1.small
@@ -156,7 +156,7 @@ EOF
fi
echo "$0: initialize cloudify environment"
- cd /tmp/cloudify/blueprints
+ cd ~/blueprints
cfy init -r
if [[ "$1" == "cloudify-manager" ]]; then
@@ -198,10 +198,10 @@ stop() {
source ~/cloudify/venv/bin/activate
echo "$0: setup OpenStack CLI environment"
- source /tmp/cloudify/admin-openrc.sh
+ source ~/admin-openrc.sh
echo "$0: initialize cloudify environment"
- cd /tmp/cloudify/blueprints
+ cd ~/blueprints
if [[ "$1" == "cloudify-manager" ]]; then
select_manager
@@ -223,7 +223,7 @@ stop() {
forward_to_container () {
echo "$0: pass $2 command to vHello.sh in cloudify container"
CONTAINER=$(sudo docker ps -a | awk "/cloudify/ { print \$1 }")
- sudo docker exec $CONTAINER /bin/bash /tmp/cloudify/vHello_Cloudify.sh $1 $2 $2
+ sudo docker exec $CONTAINER /bin/bash vHello_Cloudify.sh $1 $2 $2
if [ $? -eq 1 ]; then fail; fi
}
diff --git a/tests/vLamp_Ansible.sh b/tests/vLamp_Ansible.sh
deleted file mode 100644
index 801b3e1..0000000
--- a/tests/vLamp_Ansible.sh
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/bin/bash
-# Copyright 2016 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# What this is: Deployment test for the Tacker Hello World blueprint.
-#
-# Status: this is a work in progress, under test.
-#
-# How to use:
-# $ git clone https://gerrit.opnfv.org/gerrit/models
-# $ cd models/tests
-# $ bash vLamp_Ansible.sh [setup|start|run|stop|clean]
-# setup: setup test environment
-# start: install blueprint and run test
-# run: setup test environment and run test
-# stop: stop test and uninstall blueprint
-# clean: cleanup after test
-
-trap 'fail' ERR
-
-pass() {
- echo "$0: Hooray!"
- set +x #echo off
- exit 0
-}
-
-fail() {
- echo "$0: Test Failed!"
- set +x
- exit 1
-}
-
-get_floating_net () {
- network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
- for id in ${network_ids[@]}; do
- [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && FLOATING_NETWORK_ID=${id}
- done
- if [[ $FLOATING_NETWORK_ID ]]; then
- FLOATING_NETWORK_NAME=$(openstack network show $FLOATING_NETWORK_ID | awk "/ name / { print \$4 }")
- else
- echo "$0: Floating network not found"
- exit 1
- fi
-}
-
-try () {
- count=$1
- $3
- while [[ $? -eq 1 && $count -gt 0 ]]
- do
- sleep $2
- let count=$count-1
- $3
- done
- if [[ $count -eq 0 ]]; then echo "$0: Command \"$3\" was not successful after $1 tries"; fi
-}
-
-setup () {
- echo "$0: Setup temp test folder /tmp/ansible and copy this script there"
- if [ -d /tmp/ansible ]; then sudo rm -rf /tmp/ansible; fi
- mkdir -p /tmp/ansible
- chmod 777 /tmp/ansible/
- cp $0 /tmp/ansible/.
- chmod 755 /tmp/ansible/*.sh
-
- echo "$0: ansible-setup part 1"
- bash utils/ansible-setup.sh init
-
- echo "$0: ansible-setup part 2"
- CONTAINER=$(sudo docker ps -l | awk "/ansible/ { print \$1 }")
- dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
- if [ "$dist" == "Ubuntu" ]; then
- echo "$0: Execute ansible-setup.sh in the container"
- sudo docker exec -it $CONTAINER /bin/bash /tmp/ansible/ansible-setup.sh setup
- else
- echo "$0: Execute ansible-setup.sh in the container"
- sudo docker exec -i -t $CONTAINER /bin/bash /tmp/ansible/ansible-setup.sh setup
- fi
-
- echo "$0: reset blueprints folder"
- if [[ -d /tmp/ansible/blueprints/lampstack ]]; then rm -rf /tmp/ansible/blueprints/lampstack; fi
- mkdir -p /tmp/ansible/blueprints/
-
- echo "$0: copy lampstack to blueprints folder"
- cd /tmp/
- git clone https://github.com/openstack/osops-tools-contrib.git
- cp -r osops-tools-contrib/ansible/lampstack /tmp/ansible/blueprints
-
- echo "$0: setup OpenStack environment"
- source /tmp/ansible/admin-openrc.sh
-
- echo "$0: determine external (public) network as the floating ip network"
- get_floating_net
-
- echo "$0: create lampstack vars file for OPNFV"
- # trusty-server is needed since xenial does not come with python pre-installed
- # TODO: find some way to get ansible to install dependencies!
-cat >/tmp/ansible/blueprints/lampstack/vars/opnfv.yml <<EOF
----
-horizon_url: "http://$HORIZON_HOST"
-
-auth: {
- auth_url: "$OS_AUTH_URL",
- username: "admin",
- password: "{{ password }}",
- project_name: "admin"
-}
-
-app_env: {
- image_name: "trusty-server",
- region_name: "$OS_REGION_NAME",
- availability_zone: "nova",
- validate_certs: True,
- private_net_name: "internal",
- public_net_name: "$FLOATING_NETWORK_NAME",
- flavor_name: "m1.small",
- public_key_file: "/tmp/ansible/ansible.pub",
- stack_size: 4,
- volume_size: 2,
- block_device_name: "/dev/vdb",
- wp_theme: "https://downloads.wordpress.org/theme/iribbon.2.0.65.zip",
- wp_posts: "http://wpcandy.s3.amazonaws.com/resources/postsxml.zip"
-}
-EOF
-
- echo "$0: Disable host key checking (fix for SSH connection issues?)"
- echo "host_key_checking = False" >>/tmp/ansible/blueprints/lampstack/ansible.cfg
-
- echo "$0: Setup trusty-server glance image if needed"
- if [[ -z $(openstack image list | awk "/ trusty-server / { print \$2 }") ]]; then glance --os-image-api-version 1 image-create --name trusty-server --disk-format qcow2 --location https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img --container-format bare; fi
-
- if [[ -z $(neutron net-list | awk "/ internal / { print \$2 }") ]]; then
- echo "$0: Create internal network"
- neutron net-create internal
-
- echo "$0: Create internal subnet"
- neutron subnet-create internal 10.0.0.0/24 --name internal --gateway 10.0.0.1 --enable-dhcp --allocation-pool start=10.0.0.2,end=10.0.0.254 --dns-nameserver 8.8.8.8
- fi
-
- if [[ -z $(neutron router-list | awk "/ public_router / { print \$2 }") ]]; then
- echo "$0: Create router"
- neutron router-create public_router
-
- echo "$0: Create router gateway"
- neutron router-gateway-set public_router $FLOATING_NETWORK_NAME
-
- echo "$0: Add router interface for internal network"
- neutron router-interface-add public_router subnet=internal
- fi
-}
-
-start() {
- echo "$0: Add ssh key"
- chown root /tmp/ansible/ansible
- eval $(ssh-agent -s)
- ssh-add /tmp/ansible/ansible
-
- echo "$0: setup OpenStack environment"
- source /tmp/ansible/admin-openrc.sh
-
- echo "$0: Clear known hosts (workaround for ssh connection issues)"
- rm ~/.ssh/known_hosts
-
- echo "$0: invoke blueprint install via Ansible"
- cd /tmp/ansible/blueprints/lampstack
- ansible-playbook -vvv -e "action=apply env=opnfv password=$OS_PASSWORD" site.yml
-
- pass
-}
-
-stop() {
- echo "$0: Add ssh key"
- eval $(ssh-agent -s)
- ssh-add /tmp/ansible/ansible
-
- echo "$0: setup OpenStack environment"
- source /tmp/ansible/admin-openrc.sh
-
- echo "$0: invoke blueprint destroy via Ansible"
- cd /tmp/ansible/blueprints/lampstack
- ansible-playbook -vvv -e "action=destroy env=opnfv password=$OS_PASSWORD" site.yml
-
- pass
-}
-
-forward_to_container () {
- echo "$0: pass $1 command to vLamp_Ansible.sh in tacker container"
- CONTAINER=$(sudo docker ps -a | awk "/ansible/ { print \$1 }")
- sudo docker exec $CONTAINER /bin/bash /tmp/ansible/vLamp_Ansible.sh $1 $1
- if [ $? -eq 1 ]; then fail; fi
-}
-
-dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
-case "$1" in
- setup)
- setup
- pass
- ;;
- run)
- setup
- forward_to_container start
- pass
- ;;
- start|stop)
- if [[ $# -eq 1 ]]; then forward_to_container $1
- else
- # running inside the tacker container, ready to go
- $1
- fi
- pass
- ;;
- clean)
- echo "$0: Uninstall Ansible and test environment"
- bash utils/ansible-setup.sh clean
- pass
- ;;
- *)
- echo "usage: bash vLamp_Ansible.sh [setup|start|run|clean]"
- echo "setup: setup test environment"
- echo "start: install blueprint and run test"
- echo "run: setup test environment and run test"
- echo "stop: stop test and uninstall blueprint"
- echo "clean: cleanup after test"
- fail
-esac
diff --git a/tools/README.md b/tools/README.md
index a059d3a..5990c07 100644
--- a/tools/README.md
+++ b/tools/README.md
@@ -1,16 +1,41 @@
-This repo contains experimental scripts etc for setting up cloud-native stacks for application deployment and management on bare-metal servers. A lot of cloud-native focus so far has been on public cloud providers (AWS, GCE, Azure) but there aren't many tools and even fewer full-stack open source platforms for setting up bare metal servers with the same types of cloud-native stack features. Further, app modeling methods supported by cloud-native stacks differ substantially. The tools in this repo are intended to help provide a comprehensive, easily deployed set of cloud-native stacks that can be further used for analysis and experimentation on converged app modeling and lifecycle management methods, as well as other purposes, e.g. assessments of efficiency, performance, security, and resilience.
+<!---
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017-2018 AT&T Intellectual Property, Inc
+-->
+
+This repo contains experimental scripts etc. for setting up cloud-native and hybrid-cloud stacks for application deployment and management on bare-metal servers. The goal of these tools is to support the OPNFV Models project with various implementations of cloud-native and OpenStack-based clouds, as well as hybrid clouds. This will serve as a platform for testing modeled VNF lifecycle management in any one of these cloud types, or in a hybrid cloud environment.
+
+In the process, this is intended to help developers automate setup of full-featured stacks, to overcome the sometimes complex, out-of-date, incomplete, or unclear directions provided for manual stack setup by the upstream projects.
+
+The tools in this repo are thus intended to help provide a comprehensive, easily deployed set of cloud-native stacks that can be further used for analysis and experimentation on converged app modeling and lifecycle management methods, as well as other purposes, e.g. assessments of efficiency, performance, security, and resilience.
The toolset will eventually include these elements of one or more full-stack platform solutions:
-* hardware prerequisite/options guidance
-* container-focused application runtime environment, e.g.
- * kubernetes
- * docker-ce
- * rancher
+* bare-metal server deployment
+ * [MAAS](https://maas.io)
+ * [Bifrost](https://docs.openstack.org/bifrost/latest/)
+* application runtime environments, also referred to as Virtual Infrastructure Managers (VIM) using the ETSI NFV terminology
+ * container-focused (often referred to as "cloud-native", although that term really refers to broader concepts)
+ * [Kubernetes](https://github.com/kubernetes/kubernetes)
+ * [Docker-CE (Moby)](https://mobyproject.org/)
+ * [Rancher](https://rancher.com/)
+ * VM-focused
+ * [OpenStack Helm](https://wiki.openstack.org/wiki/Openstack-helm)
* software-defined storage backends, e.g.
- * ceph
-* container networking (CNI)
+ * [Ceph](https://ceph.com/)
+* cluster internal networking
+ * [Calico CNI](https://github.com/projectcalico/cni-plugin)
* app orchestration, e.g. via
- * cloudify
- * ONAP
- * helm
-* applications useful for platform characterization \ No newline at end of file
+ * [Cloudify](https://cloudify.co/)
+ * [ONAP](https://www.onap.org/)
+ * [Helm](https://github.com/kubernetes/helm)
+ * [OpenShift Origin](https://www.openshift.org/)
+* monitoring and telemetry
+ * [OPNFV VES](https://github.com/opnfv/ves)
+ * [Prometheus](https://prometheus.io/)
+* applications useful for platform characterization
+ * [Clearwater IMS](http://www.projectclearwater.org/)
+
+An overall concept for how cloud-native and OpenStack cloud platforms will be deployable as a hybrid cloud environment, with additional OPNFV features such as VES, is shown below.
+
+![Hybrid Cloud Cluster](/docs/images/models-k8s.png?raw=true "Resulting Cluster") \ No newline at end of file
diff --git a/tools/anteater-exceptions.yaml b/tools/anteater-exceptions.yaml
new file mode 100644
index 0000000..1d1123d
--- /dev/null
+++ b/tools/anteater-exceptions.yaml
@@ -0,0 +1,73 @@
+# When adding projects, all `arrays: []` sections must have
+# a value; use 'nullvalue' if no waivers are available.
+#
+# This file uses standard regular expression syntax, however be mindful
+# of escaping YAML delimiters too (such as `:`) using double quotes "".
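+#
+# For example, the file_audits pattern below is quoted so that the ":" in
+# "0.0.0.0:4243" is not read as a YAML mapping delimiter, and the "."
+# regex metacharacters are double-escaped:
+#   - "0\\.0\\.0\\.0:4243"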
+
+binaries:
+  docs/images/models-k8s.png:
+    - e57b24c5591f20a8892a159fb3e83dbfd987ec0dac637c602b71a14e62da7097
+    - a0cbf57654f18f4365e3934f2669eb8f23227b20d69a4b62aa274b1e530d4272
+  tests/blueprints/tosca-vnfd-3node-tacker/favicon.ico:
+    - 2088cf0e9512f999b35b76b89ceec1c0e19b7b4d4444a06485c505cc734ac229
+    - f38f1389d374b06d54f1dd410a9e09654848d35858014f1cb86475369dc16c56
+  tests/blueprints/tosca-vnfd-3node-tacker/logo.png:
+    - 2d6e161ccbcf1bd45faab7a148095977fee40547cac479cae44bb5fddc18eb74
+    - af757aa0af55c321cfe1231737eefb069450eb23c6cb89d4d87134e3dbc26c92
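+# Each waiver above is the sha256 of an allowed binary; a value can be
+# regenerated with e.g. `sha256sum docs/images/models-k8s.png`.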
+
+
+file_audits:
+ file_names:
+ - README.md
+ file_contents:
+ - "(.*)yum install.*wget"
+ - "(.*)yum install.*git"
+ - "(.*)apt-get install.*wget"
+ - "(.*)apt-get install.*git"
+ - "\\bcurl\\b"
+ - "wget"
+ - "git.*clone(.*)gerrit\\.opnfv\\.org"
+ - "git.*clone(.*)\\.openstack\\.org"
+ - "git.*clone(.*)github\\.com/openstack"
+ - "git.*clone(.*)github\\.com/cloudify-cosmo/cloudify-openstack-plugin"
+ - "git.*clone(.*)github\\.com/cloudify-cosmo/cloudify-fabric-plugin"
+ - "git.*clone(.*)github\\.com/cloudify-cosmo/cloudify-hello-world-example"
+ - "wget (.*)github\\.com/cloudify-cosmo/cloudify-manager-blueprints"
+ - "wget (.*)github\\.com/cloudify-incubator/cloudify-kubernetes-plugin"
+ - "wget (.*)gigaspaces-repository-eu\\.s3\\.amazonaws\\.com/org/cloudify3"
+ - "wget (.*)repository\\.cloudifysource\\.org"
+ - "wget (.*)releases\\.rancher\\.com"
+ - "wget (.*)packages\\.docker\\.com"
+ - "curl (.*)raw\\.githubusercontent\\.com/kubernetes/helm/master/scripts/get"
+ - "git.*clone(.*)github\\.com/blsaws/cloudify-cli-hello-world-example"
+ - "git.*clone(.*)github\\.com/Metaswitch/clearwater-docker"
+ - "git.*clone(.*)github\\.com/Metaswitch/clearwater-live-test"
+ - "git.*clone(.*)github\\.com/kubernetes"
+ - "git.*clone(.*)github\\.com/att/netarbiter"
+ - ".*\\$OS_PASSWORD.*"
+ - ".*\\~/tmp/.*"
+ - ".*SIGNUP_CODE\\=secret.*"
+ - "ssh_key_filename: /root/.ssh/vHello.pem"
+ - "vnf_mgmt 10.0.0.0/24"
+ - "# and setup CLI environment.*"
+ - "eval `ssh-agent`"
+ - ".*Create secrets for kubernetes.*"
+ - "cfy secrets.*"
+ - ".*get_secret: kubernetes.*"
+ - "0\\.0\\.0\\.0:4243"
+ - ".*kubectl create secret.*"
+ - ".*kubectl get secrets.*"
+ - ".*kubectl delete secrets.*"
+ - ".*Create Ceph admin secret.*"
+ - ".*SecretName: ceph.*"
+ - ".*Create ceph-secret.*"
+ - ".*# A similar secret.*"
+ - '.*"user":"", "password":"" }.*'
+ - ".*--password=\\$MYSQL_PASSWORD.*"
+ - ".*#bind_host = 0\\.0\\.0\\.0\\.*"
+ - ".*password = tacker.*"
+ - ".*# < transport_url = rabbit://stackrabbit:secretrabbit.*"
+ - ".*# password = secretservice.*" \ No newline at end of file
diff --git a/tools/anteater.sh b/tools/anteater.sh
new file mode 100755
index 0000000..5fb3d7b
--- /dev/null
+++ b/tools/anteater.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 2018 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: test script for the OPNFV Anteater toolset to test patches
+# for Anteater exceptions as described at
+# https://wiki.opnfv.org/pages/viewpage.action?pageId=11700198
+#
+#. Usage:
+#. $ git clone https://gerrit.opnfv.org/gerrit/models
+#. $ bash models/tools/anteater.sh [exceptions]
+#. exceptions: exceptions file to test (in Anteater format - see URL above)
+#. if not provided, test exceptions file in the anteater repo
+#.
+
+sudo docker stop anteater
+sudo docker rm -v anteater
+if [[ ! -d ~/releng-anteater ]]; then
+ git clone https://gerrit.opnfv.org/gerrit/releng-anteater ~/releng-anteater
+fi
+cd ~/releng-anteater/docker
+sudo docker build -t anteater .
+# keep the container alive long enough for the clone and scan below
+sudo docker run -d --name anteater anteater sleep 3600
+if [[ "$1" != "" ]]; then
+ sudo docker cp $1 anteater:/home/opnfv/anteater/exceptions/models.yaml
+fi
+sudo docker exec -it anteater /bin/bash -c \
+'cat exceptions/models.yaml; \
+git clone https://gerrit.opnfv.org/gerrit/models ~/models; \
+~/venv/bin/anteater -p models --path ~/models' | tee ~/anteater-models.log
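+# Illustrative follow-up (a sketch; the log path matches the tee above):
+#   bash models/tools/anteater.sh models/tools/anteater-exceptions.yaml
+#   less ~/anteater-models.log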
diff --git a/tools/cloudify/k8s-cloudify-clearwater.sh b/tools/cloudify/k8s-cloudify-clearwater.sh
deleted file mode 100644
index 430d31a..0000000
--- a/tools/cloudify/k8s-cloudify-clearwater.sh
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#. What this is: Setup script for clearwater-docker as deployed by Cloudify
-#. with Kubernetes. See https://github.com/Metaswitch/clearwater-docker
-#. for more info.
-#.
-#. Prerequisites:
-#. - Kubernetes cluster installed per k8s-cluster.sh (in this repo)
-#. - user (running this script) added to the "docker" group
-#. - clearwater-docker images created and uploaded to docker hub under the
-#. <hub-user> account as <hub-user>/clearwater-<vnfc> where vnfc is the name
-#. of the specific containers as built by build/clearwater-docker.sh
-#.
-#. Usage:
-#. From a server with access to the kubernetes master node:
-#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ cd ~/models/tools/cloudify/
-#. $ bash k8s-cloudify-clearwater.sh <start|stop> <hub-user> <manager>
-#.
-#. Status: this is a work in progress, under test.
-
-function fail() {
- log "$1"
- exit 1
-}
-
-function log() {
- f=$(caller 0 | awk '{print $2}')
- l=$(caller 0 | awk '{print $1}')
- echo ""
- echo "$f:$l ($(date)) $1"
-}
-
-function build_local() {
- master=$1
- log "deploy local docker registry on k8s master"
- # Per https://docs.docker.com/registry/deploying/
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$master sudo docker run -d -p 5000:5000 --restart=always --name \
- registry registry:2
-
- # per https://github.com/Metaswitch/clearwater-docker
- log "clone clearwater-docker"
- cd ~
- git clone https://github.com/Metaswitch/clearwater-docker.git
-
- log "build docker images"
- cd clearwater-docker
- vnfc="base astaire cassandra chronos bono ellis homer homestead homestead-prov ralf sprout"
- for i in $vnfc ; do
- docker build -t clearwater/$i $i
- done
-
- # workaround for https://www.bountysource.com/issues/37326551-server-gave-http-response-to-https-client-error
- # May not need both...
- if [[ "$dist" == "ubuntu" ]]; then
- check=$(grep -c $master /etc/default/docker)
- if [[ $check -eq 0 ]]; then
- echo "DOCKER_OPTS=\"--insecure-registry $master:5000\"" | sudo tee -a /etc/default/docker
- sudo systemctl daemon-reload
- sudo service docker restart
- fi
- fi
- check=$(grep -c insecure-registry /lib/systemd/system/docker.service)
- if [[ $check -eq 0 ]]; then
- sudo sed -i -- "s~ExecStart=/usr/bin/dockerd -H fd://~ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry $master:5000~" /lib/systemd/system/docker.service
- sudo systemctl daemon-reload
- sudo service docker restart
- fi
-
- log "deploy local docker registry on k8s master"
- # Per https://docs.docker.com/registry/deploying/
- # sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2
-
- log "push images to local docker repo on k8s master"
- for i in $vnfc ; do
- docker tag clearwater/$i:latest $master:5000/clearwater/$i:latest
- docker push $master:5000/clearwater/$i:latest
- done
-}
-
-
-function start() {
- master=$1
-}
-
-function stop() {
- master=$1
-}
-
-dist=$(grep --m 1 ID /etc/os-release | awk -F '=' '{print $2}')
-case "$1" in
- "start")
- start $2
- ;;
- "stop")
- stop $2
- ;;
- *)
- grep '#. ' $0
-esac
-
diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index 6e0b7f2..f922880 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@
#. <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x <user>@<k8s-master> cloudify/k8s-cloudify.sh prereqs
#. <user>: username on the target host. Also used to indicate OS name.
+#. <k8s-master>: IP or hostname of kubernetes master server
#. prereqs: installs prerequisites and configures <user> user for kvm use
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh setup
#. <user>: username on the target host. Also used to indicate OS name.
@@ -35,26 +36,21 @@
#. $ bash k8s-cloudify.sh demo <start|stop>
#. demo: control demo blueprint
#. start|stop: start or stop the demo
-#. <k8s-master>: IP or hostname of kubernetes master server
-#. $ bash k8s-cloudify.sh <start|stop> <name> <blueprint>
+#. $ bash k8s-cloudify.sh <start|stop> <name> <blueprint> ["inputs"]
#. start|stop: start or stop the blueprint
#. name: name of the service in the blueprint
+#. inputs: optional JSON string to pass to Cloudify as deployment inputs
#. blueprint: name of the blueprint folder (in current directory!)
-#. <k8s-master>: IP or hostname of kubernetes master server
-#. $ bash k8s-cloudify.sh port <service> <k8s-master>
-#. port: find assigned node_port for service
+#. $ bash k8s-cloudify.sh nodePort <service>
+#.   nodePort: find assigned nodePort for service
+#. service: name of service e.g. nginx
+#. $ bash k8s-cloudify.sh clusterIp <service>
+#. clusterIp: find assigned clusterIp for service
#. service: name of service e.g. nginx
-#. <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh clean
#. <user>: username on the target host. Also used to indicate OS name.
#. clean: uninstalls cloudify CLI and Manager
#.
-#. If using this script to start/stop blueprints with multiple k8s environments,
-#. before invoking the script copy the k8s_env.sh script from the target
-#. cluster and copy to ~/k8s_env.sh, e.g.
-#. scp centos@sm-1:/home/centos/k8s_env.sh ~/k8s_env_sm-1.sh
-#. cp ~/k8s_env_sm-1.sh ~/k8s_env.sh
-#.
#. Status: this is a work in progress, under test.
function fail() {
@@ -68,11 +64,30 @@ function log() {
echo; echo "$f:$l ($(date)) $1"
}
+function step_complete() {
+ end=$((`date +%s`/60))
+ runtime=$((end-start))
+ log "step completed in $runtime minutes: \"$step\""
+}
+
+function step_start() {
+ step="$1"
+ log "step start: \"$step\""
+ start=$((`date +%s`/60))
+}
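+# For illustration, a step is bracketed as below; step_complete logs the
+# elapsed time in whole minutes for the named step:
+#   step_start "Install prerequisites"
+#   ...
+#   step_complete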
+
function prereqs() {
- log "Install prerequisites"
+ step_start "Install prerequisites"
if [[ "$USER" == "ubuntu" ]]; then
sudo apt-get install -y virtinst qemu-kvm libguestfs-tools virtualenv git \
python-pip
+ # workaround for virsh default network inactive
+ status=$(sudo virsh net-list --all | awk '/default/ {print $2}')
+ if [[ "$status" == "inactive" ]]; then
+ sudo ifconfig virbr0 down
+ sudo brctl delbr virbr0
+ sudo virsh net-start default
+ fi
else
# installing libvirt is needed to ensure default network is pre-created
sudo yum install -y libvirt
@@ -96,12 +111,14 @@ EOF
sudo usermod -a -G kvm $USER
sudo chmod 0644 /boot/vmlinuz*
sudo systemctl restart libvirtd
+ step_complete
}
function setup () {
+ step_start "setup"
cd ~/cloudify
source ~/k8s_env.sh
- manager_ip=$k8s_master
+  # k8s_master is set by ~/k8s_env.sh (sourced above)
log "Setup Cloudify-CLI"
# Per http://docs.getcloudify.org/4.1.0/installation/bootstrapping/#installing-cloudify-manager-in-an-offline-environment
# Installs into /opt/cfy/
@@ -126,8 +143,8 @@ function setup () {
sudo systemctl start libvirtd
if [[ "$USER" == "centos" ]]; then
# copy image to folder that qemu has access to, to avoid: ERROR Cannot access storage file '/home/centos/cloudify/cloudify-manager-community-17.9.21.qcow2' (as uid:107, gid:107): Permission denied
- cp cloudify-manager-community-17.9.21.qcow2 /tmp/.
- img="/tmp/cloudify-manager-community-17.9.21.qcow2"
+      mkdir -p ~/tmp && cp cloudify-manager-community-17.9.21.qcow2 ~/tmp/.
+      img="$HOME/tmp/cloudify-manager-community-17.9.21.qcow2"
else
img="cloudify-manager-community-17.9.21.qcow2"
fi
@@ -156,6 +173,12 @@ function setup () {
done
cfy status
+  HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+  log "Set iptables to forward $HOST_IP port 80 to Cloudify Manager VM at $VM_IP"
+  sudo iptables -t nat -I PREROUTING -p tcp -d $HOST_IP --dport 80 -j DNAT --to-destination $VM_IP:80
+ sudo iptables -I FORWARD -m state -d $VM_IP/32 --state NEW,RELATED,ESTABLISHED -j ACCEPT
+ sudo iptables -t nat -A POSTROUTING -j MASQUERADE
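+  # e.g. once forwarding is in place, the manager API should answer on the
+  # host address (a sketch; default install credentials):
+  #   curl -u admin:admin --header 'Tenant: default_tenant' \
+  #     http://$HOST_IP/api/v3.1/status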
+
log "Install Cloudify Kubernetes Plugin"
# Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
# Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
@@ -182,19 +205,6 @@ function setup () {
| awk -F ' ' '{print $2}') kubernetes-admin_client_key_data
cfy secrets list
- # get manager VM IP
- VM_MAC=$(virsh domiflist cloudify-manager | grep default | grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
- VM_IP=$(/usr/sbin/arp -e | grep ${VM_MAC} | awk {'print $1'})
-
- # get host IP
- HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
-
- # Forward host port 80 to VM
- log "Setip iptables to forward $HOST_IP port 80 to Cloudify Manager VM at $VM_IP"
- sudo iptables -t nat -I PREROUTING -p tcp -d $HOST_IP --dport 80 -j DNAT --to-destination $VM_IP:80
- sudo iptables -I FORWARD -m state -d $VM_IP/32 --state NEW,RELATED,ESTABLISHED -j ACCEPT
- sudo iptables -t nat -A POSTROUTING -j MASQUERADE
-
# Access to the API via the primary interface, from the local host, is not
# working for some reason... skip this for now
# while ! curl -u admin:admin --header 'Tenant: default_tenant' http://$HOST_IP/api/v3.1/status ; do
@@ -205,126 +215,191 @@ function setup () {
log "Cloudify CLI log is at ~/.cloudify/logs/cli.log"
log "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$HOST_IP/api/v3.1/status"
log "Cloudify setup is complete!"
+ step_complete
}
-function service_port() {
+function cluster_ip() {
name=$1
- manager_ip=$k8s_master
- log "getting node port for service $name at manager $manager_ip"
+ log "getting clusterIp for service $name at manager $k8s_master"
tries=6
- port="null"
- while [[ "$port" == "null" && $tries -gt 0 ]]; do
+ svcId="null"
+ clusterIp="null"
+ while [[ "$clusterIp" == "null" && $tries -gt 0 ]]; do
curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/node-instances
- ni=$(jq -r '.items | length' /tmp/json)
+ -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
+ ni=$(jq -r '.items | length' ~/tmp/json)
while [[ $ni -ge 0 ]]; do
((ni--))
- id=$(jq -r ".items[$ni].id" /tmp/json)
- if [[ $id == $name\_service* ]]; then
- port=$(jq -r ".items[$ni].runtime_properties.kubernetes.spec.ports[0].node_port" /tmp/json)
- echo $port
+      # match on node_id as node_port() does; the deployment_id is the
+      # blueprint name, not the service name
+      nodeid=$(jq -r ".items[$ni].node_id" ~/tmp/json)
+      type=$(jq -r ".items[$ni].runtime_properties.kubernetes.kind" ~/tmp/json)
+      if [[ "$nodeid" == "${name}_service" && "$type" == "Service" ]]; then
+ svcId=$ni
+ clusterIp=$(jq -r ".items[$ni].runtime_properties.kubernetes.spec.cluster_ip" ~/tmp/json)
+ if [[ "$clusterIp" != "null" ]]; then
+ echo "clusterIp=$clusterIp"
+ export clusterIp
+ fi
fi
done
sleep 10
((tries--))
done
- if [[ "$port" == "null" ]]; then
- jq -r '.items' /tmp/json
- fail "node_port not found for service"
+ if [[ "$clusterIp" == "null" ]]; then
+ log "node-instance resource for $name"
+ jq -r ".items[$svcId]" ~/tmp/json
+ log "clusterIp not found for service"
+ fi
+}
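+# e.g. (sketch): `bash k8s-cloudify.sh clusterIp nginx` prints a line such
+# as "clusterIp=10.104.22.19" (address illustrative) for the demo service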
+
+function node_port() {
+ name=$1
+ log "getting nodePort for service $name at manager $k8s_master"
+
+ tries=6
+ svcId="null"
+ nodePort="null"
+ while [[ "$nodePort" == "null" && $tries -gt 0 ]]; do
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
+    nodePort=$(jq -r ".items[] | select(.node_id == \"${name}_service\")" \
+      ~/tmp/json | jq -r '.runtime_properties.kubernetes.spec.ports[0].node_port')
+    if [[ "$nodePort" != "null" ]]; then echo "nodePort=$nodePort"; export nodePort; fi
+ sleep 10
+ ((tries--))
+ done
+ if [[ "$nodePort" == "null" ]]; then
+    log "node-instance resources returned by the manager:"
+    jq -r '.items' ~/tmp/json
+ log "nodePort not found for service"
+ fi
+}
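+# e.g. (sketch): `bash k8s-cloudify.sh nodePort nginx` looks up the
+# "nginx_service" node instance and prints "nodePort=<assigned port>"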
+
+function wait_terminated() {
+ name=$1
+ workflow=$2
+ log "waiting for $name execution $workflow to be completed ('terminated')"
+ status=""
+ while [[ "$status" != "terminated" ]]; do
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/executions
+ ni=$(jq -r '.items | length' ~/tmp/json)
+ while [[ $ni -ge 0 ]]; do
+ ((ni--))
+ depid=$(jq -r ".items[$ni].deployment_id" ~/tmp/json)
+ wflid=$(jq -r ".items[$ni].workflow_id" ~/tmp/json)
+ status=$(jq -r ".items[$ni].status" ~/tmp/json)
+ if [[ "$depid" == "$name" && "$wflid" == "$workflow" ]]; then
+ id=$(jq -r ".items[$ni].id" ~/tmp/json)
+# curl -u admin:admin --header 'Tenant: default_tenant' \
+# http://$k8s_master/api/v3.1/executions/$id | jq
+ if [[ "$status" == "failed" ]]; then fail "execution failed"; fi
+ if [[ "$status" == "terminated" ]]; then break; fi
+ log "$name execution $workflow is $status... waiting 30 seconds"
+ fi
+ done
+ sleep 30
+ done
+ if [[ "$status" == "terminated" ]]; then
+ log "$name execution $workflow is $status"
+ else
+ fail "timeout waiting for $name execution $workflow: status = $status"
fi
}
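+# For illustration (see start() below), a new deployment runs two workflows
+# that are waited on in sequence:
+#   wait_terminated $bp create_deployment_environment
+#   wait_terminated $bp install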
function start() {
name=$1
bp=$2
- manager_ip=$k8s_master
+ inputs="$3"
+ start=$((`date +%s`/60))
- log "start app $name with blueprint $bp"
+ step_start "start app $name with blueprint $bp and inputs: $inputs"
log "copy kube config from k8s master for insertion into blueprint"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$manager_ip:/home/$k8s_user/.kube/config $bp/kube.config
+ $k8s_user@$k8s_master:/home/$k8s_user/.kube/config $bp/kube.config
- log "package the blueprint"
- # CLI: cfy blueprints package -o /tmp/$bp $bp
- tar ckf /tmp/blueprint.tar $bp
+ log "package the blueprint"
+ # CLI: cfy blueprints package -o ~/tmp/$bp $bp
+ mkdir -p ~/tmp
+ tar ckf ~/tmp/blueprint.tar $bp
- log "upload the blueprint"
- # CLI: cfy blueprints upload -t default_tenant -b $bp /tmp/$bp.tar.gz
- resp=$(curl -X PUT -s -w "%{http_code}" -o /tmp/json \
- -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/octet-stream" \
- http://$manager_ip/api/v3.1/blueprints/$bp?application_file_name=blueprint.yaml \
- -T /tmp/blueprint.tar)
- if [[ "$resp" != "201" ]]; then
- log "Response: $resp"
- cat /tmp/json
- fail "upload failed, response $resp"
- fi
+ log "upload the blueprint"
+ # CLI: cfy blueprints upload -t default_tenant -b $bp ~/tmp/$bp.tar.gz
+ resp=$(curl -X PUT -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ --header "Content-Type: application/octet-stream" \
+ http://$k8s_master/api/v3.1/blueprints/$bp?application_file_name=blueprint.yaml \
+ -T ~/tmp/blueprint.tar)
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat ~/tmp/json
+ fail "upload failed, response $resp"
+ fi
- log "create a deployment for the blueprint"
- # CLI: cfy deployments create -t default_tenant -b $bp $bp
- resp=$(curl -X PUT -s -w "%{http_code}" -o /tmp/json \
+ log "create a deployment for the blueprint"
+ # CLI: cfy deployments create -t default_tenant -b $bp $bp
+ if [[ "z$inputs" != "z" ]]; then
+ resp=$(curl -X PUT -s -w "%{http_code}" -o ~/tmp/json \
-u admin:admin --header 'Tenant: default_tenant' \
-w "\nResponse: %{http_code}\n" \
--header "Content-Type: application/json" \
- -d "{\"blueprint_id\": \"$bp\"}" \
- http://$manager_ip/api/v3.1/deployments/$bp)
- # response code comes back as "\nResponse: <code>"
- resp=$(echo $resp | awk '/Response/ {print $2}')
- if [[ "$resp" != "201" ]]; then
- log "Response: $resp"
- cat /tmp/json
- fail "deployment failed, response $resp"
- fi
- sleep 10
-
- # CLI: cfy workflows list -d $bp
-
- log "install the deployment pod and service"
- # CLI: cfy executions start install -d $bp
- resp=$(curl -X POST -s -w "%{http_code}" -o /tmp/json \
+ -d "{\"blueprint_id\": \"$bp\", \"inputs\": $inputs}" \
+ http://$k8s_master/api/v3.1/deployments/$bp)
+ else
+ resp=$(curl -X PUT -s -w "%{http_code}" -o ~/tmp/json \
-u admin:admin --header 'Tenant: default_tenant' \
-w "\nResponse: %{http_code}\n" \
--header "Content-Type: application/json" \
- -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"install\"}" \
- http://$manager_ip/api/v3.1/executions)
- # response code comes back as "\nResponse: <code>"
- resp=$(echo $resp | awk '/Response/ {print $2}')
- if [[ "$resp" != "201" ]]; then
- log "Response: $resp"
- cat /tmp/json
- fail "install failed, response $resp"
- fi
+ -d "{\"blueprint_id\": \"$bp\"}" \
+ http://$k8s_master/api/v3.1/deployments/$bp)
+ fi
+ # response code comes back as "\nResponse: <code>"
+ resp=$(echo $resp | awk '/Response/ {print $2}')
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat ~/tmp/json
+ fail "deployment failed, response $resp"
+ fi
+ sleep 10
- log "get the service's assigned node_port"
- port=""
- service_port $name $manager_ip
+ # CLI: cfy workflows list -d $bp
- log "verify service is responding"
- while ! curl -v http://$manager_ip:$port ; do
- log "$name service is not yet responding at http://$manager_ip:$port, waiting 10 seconds"
- sleep 10
- done
- log "service is active at http://$manager_ip:$port"
+ log "install the deployment pod and service"
+ # CLI: cfy executions start install -d $bp
+ resp=$(curl -X POST -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -w "\nResponse: %{http_code}\n" \
+ --header "Content-Type: application/json" \
+ -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"install\"}" \
+ http://$k8s_master/api/v3.1/executions)
+ # response code comes back as "\nResponse: <code>"
+ resp=$(echo $resp | awk '/Response/ {print $2}')
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat ~/tmp/json
+ fail "install failed, response $resp"
+ fi
+
+  # the execution's deployment_id is the blueprint name, so wait on $bp
+  wait_terminated $bp create_deployment_environment
+  wait_terminated $bp install
+ log "install actions completed"
+ step_complete
}
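+# e.g. (sketch; the inputs JSON and its keys are blueprint-specific and
+# illustrative only):
+#   bash k8s-cloudify.sh start nginx k8s-hello-world '{"image": "nginx"}'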
function cancel_executions() {
- log "cancelling all active executions"
+ log "workaround: cancelling all active executions prior to new execution"
curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/executions
+ -o ~/tmp/json http://$k8s_master/api/v3.1/executions
i=0
- exs=$(jq -r '.items[].status' /tmp/json)
+ exs=$(jq -r '.items[].status' ~/tmp/json)
for status in $exs; do
- id=$(jq -r ".items[$i].id" /tmp/json)
- log "execution $id in state $status"
+ id=$(jq -r ".items[$i].id" ~/tmp/json)
if [[ "$status" == "started" ]]; then
+ log "force cancelling execution $id in state $status"
id=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions | jq -r ".items[$i].id")
+ http://$k8s_master/api/v3.1/executions | jq -r ".items[$i].id")
curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
--header "Content-Type: application/json" \
-d "{\"deployment_id\": \"$bp\", \"action\": \"force-cancel\"}" \
- http://$manager_ip/api/v3.1/executions/$id
+ http://$k8s_master/api/v3.1/executions/$id
fi
((i++))
done
@@ -333,7 +408,7 @@ function cancel_executions() {
while [[ $count -gt 0 && $tries -gt 0 ]]; do
sleep 10
exs=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions | jq -r '.items[].status')
+ http://$k8s_master/api/v3.1/executions | jq -r '.items[].status')
count=0
for status in $exs; do
if [[ "$status" != "terminated" && "$status" != "cancelled" && "$status" != "failed" ]]; then
@@ -349,115 +424,142 @@ function cancel_executions() {
fi
}
-function verify_deleted() {
- log "verifying the resource is deleted: $1"
+function check_resource() {
+ log "checking for presence of resource: $1"
status=""
- if [[ -f /tmp/vfy ]]; then rm /tmp/vfy; fi
- r=$(curl -s -o /tmp/vfy -u admin:admin --header 'Tenant: default_tenant' $1)
+ if [[ -f ~/tmp/vfy ]]; then rm ~/tmp/vfy; fi
+ r=$(curl -s -o ~/tmp/vfy -u admin:admin --header 'Tenant: default_tenant' $1)
log "Response: $r"
- cat /tmp/vfy
- status=$(cat /tmp/vfy | jq -r '.error_code')
+# cat ~/tmp/vfy
+ status=$(cat ~/tmp/vfy | jq -r '.error_code')
}
function stop() {
name=$1
bp=$2
- manager_ip=$k8s_master
+ step_start "stopping $name with blueprint $bp"
# TODO: fix the need for this workaround
- log "try to first cancel all current executions"
+ log "workaround: try to first cancel all current executions"
cancel_executions
# end workaround
- log "uninstall the service"
- resp=$(curl -X POST -s -w "%{http_code}" -o /tmp/json \
- -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/json" \
- -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
- http://$manager_ip/api/v3.1/executions)
- log "Response: $resp"
- if [[ "$resp" != "201" ]]; then
- log "uninstall action was not accepted"
- cat /tmp/json
- fi
-
- id=$(jq -r ".id" /tmp/json)
- if [[ "$id" != "null" ]]; then
- log "wait for uninstall execution $id to be completed ('terminated')"
- status=""
- tries=1
- while [[ "$status" != "terminated" && $tries -lt 10 ]]; do
- sleep 30
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/executions/$id
- status=$(jq -r ".status" /tmp/json)
- log "try $tries of 10: execution $id is $status"
- ((tries++))
- done
- if [[ $tries == 11 ]]; then
- cat /tmp/json
- fail "uninstall execution did not complete"
+ log "verify $name deployment is present"
+ check_resource http://$k8s_master/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "initiate uninstall action for $name deployment"
+ resp=$(curl -X POST -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ --header "Content-Type: application/json" \
+ -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
+ http://$k8s_master/api/v3.1/executions)
+ log "Response: $resp"
+ if [[ "$resp" != "201" ]]; then
+ log "uninstall action was not accepted"
+ cat ~/tmp/json
fi
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions/$id | jq
- count=1
- state=""
- tries=6
- while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
- sleep 10
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/node-instances
- state=$(jq -r '.items[0].state' /tmp/json)
- ((tries--))
- done
- if [[ "$state" != "deleted" ]]; then
- jq -r '.items' /tmp/json
- # fail "node-instances delete failed"
+ id=$(jq -r ".id" ~/tmp/json)
+ if [[ "$id" != "null" ]]; then
+ log "wait for uninstall execution $id to be completed ('terminated')"
+ status=""
+ tries=10
+ while [[ "$status" != "terminated" && $tries -gt 0 ]]; do
+ if [[ "$status" == "failed" ]]; then break; fi
+ sleep 30
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/executions/$id
+ status=$(jq -r ".status" ~/tmp/json)
+ log "execution $id is $status"
+ ((tries--))
+ done
+ if [[ "$status" == "failed" || $tries == 0 ]]; then
+ cat ~/tmp/json
+ log "uninstall execution did not complete"
+ else
+ log "wait for node instances to be deleted"
+ state=""
+ tries=18
+ while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
+ sleep 10
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
+ ni=$(jq -r '.items | length' ~/tmp/json)
+ state="deleted"
+ while [[ $ni -ge 0 ]]; do
+            # track per-item state so unrelated node instances don't
+            # clobber $state; match on the deployment id ($bp)
+            istate=$(jq -r ".items[$ni].state" ~/tmp/json)
+            depid=$(jq -r ".items[$ni].deployment_id" ~/tmp/json)
+            if [[ "$depid" == "$bp" && "$istate" != "deleted" ]]; then
+              state=""
+              id=$(jq -r ".items[$ni].id" ~/tmp/json)
+              log "waiting on deletion of node instance $id for $name"
+            fi
+ ((ni--))
+ done
+ ((tries--))
+ done
+ if [[ "$state" != "deleted" ]]; then
+# jq -r '.items' ~/tmp/json
+ log "node-instances delete did not complete"
+ fi
+ fi
+# curl -s -u admin:admin --header 'Tenant: default_tenant' \
+# http://$k8s_master/api/v3.1/executions/$id | jq
+
+ log "delete the $name deployment"
+ resp=$(curl -X DELETE -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/deployments/$bp)
+ log "Response: $resp"
+# cat ~/tmp/json
+ log "verify the $name deployment is deleted"
+ check_resource http://$k8s_master/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "force delete $name deployment via cfy CLI over ssh to $k8s_user@$k8s_master"
+ cancel_executions
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master cfy deployment delete -f -t default_tenant $bp
+ sleep 10
+ check_resource http://$k8s_master/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ fail "deployment $name delete failed"
+ fi
+ fi
+ else
+ log "uninstall execution id = $id"
+ cat ~/tmp/json
fi
+ else
+ log "$name deployment not found"
+ fi
- log "delete the deployment"
- resp=$(curl -X DELETE -s -w "%{http_code}" -o /tmp/json \
+ log "verify $bp blueprint is present"
+ check_resource http://$k8s_master/api/v3.1/blueprints/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "delete the $bp blueprint"
+ resp=$(curl -X DELETE -s -w "%{http_code}" -o ~/tmp/json \
-u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/deployments/$bp)
+ -o ~/tmp/json http://$k8s_master/api/v3.1/blueprints/$bp)
log "Response: $resp"
- cat /tmp/json
- log "verify the deployment is deleted"
- verify_deleted http://$manager_ip/api/v3.1/deployments/$bp
- if [[ "$status" != "not_found_error" ]]; then
- log "force delete deployment via cfy CLI over ssh to $k8s_user@$manager_ip"
- cancel_executions
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$manager_ip cfy deployment delete -f -t default_tenant $bp
+
+    if [[ "$resp" != "404" ]]; then
sleep 10
- verify_deleted http://$manager_ip/api/v3.1/deployments/$bp
+ log "verify the blueprint is deleted"
+ check_resource http://$k8s_master/api/v3.1/blueprints/$bp
if [[ "$status" != "not_found_error" ]]; then
- fail "deployment delete failed"
+ cat ~/tmp/json
+ fail "blueprint delete failed"
fi
fi
+ log "blueprint $bp deleted"
else
- log "uninstall execution id = $id"
- cat /tmp/json
+ log "$bp blueprint not found"
fi
-
- sleep 10
- log "delete the blueprint"
- resp=$(curl -X DELETE -s -w "%{http_code}" -o /tmp/json \
- -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/blueprints/$bp)
- log "Response: $resp"
- sleep 10
- log "verify the blueprint is deleted"
- verify_deleted http://$manager_ip/api/v3.1/blueprints/$bp
- if [[ "$status" != "not_found_error" ]]; then
- cat /tmp/json
- fail "blueprint delete failed"
- fi
- log "blueprint deleted"
+ step_complete
}
function demo() {
- manager_ip=$k8s_master
+  step_start "$1 nginx app demo via Cloudify Manager at $k8s_master"
# Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
# Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
@@ -469,17 +571,18 @@ function demo() {
cd ~/models/tools/cloudify/blueprints
if [[ "$1" == "start" ]]; then
- start nginx k8s-hello-world $manager_ip
+ start nginx k8s-hello-world
else
- stop nginx k8s-hello-world $manager_ip
+ stop nginx k8s-hello-world
fi
+ step_complete
}
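+# e.g. to exercise the hello-world demo end to end:
+#   bash k8s-cloudify.sh demo start
+#   bash k8s-cloudify.sh demo stop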
# API examples: use '| jq' to format JSON output
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/blueprints | jq
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/deployments | jq
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/executions | jq
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/deployments | jq -r '.items[0].blueprint_id'
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/node-instances | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/blueprints | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/deployments | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/executions | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/deployments | jq -r '.items[0].blueprint_id'
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/node-instances | jq
function clean () {
log "Cleanup cloudify"
@@ -501,14 +604,17 @@ case "$1" in
;;
"start")
cd ~/models/tools/cloudify/blueprints
- start $2 $3
+ start $2 $3 "$4"
cd $WORK_DIR
;;
"stop")
stop $2 $3
;;
- "port")
- service_port $2
+ "nodePort")
+ node_port $2
+ ;;
+ "clusterIp")
+ cluster_ip $2
;;
"clean")
clean
diff --git a/tools/docker/docker-cluster.sh b/tools/docker/docker-cluster.sh
index abb4c31..862a232 100644
--- a/tools/docker/docker-cluster.sh
+++ b/tools/docker/docker-cluster.sh
@@ -46,12 +46,12 @@ function log() {
# Setup master and worker hosts
function setup() {
# Per https://docs.docker.com/engine/swarm/swarm-tutorial/
- cat >/tmp/env.sh <<EOF
+  mkdir -p ~/tmp; cat >~/tmp/env.sh <<EOF
master=$1
workers="$2"
EOF
- source /tmp/env.sh
- cat >/tmp/prereqs.sh <<'EOF'
+ source ~/tmp/env.sh
+ cat >~/tmp/prereqs.sh <<'EOF'
#!/bin/bash
# Per https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
sudo apt-get remove -y docker docker-engine docker.io docker-ce
@@ -75,7 +75,7 @@ EOF
# jq is used for parsing API reponses
sudo apt-get install -y jq
- scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/prereqs.sh ubuntu@$master:/home/ubuntu/prereqs.sh
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/tmp/prereqs.sh ubuntu@$master:/home/ubuntu/prereqs.sh
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$master bash /home/ubuntu/prereqs.sh
# activate docker API
# Per https://www.ivankrizsan.se/2016/05/18/enabling-docker-remote-api-on-ubuntu-16-04/
@@ -97,7 +97,7 @@ EOF
token=$(ssh -o StrictHostKeyChecking=no -x ubuntu@$master sudo docker swarm join-token worker | grep docker)
for worker in $workers; do
log "setting up worker at $worker"
- scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/prereqs.sh ubuntu@$worker:/home/ubuntu/.
+ scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/tmp/prereqs.sh ubuntu@$worker:/home/ubuntu/.
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker bash /home/ubuntu/prereqs.sh
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ubuntu@$worker sudo $token
done
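+# For illustration, the captured join command has the form (token value
+# illustrative): docker swarm join --token SWMTKN-1-... <master-ip>:2377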
@@ -112,7 +112,7 @@ function create_service() {
log "creating service $1 with $2 replicas"
# sudo docker service create -p 80:80 --replicas $reps --name nginx nginx
# per https://docs.docker.com/engine/api/v1.27/
- source /tmp/env.sh
+ source ~/tmp/env.sh
case "$1" in
nginx)
match="Welcome to nginx!"
@@ -131,7 +131,7 @@ function create_service() {
function check_service() {
log "checking service state for $1 with match string $2"
- source /tmp/env.sh
+ source ~/tmp/env.sh
service=$1
match="$2"
services=$(curl http://$master:4243/services)
@@ -144,12 +144,12 @@ function check_service() {
nodes="$master $workers"
for node in $nodes; do
not=""
- while ! curl -s -o /tmp/resp http://$node:$port ; do
+ while ! curl -s -o ~/tmp/resp http://$node:$port ; do
log "service is not yet active, waiting 10 seconds"
sleep 10
done
- curl -s -o /tmp/resp http://$node:$port
- if [[ $(grep -c "$match" /tmp/resp) == 0 ]]; then
+ curl -s -o ~/tmp/resp http://$node:$port
+ if [[ $(grep -c "$match" ~/tmp/resp) == 0 ]]; then
not="NOT"
fi
echo "$service service is $not active at address http://$node:$port"
@@ -162,7 +162,7 @@ function check_service() {
function delete_service() {
log "deleting service $1"
- source /tmp/env.sh
+ source ~/tmp/env.sh
service=$1
services=$(curl http://$master:4243/services)
n=$(echo $services | jq '. | length')
@@ -183,7 +183,7 @@ function delete_service() {
# Clean the installation
function clean() {
- source /tmp/env.sh
+ source ~/tmp/env.sh
nodes="$master $workers"
for node in $nodes; do
ssh -o StrictHostKeyChecking=no -x ubuntu@$node <<EOF
diff --git a/tools/kubernetes/README.md b/tools/kubernetes/README.md
index 655e743..55a54f8 100644
--- a/tools/kubernetes/README.md
+++ b/tools/kubernetes/README.md
@@ -1,53 +1,64 @@
-This folder contains scripts etc to setup a kubernetes cluster with the following type of environment and components:
-* hardware
- * 2 or more bare metal servers: may also work with VMs
- * two connected networks (public and private): may work if just a single network
- * one or more disks on each server: ceph-osd can be setup on an unused disk, or a folder (/ceph) on the host OS disk
-* Kubernetes
- * single k8s master node
- * other k8s cluster worker nodes
-* Ceph: backend for persistent volume claims (PVCs) for the k8s cluster, deployed using Helm charts from https://github.com/att/netarbiter
-* Helm on k8s master (used for initial cluster deployment only)
- * demo helm charts for Helm install verification etc, cloned from https://github.com/kubernetes/charts and modified/tested to work on this cluster
-* Prometheus: server on the k8s master, exporters on the k8s workers
-* Cloudify CLI and Cloudify Manager with Kubernetes plugin (https://github.com/cloudify-incubator/cloudify-kubernetes-plugin)
-* OPNFV VES Collector and Agent
-* OPNFV Barometer collectd plugin with libvirt and kafka support
-* As many components as possible above will be deployed using k8s charts, managed either through Helm or Cloudify
-
-A larger goal of this work is to demonstrate hybrid cloud deployment as indicated by the presence of OpenStack nodes in the diagram below.
-
-Here is an overview of the deployment process, which if desired can be completed via a single script, in about 50 minutes for a four-node k8s cluster of production-grade servers.
-* demo_deploy.sh: wrapper for the complete process
- * ../maas/deploy.sh: deploys the bare metal host OS (Ubuntu or Centos currently)
- * k8s-cluster.sh: deploy k8s cluster
- * deploy k8s master
- * deploy k8s workers
- * deploy helm
- * verify operation with a hello world k8s chart (nginx)
- * deploy ceph (ceph-helm or on bare metal) and verify basic PVC jobs
- * verify operation with a more complex (PVC-dependent) k8s chart (dokuwiki)
- * ../prometheus/prometheus-tools.sh: setup prometheus server, exporters on all nodes, and grafana
- * ../cloudify/k8s-cloudify.sh: setup cloudify (cli and manager)
- * verify kubernetes+ceph+cloudify operation with a PVC-dependent k8s chart deployed thru cloudify
- * (VES repo) tools/demo_deploy.sh: deploy OPNFV VES
- * deploy VES collector
- * deploy influxdb and VES events database
- * deploy VES dashboard in grafana (reuse existing grafana above)
- * deploy VES agent (OPNFV Barometer "VES Application")
- * on each worker, deploy OPNFV Barometer collectd plugin
-* when done, these demo elements are available
- * Helm-deployed demo app dokuwiki, at the assigned node port on any k8s cluster node (e.g. http://$NODE_IP:$NODE_PORT)
- * Cloudify-deployed demo app nginx at http://$k8s_master:$(assigned node port)
- * Prometheus UI at http://$k8s_master:9090
- * Grafana dashboards at http://$ves_grafana_host:3000
- * Grafana API at http://$ves_grafana_auth@$ves_grafana_host:3000/api/v1/query?query=<string>
- * Kubernetes API at https://$k8s_master:6443/api/v1/
- * Cloudify API at (example): curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status
-
-See comments in [setup script](k8s-cluster.sh) and the other scripts for more info.
-
-This is a work in progress!
-
-![Resulting Cluster](/docs/images/models-k8s.png?raw=true "Resulting Cluster")
-
+<!---
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017-2018 AT&T Intellectual Property, Inc
+-->
+
+This folder contains scripts etc. to set up a kubernetes cluster with the following environment and components:
+* hardware
+ * 2 or more bare metal servers: may also work with VMs
+ * two connected networks (public and private): may work if just a single network
+ * one or more disks on each server: ceph-osd can be setup on an unused disk, or a folder (/ceph) on the host OS disk
+* Kubernetes
+ * single k8s master node
+ * other k8s cluster worker nodes
+* Ceph: backend for persistent volume claims (PVCs) for the k8s cluster, deployed using Helm charts from [netarbiter](https://github.com/att/netarbiter)
+* Helm on k8s master (used for initial cluster deployment only)
+ * demo helm charts for Helm install verification etc, cloned from [kubernetes charts](https://github.com/kubernetes/charts) and modified/tested to work on this cluster
+* Prometheus: server on the k8s master, exporters on the k8s workers
+* Cloudify CLI and Cloudify Manager with [Kubernetes plugin](https://github.com/cloudify-incubator/cloudify-kubernetes-plugin)
+* OPNFV VES Collector and Agent
+* OPNFV Barometer collectd plugin with libvirt and kafka support
+* As many components as possible above will be deployed using k8s charts, managed either through Helm or Cloudify
+
+A larger goal of this work is to demonstrate hybrid cloud deployment as indicated by the presence of OpenStack nodes in the diagram below.
+
+Here is an overview of the deployment process which, if desired, can be completed via a single script in about 50 minutes for a four-node k8s cluster of production-grade servers.
+* demo_deploy.sh: wrapper for the complete process
+ * [/tools/maas/deploy.sh](/tools/maas/deploy.sh): deploys the bare metal host OS (Ubuntu or Centos currently)
+ * k8s-cluster.sh: deploy k8s cluster
+ * deploy k8s master
+ * deploy k8s workers
+ * deploy helm
+ * verify operation with a hello world k8s chart (nginx)
+ * deploy ceph (ceph-helm or on bare metal) and verify basic PVC jobs
+ * verify operation with a more complex (PVC-dependent) k8s chart (dokuwiki)
+ * [/tools/cloudify/k8s-cloudify.sh](/tools/cloudify/k8s-cloudify.sh): setup cloudify (cli and manager)
+ * verify kubernetes+ceph+cloudify operation with a PVC-dependent k8s chart deployed thru cloudify
+ * (VES repo) tools/demo_deploy.sh: deploy OPNFV VES
+ * deploy VES collector
+ * deploy influxdb and VES events database
+ * deploy VES dashboard in grafana (reuse existing grafana above)
+ * deploy VES agent (OPNFV Barometer "VES Application")
+ * on each worker, deploy OPNFV Barometer collectd plugin
+ * [/tools/prometheus/prometheus-tools.sh](/tools/prometheus/prometheus-tools.sh): setup prometheus server and exporters on all nodes
+ * [/tests/k8s-cloudify-clearwater.sh](/tests/k8s-cloudify-clearwater.sh): deploy clearwater-docker and run clearwater-live-test
+ * note: kubectl is currently used to deploy the clearwater-docker charts; use of cloudify-kubernetes for this is coming soon.
+* when done, these demo elements are available, as described in the script output
+ * Helm-deployed demo app dokuwiki
+ * Cloudify-deployed demo app nginx
+ * Prometheus UI
+ * Grafana dashboards and API
+ * Kubernetes API
+ * Cloudify API
+ * Clearwater-docker
+
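+For example, the node port assigned to the Helm-deployed dokuwiki app can be
+looked up on the k8s master as follows (a sketch; the service name dw-dokuwiki
+matches what demo_deploy.sh reports):
+
+```bash
+kubectl get svc --namespace default dw-dokuwiki -o jsonpath='{.spec.ports[0].nodePort}'
+```
+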
+See comments in the [overall demo deploy script](demo_deploy.sh), the [k8s setup script](k8s-cluster.sh), and the other scripts for more info.
+
+See [readme in the folder above](/tools/README.md) for an illustration of the resulting k8s cluster in a hybrid cloud environment.
+
+The flow for this demo deployment is illustrated below (note: the clearwater-docker deploy/test steps are not yet shown).
+
+![models_demo_flow.svg](/docs/images/models_demo_flow.svg "models_demo_flow.svg")
+
+This is a work in progress! \ No newline at end of file
diff --git a/tools/kubernetes/ceph-baremetal.sh b/tools/kubernetes/ceph-baremetal.sh
index 06a6926..79edd01 100644
--- a/tools/kubernetes/ceph-baremetal.sh
+++ b/tools/kubernetes/ceph-baremetal.sh
@@ -133,7 +133,7 @@ EOF
kubectl create secret generic ceph-secret-admin --from-literal=key="$admin_key" --namespace=kube-system --type=kubernetes.io/rbd
log "Create rdb storageClass 'general'"
- cat <<EOF >/tmp/ceph-sc.yaml
+ cat <<EOF >~/tmp/ceph-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
@@ -150,7 +150,7 @@ parameters:
EOF
  # TODO: find out where in the steps above the ~/.kube folders became owned by root
sudo chown -R ubuntu:ubuntu ~/.kube/*
- kubectl create -f /tmp/ceph-sc.yaml
+ kubectl create -f ~/tmp/ceph-sc.yaml
log "Create storage pool 'kube'"
# https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md method
@@ -167,7 +167,7 @@ EOF
# Per https://github.com/kubernetes/examples/blob/master/staging/persistent-volume-provisioning/README.md
log "Create andtest a persistentVolumeClaim"
- cat <<EOF >/tmp/ceph-pvc.yaml
+ cat <<EOF >~/tmp/ceph-pvc.yaml
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
@@ -189,7 +189,7 @@ EOF
}
}
EOF
- kubectl create -f /tmp/ceph-pvc.yaml
+ kubectl create -f ~/tmp/ceph-pvc.yaml
while [[ "x$(kubectl get pvc -o jsonpath='{.status.phase}' claim1)" != "xBound" ]]; do
log "Waiting for pvc claim1 to be 'Bound'"
kubectl describe pvc
diff --git a/tools/kubernetes/ceph-helm.sh b/tools/kubernetes/ceph-helm.sh
index 031db9a..ffb2c88 100644
--- a/tools/kubernetes/ceph-helm.sh
+++ b/tools/kubernetes/ceph-helm.sh
@@ -171,7 +171,7 @@ EOG
log "Run ceph-osd at $node"
name=$(ssh -x -o StrictHostKeyChecking=no $USER@$node hostname)
# TODO: try sudo due to error
- # command_check_call: Running command: /usr/bin/ceph-osd --cluster ceph --mkfs -i 0 --monmap /var/lib/ceph/tmp/mnt.JKiQbp/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.JKiQbp --osd-uuid 23e72c93-e5b3-48ad-b919-ef59fe92b189 --setuser ceph --setgroup disk ... -1 bluestore(/var/lib/ceph/tmp/mnt.JKiQbp) _setup_block_symlink_or_file failed to open block file: (13) Permission denied
+ # command_check_call: Running command: /usr/bin/ceph-osd --cluster ceph --mkfs -i 0 ...
# TODO: leave out sudo... resulted in "./helm-install-ceph-osd.sh: line 40: helm: command not found"
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
$USER@$node sudo chmod 777 /var/lib/ceph/tmp
diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 2ec3b5f..ebb4dd2 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,17 +18,21 @@
#. will be installed:
#. - helm and dokuwiki as a demo helm chart based application
#. - prometheus + grafana for cluster monitoring/stats
+#. And optionally, the following will be installed:
#. - cloudify + kubernetes plugin and a demo hello world (nginx) app installed
#. - OPNFV VES as an ONAP-compatible monitoring platform
+#. - Clearwater-docker as an example complex VNF
#.
#. Prerequisites:
#. - MAAS server as cluster admin for k8s master/worker nodes.
#. - Password-less ssh key provided for node setup
#. - hostname of kubernetes master setup in DNS or /etc/hosts
#. Usage: on the MAAS server
-#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ bash ~/models/tools/kubernetes/demo_deploy.sh "<hosts>" <os> <key>
-#. <master> "<workers>" <pub-net> <priv-net> <ceph-mode> "<ceph-dev>" [<extras>]
+#. $ git clone https://gerrit.opnfv.org/gerrit/models models
+#. $ git clone https://gerrit.opnfv.org/gerrit/ves ves
+#. $ bash models/tools/kubernetes/demo_deploy.sh "<hosts>" <os> <key>
+#. <master> "<workers>" <pub-net> <priv-net> <ceph-mode> "<ceph-dev>"
+#. <base|all> [<extras>]
#. <hosts>: space separated list of hostnames managed by MAAS
#. <os>: OS to deploy, one of "ubuntu" (Xenial) or "centos" (Centos 7)
#. <key>: name of private key for cluster node ssh (in current folder)
@@ -40,15 +44,18 @@
#. <ceph-mode>: "helm" or "baremetal"
#. <ceph-dev>: space-separated list of disks (e.g. sda, sdb) to use on each
#. worker, or folder (e.g. "/ceph")
+#. <base|all>: deploy k8s base services, or (for all) add Cloudify, VES, Clearwater
#. <extras>: optional name of script for extra setup functions as needed
#.
-#. The script will create a k8s environment setup file specific to the master
-#. hostname, e.g. k8s_env_k8s-1.sh. This allows multiple deploys to be invoked
-#. from the same admin server, by
-#.
#. See tools/demo_deploy.sh in the OPNFV VES repo for additional environment
#. variables (mandatory/optional) for VES
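+#.
+#. Example (a sketch; hostnames, networks, key, and disk below are hypothetical):
+#. $ bash models/tools/kubernetes/demo_deploy.sh "node1 node2 node3" ubuntu \
+#.     mykey node1 "node2 node3" 192.168.1.0/24 10.0.0.0/24 helm "sdb" all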
+trap 'fail' ERR
+
+function fail() {
+ exit 1
+}
+
function run() {
start=$((`date +%s`/60))
$1
@@ -62,6 +69,7 @@ function step_end() {
}
function run_master() {
+ trap 'fail' ERR
start=$((`date +%s`/60))
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
$k8s_user@$k8s_master <<EOF
@@ -74,19 +82,21 @@ EOF
deploy_start=$((`date +%s`/60))
-extras=${10}
+deploy=${10}
+extras=${11}
if [[ "$4" != "$5" ]]; then
- k8s_master_host=$(echo $1 | cut -d ' ' -f 1)
+ k8s_master_hostname=$(echo "$1" | cut -d ' ' -f 1)
else
- k8s_master_host=$1
+ k8s_master_hostname=$1
fi
-cat <<EOF >~/k8s_env_$k8s_master_host.sh
+cat <<EOF >k8s_env.sh
+#!/bin/bash
k8s_nodes="$1"
k8s_user=$2
k8s_key=$3
k8s_master=$4
-k8s_master_host=$k8s_master_host
+k8s_master_hostname=$k8s_master_hostname
k8s_workers="$5"
k8s_priv_net=$6
k8s_pub_net=$7
@@ -96,35 +106,42 @@ export k8s_nodes
export k8s_user
export k8s_key
export k8s_master
-export k8s_master_host
+export k8s_master_hostname
export k8s_workers
export k8s_priv_net
export k8s_pub_net
export k8s_ceph_mode
export k8s_ceph_dev
EOF
-source ~/k8s_env_$k8s_master_host.sh
+source k8s_env.sh
env | grep k8s_
-source ~/models/tools/maas/deploy.sh $k8s_user $k8s_key "$k8s_nodes" $extras
+echo; echo "$0 $(date): Deploying base OS for master and worker nodes..."
+start=$((`date +%s`/60))
+source models/tools/maas/deploy.sh $k8s_user $k8s_key "$k8s_nodes" $extras
+step_end "source models/tools/maas/deploy.sh $k8s_user $k8s_key \"$k8s_nodes\" $extras"
+
eval `ssh-agent`
ssh-add $k8s_key
-scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
- $k8s_user@$k8s_master:/home/$k8s_user/$k8s_key
+while ! scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
+ $k8s_user@$k8s_master:/home/$k8s_user/$k8s_key ; do
+ echo; echo "$0 $(date): server is not yet ready for ssh; waiting 10 secs"
+ sleep 10
+done
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/k8s_env_$k8s_master_host.sh $k8s_user@$k8s_master:/home/$k8s_user/k8s_env.sh
+ k8s_env.sh $k8s_user@$k8s_master:/home/$k8s_user/k8s_env.sh
echo; echo "$0 $(date): Setting up kubernetes master..."
scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ~/models/tools/kubernetes/* $k8s_user@$k8s_master:/home/$k8s_user/.
+ models/tools/kubernetes/* $k8s_user@$k8s_master:/home/$k8s_user/.
run_master "bash k8s-cluster.sh master"
if [[ "$k8s_master" != "$k8s_workers" ]]; then
echo; echo "$0 $(date): Setting up kubernetes workers..."
run_master "bash k8s-cluster.sh workers \"$k8s_workers\""
else
- echo; echo "Label $k8s_master_host for role=worker"
- run_master "kubectl label nodes $k8s_master_host role=worker --overwrite"
+ echo; echo "Label $k8s_master_hostname for role=worker"
+ run_master "kubectl label nodes $k8s_master_hostname role=worker --overwrite"
fi
echo; echo "$0 $(date): Setting up helm..."
@@ -145,55 +162,62 @@ else
fi
echo; echo "Setting up Prometheus..."
-scp -r -o StrictHostKeyChecking=no ~/models/tools/prometheus/* \
+scp -r -o StrictHostKeyChecking=no models/tools/prometheus/* \
$k8s_user@$k8s_master:/home/$k8s_user/.
-run_master "bash prometheus-tools.sh setup"
+run_master "bash prometheus-tools.sh setup prometheus helm"
+run_master "bash prometheus-tools.sh setup grafana helm"
+
+if [[ "$deploy" == "all" ]]; then
+ echo; echo "$0 $(date): Setting up cloudify..."
+ scp -r -o StrictHostKeyChecking=no models/tools/cloudify \
+ $k8s_user@$k8s_master:/home/$k8s_user/.
+ run_master "bash cloudify/k8s-cloudify.sh prereqs"
+ run_master "bash cloudify/k8s-cloudify.sh setup"
+
+ echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
+ run "bash $HOME/models/tools/cloudify/k8s-cloudify.sh demo start"
+
+ echo; echo "$0 $(date): Setting up VES..."
+ # not re-cloned if existing - allows patch testing locally
+ if [[ ! -d ves ]]; then
+ echo; echo "$0 $(date): Cloning VES..."
+ git clone https://gerrit.opnfv.org/gerrit/ves ves
+ fi
+ # Can't pass quoted strings in commands
+ start=$((`date +%s`/60))
+ bash $HOME/ves/tools/demo_deploy.sh $k8s_user $k8s_master cloudify
+ step_end "bash $HOME/ves/tools/demo_deploy.sh $k8s_user $k8s_master cloudify"
-echo; echo "$0 $(date): Setting up cloudify..."
-scp -r -o StrictHostKeyChecking=no ~/models/tools/cloudify \
- $k8s_user@$k8s_master:/home/$k8s_user/.
-run_master "bash cloudify/k8s-cloudify.sh prereqs"
-run_master "bash cloudify/k8s-cloudify.sh setup"
+ echo; echo "Installing clearwater-docker..."
+ run "bash $HOME/models/tests/k8s-cloudify-clearwater.sh start $k8s_master blsaws latest"
-echo; echo "$0 $(date): Verifying kubernetes+helm+ceph+cloudify install..."
-run "bash $HOME/models/tools/cloudify/k8s-cloudify.sh demo start"
+ echo; echo "Waiting 5 minutes for clearwater IMS to be fully ready..."
+ sleep 300
-echo; echo "$0 $(date): Setting up VES"
-# not re-cloned if existing - allows patch testing locally
-if [[ ! -d ~/ves ]]; then
- echo; echo "$0 $(date): Cloning VES"
- git clone https://gerrit.opnfv.org/gerrit/ves ~/ves
+ echo; echo "Run clearwater-live-test..."
+ run "bash $HOME/models/tests/k8s-cloudify-clearwater.sh test $k8s_master"
fi
-ves_influxdb_host=$k8s_master:8086
-export ves_influxdb_host
-ves_grafana_host=$k8s_master:30330
-export ves_grafana_host
-ves_grafana_auth=admin:admin
-export ves_grafana_auth
-ves_kafka_hostname=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master hostname)
-export ves_kafka_hostname
-ves_loglevel=$ves_loglevel
-export ves_loglevel
-# Can't pass quoted strings in commands
-start=$((`date +%s`/60))
-bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_user $k8s_master "$k8s_workers"
-step_end "bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_user $k8s_master \"$k8s_workers\""
echo; echo "$0 $(date): All done!"
deploy_end=$((`date +%s`/60))
runtime=$((deploy_end-deploy_start))
log "Deploy \"$1\" duration = $runtime minutes"
-port=$(bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx)
-echo "Prometheus UI is available at http://$k8s_master:30990"
-echo "InfluxDB API is available at http://$ves_influxdb_host/query&db=veseventsdb&q=<string>"
-echo "Grafana dashboards are available at http://$ves_grafana_host (login as $ves_grafana_auth)"
-echo "Grafana API is available at http://$ves_grafana_auth@$ves_grafana_host/api/v1/query?query=<string>"
echo "Kubernetes API is available at https://$k8s_master:6443/api/v1/"
-echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
-echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+echo "Prometheus UI is available at http://$k8s_master:30990"
if [[ "$k8s_master" != "$k8s_workers" ]]; then
export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
fi
+
+if [[ "$deploy" == "all" ]]; then
+ source ves/tools/ves_env.sh
+ echo "InfluxDB API is available at http://$ves_influxdb_host:$ves_influxdb_port/query&db=veseventsdb&q=<string>"
+ echo "Grafana dashboards are available at http://$ves_grafana_host:$ves_grafana_port (login as $ves_grafana_auth)"
+ echo "Grafana API is available at http://$ves_grafana_auth@$ves_grafana_host:$ves_grafana_port/api/v1/query?query=<string>"
+ echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
+ port=$(bash models/tools/cloudify/k8s-cloudify.sh nodePort nginx)
+ echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+fi
+
diff --git a/tools/kubernetes/helm-tools.sh b/tools/kubernetes/helm-tools.sh
index a28b340..fff9a4d 100644
--- a/tools/kubernetes/helm-tools.sh
+++ b/tools/kubernetes/helm-tools.sh
@@ -39,13 +39,16 @@ function log() {
function setup_helm() {
log "Setup helm"
# Install Helm
+ # per https://github.com/kubernetes/helm/blob/master/docs/install.md
cd ~
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh
+ log "Initialize helm"
helm init
- nohup helm serve > /dev/null 2>&1 &
- helm repo update
+# nohup helm serve > /dev/null 2>&1 &
+# log "Run helm repo update"
+# helm repo update
# TODO: Workaround for bug https://github.com/kubernetes/helm/issues/2224
# For testing use only!
kubectl create clusterrolebinding permissive-binding \
@@ -69,15 +72,17 @@ function setup_helm() {
function wait_for_service() {
log "Waiting for service $1 to be available"
- pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- log "Service $1 is at pod $pod"
- ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
- while [[ "$ready" != "true" ]]; do
- log "pod $1 is not yet ready... waiting 10 seconds"
+ pods=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
+ log "Service $1 is at pod(s) $pods"
+ ready="false"
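+  # Note (clarification): this loop exits as soon as any one of the matching
+  # pods reports ready; it does not wait for every pod in $pods, and the
+  # log/host_ip lines after the loop refer to the last pod iterated.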
+ while [[ "$ready" != "true" ]] ; do
+ log "Waiting 10 seconds to check pod status"
sleep 10
- # TODO: figure out why transient pods sometimes mess up this logic, thus need to re-get the pods
- pod=$(kubectl get pods --namespace default | awk "/$1/ { print \$1 }")
- ready=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
+ for pod in $pods ; do
+ rdy=$(kubectl get pods --namespace default -o jsonpath='{.status.containerStatuses[0].ready}' $pod)
+ log "pod $pod is ready: $rdy"
+ if [[ "$rdy" == "true" ]]; then ready="true"; fi
+ done
done
log "pod $pod is ready"
host_ip=$(kubectl get pods --namespace default -o jsonpath='{.status.hostIP}' $pod)
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index 65f19b5..9ff75fe 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -46,18 +46,21 @@
#. Status: work in progress, incomplete
#
-trap 'fail' ERR
+# TODO: Debug why some commands below will trigger fail incorrectly
+# trap 'fail' ERR
-function fail() {
- log $1
- exit 1
-}
+# function fail() {
+# log $1
+# exit 1
+# }
function log() {
f=$(caller 0 | awk '{print $2}')
l=$(caller 0 | awk '{print $1}')
echo; echo "$f:$l ($(date)) $1"
- kubectl get pods --all-namespaces
+ if [[ "$kubectl_status" == "ready" ]]; then
+ kubectl get pods --all-namespaces
+ fi
}
function setup_prereqs() {
@@ -85,15 +88,27 @@ if [[ "$dist" == "ubuntu" ]]; then
wait_dpkg; sudo apt-get update
wait_dpkg; sudo apt-get upgrade -y
- echo; echo "prereqs.sh: ($(date)) Install latest docker"
- wait_dpkg; sudo apt-get install -y docker.io
- # Alternate for 1.12.6
- #sudo apt-get install -y libltdl7
- #wget https://packages.docker.com/1.12/apt/repo/pool/main/d/docker-engine/docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb
- #sudo dpkg -i docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb
- sudo service docker restart
+
+ dce=$(dpkg -l | grep -c docker-ce)
+ if [[ $dce -eq 0 ]]; then
+ echo; echo "prereqs.sh: ($(date)) Install latest docker-ce"
+ # Per https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
+ sudo apt-get remove -y docker docker-engine docker.io docker-ce
+ sudo apt-get update
+ sudo apt-get install -y \
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ software-properties-common
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ sudo add-apt-repository "deb [arch=amd64] \
+ https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+ sudo apt-get update
+ sudo apt-get install -y docker-ce
+ fi
+
echo; echo "prereqs.sh: ($(date)) Get k8s packages"
- export KUBE_VERSION=1.7.5
+ export KUBE_VERSION=1.10.0
# per https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
# Install kubelet, kubeadm, kubectl per https://kubernetes.io/docs/setup/independent/install-kubeadm/
sudo apt-get update && sudo apt-get install -y apt-transport-https
@@ -179,14 +194,23 @@ function setup_k8s_master() {
log "Reset kubeadm in case pre-existing cluster"
sudo kubeadm reset
# Start cluster
+ log "Workaround issue '/etc/kubernetes/manifests is not empty'"
+  mkdir -p ~/tmp
+ # workaround for [preflight] Some fatal errors occurred:
+ # /etc/kubernetes/manifests is not empty
+ sudo rm -rf /etc/kubernetes/manifests/*
+ log "Disable swap to workaround k8s incompatibility with swap"
+ # per https://github.com/kubernetes/kubeadm/issues/610
+ sudo swapoff -a
log "Start the cluster"
- sudo kubeadm init --pod-network-cidr=192.168.0.0/16 >>/tmp/kubeadm.out
- cat /tmp/kubeadm.out
- export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
+ sudo kubeadm init --pod-network-cidr=192.168.0.0/16 >>~/tmp/kubeadm.out
+ cat ~/tmp/kubeadm.out
+ export k8s_joincmd=$(grep "kubeadm join" ~/tmp/kubeadm.out)
log "Cluster join command for manual use if needed: $k8s_joincmd"
mkdir -p $HOME/.kube
sudo cp -f /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
+ export KUBECONFIG=$HOME/.kube/config
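+  # Optional sanity check (a sketch): confirm the apiserver is reachable
+  # before deploying the pod network, e.g. via: kubectl get nodes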
# Deploy pod network
log "Deploy calico as CNI"
# Updated to deploy Calico 2.6 per the create-cluster-kubeadm guide above
@@ -197,7 +221,7 @@ function setup_k8s_master() {
# Failure to wait for all calico pods to be running can cause the first worker
# to be incompletely setup. Symptom is that node_ports cannot be routed
# via that node (no response - incoming SYN packets are dropped).
- log "Wait for calico pods to be Running"
+ log "Wait for all calico pods to be Created"
# calico-etcd, calico-kube-controllers, calico-node
pods=$(kubectl get pods --namespace kube-system | grep -c calico)
while [[ $pods -lt 3 ]]; do
@@ -206,23 +230,24 @@ function setup_k8s_master() {
pods=$(kubectl get pods --namespace kube-system | grep -c calico)
done
- pods=$(kubectl get pods --all-namespaces | awk '/calico/ {print $2}')
+ log "Wait for all calico pods to be Running"
+ pods=$(kubectl get pods --namespace kube-system | awk '/calico/ {print $1}')
for pod in $pods; do
- status=$(kubectl get pods --all-namespaces | awk "/$pod/ {print \$4}")
+ status=$(kubectl get pods --namespace kube-system | awk "/$pod/ {print \$3}")
while [[ "$status" != "Running" ]]; do
log "$pod status is $status. Waiting 10 seconds"
sleep 10
- status=$(kubectl get pods --all-namespaces | awk "/$pod/ {print \$4}")
+ status=$(kubectl get pods --namespace kube-system | awk "/$pod/ {print \$3}")
done
log "$pod status is $status"
done
log "Wait for kubedns to be Running"
- kubedns=$(kubectl get pods --all-namespaces | awk '/kube-dns/ {print $4}')
+ kubedns=$(kubectl get pods --namespace kube-system | awk '/kube-dns/ {print $3}')
while [[ "$kubedns" != "Running" ]]; do
log "kube-dns status is $kubedns. Waiting 60 seconds"
sleep 60
- kubedns=$(kubectl get pods --all-namespaces | awk '/kube-dns/ {print $4}')
+ kubedns=$(kubectl get pods --namespace kube-system | awk '/kube-dns/ {print $3}')
done
log "kube-dns status is $kubedns"
@@ -235,7 +260,7 @@ function setup_k8s_master() {
function setup_k8s_workers() {
workers="$1"
- export k8s_joincmd=$(grep "kubeadm join" /tmp/kubeadm.out)
+ export k8s_joincmd=$(grep "kubeadm join" ~/tmp/kubeadm.out)
log "Installing workers at $1 with joincmd: $k8s_joincmd"
# TODO: kubeadm reset below is workaround for
@@ -261,7 +286,7 @@ EOF
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
$USER@$worker bash prereqs.sh
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
- $USER@$worker:/home/$USER/.
+ $USER@$worker:/home/$USER/k8s_env.sh
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
start_worker.sh $USER@$worker:/home/$USER/.
ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
diff --git a/tools/maas/deploy.sh b/tools/maas/deploy.sh
index bf6b01f..502bde3 100644
--- a/tools/maas/deploy.sh
+++ b/tools/maas/deploy.sh
@@ -22,7 +22,7 @@
#. - Password-less ssh key provided for node setup
#. Usage: on the MAAS server
#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
-#. $ source ~/models/tools/maas/demo_deploy.sh <os> <key> "<hosts>" [<extras>]
+#. $ source ~/models/tools/maas/deploy.sh <os> <key> "<hosts>" [<extras>]
#. <os>: "xenial" (Ubuntu Xenial) or "centos" (Centos 7)
#. <key>: name of private key for cluster node ssh (in current folder)
#. <hosts>: space separated list of hostnames managed by MAAS
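+#. Example (hypothetical key and hostnames, for illustration only):
+#. $ source ~/models/tools/maas/deploy.sh xenial mykey "node1 node2 node3"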
diff --git a/tools/prometheus/README.md b/tools/prometheus/README.md
index a3dfcc5..456a5df 100644
--- a/tools/prometheus/README.md
+++ b/tools/prometheus/README.md
@@ -1,3 +1,9 @@
+<!---
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017-2018 AT&T Intellectual Property, Inc
+-->
+
This folder contains scripts etc to setup [prometheus](https://github.com/prometheus/prometheus) on a server cluster. It installs:
* a prometheus server (on the host OS) and [grafana](https://grafana.com/) (in docker)
* prometheus exporters on a set of other nodes, to be monitored
diff --git a/tools/prometheus/dashboards/Docker_Host_and_Container_Overview-1503539411705.json b/tools/prometheus/dashboards/Docker_Host_and_Container_Overview-1503539411705.json
index 6db3532..d72cb20 100644
--- a/tools/prometheus/dashboards/Docker_Host_and_Container_Overview-1503539411705.json
+++ b/tools/prometheus/dashboards/Docker_Host_and_Container_Overview-1503539411705.json
@@ -1612,7 +1612,7 @@
]
},
"timezone": "browser",
- "title": "Docker Host & Container Overview",
+ "title": "Docker Host and Container Overview",
"version": 1
}
}
diff --git a/tools/prometheus/prometheus-tools.sh b/tools/prometheus/prometheus-tools.sh
index 12650c8..05526f6 100644
--- a/tools/prometheus/prometheus-tools.sh
+++ b/tools/prometheus/prometheus-tools.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,11 +19,22 @@
#. Prerequisites:
#. - Ubuntu server for master and agent nodes
#. - Docker installed
+#. - For helm-based install, k8s+helm installed
#. Usage:
#. $ git clone https://gerrit.opnfv.org/gerrit/models ~/models
#. $ cd ~/models/tools/prometheus
-#. $ bash prometheus-tools.sh setup
-#. $ bash prometheus-tools.sh clean
+#. $ bash prometheus-tools.sh <setup|clean> prometheus <docker|helm> <"agents">
+#. prometheus: setup/clean prometheus
+#. docker: setup/clean via docker
+#. helm: setup/clean via helm
+#.   agents: for docker-based setup, a quoted, space-separated list of agent nodes
+#. note: node running this script must have ssh-key enabled access to agents
+#. $ bash prometheus-tools.sh <setup|clean> grafana <docker|helm> [server] [creds]
+#. grafana: setup/clean grafana
+#. docker: setup/clean via docker
+#. helm: setup/clean via helm
+#. server: optional host:port of grafana server to use
+#. creds: optional grafana credentials (default: admin:admin)
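+#. Examples (hypothetical agent hostnames, for illustration only):
+#. $ bash prometheus-tools.sh setup prometheus docker "agent1 agent2"
+#. $ bash prometheus-tools.sh setup grafana helm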
#
# Prometheus links
@@ -49,55 +60,100 @@ function fail() {
}
function setup_prometheus() {
- # Prerequisites
- sudo apt install -y golang-go jq
- host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+ trap 'fail' ERR
+ log "Setup prometheus"
+ log "Setup prerequisites"
+ if [[ "$dist" == "ubuntu" ]]; then
+ sudo apt-get install -y golang-go jq
+ else
+    sudo yum install -y golang jq
+ fi
- # Install Prometheus server
- # TODO: add --set server.persistentVolume.storageClass=general
- # TODO: add persistent volume support
- log "Setup prometheus via Helm"
- helm install stable/prometheus --name pm \
- --set alertmanager.enabled=false \
- --set pushgateway.enabled=false \
- --set server.service.nodePort=30990 \
- --set server.service.type=NodePort \
- --set server.persistentVolume.enabled=false
-
- while ! curl -o /tmp/up http://$host_ip:30990/api/v1/query?query=up ; do
+ if [[ "$how" == "docker" ]]; then
+ log "Deploy prometheus node exporter on each agent node"
+ for agent in $agents ; do
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $dist@$agent sudo docker run -d --restart=always -p 9101:9101 \
+ -p 9100:9100 --name prometheus-node-exporter prom/node-exporter
+ done
+ log "Create prometheus config file prometheus.yml"
+ cat <<'EOF' >prometheus.yml
+global:
+ scrape_interval: 15s
+scrape_configs:
+ - job_name: 'prometheus'
+ scrape_interval: 5s
+ static_configs:
+EOF
+ for agent in $agents; do
+ echo " - targets: ['${agent}:9100']" >>prometheus.yml
+ echo " - targets: ['${agent}:9101']" >>prometheus.yml
+ done
+ log "prometheus.yaml:"
+ cat prometheus.yml
+ log "Start prometheus server"
+ sudo docker run -d --restart=unless-stopped -p 9090:9090 -p 30990:9090 \
+ -v /home/$USER/prometheus.yml:/etc/prometheus/prometheus.yml \
+ --name prometheus prom/prometheus
+ fi
+ if [[ "$how" == "helm" ]]; then
+ # Install Prometheus server
+ # TODO: add --set server.persistentVolume.storageClass=general
+ # TODO: add persistent volume support
+ log "Setup prometheus server and agents via Helm"
+ helm install stable/prometheus --name pm \
+ --set alertmanager.enabled=false \
+ --set pushgateway.enabled=false \
+ --set server.service.nodePort=30990 \
+ --set server.service.type=NodePort \
+ --set server.persistentVolume.enabled=false
+ fi
+
+ host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+ while ! curl -o ~/tmp/up http://$host_ip:30990/api/v1/query?query=up ; do
log "Prometheus API is not yet responding... waiting 10 seconds"
sleep 10
done
- exp=$(jq '.data.result|length' /tmp/up)
+ exp=$(jq '.data.result|length' ~/tmp/up)
log "$exp exporters are up"
while [[ $exp -gt 0 ]]; do
((exp--))
- eip=$(jq -r ".data.result[$exp].metric.instance" /tmp/up)
- job=$(jq -r ".data.result[$exp].metric.job" /tmp/up)
+ eip=$(jq -r ".data.result[$exp].metric.instance" ~/tmp/up)
+ job=$(jq -r ".data.result[$exp].metric.job" ~/tmp/up)
log "$job at $eip"
done
log "Prometheus dashboard is available at http://$host_ip:30990"
- echo "Prometheus dashboard is available at http://$host_ip:30990" >>/tmp/summary
}
function setup_grafana() {
- # TODO: use randomly generated password
- # TODO: add persistent volume support
- log "Setup grafana via Helm"
- #TODSO: add --set server.persistentVolume.storageClass=general
- helm install --name gf stable/grafana \
- --set server.service.nodePort=30330 \
- --set server.service.type=NodePort \
- --set server.adminPassword=admin \
- --set server.persistentVolume.enabled=false
+ trap 'fail' ERR
+ host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+ if [[ "$grafana" == "" ]]; then
+ if [[ "$how" == "docker" ]]; then
+ log "Setup grafana via docker"
+      sudo docker run -d --name=grafana -p 30330:3000 grafana/grafana
+ fi
+ if [[ "$how" == "helm" ]]; then
+ # TODO: use randomly generated password
+ # TODO: add persistent volume support
+ log "Setup grafana via Helm"
+ #TODO: add --set server.persistentVolume.storageClass=general
+ helm install --name gf stable/grafana \
+ --set service.nodePort=30330 \
+ --set service.type=NodePort \
+ --set adminPassword=admin \
+ --set persistentVolume.enabled=false
+ fi
+ grafana=$host_ip:30330
+ fi
log "Setup Grafana datasources and dashboards"
- host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
prometheus_ip=$host_ip
- grafana_ip=$host_ip
-
- while ! curl -X POST http://admin:admin@$grafana_ip:30330/api/login/ping ; do
+ if [[ "$creds" == "" ]]; then
+ creds="admin:admin"
+ fi
+ while ! curl -X POST http://$creds@$grafana/api/login/ping ; do
log "Grafana API is not yet responding... waiting 10 seconds"
sleep 10
done
@@ -108,11 +164,11 @@ function setup_grafana() {
"url":"http://$prometheus_ip:30990/", "basicAuth":false,"isDefault":true, \
"user":"", "password":"" }
EOF
- curl -X POST -o /tmp/json -u admin:admin -H "Accept: application/json" \
+  curl -X POST -o ~/tmp/json -H "Accept: application/json" \
-H "Content-type: application/json" \
- -d @datasources.json http://admin:admin@$grafana_ip:30330/api/datasources
+ -d @datasources.json http://$creds@$grafana/api/datasources
- if [[ "$(jq -r '.message' /tmp/json)" != "Datasource added" ]]; then
+ if [[ "$(jq -r '.message' ~/tmp/json)" != "Datasource added" ]]; then
fail "Datasource creation failed"
fi
log "Prometheus datasource for Grafana added"
@@ -126,39 +182,88 @@ EOF
cd $WORK_DIR/dashboards
boards=$(ls)
for board in $boards; do
- curl -X POST -u admin:admin -H "Accept: application/json" -H "Content-type: application/json" -d @${board} http://$grafana_ip:30330/api/dashboards/db
+    curl -X POST \
+ -H "Accept: application/json" -H "Content-type: application/json" \
+ -d @${board} http://$creds@$grafana/api/dashboards/db
done
log "Grafana dashboards are available at http://$host_ip:30330 (login as admin/admin)"
- echo "Grafana dashboards are available at http://$host_ip:30330 (login as admin/admin)" >>/tmp/summary
log "Grafana API is available at http://admin:admin@$host_ip:30330/api/v1/query?query=<string>"
- echo "Grafana API is available at http://admin:admin@$host_ip:30330/api/v1/query?query=<string>" >>/tmp/summary
log "connect_grafana complete"
}
+function clean_prometheus() {
+ if [[ "$how" == "docker" ]]; then
+ log "Clean prometheus via docker"
+ sudo docker stop prometheus
+ sudo docker rm -v prometheus
+ for agent in $agents ; do
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $dist@$agent sudo docker stop prometheus-node-exporter
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $dist@$agent sudo docker rm -v prometheus-node-exporter
+ done
+ fi
+ if [[ "$how" == "helm" ]]; then
+ log "Clean prometheus via Helm"
+ helm delete --purge pm
+ fi
+}
+
+function clean_grafana() {
+ if [[ "$grafana" == "" ]]; then
+ log "Delete grafana server"
+ host_ip=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+ grafana=$host_ip:30330
+ if [[ "$how" == "docker" ]]; then
+ sudo docker stop grafana
+ sudo docker rm grafana
+ fi
+ if [[ "$how" == "helm" ]]; then
+ helm delete gf
+ fi
+ else
+ if [[ "$creds" == "" ]]; then
+ creds="admin:admin"
+ fi
+ log "Delete prometheus datasource at grafana server"
+ curl -X DELETE http://$creds@$grafana/api/datasources/name/Prometheus
+ log "Delete prometheus dashboards at grafana server"
+ boards="docker-dashboard docker-host-and-container-overview node-exporter-server-metrics node-exporter-single-server"
+ for board in $boards; do
+ curl -X DELETE http://$creds@$grafana/api/dashboards/db/$board
+ done
+ fi
+}
+
export WORK_DIR=$(pwd)
+dist=$(grep -m 1 '^ID=' /etc/os-release | awk -F '=' '{print $2}' | sed 's/"//g')
+
+what=$2
+how=$3
case "$1" in
setup)
- setup_prometheus
- setup_grafana
+ if [[ "$what" == "prometheus" ]]; then
+ agents="$4"
+ setup_prometheus
+ fi
+ if [[ "$what" == "grafana" ]]; then
+ grafana="$4"
+ creds="$5"
+ setup_grafana
+ fi
;;
clean)
- sudo kill $(ps -ef | grep "\./prometheus" | grep prometheus.yml | awk '{print $2}')
- rm -rf ~/prometheus
- sudo docker stop grafana
- sudo docker rm grafana
- for node in $nodes; do
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$node "sudo kill $(ps -ef | grep ./node_exporter | awk '{print $2}')"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$node "rm -rf /home/ubuntu/node_exporter"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$node "sudo kill $(ps -ef | grep ./haproxy_exporter | awk '{print $2}')"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- ubuntu@$node "rm -rf /home/ubuntu/haproxy_exporter"
- done
+ if [[ "$what" == "prometheus" ]]; then
+ agents="$4"
+ clean_prometheus
+ fi
+ if [[ "$what" == "grafana" ]]; then
+ grafana="$4"
+ creds="$5"
+ clean_grafana
+ fi
;;
*)
grep '#. ' $0
esac
-cat /tmp/summary
diff --git a/tools/rancher/rancher-cluster.sh b/tools/rancher/rancher-cluster.sh
index 555b9bd..b312982 100644
--- a/tools/rancher/rancher-cluster.sh
+++ b/tools/rancher/rancher-cluster.sh
@@ -117,10 +117,10 @@ function install_cli_tools() {
export RANCHER_URL=http://$1:8080/v1
id=$(wget -qO- http://$1:8080/v2-beta/projects/ | jq -r '.data[0].id')
export RANCHER_ENVIRONMENT=$id
- curl -s -o /tmp/keys -X POST -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"accountId":"reference[account]", "description":"string", "name":"string", "publicValue":"string", "secretValue":"password"}' http://$1:8080/v2-beta/projects/$id/apikeys
-# curl -s -o /tmp/keys -X POST -H 'Accept: application/json' -H 'Content-Type: application/json' -d {"type":"apikey","accountId":"1a1","name":"admin","description":null,"created":null,"kind":null,"removed":null,"uuid":null} http://$1:8080/v2-beta/projects/$id/apikey
- export RANCHER_ACCESS_KEY=$(jq -r '.publicValue' /tmp/keys)
- export RANCHER_SECRET_KEY=$(jq -r '.secretValue' /tmp/keys)
+ curl -s -o ~/tmp/keys -X POST -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"accountId":"reference[account]", "description":"string", "name":"string", "publicValue":"string", "secretValue":"password"}' http://$1:8080/v2-beta/projects/$id/apikeys
+# curl -s -o ~/tmp/keys -X POST -H 'Accept: application/json' -H 'Content-Type: application/json' -d {"type":"apikey","accountId":"1a1","name":"admin","description":null,"created":null,"kind":null,"removed":null,"uuid":null} http://$1:8080/v2-beta/projects/$id/apikey
+ export RANCHER_ACCESS_KEY=$(jq -r '.publicValue' ~/tmp/keys)
+ export RANCHER_SECRET_KEY=$(jq -r '.secretValue' ~/tmp/keys)
# create the env file ~/.rancher/cli.json
rancher config <<EOF
$RANCHER_URL
@@ -132,12 +132,12 @@ EOF
log "Create registration token"
# added sleep to allow server time to be ready to create registration tokens (otherwise error is returned)
sleep 5
- curl -s -o /tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
- while [[ $(jq -r ".type" /tmp/token) != "registrationToken" ]]; do
+ curl -s -o ~/tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
+ while [[ $(jq -r ".type" ~/tmp/token) != "registrationToken" ]]; do
sleep 5
- curl -s -o /tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
+ curl -s -o ~/tmp/token -X POST -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -H 'Accept: application/json' -H 'Content-Type: application/json' -d '{"name":"master"}' http://$master/v1/registrationtokens
done
- id=$(jq -r ".id" /tmp/token)
+ id=$(jq -r ".id" ~/tmp/token)
log "registration token id=$id"
log "wait until registration command is created"