Diffstat (limited to 'tools/cloudify/k8s-cloudify.sh')
-rw-r--r--  tools/cloudify/k8s-cloudify.sh  504 ++++++++++++++++----------
 1 file changed, 305 insertions(+), 199 deletions(-)
diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index 6e0b7f2..f922880 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Copyright 2017 AT&T Intellectual Property, Inc
+# Copyright 2017-2018 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@
#. <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x <user>@<k8s-master> cloudify/k8s-cloudify.sh prereqs
#. <user>: username on the target host. Also used to indicate OS name.
+#. <k8s-master>: IP or hostname of kubernetes master server
#. prereqs: installs prerequisites and configures <user> user for kvm use
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh setup
#. <user>: username on the target host. Also used to indicate OS name.
@@ -35,26 +36,21 @@
#. $ bash k8s-cloudify.sh demo <start|stop>
#. demo: control demo blueprint
#. start|stop: start or stop the demo
-#. <k8s-master>: IP or hostname of kubernetes master server
-#. $ bash k8s-cloudify.sh <start|stop> <name> <blueprint>
+#. $ bash k8s-cloudify.sh <start|stop> <name> <blueprint> ["inputs"]
#. start|stop: start or stop the blueprint
#. name: name of the service in the blueprint
+#. inputs: optional JSON string to pass to Cloudify as deployment inputs
#. blueprint: name of the blueprint folder (in current directory!)
-#. <k8s-master>: IP or hostname of kubernetes master server
-#. $ bash k8s-cloudify.sh port <service> <k8s-master>
-#. port: find assigned node_port for service
+#. $ bash k8s-cloudify.sh nodePort <service>
+#. nodePort: find assigned nodePort for service
+#. service: name of service e.g. nginx
+#. $ bash k8s-cloudify.sh clusterIp <service>
+#. clusterIp: find assigned clusterIp for service
#. service: name of service e.g. nginx
-#. <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh clean
#. <user>: username on the target host. Also used to indicate OS name.
#. clean: uninstalls cloudify CLI and Manager
#.
-#. If using this script to start/stop blueprints with multiple k8s environments,
-#. before invoking the script copy the k8s_env.sh script from the target
-#. cluster and copy to ~/k8s_env.sh, e.g.
-#. scp centos@sm-1:/home/centos/k8s_env.sh ~/k8s_env_sm-1.sh
-#. cp ~/k8s_env_sm-1.sh ~/k8s_env.sh
-#.
#. Status: this is a work in progress, under test.
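#. Example invocations (illustrative only; the IP, username, and inputs JSON
#. keys are placeholders that depend on your environment and blueprint):
#.   ssh -x centos@<k8s-master> bash cloudify/k8s-cloudify.sh setup
#.   bash k8s-cloudify.sh start nginx k8s-hello-world '{"replicas": 2}'
#.   bash k8s-cloudify.sh nodePort nginx
#.   bash k8s-cloudify.sh clusterIp nginx
#.   bash k8s-cloudify.sh stop nginx k8s-hello-world
#.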
function fail() {
@@ -68,11 +64,30 @@ function log() {
echo; echo "$f:$l ($(date)) $1"
}
+function step_complete() {
+ end=$((`date +%s`/60))
+ runtime=$((end-start))
+ log "step completed in $runtime minutes: \"$step\""
+}
+
+function step_start() {
+ step="$1"
+ log "step start: \"$step\""
+ start=$((`date +%s`/60))
+}
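# Illustrative pairing of the helpers above: step_start and step_complete
# share the globals $step and $start to time a step in whole minutes, e.g.
#   step_start "install prerequisites"
#   ...work...
#   step_complete   # logs: step completed in N minutes: "install prerequisites"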
+
function prereqs() {
- log "Install prerequisites"
+ step_start "Install prerequisites"
if [[ "$USER" == "ubuntu" ]]; then
sudo apt-get install -y virtinst qemu-kvm libguestfs-tools virtualenv git \
python-pip
+ # workaround for virsh default network inactive
+ status=$(sudo virsh net-list --all | awk '/default/ {print $2}')
+ if [[ "$status" == "inactive" ]]; then
+ sudo ifconfig virbr0 down
+ sudo brctl delbr virbr0
+ sudo virsh net-start default
+ fi
else
# installing libvirt is needed to ensure default network is pre-created
sudo yum install -y libvirt
@@ -96,12 +111,14 @@ EOF
sudo usermod -a -G kvm $USER
sudo chmod 0644 /boot/vmlinuz*
sudo systemctl restart libvirtd
+ step_complete
}
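# Illustrative spot-checks after prereqs completes: the libvirt default
# network should be active and $USER should be in the kvm group, e.g.
#   sudo virsh net-list --all   # expect: default  active
#   groups | grep kvm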
function setup () {
+ step_start "setup"
cd ~/cloudify
source ~/k8s_env.sh
- manager_ip=$k8s_master
+  # k8s_master is set by ~/k8s_env.sh, sourced above
log "Setup Cloudify-CLI"
# Per http://docs.getcloudify.org/4.1.0/installation/bootstrapping/#installing-cloudify-manager-in-an-offline-environment
# Installs into /opt/cfy/
@@ -126,8 +143,8 @@ function setup () {
sudo systemctl start libvirtd
if [[ "$USER" == "centos" ]]; then
# copy image to folder that qemu has access to, to avoid: ERROR Cannot access storage file '/home/centos/cloudify/cloudify-manager-community-17.9.21.qcow2' (as uid:107, gid:107): Permission denied
- cp cloudify-manager-community-17.9.21.qcow2 /tmp/.
- img="/tmp/cloudify-manager-community-17.9.21.qcow2"
+    mkdir -p ~/tmp
+    cp cloudify-manager-community-17.9.21.qcow2 ~/tmp/.
+    img="$HOME/tmp/cloudify-manager-community-17.9.21.qcow2"
else
img="cloudify-manager-community-17.9.21.qcow2"
fi
@@ -156,6 +173,12 @@ function setup () {
done
cfy status
+  HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
+  log "Set iptables to forward $HOST_IP port 80 to Cloudify Manager VM at $VM_IP"
+ sudo iptables -t nat -I PREROUTING -p tcp -d $HOST_IP --dport 80 -j DNAT --to-destination $VM_IP:80
+ sudo iptables -I FORWARD -m state -d $VM_IP/32 --state NEW,RELATED,ESTABLISHED -j ACCEPT
+ sudo iptables -t nat -A POSTROUTING -j MASQUERADE
+
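# Illustrative check of the port-forward rules added above (rule listing
# output is environment-specific):
#   sudo iptables -t nat -L PREROUTING -n --line-numbers | grep 'dpt:80'
#   sudo iptables -L FORWARD -n | grep "$VM_IP"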
log "Install Cloudify Kubernetes Plugin"
# Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
# Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
@@ -182,19 +205,6 @@ function setup () {
| awk -F ' ' '{print $2}') kubernetes-admin_client_key_data
cfy secrets list
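# The secrets listed above can also be inspected or set by hand with the cfy
# CLI; a sketch, assuming the secret names used by this script:
#   cfy secrets get kubernetes-admin_client_key_data
#   cfy secrets create kubernetes-admin_client_key_data -s <base64-key-data>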
- # get manager VM IP
- VM_MAC=$(virsh domiflist cloudify-manager | grep default | grep -Eo "[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+:[0-9a-f\]+")
- VM_IP=$(/usr/sbin/arp -e | grep ${VM_MAC} | awk {'print $1'})
-
- # get host IP
- HOST_IP=$(ip route get 8.8.8.8 | awk '{print $NF; exit}')
-
- # Forward host port 80 to VM
- log "Setip iptables to forward $HOST_IP port 80 to Cloudify Manager VM at $VM_IP"
- sudo iptables -t nat -I PREROUTING -p tcp -d $HOST_IP --dport 80 -j DNAT --to-destination $VM_IP:80
- sudo iptables -I FORWARD -m state -d $VM_IP/32 --state NEW,RELATED,ESTABLISHED -j ACCEPT
- sudo iptables -t nat -A POSTROUTING -j MASQUERADE
-
# Access to the API via the primary interface, from the local host, is not
# working for some reason... skip this for now
# while ! curl -u admin:admin --header 'Tenant: default_tenant' http://$HOST_IP/api/v3.1/status ; do
@@ -205,126 +215,191 @@ function setup () {
log "Cloudify CLI log is at ~/.cloudify/logs/cli.log"
log "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$HOST_IP/api/v3.1/status"
log "Cloudify setup is complete!"
+ step_complete
}
-function service_port() {
+function cluster_ip() {
name=$1
- manager_ip=$k8s_master
- log "getting node port for service $name at manager $manager_ip"
+ log "getting clusterIp for service $name at manager $k8s_master"
tries=6
- port="null"
- while [[ "$port" == "null" && $tries -gt 0 ]]; do
+ svcId="null"
+ clusterIp="null"
+ while [[ "$clusterIp" == "null" && $tries -gt 0 ]]; do
curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/node-instances
- ni=$(jq -r '.items | length' /tmp/json)
+ -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
+ ni=$(jq -r '.items | length' ~/tmp/json)
while [[ $ni -ge 0 ]]; do
((ni--))
- id=$(jq -r ".items[$ni].id" /tmp/json)
- if [[ $id == $name\_service* ]]; then
- port=$(jq -r ".items[$ni].runtime_properties.kubernetes.spec.ports[0].node_port" /tmp/json)
- echo $port
+ depid=$(jq -r ".items[$ni].deployment_id" ~/tmp/json)
+ type=$(jq -r ".items[$ni].runtime_properties.kubernetes.kind" ~/tmp/json)
+ if [[ "$depid" == "$name" && "$type" == "Service" ]]; then
+ svcId=$ni
+ clusterIp=$(jq -r ".items[$ni].runtime_properties.kubernetes.spec.cluster_ip" ~/tmp/json)
+ if [[ "$clusterIp" != "null" ]]; then
+ echo "clusterIp=$clusterIp"
+ export clusterIp
+ fi
fi
done
sleep 10
((tries--))
done
- if [[ "$port" == "null" ]]; then
- jq -r '.items' /tmp/json
- fail "node_port not found for service"
+ if [[ "$clusterIp" == "null" ]]; then
+ log "node-instance resource for $name"
+ jq -r ".items[$svcId]" ~/tmp/json
+ log "clusterIp not found for service"
+ fi
+}
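# Illustrative use via the CLI wrapper, assuming a service named nginx was
# started earlier (the address shown is a placeholder):
#   $ bash k8s-cloudify.sh clusterIp nginx
#   clusterIp=10.104.181.12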
+
+function node_port() {
+ name=$1
+ log "getting nodePort for service $name at manager $k8s_master"
+
+ tries=6
+ nodePort="null"
+ while [[ "$nodePort" == "null" && $tries -gt 0 ]]; do
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
+    nodePort=$(jq -r "[.items[] | select(.node_id == \"${name}_service\") | .runtime_properties.kubernetes.spec.ports[0].node_port][0]" ~/tmp/json)
+ sleep 10
+ ((tries--))
+ done
+ if [[ "$nodePort" == "null" ]]; then
+    log "node-instance resources for debugging:"
+    jq -r ".items" ~/tmp/json
+ log "nodePort not found for service"
+ fi
+}
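# The nodePort can be cross-checked directly against kubernetes, bypassing
# the Cloudify API (a sketch, assuming kubectl access on the k8s master):
#   kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}'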
+
+function wait_terminated() {
+ name=$1
+ workflow=$2
+ log "waiting for $name execution $workflow to be completed ('terminated')"
+ status=""
+  tries=30
+  while [[ "$status" != "terminated" && $tries -gt 0 ]]; do
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/executions
+ ni=$(jq -r '.items | length' ~/tmp/json)
+ while [[ $ni -ge 0 ]]; do
+ ((ni--))
+ depid=$(jq -r ".items[$ni].deployment_id" ~/tmp/json)
+ wflid=$(jq -r ".items[$ni].workflow_id" ~/tmp/json)
+      if [[ "$depid" == "$name" && "$wflid" == "$workflow" ]]; then
+        status=$(jq -r ".items[$ni].status" ~/tmp/json)
+ id=$(jq -r ".items[$ni].id" ~/tmp/json)
+# curl -u admin:admin --header 'Tenant: default_tenant' \
+# http://$k8s_master/api/v3.1/executions/$id | jq
+ if [[ "$status" == "failed" ]]; then fail "execution failed"; fi
+ if [[ "$status" == "terminated" ]]; then break; fi
+ log "$name execution $workflow is $status... waiting 30 seconds"
+ fi
+ done
+    sleep 30
+    ((tries--))
+ done
+ if [[ "$status" == "terminated" ]]; then
+ log "$name execution $workflow is $status"
+ else
+ fail "timeout waiting for $name execution $workflow: status = $status"
fi
}
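# To watch execution status by hand while wait_terminated polls, the same
# API can be queried directly, e.g.:
#   curl -s -u admin:admin --header 'Tenant: default_tenant' \
#     http://$k8s_master/api/v3.1/executions | \
#     jq -r '.items[] | "\(.deployment_id) \(.workflow_id) \(.status)"'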
function start() {
name=$1
bp=$2
- manager_ip=$k8s_master
+ inputs="$3"
- log "start app $name with blueprint $bp"
+ step_start "start app $name with blueprint $bp and inputs: $inputs"
log "copy kube config from k8s master for insertion into blueprint"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$manager_ip:/home/$k8s_user/.kube/config $bp/kube.config
+ $k8s_user@$k8s_master:/home/$k8s_user/.kube/config $bp/kube.config
- log "package the blueprint"
- # CLI: cfy blueprints package -o /tmp/$bp $bp
- tar ckf /tmp/blueprint.tar $bp
+ log "package the blueprint"
+ # CLI: cfy blueprints package -o ~/tmp/$bp $bp
+ mkdir -p ~/tmp
+ tar ckf ~/tmp/blueprint.tar $bp
- log "upload the blueprint"
- # CLI: cfy blueprints upload -t default_tenant -b $bp /tmp/$bp.tar.gz
- resp=$(curl -X PUT -s -w "%{http_code}" -o /tmp/json \
- -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/octet-stream" \
- http://$manager_ip/api/v3.1/blueprints/$bp?application_file_name=blueprint.yaml \
- -T /tmp/blueprint.tar)
- if [[ "$resp" != "201" ]]; then
- log "Response: $resp"
- cat /tmp/json
- fail "upload failed, response $resp"
- fi
+ log "upload the blueprint"
+ # CLI: cfy blueprints upload -t default_tenant -b $bp ~/tmp/$bp.tar.gz
+ resp=$(curl -X PUT -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ --header "Content-Type: application/octet-stream" \
+ http://$k8s_master/api/v3.1/blueprints/$bp?application_file_name=blueprint.yaml \
+ -T ~/tmp/blueprint.tar)
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat ~/tmp/json
+ fail "upload failed, response $resp"
+ fi
- log "create a deployment for the blueprint"
- # CLI: cfy deployments create -t default_tenant -b $bp $bp
- resp=$(curl -X PUT -s -w "%{http_code}" -o /tmp/json \
+ log "create a deployment for the blueprint"
+ # CLI: cfy deployments create -t default_tenant -b $bp $bp
+  if [[ -n "$inputs" ]]; then
+ resp=$(curl -X PUT -s -w "%{http_code}" -o ~/tmp/json \
-u admin:admin --header 'Tenant: default_tenant' \
-w "\nResponse: %{http_code}\n" \
--header "Content-Type: application/json" \
- -d "{\"blueprint_id\": \"$bp\"}" \
- http://$manager_ip/api/v3.1/deployments/$bp)
- # response code comes back as "\nResponse: <code>"
- resp=$(echo $resp | awk '/Response/ {print $2}')
- if [[ "$resp" != "201" ]]; then
- log "Response: $resp"
- cat /tmp/json
- fail "deployment failed, response $resp"
- fi
- sleep 10
-
- # CLI: cfy workflows list -d $bp
-
- log "install the deployment pod and service"
- # CLI: cfy executions start install -d $bp
- resp=$(curl -X POST -s -w "%{http_code}" -o /tmp/json \
+ -d "{\"blueprint_id\": \"$bp\", \"inputs\": $inputs}" \
+ http://$k8s_master/api/v3.1/deployments/$bp)
+ else
+ resp=$(curl -X PUT -s -w "%{http_code}" -o ~/tmp/json \
-u admin:admin --header 'Tenant: default_tenant' \
-w "\nResponse: %{http_code}\n" \
--header "Content-Type: application/json" \
- -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"install\"}" \
- http://$manager_ip/api/v3.1/executions)
- # response code comes back as "\nResponse: <code>"
- resp=$(echo $resp | awk '/Response/ {print $2}')
- if [[ "$resp" != "201" ]]; then
- log "Response: $resp"
- cat /tmp/json
- fail "install failed, response $resp"
- fi
+ -d "{\"blueprint_id\": \"$bp\"}" \
+ http://$k8s_master/api/v3.1/deployments/$bp)
+ fi
+ # response code comes back as "\nResponse: <code>"
+ resp=$(echo $resp | awk '/Response/ {print $2}')
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat ~/tmp/json
+ fail "deployment failed, response $resp"
+ fi
+ sleep 10
- log "get the service's assigned node_port"
- port=""
- service_port $name $manager_ip
+ # CLI: cfy workflows list -d $bp
- log "verify service is responding"
- while ! curl -v http://$manager_ip:$port ; do
- log "$name service is not yet responding at http://$manager_ip:$port, waiting 10 seconds"
- sleep 10
- done
- log "service is active at http://$manager_ip:$port"
+ log "install the deployment pod and service"
+ # CLI: cfy executions start install -d $bp
+ resp=$(curl -X POST -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -w "\nResponse: %{http_code}\n" \
+ --header "Content-Type: application/json" \
+ -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"install\"}" \
+ http://$k8s_master/api/v3.1/executions)
+ # response code comes back as "\nResponse: <code>"
+ resp=$(echo $resp | awk '/Response/ {print $2}')
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat ~/tmp/json
+ fail "install failed, response $resp"
+ fi
+
+  wait_terminated $bp create_deployment_environment
+  wait_terminated $bp install
+ log "install actions completed"
+ step_complete
}
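# Illustrative: with inputs '{"port": 8080}' (a hypothetical blueprint input),
# the deployment-create call above sends the body:
#   {"blueprint_id": "k8s-hello-world", "inputs": {"port": 8080}}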
function cancel_executions() {
- log "cancelling all active executions"
+ log "workaround: cancelling all active executions prior to new execution"
curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/executions
+ -o ~/tmp/json http://$k8s_master/api/v3.1/executions
i=0
- exs=$(jq -r '.items[].status' /tmp/json)
+ exs=$(jq -r '.items[].status' ~/tmp/json)
for status in $exs; do
- id=$(jq -r ".items[$i].id" /tmp/json)
- log "execution $id in state $status"
+ id=$(jq -r ".items[$i].id" ~/tmp/json)
if [[ "$status" == "started" ]]; then
+ log "force cancelling execution $id in state $status"
id=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions | jq -r ".items[$i].id")
+ http://$k8s_master/api/v3.1/executions | jq -r ".items[$i].id")
curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
--header "Content-Type: application/json" \
-d "{\"deployment_id\": \"$bp\", \"action\": \"force-cancel\"}" \
- http://$manager_ip/api/v3.1/executions/$id
+ http://$k8s_master/api/v3.1/executions/$id
fi
((i++))
done
@@ -333,7 +408,7 @@ function cancel_executions() {
while [[ $count -gt 0 && $tries -gt 0 ]]; do
sleep 10
exs=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions | jq -r '.items[].status')
+ http://$k8s_master/api/v3.1/executions | jq -r '.items[].status')
count=0
for status in $exs; do
if [[ "$status" != "terminated" && "$status" != "cancelled" && "$status" != "failed" ]]; then
@@ -349,115 +424,142 @@ function cancel_executions() {
fi
}
-function verify_deleted() {
- log "verifying the resource is deleted: $1"
+function check_resource() {
+ log "checking for presence of resource: $1"
status=""
- if [[ -f /tmp/vfy ]]; then rm /tmp/vfy; fi
- r=$(curl -s -o /tmp/vfy -u admin:admin --header 'Tenant: default_tenant' $1)
+ if [[ -f ~/tmp/vfy ]]; then rm ~/tmp/vfy; fi
+ r=$(curl -s -o ~/tmp/vfy -u admin:admin --header 'Tenant: default_tenant' $1)
log "Response: $r"
- cat /tmp/vfy
- status=$(cat /tmp/vfy | jq -r '.error_code')
+# cat ~/tmp/vfy
+ status=$(cat ~/tmp/vfy | jq -r '.error_code')
}
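# check_resource reports via the global $status; typical caller pattern, as
# used in stop() below:
#   check_resource http://$k8s_master/api/v3.1/deployments/$bp
#   if [[ "$status" == "not_found_error" ]]; then log "already deleted"; fi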
function stop() {
name=$1
bp=$2
- manager_ip=$k8s_master
+ step_start "stopping $name with blueprint $bp"
# TODO: fix the need for this workaround
- log "try to first cancel all current executions"
+ log "workaround: try to first cancel all current executions"
cancel_executions
# end workaround
- log "uninstall the service"
- resp=$(curl -X POST -s -w "%{http_code}" -o /tmp/json \
- -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/json" \
- -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
- http://$manager_ip/api/v3.1/executions)
- log "Response: $resp"
- if [[ "$resp" != "201" ]]; then
- log "uninstall action was not accepted"
- cat /tmp/json
- fi
-
- id=$(jq -r ".id" /tmp/json)
- if [[ "$id" != "null" ]]; then
- log "wait for uninstall execution $id to be completed ('terminated')"
- status=""
- tries=1
- while [[ "$status" != "terminated" && $tries -lt 10 ]]; do
- sleep 30
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/executions/$id
- status=$(jq -r ".status" /tmp/json)
- log "try $tries of 10: execution $id is $status"
- ((tries++))
- done
- if [[ $tries == 11 ]]; then
- cat /tmp/json
- fail "uninstall execution did not complete"
+ log "verify $name deployment is present"
+ check_resource http://$k8s_master/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "initiate uninstall action for $name deployment"
+ resp=$(curl -X POST -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ --header "Content-Type: application/json" \
+ -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
+ http://$k8s_master/api/v3.1/executions)
+ log "Response: $resp"
+ if [[ "$resp" != "201" ]]; then
+ log "uninstall action was not accepted"
+ cat ~/tmp/json
fi
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions/$id | jq
- count=1
- state=""
- tries=6
- while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
- sleep 10
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/node-instances
- state=$(jq -r '.items[0].state' /tmp/json)
- ((tries--))
- done
- if [[ "$state" != "deleted" ]]; then
- jq -r '.items' /tmp/json
- # fail "node-instances delete failed"
+ id=$(jq -r ".id" ~/tmp/json)
+ if [[ "$id" != "null" ]]; then
+ log "wait for uninstall execution $id to be completed ('terminated')"
+ status=""
+ tries=10
+ while [[ "$status" != "terminated" && $tries -gt 0 ]]; do
+ if [[ "$status" == "failed" ]]; then break; fi
+ sleep 30
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/executions/$id
+ status=$(jq -r ".status" ~/tmp/json)
+ log "execution $id is $status"
+ ((tries--))
+ done
+ if [[ "$status" == "failed" || $tries == 0 ]]; then
+ cat ~/tmp/json
+ log "uninstall execution did not complete"
+ else
+ log "wait for node instances to be deleted"
+ state=""
+ tries=18
+ while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
+ sleep 10
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
+ ni=$(jq -r '.items | length' ~/tmp/json)
+ state="deleted"
+ while [[ $ni -ge 0 ]]; do
+          nistate=$(jq -r ".items[$ni].state" ~/tmp/json)
+          depid=$(jq -r ".items[$ni].deployment_id" ~/tmp/json)
+          if [[ "$depid" == "$bp" && "$nistate" != "deleted" ]]; then
+ state=""
+ id=$(jq -r ".items[$ni].id" ~/tmp/json)
+ log "waiting on deletion of node instance $id for $name"
+ fi
+ ((ni--))
+ done
+ ((tries--))
+ done
+ if [[ "$state" != "deleted" ]]; then
+# jq -r '.items' ~/tmp/json
+ log "node-instances delete did not complete"
+ fi
+ fi
+# curl -s -u admin:admin --header 'Tenant: default_tenant' \
+# http://$k8s_master/api/v3.1/executions/$id | jq
+
+ log "delete the $name deployment"
+ resp=$(curl -X DELETE -s -w "%{http_code}" -o ~/tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -o ~/tmp/json http://$k8s_master/api/v3.1/deployments/$bp)
+ log "Response: $resp"
+# cat ~/tmp/json
+ log "verify the $name deployment is deleted"
+ check_resource http://$k8s_master/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "force delete $name deployment via cfy CLI over ssh to $k8s_user@$k8s_master"
+ cancel_executions
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$k8s_master cfy deployment delete -f -t default_tenant $bp
+ sleep 10
+ check_resource http://$k8s_master/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ fail "deployment $name delete failed"
+ fi
+ fi
+ else
+ log "uninstall execution id = $id"
+ cat ~/tmp/json
fi
+ else
+ log "$name deployment not found"
+ fi
- log "delete the deployment"
- resp=$(curl -X DELETE -s -w "%{http_code}" -o /tmp/json \
+ log "verify $bp blueprint is present"
+ check_resource http://$k8s_master/api/v3.1/blueprints/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "delete the $bp blueprint"
+ resp=$(curl -X DELETE -s -w "%{http_code}" -o ~/tmp/json \
-u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/deployments/$bp)
+ -o ~/tmp/json http://$k8s_master/api/v3.1/blueprints/$bp)
log "Response: $resp"
- cat /tmp/json
- log "verify the deployment is deleted"
- verify_deleted http://$manager_ip/api/v3.1/deployments/$bp
- if [[ "$status" != "not_found_error" ]]; then
- log "force delete deployment via cfy CLI over ssh to $k8s_user@$manager_ip"
- cancel_executions
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$manager_ip cfy deployment delete -f -t default_tenant $bp
+
+  if [[ "$resp" != "404" ]]; then
sleep 10
- verify_deleted http://$manager_ip/api/v3.1/deployments/$bp
+ log "verify the blueprint is deleted"
+ check_resource http://$k8s_master/api/v3.1/blueprints/$bp
if [[ "$status" != "not_found_error" ]]; then
- fail "deployment delete failed"
+ cat ~/tmp/json
+ fail "blueprint delete failed"
fi
fi
+ log "blueprint $bp deleted"
else
- log "uninstall execution id = $id"
- cat /tmp/json
+ log "$bp blueprint not found"
fi
-
- sleep 10
- log "delete the blueprint"
- resp=$(curl -X DELETE -s -w "%{http_code}" -o /tmp/json \
- -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/blueprints/$bp)
- log "Response: $resp"
- sleep 10
- log "verify the blueprint is deleted"
- verify_deleted http://$manager_ip/api/v3.1/blueprints/$bp
- if [[ "$status" != "not_found_error" ]]; then
- cat /tmp/json
- fail "blueprint delete failed"
- fi
- log "blueprint deleted"
+ step_complete
}
function demo() {
- manager_ip=$k8s_master
+  step_start "$1 nginx app demo via Cloudify Manager at $k8s_master"
# Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
# Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
@@ -469,17 +571,18 @@ function demo() {
cd ~/models/tools/cloudify/blueprints
if [[ "$1" == "start" ]]; then
- start nginx k8s-hello-world $manager_ip
+ start nginx k8s-hello-world
else
- stop nginx k8s-hello-world $manager_ip
+ stop nginx k8s-hello-world
fi
+ step_complete
}
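# Illustrative end-to-end demo run, after setup has completed:
#   bash k8s-cloudify.sh demo start
#   bash k8s-cloudify.sh nodePort nginx
#   bash k8s-cloudify.sh demo stop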
# API examples: use '| jq' to format JSON output
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/blueprints | jq
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/deployments | jq
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/executions | jq
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/deployments | jq -r '.items[0].blueprint_id'
-# curl -u admin:admin --header 'Tenant: default_tenant' http://$manager_ip/api/v3.1/node-instances | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/blueprints | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/deployments | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/executions | jq
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/deployments | jq -r '.items[0].blueprint_id'
+# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/node-instances | jq
function clean () {
log "Cleanup cloudify"
@@ -501,14 +604,17 @@ case "$1" in
;;
"start")
cd ~/models/tools/cloudify/blueprints
- start $2 $3
+ start $2 $3 "$4"
cd $WORK_DIR
;;
"stop")
stop $2 $3
;;
- "port")
- service_port $2
+ "nodePort")
+ node_port $2
+ ;;
+ "clusterIp")
+ cluster_ip $2
;;
"clean")
clean