#!/bin/bash
# Copyright 2017 AT&T Intellectual Property, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.
#
#. What this is: Setup script for Cloudify use with Kubernetes.
#. Prerequisites:
#. - OPNFV Models repo cloned into ~/models, i.e.
#.   git clone https://gerrit.opnfv.org/gerrit/models ~/models
#. - Kubernetes cluster installed per tools/kubernetes/demo_deploy.sh and
#.   environment setup file ~/models/tools/k8s_env.sh as setup by demo_deploy.sh
#. - Kubernetes environment variables set per the k8s_env_*.sh created by
#.   the demo_deploy.sh script (* is the hostname of the k8s master node).
#. Usage:
#. From a server with access to the kubernetes master node:
#. $ cd ~/models/tools/cloudify
#. $ scp -r ~/models/tools/* <user>@<k8s-master>:/home/<user>/.
#.   <user>: username on the target host. Also used to indicate OS name.
#.   <k8s-master>: IP or hostname of kubernetes master server
#. $ ssh -x <user>@<k8s-master> cloudify/k8s-cloudify.sh prereqs
#.   prereqs: installs prerequisites and configures user for kvm use
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh setup
#.   setup: installs cloudify CLI and Manager
#. $ bash k8s-cloudify.sh demo start|stop
#.   demo: control demo blueprint
#.   start|stop: start or stop the demo
#. $ bash k8s-cloudify.sh start|stop <name> <blueprint> ["inputs"]
#.   start|stop: start or stop the blueprint
#.   name: name of the service in the blueprint
#.   blueprint: name of the blueprint folder (in current directory!)
#.   inputs: optional JSON string to pass to Cloudify as deployment inputs
#. $ bash k8s-cloudify.sh nodePort <service>
#.   nodePort: find assigned nodePort for service
#.   service: name of service e.g. nginx
#. $ bash k8s-cloudify.sh clusterIp <service>
#.   clusterIp: find assigned clusterIp for service
#.   service: name of service e.g. nginx
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh clean
#.   clean: uninstalls cloudify CLI and Manager
#.
#. If using this script to start/stop blueprints with multiple k8s environments,
#. before invoking the script copy the k8s_env.sh script from the target
#. cluster and copy to ~/k8s_env.sh, e.g.
#.   scp centos@sm-1:/home/centos/k8s_env.sh ~/k8s_env_sm-1.sh
#.   cp ~/k8s_env_sm-1.sh ~/k8s_env.sh
#.
#. Status: this is a work in progress, under test.

# Log a fatal error and abort the script.
# Arguments: $1 - message to log
function fail() {
  log "$1"
  exit 1
}

# Log a message prefixed with calling function, line number, and timestamp.
# Arguments: $1 - message to log
function log() {
  f=$(caller 0 | awk '{print $2}')
  l=$(caller 0 | awk '{print $1}')
  echo; echo "$f:$l ($(date)) $1"
}

# Log the elapsed time (in minutes) since the matching step_start call.
# Globals: reads $start and $step set by step_start.
function step_complete() {
  end=$(($(date +%s)/60))
  runtime=$((end-start))
  log "step completed in $runtime minutes: \"$step\""
}

# Record the start of a named step for later timing via step_complete.
# Arguments: $1 - step description
# Globals: sets $step and $start (epoch minutes).
function step_start() {
  step="$1"
  log "step start: \"$step\""
  start=$(($(date +%s)/60))
}

# Install host prerequisites (kvm, libguestfs, git, pip, virtualenv) and
# configure $USER for kvm/libvirt use. Branches on $USER to pick apt vs yum.
function prereqs() {
  step_start "Install prerequisites"
  if [[ "$USER" == "ubuntu" ]]; then
    sudo apt-get install -y virtinst qemu-kvm libguestfs-tools virtualenv git \
      python-pip
    # workaround for virsh default network inactive
    status=$(sudo virsh net-list --all | awk '/default/ {print $2}')
    if [[ "$status" == "inactive" ]]; then
      sudo ifconfig virbr0 down
      sudo brctl delbr virbr0
      sudo virsh net-start default
    fi
  else
    # installing libvirt is needed to ensure default network is pre-created
    sudo yum install -y libvirt
    sudo virsh net-define /usr/share/libvirt/networks/default.xml
    sudo yum install -y virt-install
    sudo yum install -y qemu-kvm libguestfs-tools git python-pip
    sudo pip install virtualenv
  fi
  log "Setup $USER for kvm use"
  # Per http://libguestfs.org/guestfs-faq.1.html
  # workaround for virt-customize warning: current user is not a member of
  # the KVM group, so libguestfs may run very slowly without this.
  # Also see: https://help.ubuntu.com/community/KVM/Installation
  # also to avoid permission denied errors in guestfish, from
  # http://manpages.ubuntu.com/manpages/zesty/man1/guestfs-faq.1.html
  sudo groupadd -g 7777 libvirt
  sudo usermod -aG libvirt $USER
  id $USER | grep libvirt
  # NOTE(review): the heredoc content below was lost in the mangled source;
  # the settings here grant the libvirt group socket access, which matches
  # the stated intent — verify against the upstream OPNFV models repo.
  sudo tee -a /etc/libvirt/libvirtd.conf <<'EOF'
unix_sock_group = "libvirt"
unix_sock_rw_perms = "0770"
EOF
  step_complete
}

# NOTE(review): setup() (cloudify CLI and Manager install) was lost in the
# mangled source. Fail fast with a clear message rather than "command not
# found"; restore the implementation from the upstream OPNFV models repo.
function setup() {
  fail "setup() implementation was lost in the source; restore from upstream"
}

# NOTE(review): wait_terminated() (poll an execution's workflow until the
# 'terminated' state) was lost in the mangled source; restore from upstream.
# Arguments: $1 - deployment name, $2 - workflow id
function wait_terminated() {
  fail "wait_terminated() implementation was lost; restore from upstream"
}

# Start (deploy + install) a blueprint via the Cloudify Manager REST API.
# Arguments: $1 - service name, $2 - blueprint name, $3 - optional JSON inputs
# NOTE(review): the head of this function (blueprint upload and deployment
# creation POST, which sets $resp and ~/tmp/json) was lost in the mangled
# source; the surviving tail is preserved below. Restore the head from the
# upstream OPNFV models repo before use.
function start() {
  name=$1
  bp=$2
  inputs="$3"
  step_start "start $name with blueprint $bp"
  # TODO(review): lost code here uploaded the blueprint and POSTed the
  # deployment creation request; $resp below comes from that request.
  # response code comes back as "\nResponse: <code>"
  resp=$(echo $resp | awk '/Response/ {print $2}')
  if [[ "$resp" != "201" ]]; then
    log "Response: $resp"
    cat ~/tmp/json
    fail "deployment failed, response $resp"
  fi
  sleep 10
  # CLI: cfy workflows list -d $bp

  log "install the deployment pod and service"
  # CLI: cfy executions start install -d $bp
  resp=$(curl -X POST -s -w "%{http_code}" -o ~/tmp/json \
    -u admin:admin --header 'Tenant: default_tenant' \
    -w "\nResponse: %{http_code}\n" \
    --header "Content-Type: application/json" \
    -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"install\"}" \
    http://$k8s_master/api/v3.1/executions)
  # response code comes back as "\nResponse: <code>"
  resp=$(echo $resp | awk '/Response/ {print $2}')
  if [[ "$resp" != "201" ]]; then
    log "Response: $resp"
    cat ~/tmp/json
    fail "install failed, response $resp"
  fi
  wait_terminated $name create_deployment_environment
  wait_terminated $name install
  log "install actions completed"
  step_complete
}

# Force-cancel all 'started' executions on the Manager, then poll until no
# active (non terminated/cancelled/failed) executions remain.
# Globals: reads $k8s_master, $bp.
function cancel_executions() {
  log "workaround: cancelling all active executions prior to new execution"
  curl -s -u admin:admin --header 'Tenant: default_tenant' \
    -o ~/tmp/json http://$k8s_master/api/v3.1/executions
  i=0
  exs=$(jq -r '.items[].status' ~/tmp/json)
  for status in $exs; do
    id=$(jq -r ".items[$i].id" ~/tmp/json)
    if [[ "$status" == "started" ]]; then
      log "force cancelling execution $id in state $status"
      id=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
        http://$k8s_master/api/v3.1/executions | jq -r ".items[$i].id")
      curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
        --header "Content-Type: application/json" \
        -d "{\"deployment_id\": \"$bp\", \"action\": \"force-cancel\"}" \
        http://$k8s_master/api/v3.1/executions/$id
    fi
    ((i++))
  done
  # Poll up to ~1 minute for the cancellations to take effect.
  tries=6
  count=1
  while [[ $count -gt 0 && $tries -gt 0 ]]; do
    sleep 10
    exs=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
      http://$k8s_master/api/v3.1/executions | jq -r '.items[].status')
    count=0
    for status in $exs; do
      if [[ "$status" != "terminated" && "$status" != "cancelled" && \
            "$status" != "failed" ]]; then
        ((count++))
      fi
    done
    ((tries--))
    log "$count active executions remain"
  done
  if [[ $count -gt 0 ]]; then
    echo "$exs"
    fail "running executions remain"
  fi
}

# Query a Manager REST resource and expose its error_code (if any).
# Arguments: $1 - full resource URL
# Globals: sets $status ("not_found_error" when the resource is absent,
# "null" when present with no error); response body left in ~/tmp/vfy.
function check_resource() {
  log "checking for presence of resource: $1"
  status=""
  if [[ -f ~/tmp/vfy ]]; then rm ~/tmp/vfy; fi
  r=$(curl -s -o ~/tmp/vfy -u admin:admin --header 'Tenant: default_tenant' $1)
  log "Response: $r"
  # cat ~/tmp/vfy
  status=$(cat ~/tmp/vfy | jq -r '.error_code')
}

# Stop a service: uninstall its deployment, wait for node-instance deletion,
# delete the deployment (forcing via cfy CLI over ssh if needed), then delete
# the blueprint.
# Arguments: $1 - service name, $2 - blueprint/deployment name
# Globals: reads $k8s_master, $k8s_user.
function stop() {
  name=$1
  bp=$2
  step_start "stopping $name with blueprint $bp"
  # TODO: fix the need for this workaround
  log "workaround: try to first cancel all current executions"
  cancel_executions
  # end workaround
  log "verify $name deployment is present"
  check_resource http://$k8s_master/api/v3.1/deployments/$bp
  if [[ "$status" != "not_found_error" ]]; then
    log "initiate uninstall action for $name deployment"
    resp=$(curl -X POST -s -w "%{http_code}" -o ~/tmp/json \
      -u admin:admin --header 'Tenant: default_tenant' \
      --header "Content-Type: application/json" \
      -d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
      http://$k8s_master/api/v3.1/executions)
    log "Response: $resp"
    if [[ "$resp" != "201" ]]; then
      log "uninstall action was not accepted"
      cat ~/tmp/json
    fi
    id=$(jq -r ".id" ~/tmp/json)
    if [[ "$id" != "null" ]]; then
      log "wait for uninstall execution $id to be completed ('terminated')"
      status=""
      tries=10
      while [[ "$status" != "terminated" && $tries -gt 0 ]]; do
        if [[ "$status" == "failed" ]]; then break; fi
        sleep 30
        curl -s -u admin:admin --header 'Tenant: default_tenant' \
          -o ~/tmp/json http://$k8s_master/api/v3.1/executions/$id
        status=$(jq -r ".status" ~/tmp/json)
        log "execution $id is $status"
        ((tries--))
      done
      if [[ "$status" == "failed" || $tries == 0 ]]; then
        cat ~/tmp/json
        log "uninstall execution did not complete"
      else
        log "wait for node instances to be deleted"
        state=""
        tries=18
        while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
          sleep 10
          curl -s -u admin:admin --header 'Tenant: default_tenant' \
            -o ~/tmp/json http://$k8s_master/api/v3.1/node-instances
          ni=$(jq -r '.items | length' ~/tmp/json)
          state="deleted"
          while [[ $ni -ge 0 ]]; do
            state=$(jq -r ".items[$ni].state" ~/tmp/json)
            depid=$(jq -r ".items[$ni].deployment_id" ~/tmp/json)
            if [[ "$depid" == "$name" && "$state" != "deleted" ]]; then
              state=""
              id=$(jq -r ".items[$ni].id" ~/tmp/json)
              log "waiting on deletion of node instance $id for $name"
            fi
            ((ni--))
          done
          ((tries--))
        done
        if [[ "$state" != "deleted" ]]; then
          # jq -r '.items' ~/tmp/json
          log "node-instances delete did not complete"
        fi
      fi
      # curl -s -u admin:admin --header 'Tenant: default_tenant' \
      #   http://$k8s_master/api/v3.1/executions/$id | jq
      log "delete the $name deployment"
      # NOTE(review): removed a duplicated '-o ~/tmp/json' from this curl.
      resp=$(curl -X DELETE -s -w "%{http_code}" -o ~/tmp/json \
        -u admin:admin --header 'Tenant: default_tenant' \
        http://$k8s_master/api/v3.1/deployments/$bp)
      log "Response: $resp"
      # cat ~/tmp/json
      log "verify the $name deployment is deleted"
      check_resource http://$k8s_master/api/v3.1/deployments/$bp
      if [[ "$status" != "not_found_error" ]]; then
        log "force delete $name deployment via cfy CLI over ssh to $k8s_user@$k8s_master"
        cancel_executions
        ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
          $k8s_user@$k8s_master cfy deployment delete -f -t default_tenant $bp
        sleep 10
        check_resource http://$k8s_master/api/v3.1/deployments/$bp
        if [[ "$status" != "not_found_error" ]]; then
          fail "deployment $name delete failed"
        fi
      fi
    else
      log "uninstall execution id = $id"
      cat ~/tmp/json
    fi
  else
    log "$name deployment not found"
  fi

  log "verify $bp blueprint is present"
  check_resource http://$k8s_master/api/v3.1/blueprints/$bp
  if [[ "$status" != "not_found_error" ]]; then
    log "delete the $bp blueprint"
    # NOTE(review): removed a duplicated '-o ~/tmp/json' from this curl.
    resp=$(curl -X DELETE -s -w "%{http_code}" -o ~/tmp/json \
      -u admin:admin --header 'Tenant: default_tenant' \
      http://$k8s_master/api/v3.1/blueprints/$bp)
    log "Response: $resp"
    # BUG FIX: this previously tested "$response", a variable that is never
    # set, so the branch always ran; the DELETE status is in "$resp".
    if [[ "$resp" != "404" ]]; then
      sleep 10
      log "verify the blueprint is deleted"
      check_resource http://$k8s_master/api/v3.1/blueprints/$bp
      if [[ "$status" != "not_found_error" ]]; then
        cat ~/tmp/json
        fail "blueprint delete failed"
      fi
    fi
    log "blueprint $bp deleted"
  else
    log "$bp blueprint not found"
  fi
  step_complete
}

# Start or stop the nginx hello-world demo blueprint.
# Arguments: $1 - "start" or "stop"
function demo() {
  # typo fix: was "Cloudyify"
  step_start "$1 nginx app demo via Cloudify Manager at $k8s_master"
  # Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
  # Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
  # Also per guidance at https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/issues/18
  # echo "master-ip: $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $1}')" >~/cloudify/blueprints/k8s-hello-world/inputs.yaml
  # echo "master-port: $(grep server ~/.kube/config | awk -F '/' '{print $3}' | awk -F ':' '{print $2}')" >>~/cloudify/blueprints/k8s-hello-world/inputs.yaml
  # echo "file_content:" >>~/cloudify/blueprints/k8s-hello-world/inputs.yaml
  # sed 's/^/  /' ~/.kube/config | tee -a ~/cloudify/blueprints/k8s-hello-world/inputs.yaml
  cd ~/models/tools/cloudify/blueprints
  if [[ "$1" == "start" ]]; then
    start nginx k8s-hello-world
  else
    stop nginx k8s-hello-world
  fi
  step_complete
}

# NOTE(review): node_port() and cluster_ip() (service address lookups invoked
# by the "nodePort"/"clusterIp" dispatcher cases) were lost in the mangled
# source; fail fast rather than "command not found". Restore from upstream.
function node_port() {
  fail "node_port() implementation was lost; restore from upstream"
}

function cluster_ip() {
  fail "cluster_ip() implementation was lost; restore from upstream"
}

# API examples: use '| jq' to format JSON output
# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/blueprints | jq
# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/deployments | jq
# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/executions | jq
# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/deployments | jq -r '.items[0].blueprint_id'
# curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/node-instances | jq

# Uninstall cloudify CLI and Manager.
function clean () {
  log "Cleanup cloudify"
  # TODO
}

export WORK_DIR=$(pwd)
dist=$(grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}')

# Dispatch on the first CLI argument; unknown commands print the usage
# comments (lines tagged "#. ") from this file.
case "$1" in
  "prereqs")
    prereqs
    ;;
  "setup")
    setup
    ;;
  "demo")
    demo $2 $3
    ;;
  "start")
    cd ~/models/tools/cloudify/blueprints
    start $2 $3 "$4"
    cd $WORK_DIR
    ;;
  "stop")
    stop $2 $3
    ;;
  "nodePort")
    node_port $2
    ;;
  "clusterIp")
    cluster_ip $2
    ;;
  "clean")
    clean
    ;;
  *)
    grep '#. ' $0
esac