summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBryan Sullivan <bryan.sullivan@att.com>2017-12-14 21:36:55 -0800
committerBryan Sullivan <bryan.sullivan@att.com>2017-12-14 21:36:55 -0800
commitbd3c52aa5273e39edfa177d226d76b94c882ed0a (patch)
tree3dfeec9c8c5b9f28174803f9c63da3af57fd995e
parent6ebfb9bd88513d0e240615eb08f538d76982a295 (diff)
Fix various issues in single-node testing
JIRA: MODELS-2 Change-Id: I8ee88b7954a34f2731c9d4b6d9a37d1981d91731 Signed-off-by: Bryan Sullivan <bryan.sullivan@att.com>
-rw-r--r--tools/cloudify/k8s-cloudify.sh216
-rw-r--r--tools/kubernetes/demo_deploy.sh34
-rw-r--r--tools/kubernetes/k8s-cluster.sh14
3 files changed, 178 insertions, 86 deletions
diff --git a/tools/cloudify/k8s-cloudify.sh b/tools/cloudify/k8s-cloudify.sh
index 751ee32..6e0b7f2 100644
--- a/tools/cloudify/k8s-cloudify.sh
+++ b/tools/cloudify/k8s-cloudify.sh
@@ -18,6 +18,8 @@
#. git clone https://gerrit.opnfv.org/gerrit/models ~/models
#. - Kubernetes cluster installed per tools/kubernetes/demo_deploy.sh and
#. environment setup file ~/models/tools/k8s_env.sh as setup by demo_deploy.sh
+#. - Kubernetes environment variables set per the k8s_env_*.sh created by
+#. the demo_deploy.sh script (* is the hostname of the k8s master node).
#. Usage:
#. From a server with access to the kubernetes master node:
#. $ cd ~/models/tools/cloudify
@@ -46,7 +48,13 @@
#. $ ssh -x <user>@<k8s-master> bash cloudify/k8s-cloudify.sh clean
#. <user>: username on the target host. Also used to indicate OS name.
#. clean: uninstalls cloudify CLI and Manager
-
+#.
+#. If using this script to start/stop blueprints with multiple k8s environments,
#. before invoking the script, copy the k8s_env.sh script from the target
#. cluster to ~/k8s_env.sh, e.g.
+#. scp centos@sm-1:/home/centos/k8s_env.sh ~/k8s_env_sm-1.sh
+#. cp ~/k8s_env_sm-1.sh ~/k8s_env.sh
+#.
#. Status: this is a work in progress, under test.
function fail() {
@@ -92,6 +100,8 @@ EOF
function setup () {
cd ~/cloudify
+ source ~/k8s_env.sh
+ manager_ip=$k8s_master
log "Setup Cloudify-CLI"
# Per http://docs.getcloudify.org/4.1.0/installation/bootstrapping/#installing-cloudify-manager-in-an-offline-environment
# Installs into /opt/cfy/
@@ -199,6 +209,9 @@ function setup () {
function service_port() {
name=$1
+ manager_ip=$k8s_master
+ log "getting node port for service $name at manager $manager_ip"
+
tries=6
port="null"
while [[ "$port" == "null" && $tries -gt 0 ]]; do
@@ -225,6 +238,8 @@ function service_port() {
function start() {
name=$1
bp=$2
+ manager_ip=$k8s_master
+
log "start app $name with blueprint $bp"
log "copy kube config from k8s master for insertion into blueprint"
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
@@ -236,46 +251,66 @@ function start() {
log "upload the blueprint"
# CLI: cfy blueprints upload -t default_tenant -b $bp /tmp/$bp.tar.gz
- curl -s -X PUT -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/octet-stream" -o /tmp/json \
+ resp=$(curl -X PUT -s -w "%{http_code}" -o /tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ --header "Content-Type: application/octet-stream" \
http://$manager_ip/api/v3.1/blueprints/$bp?application_file_name=blueprint.yaml \
- -T /tmp/blueprint.tar
+ -T /tmp/blueprint.tar)
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat /tmp/json
+ fail "upload failed, response $resp"
+ fi
log "create a deployment for the blueprint"
# CLI: cfy deployments create -t default_tenant -b $bp $bp
- curl -s -X PUT -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/json" -o /tmp/json \
+ resp=$(curl -X PUT -s -w "%{http_code}" -o /tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -w "\nResponse: %{http_code}\n" \
+ --header "Content-Type: application/json" \
-d "{\"blueprint_id\": \"$bp\"}" \
- http://$manager_ip/api/v3.1/deployments/$bp
+ http://$manager_ip/api/v3.1/deployments/$bp)
+ # response code comes back as "\nResponse: <code>"
+ resp=$(echo $resp | awk '/Response/ {print $2}')
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat /tmp/json
+ fail "deployment failed, response $resp"
+ fi
sleep 10
# CLI: cfy workflows list -d $bp
log "install the deployment pod and service"
# CLI: cfy executions start install -d $bp
- curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
- --header "Content-Type: application/json" -o /tmp/json \
+ resp=$(curl -X POST -s -w "%{http_code}" -o /tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -w "\nResponse: %{http_code}\n" \
+ --header "Content-Type: application/json" \
-d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"install\"}" \
- http://$manager_ip/api/v3.1/executions
+ http://$manager_ip/api/v3.1/executions)
+ # response code comes back as "\nResponse: <code>"
+ resp=$(echo $resp | awk '/Response/ {print $2}')
+ if [[ "$resp" != "201" ]]; then
+ log "Response: $resp"
+ cat /tmp/json
+ fail "install failed, response $resp"
+ fi
log "get the service's assigned node_port"
port=""
service_port $name $manager_ip
log "verify service is responding"
- while ! curl -s http://$manager_ip:$port ; do
+ while ! curl -v http://$manager_ip:$port ; do
log "$name service is not yet responding at http://$manager_ip:$port, waiting 10 seconds"
sleep 10
done
log "service is active at http://$manager_ip:$port"
}
-function stop() {
- name=$1
- bp=$2
-
- # TODO: fix the need for this workaround
- log "try to first cancel all current executions"
+function cancel_executions() {
+ log "cancelling all active executions"
curl -s -u admin:admin --header 'Tenant: default_tenant' \
-o /tmp/json http://$manager_ip/api/v3.1/executions
i=0
@@ -301,7 +336,7 @@ function stop() {
http://$manager_ip/api/v3.1/executions | jq -r '.items[].status')
count=0
for status in $exs; do
- if [[ "$status" != "terminated" && "$status" != "cancelled" ]]; then
+ if [[ "$status" != "terminated" && "$status" != "cancelled" && "$status" != "failed" ]]; then
((count++))
fi
done
@@ -312,80 +347,118 @@ function stop() {
echo "$exs"
fail "running executions remain"
fi
+}
+
+function verify_deleted() {
+ log "verifying the resource is deleted: $1"
+ status=""
+ if [[ -f /tmp/vfy ]]; then rm /tmp/vfy; fi
+ r=$(curl -s -o /tmp/vfy -u admin:admin --header 'Tenant: default_tenant' $1)
+ log "Response: $r"
+ cat /tmp/vfy
+ status=$(cat /tmp/vfy | jq -r '.error_code')
+}
+
+function stop() {
+ name=$1
+ bp=$2
+ manager_ip=$k8s_master
+
+ # TODO: fix the need for this workaround
+ log "try to first cancel all current executions"
+ cancel_executions
# end workaround
log "uninstall the service"
- curl -s -X POST -u admin:admin --header 'Tenant: default_tenant' \
+ resp=$(curl -X POST -s -w "%{http_code}" -o /tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
--header "Content-Type: application/json" \
-d "{\"deployment_id\":\"$bp\", \"workflow_id\":\"uninstall\"}" \
- -o /tmp/json http://$manager_ip/api/v3.1/executions
- id=$(jq -r ".id" /tmp/json)
- log "uninstall execution id = $id"
- status=""
- tries=1
- while [[ "$status" != "terminated" && $tries -lt 10 ]]; do
- sleep 30
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/executions/$id
- status=$(jq -r ".status" /tmp/json)
- log "try $tries of 10: execution $id is $status"
- ((tries++))
- done
- if [[ $tries == 11 ]]; then
+ http://$manager_ip/api/v3.1/executions)
+ log "Response: $resp"
+ if [[ "$resp" != "201" ]]; then
+ log "uninstall action was not accepted"
cat /tmp/json
- fail "uninstall execution did not complete"
fi
- curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/executions/$id | jq
- count=1
- state=""
- tries=6
- while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
- sleep 10
+ id=$(jq -r ".id" /tmp/json)
+ if [[ "$id" != "null" ]]; then
+ log "wait for uninstall execution $id to be completed ('terminated')"
+ status=""
+ tries=1
+ while [[ "$status" != "terminated" && $tries -lt 10 ]]; do
+ sleep 30
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o /tmp/json http://$manager_ip/api/v3.1/executions/$id
+ status=$(jq -r ".status" /tmp/json)
+ log "try $tries of 10: execution $id is $status"
+ ((tries++))
+ done
+ if [[ $tries == 11 ]]; then
+ cat /tmp/json
+ fail "uninstall execution did not complete"
+ fi
curl -s -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/node-instances
- state=$(jq -r '.items[0].state' /tmp/json)
- ((tries--))
- done
- if [[ "$state" != "deleted" ]]; then
- jq -r '.items' /tmp/json
- fail "node-instances delete failed"
- fi
+ http://$manager_ip/api/v3.1/executions/$id | jq
- log "delete the deployment"
- curl -s -X DELETE -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/deployments/$bp
- log "verify the deployment is deleted"
- error=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/deployments/$bp | jq -r '.error_code')
- if [[ "$error" != "not_found_error" ]]; then
- log "force delete deployment via cfy CLI"
- ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
- $k8s_user@$manager_ip cfy deployment delete -f \
- -t default_tenant $bp
- error=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/deployments/$bp | jq -r '.error_code')
- if [[ "$error" != "not_found_error" ]]; then
+ count=1
+ state=""
+ tries=6
+ while [[ "$state" != "deleted" && $tries -gt 0 ]]; do
+ sleep 10
+ curl -s -u admin:admin --header 'Tenant: default_tenant' \
+ -o /tmp/json http://$manager_ip/api/v3.1/node-instances
+ state=$(jq -r '.items[0].state' /tmp/json)
+ ((tries--))
+ done
+ if [[ "$state" != "deleted" ]]; then
+ jq -r '.items' /tmp/json
+ # fail "node-instances delete failed"
+ fi
+
+ log "delete the deployment"
+ resp=$(curl -X DELETE -s -w "%{http_code}" -o /tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -o /tmp/json http://$manager_ip/api/v3.1/deployments/$bp)
+ log "Response: $resp"
+ cat /tmp/json
+ log "verify the deployment is deleted"
+ verify_deleted http://$manager_ip/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ log "force delete deployment via cfy CLI over ssh to $k8s_user@$manager_ip"
+ cancel_executions
+ ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ $k8s_user@$manager_ip cfy deployment delete -f -t default_tenant $bp
+ sleep 10
+ verify_deleted http://$manager_ip/api/v3.1/deployments/$bp
+ if [[ "$status" != "not_found_error" ]]; then
fail "deployment delete failed"
- fi
+ fi
+ fi
+ else
+ log "uninstall execution id = $id"
+ cat /tmp/json
fi
sleep 10
log "delete the blueprint"
- curl -s -X DELETE -u admin:admin --header 'Tenant: default_tenant' \
- -o /tmp/json http://$manager_ip/api/v3.1/blueprints/$bp
+ resp=$(curl -X DELETE -s -w "%{http_code}" -o /tmp/json \
+ -u admin:admin --header 'Tenant: default_tenant' \
+ -o /tmp/json http://$manager_ip/api/v3.1/blueprints/$bp)
+ log "Response: $resp"
sleep 10
log "verify the blueprint is deleted"
- error=$(curl -s -u admin:admin --header 'Tenant: default_tenant' \
- http://$manager_ip/api/v3.1/blueprints/$bp | jq -r '.error_code')
- if [[ "$error" != "not_found_error" ]]; then
+ verify_deleted http://$manager_ip/api/v3.1/blueprints/$bp
+ if [[ "$status" != "not_found_error" ]]; then
+ cat /tmp/json
fail "blueprint delete failed"
fi
log "blueprint deleted"
}
function demo() {
+ manager_ip=$k8s_master
+
# Per http://docs.getcloudify.org/4.1.0/plugins/container-support/
# Per https://github.com/cloudify-incubator/cloudify-kubernetes-plugin
# Also per guidance at https://github.com/cloudify-incubator/cloudify-kubernetes-plugin/issues/18
@@ -413,9 +486,8 @@ function clean () {
# TODO
}
+export WORK_DIR=$(pwd)
dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
-source ~/k8s_env.sh
-manager_ip=$k8s_master
case "$1" in
"prereqs")
@@ -428,7 +500,9 @@ case "$1" in
demo $2 $3
;;
"start")
+ cd ~/models/tools/cloudify/blueprints
start $2 $3
+ cd $WORK_DIR
;;
"stop")
stop $2 $3
diff --git a/tools/kubernetes/demo_deploy.sh b/tools/kubernetes/demo_deploy.sh
index 3ee7fff..f9b884c 100644
--- a/tools/kubernetes/demo_deploy.sh
+++ b/tools/kubernetes/demo_deploy.sh
@@ -42,6 +42,10 @@
#. worker, or folder (e.g. "/ceph")
#. <extras>: optional name of script for extra setup functions as needed
#.
+#. The script will create a k8s environment setup file specific to the master
+#. hostname, e.g. k8s_env_k8s-1.sh. This allows multiple deploys to be invoked
#. from the same admin server, by copying the relevant k8s_env_*.sh file
#. to ~/k8s_env.sh before invoking the per-cluster tools.
+#.
#. See tools/demo_deploy.sh in the OPNFV VES repo for additional environment
#. variables (mandatory/optional) for VES
@@ -70,12 +74,17 @@ EOF
extras=${10}
-# Note MAAS deploys OS's with default user same as OS name
-cat <<EOF >~/k8s_env.sh
+if [[ "$4" != "$5" ]]; then
+ k8s_master_host=$(echo $1 | cut -d ' ' -f 1)
+else
+ k8s_master_host=$1
+fi
+cat <<EOF >~/k8s_env_$k8s_master_host.sh
k8s_nodes="$1"
k8s_user=$2
k8s_key=$3
k8s_master=$4
+k8s_master_host=$k8s_master_host
k8s_workers="$5"
k8s_priv_net=$6
k8s_pub_net=$7
@@ -85,13 +94,14 @@ export k8s_nodes
export k8s_user
export k8s_key
export k8s_master
+export k8s_master_host
export k8s_workers
export k8s_priv_net
export k8s_pub_net
export k8s_ceph_mode
export k8s_ceph_dev
EOF
-source ~/k8s_env.sh
+source ~/k8s_env_$k8s_master_host.sh
env | grep k8s_
source ~/models/tools/maas/deploy.sh $k8s_user $k8s_key "$k8s_nodes" $extras
@@ -99,8 +109,8 @@ eval `ssh-agent`
ssh-add $k8s_key
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_key \
$k8s_user@$k8s_master:/home/$k8s_user/$k8s_key
-scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ~/k8s_env.sh \
- $k8s_user@$k8s_master:/home/$k8s_user/.
+scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ ~/k8s_env_$k8s_master_host.sh $k8s_user@$k8s_master:/home/$k8s_user/k8s_env.sh
echo; echo "$0 $(date): Setting up kubernetes master..."
scp -r -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
@@ -168,16 +178,16 @@ bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_user $k8s_master "$k8s_workers
step_end "bash $HOME/ves/tools/demo_deploy.sh $k8s_key $k8s_user $k8s_master \"$k8s_workers\""
echo; echo "$0 $(date): All done!"
-if [[ "$k8s_master" != "$k8s_workers" ]]; then
- export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
- export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
- echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
-fi
-port=$( bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx $k8s_master)
-echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+port=$(bash ~/models/tools/cloudify/k8s-cloudify.sh port nginx)
echo "Prometheus UI is available at http://$k8s_master:9090"
echo "InfluxDB API is available at http://$ves_influxdb_host/query&db=veseventsdb&q=<string>"
echo "Grafana dashboards are available at http://$ves_grafana_host (login as $ves_grafana_auth)"
echo "Grafana API is available at http://$ves_grafana_auth@$ves_grafana_host/api/v1/query?query=<string>"
echo "Kubernetes API is available at https://$k8s_master:6443/api/v1/"
echo "Cloudify API access example: curl -u admin:admin --header 'Tenant: default_tenant' http://$k8s_master/api/v3.1/status"
+echo "Cloudify-deployed demo app nginx is available at http://$k8s_master:$port"
+if [[ "$k8s_master" != "$k8s_workers" ]]; then
+ export NODE_PORT=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get --namespace default -o jsonpath="{.spec.ports[0].nodePort}" services dw-dokuwiki)
+ export NODE_IP=$(ssh -x -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no $k8s_user@$k8s_master kubectl get nodes --namespace default -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo "Helm chart demo app dokuwiki is available at http://$NODE_IP:$NODE_PORT/"
+fi
diff --git a/tools/kubernetes/k8s-cluster.sh b/tools/kubernetes/k8s-cluster.sh
index cac35e3..46750d0 100644
--- a/tools/kubernetes/k8s-cluster.sh
+++ b/tools/kubernetes/k8s-cluster.sh
@@ -63,6 +63,13 @@ function setup_prereqs() {
cat <<'EOG' >~/prereqs.sh
#!/bin/bash
# Basic server pre-reqs
+function wait_dpkg() {
+ # TODO: workaround for "E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily unavailable)"
+ echo; echo "waiting for dpkg to be unlocked"
+ while sudo fuser /var/{lib/{dpkg,apt/lists},cache/apt/archives}/lock >/dev/null 2>&1; do
+ sleep 1
+ done
+}
dist=$(grep --m 1 ID /etc/os-release | awk -F '=' '{print $2}' | sed 's/"//g')
if [[ $(grep -c $HOSTNAME /etc/hosts) -eq 0 ]]; then
echo; echo "prereqs.sh: ($(date)) Add $HOSTNAME to /etc/hosts"
@@ -73,10 +80,11 @@ fi
if [[ "$dist" == "ubuntu" ]]; then
# Per https://kubernetes.io/docs/setup/independent/install-kubeadm/
echo; echo "prereqs.sh: ($(date)) Basic prerequisites"
- sudo apt-get update
- sudo apt-get upgrade -y
+
+ wait_dpkg; sudo apt-get update
+ wait_dpkg; sudo apt-get upgrade -y
echo; echo "prereqs.sh: ($(date)) Install latest docker"
- sudo apt-get install -y docker.io
+ wait_dpkg; sudo apt-get install -y docker.io
# Alternate for 1.12.6
#sudo apt-get install -y libltdl7
#wget https://packages.docker.com/1.12/apt/repo/pool/main/d/docker-engine/docker-engine_1.12.6~cs8-0~ubuntu-xenial_amd64.deb