From b8fa06444772f33fcc6e5fdd2543b576c72f1dfa Mon Sep 17 00:00:00 2001
From: blsaws
Date: Wed, 31 Aug 2016 14:59:38 -0700
Subject: Tacker installation script baseline.

JIRA: MODELS-23
Edits for Cloudify CLI test.

Change-Id: I28ba88fa27d309d00167249e8255b521b514cc0c
Signed-off-by: blsaws
---
 tests/utils/cloudify-setup.sh |   2 +-
 tests/utils/tacker-setup.sh   | 251 ++++++++++++++++++++++++++++++++++++++++++
 tests/vHello.sh               | 107 +++++++-----------
 3 files changed, 294 insertions(+), 66 deletions(-)
 create mode 100644 tests/utils/tacker-setup.sh

diff --git a/tests/utils/cloudify-setup.sh b/tests/utils/cloudify-setup.sh
index 492e617..71b0822 100644
--- a/tests/utils/cloudify-setup.sh
+++ b/tests/utils/cloudify-setup.sh
@@ -90,7 +90,7 @@ function get_external_net () {
 dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
 if [ "$2" == "1" ]; then
   echo "$0: Copy this script to /tmp/cloudify"
-  mkdir /tmp/cloudify
+  mkdir /tmp/cloudify
   cp $0 /tmp/cloudify/.
   chmod 755 /tmp/cloudify/*.sh
 
diff --git a/tests/utils/tacker-setup.sh b/tests/utils/tacker-setup.sh
new file mode 100644
index 0000000..2fd04f4
--- /dev/null
+++ b/tests/utils/tacker-setup.sh
@@ -0,0 +1,251 @@
+#!/bin/bash
+# Copyright 2016 AT&T Intellectual Property, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# What this is: Setup script for the OpenStack Tacker VNF Manager, starting from
+# an Ubuntu Xenial docker container.
+#
+# Status: this is a work in progress, under test.
+#
+# How to use:
+#   $ bash tacker-setup.sh [tacker-cli|tacker-api] [1|2]
+#   tacker-cli: use Tacker CLI
+#   tacker-api: use Tacker RESTful API
+#   1: Initial setup of the docker container
+#   2: Setup of Tacker in the docker container
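+#
+# Example two-step run, assuming the container pattern used in tests/vHello.sh
+# (the container id lookup below is illustrative, not part of this script):
+#   bash tacker-setup.sh tacker-cli 1
+#   CONTAINER=$(sudo docker ps -l | awk "/ ubuntu:xenial / { print \$1 }")
+#   sudo docker exec $CONTAINER /tmp/tacker/tacker-setup.sh tacker-cli 2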
+
+function setenv () {
+if [ "$dist" == "Ubuntu" ]; then
+  echo "$0: Ubuntu-based install"
+  echo "$0: Create the environment file"
+  KEYSTONE_HOST=$(juju status --format=short | awk "/keystone\/0/ { print \$3 }")
+  cat <<EOF >/tmp/tacker/admin-openrc.sh
+export CONGRESS_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
+export HORIZON_HOST=$(juju status --format=short | awk "/openstack-dashboard/ { print \$3 }")
+export KEYSTONE_HOST=$KEYSTONE_HOST
+export CEILOMETER_HOST=$(juju status --format=short | awk "/ceilometer\/0/ { print \$3 }")
+export CINDER_HOST=$(juju status --format=short | awk "/cinder\/0/ { print \$3 }")
+export GLANCE_HOST=$(juju status --format=short | awk "/glance\/0/ { print \$3 }")
+export NEUTRON_HOST=$(juju status --format=short | awk "/neutron-api\/0/ { print \$3 }")
+export NOVA_HOST=$(juju status --format=short | awk "/nova-cloud-controller\/0/ { print \$3 }")
+export HEAT_HOST=$(juju status --format=short | awk "/heat\/0/ { print \$3 }")
+export OS_USERNAME=admin
+export OS_PASSWORD=openstack
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=http://$KEYSTONE_HOST:5000/v2.0
+export OS_REGION_NAME=RegionOne
+EOF
+else
+  # Centos
+  echo "$0: Centos-based install"
+  echo "$0: Setup undercloud environment so we can get overcloud Controller server address"
+  source ~/stackrc
+  echo "$0: Get address of Controller node"
+  export CONTROLLER_HOST1=$(openstack server list | awk "/overcloud-controller-0/ { print \$8 }" | sed 's/ctlplane=//g')
+  echo "$0: Create the environment file"
+  cat <<EOF >/tmp/tacker/admin-openrc.sh
+export CONGRESS_HOST=$CONTROLLER_HOST1
+export KEYSTONE_HOST=$CONTROLLER_HOST1
+export CEILOMETER_HOST=$CONTROLLER_HOST1
+export CINDER_HOST=$CONTROLLER_HOST1
+export GLANCE_HOST=$CONTROLLER_HOST1
+export NEUTRON_HOST=$CONTROLLER_HOST1
+export NOVA_HOST=$CONTROLLER_HOST1
+export HEAT_HOST=$CONTROLLER_HOST1
+EOF
+  cat ~/overcloudrc >>/tmp/tacker/admin-openrc.sh
+  source ~/overcloudrc
+  export OS_REGION_NAME=$(openstack endpoint list | awk "/ nova / { print \$4 }")
+  # The lines below are a workaround for a bug - the region shows up twice for some reason
+  cat <<EOF >>/tmp/tacker/admin-openrc.sh
+export OS_REGION_NAME=$OS_REGION_NAME
+EOF
+fi
+source /tmp/tacker/admin-openrc.sh
+}
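+
+# Note: setenv() writes and then sources /tmp/tacker/admin-openrc.sh. To confirm
+# the generated credentials work before continuing (assuming the openstack
+# client is available where this step runs), something like the following can
+# be used:
+#   source /tmp/tacker/admin-openrc.sh && openstack service list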
+
+function get_external_net () {
+  network_ids=($(neutron net-list|grep -v "+"|grep -v name|awk '{print $2}'))
+  for id in ${network_ids[@]}; do
+    [[ $(neutron net-show ${id}|grep 'router:external'|grep -i "true") != "" ]] && ext_net_id=${id}
+  done
+  if [[ $ext_net_id ]]; then
+    EXTERNAL_NETWORK_NAME=$(openstack network show $ext_net_id | awk "/ name / { print \$4 }")
+    EXTERNAL_SUBNET_ID=$(openstack network show $EXTERNAL_NETWORK_NAME | awk "/ subnets / { print \$4 }")
+  else
+    echo "$0: External network not found"
+    exit 1
+  fi
+}
+
+dist=`grep DISTRIB_ID /etc/*-release | awk -F '=' '{print $2}'`
+case "$2" in
+  "1")
+    # STEP 1: Create the Tacker container and launch it
+    echo "$0: Copy this script to /tmp/tacker"
+    mkdir /tmp/tacker
+    cp $0 /tmp/tacker/.
+    chmod 755 /tmp/tacker/*.sh
+
+    echo "$0: Setup admin-openrc.sh"
+    setenv
+
+    echo "$0: Setup container"
+    if [ "$dist" == "Ubuntu" ]; then
+      # xenial is needed for python 3.5
+      sudo docker pull ubuntu:xenial
+      sudo service docker start
+      sudo docker run -it -d -v /tmp/tacker/:/tmp/tacker ubuntu:xenial /bin/bash
+    else
+      # Centos
+      echo "Centos-based install"
+      sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
+[dockerrepo]
+name=Docker Repository
+baseurl=https://yum.dockerproject.org/repo/main/centos/7/
+enabled=1
+gpgcheck=1
+gpgkey=https://yum.dockerproject.org/gpg
+EOF
+      sudo yum install -y docker-engine
+      # xenial is needed for python 3.5
+      sudo docker pull ubuntu:xenial
+      sudo service docker start
+      sudo docker run -i -t -d -v /tmp/tacker/:/tmp/tacker ubuntu:xenial /bin/bash
+    fi
+    exit 0
+    ;;
+  "2")
+    ;;
+  *)
+    echo "usage: bash tacker-setup.sh [tacker-cli|tacker-api] [1|2]"
+    echo "1: Initial setup of the docker container"
+    echo "2: Setup of Tacker in the docker container"
+    exit 1
+esac
+
+# STEP 2: Install Tacker in the container
+# Per http://docs.openstack.org/developer/tacker/install/manual_installation.html
+echo "$0: Install dependencies - OS specific"
+if [ "$dist" == "Ubuntu" ]; then
+  apt-get update
+  apt-get install -y python
+  apt-get install -y python-dev
+  apt-get install -y python-pip
+  apt-get install -y wget
+  apt-get install -y openssh-server
+  apt-get install -y git
+  apt-get install -y apg
+  export MYSQL_PASSWORD=$(/usr/bin/apg -n 1 -m 16 -c cl_seed)
+  debconf-set-selections <<< 'mysql-server mysql-server/root_password password '$MYSQL_PASSWORD
+  debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password '$MYSQL_PASSWORD
+  apt-get -q -y install mysql-server python-mysqldb
+fi
+
+cd /tmp/tacker
+
+echo "$0: create Tacker database"
+mysql --user=root --password=$MYSQL_PASSWORD -e "CREATE DATABASE tacker; GRANT ALL PRIVILEGES ON tacker.* TO 'root@localhost' IDENTIFIED BY '"$MYSQL_PASSWORD"'; GRANT ALL PRIVILEGES ON tacker.* TO 'root'@'%' IDENTIFIED BY '"$MYSQL_PASSWORD"';"
+
+echo "$0: Install dependencies - generic"
+pip install --upgrade pip virtualenv
+
+echo "$0: Upgrade pip again - needs to be the latest version due to errors found in earlier testing"
+pip install --upgrade pip
+
+echo "$0: install python-openstackclient python-glanceclient"
+pip install --upgrade python-openstackclient python-glanceclient python-neutronclient
+
+echo "$0: Create virtualenv"
+virtualenv /tmp/tacker/venv
+source /tmp/tacker/venv/bin/activate
+
+echo "$0: Setup admin-openrc.sh"
+source /tmp/tacker/admin-openrc.sh
+
+echo "$0: Setup Tacker user in OpenStack"
+openstack user create --password tacker tacker
+openstack role add --project services --user tacker admin
+
+echo "$0: Create Tacker service in OpenStack"
+openstack service create --name tacker --description "Tacker Project" nfv-orchestration
+sid=$(openstack service list | awk "/ tacker / { print \$2 }")
+
+echo "$0: Create Tacker service endpoint in OpenStack"
+ip=$(ip addr | awk "/ global eth0/ { print \$2 }" | sed -- 's/\/16//')
+openstack endpoint create --region RegionOne \
+    --publicurl "http://$ip:9890/" \
+    --adminurl "http://$ip:9890/" \
+    --internalurl "http://$ip:9890/" $sid
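+
+# The service and endpoint registration above can be spot-checked with the same
+# clients this script already uses, e.g.:
+#   openstack service list | grep tacker
+#   openstack endpoint list | grep tacker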
+
+echo "$0: Clone Tacker"
+if [[ -d /tmp/tacker/tacker ]]; then rm -rf /tmp/tacker/tacker; fi
+git clone git://git.openstack.org/openstack/tacker
+cd tacker
+git checkout stable/mitaka
+
+echo "$0: Install Tacker"
+pip install -r requirements.txt
+pip install tosca-parser
+python setup.py install
+mkdir /var/log/tacker
+
+# Following lines apply to master and not stable/mitaka
+#echo "$0: install tox"
+#pip install tox
+#echo "$0: generate tacker.conf.sample"
+#tox -e config-gen
+
+echo "$0: Update tacker.conf values"
+mkdir /usr/local/etc/tacker
+cp etc/tacker/tacker.conf /usr/local/etc/tacker/tacker.conf
+sed -i -- 's/# auth_strategy = keystone/auth_strategy = keystone/' /usr/local/etc/tacker/tacker.conf
+sed -i -- 's/# debug = False/debug = True/' /usr/local/etc/tacker/tacker.conf
+sed -i -- 's/# use_syslog = False/use_syslog = False/' /usr/local/etc/tacker/tacker.conf
+sed -i -- 's~# state_path = /var/lib/tacker~state_path = /var/lib/tacker~' /usr/local/etc/tacker/tacker.conf
+sed -i -- 's/password = service-password/password=tacker/' /usr/local/etc/tacker/tacker.conf
+sed -i -- "s~auth_url = http://:35357~auth_url = http://$KEYSTONE_HOST:35357~" /usr/local/etc/tacker/tacker.conf
+sed -i -- "s~identity_uri = http://127.0.0.1:5000~# identity_uri = http://127.0.0.1:5000~" /usr/local/etc/tacker/tacker.conf
+sed -i -- "s~auth_uri = http://:5000~auth_uri = http://$KEYSTONE_HOST:5000~" /usr/local/etc/tacker/tacker.conf
+# Not sure what the effect of the next line is, given that we are running as root
+#sed -i -- "s~# root_helper = sudo~root_helper = sudo /usr/local/bin/tacker-rootwrap /usr/local/etc/tacker/rootwrap.conf~" /usr/local/etc/tacker/tacker.conf
+sed -i -- "s~# connection = mysql://root:pass@127.0.0.1:3306/tacker~connection = mysql://root:$MYSQL_PASSWORD@$ip:3306/tacker?charset=utf8~" /usr/local/etc/tacker/tacker.conf
+sed -i -- ":a;N;$!ba;s~password = service-password\nusername = nova\nauth_url = http://127.0.0.1:35357~password = $OS_PASSWORD\nauth_url = http://$NOVA_HOST:35357~g" /usr/local/etc/tacker/tacker.conf
+sed -i -- "s~heat_uri = http://localhost:8004/v1~heat_uri = http://$HEAT_HOST:8004/v1~" /usr/local/etc/tacker/tacker.conf
+
+echo "$0: Populate Tacker database"
+/usr/local/bin/tacker-db-manage --config-file /usr/local/etc/tacker/tacker.conf upgrade head
+
+echo "$0: Install Tacker Client"
+cd /tmp/tacker
+if [[ -d /tmp/tacker/python-tackerclient ]]; then rm -rf /tmp/tacker/python-tackerclient; fi
+git clone https://github.com/openstack/python-tackerclient
+cd python-tackerclient
+git checkout stable/mitaka
+python setup.py install
+
+# deferred until it's determined how to get this to Horizon
+#echo "$0: Install Tacker Horizon plugin"
+#cd /tmp/tacker
+#git clone https://github.com/openstack/tacker-horizon
+#cd tacker-horizon
+#python setup.py install
+# The next two commands must affect the Horizon server
+#cp openstack_dashboard_extensions/* /usr/share/openstack-dashboard/openstack_dashboard/enabled/
+#service apache2 restart
+
+echo "$0: Start the Tacker Server"
+python /usr/local/bin/tacker-server --config-file /usr/local/etc/tacker/tacker.conf --log-file /var/log/tacker/tacker.log
+
+# Registering default VIM: deferred
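+
+# A minimal sanity check, assuming the server comes up cleanly: the API should
+# answer on the endpoint registered earlier, and the log is written to
+# /var/log/tacker/tacker.log, e.g.:
+#   curl http://$ip:9890/
+#   tail /var/log/tacker/tacker.log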
diff --git a/tests/vHello.sh b/tests/vHello.sh
index c9bc995..317cd1e 100644
--- a/tests/vHello.sh
+++ b/tests/vHello.sh
@@ -50,122 +50,99 @@ function get_floating_net () {
   if [[ $floating_network_id ]]; then
     floating_network_name=$(openstack network show $floating_network_id | awk "/ name / { print \$4 }")
   else
-    echo "vHello.sh: Floating network not found"
+    echo "$0: Floating network not found"
     exit 1
   fi
 }
 
 select_manager() {
-  echo "vHello.sh: select manager to use"
+  echo "$0: select manager to use"
   MANAGER_IP=$(openstack server list | awk "/ cloudify-manager-server / { print \$9 }")
   cfy use -t $MANAGER_IP
   if [ $? -eq 1 ]; then fail; fi
 }
 
 start() {
-  echo "vHello.sh: reset blueprints folder"
+  echo "$0: reset blueprints folder"
   if [[ -d /tmp/cloudify/blueprints ]]; then rm -rf /tmp/cloudify/blueprints; fi
   mkdir -p /tmp/cloudify/blueprints
   cd /tmp/cloudify/blueprints
 
-  echo "vHello.sh: clone cloudify-hello-world-example"
+  echo "$0: clone cloudify-hello-world-example"
   git clone https://github.com/cloudify-cosmo/cloudify-hello-world-example.git
   cd cloudify-hello-world-example
   git checkout 3.4.1-build
 
-  echo "vHello.sh: setup OpenStack CLI environment"
+  echo "$0: setup OpenStack CLI environment"
   source /tmp/cloudify/admin-openrc.sh
 
-  echo "vHello.sh: Setup image_id"
+  echo "$0: Setup image_id"
   # image=$(openstack image list | awk "/ CentOS-7-x86_64-GenericCloud-1607 / { print \$2 }")
   image=$(openstack image list | awk "/ xenial-server / { print \$2 }")
   if [ -z $image ]; then
     # glance --os-image-api-version 1 image-create --name CentOS-7-x86_64-GenericCloud-1607 --disk-format qcow2 --location http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1607.qcow2 --container-format bare
-    glance --os-image-api-version 1 image-create --name xenial-server --disk-format qcow2 --location http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+    glance --os-image-api-version 1 image-create --name xenial-server --disk-format qcow2 --location http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img --container-format bare
   fi
   # image=$(openstack image list | awk "/ CentOS-7-x86_64-GenericCloud-1607 / { print \$2 }")
   image=$(openstack image list | awk "/ xenial-server / { print \$2 }")
 
-  echo "vHello.sh: create blueprint inputs file"
-  # Set host image per Cloudify agent compatibility: http://docs.getcloudify.org/3.4.0/agents/overview/
-  cd /tmp/cloudify/blueprints
-  cat <<EOF >vHello-inputs.yaml
+  if [[ "$1" == "cloudify-manager" ]]; then
+    echo "$0: create Cloudify Manager blueprint inputs file"
+    # Set host image per Cloudify agent compatibility: http://docs.getcloudify.org/3.4.0/agents/overview/
+    cd /tmp/cloudify/blueprints
+    cat <<EOF >vHello-inputs.yaml
 image: xenial-server
 flavor: m1.small
 agent_user: ubuntu
 webserver_port: 8080
 EOF
-
-  if [[ "$1" == "cloudify-cli" ]]; then
-    # Workarounds for error in allocating floating IP
-    # Workflow failed: Task failed 'neutron_plugin.floatingip.create' -> Failed to parse request. Required attribute 'floating_network_id' not specified [status_code=400]
+  else
+    # Cloudify CLI use
+    echo "$0: Get external network for Floating IP allocations"
     get_floating_net
 
-    echo "vHello.sh: update blueprint with parameters needed for Cloudify CLI use"
-    cat <<EOF >>vHello-inputs.yaml
-external_network_name: $floating_network_name
-EOF
-
-    sed -i -- 's/description: Openstack flavor name or id to use for the new server/description: Openstack flavor name or id to use for the new server\n external_network_name:\n description: External network name/g' cloudify-hello-world-example/blueprint.yaml
-
-    sed -i -- 's/type: cloudify.openstack.nodes.FloatingIP/type: cloudify.openstack.nodes.FloatingIP\n properties:\n floatingip:\n floating_network_name: { get_input: external_network_name }/g' cloudify-hello-world-example/blueprint.yaml
-
-    echo "vHello.sh: Create Nova key pair"
+    echo "$0: Create Nova key pair"
     mkdir -p ~/.ssh
     nova keypair-delete vHello
     nova keypair-add vHello > ~/.ssh/vHello.pem
     chmod 600 ~/.ssh/vHello.pem
-# Workarounds for error in allocating keypair
-# Task failed 'nova_plugin.server.create' -> server must have a keypair, yet no keypair was connected to the server node, the "key_name" nested property wasn't used, and there is no agent keypair in the provider context
-# Tried the following but keypair is not supported by http://www.getcloudify.org/spec/openstack-plugin/1.4/plugin.yaml
-# sed -i -- 's/target: security_group/target: security_group\n - type: cloudify.openstack.server_connected_to_keypair\n target: keypair/g' cloudify-hello-world-example/blueprint.yaml
-# sed -i -- 's/description: External network name/description: External network name\n private_key_path:\n description: Path to private key/g' cloudify-hello-world-example/blueprint.yaml
-# sed -i -- '0,/interfaces:/s//interfaces:\n cloudify.interfaces.lifecycle:\n start:\n implementation: openstack.nova_plugin.server.start\n inputs:\n private_key_path: { get_input: private_key_path }/' cloudify-hello-world-example/blueprint.yaml
-
-# 'key_name' is a subproperty of 'server' per test-start-operation-retry-blueprint.yaml in the cloudify-openstack-plugin repo
-    sed -i -- 's/description: External network name/description: External network name\n key_name:\n description: Name of private key/g' cloudify-hello-world-example/blueprint.yaml
-
-    sed -i -- 's/flavor: { get_input: flavor }/flavor: { get_input: flavor }\n server:\n key_name: { get_input: key_name }/' cloudify-hello-world-example/blueprint.yaml
-
-    echo "vHello.sh: update blueprint with parameters needed for Cloudify CLI use"
-    #private_key_path: /root/.ssh/vHello.pem
-    cat <<EOF >>vHello-inputs.yaml
+    echo "$0: create Cloudify CLI blueprint inputs file"
+    cat <<EOF >vHello-inputs.yaml
+image: xenial-server
+flavor: m1.small
+external_network_name: $floating_network_name
+webserver_port: 8080
 key_name: vHello
 EOF
-
-    echo "vHello.sh: disable cloudify agent install in blueprint"
-    sed -i -- ':a;N;$!ba;s/ agent_user:\n description: User name used when SSH-ing into the started machine\n//g' cloudify-hello-world-example/blueprint.yaml
-    sed -i -- ':a;N;$!ba;s/agent_config:\n user: { get_input: agent_user }/install_agent: false/' cloudify-hello-world-example/blueprint.yaml
-    sed -i -- ':a;N;$!ba;s/agent_user: centos\n//' vHello-inputs.yaml
   fi
 
-  echo "vHello.sh: activate cloudify Virtualenv"
+  echo "$0: activate cloudify Virtualenv"
   source ~/cloudify/venv/bin/activate
 
-  echo "vHello.sh: initialize cloudify environment"
+  echo "$0: initialize cloudify environment"
   cd /tmp/cloudify/blueprints
   cfy init -r
 
   if [[ "$1" == "cloudify-manager" ]]; then
     select_manager
 
-    echo "vHello.sh: upload blueprint via manager"
+    echo "$0: upload blueprint via manager"
     cfy blueprints delete -b cloudify-hello-world-example
     cfy blueprints upload -p cloudify-hello-world-example/blueprint.yaml -b cloudify-hello-world-example
     if [ $? -eq 1 ]; then fail; fi
 
-    echo "vHello.sh: create vHello deployment via manager"
+    echo "$0: create vHello deployment via manager"
     cfy deployments create --debug -d vHello -i vHello-inputs.yaml -b cloudify-hello-world-example
     if [ $? -eq 1 ]; then fail; fi
 
-    echo "vHello.sh: execute 'install' workflow for vHello deployment via manager"
+    echo "$0: execute 'install' workflow for vHello deployment via manager"
     cfy executions start -w install -d vHello --timeout 1800
     if [ $? -eq 1 ]; then fail; fi
 
-    echo "vHello.sh: get vHello server address"
+    echo "$0: get vHello server address"
     SERVER_URL=$(cfy deployments outputs -d vHello | awk "/ Value: / { print \$2 }")
   else
-    echo "vHello.sh: install local blueprint"
+    echo "$0: install local blueprint"
     cfy local install --install-plugins -i vHello-inputs.yaml -p cloudify-hello-world-example/blueprint.yaml --allow-custom-parameters --parameters="floating_network_name=$floating_network_name" --task-retries=10 --task-retry-interval=30
     if [ $? -eq 1 ]; then fail; fi
     # cfy local install replaces the following, per http://getcloudify.org/2016/04/07/cloudify-update-from-developers-features-improvements-open-source-python-devops.html
@@ -175,11 +152,11 @@ EOF
     # cfy local create-requirements -p cloudify-hello-world-example/blueprint.yaml
     # if [ $? -eq 1 ]; then fail; fi
 
-    echo "vHello.sh: get vHello server address"
-    SERVER_URL=$(cfy local outputs | awk "/http_endpoint/ { print \$2 }")
+    echo "$0: get vHello server address"
+    SERVER_URL=$(cfy local outputs | awk "/http_endpoint/ { print \$2 }" | sed -- 's/"//g')
   fi
 
-  echo "vHello.sh: verify vHello server is running"
+  echo "$0: verify vHello server is running"
   apt-get install -y curl
   if [[ $(curl $SERVER_URL | grep -c "Hello, World!") != 1 ]]; then fail; fi
 
@@ -187,26 +164,26 @@ EOF
 }
 
 clean() {
-  echo "vHello.sh: activate cloudify Virtualenv"
+  echo "$0: activate cloudify Virtualenv"
   source ~/cloudify/venv/bin/activate
 
-  echo "vHello.sh: setup OpenStack CLI environment"
+  echo "$0: setup OpenStack CLI environment"
   source /tmp/cloudify/admin-openrc.sh
 
-  echo "vHello.sh: initialize cloudify environment"
+  echo "$0: initialize cloudify environment"
   cd /tmp/cloudify/blueprints
 
   if [[ "$1" == "cloudify-manager" ]]; then
     select_manager
 
-    echo "vHello.sh: uninstall vHello blueprint via manager"
+    echo "$0: uninstall vHello blueprint via manager"
     cfy executions start -w uninstall -d vHello
     if [ $? -eq 1 ]; then fail; fi
 
-    echo "vHello.sh: delete vHello blueprint"
+    echo "$0: delete vHello blueprint"
     cfy deployments delete -d vHello
     if [ $? -eq 1 ]; then fail; fi
   else
-    echo "vHello.sh: uninstall vHello blueprint via CLI"
+    echo "$0: uninstall vHello blueprint via CLI"
     cfy local uninstall
     if [ $? -eq 1 ]; then fail; fi
   fi
 
@@ -214,16 +191,16 @@ clean() {
 
 if [[ "$2" == "setup" ]]; then
-  echo "vHello.sh: Setup temp test folder /tmp/cloudify and copy this script there"
+  echo "$0: Setup temp test folder /tmp/cloudify and copy this script there"
   mkdir /tmp/cloudify
   chmod 777 /tmp/cloudify/
   cp $0 /tmp/cloudify/.
   chmod 755 /tmp/cloudify/*.sh
 
-  echo "vHello.sh: cloudify-setup part 1"
+  echo "$0: cloudify-setup part 1"
   bash utils/cloudify-setup.sh $1 1
 
-  echo "vHello.sh: cloudify-setup part 2"
+  echo "$0: cloudify-setup part 2"
   CONTAINER=$(sudo docker ps -l | awk "/ ubuntu:xenial / { print \$1 }")
   sudo docker exec $CONTAINER /tmp/cloudify/cloudify-setup.sh $1 2
   if [ $? -eq 1 ]; then fail; fi
@@ -234,7 +211,7 @@ else
   if [[ "$3" == "start" ]]; then start $1; fi
   if [[ "$3" == "clean" ]]; then clean $1; fi
 else
-  echo "vHello.sh: pass $2 command to vHello.sh in cloudify container"
+  echo "$0: pass $2 command to vHello.sh in cloudify container"
   CONTAINER=$(sudo docker ps -l | awk "/ ubuntu:xenial / { print \$1 }")
   sudo docker exec $CONTAINER /tmp/cloudify/vHello.sh $1 $2 $2
   if [ $? -eq 1 ]; then fail; fi