author    Tim Rozet <trozet@redhat.com>  2018-07-17 15:53:03 +0000
committer Gerrit Code Review <gerrit@opnfv.org>  2018-07-17 15:53:03 +0000
commit    e4a6c05de01febe062fac4c642613331866a68df (patch)
tree      b4ba9ecbbfebe69ea931f6e887e6e94032f7ca9c
parent    7fe92aa91773083073f7fea08b69bd0536935c82 (diff)
parent    de0b32bfd38d49fa2a329f48c33b11bd8aac6a22 (diff)
Merge "Run ODL CSIT on Apex from Cperf container"
-rwxr-xr-x  jjb/3rd_party_ci/detect-snapshot.sh     31
-rwxr-xr-x  jjb/3rd_party_ci/install-netvirt.sh      2
-rw-r--r--  jjb/3rd_party_ci/odl-netvirt.yaml       32
-rw-r--r--  jjb/apex/apex-snapshot-deploy.sh        24
-rw-r--r--  jjb/cperf/cperf-ci-jobs.yaml           112
-rwxr-xr-x  jjb/cperf/cperf-prepare-robot.sh        32
-rwxr-xr-x  jjb/cperf/cperf-robot-netvirt-csit.sh  105
-rw-r--r--  jjb/cperf/parse-node-yaml.py            71
8 files changed, 328 insertions, 81 deletions
diff --git a/jjb/3rd_party_ci/detect-snapshot.sh b/jjb/3rd_party_ci/detect-snapshot.sh
new file mode 100755
index 000000000..46d4dfa2d
--- /dev/null
+++ b/jjb/3rd_party_ci/detect-snapshot.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+set -o errexit
+set -o nounset
+set -o pipefail
+
+echo "Detecting requested OpenStack branch and topology type in gerrit comment"
+parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^opnfv-check\s*//p')
+parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
+if [ ! -z "$parsed_comment" ]; then
+ if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
+ IFS='-' read -r -a array <<< "$parsed_comment"
+ os_version=${array[0]}
+ topo=${array[1]}
+ echo "OS version detected in gerrit comment: ${os_version}"
+ echo "Topology type detected in gerrit comment: ${topo}"
+ else
+ echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
+ exit 1
+ fi
+else
+ echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
+ os_version='master'
+ topo='noha'
+fi
+
+echo "Writing variables to file"
+cat > detected_snapshot << EOI
+OS_VERSION=$os_version
+TOPOLOGY=$topo
+SNAP_CACHE=$HOME/snap_cache/$os_version/$topo
+EOI
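For orientation, here is a minimal sketch of what the new detect-snapshot.sh builder extracts from a trigger comment; the comment text and the "queens" OpenStack short name are purely illustrative, not values this change defines:

#!/bin/bash
# Hypothetical gerrit trigger comment; "queens" is only an example OS short name.
GERRIT_EVENT_COMMENT_TEXT="opnfv-check queens-noha"

# Same parsing idea as detect-snapshot.sh above: strip the "opnfv-check" prefix,
# then split the remaining "<os>-<topology>" token on '-'.
parsed_comment=$(echo "$GERRIT_EVENT_COMMENT_TEXT" | sed -n 's/^opnfv-check\s*//p')
IFS='-' read -r os_version topo <<< "$parsed_comment"
echo "OS_VERSION=$os_version TOPOLOGY=$topo"
# -> OS_VERSION=queens TOPOLOGY=noha
# detect-snapshot.sh then writes these (plus SNAP_CACHE) to the detected_snapshot
# properties file, which the odl-netvirt job injects as environment variables.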
diff --git a/jjb/3rd_party_ci/install-netvirt.sh b/jjb/3rd_party_ci/install-netvirt.sh
index 07bbe7739..232d60e5c 100755
--- a/jjb/3rd_party_ci/install-netvirt.sh
+++ b/jjb/3rd_party_ci/install-netvirt.sh
@@ -3,7 +3,7 @@ set -o errexit
set -o nounset
set -o pipefail
-SNAP_CACHE=$HOME/snap_cache
+SNAP_CACHE=$HOME/snap_cache/$OS_VERSION/$TOPOLOGY
# clone opnfv sdnvpn repo
git clone https://gerrit.opnfv.org/gerrit/p/sdnvpn.git $WORKSPACE/sdnvpn
diff --git a/jjb/3rd_party_ci/odl-netvirt.yaml b/jjb/3rd_party_ci/odl-netvirt.yaml
index c78de9bdf..77263d0a7 100644
--- a/jjb/3rd_party_ci/odl-netvirt.yaml
+++ b/jjb/3rd_party_ci/odl-netvirt.yaml
@@ -104,6 +104,9 @@
builders:
- description-setter:
description: "Built on $NODE_NAME"
+ - detect-opnfv-snapshot
+ - inject:
+ properties-file: detected_snapshot
- multijob:
name: create-apex-vms
condition: SUCCESSFUL
@@ -120,6 +123,8 @@
NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
APEX_ENV_NUMBER=$APEX_ENV_NUMBER
GERRIT_EVENT_COMMENT_TEXT=$GERRIT_EVENT_COMMENT_TEXT
+ TOPOLOGY=$TOPOLOGY
+ OS_VERSION=$OS_VERSION
node-parameters: true
kill-phase-on: FAILURE
abort-all-job: true
@@ -130,7 +135,7 @@
- name: 'odl-netvirt-verify-virtual-install-netvirt-{stream}'
current-parameters: false
predefined-parameters: |
- ODL_BRANCH={branch}
+ ODL_BRANCH=$BRANCH
BRANCH=$BRANCH
GERRIT_REFSPEC=$GERRIT_REFSPEC
GERRIT_CHANGE_NUMBER=$GERRIT_CHANGE_NUMBER
@@ -138,6 +143,8 @@
GERRIT_PATCHSET_NUMBER=$GERRIT_PATCHSET_NUMBER
GERRIT_PATCHSET_REVISION=$GERRIT_PATCHSET_REVISION
NETVIRT_ARTIFACT=$NETVIRT_ARTIFACT
+ TOPOLOGY=$TOPOLOGY
+ OS_VERSION=$OS_VERSION
node-parameters: true
kill-phase-on: FAILURE
abort-all-job: true
@@ -147,7 +154,7 @@
projects:
- name: 'functest-netvirt-virtual-suite-master'
predefined-parameters: |
- DEPLOY_SCENARIO=os-odl-nofeature-ha
+ DEPLOY_SCENARIO=os-odl-nofeature-$TOPOLOGY
FUNCTEST_MODE=testcase
FUNCTEST_SUITE_NAME=tempest_smoke
RC_FILE_PATH=$HOME/cloner-info/overcloudrc
@@ -155,6 +162,21 @@
kill-phase-on: FAILURE
abort-all-job: false
- multijob:
+ name: csit
+ condition: ALWAYS
+ projects:
+ - name: cperf-apex-csit-{stream}
+ predefined-parameters: |
+ ODL_BRANCH=$BRANCH
+ RC_FILE_PATH=$SNAP_CACHE/overcloudrc
+ NODE_FILE_PATH=$SNAP_CACHE/node.yaml
+ SSH_KEY_PATH=$SNAP_CACHE/id_rsa
+ ODL_CONTAINERIZED=false
+ OS_VERSION=$OS_VERSION
+ node-parameters: true
+ kill-phase-on: NEVER
+ abort-all-job: false
+ - multijob:
name: postprocess
condition: ALWAYS
projects:
@@ -248,3 +270,9 @@
builders:
- shell:
!include-raw: ./postprocess-netvirt.sh
+
+- builder:
+ name: 'detect-opnfv-snapshot'
+ builders:
+ - shell:
+ !include-raw-escape: ./detect-snapshot.sh
diff --git a/jjb/apex/apex-snapshot-deploy.sh b/jjb/apex/apex-snapshot-deploy.sh
index 0a475062c..9738ecb19 100644
--- a/jjb/apex/apex-snapshot-deploy.sh
+++ b/jjb/apex/apex-snapshot-deploy.sh
@@ -25,27 +25,7 @@ pushd ci > /dev/null
sudo opnfv-clean
popd > /dev/null
-echo "Detecting requested OpenStack branch and topology type in gerrit comment"
-parsed_comment=$(echo $GERRIT_EVENT_COMMENT_TEXT | sed -n 's/^opnfv-check\s*//p')
-parsed_comment=$(echo $parsed_comment | sed -n 's/\s*$//p')
-if [ ! -z "$parsed_comment" ]; then
- if echo $parsed_comment | grep -E '^[a-z]+-(no)?ha'; then
- IFS='-' read -r -a array <<< "$parsed_comment"
- os_version=${array[0]}
- topo=${array[1]}
- echo "OS version detected in gerrit comment: ${os_version}"
- echo "Topology type detected in gerrit comment: ${topo}"
- else
- echo "Invalid format given for scenario in gerrit comment: ${parsed_comment}...aborting"
- exit 1
- fi
-else
- echo "No scenario given in gerrit comment, will use default (master OpenStack, noha)"
- os_version='master'
- topo='noha'
-fi
-
-full_snap_url=http://$GS_URL/${os_version}/${topo}
+full_snap_url=http://$GS_URL/${OS_VERSION}/${TOPOLOGY}
echo "Downloading latest snapshot properties file"
if ! wget -O $WORKSPACE/opnfv.properties ${full_snap_url}/snapshot.properties; then
@@ -61,7 +41,7 @@ if [ -z "$latest_snap_checksum" ]; then
fi
local_snap_checksum=""
-SNAP_CACHE=${SNAP_CACHE}/${os_version}/${topo}
+SNAP_CACHE=${SNAP_CACHE}/${OS_VERSION}/${TOPOLOGY}
# check snap cache directory exists
# if snapshot cache exists, find the checksum
diff --git a/jjb/cperf/cperf-ci-jobs.yaml b/jjb/cperf/cperf-ci-jobs.yaml
index fdd3509d1..59afb89c8 100644
--- a/jjb/cperf/cperf-ci-jobs.yaml
+++ b/jjb/cperf/cperf-ci-jobs.yaml
@@ -9,47 +9,29 @@
# -------------------------------
# BRANCH ANCHORS
# -------------------------------
- master: &master
- stream: master
- branch: '{stream}'
- gs-pathname: ''
- docker-tag: 'latest'
- danube: &danube
- stream: danube
- branch: 'stable/{stream}'
- gs-pathname: '/{stream}'
- docker-tag: 'stable'
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ docker-tag: 'latest'
- # -------------------------------
- # POD, INSTALLER, AND BRANCH MAPPING
- # -------------------------------
- pod:
- # -------------------------------
- # master
- # -------------------------------
- - intel-pod2:
- installer: apex
- <<: *master
- - intel-pod2:
- installer: apex
- <<: *danube
+ installer: apex
testsuite:
- - 'daily'
+ - csit
+ - cbench
jobs:
- - 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+ - 'cperf-{installer}-{testsuite}-{stream}'
################################
# job template
################################
- job-template:
- name: 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+ name: 'cperf-{installer}-{testsuite}-{stream}'
concurrent: true
properties:
- - logrotate-default
- throttle:
enabled: true
max-per-node: 1
@@ -57,24 +39,17 @@
wrappers:
- build-name:
- name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+ name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME ODL BRANCH: $ODL_BRANCH'
- timeout:
timeout: 400
abort: true
parameters:
- - project-parameter:
- project: '{project}'
- branch: '{branch}'
- - '{pod}-defaults'
- - '{installer}-defaults'
- cperf-parameter:
testsuite: '{testsuite}'
gs-pathname: '{gs-pathname}'
docker-tag: '{docker-tag}'
-
- scm:
- - git-scm
+ stream: '{stream}'
builders:
- 'cperf-{testsuite}-builder'
@@ -90,6 +65,14 @@
default: '{testsuite}'
description: "Suite name to run"
- string:
+ name: ODL_BRANCH
+ default: 'master'
+ description: "Branch that OpenDaylight is running"
+ - string:
+ name: OS_VERSION
+ default: 'master'
+ description: "OpenStack version (short name, no stable/ prefix)"
+ - string:
name: GS_PATHNAME
default: '{gs-pathname}'
description: "Version directory where the opnfv documents will be stored in gs repository"
@@ -101,6 +84,22 @@
name: DOCKER_TAG
default: '{docker-tag}'
description: 'Tag to pull docker image'
+ - string:
+ name: RC_FILE_PATH
+ default: ''
+ description: "Path to the OS credentials file if given"
+ - string:
+ name: SSH_KEY_PATH
+ default: ''
+ description: "Path to the private SSH key to access OPNFV nodes"
+ - string:
+ name: NODE_FILE_PATH
+ default: ''
+ description: "Path to the yaml file describing overcloud nodes"
+ - string:
+ name: ODL_CONTAINERIZED
+ default: 'true'
+ description: "boolean set true if ODL on overcloud is a container"
########################
# trigger macros
@@ -110,12 +109,26 @@
# builder macros
########################
- builder:
- name: cperf-daily-builder
+ name: cperf-csit-builder
+ builders:
+ - 'cperf-cleanup'
+ - 'cperf-prepare-robot'
+ - 'cperf-robot-netvirt-csit'
+
+- builder:
+ name: cperf-cbench-builder
builders:
- 'cperf-cleanup'
+ - 'cperf-prepare-robot'
- 'cperf-robot-cbench'
- builder:
+ name: cperf-prepare-robot
+ builders:
+ - shell:
+ !include-raw: ./cperf-prepare-robot.sh
+
+- builder:
name: cperf-robot-cbench
builders:
- shell: |
@@ -123,23 +136,6 @@
set -o errexit
set -o nounset
set -o pipefail
- undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
- grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
- INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
-
- sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
- sudo chmod 755 /tmp/overcloudrc
- source /tmp/overcloudrc
-
- # robot suites need the ssh key to log in to controller nodes, so throwing it
- # in tmp, and mounting /tmp as $HOME as far as robot is concerned
- sudo rm -rf /tmp/.ssh
- sudo mkdir /tmp/.ssh
- sudo chmod 0700 /tmp/.ssh
- sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
- sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
- # done with sudo. jenkins-ci is the user from this point
- chmod 0600 /tmp/.ssh/id_rsa
# cbench requires the openflow drop test feature to be installed.
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
@@ -148,8 +144,6 @@
-p 8101 karaf@$SDN_CONTROLLER_IP \
feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
- docker pull opnfv/cperf:$DOCKER_TAG
-
robot_cmd="pybot -e exclude -L TRACE -d /tmp \
-v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
-v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
@@ -165,6 +159,12 @@
docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
- builder:
+ name: cperf-robot-netvirt-csit
+ builders:
+ - shell:
+ !include-raw: ./cperf-robot-netvirt-csit.sh
+
+- builder:
name: cperf-cleanup
builders:
- shell: |
diff --git a/jjb/cperf/cperf-prepare-robot.sh b/jjb/cperf/cperf-prepare-robot.sh
new file mode 100755
index 000000000..d88c6d510
--- /dev/null
+++ b/jjb/cperf/cperf-prepare-robot.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+if [ -z "${RC_FILE_PATH:-}" ]; then
+ undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+ grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+ INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+ sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
+else
+ cp -f $RC_FILE_PATH ${WORKSPACE}/overcloudrc
+fi
+
+sudo chmod 755 ${WORKSPACE}/overcloudrc
+source ${WORKSPACE}/overcloudrc
+
+# copy ssh key for robot
+
+if [ -z "${SSH_KEY_PATH:-}" ]; then
+ sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa ${WORKSPACE}/
+ sudo chown -R jenkins-ci:jenkins-ci ${WORKSPACE}/
+ # done with sudo. jenkins-ci is the user from this point
+ chmod 0600 ${WORKSPACE}/id_rsa
+else
+ cp -f ${SSH_KEY_PATH} ${WORKSPACE}/
+fi
+
+docker pull opnfv/cperf:$DOCKER_TAG
+
+sudo mkdir -p /tmp/robot_results
diff --git a/jjb/cperf/cperf-robot-netvirt-csit.sh b/jjb/cperf/cperf-robot-netvirt-csit.sh
new file mode 100755
index 000000000..3ef747109
--- /dev/null
+++ b/jjb/cperf/cperf-robot-netvirt-csit.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source ${WORKSPACE}/overcloudrc
+# note SDN_CONTROLLER_IP is set in overcloudrc, which is the VIP
+# for admin/public network (since we are running single network deployment)
+
+if [ "$OS_VERSION" == 'master' ]; then
+ FULL_OS_VER='master'
+else
+ FULL_OS_VER="stable/${OS_VERSION}"
+fi
+
+if [ "$ODL_BRANCH" == 'master' ]; then
+ ODL_STREAM='fluorine'
+else
+ ODL_STREAM=${ODL_BRANCH}
+fi
+
+NUM_CONTROL_NODES=$(python ./parse-node-yaml.py num_nodes --file $NODE_FILE_PATH)
+NUM_COMPUTE_NODES=$(python ./parse-node-yaml.py num_nodes --node-type compute --file $NODE_FILE_PATH)
+
+idx=1
+EXTRA_ROBOT_ARGS=""
+for idx in `seq 1 $NUM_CONTROL_NODES`; do
+ CONTROLLER_IP=$(python ./parse-node-yaml.py get_value -k address --node-number ${idx} --file $NODE_FILE_PATH)
+ EXTRA_ROBOT_ARGS+=" -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+ -v OS_CONTROL_NODE_${idx}_IP:${CONTROLLER_IP} \
+ -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+ -v HA_PROXY_${idx}_IP:${SDN_CONTROLLER_IP}"
+done
+
+idx=1
+for idx in `seq 1 $NUM_COMPUTE_NODES`; do
+ COMPUTE_IP=$(python ./parse-node-yaml.py get_value -k address --node-type compute --node-number ${idx} --file $NODE_FILE_PATH)
+ EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_${idx}_IP:${COMPUTE_IP}"
+done
+
+CONTROLLER_1_IP=$(python ./parse-node-yaml.py get_value -k address --node-number 1 --file $NODE_FILE_PATH)
+
+if [ "$ODL_CONTAINERIZED" == 'false' ]; then
+ EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'ps axf | grep org.apache.karaf | grep -v grep | wc -l || echo 0' \
+ -v NODE_START_COMMAND:'sudo systemctl start opendaylight_api' \
+ -v NODE_KILL_COMMAND:'sudo systemctl stop opendaylight_api' \
+ -v NODE_STOP_COMMAND:'sudo systemctl stop opendaylight_api' \
+ -v NODE_FREEZE_COMMAND:'sudo systemctl stop opendaylight_api' "
+else
+ EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:\"sudo docker exec opendaylight_api /bin/bash -c 'ps axf | \
+ grep org.apache.karaf | grep -v grep | wc -l' || echo 0\" \
+ -v NODE_START_COMMAND:\"sudo docker start opendaylight_api\" \
+ -v NODE_KILL_COMMAND:\"sudo docker stop opendaylight_api\" \
+ -v NODE_STOP_COMMAND:\"sudo docker stop opendaylight_api\" \
+ -v NODE_FREEZE_COMMAND:\"sudo docker stop opendaylight_api\" "
+fi
+
+robot_cmd="pybot \
+ --removekeywords wuks \
+ --xunit robotxunit.xml \
+ -c critical \
+ -e exclude \
+ -d /tmp/robot_results \
+ -v BUNDLEFOLDER:/opt/opendaylight \
+ -v CONTROLLER_USER:heat-admin \
+ -v DEFAULT_LINUX_PROMPT:\$ \
+ -v DEFAULT_LINUX_PROMPT_STRICT:]\$ \
+ -v DEFAULT_USER:heat-admin \
+ -v DEVSTACK_DEPLOY_PATH:/tmp \
+ -v HA_PROXY_IP:$SDN_CONTROLLER_IP \
+ -v NUM_ODL_SYSTEM:$NUM_CONTROL_NODES \
+ -v NUM_OS_SYSTEM:$NUM_CONTROL_NODES \
+ -v NUM_TOOLS_SYSTEM:0 \
+ -v ODL_SNAT_MODE:conntrack \
+ -v ODL_STREAM:$ODL_STREAM \
+ -v ODL_SYSTEM_IP:$CONTROLLER_1_IP \
+ -v OS_CONTROL_NODE_IP:$CONTROLLER_1_IP \
+ -v OPENSTACK_BRANCH:$FULL_OS_VER \
+ -v OS_USER:heat-admin \
+ -v ODL_ENABLE_L3_FWD:yes \
+ -v ODL_SYSTEM_USER:heat-admin \
+ -v ODL_SYSTEM_PROMPT:\$ \
+ -v PRE_CLEAN_OPENSTACK_ALL:True \
+ -v PUBLIC_PHYSICAL_NETWORK:datacentre \
+ -v RESTCONFPORT:8081 \
+ -v ODL_RESTCONF_USER:admin \
+ -v ODL_RESTCONF_PASSWORD:admin \
+ -v KARAF_PROMPT_LOGIN:'opendaylight-user' \
+ -v KARAF_PROMPT:'opendaylight-user.*root.*>' \
+ -v SECURITY_GROUP_MODE:stateful \
+ -v USER:heat-admin \
+ -v USER_HOME:\$HOME \
+ -v TOOLS_SYSTEM_IP:'' \
+ -v NODE_ROLE_INDEX_START:0 \
+ -v WORKSPACE:/tmp \
+ $EXTRA_ROBOT_ARGS \
+ -v of_port:6653 "
+
+docker run -i --net=host \
+ -v ${WORKSPACE}/id_rsa:/tmp/id_rsa \
+ -v ${WORKSPACE}/overcloudrc:/tmp/overcloudrc \
+ opnfv/cperf:$DOCKER_TAG \
+ /bin/bash -c "source /tmp/overcloudrc; mkdir -p \$HOME/.ssh; cp /tmp/id_rsa \$HOME/.ssh; \
+ $robot_cmd /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity/l2.robot;"
diff --git a/jjb/cperf/parse-node-yaml.py b/jjb/cperf/parse-node-yaml.py
new file mode 100644
index 000000000..5a7575540
--- /dev/null
+++ b/jjb/cperf/parse-node-yaml.py
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import sys
+import yaml
+
+
+def get_node_data_by_number(node_type, node_number):
+ node_idx = 1
+ for node_name, node_data in data['servers'].items():
+ if node_type == node_data['type']:
+ if node_idx == node_number:
+ return node_name, node_data
+ else:
+ node_idx += 1
+
+
+def get_node_value(node_type, node_number, key):
+ node_name, node_data = get_node_data_by_number(node_type, node_number)
+ if not key and node_name is not None:
+ return node_name
+ elif node_data and isinstance(node_data, dict) and key in node_data:
+ return node_data[key]
+
+
+def get_number_of_nodes(node_type):
+ nodes = data['servers']
+ num_nodes = 0
+ for node_name, node_data in nodes.items():
+ if node_data['type'] == node_type:
+ num_nodes += 1
+ return num_nodes
+
+
+FUNCTION_MAP = {'num_nodes':
+ {'func': get_number_of_nodes,
+ 'args': ['node_type']},
+ 'get_value':
+ {'func': get_node_value,
+ 'args': ['node_type', 'node_number', 'key']},
+ }
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('command', choices=FUNCTION_MAP.keys())
+ parser.add_argument('-f', '--file',
+ dest='node_file',
+ required=True)
+ parser.add_argument('--node-type',
+ default='controller',
+ required=False)
+ parser.add_argument('--node-number',
+ default=1,
+ type=int,
+ required=False)
+ parser.add_argument('-k', '--key',
+ required=False)
+ args = parser.parse_args(sys.argv[1:])
+ with open(args.node_file, 'r') as fh:
+ data = yaml.safe_load(fh)
+ assert 'servers' in data
+ func = FUNCTION_MAP[args.command]['func']
+ args = [getattr(args, x) for x in FUNCTION_MAP[args.command]['args']]
+ print(func(*args))
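For reference, a minimal sketch of the node file layout parse-node-yaml.py expects: a top-level servers map whose entries carry a type field and, for the address lookups done by cperf-robot-netvirt-csit.sh, an address key. The node names and IPs below are invented for illustration:

#!/bin/bash
# Hypothetical node.yaml matching the structure the parser reads
# (data['servers'][<name>]['type'] plus an 'address' value per node).
cat > /tmp/node.yaml << 'EOF'
servers:
  overcloud-controller-0:
    type: controller
    address: 192.0.2.10
  overcloud-novacompute-0:
    type: compute
    address: 192.0.2.20
EOF

# Count controller nodes (--node-type defaults to 'controller'):
python parse-node-yaml.py num_nodes --file /tmp/node.yaml
# -> 1

# Fetch the first controller's address, as cperf-robot-netvirt-csit.sh does:
python parse-node-yaml.py get_value -k address --node-number 1 --file /tmp/node.yaml
# -> 192.0.2.10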