Diffstat (limited to 'jjb/cperf')
-rw-r--r--   jjb/cperf/cperf-ci-jobs.yaml           | 112
-rwxr-xr-x   jjb/cperf/cperf-prepare-robot.sh       |  32
-rwxr-xr-x   jjb/cperf/cperf-robot-netvirt-csit.sh  | 105
-rw-r--r--   jjb/cperf/parse-node-yaml.py           |  71
4 files changed, 264 insertions(+), 56 deletions(-)
diff --git a/jjb/cperf/cperf-ci-jobs.yaml b/jjb/cperf/cperf-ci-jobs.yaml
index fdd3509d1..59afb89c8 100644
--- a/jjb/cperf/cperf-ci-jobs.yaml
+++ b/jjb/cperf/cperf-ci-jobs.yaml
@@ -9,47 +9,29 @@
# -------------------------------
# BRANCH ANCHORS
# -------------------------------
- master: &master
- stream: master
- branch: '{stream}'
- gs-pathname: ''
- docker-tag: 'latest'
- danube: &danube
- stream: danube
- branch: 'stable/{stream}'
- gs-pathname: '/{stream}'
- docker-tag: 'stable'
+ stream: master
+ branch: '{stream}'
+ gs-pathname: ''
+ docker-tag: 'latest'
- # -------------------------------
- # POD, INSTALLER, AND BRANCH MAPPING
- # -------------------------------
- pod:
- # -------------------------------
- # master
- # -------------------------------
- - intel-pod2:
- installer: apex
- <<: *master
- - intel-pod2:
- installer: apex
- <<: *danube
+ installer: apex
testsuite:
- - 'daily'
+ - csit
+ - cbench
jobs:
- - 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+ - 'cperf-{installer}-{testsuite}-{stream}'
################################
# job template
################################
- job-template:
- name: 'cperf-{installer}-{pod}-{testsuite}-{stream}'
+ name: 'cperf-{installer}-{testsuite}-{stream}'
concurrent: true
properties:
- - logrotate-default
- throttle:
enabled: true
max-per-node: 1
@@ -57,24 +39,17 @@
wrappers:
- build-name:
- name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME Scenario: $DEPLOY_SCENARIO'
+ name: '$BUILD_NUMBER Suite: $CPERF_SUITE_NAME ODL BRANCH: $ODL_BRANCH'
- timeout:
timeout: 400
abort: true
parameters:
- - project-parameter:
- project: '{project}'
- branch: '{branch}'
- - '{pod}-defaults'
- - '{installer}-defaults'
- cperf-parameter:
testsuite: '{testsuite}'
gs-pathname: '{gs-pathname}'
docker-tag: '{docker-tag}'
-
- scm:
- - git-scm
+ stream: '{stream}'
builders:
- 'cperf-{testsuite}-builder'
@@ -90,6 +65,14 @@
default: '{testsuite}'
description: "Suite name to run"
- string:
+ name: ODL_BRANCH
+ default: 'master'
+ description: "Branch that OpenDaylight is running"
+ - string:
+ name: OS_VERSION
+ default: 'master'
+ description: "OpenStack version (short name, no stable/ prefix)"
+ - string:
name: GS_PATHNAME
default: '{gs-pathname}'
description: "Version directory where the opnfv documents will be stored in gs repository"
@@ -101,6 +84,22 @@
name: DOCKER_TAG
default: '{docker-tag}'
description: 'Tag to pull docker image'
+ - string:
+ name: RC_FILE_PATH
+ default: ''
+ description: "Path to the OS credentials file if given"
+ - string:
+ name: SSH_KEY_PATH
+ default: ''
+ description: "Path to the private SSH key to access OPNFV nodes"
+ - string:
+ name: NODE_FILE_PATH
+ default: ''
+ description: "Path to the yaml file describing overcloud nodes"
+ - string:
+ name: ODL_CONTAINERIZED
+ default: 'true'
+ description: "boolean set true if ODL on overcloud is a container"
########################
# trigger macros
@@ -110,12 +109,26 @@
# builder macros
########################
- builder:
- name: cperf-daily-builder
+ name: cperf-csit-builder
+ builders:
+ - 'cperf-cleanup'
+ - 'cperf-prepare-robot'
+ - 'cperf-robot-netvirt-csit'
+
+- builder:
+ name: cperf-cbench-builder
builders:
- 'cperf-cleanup'
+ - 'cperf-prepare-robot'
- 'cperf-robot-cbench'
- builder:
+ name: cperf-prepare-robot
+ builders:
+ - shell:
+ !include-raw: ./cperf-prepare-robot.sh
+
+- builder:
name: cperf-robot-cbench
builders:
- shell: |
@@ -123,23 +136,6 @@
set -o errexit
set -o nounset
set -o pipefail
- undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
- grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
- INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
-
- sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc /tmp/overcloudrc
- sudo chmod 755 /tmp/overcloudrc
- source /tmp/overcloudrc
-
- # robot suites need the ssh key to log in to controller nodes, so throwing it
- # in tmp, and mounting /tmp as $HOME as far as robot is concerned
- sudo rm -rf /tmp/.ssh
- sudo mkdir /tmp/.ssh
- sudo chmod 0700 /tmp/.ssh
- sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa /tmp/.ssh/
- sudo chown -R jenkins-ci:jenkins-ci /tmp/.ssh
- # done with sudo. jenkins-ci is the user from this point
- chmod 0600 /tmp/.ssh/id_rsa
# cbench requires the openflow drop test feature to be installed.
sshpass -p karaf ssh -o StrictHostKeyChecking=no \
@@ -148,8 +144,6 @@
-p 8101 karaf@$SDN_CONTROLLER_IP \
feature:install odl-openflowplugin-flow-services-ui odl-openflowplugin-drop-test
- docker pull opnfv/cperf:$DOCKER_TAG
-
robot_cmd="pybot -e exclude -L TRACE -d /tmp \
-v ODL_SYSTEM_1_IP:${SDN_CONTROLLER_IP} \
-v ODL_SYSTEM_IP:${SDN_CONTROLLER_IP} \
@@ -165,6 +159,12 @@
docker run -i -v /tmp:/tmp opnfv/cperf:$DOCKER_TAG ${robot_cmd} ${robot_suite}
- builder:
+ name: cperf-robot-netvirt-csit
+ builders:
+ - shell:
+ !include-raw: ./cperf-robot-netvirt-csit.sh
+
+- builder:
name: cperf-cleanup
builders:
- shell: |
diff --git a/jjb/cperf/cperf-prepare-robot.sh b/jjb/cperf/cperf-prepare-robot.sh
new file mode 100755
index 000000000..d88c6d510
--- /dev/null
+++ b/jjb/cperf/cperf-prepare-robot.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# If no RC file was handed in, discover the undercloud and fetch overcloudrc from it.
+if [ -z "${RC_FILE_PATH:-}" ]; then
+  undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
+    grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+  INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk '{print $1}')
+  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/overcloudrc ${WORKSPACE}/overcloudrc
+else
+  cp -f $RC_FILE_PATH ${WORKSPACE}/overcloudrc
+fi
+
+sudo chmod 755 ${WORKSPACE}/overcloudrc
+source ${WORKSPACE}/overcloudrc
+
+# copy ssh key for robot
+
+if [ -z "${SSH_KEY_PATH:-}" ]; then
+  sudo scp -o StrictHostKeyChecking=no root@$INSTALLER_IP:/home/stack/.ssh/id_rsa ${WORKSPACE}/
+  sudo chown -R jenkins-ci:jenkins-ci ${WORKSPACE}/
+  # done with sudo. jenkins-ci is the user from this point
+  chmod 0600 ${WORKSPACE}/id_rsa
+else
+  # keep the name id_rsa so the robot container can mount ${WORKSPACE}/id_rsa
+  cp -f ${SSH_KEY_PATH} ${WORKSPACE}/id_rsa
+  chmod 0600 ${WORKSPACE}/id_rsa
+fi
+
+docker pull opnfv/cperf:$DOCKER_TAG
+
+sudo mkdir -p /tmp/robot_results
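For reference, the prepare step can be exercised outside Jenkins by exporting the same parameters the job template injects; the paths and tag below are illustrative assumptions, not values from the patch.

export WORKSPACE=$(mktemp -d)
export RC_FILE_PATH=$HOME/overcloudrc         # pre-fetched credentials file (assumed path)
export SSH_KEY_PATH=$HOME/.ssh/overcloud_key  # pre-fetched private key (assumed path)
export DOCKER_TAG=latest
bash jjb/cperf/cperf-prepare-robot.sh
ls $WORKSPACE   # should now contain overcloudrc and id_rsa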
diff --git a/jjb/cperf/cperf-robot-netvirt-csit.sh b/jjb/cperf/cperf-robot-netvirt-csit.sh
new file mode 100755
index 000000000..3ef747109
--- /dev/null
+++ b/jjb/cperf/cperf-robot-netvirt-csit.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source ${WORKSPACE}/overcloudrc
+# note SDN_CONTROLLER_IP is set in overcloudrc, which is the VIP
+# for admin/public network (since we are running single network deployment)
+
+if [ "$OS_VERSION" == 'master' ]; then
+ FULL_OS_VER='master'
+else
+ FULL_OS_VER="stable/${OS_VERSION}"
+fi
+
+if [ "$ODL_BRANCH" == 'master' ]; then
+  ODL_STREAM='fluorine'
+else
+ ODL_STREAM=${ODL_BRANCH}
+fi
+
+NUM_CONTROL_NODES=$(python ./parse-node-yaml.py num_nodes --file $NODE_FILE_PATH)
+NUM_COMPUTE_NODES=$(python ./parse-node-yaml.py num_nodes --node-type compute --file $NODE_FILE_PATH)
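The node file only needs to expose a top-level servers map whose entries carry type and address keys, since that is all parse-node-yaml.py reads. A minimal sketch, with made-up node names and addresses:

cat > /tmp/nodes.yaml <<'EOF'
servers:
  overcloud-controller-0:
    type: controller
    address: 192.0.2.10
  overcloud-novacompute-0:
    type: compute
    address: 192.0.2.20
EOF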
+
+idx=1
+EXTRA_ROBOT_ARGS=""
+for idx in `seq 1 $NUM_CONTROL_NODES`; do
+ CONTROLLER_IP=$(python ./parse-node-yaml.py get_value -k address --node-number ${idx} --file $NODE_FILE_PATH)
+ EXTRA_ROBOT_ARGS+=" -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+ -v OS_CONTROL_NODE_${idx}_IP:${CONTROLLER_IP} \
+ -v ODL_SYSTEM_${idx}_IP:${CONTROLLER_IP} \
+ -v HA_PROXY_${idx}_IP:${SDN_CONTROLLER_IP}"
+done
+
+idx=1
+for idx in `seq 1 $NUM_COMPUTE_NODES`; do
+ COMPUTE_IP=$(python ./parse-node-yaml.py get_value -k address --node-type compute --node-number ${idx} --file $NODE_FILE_PATH)
+ EXTRA_ROBOT_ARGS+=" -v OS_COMPUTE_${idx}_IP:${COMPUTE_IP}"
+done
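# For illustration: with the two-node sketch above (one controller at 192.0.2.10,
# one compute at 192.0.2.20), the loops would leave roughly
#   EXTRA_ROBOT_ARGS=" -v ODL_SYSTEM_1_IP:192.0.2.10 -v OS_CONTROL_NODE_1_IP:192.0.2.10
#                      -v HA_PROXY_1_IP:${SDN_CONTROLLER_IP} -v OS_COMPUTE_1_IP:192.0.2.20"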
+
+CONTROLLER_1_IP=$(python ./parse-node-yaml.py get_value -k address --node-number 1 --file $NODE_FILE_PATH)
+
+if [ "$ODL_CONTAINERIZED" == 'false' ]; then
+ EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:'ps axf | grep org.apache.karaf | grep -v grep | wc -l || echo 0' \
+ -v NODE_START_COMMAND:'sudo systemctl start opendaylight_api' \
+ -v NODE_KILL_COMMAND:'sudo systemctl stop opendaylight_api' \
+ -v NODE_STOP_COMMAND:'sudo systemctl stop opendaylight_api' \
+ -v NODE_FREEZE_COMMAND:'sudo systemctl stop opendaylight_api' "
+else
+ EXTRA_ROBOT_ARGS+=" -v NODE_KARAF_COUNT_COMMAND:\"sudo docker exec opendaylight_api /bin/bash -c 'ps axf | \
+ grep org.apache.karaf | grep -v grep | wc -l' || echo 0\" \
+ -v NODE_START_COMMAND:\"sudo docker start opendaylight_api\" \
+ -v NODE_KILL_COMMAND:\"sudo docker stop opendaylight_api\" \
+ -v NODE_STOP_COMMAND:\"sudo docker stop opendaylight_api\" \
+ -v NODE_FREEZE_COMMAND:\"sudo docker stop opendaylight_api\" "
+fi
+
+robot_cmd="pybot \
+ --removekeywords wuks \
+ --xunit robotxunit.xml \
+ -c critical \
+ -e exclude \
+ -d /tmp/robot_results \
+ -v BUNDLEFOLDER:/opt/opendaylight \
+ -v CONTROLLER_USER:heat-admin \
+ -v DEFAULT_LINUX_PROMPT:\$ \
+ -v DEFAULT_LINUX_PROMPT_STRICT:]\$ \
+ -v DEFAULT_USER:heat-admin \
+ -v DEVSTACK_DEPLOY_PATH:/tmp \
+ -v HA_PROXY_IP:$SDN_CONTROLLER_IP \
+ -v NUM_ODL_SYSTEM:$NUM_CONTROL_NODES \
+ -v NUM_OS_SYSTEM:$NUM_CONTROL_NODES \
+ -v NUM_TOOLS_SYSTEM:0 \
+ -v ODL_SNAT_MODE:conntrack \
+ -v ODL_STREAM:$ODL_STREAM \
+  -v ODL_SYSTEM_IP:$CONTROLLER_1_IP \
+ -v OS_CONTROL_NODE_IP:$CONTROLLER_1_IP \
+ -v OPENSTACK_BRANCH:$FULL_OS_VER \
+ -v OS_USER:heat-admin \
+ -v ODL_ENABLE_L3_FWD:yes \
+ -v ODL_SYSTEM_USER:heat-admin \
+ -v ODL_SYSTEM_PROMPT:\$ \
+ -v PRE_CLEAN_OPENSTACK_ALL:True \
+ -v PUBLIC_PHYSICAL_NETWORK:datacentre \
+ -v RESTCONFPORT:8081 \
+ -v ODL_RESTCONF_USER:admin \
+ -v ODL_RESTCONF_PASSWORD:admin \
+ -v KARAF_PROMPT_LOGIN:'opendaylight-user' \
+ -v KARAF_PROMPT:'opendaylight-user.*root.*>' \
+ -v SECURITY_GROUP_MODE:stateful \
+ -v USER:heat-admin \
+ -v USER_HOME:\$HOME \
+ -v TOOLS_SYSTEM_IP:'' \
+ -v NODE_ROLE_INDEX_START:0 \
+ -v WORKSPACE:/tmp \
+ $EXTRA_ROBOT_ARGS \
+ -v of_port:6653 "
+
+docker run -i --net=host \
+  -v ${WORKSPACE}/id_rsa:/tmp/id_rsa \
+  -v ${WORKSPACE}/overcloudrc:/tmp/overcloudrc \
+  -v /tmp/robot_results:/tmp/robot_results \
+  opnfv/cperf:$DOCKER_TAG \
+  /bin/bash -c "source /tmp/overcloudrc; mkdir -p \$HOME/.ssh; cp /tmp/id_rsa \$HOME/.ssh; \
+  $robot_cmd /home/opnfv/repos/odl_test/csit/suites/openstack/connectivity/l2.robot;"
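Assuming the prepare builder has already populated ${WORKSPACE} with overcloudrc and id_rsa, the wrapper can also be driven by hand with the same parameters the job template injects; every value below is a placeholder.

cd jjb/cperf   # the script resolves ./parse-node-yaml.py relative to the CWD
export WORKSPACE=/tmp/cperf-ws DOCKER_TAG=latest
export OS_VERSION=queens ODL_BRANCH=master
export ODL_CONTAINERIZED=true NODE_FILE_PATH=/tmp/nodes.yaml
bash ./cperf-robot-netvirt-csit.sh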
diff --git a/jjb/cperf/parse-node-yaml.py b/jjb/cperf/parse-node-yaml.py
new file mode 100644
index 000000000..5a7575540
--- /dev/null
+++ b/jjb/cperf/parse-node-yaml.py
@@ -0,0 +1,71 @@
+##############################################################################
+# Copyright (c) 2018 Tim Rozet (trozet@redhat.com) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+import argparse
+import sys
+import yaml
+
+
+def get_node_data_by_number(node_type, node_number):
+    # Return (name, data) for the node_number-th (1-based) server of the
+    # given type, or (None, None) if there are not that many servers.
+    node_idx = 1
+    for node_name, node_data in data['servers'].items():
+        if node_type == node_data['type']:
+            if node_idx == node_number:
+                return node_name, node_data
+            else:
+                node_idx += 1
+    return None, None
+
+
+def get_node_value(node_type, node_number, key):
+    # With no key, return the node's name; otherwise return the requested
+    # field (e.g. 'address') from the node's data.
+    node_name, node_data = get_node_data_by_number(node_type, node_number)
+    if not key and node_name is not None:
+        return node_name
+    elif node_data and isinstance(node_data, dict) and key in node_data:
+        return node_data[key]
+
+
+def get_number_of_nodes(node_type):
+ nodes = data['servers']
+ num_nodes = 0
+ for node_name, node_data in nodes.items():
+ if node_data['type'] == node_type:
+ num_nodes += 1
+ return num_nodes
+
+
+FUNCTION_MAP = {'num_nodes':
+ {'func': get_number_of_nodes,
+ 'args': ['node_type']},
+ 'get_value':
+ {'func': get_node_value,
+ 'args': ['node_type', 'node_number', 'key']},
+ }
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('command', choices=FUNCTION_MAP.keys())
+ parser.add_argument('-f', '--file',
+ dest='node_file',
+ required=True)
+ parser.add_argument('--node-type',
+ default='controller',
+ required=False)
+ parser.add_argument('--node-number',
+ default=1,
+ type=int,
+ required=False)
+ parser.add_argument('-k', '--key',
+ required=False)
+ args = parser.parse_args(sys.argv[1:])
+ with open(args.node_file, 'r') as fh:
+ data = yaml.safe_load(fh)
+ assert 'servers' in data
+ func = FUNCTION_MAP[args.command]['func']
+ args = [getattr(args, x) for x in FUNCTION_MAP[args.command]['args']]
+ print(func(*args))
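A quick sanity check of the helper against the node-file sketch shown earlier; the expected output follows each command as a comment.

python jjb/cperf/parse-node-yaml.py num_nodes --file /tmp/nodes.yaml
# 1    (--node-type defaults to 'controller')
python jjb/cperf/parse-node-yaml.py num_nodes --node-type compute --file /tmp/nodes.yaml
# 1
python jjb/cperf/parse-node-yaml.py get_value -k address --node-number 1 --file /tmp/nodes.yaml
# 192.0.2.10
python jjb/cperf/parse-node-yaml.py get_value --node-type compute --node-number 1 --file /tmp/nodes.yaml
# overcloud-novacompute-0   (without -k the node name is returned)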