-rw-r--r--  api/base.py | 11
-rw-r--r--  api/resources/env_action.py | 67
-rwxr-xr-x  docker/exec_tests.sh | 86
-rw-r--r--  docs/testing/user/userguide/04-installation.rst | 8
-rw-r--r--  samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml | 61
-rw-r--r--  samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml | 61
-rwxr-xr-x  tests/ci/clean_images.sh | 16
-rwxr-xr-x  tests/ci/load_images.sh | 30
-rwxr-xr-x  tests/ci/prepare_storperf_admin-rc.sh | 2
-rw-r--r--  tests/unit/benchmark/contexts/test_heat.py | 8
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_scenario_general.py | 2
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 150
-rw-r--r--  tests/unit/orchestrator/test_heat.py | 328
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 77
-rw-r--r--  yardstick/benchmark/contexts/model.py | 2
-rw-r--r--  yardstick/benchmark/core/task.py | 3
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py | 2
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash | 9
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash | 8
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash | 8
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash | 8
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py | 10
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py | 13
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 130
-rw-r--r--  yardstick/common/constants.py | 4
-rw-r--r--  yardstick/network_services/vnf_generic/vnfdgen.py | 18
-rw-r--r--  yardstick/orchestrator/heat.py | 93
-rw-r--r--  yardstick/resources/scripts/install/storperf.bash | 6
29 files changed, 1018 insertions, 205 deletions
diff --git a/api/base.py b/api/base.py
index 527008588..6fa2777ce 100644
--- a/api/base.py
+++ b/api/base.py
@@ -23,9 +23,16 @@ logger.setLevel(logging.DEBUG)
class ApiResource(Resource):
def _post_args(self):
- params = common_utils.translate_to_str(request.json)
- action = params.get('action', '')
+ data = request.json if request.json else {}
+ params = common_utils.translate_to_str(data)
+ action = params.get('action', request.form.get('action', ''))
args = params.get('args', {})
+
+ try:
+ args['file'] = request.files['file']
+ except KeyError:
+ pass
+
logger.debug('Input args is: action: %s, args: %s', action, args)
return action, args
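The new branch in _post_args lets an action arrive as a multipart form post: the action name is taken from request.form and the uploaded file from request.files['file'], which is what the upload_pod_file handler added below relies on. A minimal sketch of such a request, assuming a hypothetical host and endpoint path for the env-action resource:

    # multipart form post: 'action' comes from request.form,
    # the upload ends up in args['file'] via request.files['file']
    curl -X POST "http://<api-host>/yardstick/env/action" \
         -F 'action=upload_pod_file' \
         -F 'file=@pod.yaml'

JSON posts keep working as before, with action and args read from request.json.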
diff --git a/api/resources/env_action.py b/api/resources/env_action.py
index 7bfaf27a7..3536559b7 100644
--- a/api/resources/env_action.py
+++ b/api/resources/env_action.py
@@ -16,6 +16,8 @@ import threading
import time
import uuid
import glob
+import yaml
+import collections
from six.moves import configparser
from oslo_serialization import jsonutils
@@ -25,7 +27,7 @@ from api.database.handler import AsyncTaskHandler
from api.utils import influx
from api.utils.common import result_handler
from yardstick.common import constants as consts
-from yardstick.common import utils as yardstick_utils
+from yardstick.common import utils as common_utils
from yardstick.common import openstack_utils
from yardstick.common.httpClient import HttpClient
@@ -98,7 +100,9 @@ def _create_data_source():
def _create_grafana_container(client):
ports = [3000]
port_bindings = {k: k for k in ports}
- host_config = client.create_host_config(port_bindings=port_bindings)
+ restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+ host_config = client.create_host_config(port_bindings=port_bindings,
+ restart_policy=restart_policy)
container = client.create_container(image='%s:%s' % (consts.GRAFANA_IMAGE,
consts.GRAFANA_TAG),
@@ -150,7 +154,9 @@ def _create_influxdb_container(client):
ports = [8083, 8086]
port_bindings = {k: k for k in ports}
- host_config = client.create_host_config(port_bindings=port_bindings)
+ restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+ host_config = client.create_host_config(port_bindings=port_bindings,
+ restart_policy=restart_policy)
container = client.create_container(image='%s:%s' % (consts.INFLUXDB_IMAGE,
consts.INFLUXDB_TAG),
@@ -174,7 +180,7 @@ def _config_influxdb():
def _change_output_to_influxdb():
- yardstick_utils.makedirs(consts.CONF_DIR)
+ common_utils.makedirs(consts.CONF_DIR)
parser = configparser.ConfigParser()
parser.read(consts.CONF_SAMPLE_FILE)
@@ -230,11 +236,11 @@ def _prepare_env_daemon(task_id):
def _create_directories():
- yardstick_utils.makedirs(consts.CONF_DIR)
+ common_utils.makedirs(consts.CONF_DIR)
def _source_file(rc_file):
- yardstick_utils.source_env(rc_file)
+ common_utils.source_env(rc_file)
def _get_remote_rc_file(rc_file, installer_ip, installer_type):
@@ -307,3 +313,52 @@ def _update_task_error(task_id, error):
task = async_handler.get_task_by_taskid(task_id)
async_handler.update_status(task, 2)
async_handler.update_error(task, error)
+
+
+def update_openrc(args):
+ try:
+ openrc_vars = args['openrc']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'openrc must be provided')
+ else:
+ if not isinstance(openrc_vars, collections.Mapping):
+ return result_handler(consts.API_ERROR, 'args should be a dict')
+
+ lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+ logger.debug('Writing: %s', ''.join(lines))
+
+ logger.info('Writing openrc: Writing')
+ common_utils.makedirs(consts.CONF_DIR)
+
+ with open(consts.OPENRC, 'w') as f:
+ f.writelines(lines)
+ logger.info('Writing openrc: Done')
+
+ logger.info('Source openrc: Sourcing')
+ try:
+ _source_file(consts.OPENRC)
+ except Exception as e:
+ logger.exception('Failed to source openrc')
+ return result_handler(consts.API_ERROR, str(e))
+ logger.info('Source openrc: Done')
+
+ return result_handler(consts.API_SUCCESS, {'openrc': openrc_vars})
+
+
+def upload_pod_file(args):
+ try:
+ pod_file = args['file']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'file must be provided')
+
+ logger.info('Checking file')
+ data = yaml.load(pod_file.read())
+ if not isinstance(data, collections.Mapping):
+ return result_handler(consts.API_ERROR, 'invalid yaml file')
+
+ logger.info('Writing file')
+ with open(consts.POD_FILE, 'w') as f:
+ yaml.dump(data, f, default_flow_style=False)
+ logger.info('Writing finished')
+
+ return result_handler(consts.API_SUCCESS, {'pod_info': data})
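update_openrc expects args['openrc'] to be a mapping of variable names to values; it writes them as export lines to the configured openrc path, sources the file and returns the variables on success, while non-mapping input is rejected with API_ERROR. A sketch of a matching request, with a hypothetical endpoint and purely illustrative credential values:

    curl -X POST "http://<api-host>/yardstick/env/action" \
         -H 'Content-Type: application/json' \
         -d '{"action": "update_openrc",
              "args": {"openrc": {"OS_AUTH_URL": "http://192.168.1.1:5000/v3",
                                  "OS_USERNAME": "admin",
                                  "OS_PASSWORD": "secret",
                                  "OS_PROJECT_NAME": "admin"}}}'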
diff --git a/docker/exec_tests.sh b/docker/exec_tests.sh
index 9726e2b4a..46e5a05bd 100755
--- a/docker/exec_tests.sh
+++ b/docker/exec_tests.sh
@@ -17,39 +17,87 @@ set -e
: ${RELENG_REPO:='https://gerrit.opnfv.org/gerrit/releng'}
: ${RELENG_REPO_DIR:='/home/opnfv/repos/releng'}
# TEMP HACK to freeze releng version to workaround fetch_os_creds.sh problem
-: ${RELENG_BRANCH:='abbf19f'} # branch, tag, sha1 or refspec
+: ${RELENG_BRANCH:='master'} # branch, tag, sha1 or refspec
+
+# git update using reference as a branch.
+# git_update_branch ref
+function git_update_branch {
+ local git_branch=$1
+
+ git checkout -f origin/${git_branch}
+ # a local branch might not exist
+ git branch -D ${git_branch} || true
+ git checkout -b ${git_branch}
+}
+
+# git update using reference as a branch.
+# git_update_remote_branch ref
+function git_update_remote_branch {
+ local git_branch=$1
+
+ git checkout -b ${git_branch} -t origin/${git_branch}
+}
+
+# git update using reference as a tag. Be careful editing source at that repo
+# as working copy will be in a detached mode
+# git_update_tag ref
+function git_update_tag {
+ local git_tag=$1
+
+ git tag -d ${git_tag}
+ # fetching given tag only
+ git fetch origin tag ${git_tag}
+ git checkout -f ${git_tag}
+}
+
+
+# OpenStack Functions
git_checkout()
{
- if git cat-file -e $1^{commit} 2>/dev/null; then
- # branch, tag or sha1 object
- git checkout $1 && git pull
- else
+ local git_ref=$1
+ if [[ -n "$(git show-ref refs/tags/${git_ref})" ]]; then
+ git_update_tag "${git_ref}"
+ elif [[ -n "$(git show-ref refs/heads/${git_ref})" ]]; then
+ git_update_branch "${git_ref}"
+ elif [[ -n "$(git show-ref refs/remotes/origin/${git_ref})" ]]; then
+ git_update_remote_branch "${git_ref}"
+ # check to see if it is a remote ref
+ elif git fetch --tags origin "${git_ref}"; then
# refspec / changeset
- git fetch --tags --progress $2 $1
git checkout FETCH_HEAD
+ else
+ # if we are a random commit id we have to unshallow
+ # to get all the commits
+ git fetch --unshallow origin
+ git checkout -f "${git_ref}"
fi
}
echo
-echo "INFO: Updating releng -> $RELENG_BRANCH"
-if [ ! -d $RELENG_REPO_DIR ]; then
- git clone $RELENG_REPO $RELENG_REPO_DIR
+echo "INFO: Updating releng -> ${RELENG_BRANCH}"
+if [ ! -d ${RELENG_REPO_DIR} ]; then
+ git clone ${RELENG_REPO} ${RELENG_REPO_DIR}
fi
-cd $RELENG_REPO_DIR
-git checkout master
-git_checkout $RELENG_BRANCH $RELENG_REPO
+cd ${RELENG_REPO_DIR}
+# reset remote so we know origin is valid
+git remote set-url origin ${RELENG_REPO}
+# fetch the exact ref
+git fetch --tags origin ${RELENG_BRANCH} || true
+# purge pyc files
+find . -name '*.pyc' -delete
+git_checkout ${RELENG_BRANCH}
echo
-echo "INFO: Updating yardstick -> $YARDSTICK_BRANCH"
-if [ ! -d $YARDSTICK_REPO_DIR ]; then
- git clone $YARDSTICK_REPO $YARDSTICK_REPO_DIR
+echo "INFO: Updating yardstick -> ${YARDSTICK_BRANCH}"
+if [ ! -d ${YARDSTICK_REPO_DIR} ]; then
+ git clone ${YARDSTICK_REPO} ${YARDSTICK_REPO_DIR}
fi
-cd $YARDSTICK_REPO_DIR
-git_checkout $YARDSTICK_BRANCH $YARDSTICK_REPO
+cd ${YARDSTICK_REPO_DIR}
+git_checkout ${YARDSTICK_BRANCH}
# setup the environment
-source $YARDSTICK_REPO_DIR/tests/ci/prepare_env.sh
+source ${YARDSTICK_REPO_DIR}/tests/ci/prepare_env.sh
# execute tests
-$YARDSTICK_REPO_DIR/tests/ci/yardstick-verify $@
+${YARDSTICK_REPO_DIR}/tests/ci/yardstick-verify "$@"
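With the releng pin removed, RELENG_BRANCH defaults to master and git_checkout now resolves, in order, tags, local branches, remote branches, fetchable refspecs and finally bare commit ids (after unshallowing). Illustrative invocations, where the ref values are only examples:

    # pin releng to a tag, a remote branch, or a plain commit id
    RELENG_BRANCH=danube.1.0    ./docker/exec_tests.sh
    RELENG_BRANCH=stable/danube ./docker/exec_tests.sh
    RELENG_BRANCH=abbf19f       ./docker/exec_tests.sh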
diff --git a/docs/testing/user/userguide/04-installation.rst b/docs/testing/user/userguide/04-installation.rst
index 0c2bb58cf..660f3b5a8 100644
--- a/docs/testing/user/userguide/04-installation.rst
+++ b/docs/testing/user/userguide/04-installation.rst
@@ -149,7 +149,12 @@ In the Yardstick container, the Yardstick repository is located in the ``/home/o
yardstick env prepare
-**NOTE**: The above command just works for four OPNFV installers -- **Apex**, **Compass**, **Fuel** and **Joid**.
+**NOTE**: The above command works for four OPNFV installers -- **Apex**, **Compass**, **Fuel** and **Joid**.
+For an OpenStack environment deployed without one of these OPNFV installers, the above command can still be used to
+configure the environment, but you must first create the /etc/yardstick/openstack.creds file and store the required
+OpenStack environment variables in it. For details of the required OpenStack environment variables, please refer to
+the section **Export OpenStack environment variables**.
+
The env prepare command may take up to 6-8 minutes to finish building
yardstick-image and other environment preparation. Meanwhile if you wish to
monitor the env prepare process, you can enter the Yardstick container in a new
@@ -506,3 +511,4 @@ yaml file and add test cases, constraint or task arguments if necessary.
Proxy Support (**Todo**)
---------------------------
+
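The note above asks for /etc/yardstick/openstack.creds to exist before running "yardstick env prepare" against a non-OPNFV deployment. A minimal sketch of what such a file might contain, using the standard OpenStack variable names with illustrative values (the authoritative list is the **Export OpenStack environment variables** section):

    export OS_AUTH_URL=http://192.168.1.1:5000/v3
    export OS_USERNAME=admin
    export OS_PASSWORD=secret
    export OS_PROJECT_NAME=admin
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_DOMAIN_NAME=Default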
diff --git a/samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml b/samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml
new file mode 100644
index 000000000..8826f539e
--- /dev/null
+++ b/samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml
@@ -0,0 +1,61 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: NSPerf
+ traffic_profile: ../../traffic_profiles/fixed.yaml
+ topology: ping_tg_topology.yaml
+
+ nodes:
+ tg__1: trafficgen_1.baremetal
+ vnf__1: vnf.yardstick
+
+ runner:
+ type: Duration
+ duration: 10
+
+contexts:
+ - name: yardstick
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ external_network: "yardstick-public"
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public
+
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private
+
+ - name: baremetal
+ type: Node
+ file: baremetal-pod.yaml
diff --git a/samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml b/samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml
new file mode 100644
index 000000000..394523ffa
--- /dev/null
+++ b/samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml
@@ -0,0 +1,61 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: NSPerf
+ traffic_profile: ../../traffic_profiles/fixed.yaml
+ topology: ping_tg_topology.yaml
+
+ nodes:
+ tg__1: trafficgen_1.yardstick
+ vnf__1: vnf.yardstick
+
+ runner:
+ type: Duration
+ duration: 10
+
+context:
+ name: yardstick
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+ trafficgen_1:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ external_network: "yardstick-public"
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private
+
+
diff --git a/tests/ci/clean_images.sh b/tests/ci/clean_images.sh
index 27da9e279..cb5410ab2 100755
--- a/tests/ci/clean_images.sh
+++ b/tests/ci/clean_images.sh
@@ -15,18 +15,24 @@ cleanup()
echo
echo "========== Cleanup =========="
- if ! openstack image list; then
+ if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+ else
+ SECURE=""
+ fi
+
+ if ! openstack "${SECURE}" image list; then
return
fi
- for image in $(openstack image list | grep -e cirros-0.3.5 -e yardstick-image -e Ubuntu-16.04 \
+ for image in $(openstack "${SECURE}" image list | grep -e cirros-0.3.5 -e yardstick-image -e Ubuntu-16.04 \
| awk '{print $2}'); do
echo "Deleting image $image..."
- openstack image delete $image || true
+ openstack "${SECURE}" image delete $image || true
done
- openstack flavor delete yardstick-flavor &> /dev/null || true
- openstack flavor delete storperf &> /dev/null || true
+ openstack "${SECURE}" flavor delete yardstick-flavor &> /dev/null || true
+ openstack "${SECURE}" flavor delete storperf &> /dev/null || true
}
main()
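One caveat with this pattern (not introduced by this patch's intent): when OS_CACERT does not ask for --insecure, SECURE is the empty string, and the quoted "${SECURE}" still expands to one empty positional argument, which can confuse the openstack client's command parsing. Two common alternatives, shown only as a sketch:

    # 1) leave the expansion unquoted so an empty value disappears entirely
    openstack ${SECURE} image list

    # 2) or collect optional arguments in a bash array, which expands to
    #    zero arguments when empty
    OS_ARGS=()
    if [ "$(echo "${OS_CACERT}" | tr '[:upper:]' '[:lower:]')" = "false" ]; then
        OS_ARGS+=(--insecure)
    fi
    openstack "${OS_ARGS[@]}" image list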
diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 487f33e33..054621c05 100755
--- a/tests/ci/load_images.sh
+++ b/tests/ci/load_images.sh
@@ -88,7 +88,7 @@ load_yardstick_image()
if [ ! -f "${CLOUD_KERNEL}" ]; then
tar xf "${CLOUD_IMAGE}" "${CLOUD_KERNEL##**/}"
fi
- create_kernel=$(openstack image create \
+ create_kernel=$(openstack "${SECURE}" image create \
--public \
--disk-format qcow2 \
--container-format bare \
@@ -119,7 +119,7 @@ load_yardstick_image()
fi
if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
- output=$(eval openstack image create \
+ output=$(eval openstack "${SECURE}" image create \
--public \
--disk-format raw \
--container-format bare \
@@ -127,7 +127,7 @@ load_yardstick_image()
--file ${RAW_IMAGE} \
yardstick-image)
else
- output=$(eval openstack image create \
+ output=$(eval openstack "${SECURE}" image create \
--public \
--disk-format qcow2 \
--container-format bare \
@@ -150,7 +150,7 @@ load_yardstick_image()
load_cirros_image()
{
- if [[ -n $(openstack image list | grep -e Cirros-0.3.5) ]]; then
+ if [[ -n $(openstack "${SECURE}" image list | grep -e Cirros-0.3.5) ]]; then
echo "Cirros-0.3.5 image already exist, skip loading cirros image"
else
echo
@@ -164,7 +164,7 @@ load_cirros_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- output=$(openstack image create \
+ output=$(openstack "${SECURE}" image create \
--disk-format qcow2 \
--container-format bare \
${EXTRA_PARAMS} \
@@ -195,7 +195,7 @@ load_ubuntu_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- output=$(openstack image create \
+ output=$(openstack "${SECURE}" image create \
--disk-format qcow2 \
--container-format bare \
$EXTRA_PARAMS \
@@ -215,26 +215,26 @@ load_ubuntu_image()
create_nova_flavor()
{
- if ! openstack flavor list | grep -q yardstick-flavor; then
+ if ! openstack "${SECURE}" flavor list | grep -q yardstick-flavor; then
echo
echo "========== Creating yardstick-flavor =========="
# Create the nova flavor used by some sample test cases
- openstack flavor create --id 100 --ram 1024 --disk 3 --vcpus 1 yardstick-flavor
+ openstack "${SECURE}" flavor create --id 100 --ram 1024 --disk 3 --vcpus 1 yardstick-flavor
# DPDK-enabled OVS requires guest memory to be backed by large pages
if [[ $DEPLOY_SCENARIO == *[_-]ovs[_-]* ]]; then
- openstack flavor set --property hw:mem_page_size=large yardstick-flavor
+ openstack "${SECURE}" flavor set --property hw:mem_page_size=large yardstick-flavor
fi
# VPP requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-fdio-"* ]]; then
- openstack flavor set --property hw:mem_page_size=large yardstick-flavor
+ openstack "${SECURE}" flavor set --property hw:mem_page_size=large yardstick-flavor
fi
fi
- if ! openstack flavor list | grep -q storperf; then
+ if ! openstack "${SECURE}" flavor list | grep -q storperf; then
echo
echo "========== Creating storperf flavor =========="
# Create the nova flavor used by storperf test case
- openstack flavor create --id auto --ram 8192 --disk 4 --vcpus 2 storperf
+ openstack "${SECURE}" flavor create --id auto --ram 8192 --disk 4 --vcpus 2 storperf
fi
}
@@ -250,6 +250,12 @@ main()
RAW_IMAGE='/home/opnfv/images/yardstick-image.tar.gz'
fi
+ if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+ else
+ SECURE=""
+ fi
+
build_yardstick_image
load_yardstick_image
if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
diff --git a/tests/ci/prepare_storperf_admin-rc.sh b/tests/ci/prepare_storperf_admin-rc.sh
index a6cf97bef..979728e84 100755
--- a/tests/ci/prepare_storperf_admin-rc.sh
+++ b/tests/ci/prepare_storperf_admin-rc.sh
@@ -33,3 +33,5 @@ echo "OS_PROJECT_ID="$PROJECT_ID >> ~/storperf_admin-rc
echo "OS_TENANT_NAME="$TENANT_NAME >> ~/storperf_admin-rc
echo "OS_TENANT_ID="$TENANT_ID >> ~/storperf_admin-rc
echo "OS_USER_DOMAIN_ID="$USER_DOMAIN_ID >> ~/storperf_admin-rc
+echo "OS_PROJECT_DOMAIN_NAME="$OS_PROJECT_DOMAIN_NAME >> ~/storperf_admin-rc
+echo "OS_USER_DOMAIN_NAME="$OS_USER_DOMAIN_NAME >> ~/storperf_admin-rc
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/tests/unit/benchmark/contexts/test_heat.py
index d878ebe97..3dadd48eb 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/tests/unit/benchmark/contexts/test_heat.py
@@ -17,6 +17,7 @@ import logging
import os
import unittest
import uuid
+from collections import OrderedDict
import mock
@@ -37,7 +38,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(self.test_context.name)
self.assertIsNone(self.test_context.stack)
- self.assertEqual(self.test_context.networks, [])
+ self.assertEqual(self.test_context.networks, OrderedDict())
self.assertEqual(self.test_context.servers, [])
self.assertEqual(self.test_context.placement_groups, [])
self.assertEqual(self.test_context.server_groups, [])
@@ -105,7 +106,9 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
netattrs = {'cidr': '10.0.0.0/24', 'provider': None, 'external_network': 'ext_net'}
self.mock_context.name = 'bar'
- self.test_context.networks = [model.Network("fool-network", self.mock_context, netattrs)]
+ self.test_context.networks = OrderedDict(
+ {"fool-network": model.Network("fool-network", self.mock_context,
+ netattrs)})
self.test_context._add_resources_to_template(mock_template)
mock_template.add_keypair.assert_called_with(
@@ -122,6 +125,7 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.name = 'foo'
self.test_context.template_file = '/bar/baz/some-heat-file'
self.test_context.heat_parameters = {'image': 'cirros'}
+ self.test_context.heat_timeout = 5
self.test_context.deploy()
mock_template.assert_called_with(self.test_context.name,
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index ea54fbb9b..de2170b16 100644
--- a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -56,6 +56,7 @@ class ScenarioGeneralTestCase(unittest.TestCase):
mock_obj = mock.Mock()
mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
ins.director = mock_obj
+ ins.director.data = {}
ins.run({})
ins.teardown()
@@ -64,5 +65,6 @@ class ScenarioGeneralTestCase(unittest.TestCase):
mock_obj = mock.Mock()
mock_obj.verify.return_value = False
ins.director = mock_obj
+ ins.director.data = {}
ins.run({})
ins.teardown()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 4167d6f3b..111e7812e 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -20,12 +20,13 @@
from __future__ import absolute_import
import os
+import errno
import unittest
-
import mock
from yardstick.benchmark.scenarios.networking.vnf_generic import \
- SshManager, NetworkServiceTestCase, IncorrectConfig, IncorrectSetup
+ SshManager, NetworkServiceTestCase, IncorrectConfig, \
+ IncorrectSetup, open_relative_file
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic.vnf.base import \
GenericTrafficGen, GenericVNF
@@ -288,6 +289,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
}
self.scenario_cfg = {
+ 'task_path': "",
'tc_options': {'rfc2544': {'allowed_drop_rate': '0.8 - 1'}},
'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
'tc': 'tc_ipv4_1Mflow_64B_packetsize',
@@ -350,7 +352,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
vnf = mock.Mock(autospec=GenericVNF)
self.s.get_vnf_impl = mock.Mock(return_value=vnf)
- self.assertIsNotNone(self.s.load_vnf_models(self.context_cfg))
+ self.assertIsNotNone(
+ self.s.load_vnf_models(self.scenario_cfg, self.context_cfg))
def test_map_topology_to_infrastructure(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -488,3 +491,144 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.collector.stop = \
mock.Mock(return_value=True)
self.assertIsNone(self.s.teardown())
+
+ SAMPLE_NETDEVS = {
+ 'enp11s0': {
+ 'address': '0a:de:ad:be:ef:f5',
+ 'device': '0x1533',
+ 'driver': 'igb',
+ 'ifindex': '2',
+ 'interface_name': 'enp11s0',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:0b:00.0',
+ 'subsystem_device': '0x1533',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ },
+ 'lan': {
+ 'address': '0a:de:ad:be:ef:f4',
+ 'device': '0x153a',
+ 'driver': 'e1000e',
+ 'ifindex': '3',
+ 'interface_name': 'lan',
+ 'operstate': 'up',
+ 'pci_bus_id': '0000:00:19.0',
+ 'subsystem_device': '0x153a',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ }
+ }
+ SAMPLE_VM_NETDEVS = {
+ 'eth1': {
+ 'address': 'fa:de:ad:be:ef:5b',
+ 'device': '0x0001',
+ 'driver': 'virtio_net',
+ 'ifindex': '3',
+ 'interface_name': 'eth1',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:00:04.0',
+ 'vendor': '0x1af4'
+ }
+ }
+
+ def test_parse_netdev_info(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/ifindex:2
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/address:0a:de:ad:be:ef:f5
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/operstate:down
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/driver:igb
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/pci_bus_id:0000:0b:00.0
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/ifindex:3
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/address:0a:de:ad:be:ef:f4
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/operstate:up
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/driver:e1000e
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/pci_bus_id:0000:00:19.0
+"""
+ res = NetworkServiceTestCase.parse_netdev_info(output)
+ assert res == self.SAMPLE_NETDEVS
+
+ def test_parse_netdev_info_virtio(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/ifindex:3
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/address:fa:de:ad:be:ef:5b
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/operstate:down
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/vendor:0x1af4
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/device:0x0001
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/driver:virtio_net
+"""
+ res = NetworkServiceTestCase.parse_netdev_info(output)
+ assert res == self.SAMPLE_VM_NETDEVS
+
+ def test_sort_dpdk_port_num(self):
+ netdevs = self.SAMPLE_NETDEVS.copy()
+ NetworkServiceTestCase._sort_dpdk_port_num(netdevs)
+ assert netdevs['lan']['dpdk_port_num'] == 1
+ assert netdevs['enp11s0']['dpdk_port_num'] == 2
+
+ def test_probe_missing_values(self):
+ netdevs = self.SAMPLE_NETDEVS.copy()
+ NetworkServiceTestCase._sort_dpdk_port_num(netdevs)
+ network = {'local_mac': '0a:de:ad:be:ef:f5'}
+ NetworkServiceTestCase._probe_missing_values(netdevs, network, set())
+ assert network['dpdk_port_num'] == 2
+
+ network = {'local_mac': '0a:de:ad:be:ef:f4'}
+ NetworkServiceTestCase._probe_missing_values(netdevs, network, set())
+ assert network['dpdk_port_num'] == 1
+
+ def test_open_relative_path(self):
+ mock_open = mock.mock_open()
+ mock_open_result = mock_open()
+ mock_open_call_count = 1 # initial call to get result
+
+ module_name = \
+ 'yardstick.benchmark.scenarios.networking.vnf_generic.open'
+
+ # test
+ with mock.patch(module_name, mock_open, create=True):
+ self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+
+ mock_open_call_count += 1 # one more call expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertNotIn('bar', mock_open.call_args_list[-1][0][0])
+
+ def open_effect(*args, **kwargs):
+ if kwargs.get('name', args[0]) == os.path.join('bar', 'foo'):
+ return mock_open_result
+ raise IOError(errno.ENOENT, 'not found')
+
+ mock_open.side_effect = open_effect
+ self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+
+ mock_open_call_count += 2 # two more calls expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertIn('bar', mock_open.call_args_list[-1][0][0])
+
+ # test an IOError of type ENOENT
+ mock_open.side_effect = IOError(errno.ENOENT, 'not found')
+ with self.assertRaises(IOError):
+ # the second call still raises
+ open_relative_file('foo', 'bar')
+
+ mock_open_call_count += 2 # two more calls expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertIn('bar', mock_open.call_args_list[-1][0][0])
+
+ # test an IOError other than ENOENT
+ mock_open.side_effect = IOError(errno.EBUSY, 'busy')
+ with self.assertRaises(IOError):
+ open_relative_file('foo', 'bar')
+
+ mock_open_call_count += 1 # one more call expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
diff --git a/tests/unit/orchestrator/test_heat.py b/tests/unit/orchestrator/test_heat.py
index 4892f98f8..3b3873301 100644
--- a/tests/unit/orchestrator/test_heat.py
+++ b/tests/unit/orchestrator/test_heat.py
@@ -10,16 +10,43 @@
##############################################################################
# Unittest for yardstick.benchmark.orchestrator.heat
-
+from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import unittest
import uuid
+import time
import mock
from yardstick.benchmark.contexts import node
from yardstick.orchestrator import heat
+TARGET_MODULE = 'yardstick.orchestrator.heat'
+
+
+def mock_patch_target_module(inner_import):
+ return mock.patch('.'.join([TARGET_MODULE, inner_import]))
+
+
+@contextmanager
+def timer():
+ start = time.time()
+ data = {'start': start}
+ try:
+ yield data
+ finally:
+ data['end'] = end = time.time()
+ data['delta'] = end - start
+
+def get_error_message(error):
+ try:
+ # py2
+ return error.message
+ except AttributeError:
+ # py3
+ return next((arg for arg in error.args if isinstance(arg, str)), None)
+
+
class HeatContextTestCase(unittest.TestCase):
def test_get_short_key_uuid(self):
@@ -70,88 +97,245 @@ class HeatTemplateTestCase(unittest.TestCase):
self.assertEqual(self.template.resources['some-server-group']['properties']['policies'], ['anti-affinity'])
def test__add_resources_to_template_raw(self):
-
- self.test_context = node.NodeContext()
- self.test_context.name = 'foo'
- self.test_context.template_file = '/tmp/some-heat-file'
- self.test_context.heat_parameters = {'image': 'cirros'}
- self.test_context.key_filename = "/tmp/1234"
- self.test_context.keypair_name = "foo-key"
- self.test_context.secgroup_name = "foo-secgroup"
- self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
- self._template = {
- 'outputs' : {},
- 'resources' : {}
- }
-
- self.heat_object = heat.HeatObject()
- self.heat_tmp_object = heat.HeatObject()
-
- self.heat_stack = heat.HeatStack("tmpStack")
- self.heat_stack.stacks_exist()
-
- self.test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t')
- self.test_context.tmpfile.write("heat_template_version: 2015-04-30")
- self.test_context.tmpfile.flush()
- self.test_context.tmpfile.seek(0)
- self.heat_tmp_template = heat.HeatTemplate(self.heat_tmp_object, self.test_context.tmpfile.name,
- heat_parameters= {"dict1": 1, "dict2": 2})
-
- self.heat_template = heat.HeatTemplate(self.heat_object)
- self.heat_template.resources = {}
-
- self.heat_template.add_network("network1")
- self.heat_template.add_network("network2")
- self.heat_template.add_security_group("sec_group1")
- self.heat_template.add_security_group("sec_group2")
- self.heat_template.add_subnet("subnet1", "network1", "cidr1")
- self.heat_template.add_subnet("subnet2", "network2", "cidr2")
- self.heat_template.add_router("router1", "gw1", "subnet1")
- self.heat_template.add_router_interface("router_if1", "router1", "subnet1")
- self.heat_template.add_port("port1", "network1", "subnet1")
- self.heat_template.add_port("port2", "network2", "subnet2", sec_group_id="sec_group1",provider="not-sriov")
- self.heat_template.add_port("port3", "network2", "subnet2", sec_group_id="sec_group1",provider="sriov")
- self.heat_template.add_floating_ip("floating_ip1", "network1", "port1", "router_if1")
- self.heat_template.add_floating_ip("floating_ip2", "network2", "port2", "router_if2", "foo-secgroup")
- self.heat_template.add_floating_ip_association("floating_ip1_association", "floating_ip1", "port1")
- self.heat_template.add_servergroup("server_grp2", "affinity")
- self.heat_template.add_servergroup("server_grp3", "anti-affinity")
- self.heat_template.add_security_group("security_group")
- self.heat_template.add_server(name="server1", image="image1", flavor="flavor1", flavors=[])
- self.heat_template.add_server_group(name="servergroup", policies=["policy1","policy2"])
- self.heat_template.add_server_group(name="servergroup", policies="policy1")
- self.heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=[], ports=["port1", "port2"],
+ test_context = node.NodeContext()
+ test_context.name = 'foo'
+ test_context.template_file = '/tmp/some-heat-file'
+ test_context.heat_parameters = {'image': 'cirros'}
+ test_context.key_filename = "/tmp/1234"
+ test_context.keypair_name = "foo-key"
+ test_context.secgroup_name = "foo-secgroup"
+ test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
+ heat_object = heat.HeatObject()
+
+ heat_stack = heat.HeatStack("tmpStack")
+ self.assertTrue(heat_stack.stacks_exist())
+
+ test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t')
+ test_context.tmpfile.write("heat_template_version: 2015-04-30")
+ test_context.tmpfile.flush()
+ test_context.tmpfile.seek(0)
+ heat_template = heat.HeatTemplate(heat_object)
+ heat_template.resources = {}
+
+ heat_template.add_network("network1")
+ heat_template.add_network("network2")
+ heat_template.add_security_group("sec_group1")
+ heat_template.add_security_group("sec_group2")
+ heat_template.add_subnet("subnet1", "network1", "cidr1")
+ heat_template.add_subnet("subnet2", "network2", "cidr2")
+ heat_template.add_router("router1", "gw1", "subnet1")
+ heat_template.add_router_interface("router_if1", "router1", "subnet1")
+ heat_template.add_port("port1", "network1", "subnet1")
+ heat_template.add_port("port2", "network2", "subnet2", sec_group_id="sec_group1",provider="not-sriov")
+ heat_template.add_port("port3", "network2", "subnet2", sec_group_id="sec_group1",provider="sriov")
+ heat_template.add_floating_ip("floating_ip1", "network1", "port1", "router_if1")
+ heat_template.add_floating_ip("floating_ip2", "network2", "port2", "router_if2", "foo-secgroup")
+ heat_template.add_floating_ip_association("floating_ip1_association", "floating_ip1", "port1")
+ heat_template.add_servergroup("server_grp2", "affinity")
+ heat_template.add_servergroup("server_grp3", "anti-affinity")
+ heat_template.add_security_group("security_group")
+ heat_template.add_server(name="server1", image="image1", flavor="flavor1", flavors=[])
+ heat_template.add_server_group(name="servergroup", policies=["policy1","policy2"])
+ heat_template.add_server_group(name="servergroup", policies="policy1")
+ heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=[], ports=["port1", "port2"],
networks=["network1", "network2"], scheduler_hints="hints1", user="user1",
key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2},
additional_properties={"prop1": 1, "prop2": 2})
- self.heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor1", "flavor2"],
+ heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor1", "flavor2"],
ports=["port1", "port2"],
networks=["network1", "network2"], scheduler_hints="hints1", user="user1",
key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2},
additional_properties={"prop1": 1, "prop2": 2} )
- self.heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor3", "flavor4"],
+ heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor3", "flavor4"],
ports=["port1", "port2"],
networks=["network1", "network2"], scheduler_hints="hints1", user="user1",
key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2},
additional_properties={"prop1": 1, "prop2": 2})
- self.heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,extra_specs={"cat": 1, "dog": 2})
- self.heat_template.add_flavor(name=None, vcpus=1, ram=2048)
- self.heat_template.add_server(name="server1",
- image="image1",
- flavor="flavor1",
- flavors=[],
- ports=["port1", "port2"],
- networks=["network1", "network2"],
- scheduler_hints="hints1",
- user="user1",
- key_name="foo-key",
- user_data="user",
- metadata={"cat": 1, "doc": 2},
- additional_properties= {"prop1": 1, "prop2": 2} )
- self.heat_template.add_network("network1")
-
- self.heat_template.add_flavor("test")
- self.assertEqual(self.heat_template.resources['test']['type'], 'OS::Nova::Flavor')
+ heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,extra_specs={"cat": 1, "dog": 2})
+ heat_template.add_flavor(name=None, vcpus=1, ram=2048)
+ heat_template.add_server(name="server1",
+ image="image1",
+ flavor="flavor1",
+ flavors=[],
+ ports=["port1", "port2"],
+ networks=["network1", "network2"],
+ scheduler_hints="hints1",
+ user="user1",
+ key_name="foo-key",
+ user_data="user",
+ metadata={"cat": 1, "doc": 2},
+ additional_properties= {"prop1": 1, "prop2": 2} )
+ heat_template.add_network("network1")
+
+ heat_template.add_flavor("test")
+ self.assertEqual(heat_template.resources['test']['type'], 'OS::Nova::Flavor')
+
+ @mock_patch_target_module('op_utils')
+ @mock_patch_target_module('heatclient.client.Client')
+ def test_create_negative(self, mock_heat_client_class, mock_op_utils):
+ self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+ mock_heat_client = mock_heat_client_class() # get the constructed mock
+
+ # populate attributes of the constructed mock
+ mock_heat_client.stacks.get().stack_status_reason = 'the reason'
+
+ expected_status_calls = 0
+ expected_constructor_calls = 1 # above, to get the instance
+ expected_create_calls = 0
+ expected_op_utils_usage = 0
+
+ with mock.patch.object(self.template, 'status', return_value=None) as mock_status:
+ # block with timeout hit
+ timeout = 2
+ with self.assertRaises(RuntimeError) as raised, timer() as time_data:
+ self.template.create(block=True, timeout=timeout)
+
+ # ensure runtime is approximately the timeout value
+ expected_time_low = timeout - interval * 0.2
+ expected_time_high = timeout + interval * 0.2
+ self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+
+ # ensure op_utils was used
+ expected_op_utils_usage += 1
+ self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage)
+
+ # ensure the constructor and instance were used
+ expected_constructor_calls += 1
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure that the status was used
+ self.assertGreater(mock_status.call_count, expected_status_calls)
+ expected_status_calls = mock_status.call_count # synchronize the value
+
+ # ensure the expected exception was raised
+ error_message = get_error_message(raised.exception)
+ self.assertIn('timeout', error_message)
+ self.assertNotIn('the reason', error_message)
+
+ # block with create failed
+ timeout = 10
+ mock_status.side_effect = iter([None, None, u'CREATE_FAILED'])
+ with self.assertRaises(RuntimeError) as raised, timer() as time_data:
+ self.template.create(block=True, timeout=timeout)
+
+ # ensure runtime is approximately two intervals
+ expected_time_low = interval * 1.8
+ expected_time_high = interval * 2.2
+ self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+
+ # ensure the existing heat_client was used and op_utils was used again
+ self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage)
+
+ # ensure the constructor was not used but the instance was used
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure that the status was used three times
+ expected_status_calls += 3
+ self.assertEqual(mock_status.call_count, expected_status_calls)
+
+ # ensure the expected exception was raised
+ error_message = get_error_message(raised.exception)
+ self.assertNotIn('timeout', error_message)
+ self.assertIn('the reason', error_message)
+
+ @mock_patch_target_module('op_utils')
+ @mock_patch_target_module('heatclient.client.Client')
+ def test_create(self, mock_heat_client_class, mock_op_utils):
+ self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+ mock_heat_client = mock_heat_client_class()
+
+ # populate attributes of the constructed mock
+ mock_heat_client.stacks.get().outputs = [
+ {'output_key': 'key1', 'output_value': 'value1'},
+ {'output_key': 'key2', 'output_value': 'value2'},
+ {'output_key': 'key3', 'output_value': 'value3'},
+ ]
+ expected_outputs = {
+ 'key1': 'value1',
+ 'key2': 'value2',
+ 'key3': 'value3',
+ }
+
+ expected_status_calls = 0
+ expected_constructor_calls = 1 # above, to get the instance
+ expected_create_calls = 0
+ expected_op_utils_usage = 0
+
+ with mock.patch.object(self.template, 'status') as mock_status:
+ # no block
+ with timer() as time_data:
+ self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack)
+
+ # ensure runtime is much less than one interval
+ self.assertLess(time_data['delta'], interval * 0.2)
+
+ # ensure op_utils was used
+ expected_op_utils_usage += 1
+ self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage)
+
+ # ensure the constructor and instance were used
+ expected_constructor_calls += 1
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure that the status was not used
+ self.assertEqual(mock_status.call_count, expected_status_calls)
+
+ # ensure no outputs because this requires blocking
+ self.assertEqual(self.template.outputs, {})
+
+ # block with immediate complete
+ mock_status.return_value = u'CREATE_COMPLETE'
+ with timer() as time_data:
+ self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+
+ # ensure runtime is less than one interval
+ self.assertLess(time_data['delta'], interval * 0.2)
+
+ # ensure existing instance was re-used and op_utils was not used
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure status was checked once
+ expected_status_calls += 1
+ self.assertEqual(mock_status.call_count, expected_status_calls)
+
+ # ensure the expected outputs are present
+ self.assertDictEqual(self.template.outputs, expected_outputs)
+
+ # reset template outputs
+ self.template.outputs = None
+
+ # block with delayed complete
+ mock_status.side_effect = iter([None, None, u'CREATE_COMPLETE'])
+ with timer() as time_data:
+ self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+
+ # ensure runtime is approximately two intervals
+ expected_time_low = interval * 1.8
+ expected_time_high = interval * 2.2
+ self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+
+ # ensure existing instance was re-used and op_utils was not used
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure status was checked three more times
+ expected_status_calls += 3
+ self.assertEqual(mock_status.call_count, expected_status_calls)
class HeatStackTestCase(unittest.TestCase):
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index b689ac09c..aa134d694 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -13,9 +13,10 @@ from __future__ import print_function
import collections
import logging
import os
-import sys
import uuid
+from collections import OrderedDict
+import ipaddress
import paramiko
import pkg_resources
@@ -29,6 +30,8 @@ from yardstick.common.constants import YARDSTICK_ROOT_PATH
LOG = logging.getLogger(__name__)
+DEFAULT_HEAT_TIMEOUT = 3600
+
class HeatContext(Context):
"""Class that represents a context in the logical model"""
@@ -38,7 +41,7 @@ class HeatContext(Context):
def __init__(self):
self.name = None
self.stack = None
- self.networks = []
+ self.networks = OrderedDict()
self.servers = []
self.placement_groups = []
self.server_groups = []
@@ -68,6 +71,7 @@ class HeatContext(Context):
        # no external net defined, assign it to first network using os.environ
if sorted_networks and not have_external_network:
sorted_networks[0][1]["external_network"] = external_network
+ return sorted_networks
def init(self, attrs): # pragma: no cover
"""initializes itself from the supplied arguments"""
@@ -87,6 +91,8 @@ class HeatContext(Context):
self._flavor = attrs.get("flavor")
+ self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
+
self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
for name, pgattrs in attrs.get(
"placement_groups", {}).items()]
@@ -95,12 +101,15 @@ class HeatContext(Context):
for name, sgattrs in attrs.get(
"server_groups", {}).items()]
- self.assign_external_network(attrs["networks"])
+ # we have to do this first, because we are injecting external_network
+ # into the dict
+ sorted_networks = self.assign_external_network(attrs["networks"])
- self.networks = [Network(name, self, netattrs) for name, netattrs in
- sorted(attrs["networks"].items())]
+ self.networks = OrderedDict(
+ (name, Network(name, self, netattrs)) for name, netattrs in
+ sorted_networks)
- for name, serverattrs in attrs["servers"].items():
+ for name, serverattrs in sorted(attrs["servers"].items()):
server = Server(name, self, serverattrs)
self.servers.append(server)
self._server_map[server.dn] = server
@@ -140,7 +149,7 @@ class HeatContext(Context):
template.add_keypair(self.keypair_name, self.key_uuid)
template.add_security_group(self.secgroup_name)
- for network in self.networks:
+ for network in self.networks.values():
template.add_network(network.stack_name,
network.physical_network,
network.provider)
@@ -190,17 +199,17 @@ class HeatContext(Context):
if not scheduler_hints["different_host"]:
scheduler_hints.pop("different_host", None)
server.add_to_template(template,
- self.networks,
+ list(self.networks.values()),
scheduler_hints)
else:
scheduler_hints["different_host"] = \
scheduler_hints["different_host"][0]
server.add_to_template(template,
- self.networks,
+ list(self.networks.values()),
scheduler_hints)
else:
server.add_to_template(template,
- self.networks,
+ list(self.networks.values()),
scheduler_hints)
added_servers.append(server.stack_name)
@@ -219,7 +228,8 @@ class HeatContext(Context):
scheduler_hints = {}
for pg in server.placement_groups:
update_scheduler_hints(scheduler_hints, added_servers, pg)
- server.add_to_template(template, self.networks, scheduler_hints)
+ server.add_to_template(template, list(self.networks.values()),
+ scheduler_hints)
added_servers.append(server.stack_name)
# add server group
@@ -236,7 +246,8 @@ class HeatContext(Context):
if sg:
scheduler_hints["group"] = {'get_resource': sg.name}
server.add_to_template(template,
- self.networks, scheduler_hints)
+ list(self.networks.values()),
+ scheduler_hints)
def deploy(self):
"""deploys template into a stack using cloud"""
@@ -249,13 +260,14 @@ class HeatContext(Context):
self._add_resources_to_template(heat_template)
try:
- self.stack = heat_template.create()
+ self.stack = heat_template.create(block=True,
+ timeout=self.heat_timeout)
except KeyboardInterrupt:
- sys.exit("\nStack create interrupted")
- except RuntimeError as err:
- sys.exit("error: failed to deploy stack: '%s'" % err.args)
- except Exception as err:
- sys.exit("error: failed to deploy stack: '%s'" % err)
+ raise SystemExit("\nStack create interrupted")
+ except:
+ LOG.exception("stack failed")
+ raise
+        # let the other failures happen, we want the stack trace
# copy some vital stack output into server objects
for server in self.servers:
@@ -263,6 +275,11 @@ class HeatContext(Context):
# TODO(hafe) can only handle one internal network for now
port = next(iter(server.ports.values()))
server.private_ip = self.stack.outputs[port["stack_name"]]
+ server.interfaces = {}
+ for network_name, port in server.ports.items():
+ self.make_interface_dict(network_name, port['stack_name'],
+ server,
+ self.stack.outputs)
if server.floating_ip:
server.public_ip = \
@@ -270,6 +287,27 @@ class HeatContext(Context):
print("Context '%s' deployed" % self.name)
+ def make_interface_dict(self, network_name, stack_name, server, outputs):
+ server.interfaces[network_name] = {
+ "private_ip": outputs[stack_name],
+ "subnet_id": outputs[stack_name + "-subnet_id"],
+ "subnet_cidr": outputs[
+ "{}-{}-subnet-cidr".format(self.name, network_name)],
+ "netmask": str(ipaddress.ip_network(
+ outputs["{}-{}-subnet-cidr".format(self.name,
+ network_name)]).netmask),
+ "gateway_ip": outputs[
+ "{}-{}-subnet-gateway_ip".format(self.name, network_name)],
+ "mac_address": outputs[stack_name + "-mac_address"],
+ "device_id": outputs[stack_name + "-device_id"],
+ "network_id": outputs[stack_name + "-network_id"],
+ "network_name": network_name,
+ # to match vnf_generic
+ "local_mac": outputs[stack_name + "-mac_address"],
+ "local_ip": outputs[stack_name],
+ "vld_id": self.networks[network_name].vld_id,
+ }
+
def undeploy(self):
"""undeploys stack from cloud"""
if self.stack:
@@ -324,7 +362,8 @@ class HeatContext(Context):
result = {
"user": server.context.user,
"key_filename": key_filename,
- "private_ip": server.private_ip
+ "private_ip": server.private_ip,
+ "interfaces": server.interfaces,
}
# Target server may only have private_ip
if server.public_ip:
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index 546201e9b..1f8c6f11c 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -111,6 +111,7 @@ class Network(Object):
if "external_network" in attrs:
self.router = Router("router", self.name,
context, attrs["external_network"])
+ self.vld_id = attrs.get("vld_id", "")
Network.list.append(self)
@@ -152,6 +153,7 @@ class Server(Object): # pragma: no cover
self.public_ip = None
self.private_ip = None
self.user_data = ''
+ self.interfaces = {}
if attrs is None:
attrs = {}
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 091aa99d3..478a51f9d 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -493,6 +493,9 @@ class TaskParser(object): # pragma: no cover
task_name = os.path.splitext(os.path.basename(self.path))[0]
scenario["tc"] = task_name
scenario["task_id"] = task_id
+ # embed task path into scenario so we can load other files
+ # relative to task path
+ scenario["task_path"] = os.path.dirname(self.path)
change_server_name(scenario, name_suffix)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index e0e6cf3bf..f7ab23dcd 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -66,3 +66,5 @@ class ProcessAttacker(BaseAttacker):
exit_status, stdout, stderr = self.connection.execute(
"sudo /bin/bash -s {0} ".format(self.service_name),
stdin=stdin_file)
+ if exit_status:
+            LOG.info("Failed to restart service!")
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
index a6a3e96ca..a865b6551 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
@@ -16,10 +16,13 @@ set -e
process_name=$1
if [ "$process_name" = "keystone" ]; then
- killall -9 -u $process_name
+ for pid in $(ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}'); \
+ do
+ kill -9 "${pid}"
+ done
else
- for pid in `ps aux | grep "/usr/.*/${process_name}" | grep -v grep | grep -v /bin/sh | awk '{print $2}'`; \
+ for pid in $(pgrep -f "/usr/.*/${process_name}");
do
- kill -9 ${pid}
+ kill -9 "${pid}"
done
fi
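The rewritten loop depends on pgrep -f matching against the full command line rather than just the process name, which matters for services that run under an interpreter (e.g. "python /usr/bin/<service>"). A short illustrative comparison, with the service name only as an example:

    pgrep nova-api                 # matches on the process name (comm) only
    pgrep -f "/usr/.*/nova-api"    # matches anywhere in the full command line

    # roughly equivalent to the else-branch loop above, as a single command:
    # pkill -9 -f "/usr/.*/${process_name}"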
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
index 941563e7c..8737836e2 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
@@ -14,4 +14,10 @@
set -e
-openstack flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
+if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack "${SECURE}" flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
index e998464c7..617dcf8a3 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
@@ -14,4 +14,10 @@
set -e
-openstack flavor delete $1
+if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack "${SECURE}" flavor delete $1
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
index 1b0739602..9b413c965 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
@@ -13,4 +13,10 @@
set -e
-nova flavor-list
\ No newline at end of file
+if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack "${SECURE}" flavor list
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index 033a2d721..d757bd88d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -7,6 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
+
+import os
import logging
import subprocess
import traceback
@@ -53,6 +55,14 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.cmd = self._config["command_name"]
+ try:
+ cacert = os.environ['OS_CACERT']
+ except KeyError:
+ pass
+ else:
+ if cacert.lower() == "false":
+ self.cmd = self.cmd + " --insecure"
+
def monitor_func(self):
exit_status = 0
exit_status, stdout = _execute_shell_command(self.cmd)
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 689d33a34..28bec8aff 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -54,7 +54,18 @@ class ScenarioGeneral(base.Scenario):
pass
self.director.stopMonitors()
- if self.director.verify():
+
+ verify_result = self.director.verify()
+
+        for v in self.director.data.values():
+            if v == 0:
+                result['sla_pass'] = 0
+                verify_result = False
+                LOG.info(
+                    "\033[92m The service process was not found in the host \
+environment, the HA test case does NOT pass")
+
+ if verify_result:
result['sla_pass'] = 1
LOG.info(
"\033[92m Congratulations, "
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 2e829714d..2f0012ecf 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -71,7 +71,7 @@ class ServiceHA(base.Scenario):
sla_pass = self.monitorMgr.verify_SLA()
for k, v in self.data.items():
- if self.data[k] == 0:
+ if v == 0:
result['sla_pass'] = 0
LOG.info("The service process not found in the host envrioment, \
the HA test case NOT pass")
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index be179631e..594edeaa8 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -15,6 +15,14 @@
from __future__ import absolute_import
import logging
+
+import errno
+import os
+
+import re
+from operator import itemgetter
+from collections import defaultdict
+
import yaml
from yardstick.benchmark.scenarios import base
@@ -72,6 +80,15 @@ class SshManager(object):
self.conn.close()
+def open_relative_file(path, task_path):
+ try:
+ return open(path)
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ return open(os.path.join(task_path, path))
+ raise
+
+
class NetworkServiceTestCase(base.Scenario):
"""Class handles Generic framework to do pre-deployment VNF &
Network service testing """
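open_relative_file() tries the path exactly as given first, and only when the file is missing does it retry relative to the directory of the task file, re-raising any other I/O error. A short usage sketch under assumed paths (the scenario keys match the patch; the concrete file names and directories are made up):

    import errno
    import os

    def open_relative_file(path, task_path):
        # Same fallback as in the patch: try the path as-is, then
        # relative to the task file's directory if it does not exist.
        try:
            return open(path)
        except IOError as e:
            if e.errno == errno.ENOENT:
                return open(os.path.join(task_path, path))
            raise

    # Hypothetical scenario_cfg as handed to NetworkServiceTestCase:
    scenario_cfg = {
        "topology": "ping_tg_topology.yaml",           # relative to the task file
        "task_path": "/home/opnfv/yardstick/samples",  # directory of the task file
    }
    # with open_relative_file(scenario_cfg["topology"],
    #                         scenario_cfg["task_path"]) as stream:
    #     topology_yaml = yaml.load(stream)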
@@ -84,8 +101,11 @@ class NetworkServiceTestCase(base.Scenario):
self.context_cfg = context_cfg
# fixme: create schema to validate all fields have been provided
- with open(scenario_cfg["topology"]) as stream:
- self.topology = yaml.load(stream)["nsd:nsd-catalog"]["nsd"][0]
+ with open_relative_file(scenario_cfg["topology"],
+ scenario_cfg['task_path']) as stream:
+ topology_yaml = yaml.load(stream)
+
+ self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
self.vnfs = []
self.collector = None
self.traffic_profile = None
@@ -114,7 +134,8 @@ class NetworkServiceTestCase(base.Scenario):
private = {}
public = {}
try:
- with open(scenario_cfg["traffic_profile"]) as infile:
+ with open_relative_file(scenario_cfg["traffic_profile"],
+ scenario_cfg["task_path"]) as infile:
traffic_profile_tpl = infile.read()
except (KeyError, IOError, OSError):
@@ -123,8 +144,6 @@ class NetworkServiceTestCase(base.Scenario):
return [traffic_profile_tpl, private, public]
def _fill_traffic_profile(self, scenario_cfg, context_cfg):
- traffic_profile = {}
-
flow = self._get_traffic_flow(scenario_cfg)
imix = self._get_traffic_imix(scenario_cfg)
@@ -193,6 +212,26 @@ class NetworkServiceTestCase(base.Scenario):
list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
nodes[node].update(topology["constituent-vnfd"][list_idx])
+ @staticmethod
+ def _sort_dpdk_port_num(netdevs):
+ # dpdk_port_num is PCI BUS ID ordering, lowest first
+ s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
+ for dpdk_port_num, netdev in enumerate(s, 1):
+ netdev['dpdk_port_num'] = dpdk_port_num
+
+ @classmethod
+ def _probe_missing_values(cls, netdevs, network, missing):
+ mac = network['local_mac']
+ for netdev in netdevs.values():
+ if netdev['address'].lower() == mac.lower():
+ network['driver'] = netdev['driver']
+ network['vpci'] = netdev['pci_bus_id']
+ network['dpdk_port_num'] = netdev['dpdk_port_num']
+ network['ifindex'] = netdev['ifindex']
+
+ TOPOLOGY_REQUIRED_KEYS = frozenset({
+ "vpci", "local_ip", "netmask", "local_mac", "driver", "dpdk_port_num"})
+
def map_topology_to_infrastructure(self, context_cfg, topology):
""" This method should verify if the available resources defined in pod.yaml
match the topology.yaml file.
@@ -208,21 +247,66 @@ class NetworkServiceTestCase(base.Scenario):
exit_status = conn.execute(cmd)[0]
if exit_status != 0:
raise IncorrectSetup("Node's %s lacks ip tool." % node)
-
- for interface in node_dict["interfaces"]:
- network = node_dict["interfaces"][interface]
- keys = ["vpci", "local_ip", "netmask",
- "local_mac", "driver", "dpdk_port_num"]
- missing = set(keys).difference(network)
+ exit_status, stdout, _ = conn.execute(
+ self.FIND_NETDEVICE_STRING)
+ if exit_status != 0:
+ raise IncorrectSetup(
+                        "Cannot find netdev info in sysfs on node %s" % node)
+ netdevs = node_dict['netdevs'] = self.parse_netdev_info(
+ stdout)
+ self._sort_dpdk_port_num(netdevs)
+
+ for network in node_dict["interfaces"].values():
+ missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
if missing:
- raise IncorrectConfig("Require interface fields '%s' "
- "not found, topology file "
- "corrupted" % ', '.join(missing))
+ try:
+ self._probe_missing_values(netdevs, network,
+ missing)
+ except KeyError:
+ pass
+ else:
+ missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
+ network)
+ if missing:
+ raise IncorrectConfig(
+ "Require interface fields '%s' "
+ "not found, topology file "
+ "corrupted" % ', '.join(missing))
# 3. Use topology file to find connections & resolve dest address
self._resolve_topology(context_cfg, topology)
self._update_context_with_topology(context_cfg, topology)
+ FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
+$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
+$1/device/subsystem_vendor $1/device/subsystem_device ; \
+printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
+' sh \{\}/* \;
+"""
+ BASE_ADAPTER_RE = re.compile(
+ '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
+
+ @classmethod
+ def parse_netdev_info(cls, stdout):
+ network_devices = defaultdict(dict)
+ matches = cls.BASE_ADAPTER_RE.findall(stdout)
+ for bus_path, interface_name, name, value in matches:
+ dirname, bus_id = os.path.split(bus_path)
+ if 'virtio' in bus_id:
+ # for some stupid reason VMs include virtio1/
+ # in PCI device path
+ bus_id = os.path.basename(dirname)
+ # remove extra 'device/' from 'device/vendor,
+ # device/subsystem_vendor', etc.
+ if 'device/' in name:
+ name = name.split('/')[1]
+ network_devices[interface_name][name] = value
+ network_devices[interface_name][
+ 'interface_name'] = interface_name
+ network_devices[interface_name]['pci_bus_id'] = bus_id
+ # convert back to regular dict
+ return dict(network_devices)
+
@classmethod
def get_vnf_impl(cls, vnf_model):
""" Find the implementing class from vnf_model["vnf"]["name"] field
@@ -240,21 +324,24 @@ class NetworkServiceTestCase(base.Scenario):
except StopIteration:
raise IncorrectConfig("No implementation for %s", expected_name)
- def load_vnf_models(self, context_cfg):
+ def load_vnf_models(self, scenario_cfg, context_cfg):
""" Create VNF objects based on YAML descriptors
+ :param scenario_cfg:
+ :type scenario_cfg:
:param context_cfg:
:return:
"""
vnfs = []
- for node in context_cfg["nodes"]:
- LOG.debug(context_cfg["nodes"][node])
- with open(context_cfg["nodes"][node]["VNF model"]) as stream:
+ for node_name, node in context_cfg["nodes"].items():
+ LOG.debug(node)
+ with open_relative_file(node["VNF model"],
+ scenario_cfg['task_path']) as stream:
vnf_model = stream.read()
- vnfd = vnfdgen.generate_vnfd(vnf_model, context_cfg["nodes"][node])
+ vnfd = vnfdgen.generate_vnfd(vnf_model, node)
vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
- vnf_instance.name = node
+ vnf_instance.name = node_name
vnfs.append(vnf_instance)
return vnfs
@@ -264,11 +351,10 @@ class NetworkServiceTestCase(base.Scenario):
:return:
"""
-
# 1. Verify if infrastructure mapping can meet topology
self.map_topology_to_infrastructure(self.context_cfg, self.topology)
# 1a. Load VNF models
- self.vnfs = self.load_vnf_models(self.context_cfg)
+ self.vnfs = self.load_vnf_models(self.scenario_cfg, self.context_cfg)
# 1b. Fill traffic profile with information from topology
self.traffic_profile = self._fill_traffic_profile(self.scenario_cfg,
self.context_cfg)
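FIND_NETDEVICE_STRING greps a handful of sysfs attributes for every PCI network device on the node, parse_netdev_info() folds the resulting path:value lines back into one dict per interface, and _sort_dpdk_port_num() then numbers the interfaces in PCI bus-ID order. A self-contained sketch of the parsing and ordering on fabricated sysfs output (the regex and grouping mirror the patch; the interface names, MAC addresses and PCI IDs below are invented):

    import os
    import re
    from collections import defaultdict
    from operator import itemgetter

    BASE_ADAPTER_RE = re.compile(
        r'^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    def parse_netdev_info(stdout):
        netdevs = defaultdict(dict)
        for bus_path, ifname, name, value in BASE_ADAPTER_RE.findall(stdout):
            dirname, bus_id = os.path.split(bus_path)
            if 'virtio' in bus_id:
                # VMs expose an extra virtioN component under the PCI path
                bus_id = os.path.basename(dirname)
            if 'device/' in name:
                name = name.split('/')[1]
            netdevs[ifname][name] = value
            netdevs[ifname]['interface_name'] = ifname
            netdevs[ifname]['pci_bus_id'] = bus_id
        return dict(netdevs)

    SAMPLE = "\n".join([
        "/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/ifindex:3",
        "/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/address:fa:16:3e:00:00:01",
        "/sys/devices/pci0000:00/0000:00:03.0/virtio0/net/eth0/ifindex:2",
        "/sys/devices/pci0000:00/0000:00:03.0/virtio0/net/eth0/address:fa:16:3e:00:00:02",
    ])

    netdevs = parse_netdev_info(SAMPLE)
    # Lowest PCI bus ID gets the lowest dpdk_port_num, as _sort_dpdk_port_num does:
    for dpdk_port_num, netdev in enumerate(
            sorted(netdevs.values(), key=itemgetter('pci_bus_id')), 1):
        netdev['dpdk_port_num'] = dpdk_port_num

    print(sorted((d['pci_bus_id'], d['interface_name'], d['dpdk_port_num'])
                 for d in netdevs.values()))
    # [('0000:00:03.0', 'eth0', 1), ('0000:00:04.0', 'eth1', 2)]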
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index cb98c356d..47a519923 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -43,6 +43,7 @@ TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/')
# file
OPENRC = get_param('file.openrc', '/etc/yardstick/openstack.creds')
CONF_FILE = join(CONF_DIR, 'yardstick.conf')
+POD_FILE = join(CONF_DIR, 'pod.yaml')
CONF_SAMPLE_FILE = join(CONF_SAMPLE_DIR, 'yardstick.conf.sample')
FETCH_SCRIPT = get_param('file.fetch_script', 'utils/fetch_os_creds.sh')
FETCH_SCRIPT = join(RELENG_DIR, FETCH_SCRIPT)
@@ -77,6 +78,9 @@ DOCKER_URL = 'unix://var/run/docker.sock'
INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
SQLITE = 'sqlite:////tmp/yardstick.db'
+API_SUCCESS = 1
+API_ERROR = 2
+
BASE_URL = 'http://localhost:5000'
ENV_ACTION_API = BASE_URL + '/yardstick/env/action'
ASYNC_TASK_API = BASE_URL + '/yardstick/asynctask'
diff --git a/yardstick/network_services/vnf_generic/vnfdgen.py b/yardstick/network_services/vnf_generic/vnfdgen.py
index 97dd97198..40cc14a49 100644
--- a/yardstick/network_services/vnf_generic/vnfdgen.py
+++ b/yardstick/network_services/vnf_generic/vnfdgen.py
@@ -15,9 +15,20 @@
from __future__ import absolute_import
import collections
+
+import jinja2
import yaml
-from yardstick.common.task_template import TaskTemplate
+
+def render(vnf_model, **kwargs):
+ """Render jinja2 VNF template
+
+ :param vnf_model: string that contains template
+ :param kwargs: Dict with template arguments
+    :returns: rendered template str
+ """
+
+ return jinja2.Template(vnf_model).render(**kwargs)
def generate_vnfd(vnf_model, node):
@@ -31,7 +42,10 @@ def generate_vnfd(vnf_model, node):
# get is unused as global method inside template
node["get"] = get
# Set Node details to default if not defined in pod file
- rendered_vnfd = TaskTemplate.render(vnf_model, **node)
+ # we CANNOT use TaskTemplate.render because it does not allow
+    # for missing variables, we need to allow password or key_filename
+ # to be undefined
+ rendered_vnfd = render(vnf_model, **node)
# This is done to get rid of issues with serializing node
del node["get"]
filled_vnfd = yaml.load(rendered_vnfd)
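The local render() helper deliberately uses a plain jinja2.Template, whose default Undefined silently renders a missing variable as an empty string, so a node that defines key_filename but no password (or vice versa) still produces a loadable VNF descriptor. A small sketch of the difference, using jinja2's StrictUndefined to stand in for the stricter behaviour the comment refers to (the one-line template is made up for illustration):

    import jinja2

    TEMPLATE = "user: {{ user }}, password: {{ password }}"

    # Default Undefined: the missing variable renders as an empty string.
    print(jinja2.Template(TEMPLATE).render(user="root"))
    # -> user: root, password:

    # A strict environment raises instead of tolerating the gap.
    strict = jinja2.Environment(undefined=jinja2.StrictUndefined)
    try:
        strict.from_string(TEMPLATE).render(user="root")
    except jinja2.UndefinedError as err:
        print(err)  # 'password' is undefined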
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 864f1f9ec..a99d4631d 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015-2017 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -11,6 +11,7 @@
from __future__ import absolute_import
from __future__ import print_function
+from six.moves import range
import collections
import datetime
@@ -47,7 +48,8 @@ class HeatObject(object):
self._heat_client = None
self.uuid = None
- def _get_heat_client(self):
+ @property
+ def heat_client(self):
"""returns a heat client instance"""
if self._heat_client is None:
@@ -61,9 +63,9 @@ class HeatObject(object):
def status(self):
"""returns stack state as a string"""
- heat = self._get_heat_client()
- stack = heat.stacks.get(self.uuid)
- return getattr(stack, 'stack_status')
+ heat_client = self.heat_client
+ stack = heat_client.stacks.get(self.uuid)
+ return stack.stack_status
class HeatStack(HeatObject):
@@ -88,20 +90,18 @@ class HeatStack(HeatObject):
return
log.info("Deleting stack '%s', uuid:%s", self.name, self.uuid)
- heat = self._get_heat_client()
+ heat = self.heat_client
template = heat.stacks.get(self.uuid)
start_time = time.time()
template.delete()
- status = self.status()
- while status != u'DELETE_COMPLETE':
+ for status in iter(self.status, u'DELETE_COMPLETE'):
log.debug("stack state %s", status)
if status == u'DELETE_FAILED':
raise RuntimeError(
heat.stacks.get(self.uuid).stack_status_reason)
time.sleep(2)
- status = self.status()
end_time = time.time()
log.info("Deleted stack '%s' in %d secs", self.name,
@@ -120,15 +120,13 @@ class HeatStack(HeatObject):
self._delete()
return
- i = 0
- while i < retries:
+ for _ in range(retries):
try:
self._delete()
break
except RuntimeError as err:
log.warning(err.args)
time.sleep(2)
- i += 1
# if still not deleted try once more and let it fail everything
if self.uuid is not None:
@@ -177,7 +175,6 @@ name (i.e. %s).\
self.name = name
self.state = "NOT_CREATED"
self.keystone_client = None
- self.heat_client = None
self.heat_parameters = {}
# heat_parameters is passed to heat in stack create, empty dict when
@@ -279,6 +276,14 @@ name (i.e. %s).\
'description': 'subnet %s ID' % name,
'value': {'get_resource': name}
}
+ self._template['outputs'][name + "-cidr"] = {
+ 'description': 'subnet %s cidr' % name,
+ 'value': {'get_attr': [name, 'cidr']}
+ }
+ self._template['outputs'][name + "-gateway_ip"] = {
+ 'description': 'subnet %s gateway_ip' % name,
+ 'value': {'get_attr': [name, 'gateway_ip']}
+ }
def add_router(self, name, ext_gw_net, subnet_name):
"""add to the template a Neutron Router and interface"""
@@ -336,6 +341,22 @@ name (i.e. %s).\
'description': 'Address for interface %s' % name,
'value': {'get_attr': [name, 'fixed_ips', 0, 'ip_address']}
}
+ self._template['outputs'][name + "-subnet_id"] = {
+            'description': 'Subnet ID for interface %s' % name,
+ 'value': {'get_attr': [name, 'fixed_ips', 0, 'subnet_id']}
+ }
+ self._template['outputs'][name + "-mac_address"] = {
+ 'description': 'MAC Address for interface %s' % name,
+ 'value': {'get_attr': [name, 'mac_address']}
+ }
+ self._template['outputs'][name + "-device_id"] = {
+ 'description': 'Device ID for interface %s' % name,
+ 'value': {'get_attr': [name, 'device_id']}
+ }
+ self._template['outputs'][name + "-network_id"] = {
+ 'description': 'Network ID for interface %s' % name,
+ 'value': {'get_attr': [name, 'network_id']}
+ }
def add_floating_ip(self, name, network_name, port_name, router_if_name,
secgroup_name=None):
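With the extra subnet and port outputs above, the stack's outputs mapping carries the CIDR, gateway IP, subnet ID, MAC address, device ID and network ID next to the values that were already exported, all keyed as "<resource-name>-<attribute>". A rough sketch of what a consumer of HeatTemplate.create() might then see (every name and value below is invented for illustration):

    # Hypothetical flattened outputs dict, i.e. {output_key: output_value}:
    outputs = {
        "demo-subnet": "2a8f39de-0000-0000-0000-000000000000",   # exported before this change
        "demo-subnet-cidr": "10.0.1.0/24",                       # new: subnet CIDR
        "demo-subnet-gateway_ip": "10.0.1.1",                    # new: subnet gateway
        "vnf0-port": "10.0.1.5",                                 # fixed IP, exported before
        "vnf0-port-mac_address": "fa:16:3e:11:22:33",            # new: port MAC
        "vnf0-port-subnet_id": "2a8f39de-0000-0000-0000-000000000000",   # new
        "vnf0-port-device_id": "9c1c68f2-0000-0000-0000-000000000000",   # new
        "vnf0-port-network_id": "71a3c8e4-0000-0000-0000-000000000000",  # new
    }

    # Interface details can then be looked up by the "<name>-<attr>" convention:
    mac = outputs["vnf0-port-mac_address"]
    cidr = outputs["demo-subnet-cidr"]
    print(mac, cidr)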
@@ -508,38 +529,48 @@ name (i.e. %s).\
'value': {'get_resource': name}
}
- def create(self, block=True):
- """creates a template in the target cloud using heat
+ HEAT_WAIT_LOOP_INTERVAL = 2
+
+ def create(self, block=True, timeout=3600):
+ """
+ creates a template in the target cloud using heat
returns a dict with the requested output values from the template
+
+ :param block: Wait for Heat create to finish
+ :type block: bool
+        :param timeout: timeout in seconds for Heat create, default 3600s
+ :type timeout: int
"""
log.info("Creating stack '%s'", self.name)
# create stack early to support cleanup, e.g. ctrl-c while waiting
stack = HeatStack(self.name)
- heat = self._get_heat_client()
+ heat_client = self.heat_client
start_time = time.time()
- stack.uuid = self.uuid = heat.stacks.create(
+ stack.uuid = self.uuid = heat_client.stacks.create(
stack_name=self.name, template=self._template,
parameters=self.heat_parameters)['stack']['id']
- status = self.status()
- outputs = []
+ if not block:
+ self.outputs = stack.outputs = {}
+ return stack
- if block:
- while status != u'CREATE_COMPLETE':
- log.debug("stack state %s", status)
- if status == u'CREATE_FAILED':
- raise RuntimeError(getattr(heat.stacks.get(self.uuid),
- 'stack_status_reason'))
+ time_limit = start_time + timeout
+ for status in iter(self.status, u'CREATE_COMPLETE'):
+ log.debug("stack state %s", status)
+ if status == u'CREATE_FAILED':
+ raise RuntimeError(
+ heat_client.stacks.get(self.uuid).stack_status_reason)
+ if time.time() > time_limit:
+ raise RuntimeError("Heat stack create timeout")
- time.sleep(2)
- status = self.status()
+ time.sleep(self.HEAT_WAIT_LOOP_INTERVAL)
- end_time = time.time()
- outputs = getattr(heat.stacks.get(self.uuid), 'outputs')
- log.info("Created stack '%s' in %d secs",
- self.name, end_time - start_time)
+ end_time = time.time()
+ outputs = heat_client.stacks.get(self.uuid).outputs
+ log.info("Created stack '%s' in %d secs",
+ self.name, end_time - start_time)
# keep outputs as unicode
self.outputs = {output["output_key"]: output["output_value"] for output
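Both the delete loop and the rewritten create() poll the stack through the two-argument form of iter(), which keeps calling self.status until it returns the sentinel string, and create() additionally enforces a wall-clock timeout. A minimal standalone sketch of that polling pattern (the fake status() below simulates a stack that needs a few polls; everything except the CREATE_* state names is invented):

    import itertools
    import time

    # Fake status source: IN_PROGRESS twice, then CREATE_COMPLETE.
    _states = itertools.chain(["CREATE_IN_PROGRESS"] * 2, ["CREATE_COMPLETE"])

    def status():
        return next(_states)

    HEAT_WAIT_LOOP_INTERVAL = 2
    time_limit = time.time() + 3600   # same default timeout as create()

    # iter(callable, sentinel) re-invokes status() until the sentinel comes back.
    for state in iter(status, "CREATE_COMPLETE"):
        print("stack state", state)
        if state == "CREATE_FAILED":
            raise RuntimeError("stack create failed")
        if time.time() > time_limit:
            raise RuntimeError("Heat stack create timeout")
        time.sleep(HEAT_WAIT_LOOP_INTERVAL)

    print("stack state CREATE_COMPLETE")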
diff --git a/yardstick/resources/scripts/install/storperf.bash b/yardstick/resources/scripts/install/storperf.bash
index 9bbec7206..9d20a5a8a 100644
--- a/yardstick/resources/scripts/install/storperf.bash
+++ b/yardstick/resources/scripts/install/storperf.bash
@@ -20,8 +20,12 @@ set -e
mkdir -p /tmp/storperf-yardstick
docker pull opnfv/storperf
+
+STORPERF_DIR=/tmp/storperf-yardstick/carbon
docker run -t \
--env-file ~/storperf_admin-rc \
-p 5000:5000 -p 8000:8000 \
--v /tmp/storperf-yardstick/carbon:/opt/graphite/storage/whisper \
+-v $STORPERF_DIR:/opt/graphite/storage/whisper \
--name storperf-yardstick opnfv/storperf &
+
+mkdir -p "$STORPERF_DIR" && chown www-data:www-data "$STORPERF_DIR"