-rw-r--r--api/base.py11
-rw-r--r--api/resources/env_action.py67
-rw-r--r--api/resources/results.py63
-rwxr-xr-xdocker/exec_tests.sh85
-rw-r--r--docs/testing/user/userguide/04-installation.rst8
-rw-r--r--fuel-plugin/LICENSE202
-rw-r--r--fuel-plugin/README.md36
-rwxr-xr-xfuel-plugin/deployment_scripts/install.sh47
-rw-r--r--fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp33
-rw-r--r--fuel-plugin/deployment_tasks.yaml33
-rw-r--r--fuel-plugin/environment_config.yaml0
-rw-r--r--fuel-plugin/metadata.yaml36
-rw-r--r--fuel-plugin/node_roles.yaml21
-rwxr-xr-xfuel-plugin/pre_build_hook33
-rw-r--r--fuel-plugin/tasks.yaml14
-rw-r--r--fuel-plugin/vagrant/Vagrantfile21
-rwxr-xr-xfuel-plugin/vagrant/build_fuel_plugin.sh24
-rw-r--r--requirements.txt107
-rw-r--r--samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml61
-rw-r--r--samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml (renamed from fuel-plugin/fuel_ping.yaml)49
-rwxr-xr-xtests/ci/clean_images.sh16
-rwxr-xr-xtests/ci/load_images.sh30
-rwxr-xr-xtests/ci/prepare_storperf_admin-rc.sh2
-rw-r--r--tests/unit/benchmark/contexts/test_heat.py8
-rw-r--r--tests/unit/benchmark/core/test_task.py1
-rw-r--r--tests/unit/benchmark/runner/test_base.py4
-rw-r--r--tests/unit/benchmark/scenarios/availability/test_scenario_general.py2
-rw-r--r--tests/unit/benchmark/scenarios/networking/test_vnf_generic.py150
-rw-r--r--tests/unit/dispatcher/test_influxdb.py36
-rw-r--r--tests/unit/orchestrator/test_heat.py328
-rw-r--r--yardstick/benchmark/contexts/heat.py77
-rw-r--r--yardstick/benchmark/contexts/model.py2
-rw-r--r--yardstick/benchmark/core/task.py115
-rwxr-xr-xyardstick/benchmark/runners/arithmetic.py9
-rwxr-xr-xyardstick/benchmark/runners/base.py74
-rw-r--r--yardstick/benchmark/runners/duration.py9
-rw-r--r--yardstick/benchmark/runners/iteration.py9
-rw-r--r--yardstick/benchmark/runners/sequence.py9
-rw-r--r--yardstick/benchmark/scenarios/availability/attacker/attacker_process.py2
-rwxr-xr-xyardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash9
-rw-r--r--yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash8
-rw-r--r--yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash8
-rw-r--r--yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash8
-rw-r--r--yardstick/benchmark/scenarios/availability/monitor/monitor_command.py10
-rw-r--r--yardstick/benchmark/scenarios/availability/scenario_general.py13
-rwxr-xr-xyardstick/benchmark/scenarios/availability/serviceha.py2
-rw-r--r--yardstick/benchmark/scenarios/networking/ping.py5
-rw-r--r--yardstick/benchmark/scenarios/networking/vnf_generic.py130
-rw-r--r--yardstick/cmd/commands/task.py14
-rw-r--r--yardstick/common/constants.py4
-rw-r--r--yardstick/dispatcher/base.py10
-rw-r--r--yardstick/dispatcher/file.py18
-rw-r--r--yardstick/dispatcher/http.py91
-rw-r--r--yardstick/dispatcher/influxdb.py138
-rw-r--r--yardstick/network_services/vnf_generic/vnfdgen.py18
-rw-r--r--yardstick/orchestrator/heat.py93
-rw-r--r--yardstick/resources/scripts/install/storperf.bash6
57 files changed, 1340 insertions, 1079 deletions
diff --git a/api/base.py b/api/base.py
index 527008588..6fa2777ce 100644
--- a/api/base.py
+++ b/api/base.py
@@ -23,9 +23,16 @@ logger.setLevel(logging.DEBUG)
class ApiResource(Resource):
def _post_args(self):
- params = common_utils.translate_to_str(request.json)
- action = params.get('action', '')
+ data = request.json if request.json else {}
+ params = common_utils.translate_to_str(data)
+ action = params.get('action', request.form.get('action', ''))
args = params.get('args', {})
+
+ try:
+ args['file'] = request.files['file']
+ except KeyError:
+ pass
+
logger.debug('Input args is: action: %s, args: %s', action, args)
return action, args
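
With this change, _post_args accepts either a JSON body or a multipart form: action is read from the JSON payload or, failing that, from the form data, and an uploaded file surfaces as args['file']. A minimal client sketch (host, port and endpoint path are assumptions, not taken from this patch):

    # Hypothetical client; the URL is an assumption for illustration only.
    import requests

    API = 'http://localhost:5000/yardstick/env/action'

    # JSON body: action and args are read from request.json
    requests.post(API, json={'action': 'update_openrc',
                             'args': {'openrc': {'OS_USERNAME': 'admin'}}})

    # Multipart form: request.json is empty, so action falls back to
    # request.form and the upload arrives via request.files['file']
    with open('pod.yaml', 'rb') as f:
        requests.post(API, data={'action': 'upload_pod_file'},
                      files={'file': f})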
diff --git a/api/resources/env_action.py b/api/resources/env_action.py
index 7bfaf27a7..3536559b7 100644
--- a/api/resources/env_action.py
+++ b/api/resources/env_action.py
@@ -16,6 +16,8 @@ import threading
import time
import uuid
import glob
+import yaml
+import collections
from six.moves import configparser
from oslo_serialization import jsonutils
@@ -25,7 +27,7 @@ from api.database.handler import AsyncTaskHandler
from api.utils import influx
from api.utils.common import result_handler
from yardstick.common import constants as consts
-from yardstick.common import utils as yardstick_utils
+from yardstick.common import utils as common_utils
from yardstick.common import openstack_utils
from yardstick.common.httpClient import HttpClient
@@ -98,7 +100,9 @@ def _create_data_source():
def _create_grafana_container(client):
ports = [3000]
port_bindings = {k: k for k in ports}
- host_config = client.create_host_config(port_bindings=port_bindings)
+ restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+ host_config = client.create_host_config(port_bindings=port_bindings,
+ restart_policy=restart_policy)
container = client.create_container(image='%s:%s' % (consts.GRAFANA_IMAGE,
consts.GRAFANA_TAG),
@@ -150,7 +154,9 @@ def _create_influxdb_container(client):
ports = [8083, 8086]
port_bindings = {k: k for k in ports}
- host_config = client.create_host_config(port_bindings=port_bindings)
+ restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
+ host_config = client.create_host_config(port_bindings=port_bindings,
+ restart_policy=restart_policy)
container = client.create_container(image='%s:%s' % (consts.INFLUXDB_IMAGE,
consts.INFLUXDB_TAG),
@@ -174,7 +180,7 @@ def _config_influxdb():
def _change_output_to_influxdb():
- yardstick_utils.makedirs(consts.CONF_DIR)
+ common_utils.makedirs(consts.CONF_DIR)
parser = configparser.ConfigParser()
parser.read(consts.CONF_SAMPLE_FILE)
@@ -230,11 +236,11 @@ def _prepare_env_daemon(task_id):
def _create_directories():
- yardstick_utils.makedirs(consts.CONF_DIR)
+ common_utils.makedirs(consts.CONF_DIR)
def _source_file(rc_file):
- yardstick_utils.source_env(rc_file)
+ common_utils.source_env(rc_file)
def _get_remote_rc_file(rc_file, installer_ip, installer_type):
@@ -307,3 +313,52 @@ def _update_task_error(task_id, error):
task = async_handler.get_task_by_taskid(task_id)
async_handler.update_status(task, 2)
async_handler.update_error(task, error)
+
+
+def update_openrc(args):
+ try:
+ openrc_vars = args['openrc']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'openrc must be provided')
+ else:
+ if not isinstance(openrc_vars, collections.Mapping):
+ return result_handler(consts.API_ERROR, 'args should be a dict')
+
+ lines = ['export {}={}\n'.format(k, v) for k, v in openrc_vars.items()]
+ logger.debug('Writing: %s', ''.join(lines))
+
+ logger.info('Writing openrc: Writing')
+ common_utils.makedirs(consts.CONF_DIR)
+
+ with open(consts.OPENRC, 'w') as f:
+ f.writelines(lines)
+ logger.info('Writing openrc: Done')
+
+ logger.info('Source openrc: Sourcing')
+ try:
+ _source_file(consts.OPENRC)
+ except Exception as e:
+ logger.exception('Failed to source openrc')
+ return result_handler(consts.API_ERROR, str(e))
+ logger.info('Source openrc: Done')
+
+ return result_handler(consts.API_SUCCESS, {'openrc': openrc_vars})
+
+
+def upload_pod_file(args):
+ try:
+ pod_file = args['file']
+ except KeyError:
+ return result_handler(consts.API_ERROR, 'file must be provided')
+
+ logger.info('Checking file')
+ data = yaml.load(pod_file.read())
+ if not isinstance(data, collections.Mapping):
+ return result_handler(consts.API_ERROR, 'invalid yaml file')
+
+ logger.info('Writing file')
+ with open(consts.POD_FILE, 'w') as f:
+ yaml.dump(data, f, default_flow_style=False)
+ logger.info('Writing finished')
+
+ return result_handler(consts.API_SUCCESS, {'pod_info': data})
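
Both container-creation helpers now pass a restart policy, so Docker brings Grafana and InfluxDB back up after a daemon restart or container exit. A minimal docker-py (1.10.x, as pinned below in requirements.txt) sketch of the same host-config shape; the image name is a placeholder:

    from docker import Client  # docker-py < 2.0 API

    client = Client(base_url='unix://var/run/docker.sock')
    host_config = client.create_host_config(
        port_bindings={3000: 3000},
        # 'always' restarts unconditionally; MaximumRetryCount is only
        # honoured by the 'on-failure' policy, so 0 is effectively a no-op.
        restart_policy={'Name': 'always', 'MaximumRetryCount': 0})
    container = client.create_container(image='grafana/grafana:latest',
                                        ports=[3000],
                                        host_config=host_config)
    client.start(container)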
diff --git a/api/resources/results.py b/api/resources/results.py
index 86fc25193..a0527ed8c 100644
--- a/api/resources/results.py
+++ b/api/resources/results.py
@@ -28,12 +28,12 @@ def getResult(args):
uuid.UUID(task_id)
except KeyError:
message = 'task_id must be provided'
- return common_utils.error_handler(message)
+ return common_utils.result_handler(2, message)
task = TasksHandler().get_task_by_taskid(task_id)
def _unfinished():
- return common_utils.result_handler(0, [])
+ return common_utils.result_handler(0, {})
def _finished():
testcases = task.details.split(',')
@@ -44,7 +44,7 @@ def getResult(args):
data = common_utils.translate_to_str(influx_utils.query(query_sql))
return data
- result = {k: get_data(k) for k in testcases}
+ result = _format_data({k: get_data(k) for k in testcases})
return common_utils.result_handler(1, result)
@@ -61,4 +61,59 @@ def getResult(args):
}
return switcher.get(status, lambda: 'nothing')()
except IndexError:
- return common_utils.error_handler('no such task')
+ return common_utils.result_handler(2, 'no such task')
+
+
+def _format_data(data):
+ try:
+ first_value = data.values()[0][0]
+ except IndexError:
+ return {'criteria': 'FAIL', 'testcases': {}}
+ else:
+ info = {
+ 'deploy_scenario': first_value.get('deploy_scenario'),
+ 'installer': first_value.get('installer'),
+ 'pod_name': first_value.get('pod_name'),
+ 'version': first_value.get('version')
+ }
+ task_id = first_value.get('task_id')
+ criteria = first_value.get('criteria')
+ testcases = {k: _get_case_data(v) for k, v in data.items()}
+
+ result = {
+ 'criteria': criteria,
+ 'info': info,
+ 'task_id': task_id,
+ 'testcases': testcases
+ }
+ return result
+
+
+def _get_case_data(data):
+ try:
+ scenario = data[0]
+ except IndexError:
+ return {'tc_data': [], 'criteria': 'FAIL'}
+ else:
+ tc_data = [_get_scenario_data(s) for s in data]
+ criteria = scenario.get('criteria')
+ return {'tc_data': tc_data, 'criteria': criteria}
+
+
+def _get_scenario_data(data):
+ result = {
+ 'data': {},
+ 'timestamp': ''
+ }
+
+ blacklist = {'criteria', 'deploy_scenario', 'host', 'installer',
+ 'pod_name', 'runner_id', 'scenarios', 'target',
+ 'task_id', 'time', 'version'}
+
+ keys = set(data.keys()) - set(blacklist)
+ for k in keys:
+ result['data'][k] = data[k]
+
+ result['timestamp'] = data.get('time')
+
+ return result
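
_format_data reshapes the flat per-testcase InfluxDB rows into a single result document; note that data.values()[0] relies on Python 2, where dict.values() returns a list. The output shape looks roughly like this (all values are illustrative, not real results):

    result = {
        'criteria': 'PASS',
        'info': {
            'deploy_scenario': 'os-nosdn-nofeature-ha',
            'installer': 'compass',
            'pod_name': 'pod1',
            'version': 'master',
        },
        'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
        'testcases': {
            'opnfv_yardstick_tc002': {
                'criteria': 'PASS',
                'tc_data': [{'data': {'rtt': 0.65},
                             'timestamp': '2017-03-01T00:00:00Z'}],
            },
        },
    }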
diff --git a/docker/exec_tests.sh b/docker/exec_tests.sh
index db053f7bc..46e5a05bd 100755
--- a/docker/exec_tests.sh
+++ b/docker/exec_tests.sh
@@ -16,39 +16,88 @@ set -e
: ${RELENG_REPO:='https://gerrit.opnfv.org/gerrit/releng'}
: ${RELENG_REPO_DIR:='/home/opnfv/repos/releng'}
+# TEMP HACK: freeze the releng version to work around a fetch_os_creds.sh problem
: ${RELENG_BRANCH:='master'} # branch, tag, sha1 or refspec
+# git update using reference as a branch.
+# git_update_branch ref
+function git_update_branch {
+ local git_branch=$1
+
+ git checkout -f origin/${git_branch}
+ # a local branch might not exist
+ git branch -D ${git_branch} || true
+ git checkout -b ${git_branch}
+}
+
+# git update using reference as a remote branch.
+# git_update_remote_branch ref
+function git_update_remote_branch {
+ local git_branch=$1
+
+ git checkout -b ${git_branch} -t origin/${git_branch}
+}
+
+# git update using reference as a tag. Be careful editing source in that repo,
+# as the working copy will be in detached HEAD mode
+# git_update_tag ref
+function git_update_tag {
+ local git_tag=$1
+
+ git tag -d ${git_tag}
+ # fetching given tag only
+ git fetch origin tag ${git_tag}
+ git checkout -f ${git_tag}
+}
+
+
+# OpenStack Functions
+
git_checkout()
{
- if git cat-file -e $1^{commit} 2>/dev/null; then
- # branch, tag or sha1 object
- git checkout $1 && git pull
- else
+ local git_ref=$1
+ if [[ -n "$(git show-ref refs/tags/${git_ref})" ]]; then
+ git_update_tag "${git_ref}"
+ elif [[ -n "$(git show-ref refs/heads/${git_ref})" ]]; then
+ git_update_branch "${git_ref}"
+ elif [[ -n "$(git show-ref refs/remotes/origin/${git_ref})" ]]; then
+ git_update_remote_branch "${git_ref}"
+ # check to see if it is a remote ref
+ elif git fetch --tags origin "${git_ref}"; then
# refspec / changeset
- git fetch --tags --progress $2 $1
git checkout FETCH_HEAD
+ else
+ # if we are a random commit id we have to unshallow
+ # to get all the commits
+ git fetch --unshallow origin
+ git checkout -f "${git_ref}"
fi
}
echo
-echo "INFO: Updating releng -> $RELENG_BRANCH"
-if [ ! -d $RELENG_REPO_DIR ]; then
- git clone $RELENG_REPO $RELENG_REPO_DIR
+echo "INFO: Updating releng -> ${RELENG_BRANCH}"
+if [ ! -d ${RELENG_REPO_DIR} ]; then
+ git clone ${RELENG_REPO} ${RELENG_REPO_DIR}
fi
-cd $RELENG_REPO_DIR
-git checkout master
-git_checkout $RELENG_BRANCH $RELENG_REPO
+cd ${RELENG_REPO_DIR}
+# reset remote so we know origin is valid
+git remote set-url origin ${RELENG_REPO}
+# fetch the exact ref
+git fetch --tags origin ${RELENG_BRANCH} || true
+# purge pyc files
+find . -name '*.pyc' -delete
+git_checkout ${RELENG_BRANCH}
echo
-echo "INFO: Updating yardstick -> $YARDSTICK_BRANCH"
-if [ ! -d $YARDSTICK_REPO_DIR ]; then
- git clone $YARDSTICK_REPO $YARDSTICK_REPO_DIR
+echo "INFO: Updating yardstick -> ${YARDSTICK_BRANCH}"
+if [ ! -d ${YARDSTICK_REPO_DIR} ]; then
+ git clone ${YARDSTICK_REPO} ${YARDSTICK_REPO_DIR}
fi
-cd $YARDSTICK_REPO_DIR
-git_checkout $YARDSTICK_BRANCH $YARDSTICK_REPO
+cd ${YARDSTICK_REPO_DIR}
+git_checkout ${YARDSTICK_BRANCH}
# setup the environment
-source $YARDSTICK_REPO_DIR/tests/ci/prepare_env.sh
+source ${YARDSTICK_REPO_DIR}/tests/ci/prepare_env.sh
# execute tests
-$YARDSTICK_REPO_DIR/tests/ci/yardstick-verify $@
+${YARDSTICK_REPO_DIR}/tests/ci/yardstick-verify $@
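
The rewritten git_checkout probes the ref in a fixed order: tag, local branch, remote branch, fetchable refspec, and finally an unshallowing fetch for a bare commit id. A small Python sketch of the same classification order (run inside a git work tree; the ref is illustrative):

    import subprocess

    def classify_ref(ref):
        def has(pattern):
            res = subprocess.run(['git', 'show-ref', pattern.format(ref)],
                                 capture_output=True, text=True)
            return bool(res.stdout.strip())

        if has('refs/tags/{}'):
            return 'tag'               # handled by git_update_tag
        if has('refs/heads/{}'):
            return 'local branch'      # handled by git_update_branch
        if has('refs/remotes/origin/{}'):
            return 'remote branch'     # handled by git_update_remote_branch
        return 'refspec or sha1'       # git fetch + FETCH_HEAD, or unshallow

    print(classify_ref('master'))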
diff --git a/docs/testing/user/userguide/04-installation.rst b/docs/testing/user/userguide/04-installation.rst
index 0c2bb58cf..660f3b5a8 100644
--- a/docs/testing/user/userguide/04-installation.rst
+++ b/docs/testing/user/userguide/04-installation.rst
@@ -149,7 +149,12 @@ In the Yardstick container, the Yardstick repository is located in the ``/home/o
yardstick env prepare
-**NOTE**: The above command just works for four OPNFV installers -- **Apex**, **Compass**, **Fuel** and **Joid**.
+**NOTE**: The above command works for the four OPNFV installers -- **Apex**, **Compass**, **Fuel** and **Joid**.
+For an OpenStack environment deployed by a non-OPNFV installer, the same command can be used to configure the
+environment, but the ``/etc/yardstick/openstack.creds`` file must be created first, with the required OpenStack
+environment variables saved in it. For details of the required OpenStack environment variables, please refer to
+the section **Export OpenStack environment variables**.
+
The env prepare command may take up to 6-8 minutes to finish building
yardstick-image and other environment preparation. Meanwhile if you wish to
monitor the env prepare process, you can enter the Yardstick container in a new
@@ -506,3 +511,4 @@ yaml file and add test cases, constraint or task arguments if necessary.
Proxy Support (**Todo**)
---------------------------
+
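
In Python terms, the environment that /etc/yardstick/openstack.creds must provide before running `yardstick env prepare` on a non-OPNFV deployment looks roughly like this (variable values are illustrative; the referenced section is authoritative):

    import os

    # Illustrative only: the creds file exports these as shell variables.
    os.environ.update({
        'OS_AUTH_URL': 'http://keystone:5000/v3',
        'OS_USERNAME': 'admin',
        'OS_PASSWORD': 'secret',
        'OS_PROJECT_NAME': 'admin',
        'OS_USER_DOMAIN_NAME': 'Default',
        'OS_PROJECT_DOMAIN_NAME': 'Default',
    })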
diff --git a/fuel-plugin/LICENSE b/fuel-plugin/LICENSE
deleted file mode 100644
index e06d20818..000000000
--- a/fuel-plugin/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/fuel-plugin/README.md b/fuel-plugin/README.md
deleted file mode 100644
index 8c00d4b68..000000000
--- a/fuel-plugin/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-plugin-yardstick
-================
-
-Plugin description
-Installs Yardstick on base-os node via a fuel plugin.
-
-1) install vagrant fuel plugin builder (fpb)
- sudo apt-get install -y ruby-dev rubygems-integration python-pip rpm createrepo dpkg-dev
- sudo gem install fpm
- sudo pip install fuel-plugin-builder
-2) build plugin
- fpb --build <plugin-dir>
- e.g.: fpb --build yardstick/fuel-plugin
-
-3) copy plugin rpm to fuel master
- e.g. scp plugin-yardstick-0.1-0.1.0-1.noarch.rpm <user>@<server-name>:~/
-
-4) install plugin
- fuel plugins --install <plugin-name>.rpm
-
-5) prepare fuel environment
- on fuel dashboard, go to settings/other
- enable yardstick plugin with checkbox
- save settings
-
-6) add nodes to environment
-
-7) deploy
-
-8) run
-Once deployed, SSH to deployed node. Find IP of yardstick node.
-SSH to yardstick node, Activate yardstick:
- source /var/lib/yardstick.openrc
- source /var/lib/yardstick/bin/activate
- export EXTERNAL_NETWORK="admin_floating_net"
- yardstick task start /opt/yardstick/fuel-plugin/fuel_ping.yaml
diff --git a/fuel-plugin/deployment_scripts/install.sh b/fuel-plugin/deployment_scripts/install.sh
deleted file mode 100755
index 251d044bf..000000000
--- a/fuel-plugin/deployment_scripts/install.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-set -eux
-
-HOST=$1
-BIN_HOME=/opt/yardstick
-VAR_HOME=/var/lib/yardstick
-rm -rf $BIN_HOME; mkdir -p $BIN_HOME
-rm -rf $VAR_HOME; mkdir -p $VAR_HOME
-
-apt-get install -y python-dev python-pip libffi-dev libssl-dev libxml2-dev libxslt1-dev
-
-#apt-get install python-virtualenv cannot work
-#use pip to work around the issue
-
-pip install virtualenv
-
-# create python virtual env
-virtualenv $VAR_HOME
-
-export PS1="yardstick"
-source $VAR_HOME/bin/activate
-
-easy_install -U setuptools
-
-cd $BIN_HOME
-
-curl http://$HOST:8080/plugins/fuel-plugin-yardstick-1.0/repositories/ubuntu/yardstick.tar.gz | tar xzvf -
-
-# install dependency
-pip install -r requirements.txt
-
-python setup.py install
diff --git a/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp b/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
deleted file mode 100644
index 3741bacf2..000000000
--- a/fuel-plugin/deployment_scripts/puppet/manifests/yardstick-install.pp
+++ /dev/null
@@ -1,33 +0,0 @@
-$master_ip = hiera('master_ip')
-
-$access_hash = hiera_hash('access', {})
-$admin_tenant = $access_hash['tenant']
-$admin_user = $access_hash['user']
-$admin_password = $access_hash['password']
-$region = hiera('region', 'RegionOne')
-
-$auth_api_version = ''
-$service_endpoint = hiera('service_endpoint', $management_vip)
-$ssl_hash = hiera_hash('use_ssl', {})
-$internal_auth_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'protocol', 'http')
-$internal_auth_address = get_ssl_property($ssl_hash, {}, 'keystone', 'internal', 'hostname', [$service_endpoint])
-$identity_uri = "${internal_auth_protocol}://${internal_auth_address}:5000"
-$auth_url = "${identity_uri}/${auth_api_version}"
-
-exec { "install yardstick":
- command => "curl http://${master_ip}:8080/plugins/fuel-plugin-yardstick-1.0/deployment_scripts/install.sh | bash -s ${master_ip}",
- path => "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin";
-}
-
-osnailyfacter::credentials_file { '/var/lib/yardstick.openrc':
- admin_user => $admin_user,
- admin_password => $admin_password,
- admin_tenant => $admin_tenant,
- region_name => $region,
- auth_url => $auth_url,
-}
-
-exec { "run yardstick":
- command => "echo hello",
- path => "/usr/local/bin:/usr/bin:/usr/sbin:/bin:/sbin";
-}
diff --git a/fuel-plugin/deployment_tasks.yaml b/fuel-plugin/deployment_tasks.yaml
deleted file mode 100644
index db57765a3..000000000
--- a/fuel-plugin/deployment_tasks.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-- id: yardstick
- type: group
- role: [yardstick]
- requires: [deploy_start]
- required_for: [deploy_end]
- tasks: [hiera, setup_repositories, fuel_pkgs, globals, tools, logging, netconfig]
- parameters:
- strategy:
- type: parallel
-
-- id: yardstick-install
- type: puppet
- version: 2.0.0
- groups: [yardstick]
- required_for: [post_deployment_end]
- requires: [post_deployment_start]
- parameters:
- puppet_manifest: puppet/manifests/yardstick-install.pp
- puppet_modules: puppet/modules:/etc/puppet/modules
- timeout: 720
diff --git a/fuel-plugin/environment_config.yaml b/fuel-plugin/environment_config.yaml
deleted file mode 100644
index e69de29bb..000000000
--- a/fuel-plugin/environment_config.yaml
+++ /dev/null
diff --git a/fuel-plugin/metadata.yaml b/fuel-plugin/metadata.yaml
deleted file mode 100644
index e9aebaf2a..000000000
--- a/fuel-plugin/metadata.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-# Plugin name
-name: fuel-plugin-yardstick
-# Human-readable name for your plugin
-title: Install Yardstick
-# Plugin version
-version: '1.0.0'
-# Description
-description: Installs Yardstick
-# Required fuel version
-fuel_version: ['10.0']
-# Specify license of your plugin
-licenses: ['Apache License Version 2.0']
-# Specify author or company name
-authors: ['john.hinman@intel.com','david.j.chou@intel.com','ruijing.guo@intel.com']
-# A link to the plugin's page
-homepage: 'https://gerrit.opnfv.org/gerrit/yardstick'
-# Specify a group which your plugin implements, possible options:
-# network, storage, storage::cinder, storage::glance, hypervisor,
-# equipment
-groups: []
-# Change `false` to `true` if the plugin can be installed in the environment
-# after the deployment.
-is_hotpluggable: true
-
-# Version of plugin package
-package_version: '4.0.0'
-# The plugin is compatible with releases in the list
-releases:
- - os: ubuntu
- version: newton-10.0
- mode: ['ha']
- deployment_scripts_path: deployment_scripts/
- repository_path: repositories/ubuntu
-
-# Version of plugin package
-package_version: '4.0.0'
diff --git a/fuel-plugin/node_roles.yaml b/fuel-plugin/node_roles.yaml
deleted file mode 100644
index 34ace6208..000000000
--- a/fuel-plugin/node_roles.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-yardstick:
- name: "Yardstick"
- description: "Install Yardstick on nodes with this role"
- has_primary: false # whether has primary role or not
- public_ip_required: false # whether requires public net or not
- weight: 50 # weight that will be used for ordering on fuel ui
- limits:
- min: 0
diff --git a/fuel-plugin/pre_build_hook b/fuel-plugin/pre_build_hook
deleted file mode 100755
index 9c0a40e65..000000000
--- a/fuel-plugin/pre_build_hook
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-BUILD_FOR=${BUILD_FOR:-ubuntu}
-DIR="$(dirname `readlink -f $0`)"
-
-function build_pkg {
- case $1 in
- ubuntu)
- rm -rf ${DIR}/repositories/ubuntu; mkdir -p ${DIR}/repositories/ubuntu
-
- # fixme: don't support offline installation now since pip in
- # deployment_scripts/install.sh needs to access to internet
- #
- # cd ${DIR}/repositories/ubuntu
- # Use aptititude; apt-get -d will skip download if package is already installed
- # sudo apt-get install aptitude -y
- # Download python packages into the repository
- # aptitude download python-virtualenv python-dev libffi-dev libssl-dev
-
- cd ${DIR}/..
- tar -czf ${DIR}/repositories/ubuntu/yardstick.tar.gz . --exclude=yardstick.tar.gz
- ;;
-
- *) echo "Not supported system"; exit 1;;
- esac
-}
-
-for system in $BUILD_FOR
-do
- build_pkg $system
-done
diff --git a/fuel-plugin/tasks.yaml b/fuel-plugin/tasks.yaml
deleted file mode 100644
index c3ba08a2e..000000000
--- a/fuel-plugin/tasks.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-[]
diff --git a/fuel-plugin/vagrant/Vagrantfile b/fuel-plugin/vagrant/Vagrantfile
deleted file mode 100644
index 271ff7937..000000000
--- a/fuel-plugin/vagrant/Vagrantfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-
- config.vm.box = "trusty-server-cloudimg-amd64"
- config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box"
-
- config.vm.define "fuel" do | h |
- h.vm.host_name = "fuel"
- h.vm.provision :shell, :inline => "/vagrant/build_fuel_plugin.sh", privileged: false
- h.vm.synced_folder "../..", "/yardstick"
- h.vm.provider :virtualbox do |v|
- v.customize ["modifyvm", :id, "--memory", 4096]
- v.customize ["modifyvm", :id, "--cpus", 4]
- end
- end
-end
diff --git a/fuel-plugin/vagrant/build_fuel_plugin.sh b/fuel-plugin/vagrant/build_fuel_plugin.sh
deleted file mode 100755
index 548ea8457..000000000
--- a/fuel-plugin/vagrant/build_fuel_plugin.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-sudo apt-get update -y
-sudo apt-get install -y ruby-dev rubygems-integration python-pip rpm createrepo dpkg-dev
-sudo gem install fpm
-sudo pip install fuel-plugin-builder
-cp -r /yardstick /home/vagrant
-cd /home/vagrant/yardstick/fuel-plugin;
-rm -rf vagrant/.vagrant
-fpb --debug --build .
-cp *.rpm /vagrant
diff --git a/requirements.txt b/requirements.txt
index 4fb724df5..f283b9921 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,89 +7,88 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-ansible==2.2.2.0
Babel==2.3.4
+Jinja2==2.8.1
+MarkupSafe==0.23
+PyYAML==3.12
+SQLAlchemy==1.1.4
+ansible==2.2.2.0
+appdirs==1.4.3
backport-ipaddress==0.1
chainmap==1.0.2
-cliff==2.3.0
-cmd2==0.6.8
-coverage==4.1b2
-debtcollector==1.3.0
+cliff==2.4.0
+cmd2==0.6.9
+coverage==4.3.4
+debtcollector==1.11.0
django==1.8.17
+docker-py==1.10.6
ecdsa==0.13
-extras==0.0.3
-fixtures==1.4.0
+extras==1.0.0
+fixtures==3.0.0
flake8==2.5.4
-funcsigs==0.4
+flasgger==0.5.13
+flask-restful-swagger==0.19
+flask-restful==0.3.5
+flask==0.11.1
+funcsigs==1.0.2
functools32==3.2.3.post2; python_version <= '2.7'
futures==3.0.5
+influxdb==4.0.0
iso8601==0.1.11
-Jinja2==2.8
-jsonpatch==1.13
+jsonpatch==1.15
jsonpointer==1.10
jsonschema==2.5.1
keystoneauth1==2.18.0
linecache2==1.0.0
-lxml==3.5.0
-MarkupSafe==0.23
+lxml==3.7.2
mccabe==0.4.0
-# upgrade to version 2.0.0 to match python3 unittest.mock features
-mock==2.0.0
-monotonic==1.0
-msgpack-python==0.4.7
-netaddr==0.7.18
-netifaces==0.10.4
+mock==2.0.0 # upgrade to version 2.0.0 to match python3 unittest.mock features
+monotonic==1.2
+msgpack-python==0.4.8
+netaddr==0.7.19
+netifaces==0.10.5
nose==1.3.7
openstacksdk==0.9.13
-os-client-config==1.22.0
-oslo.config==3.14.0
-oslo.i18n==3.4.0
-oslo.serialization==2.4.0
-oslo.utils==3.18.0
-paramiko==1.18.0
-pbr==1.8.1
+os-client-config==1.26.0
+osc-lib==1.3.0
+oslo.config==3.22.0
+oslo.i18n==3.12.0
+oslo.serialization==2.16.0
+oslo.utils==3.22.0
+paramiko==2.1.1
+pbr==1.10.0
pep8==1.7.0
+pika==0.10.0
positional==1.1.1
prettytable==0.7.2
pycrypto==2.6.1
pyflakes==1.0.0
-pyparsing==2.1.0
+pyparsing==2.1.10
+pyroute2==0.4.12
pyrsistent==0.11.12
-osc-lib==1.2.0
-python-cinderclient==1.9.0
-python-glanceclient==2.5.0
-python-heatclient==1.5.0
-python-keystoneclient==3.8.0
-python-mimeparse==1.5.1
-python-neutronclient==6.0.0
-python-novaclient==6.0.0
-python-openstackclient==3.8.0
+python-cinderclient==1.11.0
+python-glanceclient==2.6.0
+python-heatclient==1.8.1
+python-keystoneclient==3.10.0
+python-mimeparse==1.6.0
+python-neutronclient==6.1.0
+python-novaclient==7.1.1
+python-openstackclient==3.8.1
python-subunit==1.2.0
-python-swiftclient==3.0.0
-pytz==2015.7
-PyYAML==3.11
-requests==2.10.0
+python-swiftclient==3.3.0
+pytz==2016.10
+pyzmq==14.5.0 # version 14.5.0 for compatibility with trex traffic generator
+requests==2.13.0
requestsexceptions==1.1.3
scp==0.10.2
shade==1.17.0
-simplejson==3.8.2
+simplejson==3.10.0
six==1.10.0
-stevedore==1.17.1
+stevedore==1.20.0
testrepository==0.0.20
-testtools==2.0.0
+testtools==2.2.0
traceback2==1.4.0
unicodecsv==0.14.1
unittest2==1.1.0
warlock==1.2.0
-wrapt==1.10.6
-flask==0.11.1
-flask-restful==0.3.5
-influxdb==3.0.0
-pyroute2==0.4.10
-docker-py==1.10.6
-flasgger==0.5.13
-flask-restful-swagger==0.19
-SQLAlchemy==1.1.4
-# version 14.5.0 for compatibility with trex traffic generator
-pyzmq==14.5.0
-pika==0.10.0
+wrapt==1.10.8
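
The pins use PEP 508 environment markers, e.g. functools32 is only installed on Python 2. To check how such a marker evaluates on the current interpreter (the `packaging` library is an assumption here, not one of the pins above):

    from packaging.markers import Marker

    marker = Marker("python_version <= '2.7'")
    print(marker.evaluate())  # True on 2.7, False on any Python 3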
diff --git a/samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml b/samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml
new file mode 100644
index 000000000..8826f539e
--- /dev/null
+++ b/samples/vnf_samples/nsut/ping/tc_external_ping_heat_context.yaml
@@ -0,0 +1,61 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: NSPerf
+ traffic_profile: ../../traffic_profiles/fixed.yaml
+ topology: ping_tg_topology.yaml
+
+ nodes:
+ tg__1: trafficgen_1.baremetal
+ vnf__1: vnf.yardstick
+
+ runner:
+ type: Duration
+ duration: 10
+
+contexts:
+ - name: yardstick
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
+
+ placement_groups:
+ pgrp1:
+ policy: "availability"
+
+ servers:
+ vnf:
+ floating_ip: true
+ placement: "pgrp1"
+
+ networks:
+ mgmt:
+ cidr: '10.0.1.0/24'
+ external_network: "yardstick-public"
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public
+
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private
+
+ - name: baremetal
+ type: Node
+ file: baremetal-pod.yaml
diff --git a/fuel-plugin/fuel_ping.yaml b/samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml
index e6c59480b..394523ffa 100644
--- a/fuel-plugin/fuel_ping.yaml
+++ b/samples/vnf_samples/nsut/ping/tc_ping_heat_context.yaml
@@ -11,46 +11,51 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
----
-# Sample file copied from sampls/ping.yaml
-# context part is modified according to fuel default env
+---
schema: "yardstick:task:0.1"
scenarios:
-
- type: Ping
- options:
- packetsize: 200
- host: athena.demo
- target: ares.demo
+ type: NSPerf
+ traffic_profile: ../../traffic_profiles/fixed.yaml
+ topology: ping_tg_topology.yaml
+
+ nodes:
+ tg__1: trafficgen_1.yardstick
+ vnf__1: vnf.yardstick
runner:
type: Duration
- duration: 60
- interval: 1
-
- sla:
- max_rtt: 10
- action: monitor
+ duration: 10
context:
- name: demo
- image: TestVM
- flavor: m1.micro
- user: cirros
+ name: yardstick
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
placement_groups:
pgrp1:
policy: "availability"
servers:
- athena:
+ vnf:
floating_ip: true
placement: "pgrp1"
- ares:
+ trafficgen_1:
+ floating_ip: true
placement: "pgrp1"
networks:
- test:
- cidr: '192.168.111.0/24'
+ mgmt:
+ cidr: '10.0.1.0/24'
+ external_network: "yardstick-public"
+ xe0:
+ cidr: '10.0.2.0/24'
+ vld_id: public
+ xe1:
+ cidr: '10.0.3.0/24'
+ vld_id: private
+
+
diff --git a/tests/ci/clean_images.sh b/tests/ci/clean_images.sh
index 27da9e279..f25006487 100755
--- a/tests/ci/clean_images.sh
+++ b/tests/ci/clean_images.sh
@@ -15,18 +15,24 @@ cleanup()
echo
echo "========== Cleanup =========="
- if ! openstack image list; then
+ if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+ else
+ SECURE=""
+ fi
+
+ if ! openstack ${SECURE} image list; then
return
fi
- for image in $(openstack image list | grep -e cirros-0.3.5 -e yardstick-image -e Ubuntu-16.04 \
+ for image in $(openstack ${SECURE} image list | grep -e cirros-0.3.5 -e yardstick-image -e Ubuntu-16.04 \
| awk '{print $2}'); do
echo "Deleting image $image..."
- openstack image delete $image || true
+ openstack ${SECURE} image delete $image || true
done
- openstack flavor delete yardstick-flavor &> /dev/null || true
- openstack flavor delete storperf &> /dev/null || true
+ openstack ${SECURE} flavor delete yardstick-flavor &> /dev/null || true
+ openstack ${SECURE} flavor delete storperf &> /dev/null || true
}
main()
diff --git a/tests/ci/load_images.sh b/tests/ci/load_images.sh
index 487f33e33..0c197b313 100755
--- a/tests/ci/load_images.sh
+++ b/tests/ci/load_images.sh
@@ -88,7 +88,7 @@ load_yardstick_image()
if [ ! -f "${CLOUD_KERNEL}" ]; then
tar xf "${CLOUD_IMAGE}" "${CLOUD_KERNEL##**/}"
fi
- create_kernel=$(openstack image create \
+ create_kernel=$(openstack ${SECURE} image create \
--public \
--disk-format qcow2 \
--container-format bare \
@@ -119,7 +119,7 @@ load_yardstick_image()
fi
if [[ "$DEPLOY_SCENARIO" == *"-lxd-"* ]]; then
- output=$(eval openstack image create \
+ output=$(eval openstack ${SECURE} image create \
--public \
--disk-format raw \
--container-format bare \
@@ -127,7 +127,7 @@ load_yardstick_image()
--file ${RAW_IMAGE} \
yardstick-image)
else
- output=$(eval openstack image create \
+ output=$(eval openstack ${SECURE} image create \
--public \
--disk-format qcow2 \
--container-format bare \
@@ -150,7 +150,7 @@ load_yardstick_image()
load_cirros_image()
{
- if [[ -n $(openstack image list | grep -e Cirros-0.3.5) ]]; then
+ if [[ -n $(openstack ${SECURE} image list | grep -e Cirros-0.3.5) ]]; then
echo "Cirros-0.3.5 image already exist, skip loading cirros image"
else
echo
@@ -164,7 +164,7 @@ load_cirros_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- output=$(openstack image create \
+ output=$(openstack ${SECURE} image create \
--disk-format qcow2 \
--container-format bare \
${EXTRA_PARAMS} \
@@ -195,7 +195,7 @@ load_ubuntu_image()
EXTRA_PARAMS=$EXTRA_PARAMS" --property hw_mem_page_size=large"
fi
- output=$(openstack image create \
+ output=$(openstack ${SECURE} image create \
--disk-format qcow2 \
--container-format bare \
$EXTRA_PARAMS \
@@ -215,26 +215,26 @@ load_ubuntu_image()
create_nova_flavor()
{
- if ! openstack flavor list | grep -q yardstick-flavor; then
+ if ! openstack ${SECURE} flavor list | grep -q yardstick-flavor; then
echo
echo "========== Creating yardstick-flavor =========="
# Create the nova flavor used by some sample test cases
- openstack flavor create --id 100 --ram 1024 --disk 3 --vcpus 1 yardstick-flavor
+ openstack ${SECURE} flavor create --id 100 --ram 1024 --disk 3 --vcpus 1 yardstick-flavor
# DPDK-enabled OVS requires guest memory to be backed by large pages
if [[ $DEPLOY_SCENARIO == *[_-]ovs[_-]* ]]; then
- openstack flavor set --property hw:mem_page_size=large yardstick-flavor
+ openstack ${SECURE} flavor set --property hw:mem_page_size=large yardstick-flavor
fi
# VPP requires guest memory to be backed by large pages
if [[ "$DEPLOY_SCENARIO" == *"-fdio-"* ]]; then
- openstack flavor set --property hw:mem_page_size=large yardstick-flavor
+ openstack ${SECURE} flavor set --property hw:mem_page_size=large yardstick-flavor
fi
fi
- if ! openstack flavor list | grep -q storperf; then
+ if ! openstack ${SECURE} flavor list | grep -q storperf; then
echo
echo "========== Creating storperf flavor =========="
# Create the nova flavor used by storperf test case
- openstack flavor create --id auto --ram 8192 --disk 4 --vcpus 2 storperf
+ openstack ${SECURE} flavor create --id auto --ram 8192 --disk 4 --vcpus 2 storperf
fi
}
@@ -250,6 +250,12 @@ main()
RAW_IMAGE='/home/opnfv/images/yardstick-image.tar.gz'
fi
+ if [ $OS_CACERT ] && [ "$(echo $OS_CACERT | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+ else
+ SECURE=""
+ fi
+
build_yardstick_image
load_yardstick_image
if [ "${YARD_IMG_ARCH}" == "arm64" ]; then
diff --git a/tests/ci/prepare_storperf_admin-rc.sh b/tests/ci/prepare_storperf_admin-rc.sh
index a6cf97bef..979728e84 100755
--- a/tests/ci/prepare_storperf_admin-rc.sh
+++ b/tests/ci/prepare_storperf_admin-rc.sh
@@ -33,3 +33,5 @@ echo "OS_PROJECT_ID="$PROJECT_ID >> ~/storperf_admin-rc
echo "OS_TENANT_NAME="$TENANT_NAME >> ~/storperf_admin-rc
echo "OS_TENANT_ID="$TENANT_ID >> ~/storperf_admin-rc
echo "OS_USER_DOMAIN_ID="$USER_DOMAIN_ID >> ~/storperf_admin-rc
+echo "OS_PROJECT_DOMAIN_NAME="$OS_PROJECT_DOMAIN_NAME >> ~/storperf_admin-rc
+echo "OS_USER_DOMAIN_NAME="$OS_USER_DOMAIN_NAME >> ~/storperf_admin-rc
diff --git a/tests/unit/benchmark/contexts/test_heat.py b/tests/unit/benchmark/contexts/test_heat.py
index d878ebe97..3dadd48eb 100644
--- a/tests/unit/benchmark/contexts/test_heat.py
+++ b/tests/unit/benchmark/contexts/test_heat.py
@@ -17,6 +17,7 @@ import logging
import os
import unittest
import uuid
+from collections import OrderedDict
import mock
@@ -37,7 +38,7 @@ class HeatContextTestCase(unittest.TestCase):
self.assertIsNone(self.test_context.name)
self.assertIsNone(self.test_context.stack)
- self.assertEqual(self.test_context.networks, [])
+ self.assertEqual(self.test_context.networks, OrderedDict())
self.assertEqual(self.test_context.servers, [])
self.assertEqual(self.test_context.placement_groups, [])
self.assertEqual(self.test_context.server_groups, [])
@@ -105,7 +106,9 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
netattrs = {'cidr': '10.0.0.0/24', 'provider': None, 'external_network': 'ext_net'}
self.mock_context.name = 'bar'
- self.test_context.networks = [model.Network("fool-network", self.mock_context, netattrs)]
+ self.test_context.networks = OrderedDict(
+ {"fool-network": model.Network("fool-network", self.mock_context,
+ netattrs)})
self.test_context._add_resources_to_template(mock_template)
mock_template.add_keypair.assert_called_with(
@@ -122,6 +125,7 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.name = 'foo'
self.test_context.template_file = '/bar/baz/some-heat-file'
self.test_context.heat_parameters = {'image': 'cirros'}
+ self.test_context.heat_timeout = 5
self.test_context.deploy()
mock_template.assert_called_with(self.test_context.name,
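
The updated tests reflect that the heat context now stores networks as an OrderedDict keyed by network name instead of a list, so template generation walks networks in a deterministic order. A tiny sketch of the property being relied on (network names are illustrative):

    from collections import OrderedDict

    networks = OrderedDict()
    networks['mgmt'] = {'cidr': '10.0.1.0/24'}
    networks['xe0'] = {'cidr': '10.0.2.0/24'}
    networks['xe1'] = {'cidr': '10.0.3.0/24'}

    # Iteration order matches insertion order, so the generated Heat
    # resources come out in a stable, reproducible order.
    assert list(networks) == ['mgmt', 'xe0', 'xe1']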
diff --git a/tests/unit/benchmark/core/test_task.py b/tests/unit/benchmark/core/test_task.py
index 8034392f4..b64bb8eed 100644
--- a/tests/unit/benchmark/core/test_task.py
+++ b/tests/unit/benchmark/core/test_task.py
@@ -65,6 +65,7 @@ class TaskTestCase(unittest.TestCase):
runner = mock.Mock()
runner.join.return_value = 0
runner.get_output.return_value = {}
+ runner.get_result.return_value = []
mock_base_runner.Runner.get.return_value = runner
t._run([scenario], False, "yardstick.out")
self.assertTrue(runner.run.called)
diff --git a/tests/unit/benchmark/runner/test_base.py b/tests/unit/benchmark/runner/test_base.py
index 7880fe5a5..6e72fa548 100644
--- a/tests/unit/benchmark/runner/test_base.py
+++ b/tests/unit/benchmark/runner/test_base.py
@@ -13,7 +13,6 @@ from __future__ import print_function
from __future__ import absolute_import
import unittest
-import multiprocessing
import time
from yardstick.benchmark.runners.iteration import IterationRunner
@@ -22,8 +21,7 @@ from yardstick.benchmark.runners.iteration import IterationRunner
class RunnerTestCase(unittest.TestCase):
def test_get_output(self):
- queue = multiprocessing.Queue()
- runner = IterationRunner({}, queue)
+ runner = IterationRunner({})
runner.output_queue.put({'case': 'opnfv_yardstick_tc002'})
runner.output_queue.put({'criteria': 'PASS'})
diff --git a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index ea54fbb9b..de2170b16 100644
--- a/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -56,6 +56,7 @@ class ScenarioGeneralTestCase(unittest.TestCase):
mock_obj = mock.Mock()
mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
ins.director = mock_obj
+ ins.director.data = {}
ins.run({})
ins.teardown()
@@ -64,5 +65,6 @@ class ScenarioGeneralTestCase(unittest.TestCase):
mock_obj = mock.Mock()
mock_obj.verify.return_value = False
ins.director = mock_obj
+ ins.director.data = {}
ins.run({})
ins.teardown()
diff --git a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 4167d6f3b..111e7812e 100644
--- a/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -20,12 +20,13 @@
from __future__ import absolute_import
import os
+import errno
import unittest
-
import mock
from yardstick.benchmark.scenarios.networking.vnf_generic import \
- SshManager, NetworkServiceTestCase, IncorrectConfig, IncorrectSetup
+ SshManager, NetworkServiceTestCase, IncorrectConfig, \
+ IncorrectSetup, open_relative_file
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic.vnf.base import \
GenericTrafficGen, GenericVNF
@@ -288,6 +289,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
}
self.scenario_cfg = {
+ 'task_path': "",
'tc_options': {'rfc2544': {'allowed_drop_rate': '0.8 - 1'}},
'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
'tc': 'tc_ipv4_1Mflow_64B_packetsize',
@@ -350,7 +352,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
vnf = mock.Mock(autospec=GenericVNF)
self.s.get_vnf_impl = mock.Mock(return_value=vnf)
- self.assertIsNotNone(self.s.load_vnf_models(self.context_cfg))
+ self.assertIsNotNone(
+ self.s.load_vnf_models(self.scenario_cfg, self.context_cfg))
def test_map_topology_to_infrastructure(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
@@ -488,3 +491,144 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.collector.stop = \
mock.Mock(return_value=True)
self.assertIsNone(self.s.teardown())
+
+ SAMPLE_NETDEVS = {
+ 'enp11s0': {
+ 'address': '0a:de:ad:be:ef:f5',
+ 'device': '0x1533',
+ 'driver': 'igb',
+ 'ifindex': '2',
+ 'interface_name': 'enp11s0',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:0b:00.0',
+ 'subsystem_device': '0x1533',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ },
+ 'lan': {
+ 'address': '0a:de:ad:be:ef:f4',
+ 'device': '0x153a',
+ 'driver': 'e1000e',
+ 'ifindex': '3',
+ 'interface_name': 'lan',
+ 'operstate': 'up',
+ 'pci_bus_id': '0000:00:19.0',
+ 'subsystem_device': '0x153a',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ }
+ }
+ SAMPLE_VM_NETDEVS = {
+ 'eth1': {
+ 'address': 'fa:de:ad:be:ef:5b',
+ 'device': '0x0001',
+ 'driver': 'virtio_net',
+ 'ifindex': '3',
+ 'interface_name': 'eth1',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:00:04.0',
+ 'vendor': '0x1af4'
+ }
+ }
+
+ def test_parse_netdev_info(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/ifindex:2
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/address:0a:de:ad:be:ef:f5
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/operstate:down
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/driver:igb
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/pci_bus_id:0000:0b:00.0
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/ifindex:3
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/address:0a:de:ad:be:ef:f4
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/operstate:up
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/driver:e1000e
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/pci_bus_id:0000:00:19.0
+"""
+ res = NetworkServiceTestCase.parse_netdev_info(output)
+ assert res == self.SAMPLE_NETDEVS
+
+ def test_parse_netdev_info_virtio(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/ifindex:3
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/address:fa:de:ad:be:ef:5b
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/operstate:down
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/vendor:0x1af4
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/device:0x0001
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/driver:virtio_net
+"""
+ res = NetworkServiceTestCase.parse_netdev_info(output)
+ self.assertDictEqual(res, self.SAMPLE_VM_NETDEVS)
+
+ def test_sort_dpdk_port_num(self):
+ netdevs = self.SAMPLE_NETDEVS.copy()
+ NetworkServiceTestCase._sort_dpdk_port_num(netdevs)
+ self.assertEqual(netdevs['lan']['dpdk_port_num'], 1)
+ self.assertEqual(netdevs['enp11s0']['dpdk_port_num'], 2)
+
+ def test_probe_missing_values(self):
+ netdevs = self.SAMPLE_NETDEVS.copy()
+ NetworkServiceTestCase._sort_dpdk_port_num(netdevs)
+ network = {'local_mac': '0a:de:ad:be:ef:f5'}
+ NetworkServiceTestCase._probe_missing_values(netdevs, network, set())
+ self.assertEqual(network['dpdk_port_num'], 2)
+
+ network = {'local_mac': '0a:de:ad:be:ef:f4'}
+ NetworkServiceTestCase._probe_missing_values(netdevs, network, set())
+ self.assertEqual(network['dpdk_port_num'], 1)
+
+ def test_open_relative_path(self):
+ mock_open = mock.mock_open()
+ mock_open_result = mock_open()
+ mock_open_call_count = 1 # initial call to get result
+
+ module_name = \
+ 'yardstick.benchmark.scenarios.networking.vnf_generic.open'
+
+ # test
+ with mock.patch(module_name, mock_open, create=True):
+ self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+
+ mock_open_call_count += 1 # one more call expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertNotIn('bar', mock_open.call_args_list[-1][0][0])
+
+ def open_effect(*args, **kwargs):
+ if kwargs.get('name', args[0]) == os.path.join('bar', 'foo'):
+ return mock_open_result
+ raise IOError(errno.ENOENT, 'not found')
+
+ mock_open.side_effect = open_effect
+ self.assertEqual(open_relative_file('foo', 'bar'), mock_open_result)
+
+ mock_open_call_count += 2 # two more calls expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertIn('bar', mock_open.call_args_list[-1][0][0])
+
+ # test an IOError of type ENOENT
+ mock_open.side_effect = IOError(errno.ENOENT, 'not found')
+ with self.assertRaises(IOError):
+ # the second call still raises
+ open_relative_file('foo', 'bar')
+
+ mock_open_call_count += 2 # two more calls expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
+ self.assertIn('foo', mock_open.call_args_list[-1][0][0])
+ self.assertIn('bar', mock_open.call_args_list[-1][0][0])
+
+ # test an IOError other than ENOENT
+ mock_open.side_effect = IOError(errno.EBUSY, 'busy')
+ with self.assertRaises(IOError):
+ open_relative_file('foo', 'bar')
+
+ mock_open_call_count += 1 # one more call expected
+ self.assertEqual(mock_open.call_count, mock_open_call_count)
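The new parse_netdev_info tests above exercise a regex-driven walk over grep output from sysfs. A condensed, standalone sketch of that parsing, using the same regex as the patch and a sample line copied from the fixture:

    import os
    import re
    from collections import defaultdict

    ADAPTER_RE = re.compile('^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    sample = "/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/vendor:0x8086\n"

    netdevs = defaultdict(dict)
    for bus_path, ifname, key, value in ADAPTER_RE.findall(sample):
        _, bus_id = os.path.split(bus_path)  # -> '0000:00:19.0'
        if 'device/' in key:                 # 'device/vendor' -> 'vendor'
            key = key.split('/')[1]
        netdevs[ifname][key] = value
        netdevs[ifname]['pci_bus_id'] = bus_id

    print(dict(netdevs))
    # {'lan': {'vendor': '0x8086', 'pci_bus_id': '0000:00:19.0'}}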
diff --git a/tests/unit/dispatcher/test_influxdb.py b/tests/unit/dispatcher/test_influxdb.py
index dca3c4189..a5d9b0754 100644
--- a/tests/unit/dispatcher/test_influxdb.py
+++ b/tests/unit/dispatcher/test_influxdb.py
@@ -94,31 +94,31 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
}
}
- self.yardstick_conf = {'yardstick': {}}
-
- def test_record_result_data_no_target(self):
- influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
- influxdb.target = ''
- self.assertEqual(influxdb.record_result_data(self.data1), -1)
-
- def test_record_result_data_no_case_name(self):
- influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
- self.assertEqual(influxdb.record_result_data(self.data2), -1)
+ self.yardstick_conf = {'dispatcher_influxdb': {}}
@mock.patch('yardstick.dispatcher.influxdb.requests')
def test_record_result_data(self, mock_requests):
type(mock_requests.post.return_value).status_code = 204
- influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
- self.assertEqual(influxdb.record_result_data(self.data1), 0)
- self.assertEqual(influxdb.record_result_data(self.data2), 0)
- self.assertEqual(influxdb.flush_result_data(), 0)
+ influxdb = InfluxdbDispatcher(self.yardstick_conf)
+ data = {
+ 'status': 1,
+ 'result': {
+ 'criteria': 'PASS',
+ 'info': {
+ },
+ 'task_id': 'b9e2bbc2-dfd8-410d-8c24-07771e9f7979',
+ 'testcases': {
+ }
+ }
+ }
+ self.assertEqual(influxdb.flush_result_data(data), 0)
def test__dict_key_flatten(self):
line = 'mpstat.loadavg1=0.29,rtt=1.03,mpstat.loadavg0=1.09,' \
'mpstat.cpu0.%idle=99.00,mpstat.cpu0.%sys=0.00'
# need to sort for assert to work
line = ",".join(sorted(line.split(',')))
- influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
+ influxdb = InfluxdbDispatcher(self.yardstick_conf)
flattened_data = influxdb._dict_key_flatten(
self.data3['benchmark']['data'])
result = ",".join(
@@ -126,15 +126,15 @@ class InfluxdbDispatcherTestCase(unittest.TestCase):
self.assertEqual(result, line)
def test__get_nano_timestamp(self):
- influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
- results = {'benchmark': {'timestamp': '1451461248.925574'}}
+ influxdb = InfluxdbDispatcher(self.yardstick_conf)
+ results = {'timestamp': '1451461248.925574'}
self.assertEqual(influxdb._get_nano_timestamp(results),
'1451461248925574144')
@mock.patch('yardstick.dispatcher.influxdb.time')
def test__get_nano_timestamp_except(self, mock_time):
results = {}
- influxdb = InfluxdbDispatcher(None, self.yardstick_conf)
+ influxdb = InfluxdbDispatcher(self.yardstick_conf)
mock_time.time.return_value = 1451461248.925574
self.assertEqual(influxdb._get_nano_timestamp(results),
'1451461248925574144')
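The _dict_key_flatten test above implies that nested result dicts are flattened into dotted keys before being written as InfluxDB line-protocol fields. A minimal sketch of such a flattener (an illustration, not the dispatcher's actual implementation):

    def flatten(data, prefix=''):
        # recurse into nested dicts, joining key segments with '.'
        out = {}
        for key, value in data.items():
            full = '.'.join((prefix, key)) if prefix else key
            if isinstance(value, dict):
                out.update(flatten(value, full))
            else:
                out[full] = value
        return out

    sample = {'rtt': 1.03, 'mpstat': {'loadavg1': 0.29, 'cpu0': {'%idle': 99.0}}}
    print(sorted('%s=%s' % item for item in flatten(sample).items()))
    # ['mpstat.cpu0.%idle=99.0', 'mpstat.loadavg1=0.29', 'rtt=1.03']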
diff --git a/tests/unit/orchestrator/test_heat.py b/tests/unit/orchestrator/test_heat.py
index 4892f98f8..3b3873301 100644
--- a/tests/unit/orchestrator/test_heat.py
+++ b/tests/unit/orchestrator/test_heat.py
@@ -10,16 +10,43 @@
##############################################################################
# Unittest for yardstick.benchmark.orchestrator.heat
-
+from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import unittest
import uuid
+import time
import mock
from yardstick.benchmark.contexts import node
from yardstick.orchestrator import heat
+TARGET_MODULE = 'yardstick.orchestrator.heat'
+
+
+def mock_patch_target_module(inner_import):
+ return mock.patch('.'.join([TARGET_MODULE, inner_import]))
+
+
+@contextmanager
+def timer():
+ start = time.time()
+ data = {'start': start}
+ try:
+ yield data
+ finally:
+ data['end'] = end = time.time()
+ data['delta'] = end - start
+
+
+def get_error_message(error):
+ try:
+ # py2
+ return error.message
+ except AttributeError:
+ # py3
+ return next((arg for arg in error.args if isinstance(arg, str)), None)
+
+
class HeatContextTestCase(unittest.TestCase):
def test_get_short_key_uuid(self):
@@ -70,88 +97,245 @@ class HeatTemplateTestCase(unittest.TestCase):
self.assertEqual(self.template.resources['some-server-group']['properties']['policies'], ['anti-affinity'])
def test__add_resources_to_template_raw(self):
-
- self.test_context = node.NodeContext()
- self.test_context.name = 'foo'
- self.test_context.template_file = '/tmp/some-heat-file'
- self.test_context.heat_parameters = {'image': 'cirros'}
- self.test_context.key_filename = "/tmp/1234"
- self.test_context.keypair_name = "foo-key"
- self.test_context.secgroup_name = "foo-secgroup"
- self.test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
- self._template = {
- 'outputs' : {},
- 'resources' : {}
- }
-
- self.heat_object = heat.HeatObject()
- self.heat_tmp_object = heat.HeatObject()
-
- self.heat_stack = heat.HeatStack("tmpStack")
- self.heat_stack.stacks_exist()
-
- self.test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t')
- self.test_context.tmpfile.write("heat_template_version: 2015-04-30")
- self.test_context.tmpfile.flush()
- self.test_context.tmpfile.seek(0)
- self.heat_tmp_template = heat.HeatTemplate(self.heat_tmp_object, self.test_context.tmpfile.name,
- heat_parameters= {"dict1": 1, "dict2": 2})
-
- self.heat_template = heat.HeatTemplate(self.heat_object)
- self.heat_template.resources = {}
-
- self.heat_template.add_network("network1")
- self.heat_template.add_network("network2")
- self.heat_template.add_security_group("sec_group1")
- self.heat_template.add_security_group("sec_group2")
- self.heat_template.add_subnet("subnet1", "network1", "cidr1")
- self.heat_template.add_subnet("subnet2", "network2", "cidr2")
- self.heat_template.add_router("router1", "gw1", "subnet1")
- self.heat_template.add_router_interface("router_if1", "router1", "subnet1")
- self.heat_template.add_port("port1", "network1", "subnet1")
- self.heat_template.add_port("port2", "network2", "subnet2", sec_group_id="sec_group1",provider="not-sriov")
- self.heat_template.add_port("port3", "network2", "subnet2", sec_group_id="sec_group1",provider="sriov")
- self.heat_template.add_floating_ip("floating_ip1", "network1", "port1", "router_if1")
- self.heat_template.add_floating_ip("floating_ip2", "network2", "port2", "router_if2", "foo-secgroup")
- self.heat_template.add_floating_ip_association("floating_ip1_association", "floating_ip1", "port1")
- self.heat_template.add_servergroup("server_grp2", "affinity")
- self.heat_template.add_servergroup("server_grp3", "anti-affinity")
- self.heat_template.add_security_group("security_group")
- self.heat_template.add_server(name="server1", image="image1", flavor="flavor1", flavors=[])
- self.heat_template.add_server_group(name="servergroup", policies=["policy1","policy2"])
- self.heat_template.add_server_group(name="servergroup", policies="policy1")
- self.heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=[], ports=["port1", "port2"],
+ test_context = node.NodeContext()
+ test_context.name = 'foo'
+ test_context.template_file = '/tmp/some-heat-file'
+ test_context.heat_parameters = {'image': 'cirros'}
+ test_context.key_filename = "/tmp/1234"
+ test_context.keypair_name = "foo-key"
+ test_context.secgroup_name = "foo-secgroup"
+ test_context.key_uuid = "2f2e4997-0a8e-4eb7-9fa4-f3f8fbbc393b"
+ heat_object = heat.HeatObject()
+
+ heat_stack = heat.HeatStack("tmpStack")
+ self.assertTrue(heat_stack.stacks_exist())
+
+ test_context.tmpfile = NamedTemporaryFile(delete=True, mode='w+t')
+ test_context.tmpfile.write("heat_template_version: 2015-04-30")
+ test_context.tmpfile.flush()
+ test_context.tmpfile.seek(0)
+ heat_template = heat.HeatTemplate(heat_object)
+ heat_template.resources = {}
+
+ heat_template.add_network("network1")
+ heat_template.add_network("network2")
+ heat_template.add_security_group("sec_group1")
+ heat_template.add_security_group("sec_group2")
+ heat_template.add_subnet("subnet1", "network1", "cidr1")
+ heat_template.add_subnet("subnet2", "network2", "cidr2")
+ heat_template.add_router("router1", "gw1", "subnet1")
+ heat_template.add_router_interface("router_if1", "router1", "subnet1")
+ heat_template.add_port("port1", "network1", "subnet1")
+ heat_template.add_port("port2", "network2", "subnet2", sec_group_id="sec_group1",provider="not-sriov")
+ heat_template.add_port("port3", "network2", "subnet2", sec_group_id="sec_group1",provider="sriov")
+ heat_template.add_floating_ip("floating_ip1", "network1", "port1", "router_if1")
+ heat_template.add_floating_ip("floating_ip2", "network2", "port2", "router_if2", "foo-secgroup")
+ heat_template.add_floating_ip_association("floating_ip1_association", "floating_ip1", "port1")
+ heat_template.add_servergroup("server_grp2", "affinity")
+ heat_template.add_servergroup("server_grp3", "anti-affinity")
+ heat_template.add_security_group("security_group")
+ heat_template.add_server(name="server1", image="image1", flavor="flavor1", flavors=[])
+ heat_template.add_server_group(name="servergroup", policies=["policy1", "policy2"])
+ heat_template.add_server_group(name="servergroup", policies="policy1")
+ heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=[], ports=["port1", "port2"],
networks=["network1", "network2"], scheduler_hints="hints1", user="user1",
key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2},
additional_properties={"prop1": 1, "prop2": 2})
- self.heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor1", "flavor2"],
+ heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor1", "flavor2"],
ports=["port1", "port2"],
networks=["network1", "network2"], scheduler_hints="hints1", user="user1",
key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2},
additional_properties={"prop1": 1, "prop2": 2} )
- self.heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor3", "flavor4"],
+ heat_template.add_server(name="server2", image="image1", flavor="flavor1", flavors=["flavor3", "flavor4"],
ports=["port1", "port2"],
networks=["network1", "network2"], scheduler_hints="hints1", user="user1",
key_name="foo-key", user_data="user", metadata={"cat": 1, "doc": 2},
additional_properties={"prop1": 1, "prop2": 2})
- self.heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,extra_specs={"cat": 1, "dog": 2})
- self.heat_template.add_flavor(name=None, vcpus=1, ram=2048)
- self.heat_template.add_server(name="server1",
- image="image1",
- flavor="flavor1",
- flavors=[],
- ports=["port1", "port2"],
- networks=["network1", "network2"],
- scheduler_hints="hints1",
- user="user1",
- key_name="foo-key",
- user_data="user",
- metadata={"cat": 1, "doc": 2},
- additional_properties= {"prop1": 1, "prop2": 2} )
- self.heat_template.add_network("network1")
-
- self.heat_template.add_flavor("test")
- self.assertEqual(self.heat_template.resources['test']['type'], 'OS::Nova::Flavor')
+ heat_template.add_flavor(name="flavor1", vcpus=1, ram=2048, disk=1,extra_specs={"cat": 1, "dog": 2})
+ heat_template.add_flavor(name=None, vcpus=1, ram=2048)
+ heat_template.add_server(name="server1",
+ image="image1",
+ flavor="flavor1",
+ flavors=[],
+ ports=["port1", "port2"],
+ networks=["network1", "network2"],
+ scheduler_hints="hints1",
+ user="user1",
+ key_name="foo-key",
+ user_data="user",
+ metadata={"cat": 1, "doc": 2},
+ additional_properties={"prop1": 1, "prop2": 2})
+ heat_template.add_network("network1")
+
+ heat_template.add_flavor("test")
+ self.assertEqual(heat_template.resources['test']['type'], 'OS::Nova::Flavor')
+
+ @mock_patch_target_module('op_utils')
+ @mock_patch_target_module('heatclient.client.Client')
+ def test_create_negative(self, mock_heat_client_class, mock_op_utils):
+ self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+ mock_heat_client = mock_heat_client_class() # get the constructed mock
+
+ # populate attributes of the constructed mock
+ mock_heat_client.stacks.get().stack_status_reason = 'the reason'
+
+ expected_status_calls = 0
+ expected_constructor_calls = 1 # above, to get the instance
+ expected_create_calls = 0
+ expected_op_utils_usage = 0
+
+ with mock.patch.object(self.template, 'status', return_value=None) as mock_status:
+ # block with timeout hit
+ timeout = 2
+ with self.assertRaises(RuntimeError) as raised, timer() as time_data:
+ self.template.create(block=True, timeout=timeout)
+
+ # ensure runtime is approximately the timeout value
+ expected_time_low = timeout - interval * 0.2
+ expected_time_high = timeout + interval * 0.2
+ self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+
+ # ensure op_utils was used
+ expected_op_utils_usage += 1
+ self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage)
+
+ # ensure the constructor and instance were used
+ expected_constructor_calls += 1
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure that the status was used
+ self.assertGreater(mock_status.call_count, expected_status_calls)
+ expected_status_calls = mock_status.call_count # synchronize the value
+
+ # ensure the expected exception was raised
+ error_message = get_error_message(raised.exception)
+ self.assertIn('timeout', error_message)
+ self.assertNotIn('the reason', error_message)
+
+ # block with create failed
+ timeout = 10
+ mock_status.side_effect = iter([None, None, u'CREATE_FAILED'])
+ with self.assertRaises(RuntimeError) as raised, timer() as time_data:
+ self.template.create(block=True, timeout=timeout)
+
+ # ensure runtime is approximately two intervals
+ expected_time_low = interval * 1.8
+ expected_time_high = interval * 2.2
+ self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+
+ # ensure the existing heat_client was used and op_utils was used again
+ self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage)
+
+ # ensure the constructor was not used but the instance was used
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure that the status was used three times
+ expected_status_calls += 3
+ self.assertEqual(mock_status.call_count, expected_status_calls)
+
+ # ensure the expected exception was raised
+ error_message = get_error_message(raised.exception)
+ self.assertNotIn('timeout', error_message)
+ self.assertIn('the reason', error_message)
+
+ @mock_patch_target_module('op_utils')
+ @mock_patch_target_module('heatclient.client.Client')
+ def test_create(self, mock_heat_client_class, mock_op_utils):
+ self.template.HEAT_WAIT_LOOP_INTERVAL = interval = 0.2
+ mock_heat_client = mock_heat_client_class()
+
+ # populate attributes of the constructed mock
+ mock_heat_client.stacks.get().outputs = [
+ {'output_key': 'key1', 'output_value': 'value1'},
+ {'output_key': 'key2', 'output_value': 'value2'},
+ {'output_key': 'key3', 'output_value': 'value3'},
+ ]
+ expected_outputs = {
+ 'key1': 'value1',
+ 'key2': 'value2',
+ 'key3': 'value3',
+ }
+
+ expected_status_calls = 0
+ expected_constructor_calls = 1 # above, to get the instance
+ expected_create_calls = 0
+ expected_op_utils_usage = 0
+
+ with mock.patch.object(self.template, 'status') as mock_status:
+ # no block
+ with timer() as time_data:
+ self.assertIsInstance(self.template.create(block=False, timeout=2), heat.HeatStack)
+
+ # ensure runtime is much less than one interval
+ self.assertLess(time_data['delta'], interval * 0.2)
+
+ # ensure op_utils was used
+ expected_op_utils_usage += 1
+ self.assertEqual(mock_op_utils.get_session.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_endpoint.call_count, expected_op_utils_usage)
+ self.assertEqual(mock_op_utils.get_heat_api_version.call_count, expected_op_utils_usage)
+
+ # ensure the constructor and instance were used
+ expected_constructor_calls += 1
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure that the status was not used
+ self.assertEqual(mock_status.call_count, expected_status_calls)
+
+ # ensure no outputs because this requires blocking
+ self.assertEqual(self.template.outputs, {})
+
+ # block with immediate complete
+ mock_status.return_value = u'CREATE_COMPLETE'
+ with timer() as time_data:
+ self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+
+ # ensure runtime is less than one interval
+ self.assertLess(time_data['delta'], interval * 0.2)
+
+ # ensure existing instance was re-used and op_utils was not used
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure status was checked once
+ expected_status_calls += 1
+ self.assertEqual(mock_status.call_count, expected_status_calls)
+
+ # ensure the expected outputs are present
+ self.assertDictEqual(self.template.outputs, expected_outputs)
+
+ # reset template outputs
+ self.template.outputs = None
+
+ # block with delayed complete
+ mock_status.side_effect = iter([None, None, u'CREATE_COMPLETE'])
+ with timer() as time_data:
+ self.assertIsInstance(self.template.create(block=True, timeout=2), heat.HeatStack)
+
+ # ensure runtime is approximately two intervals
+ expected_time_low = interval * 1.8
+ expected_time_high = interval * 2.2
+ self.assertTrue(expected_time_low < time_data['delta'] < expected_time_high)
+
+ # ensure existing instance was re-used and op_utils was not used
+ expected_create_calls += 1
+ self.assertEqual(mock_heat_client_class.call_count, expected_constructor_calls)
+ self.assertEqual(mock_heat_client.stacks.create.call_count, expected_create_calls)
+
+ # ensure status was checked three more times
+ expected_status_calls += 3
+ self.assertEqual(mock_status.call_count, expected_status_calls)
class HeatStackTestCase(unittest.TestCase):
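The timing assertions in test_create and test_create_negative lean on the timer() helper added at the top of this file. Its usage pattern in isolation (the sleep and bounds here are deliberately loose, for illustration only):

    import time
    from contextlib import contextmanager

    @contextmanager
    def timer():
        # same helper as above: yield a dict, fill in end/delta on exit
        start = time.time()
        data = {'start': start}
        try:
            yield data
        finally:
            data['end'] = end = time.time()
            data['delta'] = end - start

    with timer() as time_data:
        time.sleep(0.1)
    assert 0.05 < time_data['delta'] < 0.5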
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index b689ac09c..aa134d694 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -13,9 +13,10 @@ from __future__ import print_function
import collections
import logging
import os
-import sys
import uuid
+from collections import OrderedDict
+import ipaddress
import paramiko
import pkg_resources
@@ -29,6 +30,8 @@ from yardstick.common.constants import YARDSTICK_ROOT_PATH
LOG = logging.getLogger(__name__)
+DEFAULT_HEAT_TIMEOUT = 3600
+
class HeatContext(Context):
"""Class that represents a context in the logical model"""
@@ -38,7 +41,7 @@ class HeatContext(Context):
def __init__(self):
self.name = None
self.stack = None
- self.networks = []
+ self.networks = OrderedDict()
self.servers = []
self.placement_groups = []
self.server_groups = []
@@ -68,6 +71,7 @@ class HeatContext(Context):
# no external net defined, assign it to first network using os.environ
if sorted_networks and not have_external_network:
sorted_networks[0][1]["external_network"] = external_network
+ return sorted_networks
def init(self, attrs): # pragma: no cover
"""initializes itself from the supplied arguments"""
@@ -87,6 +91,8 @@ class HeatContext(Context):
self._flavor = attrs.get("flavor")
+ self.heat_timeout = attrs.get("timeout", DEFAULT_HEAT_TIMEOUT)
+
self.placement_groups = [PlacementGroup(name, self, pgattrs["policy"])
for name, pgattrs in attrs.get(
"placement_groups", {}).items()]
@@ -95,12 +101,15 @@ class HeatContext(Context):
for name, sgattrs in attrs.get(
"server_groups", {}).items()]
- self.assign_external_network(attrs["networks"])
+ # we have to do this first, because we are injecting external_network
+ # into the dict
+ sorted_networks = self.assign_external_network(attrs["networks"])
- self.networks = [Network(name, self, netattrs) for name, netattrs in
- sorted(attrs["networks"].items())]
+ self.networks = OrderedDict(
+ (name, Network(name, self, netattrs)) for name, netattrs in
+ sorted_networks)
- for name, serverattrs in attrs["servers"].items():
+ for name, serverattrs in sorted(attrs["servers"].items()):
server = Server(name, self, serverattrs)
self.servers.append(server)
self._server_map[server.dn] = server
@@ -140,7 +149,7 @@ class HeatContext(Context):
template.add_keypair(self.keypair_name, self.key_uuid)
template.add_security_group(self.secgroup_name)
- for network in self.networks:
+ for network in self.networks.values():
template.add_network(network.stack_name,
network.physical_network,
network.provider)
@@ -190,17 +199,17 @@ class HeatContext(Context):
if not scheduler_hints["different_host"]:
scheduler_hints.pop("different_host", None)
server.add_to_template(template,
- self.networks,
+ list(self.networks.values()),
scheduler_hints)
else:
scheduler_hints["different_host"] = \
scheduler_hints["different_host"][0]
server.add_to_template(template,
- self.networks,
+ list(self.networks.values()),
scheduler_hints)
else:
server.add_to_template(template,
- self.networks,
+ list(self.networks.values()),
scheduler_hints)
added_servers.append(server.stack_name)
@@ -219,7 +228,8 @@ class HeatContext(Context):
scheduler_hints = {}
for pg in server.placement_groups:
update_scheduler_hints(scheduler_hints, added_servers, pg)
- server.add_to_template(template, self.networks, scheduler_hints)
+ server.add_to_template(template, list(self.networks.values()),
+ scheduler_hints)
added_servers.append(server.stack_name)
# add server group
@@ -236,7 +246,8 @@ class HeatContext(Context):
if sg:
scheduler_hints["group"] = {'get_resource': sg.name}
server.add_to_template(template,
- self.networks, scheduler_hints)
+ list(self.networks.values()),
+ scheduler_hints)
def deploy(self):
"""deploys template into a stack using cloud"""
@@ -249,13 +260,14 @@ class HeatContext(Context):
self._add_resources_to_template(heat_template)
try:
- self.stack = heat_template.create()
+ self.stack = heat_template.create(block=True,
+ timeout=self.heat_timeout)
except KeyboardInterrupt:
- sys.exit("\nStack create interrupted")
- except RuntimeError as err:
- sys.exit("error: failed to deploy stack: '%s'" % err.args)
- except Exception as err:
- sys.exit("error: failed to deploy stack: '%s'" % err)
+ raise SystemExit("\nStack create interrupted")
+ except:
+ LOG.exception("stack failed")
+ raise
+ # let other failures propagate; we want the stack trace
# copy some vital stack output into server objects
for server in self.servers:
@@ -263,6 +275,11 @@ class HeatContext(Context):
# TODO(hafe) can only handle one internal network for now
port = next(iter(server.ports.values()))
server.private_ip = self.stack.outputs[port["stack_name"]]
+ server.interfaces = {}
+ for network_name, port in server.ports.items():
+ self.make_interface_dict(network_name, port['stack_name'],
+ server,
+ self.stack.outputs)
if server.floating_ip:
server.public_ip = \
@@ -270,6 +287,27 @@ class HeatContext(Context):
print("Context '%s' deployed" % self.name)
+ def make_interface_dict(self, network_name, stack_name, server, outputs):
+ server.interfaces[network_name] = {
+ "private_ip": outputs[stack_name],
+ "subnet_id": outputs[stack_name + "-subnet_id"],
+ "subnet_cidr": outputs[
+ "{}-{}-subnet-cidr".format(self.name, network_name)],
+ "netmask": str(ipaddress.ip_network(
+ outputs["{}-{}-subnet-cidr".format(self.name,
+ network_name)]).netmask),
+ "gateway_ip": outputs[
+ "{}-{}-subnet-gateway_ip".format(self.name, network_name)],
+ "mac_address": outputs[stack_name + "-mac_address"],
+ "device_id": outputs[stack_name + "-device_id"],
+ "network_id": outputs[stack_name + "-network_id"],
+ "network_name": network_name,
+ # to match vnf_generic
+ "local_mac": outputs[stack_name + "-mac_address"],
+ "local_ip": outputs[stack_name],
+ "vld_id": self.networks[network_name].vld_id,
+ }
+
def undeploy(self):
"""undeploys stack from cloud"""
if self.stack:
@@ -324,7 +362,8 @@ class HeatContext(Context):
result = {
"user": server.context.user,
"key_filename": key_filename,
- "private_ip": server.private_ip
+ "private_ip": server.private_ip,
+ "interfaces": server.interfaces,
}
# Target server may only have private_ip
if server.public_ip:
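The netmask entry in make_interface_dict is derived from the subnet CIDR that the stack outputs, via the newly imported ipaddress module. In isolation (the CIDR value is only an example):

    import ipaddress

    cidr = u'10.0.1.0/24'  # ipaddress wants unicode text on Python 2
    print(str(ipaddress.ip_network(cidr).netmask))  # 255.255.255.0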
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index 546201e9b..1f8c6f11c 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -111,6 +111,7 @@ class Network(Object):
if "external_network" in attrs:
self.router = Router("router", self.name,
context, attrs["external_network"])
+ self.vld_id = attrs.get("vld_id", "")
Network.list.append(self)
@@ -152,6 +153,7 @@ class Server(Object): # pragma: no cover
self.public_ip = None
self.private_ip = None
self.user_data = ''
+ self.interfaces = {}
if attrs is None:
attrs = {}
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index c44081b73..478a51f9d 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -24,6 +24,7 @@ from six.moves import filter
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.runners import base as base_runner
+from yardstick.dispatcher.base import Base as DispatcherBase
from yardstick.common.task_template import TaskTemplate
from yardstick.common.utils import source_env
from yardstick.common import utils
@@ -42,7 +43,6 @@ class Task(object): # pragma: no cover
"""
def __init__(self):
- self.config = {}
self.contexts = []
self.outputs = {}
@@ -55,7 +55,14 @@ class Task(object): # pragma: no cover
check_environment()
- self.config['yardstick'] = utils.parse_ini_file(config_file)
+ output_config = utils.parse_ini_file(config_file)
+ self._init_output_config(output_config)
+ self._set_output_config(output_config, args.output_file)
+ LOG.debug('Output configuration is: %s', output_config)
+
+ if output_config['DEFAULT'].get('dispatcher') == 'file':
+ result = {'status': 0, 'result': {}}
+ utils.write_json_to_file(args.output_file, result)
total_start_time = time.time()
parser = TaskParser(args.inputfile[0])
@@ -75,6 +82,7 @@ class Task(object): # pragma: no cover
if args.parse_only:
sys.exit(0)
+ testcases = {}
# parse task_files
for i in range(0, len(task_files)):
one_task_start_time = time.time()
@@ -90,7 +98,15 @@ class Task(object): # pragma: no cover
meet_precondition)
continue
- self._run(scenarios, run_in_parallel, args.output_file)
+ case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+ try:
+ data = self._run(scenarios, run_in_parallel, args.output_file)
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+ else:
+ testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
if args.keep_deploy:
# keep deployment, forget about stack
@@ -104,6 +120,10 @@ class Task(object): # pragma: no cover
LOG.info("task %s finished in %d secs", task_files[i],
one_task_end_time - one_task_start_time)
+ result = self._get_format_result(testcases)
+
+ self._do_output(output_config, result)
+
total_end_time = time.time()
LOG.info("total finished in %d secs",
total_end_time - total_start_time)
@@ -114,6 +134,65 @@ class Task(object): # pragma: no cover
print("Done, exiting")
+ def _init_output_config(self, output_config):
+ output_config.setdefault('DEFAULT', {})
+ output_config.setdefault('dispatcher_http', {})
+ output_config.setdefault('dispatcher_file', {})
+ output_config.setdefault('dispatcher_influxdb', {})
+ output_config.setdefault('nsb', {})
+
+ def _set_output_config(self, output_config, file_path):
+ try:
+ out_type = os.environ['DISPATCHER']
+ except KeyError:
+ output_config['DEFAULT'].setdefault('dispatcher', 'file')
+ else:
+ output_config['DEFAULT']['dispatcher'] = out_type
+
+ output_config['dispatcher_file']['file_path'] = file_path
+
+ try:
+ target = os.environ['TARGET']
+ except KeyError:
+ pass
+ else:
+ k = 'dispatcher_{}'.format(output_config['DEFAULT']['dispatcher'])
+ output_config[k]['target'] = target
+
+ def _get_format_result(self, testcases):
+ criteria = self._get_task_criteria(testcases)
+
+ info = {
+ 'deploy_scenario': os.environ.get('DEPLOY_SCENARIO', 'unknown'),
+ 'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
+ 'pod_name': os.environ.get('NODE_NAME', 'unknown'),
+ 'version': os.environ.get('YARDSTICK_BRANCH', 'unknown')
+ }
+
+ result = {
+ 'status': 1,
+ 'result': {
+ 'criteria': criteria,
+ 'task_id': self.task_id,
+ 'info': info,
+ 'testcases': testcases
+ }
+ }
+
+ return result
+
+ def _get_task_criteria(self, testcases):
+ failed = any(t.get('criteria') != 'PASS' for t in testcases.values())
+ if failed:
+ return 'FAIL'
+ else:
+ return 'PASS'
+
+ def _do_output(self, output_config, result):
+
+ dispatcher = DispatcherBase.get(output_config)
+ dispatcher.flush_result_data(result)
+
def _run(self, scenarios, run_in_parallel, output_file):
"""Deploys context and calls runners"""
for context in self.contexts:
@@ -121,6 +200,7 @@ class Task(object): # pragma: no cover
background_runners = []
+ result = []
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
scenario["runner"] = dict(type="Duration", duration=1000000000)
@@ -136,16 +216,23 @@ class Task(object): # pragma: no cover
# Wait for runners to finish
for runner in runners:
- runner_join(runner)
+ status = runner_join(runner)
+ if status != 0:
+ raise RuntimeError
self.outputs.update(runner.get_output())
+ result.extend(runner.get_result())
print("Runner ended, output in", output_file)
else:
# run serially
for scenario in scenarios:
if not _is_background_scenario(scenario):
runner = self.run_one_scenario(scenario, output_file)
- runner_join(runner)
+ status = runner_join(runner)
+ if status != 0:
+ LOG.error('Scenario: %s ERROR', scenario.get('type'))
+ raise RuntimeError
self.outputs.update(runner.get_output())
+ result.extend(runner.get_result())
print("Runner ended, output in", output_file)
# Abort background runners
@@ -154,15 +241,21 @@ class Task(object): # pragma: no cover
# Wait for background runners to finish
for runner in background_runners:
- if runner.join(timeout=60) is None:
+ status = runner.join(timeout=60)
+ if status is None:
# Nuke if it did not stop nicely
base_runner.Runner.terminate(runner)
- runner_join(runner)
+ status = runner_join(runner)
self.outputs.update(runner.get_output())
+ result.extend(runner.get_result())
else:
base_runner.Runner.release(runner)
+ if status != 0:
+ raise RuntimeError
print("Background task ended")
+ return result
+
def atexit_handler(self):
"""handler for process termination"""
base_runner.Runner.terminate_all()
@@ -227,7 +320,7 @@ class Task(object): # pragma: no cover
if "nodes" in scenario_cfg:
context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
- runner = base_runner.Runner.get(runner_cfg, self.config)
+ runner = base_runner.Runner.get(runner_cfg)
print("Starting runner of type '%s'" % runner_cfg["type"])
runner.run(scenario_cfg, context_cfg)
@@ -400,6 +493,9 @@ class TaskParser(object): # pragma: no cover
task_name = os.path.splitext(os.path.basename(self.path))[0]
scenario["tc"] = task_name
scenario["task_id"] = task_id
+ # embed task path into scenario so we can load other files
+ # relative to task path
+ scenario["task_path"] = os.path.dirname(self.path)
change_server_name(scenario, name_suffix)
@@ -489,8 +585,7 @@ def runner_join(runner):
"""join (wait for) a runner, exit process at runner failure"""
status = runner.join()
base_runner.Runner.release(runner)
- if status != 0:
- sys.exit("Runner failed")
+ return status
def print_invalid_header(source_name, args):
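The new _set_output_config logic gives the DISPATCHER and TARGET environment variables precedence over the values parsed from yardstick.conf. A condensed sketch of the resulting behaviour (the helper name and sample values are illustrative only):

    import os

    def set_dispatcher(output_config):
        # DISPATCHER overrides the config file; 'file' is the fallback
        output_config.setdefault('DEFAULT', {})
        dispatcher = os.environ.get('DISPATCHER')
        if dispatcher:
            output_config['DEFAULT']['dispatcher'] = dispatcher
        else:
            output_config['DEFAULT'].setdefault('dispatcher', 'file')
        # TARGET lands in the section matching the active dispatcher
        target = os.environ.get('TARGET')
        if target:
            section = 'dispatcher_%s' % output_config['DEFAULT']['dispatcher']
            output_config.setdefault(section, {})['target'] = target
        return output_config

    os.environ['DISPATCHER'] = 'influxdb'
    os.environ['TARGET'] = 'http://10.1.1.1:8086'
    print(set_dispatcher({}))
    # {'DEFAULT': {'dispatcher': 'influxdb'},
    #  'dispatcher_influxdb': {'target': 'http://10.1.1.1:8086'}}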
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 7ec593396..7898ae2bc 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -63,10 +63,6 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
benchmark.setup()
method = getattr(benchmark, method_name)
- queue.put({'runner_id': runner_cfg['runner_id'],
- 'scenario_cfg': scenario_cfg,
- 'context_cfg': context_cfg})
-
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
@@ -132,10 +128,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
'errors': errors
}
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
+ queue.put(benchmark_output)
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
diff --git a/yardstick/benchmark/runners/base.py b/yardstick/benchmark/runners/base.py
index ebb9a91b5..f6816c7ed 100755
--- a/yardstick/benchmark/runners/base.py
+++ b/yardstick/benchmark/runners/base.py
@@ -22,46 +22,13 @@ import logging
import multiprocessing
import subprocess
import time
-import os
import traceback
-from oslo_config import cfg
-
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios import base as base_scenario
-from yardstick.dispatcher.base import Base as DispatcherBase
log = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-
-def _output_serializer_main(filename, queue, config):
- """entrypoint for the singleton subprocess writing to outfile
- Use of this process enables multiple instances of a scenario without
- messing up the output file.
- """
- try:
- out_type = config['yardstick'].get('DEFAULT', {})['dispatcher']
- except KeyError:
- out_type = os.environ.get('DISPATCHER', 'file')
-
- conf = {
- 'type': out_type.capitalize(),
- 'file_path': filename
- }
-
- dispatcher = DispatcherBase.get(conf, config)
-
- while True:
- # blocks until data becomes available
- record = queue.get()
- if record == '_TERMINATE_':
- dispatcher.flush_result_data()
- break
- else:
- dispatcher.record_result_data(record)
-
def _execute_shell_command(command):
"""execute shell script with error handling"""
@@ -110,8 +77,6 @@ def _periodic_action(interval, command, queue):
class Runner(object):
- queue = None
- dump_process = None
runners = []
@staticmethod
@@ -131,30 +96,10 @@ class Runner(object):
return types
@staticmethod
- def get(runner_cfg, config):
+ def get(runner_cfg):
"""Returns instance of a scenario runner for execution type.
"""
- # if there is no runner, start the output serializer subprocess
- if not Runner.runners:
- log.debug("Starting dump process file '%s'",
- runner_cfg["output_filename"])
- Runner.queue = multiprocessing.Queue()
- Runner.dump_process = multiprocessing.Process(
- target=_output_serializer_main,
- name="Dumper",
- args=(runner_cfg["output_filename"], Runner.queue, config))
- Runner.dump_process.start()
-
- return Runner.get_cls(runner_cfg["type"])(runner_cfg, Runner.queue)
-
- @staticmethod
- def release_dump_process():
- """Release the dumper process"""
- log.debug("Stopping dump process")
- if Runner.dump_process:
- Runner.queue.put('_TERMINATE_')
- Runner.dump_process.join()
- Runner.dump_process = None
+ return Runner.get_cls(runner_cfg["type"])(runner_cfg)
@staticmethod
def release(runner):
@@ -162,10 +107,6 @@ class Runner(object):
if runner in Runner.runners:
Runner.runners.remove(runner)
- # if this was the last runner, stop the output serializer subprocess
- if not Runner.runners:
- Runner.release_dump_process()
-
@staticmethod
def terminate(runner):
"""Terminate the runner"""
@@ -179,7 +120,6 @@ class Runner(object):
# release dumper process as some errors before any runner is created
if not Runner.runners:
- Runner.release_dump_process()
return
for runner in Runner.runners:
@@ -193,11 +133,11 @@ class Runner(object):
runner.periodic_action_process = None
Runner.release(runner)
- def __init__(self, config, queue):
+ def __init__(self, config):
self.config = config
self.periodic_action_process = None
- self.result_queue = queue
self.output_queue = multiprocessing.Queue()
+ self.result_queue = multiprocessing.Queue()
self.process = None
self.aborted = multiprocessing.Event()
Runner.runners.append(self)
@@ -276,3 +216,9 @@ class Runner(object):
while not self.output_queue.empty():
result.update(self.output_queue.get())
return result
+
+ def get_result(self):
+ result = []
+ while not self.result_queue.empty():
+ result.append(self.result_queue.get())
+ return result
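With the dump subprocess gone, each worker now puts its benchmark_output dict straight onto a per-runner result_queue, and the task drains it through the new get_result(). The drain pattern in miniature:

    import time
    from multiprocessing import Queue

    result_queue = Queue()
    result_queue.put({'data': {'rtt': 1.03}, 'errors': ''})
    time.sleep(0.1)  # Queue uses a feeder thread; the real runner only
                     # drains after the worker process has been joined

    result = []
    while not result_queue.empty():
        result.append(result_queue.get())
    print(result)  # [{'data': {'rtt': 1.03}, 'errors': ''}]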
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 2bf2cd2fe..69d744562 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -52,10 +52,6 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
- queue.put({'runner_id': runner_cfg['runner_id'],
- 'scenario_cfg': scenario_cfg,
- 'context_cfg': context_cfg})
-
start = time.time()
while True:
@@ -90,10 +86,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
'errors': errors
}
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
+ queue.put(benchmark_output)
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 973bb9ac4..50fe106bd 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -53,10 +53,6 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
method = getattr(benchmark, method_name)
- queue.put({'runner_id': runner_cfg['runner_id'],
- 'scenario_cfg': scenario_cfg,
- 'context_cfg': context_cfg})
-
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
@@ -105,10 +101,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
'errors': errors
}
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
+ queue.put(benchmark_output)
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"],
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index 74ff82204..68e272c57 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -57,10 +57,6 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
benchmark.setup()
method = getattr(benchmark, method_name)
- queue.put({'runner_id': runner_cfg['runner_id'],
- 'scenario_cfg': scenario_cfg,
- 'context_cfg': context_cfg})
-
sla_action = None
if "sla" in scenario_cfg:
sla_action = scenario_cfg["sla"].get("action", "assert")
@@ -99,10 +95,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
'errors': errors
}
- record = {'runner_id': runner_cfg['runner_id'],
- 'benchmark': benchmark_output}
-
- queue.put(record)
+ queue.put(benchmark_output)
LOG.debug("runner=%(runner)s seq=%(sequence)s END",
{"runner": runner_cfg["runner_id"], "sequence": sequence})
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index e0e6cf3bf..f7ab23dcd 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -66,3 +66,5 @@ class ProcessAttacker(BaseAttacker):
exit_status, stdout, stderr = self.connection.execute(
"sudo /bin/bash -s {0} ".format(self.service_name),
stdin=stdin_file)
+ if exit_status:
+ LOG.info("Fail to restart service!")
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
index a6a3e96ca..a865b6551 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_process_kill.bash
@@ -16,10 +16,13 @@ set -e
process_name=$1
if [ "$process_name" = "keystone" ]; then
- killall -9 -u $process_name
+ for pid in $(ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}'); \
+ do
+ kill -9 "${pid}"
+ done
else
- for pid in `ps aux | grep "/usr/.*/${process_name}" | grep -v grep | grep -v /bin/sh | awk '{print $2}'`; \
+ for pid in $(pgrep -f "/usr/.*/${process_name}");
do
- kill -9 ${pid}
+ kill -9 "${pid}"
done
fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
index 941563e7c..8737836e2 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
@@ -14,4 +14,10 @@
set -e
-openstack flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
+if [ "$OS_CACERT" ] && [ "$(echo "$OS_CACERT" | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack "${SECURE}" flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
index e998464c7..617dcf8a3 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
@@ -14,4 +14,10 @@
set -e
-openstack flavor delete $1
+if [ "$OS_CACERT" ] && [ "$(echo "$OS_CACERT" | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack "${SECURE}" flavor delete $1
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
index 1b0739602..9b413c965 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
@@ -13,4 +13,10 @@
set -e
-nova flavor-list
\ No newline at end of file
+if [ "$OS_CACERT" ] && [ "$(echo "$OS_CACERT" | tr '[:upper:]' '[:lower:]')" = "false" ]; then
+ SECURE="--insecure"
+else
+ SECURE=""
+fi
+
+openstack "${SECURE}" flavor list
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index 033a2d721..d757bd88d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -7,6 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from __future__ import absolute_import
+
+import os
import logging
import subprocess
import traceback
@@ -53,6 +55,14 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.cmd = self._config["command_name"]
+ try:
+ cacert = os.environ['OS_CACERT']
+ except KeyError:
+ pass
+ else:
+ if cacert.lower() == "false":
+ self.cmd = self.cmd + " --insecure"
+
def monitor_func(self):
exit_status = 0
exit_status, stdout = _execute_shell_command(self.cmd)
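The same OS_CACERT convention now appears in both the bash helpers above and this monitor: the literal string "false" (any case) means skip TLS verification. A one-line sketch of the rule (the function name is illustrative, not part of the patch):

    import os

    def insecure_flag():
        # OS_CACERT normally holds a CA bundle path; the magic value
        # 'false' asks the OpenStack clients to skip verification
        return ' --insecure' if os.environ.get('OS_CACERT', '').lower() == 'false' else ''

    cmd = 'openstack flavor list' + insecure_flag()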
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 689d33a34..28bec8aff 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -54,7 +54,18 @@ class ScenarioGeneral(base.Scenario):
pass
self.director.stopMonitors()
- if self.director.verify():
+
+ verify_result = self.director.verify()
+
+ for k, v in self.director.data.items():
+ if v == 0:
+ result['sla_pass'] = 0
+ verify_result = False
+ LOG.info(
+ "\033[92m The service process was not found in the host \
+environment; the HA test case does NOT pass")
+
+ if verify_result:
result['sla_pass'] = 1
LOG.info(
"\033[92m Congratulations, "
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 2e829714d..2f0012ecf 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -71,7 +71,7 @@ class ServiceHA(base.Scenario):
sla_pass = self.monitorMgr.verify_SLA()
for k, v in self.data.items():
- if self.data[k] == 0:
+ if v == 0:
result['sla_pass'] = 0
LOG.info("The service process not found in the host envrioment, \
the HA test case NOT pass")
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index 95367b3bb..a929e5337 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -76,7 +76,10 @@ class Ping(base.Scenario):
raise RuntimeError(stderr)
if stdout:
- target_vm_name = target_vm.split('.')[0]
+ if isinstance(target_vm, dict):
+ target_vm_name = target_vm.get("name")
+ else:
+ target_vm_name = target_vm.split('.')[0]
rtt_result[target_vm_name] = float(stdout)
if "sla" in self.scenario_cfg:
sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index be179631e..594edeaa8 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -15,6 +15,14 @@
from __future__ import absolute_import
import logging
+
+import errno
+import os
+
+import re
+from operator import itemgetter
+from collections import defaultdict
+
import yaml
from yardstick.benchmark.scenarios import base
@@ -72,6 +80,15 @@ class SshManager(object):
self.conn.close()
+def open_relative_file(path, task_path):
+ try:
+ return open(path)
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ return open(os.path.join(task_path, path))
+ raise
+
+
class NetworkServiceTestCase(base.Scenario):
"""Class handles Generic framework to do pre-deployment VNF &
Network service testing """
@@ -84,8 +101,11 @@ class NetworkServiceTestCase(base.Scenario):
self.context_cfg = context_cfg
# fixme: create schema to validate all fields have been provided
- with open(scenario_cfg["topology"]) as stream:
- self.topology = yaml.load(stream)["nsd:nsd-catalog"]["nsd"][0]
+ with open_relative_file(scenario_cfg["topology"],
+ scenario_cfg['task_path']) as stream:
+ topology_yaml = yaml.load(stream)
+
+ self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
self.vnfs = []
self.collector = None
self.traffic_profile = None
@@ -114,7 +134,8 @@ class NetworkServiceTestCase(base.Scenario):
private = {}
public = {}
try:
- with open(scenario_cfg["traffic_profile"]) as infile:
+ with open_relative_file(scenario_cfg["traffic_profile"],
+ scenario_cfg["task_path"]) as infile:
traffic_profile_tpl = infile.read()
except (KeyError, IOError, OSError):
@@ -123,8 +144,6 @@ class NetworkServiceTestCase(base.Scenario):
return [traffic_profile_tpl, private, public]
def _fill_traffic_profile(self, scenario_cfg, context_cfg):
- traffic_profile = {}
-
flow = self._get_traffic_flow(scenario_cfg)
imix = self._get_traffic_imix(scenario_cfg)
@@ -193,6 +212,26 @@ class NetworkServiceTestCase(base.Scenario):
list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
nodes[node].update(topology["constituent-vnfd"][list_idx])
+ @staticmethod
+ def _sort_dpdk_port_num(netdevs):
+ # dpdk_port_num is PCI BUS ID ordering, lowest first
+ s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
+ for dpdk_port_num, netdev in enumerate(s, 1):
+ netdev['dpdk_port_num'] = dpdk_port_num
+
+ @classmethod
+ def _probe_missing_values(cls, netdevs, network, missing):
+ mac = network['local_mac']
+ for netdev in netdevs.values():
+ if netdev['address'].lower() == mac.lower():
+ network['driver'] = netdev['driver']
+ network['vpci'] = netdev['pci_bus_id']
+ network['dpdk_port_num'] = netdev['dpdk_port_num']
+ network['ifindex'] = netdev['ifindex']
+
+ TOPOLOGY_REQUIRED_KEYS = frozenset({
+ "vpci", "local_ip", "netmask", "local_mac", "driver", "dpdk_port_num"})
+
def map_topology_to_infrastructure(self, context_cfg, topology):
""" This method should verify if the available resources defined in pod.yaml
match the topology.yaml file.
@@ -208,21 +247,66 @@ class NetworkServiceTestCase(base.Scenario):
exit_status = conn.execute(cmd)[0]
if exit_status != 0:
raise IncorrectSetup("Node's %s lacks ip tool." % node)
-
- for interface in node_dict["interfaces"]:
- network = node_dict["interfaces"][interface]
- keys = ["vpci", "local_ip", "netmask",
- "local_mac", "driver", "dpdk_port_num"]
- missing = set(keys).difference(network)
+ exit_status, stdout, _ = conn.execute(
+ self.FIND_NETDEVICE_STRING)
+ if exit_status != 0:
+ raise IncorrectSetup(
+ "Cannot find netdev info in sysfs" % node)
+ netdevs = node_dict['netdevs'] = self.parse_netdev_info(
+ stdout)
+ self._sort_dpdk_port_num(netdevs)
+
+ for network in node_dict["interfaces"].values():
+ missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
if missing:
- raise IncorrectConfig("Require interface fields '%s' "
- "not found, topology file "
- "corrupted" % ', '.join(missing))
+ try:
+ self._probe_missing_values(netdevs, network,
+ missing)
+ except KeyError:
+ pass
+ else:
+ missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
+ network)
+ if missing:
+ raise IncorrectConfig(
+ "Require interface fields '%s' "
+ "not found, topology file "
+ "corrupted" % ', '.join(missing))
# 3. Use topology file to find connections & resolve dest address
self._resolve_topology(context_cfg, topology)
self._update_context_with_topology(context_cfg, topology)
+ FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
+$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
+$1/device/subsystem_vendor $1/device/subsystem_device ; \
+printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
+' sh \{\}/* \;
+"""
+ BASE_ADAPTER_RE = re.compile(
+ '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)
+
+ @classmethod
+ def parse_netdev_info(cls, stdout):
+ network_devices = defaultdict(dict)
+ matches = cls.BASE_ADAPTER_RE.findall(stdout)
+ for bus_path, interface_name, name, value in matches:
+ dirname, bus_id = os.path.split(bus_path)
+ if 'virtio' in bus_id:
+ # VMs insert an extra virtioN/ component into the PCI
+ # device path; use the parent directory as the bus id
+ bus_id = os.path.basename(dirname)
+ # remove the extra 'device/' prefix from 'device/vendor',
+ # 'device/subsystem_vendor', etc.
+ if 'device/' in name:
+ name = name.split('/')[1]
+ network_devices[interface_name][name] = value
+ network_devices[interface_name][
+ 'interface_name'] = interface_name
+ network_devices[interface_name]['pci_bus_id'] = bus_id
+ # convert back to regular dict
+ return dict(network_devices)
+
@classmethod
def get_vnf_impl(cls, vnf_model):
""" Find the implementing class from vnf_model["vnf"]["name"] field
@@ -240,21 +324,24 @@ class NetworkServiceTestCase(base.Scenario):
except StopIteration:
raise IncorrectConfig("No implementation for %s", expected_name)
- def load_vnf_models(self, context_cfg):
+ def load_vnf_models(self, scenario_cfg, context_cfg):
""" Create VNF objects based on YAML descriptors
+ :param scenario_cfg:
+ :type scenario_cfg:
:param context_cfg:
:return:
"""
vnfs = []
- for node in context_cfg["nodes"]:
- LOG.debug(context_cfg["nodes"][node])
- with open(context_cfg["nodes"][node]["VNF model"]) as stream:
+ for node_name, node in context_cfg["nodes"].items():
+ LOG.debug(node)
+ with open_relative_file(node["VNF model"],
+ scenario_cfg['task_path']) as stream:
vnf_model = stream.read()
- vnfd = vnfdgen.generate_vnfd(vnf_model, context_cfg["nodes"][node])
+ vnfd = vnfdgen.generate_vnfd(vnf_model, node)
vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
- vnf_instance.name = node
+ vnf_instance.name = node_name
vnfs.append(vnf_instance)
return vnfs
@@ -264,11 +351,10 @@ class NetworkServiceTestCase(base.Scenario):
:return:
"""
-
# 1. Verify if infrastructure mapping can meet topology
self.map_topology_to_infrastructure(self.context_cfg, self.topology)
# 1a. Load VNF models
- self.vnfs = self.load_vnf_models(self.context_cfg)
+ self.vnfs = self.load_vnf_models(self.scenario_cfg, self.context_cfg)
# 1b. Fill traffic profile with information from topology
self.traffic_profile = self._fill_traffic_profile(self.scenario_cfg,
self.context_cfg)
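open_relative_file is the load-bearing change here: topology, traffic-profile and VNF-model paths now resolve first as given, then relative to the task file's directory, which is why task_path is threaded through scenario_cfg. Standalone, with a hypothetical layout in the usage comment:

    import errno
    import os

    def open_relative_file(path, task_path):
        # same fallback as the patch: only ENOENT triggers the retry
        try:
            return open(path)
        except IOError as e:
            if e.errno == errno.ENOENT:
                return open(os.path.join(task_path, path))
            raise

    # e.g. a topology file sitting next to the task file:
    # with open_relative_file('ping_topology.yaml', '/opt/tasks') as f:
    #     topology = f.read()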
diff --git a/yardstick/cmd/commands/task.py b/yardstick/cmd/commands/task.py
index 16a4db291..6384e6eb1 100644
--- a/yardstick/cmd/commands/task.py
+++ b/yardstick/cmd/commands/task.py
@@ -14,7 +14,6 @@ from __future__ import absolute_import
from yardstick.benchmark.core.task import Task
from yardstick.common.utils import cliargs
from yardstick.common.utils import write_json_to_file
-from yardstick.common.utils import read_json_from_file
from yardstick.cmd.commands import change_osloobj_to_paras
output_file_default = "/tmp/yardstick.out"
@@ -46,22 +45,11 @@ class TaskCommands(object):
param = change_osloobj_to_paras(args)
self.output_file = param.output_file
- self._init_result_file()
-
try:
Task().start(param, **kwargs)
- self._finish()
except Exception as e:
self._write_error_data(e)
-
- def _init_result_file(self):
- data = {'status': 0, 'result': []}
- write_json_to_file(self.output_file, data)
-
- def _finish(self):
- result = read_json_from_file(self.output_file).get('result')
- data = {'status': 1, 'result': result}
- write_json_to_file(self.output_file, data)
+ raise
def _write_error_data(self, error):
data = {'status': 2, 'result': str(error)}
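With the intermediate result-file bookkeeping removed, TaskCommands.do_start now only persists a failure marker and re-raises. A reduced sketch of that control flow, assuming the status codes from constants.py below (run_task and write_json_to_file are stand-ins, not the commit's code):

import json

def write_json_to_file(path, data):
    # stand-in for yardstick.common.utils.write_json_to_file
    with open(path, 'w') as f:
        json.dump(data, f)

def run_task(param):
    # placeholder failure so the error path below is exercised
    raise RuntimeError("boom")

def do_start(param, output_file="/tmp/yardstick.out"):
    try:
        run_task(param)
    except Exception as e:
        # status 2 == API_ERROR; the file records the failure while
        # the re-raise keeps the CLI exit code honest
        write_json_to_file(output_file, {'status': 2, 'result': str(e)})
        raise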
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index cb98c356d..47a519923 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -43,6 +43,7 @@ TESTSUITE_DIR = join(YARDSTICK_ROOT_PATH, 'tests/opnfv/test_suites/')
# file
OPENRC = get_param('file.openrc', '/etc/yardstick/openstack.creds')
CONF_FILE = join(CONF_DIR, 'yardstick.conf')
+POD_FILE = join(CONF_DIR, 'pod.yaml')
CONF_SAMPLE_FILE = join(CONF_SAMPLE_DIR, 'yardstick.conf.sample')
FETCH_SCRIPT = get_param('file.fetch_script', 'utils/fetch_os_creds.sh')
FETCH_SCRIPT = join(RELENG_DIR, FETCH_SCRIPT)
@@ -77,6 +78,9 @@ DOCKER_URL = 'unix://var/run/docker.sock'
INSTALLERS = ['apex', 'compass', 'fuel', 'joid']
SQLITE = 'sqlite:////tmp/yardstick.db'
+API_SUCCESS = 1
+API_ERROR = 2
+
BASE_URL = 'http://localhost:5000'
ENV_ACTION_API = BASE_URL + '/yardstick/env/action'
ASYNC_TASK_API = BASE_URL + '/yardstick/asynctask'
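API_SUCCESS and API_ERROR give the status codes used in result envelopes a single home. A hypothetical helper sketching the intended usage (result_envelope is not part of the commit):

from yardstick.common import constants as consts

def result_envelope(result=None, error=None):
    """Wrap a result the way task.py's _write_error_data does."""
    if error is not None:
        return {'status': consts.API_ERROR, 'result': str(error)}
    return {'status': consts.API_SUCCESS, 'result': result}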
diff --git a/yardstick/dispatcher/base.py b/yardstick/dispatcher/base.py
index a1c858297..e77249c54 100644
--- a/yardstick/dispatcher/base.py
+++ b/yardstick/dispatcher/base.py
@@ -38,15 +38,13 @@ class Base(object):
raise RuntimeError("No such dispatcher_type %s" % dispatcher_type)
@staticmethod
- def get(conf, config):
+ def get(config):
"""Returns instance of a dispatcher for dispatcher type.
"""
- return Base.get_cls(conf["type"])(conf, config)
+ out_type = config['DEFAULT']['dispatcher']
- @abc.abstractmethod
- def record_result_data(self, data):
- """Recording result data interface."""
+ return Base.get_cls(out_type.capitalize())(config)
@abc.abstractmethod
- def flush_result_data(self):
+ def flush_result_data(self, data):
"""Flush result data into permanent storage media interface."""
diff --git a/yardstick/dispatcher/file.py b/yardstick/dispatcher/file.py
index 8acd5dfbb..24fc22dd4 100644
--- a/yardstick/dispatcher/file.py
+++ b/yardstick/dispatcher/file.py
@@ -29,18 +29,10 @@ class FileDispatcher(DispatchBase):
__dispatcher_type__ = "File"
- def __init__(self, conf, config):
+ def __init__(self, conf):
super(FileDispatcher, self).__init__(conf)
- self.result = []
+ self.target = conf['dispatcher_file'].get('file_path',
+ consts.DEFAULT_OUTPUT_FILE)
- def record_result_data(self, data):
- self.result.append(data)
-
- def flush_result_data(self):
- file_path = self.conf.get('file_path', consts.DEFAULT_OUTPUT_FILE)
-
- res = utils.read_json_from_file(file_path).get('result')
- res.extend(self.result)
-
- data = {'status': 0, 'result': res}
- utils.write_json_to_file(file_path, data)
+ def flush_result_data(self, data):
+ utils.write_json_to_file(self.target, data)
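The file dispatcher is now a one-shot writer: whatever dict the task hands to flush_result_data() lands verbatim as JSON in file_path. Illustrative usage; the conf layout matches the keys read above, but the payload values are invented:

from yardstick.dispatcher.file import FileDispatcher

conf = {'dispatcher_file': {'file_path': '/tmp/yardstick.out'}}
FileDispatcher(conf).flush_result_data(
    {'status': 1, 'result': {'task_id': 'abc', 'testcases': {}}})
# /tmp/yardstick.out now holds the serialized dict, replacing the old
# append/merge dance done by record_result_data()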
diff --git a/yardstick/dispatcher/http.py b/yardstick/dispatcher/http.py
index 0d8d2a346..9bf9af33b 100644
--- a/yardstick/dispatcher/http.py
+++ b/yardstick/dispatcher/http.py
@@ -20,30 +20,15 @@ from __future__ import absolute_import
import logging
import os
+from datetime import datetime
from oslo_serialization import jsonutils
import requests
-from oslo_config import cfg
from yardstick.dispatcher.base import Base as DispatchBase
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-http_dispatcher_opts = [
- cfg.StrOpt('target',
- default=os.getenv('TARGET', 'http://127.0.0.1:8000/results'),
- help='The target where the http request will be sent. '
- 'If this is not set, no data will be posted. For '
- 'example: target = http://hostname:1234/path'),
- cfg.IntOpt('timeout',
- default=5,
- help='The max time in seconds to wait for a request to '
- 'timeout.'),
-]
-
-CONF.register_opts(http_dispatcher_opts, group="dispatcher_http")
-
class HttpDispatcher(DispatchBase):
"""Dispatcher class for posting data into a http target.
@@ -51,55 +36,61 @@ class HttpDispatcher(DispatchBase):
__dispatcher_type__ = "Http"
- def __init__(self, conf, config):
+ def __init__(self, conf):
super(HttpDispatcher, self).__init__(conf)
+ http_conf = conf['dispatcher_http']
self.headers = {'Content-type': 'application/json'}
- self.timeout = CONF.dispatcher_http.timeout
- self.target = CONF.dispatcher_http.target
- self.raw_result = []
- self.result = {
- "project_name": "yardstick",
- "description": "yardstick test cases result",
- "pod_name": os.environ.get('NODE_NAME', 'unknown'),
- "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
- "version": os.environ.get('YARDSTICK_VERSION', 'unknown'),
- "build_tag": os.environ.get('BUILD_TAG')
- }
-
- def record_result_data(self, data):
- self.raw_result.append(data)
+ self.timeout = int(http_conf.get('timeout', 5))
+ self.target = http_conf.get('target', 'http://127.0.0.1:8000/results')
- def flush_result_data(self):
+ def flush_result_data(self, data):
if self.target == '':
# if the target was not set, do not do anything
LOG.error('Dispatcher target was not set, no data will '
'be posted.')
return
- self.result["details"] = {'results': self.raw_result}
-
- case_name = ""
- for v in self.raw_result:
- if isinstance(v, dict) and "scenario_cfg" in v:
- case_name = v["scenario_cfg"]["tc"]
- break
- if case_name == "":
- LOG.error('Test result : %s',
- jsonutils.dump_as_bytes(self.result))
- LOG.error('The case_name cannot be found, no data will be posted.')
- return
+ result = data['result']
+ self.info = result['info']
+ self.task_id = result['task_id']
+ self.criteria = result['criteria']
+ testcases = result['testcases']
+
+ for case, data in testcases.items():
+ self._upload_case_result(case, data)
- self.result["case_name"] = case_name
+ def _upload_case_result(self, case, data):
+ try:
+ scenario_data = data.get('tc_data', [])[0]
+ except IndexError:
+ current_time = datetime.now()
+ else:
+ timestamp = float(scenario_data.get('timestamp', 0.0))
+ current_time = datetime.fromtimestamp(timestamp)
+
+ result = {
+ "project_name": "yardstick",
+ "case_name": case,
+ "description": "yardstick ci scenario status",
+ "scenario": self.info.get('deploy_scenario'),
+ "version": self.info.get('version'),
+ "pod_name": self.info.get('pod_name'),
+ "installer": self.info.get('installer'),
+ "build_tag": os.environ.get('BUILD_TAG'),
+ "criteria": data.get('criteria'),
+ "start_date": current_time.strftime('%Y-%m-%d %H:%M:%S'),
+ "stop_date": current_time.strftime('%Y-%m-%d %H:%M:%S'),
+ "trust_indicator": "",
+ "details": ""
+ }
try:
- LOG.debug('Test result : %s',
- jsonutils.dump_as_bytes(self.result))
+ LOG.debug('Test result : %s', result)
res = requests.post(self.target,
- data=jsonutils.dump_as_bytes(self.result),
+ data=jsonutils.dump_as_bytes(result),
headers=self.headers,
timeout=self.timeout)
LOG.debug('Test result posting finished with status code'
' %d.' % res.status_code)
except Exception as err:
- LOG.exception('Failed to record result data: %s',
- err)
+ LOG.exception('Failed to record result data: %s', err)
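The HTTP dispatcher now posts one record per test case instead of a single aggregate, and takes its metadata from result['info'] rather than environment variables. An inferred shape of the payload it expects, based on the keys read above (all concrete values are invented for illustration):

data = {
    'result': {
        'task_id': '59dc8369-...',          # invented uuid
        'criteria': 'PASS',
        'info': {
            'deploy_scenario': 'os-nosdn-nofeature-ha',
            'installer': 'fuel',
            'pod_name': 'opnfv-pod1',
            'version': 'master',
        },
        'testcases': {
            'tc002': {
                'criteria': 'PASS',
                'tc_data': [{'timestamp': '1484401200.0',
                             'data': {'rtt': 0.5}}],
            },
        },
    },
}

One POST per entry in testcases goes to the target; an empty tc_data list falls back to datetime.now() for the start/stop dates.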
diff --git a/yardstick/dispatcher/influxdb.py b/yardstick/dispatcher/influxdb.py
index 53af79c71..373aae13a 100644
--- a/yardstick/dispatcher/influxdb.py
+++ b/yardstick/dispatcher/influxdb.py
@@ -10,13 +10,11 @@
from __future__ import absolute_import
import logging
-import os
import time
import collections
import requests
import six
-from oslo_serialization import jsonutils
from third_party.influxdb.influxdb_line_protocol import make_lines
from yardstick.dispatcher.base import Base as DispatchBase
@@ -30,28 +28,66 @@ class InfluxdbDispatcher(DispatchBase):
__dispatcher_type__ = "Influxdb"
- def __init__(self, conf, config):
+ def __init__(self, conf):
super(InfluxdbDispatcher, self).__init__(conf)
- db_conf = config['yardstick'].get('dispatcher_influxdb', {})
+ db_conf = conf['dispatcher_influxdb']
self.timeout = int(db_conf.get('timeout', 5))
self.target = db_conf.get('target', 'http://127.0.0.1:8086')
self.db_name = db_conf.get('db_name', 'yardstick')
self.username = db_conf.get('username', 'root')
self.password = db_conf.get('password', 'root')
+
self.influxdb_url = "%s/write?db=%s" % (self.target, self.db_name)
- self.raw_result = []
- self.case_name = ""
- self.tc = ""
+
self.task_id = -1
- self.runners_info = {}
- self.static_tags = {
- "pod_name": os.environ.get('NODE_NAME', 'unknown'),
- "installer": os.environ.get('INSTALLER_TYPE', 'unknown'),
- "deploy_scenario": os.environ.get('DEPLOY_SCENARIO', 'unknown'),
- "version": os.path.basename(os.environ.get('YARDSTICK_BRANCH',
- 'unknown'))
+ def flush_result_data(self, data):
+ LOG.debug('Test result all : %s', data)
+ if self.target == '':
+ # if the target was not set, do not do anything
+ LOG.error('Dispatcher target was not set, no data will be posted.')
+ return
+
+ result = data['result']
+ self.tags = result['info']
+ self.task_id = result['task_id']
+ self.criteria = result['criteria']
+ testcases = result['testcases']
+
+ for case, data in testcases.items():
+ tc_criteria = data['criteria']
+ for record in data['tc_data']:
+ self._upload_one_record(record, case, tc_criteria)
+
+ return 0
+
+ def _upload_one_record(self, data, case, tc_criteria):
+ try:
+ line = self._data_to_line_protocol(data, case, tc_criteria)
+ LOG.debug('Test result line format : %s', line)
+ res = requests.post(self.influxdb_url,
+ data=line,
+ auth=(self.username, self.password),
+ timeout=self.timeout)
+ if res.status_code != 204:
+ LOG.error('Test result posting finished with status code'
+ ' %d.', res.status_code)
+ LOG.error(res.text)
+
+ except Exception as err:
+ LOG.exception('Failed to record result data: %s', err)
+
+ def _data_to_line_protocol(self, data, case, criteria):
+ msg = {}
+ point = {
+ "measurement": case,
+ "fields": self._dict_key_flatten(data["data"]),
+ "time": self._get_nano_timestamp(data),
+ "tags": self._get_extended_tags(criteria),
}
+ msg["points"] = [point]
+ msg["tags"] = self.tags
+
+ return make_lines(msg).encode('utf-8')
def _dict_key_flatten(self, data):
next_data = {}
@@ -76,84 +112,16 @@ class InfluxdbDispatcher(DispatchBase):
def _get_nano_timestamp(self, results):
try:
- timestamp = results["benchmark"]["timestamp"]
+ timestamp = results["timestamp"]
except Exception:
timestamp = time.time()
return str(int(float(timestamp) * 1000000000))
- def _get_extended_tags(self, data):
- runner_info = self.runners_info[data["runner_id"]]
+ def _get_extended_tags(self, criteria):
tags = {
- "runner_id": data["runner_id"],
"task_id": self.task_id,
- "scenarios": runner_info["scenarios"]
+ "criteria": criteria
}
- if "host" in runner_info:
- tags["host"] = runner_info["host"]
- if "target" in runner_info:
- tags["target"] = runner_info["target"]
return tags
-
- def _data_to_line_protocol(self, data):
- msg = {}
- point = {
- "measurement": self.tc,
- "fields": self._dict_key_flatten(data["benchmark"]["data"]),
- "time": self._get_nano_timestamp(data),
- "tags": self._get_extended_tags(data),
- }
- msg["points"] = [point]
- msg["tags"] = self.static_tags
-
- return make_lines(msg).encode('utf-8')
-
- def record_result_data(self, data):
- LOG.debug('Test result : %s', jsonutils.dump_as_bytes(data))
- self.raw_result.append(data)
- if self.target == '':
- # if the target was not set, do not do anything
- LOG.error('Dispatcher target was not set, no data will'
- 'be posted.')
- return -1
-
- if isinstance(data, dict) and "scenario_cfg" in data:
- self.tc = data["scenario_cfg"]["tc"]
- self.task_id = data["scenario_cfg"]["task_id"]
- scenario_cfg = data["scenario_cfg"]
- runner_id = data["runner_id"]
- self.runners_info[runner_id] = {"scenarios": scenario_cfg["type"]}
- if "host" in scenario_cfg:
- self.runners_info[runner_id]["host"] = scenario_cfg["host"]
- if "target" in scenario_cfg:
- self.runners_info[runner_id]["target"] = scenario_cfg["target"]
- return 0
-
- if self.tc == "":
- LOG.error('Test result : %s', jsonutils.dump_as_bytes(data))
- LOG.error('The case_name cannot be found, no data will be posted.')
- return -1
-
- try:
- line = self._data_to_line_protocol(data)
- LOG.debug('Test result line format : %s', line)
- res = requests.post(self.influxdb_url,
- data=line,
- auth=(self.username, self.password),
- timeout=self.timeout)
- if res.status_code != 204:
- LOG.error('Test result posting finished with status code'
- ' %d.', res.status_code)
- LOG.error(res.text)
-
- except Exception as err:
- LOG.exception('Failed to record result data: %s',
- err)
- return -1
- return 0
-
- def flush_result_data(self):
- LOG.debug('Test result all : %s',
- jsonutils.dump_as_bytes(self.raw_result))
- return 0
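Each record is converted to InfluxDB line protocol: the measurement is the case name, fields come from the flattened data dict, tags carry task metadata, and the timestamp is in nanoseconds. A hand-rolled stand-in for the make_lines call (the real third-party helper also escapes spaces, commas and quotes; the formatting here is deliberately naive):

import time

def get_nano_timestamp(record):
    # mirrors _get_nano_timestamp above: fall back to "now"
    try:
        timestamp = record["timestamp"]
    except Exception:
        timestamp = time.time()
    return str(int(float(timestamp) * 1000000000))

def to_line(case, record, tags):
    tag_str = ','.join('%s=%s' % kv for kv in sorted(tags.items()))
    field_str = ','.join('%s=%s' % kv for kv in record['data'].items())
    return '%s,%s %s %s' % (case, tag_str, field_str,
                            get_nano_timestamp(record))

print(to_line('tc002',
              {'timestamp': '1484401200.0', 'data': {'rtt': 0.5}},
              {'task_id': 'abc', 'criteria': 'PASS'}))
# tc002,criteria=PASS,task_id=abc rtt=0.5 1484401200000000000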
diff --git a/yardstick/network_services/vnf_generic/vnfdgen.py b/yardstick/network_services/vnf_generic/vnfdgen.py
index 97dd97198..40cc14a49 100644
--- a/yardstick/network_services/vnf_generic/vnfdgen.py
+++ b/yardstick/network_services/vnf_generic/vnfdgen.py
@@ -15,9 +15,20 @@
from __future__ import absolute_import
import collections
+
+import jinja2
import yaml
-from yardstick.common.task_template import TaskTemplate
+
+def render(vnf_model, **kwargs):
+ """Render jinja2 VNF template
+
+ :param vnf_model: string that contains template
+ :param kwargs: Dict with template arguments
+ :returns: rendered template string
+ """
+
+ return jinja2.Template(vnf_model).render(**kwargs)
def generate_vnfd(vnf_model, node):
@@ -31,7 +42,10 @@ def generate_vnfd(vnf_model, node):
# get is unused as global method inside template
node["get"] = get
# Set Node details to default if not defined in pod file
- rendered_vnfd = TaskTemplate.render(vnf_model, **node)
+ # we cannot use TaskTemplate.render because it does not allow
+ # missing variables; we need to allow password or key_filename
+ # to be undefined
+ rendered_vnfd = render(vnf_model, **node)
# This is done to get rid of issues with serializing node
del node["get"]
filled_vnfd = yaml.load(rendered_vnfd)
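The point of the local render() is that plain jinja2 leaves undefined variables empty instead of raising, so a pod entry may omit password or key_filename. A small demonstration (the template text is made up):

import jinja2

template = "key_filename: {{ key_filename }}\npassword: {{ password }}"
print(jinja2.Template(template).render(key_filename='/root/.ssh/id_rsa'))
# key_filename: /root/.ssh/id_rsa
# password: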
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 864f1f9ec..a99d4631d 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
+# Copyright (c) 2015-2017 Ericsson AB and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
@@ -11,6 +11,7 @@
from __future__ import absolute_import
from __future__ import print_function
+from six.moves import range
import collections
import datetime
@@ -47,7 +48,8 @@ class HeatObject(object):
self._heat_client = None
self.uuid = None
- def _get_heat_client(self):
+ @property
+ def heat_client(self):
"""returns a heat client instance"""
if self._heat_client is None:
@@ -61,9 +63,9 @@ class HeatObject(object):
def status(self):
"""returns stack state as a string"""
- heat = self._get_heat_client()
- stack = heat.stacks.get(self.uuid)
- return getattr(stack, 'stack_status')
+ heat_client = self.heat_client
+ stack = heat_client.stacks.get(self.uuid)
+ return stack.stack_status
class HeatStack(HeatObject):
@@ -88,20 +90,18 @@ class HeatStack(HeatObject):
return
log.info("Deleting stack '%s', uuid:%s", self.name, self.uuid)
- heat = self._get_heat_client()
+ heat = self.heat_client
template = heat.stacks.get(self.uuid)
start_time = time.time()
template.delete()
- status = self.status()
- while status != u'DELETE_COMPLETE':
+ for status in iter(self.status, u'DELETE_COMPLETE'):
log.debug("stack state %s", status)
if status == u'DELETE_FAILED':
raise RuntimeError(
heat.stacks.get(self.uuid).stack_status_reason)
time.sleep(2)
- status = self.status()
end_time = time.time()
log.info("Deleted stack '%s' in %d secs", self.name,
@@ -120,15 +120,13 @@ class HeatStack(HeatObject):
self._delete()
return
- i = 0
- while i < retries:
+ for _ in range(retries):
try:
self._delete()
break
except RuntimeError as err:
log.warning(err.args)
time.sleep(2)
- i += 1
# if still not deleted try once more and let it fail everything
if self.uuid is not None:
@@ -177,7 +175,6 @@ name (i.e. %s).\
self.name = name
self.state = "NOT_CREATED"
self.keystone_client = None
- self.heat_client = None
self.heat_parameters = {}
# heat_parameters is passed to heat in stack create, empty dict when
@@ -279,6 +276,14 @@ name (i.e. %s).\
'description': 'subnet %s ID' % name,
'value': {'get_resource': name}
}
+ self._template['outputs'][name + "-cidr"] = {
+ 'description': 'subnet %s cidr' % name,
+ 'value': {'get_attr': [name, 'cidr']}
+ }
+ self._template['outputs'][name + "-gateway_ip"] = {
+ 'description': 'subnet %s gateway_ip' % name,
+ 'value': {'get_attr': [name, 'gateway_ip']}
+ }
def add_router(self, name, ext_gw_net, subnet_name):
"""add to the template a Neutron Router and interface"""
@@ -336,6 +341,22 @@ name (i.e. %s).\
'description': 'Address for interface %s' % name,
'value': {'get_attr': [name, 'fixed_ips', 0, 'ip_address']}
}
+ self._template['outputs'][name + "-subnet_id"] = {
+ 'description': 'Subnet ID for interface %s' % name,
+ 'value': {'get_attr': [name, 'fixed_ips', 0, 'subnet_id']}
+ }
+ self._template['outputs'][name + "-mac_address"] = {
+ 'description': 'MAC Address for interface %s' % name,
+ 'value': {'get_attr': [name, 'mac_address']}
+ }
+ self._template['outputs'][name + "-device_id"] = {
+ 'description': 'Device ID for interface %s' % name,
+ 'value': {'get_attr': [name, 'device_id']}
+ }
+ self._template['outputs'][name + "-network_id"] = {
+ 'description': 'Network ID for interface %s' % name,
+ 'value': {'get_attr': [name, 'network_id']}
+ }
def add_floating_ip(self, name, network_name, port_name, router_if_name,
secgroup_name=None):
@@ -508,38 +529,48 @@ name (i.e. %s).\
'value': {'get_resource': name}
}
- def create(self, block=True):
- """creates a template in the target cloud using heat
+ HEAT_WAIT_LOOP_INTERVAL = 2
+
+ def create(self, block=True, timeout=3600):
+ """
+ creates a template in the target cloud using heat
returns a dict with the requested output values from the template
+
+ :param block: Wait for Heat create to finish
+ :type block: bool
+ :param timeout: timeout in seconds for Heat create, default 3600s
+ :type timeout: int
"""
log.info("Creating stack '%s'", self.name)
# create stack early to support cleanup, e.g. ctrl-c while waiting
stack = HeatStack(self.name)
- heat = self._get_heat_client()
+ heat_client = self.heat_client
start_time = time.time()
- stack.uuid = self.uuid = heat.stacks.create(
+ stack.uuid = self.uuid = heat_client.stacks.create(
stack_name=self.name, template=self._template,
parameters=self.heat_parameters)['stack']['id']
- status = self.status()
- outputs = []
+ if not block:
+ self.outputs = stack.outputs = {}
+ return stack
- if block:
- while status != u'CREATE_COMPLETE':
- log.debug("stack state %s", status)
- if status == u'CREATE_FAILED':
- raise RuntimeError(getattr(heat.stacks.get(self.uuid),
- 'stack_status_reason'))
+ time_limit = start_time + timeout
+ for status in iter(self.status, u'CREATE_COMPLETE'):
+ log.debug("stack state %s", status)
+ if status == u'CREATE_FAILED':
+ raise RuntimeError(
+ heat_client.stacks.get(self.uuid).stack_status_reason)
+ if time.time() > time_limit:
+ raise RuntimeError("Heat stack create timeout")
- time.sleep(2)
- status = self.status()
+ time.sleep(self.HEAT_WAIT_LOOP_INTERVAL)
- end_time = time.time()
- outputs = getattr(heat.stacks.get(self.uuid), 'outputs')
- log.info("Created stack '%s' in %d secs",
- self.name, end_time - start_time)
+ end_time = time.time()
+ outputs = heat_client.stacks.get(self.uuid).outputs
+ log.info("Created stack '%s' in %d secs",
+ self.name, end_time - start_time)
# keep outputs as unicode
self.outputs = {output["output_key"]: output["output_value"] for output
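The rewritten wait loops lean on the two-argument iter(callable, sentinel) form: the bound status method is called repeatedly until it returns the sentinel value, which ends the loop without re-reading the status at the bottom. A distilled sketch of the pattern with the timeout guard added by create() (wait_for_status is illustrative, not the commit's API):

import time

HEAT_WAIT_LOOP_INTERVAL = 2

def wait_for_status(get_status, success, failure, timeout=3600):
    # iter() keeps calling get_status() until it returns `success`
    time_limit = time.time() + timeout
    for status in iter(get_status, success):
        if status == failure:
            raise RuntimeError("stack entered state %s" % status)
        if time.time() > time_limit:
            raise RuntimeError("Heat stack create timeout")
        time.sleep(HEAT_WAIT_LOOP_INTERVAL)

# e.g.: wait_for_status(stack.status, u'CREATE_COMPLETE', u'CREATE_FAILED')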
diff --git a/yardstick/resources/scripts/install/storperf.bash b/yardstick/resources/scripts/install/storperf.bash
index 9bbec7206..9d20a5a8a 100644
--- a/yardstick/resources/scripts/install/storperf.bash
+++ b/yardstick/resources/scripts/install/storperf.bash
@@ -20,8 +20,12 @@ set -e
mkdir -p /tmp/storperf-yardstick
docker pull opnfv/storperf
+
+STORPERF_DIR=/tmp/storperf-yardstick/carbon
docker run -t \
--env-file ~/storperf_admin-rc \
-p 5000:5000 -p 8000:8000 \
--v /tmp/storperf-yardstick/carbon:/opt/graphite/storage/whisper \
+-v $STORPERF_DIR:/opt/graphite/storage/whisper \
--name storperf-yardstick opnfv/storperf &
+
+mkdir -p $STORPERF_DIR
+chown www-data:www-data $STORPERF_DIR