Diffstat
-rw-r--r--  api/resources/v1/env.py | 52
-rw-r--r--  api/resources/v2/containers.py | 2
-rw-r--r--  api/resources/v2/environments.py | 8
-rw-r--r--  api/resources/v2/images.py | 8
-rw-r--r--  api/resources/v2/openrcs.py | 8
-rw-r--r--  api/resources/v2/pods.py | 8
-rw-r--r--  api/resources/v2/projects.py | 8
-rw-r--r--  api/resources/v2/tasks.py | 8
-rw-r--r--  api/resources/v2/testcases.py | 8
-rw-r--r--  api/resources/v2/testsuites.py | 8
-rw-r--r--  api/utils/thread.py | 8
-rw-r--r--  plugin/CI/storperf.yaml | 4
-rw-r--r--  samples/storperf.yaml | 1
-rw-r--r--  samples/vnf_samples/nsut/ping/tc_ping_ovs_dpdk_context.yaml | 42
-rwxr-xr-x  tests/ci/prepare_env.sh | 8
-rw-r--r--  tests/ci/scp_storperf_files.sh | 29
-rwxr-xr-x  tests/ci/yardstick-verify | 15
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml | 9
-rw-r--r--  tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml | 3
-rw-r--r--  tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml | 6
-rw-r--r--  tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml | 32
-rw-r--r--  tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml | 63
-rw-r--r--  tests/unit/benchmark/contexts/nodes_sample_new.yaml | 96
-rw-r--r--  tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml | 82
-rw-r--r--  tests/unit/benchmark/contexts/nodes_sample_ovs.yaml | 104
-rw-r--r--  tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml | 104
-rw-r--r--  tests/unit/benchmark/contexts/ovs_sample_password.yaml | 104
-rw-r--r--  tests/unit/benchmark/contexts/ovs_sample_ssh_key.yaml | 69
-rw-r--r--  tests/unit/benchmark/contexts/ovs_sample_write_to_file.txt | 1
-rw-r--r--  tests/unit/benchmark/contexts/sriov_sample_password.yaml | 52
-rw-r--r--  tests/unit/benchmark/contexts/sriov_sample_ssh_key.yaml | 54
-rw-r--r--  tests/unit/benchmark/contexts/sriov_sample_write_to_file.txt | 1
-rw-r--r--  tests/unit/benchmark/contexts/test_model.py | 35
-rw-r--r--  tests/unit/benchmark/contexts/test_ovsdpdk.py | 325
-rw-r--r--  tests/unit/benchmark/contexts/test_sriov.py | 421
-rw-r--r--  tests/unit/benchmark/contexts/test_standalone.py | 590
-rw-r--r--  tests/unit/benchmark/scenarios/availability/test_util.py | 19
-rw-r--r--  tests/unit/benchmark/scenarios/networking/test_pktgen.py | 534
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 7
-rw-r--r--  yardstick/benchmark/contexts/model.py | 29
-rw-r--r--  yardstick/benchmark/contexts/ovsdpdk.py | 369
-rw-r--r--  yardstick/benchmark/contexts/sriov.py | 431
-rw-r--r--  yardstick/benchmark/contexts/standalone.py | 90
-rw-r--r--  yardstick/benchmark/core/plugin.py | 8
-rw-r--r--  yardstick/benchmark/core/task.py | 7
-rw-r--r--  yardstick/benchmark/scenarios/availability/actionplayers.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker_conf.yaml | 7
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py | 7
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash | 42
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash | 65
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash | 2
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash | 70
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor_conf.yaml | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/baseoperation.py | 1
-rw-r--r--  yardstick/benchmark/scenarios/availability/operation/operation_general.py | 20
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/availability/util.py | 33
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py | 242
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash | 139
-rw-r--r--  yardstick/benchmark/scenarios/storage/fio.py | 32
-rw-r--r--  yardstick/benchmark/scenarios/storage/storperf.py | 7
-rw-r--r--  yardstick/common/constants.py | 2
-rw-r--r--  yardstick/common/utils.py | 10
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py | 1
-rw-r--r--  yardstick/network_services/vnf_generic/vnfdgen.py | 2
-rw-r--r--  yardstick/orchestrator/heat.py | 36
69 files changed, 4418 insertions(+), 201 deletions(-)
diff --git a/api/resources/v1/env.py b/api/resources/v1/env.py
index 4632f15fe..8943db3d1 100644
--- a/api/resources/v1/env.py
+++ b/api/resources/v1/env.py
@@ -65,16 +65,20 @@ class V1Env(ApiResource):
client.pull(consts.GRAFANA_IMAGE, consts.GRAFANA_TAG)
LOG.info('Createing grafana container')
- self._create_grafana_container(client)
+ container = self._create_grafana_container(client)
LOG.info('Grafana container is created')
time.sleep(5)
+ container = client.inspect_container(container['Id'])
+ ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
+ LOG.debug('container ip is: %s', ip)
+
LOG.info('Creating data source for grafana')
- self._create_data_source()
+ self._create_data_source(ip)
LOG.info('Creating dashboard for grafana')
- self._create_dashboard()
+ self._create_dashboard(ip)
self._update_task_status(task_id)
LOG.info('Finished')
@@ -82,8 +86,8 @@ class V1Env(ApiResource):
self._update_task_error(task_id, str(e))
LOG.exception('Create grafana failed')
- def _create_dashboard(self):
- url = 'http://admin:admin@%s:3000/api/dashboards/db' % consts.GRAFANA_IP
+ def _create_dashboard(self, ip):
+ url = 'http://admin:admin@{}:{}/api/dashboards/db'.format(ip, consts.GRAFANA_PORT)
path = os.path.join(consts.REPOS_DIR, 'dashboard', '*dashboard.json')
for i in sorted(glob.iglob(path)):
@@ -95,13 +99,21 @@ class V1Env(ApiResource):
LOG.exception('Create dashboard %s failed', i)
raise
- def _create_data_source(self):
- url = 'http://admin:admin@%s:3000/api/datasources' % consts.GRAFANA_IP
+ def _create_data_source(self, ip):
+ url = 'http://admin:admin@{}:{}/api/datasources'.format(ip, consts.GRAFANA_PORT)
+ influx_conf = utils.parse_ini_file(consts.CONF_FILE)
+
+ try:
+ influx_url = influx_conf['dispatcher_influxdb']['target']
+ except KeyError:
+ LOG.exception('influxdb url not set in yardstick.conf')
+ raise
+
data = {
"name": "yardstick",
"type": "influxdb",
"access": "proxy",
- "url": "http://%s:8086" % consts.INFLUXDB_IP,
+ "url": influx_url,
"password": "root",
"user": "root",
"database": "yardstick",
@@ -117,8 +129,8 @@ class V1Env(ApiResource):
raise
def _create_grafana_container(self, client):
- ports = [3000]
- port_bindings = {k: k for k in ports}
+ ports = [consts.GRAFANA_PORT]
+ port_bindings = {consts.GRAFANA_PORT: consts.GRAFANA_MAPPING_PORT}
restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
host_config = client.create_host_config(port_bindings=port_bindings,
restart_policy=restart_policy)
@@ -133,6 +145,7 @@ class V1Env(ApiResource):
host_config=host_config)
LOG.info('Starting container')
client.start(container)
+ return container
def _check_image_exist(self, client, t):
return any(t in a['RepoTags'][0]
@@ -152,9 +165,6 @@ class V1Env(ApiResource):
client = Client(base_url=consts.DOCKER_URL)
try:
- LOG.info('Changing output to influxdb')
- self._change_output_to_influxdb()
-
LOG.info('Checking if influxdb image exist')
if not self._check_image_exist(client, '%s:%s' %
(consts.INFLUXDB_IMAGE,
@@ -163,11 +173,18 @@ class V1Env(ApiResource):
client.pull(consts.INFLUXDB_IMAGE, tag=consts.INFLUXDB_TAG)
LOG.info('Createing influxdb container')
- self._create_influxdb_container(client)
+ container = self._create_influxdb_container(client)
LOG.info('Influxdb container is created')
time.sleep(5)
+ container = client.inspect_container(container['Id'])
+ ip = container['NetworkSettings']['Networks']['bridge']['IPAddress']
+ LOG.debug('container ip is: %s', ip)
+
+ LOG.info('Changing output to influxdb')
+ self._change_output_to_influxdb(ip)
+
LOG.info('Config influxdb')
self._config_influxdb()
@@ -180,7 +197,7 @@ class V1Env(ApiResource):
def _create_influxdb_container(self, client):
- ports = [8083, 8086]
+ ports = [consts.INFLUXDB_DASHBOARD_PORT, consts.INFLUXDB_PORT]
port_bindings = {k: k for k in ports}
restart_policy = {"MaximumRetryCount": 0, "Name": "always"}
host_config = client.create_host_config(port_bindings=port_bindings,
@@ -196,6 +213,7 @@ class V1Env(ApiResource):
host_config=host_config)
LOG.info('Starting container')
client.start(container)
+ return container
def _config_influxdb(self):
try:
@@ -208,7 +226,7 @@ class V1Env(ApiResource):
except Exception:
LOG.exception('Config influxdb failed')
- def _change_output_to_influxdb(self):
+ def _change_output_to_influxdb(self, ip):
utils.makedirs(consts.CONF_DIR)
parser = configparser.ConfigParser()
@@ -218,7 +236,7 @@ class V1Env(ApiResource):
LOG.info('Set dispatcher to influxdb')
parser.set('DEFAULT', 'dispatcher', 'influxdb')
parser.set('dispatcher_influxdb', 'target',
- 'http://%s:8086' % consts.INFLUXDB_IP)
+ 'http://{}:{}'.format(ip, consts.INFLUXDB_PORT))
LOG.info('Writing to %s', consts.CONF_FILE)
with open(consts.CONF_FILE, 'w') as f:
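Note on the env.py hunks above: instead of hard-coded GRAFANA_IP/INFLUXDB_IP constants, the create helpers now return the container, the container is re-inspected to read its bridge-network address, and that address is used both for the Grafana data source and for the influxdb dispatcher target in yardstick.conf. A minimal sketch of the same flow, assuming the docker-py Client and configparser modules the file already uses; the helper names below are illustrative, not functions from the patch:

    import configparser
    from docker import Client  # docker-py, as used in api/resources/v1/env.py

    def container_bridge_ip(client, container):
        # Re-inspect the started container and read its bridge-network address.
        info = client.inspect_container(container['Id'])
        return info['NetworkSettings']['Networks']['bridge']['IPAddress']

    def write_influxdb_target(conf_file, ip, port=8086):
        # Point the yardstick dispatcher at the freshly discovered container IP.
        parser = configparser.ConfigParser()
        parser.read(conf_file)
        if not parser.has_section('dispatcher_influxdb'):
            parser.add_section('dispatcher_influxdb')
        parser.set('DEFAULT', 'dispatcher', 'influxdb')
        parser.set('dispatcher_influxdb', 'target', 'http://{}:{}'.format(ip, port))
        with open(conf_file, 'w') as f:
            parser.write(f)

    # usage sketch:
    # client = Client(base_url='unix://var/run/docker.sock')
    # ip = container_bridge_ip(client, container)
    # write_influxdb_target('/etc/yardstick/yardstick.conf', ip)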
diff --git a/api/resources/v2/containers.py b/api/resources/v2/containers.py
index ce7130376..66dc94120 100644
--- a/api/resources/v2/containers.py
+++ b/api/resources/v2/containers.py
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
diff --git a/api/resources/v2/environments.py b/api/resources/v2/environments.py
index e4679b0d6..f021a3c5a 100644
--- a/api/resources/v2/environments.py
+++ b/api/resources/v2/environments.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import uuid
import logging
diff --git a/api/resources/v2/images.py b/api/resources/v2/images.py
index a1577b5d3..8359e105b 100644
--- a/api/resources/v2/images.py
+++ b/api/resources/v2/images.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import logging
import subprocess
import threading
diff --git a/api/resources/v2/openrcs.py b/api/resources/v2/openrcs.py
index 5f3b9382f..cb506d0e8 100644
--- a/api/resources/v2/openrcs.py
+++ b/api/resources/v2/openrcs.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import uuid
import logging
import re
diff --git a/api/resources/v2/pods.py b/api/resources/v2/pods.py
index ebc1312da..f2316d353 100644
--- a/api/resources/v2/pods.py
+++ b/api/resources/v2/pods.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import uuid
import yaml
import logging
diff --git a/api/resources/v2/projects.py b/api/resources/v2/projects.py
index 376cf1a37..2ff61d0fe 100644
--- a/api/resources/v2/projects.py
+++ b/api/resources/v2/projects.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import uuid
import logging
diff --git a/api/resources/v2/tasks.py b/api/resources/v2/tasks.py
index e95ae0550..885a190c6 100644
--- a/api/resources/v2/tasks.py
+++ b/api/resources/v2/tasks.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import uuid
import logging
from datetime import datetime
diff --git a/api/resources/v2/testcases.py b/api/resources/v2/testcases.py
index 8d5b5e398..b47a8f6b7 100644
--- a/api/resources/v2/testcases.py
+++ b/api/resources/v2/testcases.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import logging
import errno
import os
diff --git a/api/resources/v2/testsuites.py b/api/resources/v2/testsuites.py
index ee942eff9..56ad47375 100644
--- a/api/resources/v2/testsuites.py
+++ b/api/resources/v2/testsuites.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import os
import errno
import logging
diff --git a/api/utils/thread.py b/api/utils/thread.py
index 5f4ec7e94..20bd07a12 100644
--- a/api/utils/thread.py
+++ b/api/utils/thread.py
@@ -1,3 +1,11 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
import threading
import os
import logging
diff --git a/plugin/CI/storperf.yaml b/plugin/CI/storperf.yaml
index e144dd150..70915f661 100644
--- a/plugin/CI/storperf.yaml
+++ b/plugin/CI/storperf.yaml
@@ -7,7 +7,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-# StorPerf plugin configration file for huawei-pod1
+# StorPerf plugin configration file for compass pod in CI
# Used for integration StorPerf into Yardstick as a plugin
schema: "yardstick:plugin:0.1"
@@ -16,6 +16,6 @@ plugins:
name: storperf
deployment:
- ip: 192.168.10.6
+ ip: local
user: root
password: root
diff --git a/samples/storperf.yaml b/samples/storperf.yaml
index 5000759e0..2ea022173 100644
--- a/samples/storperf.yaml
+++ b/samples/storperf.yaml
@@ -18,6 +18,7 @@ scenarios:
options:
agent_count: 1
agent_image: "Ubuntu-16.04"
+ agent_flavor: "storperf"
public_network: "ext-net"
volume_size: 2
# target:
diff --git a/samples/vnf_samples/nsut/ping/tc_ping_ovs_dpdk_context.yaml b/samples/vnf_samples/nsut/ping/tc_ping_ovs_dpdk_context.yaml
new file mode 100644
index 000000000..7654b0f96
--- /dev/null
+++ b/samples/vnf_samples/nsut/ping/tc_ping_ovs_dpdk_context.yaml
@@ -0,0 +1,42 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: "yardstick:task:0.1"
+
+scenarios:
+-
+ type: NSPerf
+ traffic_profile: ../../traffic_profiles/fixed.yaml
+ topology: ping_tg_topology.yaml # TODO: look in relative path where the tc.yaml is found
+
+ nodes: # This section is copied from pod.xml or resolved via Heat
+ tg__1: trafficgen_1.yardstick
+ vnf__1: vnf.yardstick
+
+ vnf_options:
+ tg__1:
+ target_ip: pingvnf__1.xe0.local_ip # TODO: resolve to config vars
+ vnf__1:
+ target_ip: pinggen__1.xe1.local_ip # TODO: resolve to config vars
+ runner:
+ type: Duration
+ duration: 10
+
+context:
+ type: Standalone
+ name: yardstick
+ nfvi_type: Ovsdpdk
+ vm_deploy: True
+ file: /etc/yardstick/nodes/pod_ovs.yaml
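Note on the new sample task above: it selects the Standalone context and chooses its NFVI backend through nfvi_type (Ovsdpdk here, Sriov in the matching contexts added further down), pointing it at a pod file such as /etc/yardstick/nodes/pod_ovs.yaml. A rough, purely illustrative sketch of that dispatch, not the actual standalone.py code; the registry and function names are assumptions:

    # Hypothetical registry mapping the task file's nfvi_type to a backend class,
    # e.g. {'Sriov': Sriov, 'Ovsdpdk': Ovsdpdk}; the real standalone context
    # performs its own lookup and lifecycle handling.
    NFVI_BACKENDS = {}

    def init_standalone_context(attrs):
        backend_cls = NFVI_BACKENDS[attrs['nfvi_type']]
        backend = backend_cls()
        # parse_pod_and_get_data() is the entry point exercised by the new
        # unit tests; the pod file layout matches the samples further down.
        backend.parse_pod_and_get_data(attrs['file'])
        return backend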
diff --git a/tests/ci/prepare_env.sh b/tests/ci/prepare_env.sh
index 3d9cc298f..c3ee4c76b 100755
--- a/tests/ci/prepare_env.sh
+++ b/tests/ci/prepare_env.sh
@@ -11,7 +11,7 @@
# Perepare the environment to run yardstick ci
: ${DEPLOY_TYPE:='bm'} # Can be any of 'bm' (Bare Metal) or 'virt' (Virtual)
-
+: ${INSTALLER_TYPE:='unknown'}
: ${NODE_NAME:='unknown'}
: ${EXTERNAL_NETWORK:='admin_floating_net'}
@@ -61,9 +61,9 @@ export EXTERNAL_NETWORK INSTALLER_TYPE DEPLOY_TYPE NODE_NAME
# Prepare a admin-rc file for StorPerf integration
$YARDSTICK_REPO_DIR/tests/ci/prepare_storperf_admin-rc.sh
-# copy a admin-rc file for StorPerf integration to the deployment location
-if [ "$NODE_NAME" == "huawei-pod1" ]; then
- bash $YARDSTICK_REPO_DIR/tests/ci/scp_storperf_files.sh
+# copy Storperf related files to the deployment location
+if [ "$INSTALLER_TYPE" == "compass" ]; then
+ source $YARDSTICK_REPO_DIR/tests/ci/scp_storperf_files.sh
fi
# Fetching id_rsa file from jump_server..."
diff --git a/tests/ci/scp_storperf_files.sh b/tests/ci/scp_storperf_files.sh
index 234032cf1..71306eb80 100644
--- a/tests/ci/scp_storperf_files.sh
+++ b/tests/ci/scp_storperf_files.sh
@@ -12,9 +12,26 @@
# Copy storperf_admin-rc to deployment location.
ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-sshpass -p root scp 2>/dev/null $ssh_options ~/storperf_admin-rc \
- root@192.168.10.6:/root/ &> /dev/null
-sshpass -p root scp 2>/dev/null $ssh_options /home/opnfv/repos/storperf/docker-compose/docker-compose.yaml \
- root@192.168.10.6:/root/ &> /dev/null
-sshpass -p root scp 2>/dev/null $ssh_options /home/opnfv/repos/storperf/docker-compose/nginx.conf \
- root@192.168.10.6:/root/ &> /dev/null
+
+scp_files(){
+ export JUMP_HOST_IP
+ sshpass -p root scp 2>/dev/null $ssh_options ~/storperf_admin-rc \
+ root@${JUMP_HOST_IP}:/root/ &> /dev/null
+ sshpass -p root scp 2>/dev/null $ssh_options /home/opnfv/repos/storperf/docker-compose/docker-compose.yaml \
+ root@${JUMP_HOST_IP}:/root/ &> /dev/null
+}
+
+case "$NODE_NAME" in
+ "huawei-pod1")
+ JUMP_HOST_IP='192.168.10.6'
+ scp_files
+ ;;
+ "huawei-pod2")
+ JUMP_HOST_IP='192.168.11.2'
+ scp_files
+ ;;
+ *)
+ # no node name, exit
+ echo "storperf test case will not run on this pod, skipping scp files..."
+ ;;
+esac
diff --git a/tests/ci/yardstick-verify b/tests/ci/yardstick-verify
index 096ea534f..16598df7b 100755
--- a/tests/ci/yardstick-verify
+++ b/tests/ci/yardstick-verify
@@ -99,8 +99,8 @@ set -o pipefail
install_storperf()
{
- # Install Storper on huawei-pod1
- if [ "$NODE_NAME" == "huawei-pod1" ]; then
+ # Install Storper on huawei-pod1 and huawei-pod2
+ if [ "$NODE_NAME" == "huawei-pod1" -o "$NODE_NAME" == "huawei-pod2" ]; then
echo
echo "========== Installing storperf =========="
@@ -114,8 +114,8 @@ install_storperf()
remove_storperf()
{
- # remove Storper from huawei-pod1
- if [ "$NODE_NAME" == "huawei-pod1" ]; then
+ # remove Storper from huawei-pod1 and huawei-pod2
+ if [ "$NODE_NAME" == "huawei-pod1" -o "$NODE_NAME" == "huawei-pod2" ]; then
echo
echo "========== Removing storperf =========="
@@ -293,8 +293,13 @@ main()
echo
# check OpenStack services
+ if [[ $OS_INSECURE ]] && [[ "$(echo $OS_INSECURE | tr '[:upper:]' '[:lower:]')" = "true" ]]; then
+ SECURE="--insecure"
+ else
+ SECURE=""
+ fi
echo "Checking OpenStack services:"
- for cmd in "openstack image list" "openstack server list" "openstack stack list"; do
+ for cmd in "openstack ${SECURE} image list" "openstack ${SECURE} server list" "openstack ${SECURE} stack list"; do
echo " checking ${cmd} ..."
if ! $cmd >/dev/null; then
echo "error: command \"$cmd\" failed"
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
index 4c7fdab90..f5ccb255a 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc008.yaml
@@ -29,12 +29,21 @@ scenarios:
packetsize: {{pkt_size}}
number_of_ports: {{num_ports}}
duration: 20
+ # choose vnic name: default to eth0
+ # vnic_name: 'ens3'
+ # turn on multiqueue inside VM
+ # multiqueue: True
+ # choose starting pps: default 1M;
+ # works with binary search runner Dynamictp to find max throughput per sla
+ # pps: 3000000
host: demeter.yardstick-TC008
target: poseidon.yardstick-TC008
runner:
type: Iteration
+ # binary search runner
+ # type: Dynamictp
iterations: 10
interval: 1
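Note on the tc008 comments above: they describe an optional mode where the scenario starts from a given pps and the Dynamictp binary-search runner converges on the highest rate that still meets the SLA. The following only illustrates that search idea in plain Python; it is not the Dynamictp runner's implementation, and run_iteration is a hypothetical callable:

    def find_max_pps(run_iteration, low=1000000, high=10000000, tol=10000):
        """Binary-search the highest pps that still satisfies the SLA.

        run_iteration(pps) is assumed to run one pktgen iteration at the given
        rate and return True when the measured loss is within the SLA.
        """
        best = low
        while high - low > tol:
            mid = (low + high) // 2
            if run_iteration(mid):
                best, low = mid, mid   # SLA met: try a higher rate
            else:
                high = mid             # SLA violated: back off
        return best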
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
index 5bd3f676f..ef4f02c9e 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml
@@ -14,13 +14,14 @@ description: >
StorPerf is a tool to measure block and object storage performance in an NFVI.
{% set public_network = public_network or "ext-net" %}
-{% set StorPerf_ip = StorPerf_ip or "192.168.10.6" %}
+{% set StorPerf_ip = StorPerf_ip or "192.168.200.1" %}
scenarios:
-
type: StorPerf
options:
agent_count: 1
agent_image: "Ubuntu-16.04"
+ agent_flavor: "storperf"
public_network: {{public_network}}
volume_size: 4
block_sizes: "4096"
diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
index ba1a93cec..dea44c8b3 100644
--- a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
+++ b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml
@@ -134,10 +134,12 @@ test_cases:
file_name: opnfv_yardstick_tc074.yaml
constraint:
installer: compass
- pod: huawei-pod1
+ pod: huawei-pod1, huawei-pod2
task_args:
huawei-pod1: '{"public_network": "ext-net",
- "StorPerf_ip": "192.168.200.1"}'
+ "StorPerf_ip": "192.168.10.6"}'
+ huawei-pod2: '{"public_network": "ext-net",
+ "StorPerf_ip": "192.168.11.2"}'
-
file_name: opnfv_yardstick_tc075.yaml
constraint:
diff --git a/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml b/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml
new file mode 100644
index 000000000..306915ca1
--- /dev/null
+++ b/tests/unit/benchmark/contexts/nodes_duplicate_sample_new.yaml
@@ -0,0 +1,32 @@
+nodes:
+-
+ name: sriov
+ role: Sriov1
+ ip: 10.123.123.122
+ user: root
+ auth_type: password
+ password: password
+ vf_macs:
+ - "00:00:00:00:00:00"
+ - "00:00:00:00:00:00"
+ phy_ports: # Physical ports to configure sriov
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+-
+ name: sriov
+ role: Sriov1
+ ip: 10.123.123.111
+ user: root
+ auth_type: password
+ password: password
+ vf_macs:
+ - "00:00:00:00:00:00"
+ - "00:00:00:00:00:00"
+ phy_ports: # Physical ports to configure sriov
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
diff --git a/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml b/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml
new file mode 100644
index 000000000..65449c91c
--- /dev/null
+++ b/tests/unit/benchmark/contexts/nodes_duplicate_sample_ovs.yaml
@@ -0,0 +1,63 @@
+# Copyright (c) 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+nodes:
+-
+ name: ovs
+ role: test
+ ip: 10.223.197.222
+ user: root
+ auth_type: password
+ password: intel123
+ vpath: "/usr/local/"
+ vports:
+ - dpdkvhostuser0
+ - dpdkvhostuser1
+ vports_mac:
+ - "00:00:00:00:00:03"
+ - "00:00:00:00:00:04"
+ phy_ports: # Physical ports to configure ovs
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ flow:
+ - ovs-ofctl add-flow br0 in_port=1,action=output:3
+ - ovs-ofctl add-flow br0 in_port=3,action=output:1
+ - ovs-ofctl add-flow br0 in_port=4,action=output:2
+ - ovs-ofctl add-flow br0 in_port=2,action=output:4
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+-
+ name: ovs
+ role: test
+ ip: 10.223.197.112
+ user: root
+ auth_type: password
+ password: intel123
+ vpath: "/usr/local/"
+ vports:
+ - dpdkvhostuser0
+ - dpdkvhostuser1
+ vports_mac:
+ - "00:00:00:00:00:03"
+ - "00:00:00:00:00:04"
+ phy_ports: # Physical ports to configure ovs
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ flow:
+ - ovs-ofctl add-flow br0 in_port=1,action=output:3
+ - ovs-ofctl add-flow br0 in_port=3,action=output:1
+ - ovs-ofctl add-flow br0 in_port=4,action=output:2
+ - ovs-ofctl add-flow br0 in_port=2,action=output:4
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
diff --git a/tests/unit/benchmark/contexts/nodes_sample_new.yaml b/tests/unit/benchmark/contexts/nodes_sample_new.yaml
new file mode 100644
index 000000000..a400bec03
--- /dev/null
+++ b/tests/unit/benchmark/contexts/nodes_sample_new.yaml
@@ -0,0 +1,96 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.123.123.123
+ user: root
+ auth_type: password
+ password: password
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+-
+ name: sriov
+ role: Sriov
+ ip: 10.123.123.122
+ user: root
+ auth_type: password
+ password: password
+ vf_macs:
+ - "00:00:00:00:00:00"
+ - "00:00:00:00:00:00"
+ phy_ports: # Physical ports to configure sriov
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
+-
+ name: vnf
+ role: vnf
+ ip: 10.123.123.121
+ user: root
+ auth_type: password
+ password: password
+ host: 10.123.123.121 #BM host == ip, SRIOV & ovs-dpdk host == compute node.
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:06:00.0"
+ driver: i40e
+ dpdk_port_num: 0
+ local_ip: "152.16.100.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:06:00.1"
+ driver: i40e
+ dpdk_port_num: 1
+ local_ip: "152.16.40.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+ routing_table:
+ - network: "152.16.100.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.100.20"
+ if: "xe0"
+ - network: "152.16.40.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.40.20"
+ if: "xe1"
+ nd_route_tbl:
+ - network: "0064:ff9b:0:0:0:0:9810:6414"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:6414"
+ if: "xe0"
+ - network: "0064:ff9b:0:0:0:0:9810:2814"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:2814"
+ if: "xe1"
+
diff --git a/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml b/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml
new file mode 100644
index 000000000..55ff2e778
--- /dev/null
+++ b/tests/unit/benchmark/contexts/nodes_sample_new_sriov.yaml
@@ -0,0 +1,82 @@
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.123.123.123
+ user: root
+ auth_type: password
+ password: password
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+-
+ name: sriov
+ role: Sriov1
+ ip: 10.123.123.122
+ user: root
+ auth_type: password
+ password: password
+ vf_macs:
+ - "00:00:00:00:00:00"
+ - "00:00:00:00:00:00"
+ phy_ports: # Physical ports to configure sriov
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
+-
+ name: vnf
+ role: vnf
+ ip: 10.123.123.121
+ user: root
+ auth_type: password
+ password: password
+ host: 10.123.123.121 #BM host == ip, SRIOV & ovs-dpdk host == compute node.
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:06:00.0"
+ driver: i40e
+ dpdk_port_num: 0
+ local_ip: "152.16.100.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:06:00.1"
+ driver: i40e
+ dpdk_port_num: 1
+ local_ip: "152.16.40.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:00"
+ routing_table:
+ - network: "152.16.100.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.100.20"
+ if: "xe0"
+ - network: "152.16.40.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.40.20"
+ if: "xe1"
+ nd_route_tbl:
+ - network: "0064:ff9b:0:0:0:0:9810:6414"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:6414"
+ if: "xe0"
+ - network: "0064:ff9b:0:0:0:0:9810:2814"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:2814"
+ if: "xe1"
+
diff --git a/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml b/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml
new file mode 100644
index 000000000..b1da1ea9f
--- /dev/null
+++ b/tests/unit/benchmark/contexts/nodes_sample_ovs.yaml
@@ -0,0 +1,104 @@
+# Copyright (c) 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.223.197.182
+ user: root
+ auth_type: password
+ password: intel123
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:68"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:69"
+-
+ name: ovs
+ role: Ovsdpdk
+ ip: 10.223.197.222
+ user: root
+ auth_type: password
+ password: intel123
+ vpath: "/usr/local/"
+ vports:
+ - dpdkvhostuser0
+ - dpdkvhostuser1
+ vports_mac:
+ - "00:00:00:00:00:03"
+ - "00:00:00:00:00:04"
+ phy_ports: # Physical ports to configure ovs
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ flow:
+ - ovs-ofctl add-flow br0 in_port=1,action=output:3
+ - ovs-ofctl add-flow br0 in_port=3,action=output:1
+ - ovs-ofctl add-flow br0 in_port=4,action=output:2
+ - ovs-ofctl add-flow br0 in_port=2,action=output:4
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
+-
+ name: vnf
+ role: vnf
+ ip: 10.223.197.155
+ user: root
+ auth_type: password
+ password: intel123
+ host: 10.223.197.140
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:00:04.0"
+ driver: virtio-pci
+ dpdk_port_num: 0
+ local_ip: "152.16.100.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:03"
+
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:00:05.0"
+ driver: virtio-pci
+ dpdk_port_num: 1
+ local_ip: "152.16.40.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:04"
+ routing_table:
+ - network: "152.16.100.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.100.20"
+ if: "xe0"
+ - network: "152.16.40.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.40.20"
+ if: "xe1"
+ nd_route_tbl:
+ - network: "0064:ff9b:0:0:0:0:9810:6414"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:6414"
+ if: "xe0"
+ - network: "0064:ff9b:0:0:0:0:9810:2814"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:2814"
+ if: "xe1"
diff --git a/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml b/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml
new file mode 100644
index 000000000..c02849a05
--- /dev/null
+++ b/tests/unit/benchmark/contexts/nodes_sample_ovsdpdk.yaml
@@ -0,0 +1,104 @@
+# Copyright (c) 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.223.197.182
+ user: root
+ auth_type: password
+ password: intel123
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:68"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:69"
+-
+ name: ovs
+ role: Ovsdpdk1
+ ip: 10.223.197.222
+ user: root
+ auth_type: password
+ password: intel123
+ vpath: "/usr/local/"
+ vports:
+ - dpdkvhostuser0
+ - dpdkvhostuser1
+ vports_mac:
+ - "00:00:00:00:00:03"
+ - "00:00:00:00:00:04"
+ phy_ports: # Physical ports to configure ovs
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ flow:
+ - ovs-ofctl add-flow br0 in_port=1,action=output:3
+ - ovs-ofctl add-flow br0 in_port=3,action=output:1
+ - ovs-ofctl add-flow br0 in_port=4,action=output:2
+ - ovs-ofctl add-flow br0 in_port=2,action=output:4
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
+-
+ name: vnf
+ role: vnf
+ ip: 10.223.197.155
+ user: root
+ auth_type: password
+ password: intel123
+ host: 10.223.197.140
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:00:04.0"
+ driver: virtio-pci
+ dpdk_port_num: 0
+ local_ip: "152.16.100.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:03"
+
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:00:05.0"
+ driver: virtio-pci
+ dpdk_port_num: 1
+ local_ip: "152.16.40.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:04"
+ routing_table:
+ - network: "152.16.100.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.100.20"
+ if: "xe0"
+ - network: "152.16.40.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.40.20"
+ if: "xe1"
+ nd_route_tbl:
+ - network: "0064:ff9b:0:0:0:0:9810:6414"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:6414"
+ if: "xe0"
+ - network: "0064:ff9b:0:0:0:0:9810:2814"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:2814"
+ if: "xe1"
diff --git a/tests/unit/benchmark/contexts/ovs_sample_password.yaml b/tests/unit/benchmark/contexts/ovs_sample_password.yaml
new file mode 100644
index 000000000..b1da1ea9f
--- /dev/null
+++ b/tests/unit/benchmark/contexts/ovs_sample_password.yaml
@@ -0,0 +1,104 @@
+# Copyright (c) 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.223.197.182
+ user: root
+ auth_type: password
+ password: intel123
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:68"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:69"
+-
+ name: ovs
+ role: Ovsdpdk
+ ip: 10.223.197.222
+ user: root
+ auth_type: password
+ password: intel123
+ vpath: "/usr/local/"
+ vports:
+ - dpdkvhostuser0
+ - dpdkvhostuser1
+ vports_mac:
+ - "00:00:00:00:00:03"
+ - "00:00:00:00:00:04"
+ phy_ports: # Physical ports to configure ovs
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ flow:
+ - ovs-ofctl add-flow br0 in_port=1,action=output:3
+ - ovs-ofctl add-flow br0 in_port=3,action=output:1
+ - ovs-ofctl add-flow br0 in_port=4,action=output:2
+ - ovs-ofctl add-flow br0 in_port=2,action=output:4
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
+-
+ name: vnf
+ role: vnf
+ ip: 10.223.197.155
+ user: root
+ auth_type: password
+ password: intel123
+ host: 10.223.197.140
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:00:04.0"
+ driver: virtio-pci
+ dpdk_port_num: 0
+ local_ip: "152.16.100.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:03"
+
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:00:05.0"
+ driver: virtio-pci
+ dpdk_port_num: 1
+ local_ip: "152.16.40.19"
+ netmask: "255.255.255.0"
+ local_mac: "00:00:00:00:00:04"
+ routing_table:
+ - network: "152.16.100.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.100.20"
+ if: "xe0"
+ - network: "152.16.40.20"
+ netmask: "255.255.255.0"
+ gateway: "152.16.40.20"
+ if: "xe1"
+ nd_route_tbl:
+ - network: "0064:ff9b:0:0:0:0:9810:6414"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:6414"
+ if: "xe0"
+ - network: "0064:ff9b:0:0:0:0:9810:2814"
+ netmask: "112"
+ gateway: "0064:ff9b:0:0:0:0:9810:2814"
+ if: "xe1"
diff --git a/tests/unit/benchmark/contexts/ovs_sample_ssh_key.yaml b/tests/unit/benchmark/contexts/ovs_sample_ssh_key.yaml
new file mode 100644
index 000000000..896ec33bb
--- /dev/null
+++ b/tests/unit/benchmark/contexts/ovs_sample_ssh_key.yaml
@@ -0,0 +1,69 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+# Sample config file about the POD information, including the
+# name/IP/user/ssh key of Bare Metal and Controllers/Computes
+#
+# The options of this config file include:
+# name: the name of this node
+# role: node's role, support role: Master/Controller/Comupte/BareMetal
+# ip: the node's IP address
+# user: the username for login
+# key_filename:the path of the private key file for login
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.10.10.10
+ auth_type: ssh_key
+ user: root
+ ssh_port: 22
+ key_filename: /root/.ssh/id_rsa
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:68"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:69"
+-
+ name: ovs
+ role: Ovsdpdk
+ ip: 10.223.197.222
+ auth_type: ssh_key
+ user: root
+ ssh_port: 22
+ key_filename: /root/.ssh/id_rsa
+ vpath: "/usr/local/"
+ vports:
+ - dpdkvhostuser0
+ - dpdkvhostuser1
+ vports_mac:
+ - "00:00:00:00:00:03"
+ - "00:00:00:00:00:04"
+ phy_ports: # Physical ports to configure ovs
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ flow:
+ - ovs-ofctl add-flow br0 in_port=1,action=output:3
+ - ovs-ofctl add-flow br0 in_port=3,action=output:1
+ - ovs-ofctl add-flow br0 in_port=4,action=output:2
+ - ovs-ofctl add-flow br0 in_port=2,action=output:4
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
+
diff --git a/tests/unit/benchmark/contexts/ovs_sample_write_to_file.txt b/tests/unit/benchmark/contexts/ovs_sample_write_to_file.txt
new file mode 100644
index 000000000..f0eec86f6
--- /dev/null
+++ b/tests/unit/benchmark/contexts/ovs_sample_write_to_file.txt
@@ -0,0 +1 @@
+some content
\ No newline at end of file
diff --git a/tests/unit/benchmark/contexts/sriov_sample_password.yaml b/tests/unit/benchmark/contexts/sriov_sample_password.yaml
new file mode 100644
index 000000000..4f60e46d5
--- /dev/null
+++ b/tests/unit/benchmark/contexts/sriov_sample_password.yaml
@@ -0,0 +1,52 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.10.10.10
+ auth_type: password
+ user: root
+ password: password
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:68"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:69"
+-
+ name: sriov
+ role: Sriov
+ ip: 10.10.10.11
+ auth_type: password
+ user: root
+ password: password
+ vf_macs:
+ - "00:00:00:71:7d:25"
+ - "00:00:00:71:7d:26"
+ phy_ports: # Physical ports to configure sriov
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
diff --git a/tests/unit/benchmark/contexts/sriov_sample_ssh_key.yaml b/tests/unit/benchmark/contexts/sriov_sample_ssh_key.yaml
new file mode 100644
index 000000000..faa496771
--- /dev/null
+++ b/tests/unit/benchmark/contexts/sriov_sample_ssh_key.yaml
@@ -0,0 +1,54 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nodes:
+-
+ name: trafficgen_1
+ role: TrafficGen
+ ip: 10.10.10.10
+ auth_type: ssh_key
+ user: root
+ ssh_port: 22
+ key_filename: /root/.ssh/id_rsa
+ interfaces:
+ xe0: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.0"
+ driver: ixgbe
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:68"
+ xe1: # logical name from topology.yaml and vnfd.yaml
+ vpci: "0000:03:00.1"
+ driver: ixgbe
+ dpdk_port_num: 1
+ local_ip: "152.16.100.21"
+ netmask: "255.255.255.0"
+ local_mac: "90:e2:ba:77:ce:69"
+-
+ name: sriov
+ role: Sriov
+ ip: 10.10.10.11
+ auth_type: ssh_key
+ user: root
+ ssh_port: 22
+ key_filename: /root/.ssh/id_rsa
+ vf_macs:
+ - "00:00:00:71:7d:25"
+ - "00:00:00:71:7d:26"
+ phy_ports: # Physical ports to configure sriov
+ - "0000:06:00.0"
+ - "0000:06:00.1"
+ phy_driver: i40e # kernel driver
+ images: "/var/lib/libvirt/images/ubuntu1.img"
diff --git a/tests/unit/benchmark/contexts/sriov_sample_write_to_file.txt b/tests/unit/benchmark/contexts/sriov_sample_write_to_file.txt
new file mode 100644
index 000000000..f0eec86f6
--- /dev/null
+++ b/tests/unit/benchmark/contexts/sriov_sample_write_to_file.txt
@@ -0,0 +1 @@
+some content
\ No newline at end of file
diff --git a/tests/unit/benchmark/contexts/test_model.py b/tests/unit/benchmark/contexts/test_model.py
index 3fb186b9b..1ce550306 100644
--- a/tests/unit/benchmark/contexts/test_model.py
+++ b/tests/unit/benchmark/contexts/test_model.py
@@ -161,6 +161,23 @@ class NetworkTestCase(unittest.TestCase):
self.assertEqual(model.Network.find_external_network(), 'ext_net')
+ def test_construct_gateway_ip_is_null(self):
+
+ attrs = {'gateway_ip': 'null'}
+ test_network = model.Network('foo', self.mock_context, attrs)
+ self.assertEqual(test_network.gateway_ip, 'null')
+
+ def test_construct_gateway_ip_is_none(self):
+
+ attrs = {'gateway_ip': None}
+ test_network = model.Network('foo', self.mock_context, attrs)
+ self.assertEqual(test_network.gateway_ip, 'null')
+
+ def test_construct_gateway_ip_is_absent(self):
+
+ attrs = {}
+ test_network = model.Network('foo', self.mock_context, attrs)
+ self.assertIsNone(test_network.gateway_ip)
class ServerTestCase(unittest.TestCase):
@@ -214,11 +231,12 @@ class ServerTestCase(unittest.TestCase):
attrs = {'image': 'some-image', 'flavor': 'some-flavor', 'floating_ip': '192.168.1.10', 'floating_ip_assoc': 'some-vm'}
test_server = model.Server('foo', self.mock_context, attrs)
- self.mock_context.flavors = ['flavor1', 'flavor2', 'some-flavor']
+ self.mock_context.flavors = ['flavor1', 'flavor2', 'some-flavor']
mock_network = mock.Mock()
mock_network.name = 'some-network'
mock_network.stack_name = 'some-network-stack'
+ mock_network.allowed_address_pairs = ["1", "2"]
mock_network.subnet_stack_name = 'some-network-stack-subnet'
mock_network.provider = 'sriov'
mock_network.external_network = 'ext_net'
@@ -232,7 +250,8 @@ class ServerTestCase(unittest.TestCase):
mock_network.stack_name,
mock_network.subnet_stack_name,
sec_group_id=self.mock_context.secgroup_name,
- provider=mock_network.provider)
+ provider=mock_network.provider,
+ allowed_address_pairs=mock_network.allowed_address_pairs)
mock_template.add_floating_ip.assert_called_with(
'some-server-fip',
@@ -290,11 +309,12 @@ class ServerTestCase(unittest.TestCase):
}
test_server = model.Server('ServerFlavor-2', self.mock_context, attrs)
- self.mock_context.flavors = ['flavor2']
+ self.mock_context.flavors = ['flavor2']
mock_network = mock.Mock()
- mock_network.configure_mock(name='some-network', stack_name= 'some-network-stack',
- subnet_stack_name = 'some-network-stack-subnet',
- provider = 'some-provider')
+ mock_network.allowed_address_pairs = ["1", "2"]
+ mock_network.configure_mock(name='some-network', stack_name='some-network-stack',
+ subnet_stack_name='some-network-stack-subnet',
+ provider='some-provider')
test_server._add_instance(mock_template, 'ServerFlavor-2',
[mock_network], 'hints')
@@ -304,7 +324,8 @@ class ServerTestCase(unittest.TestCase):
mock_network.stack_name,
mock_network.subnet_stack_name,
provider=mock_network.provider,
- sec_group_id=self.mock_context.secgroup_name)
+ sec_group_id=self.mock_context.secgroup_name,
+ allowed_address_pairs=mock_network.allowed_address_pairs)
mock_template.add_server.assert_called_with(
'ServerFlavor-2', 'some-image',
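Note on the new NetworkTestCase tests above: they pin down how model.Network treats gateway_ip — an explicit string 'null' is kept, an explicit None is coerced to the string 'null', and a missing key is left as None. A tiny sketch reproducing exactly the behaviour those three assertions require (not the model.py code itself):

    def resolve_gateway_ip(attrs):
        # Behaviour implied by the three new gateway_ip tests.
        if 'gateway_ip' not in attrs:
            return None
        value = attrs['gateway_ip']
        return 'null' if value is None else value

    assert resolve_gateway_ip({'gateway_ip': 'null'}) == 'null'
    assert resolve_gateway_ip({'gateway_ip': None}) == 'null'
    assert resolve_gateway_ip({}) is None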
diff --git a/tests/unit/benchmark/contexts/test_ovsdpdk.py b/tests/unit/benchmark/contexts/test_ovsdpdk.py
new file mode 100644
index 000000000..ac25ec877
--- /dev/null
+++ b/tests/unit/benchmark/contexts/test_ovsdpdk.py
@@ -0,0 +1,325 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import mock
+import unittest
+
+from yardstick.benchmark.contexts import ovsdpdk
+
+NIC_INPUT = {
+ 'interface': {},
+ 'vports_mac': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+DRIVER = "i40e"
+NIC_DETAILS = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vports_mac': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+
+CORRECT_FILE_PATH = "/etc/yardstick/nodes/pod_ovs.yaml"
+WRONG_FILE_PATH = "/etc/yardstick/wrong.yaml"
+SAMPLE_FILE = "ovs_sample_write_to_file.txt"
+
+OVS = [{
+ 'auth_type': 'ssh_key',
+ 'name': 'ovs',
+ 'ssh_port': 22,
+ 'ip': '10.10.10.11',
+ 'key_filename': '/root/.ssh/id_rsa',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'vpath': '/usr/local/',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'flow': ['ovs-ofctl add-flow br0 in_port=1,action=output:3',
+ 'ovs-ofctl add-flow br0 in_port=3,action=output:1',
+ 'ovs-ofctl add-flow br0 in_port=4,action=output:2',
+ 'ovs-ofctl add-flow br0 in_port=2,action=output:4'],
+ 'phy_driver': 'i40e',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+OVS_PASSWORD = [{
+ 'auth_type': 'password',
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.10.10.11',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'flow': ['ovs-ofctl add-flow br0 in_port=1,action=output:3',
+ 'ovs-ofctl add-flow br0 in_port=3,action=output:1',
+ 'ovs-ofctl add-flow br0 in_port=4,action=output:2',
+ 'ovs-ofctl add-flow br0 in_port=2,action=output:4'],
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+#vfnic = "i40evf"
+PCIS = ['0000:06:00.0', '0000:06:00.1']
+
+
+class OvsdpdkTestCase(unittest.TestCase):
+
+ NODES_SAMPLE_SSH = "ovs_sample_ssh_key.yaml"
+ NODES_SAMPLE_PASSWORD = "ovs_sample_password.yaml"
+
+ def setUp(self):
+ self.test_context = ovsdpdk.Ovsdpdk()
+
+ def test_construct(self):
+ self.assertIsNone(self.test_context.name)
+ self.assertIsNone(self.test_context.file_path)
+ self.assertEqual(self.test_context.nodes, [])
+ self.assertEqual(self.test_context.ovs, [])
+ self.assertFalse(self.test_context.vm_deploy)
+ self.assertTrue(self.test_context.first_run)
+ self.assertEqual(self.test_context.user, "")
+ self.assertEqual(self.test_context.ssh_ip, "")
+ self.assertEqual(self.test_context.passwd, "")
+ self.assertEqual(self.test_context.ssh_port, "")
+ self.assertEqual(self.test_context.auth_type, "")
+
+ def test_init(self):
+ self.test_context.parse_pod_and_get_data = mock.Mock()
+ self.test_context.file_path = CORRECT_FILE_PATH
+ self.test_context.init()
+ self.assertIsNone(self.test_context.init())
+
+ def test_successful_init_with_ssh(self):
+ CORRECT_FILE_PATH = self._get_file_abspath(self.NODES_SAMPLE_SSH)
+ self.test_context.parse_pod_and_get_data(CORRECT_FILE_PATH)
+
+ def test_successful_init_with_password(self):
+ CORRECT_FILE_PATH = self._get_file_abspath(self.NODES_SAMPLE_PASSWORD)
+ self.test_context.parse_pod_and_get_data(CORRECT_FILE_PATH)
+
+ def test_unsuccessful_init(self):
+ self.assertRaises(
+ IOError,
+ lambda: self.test_context.parse_pod_and_get_data(WRONG_FILE_PATH))
+
+ def test_ssh_connection(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+
+ @mock.patch('yardstick.network_services.utils.provision_tool', return_value="b")
+ def test_ssh_connection(self, mock_prov):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "b", ""))
+ ssh.return_value = ssh_mock
+ mock_prov.provision_tool = mock.Mock()
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS_PASSWORD
+ self.assertIsNone(ovs_obj.ssh_remote_machine())
+
+ @mock.patch('yardstick.network_services.utils.provision_tool', return_value="b")
+ def test_ssh_connection_ssh_key(self, mock_prov):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "b", ""))
+ ssh.return_value = ssh_mock
+ mock_prov.provision_tool = mock.Mock()
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ ovs_obj.key_filename = '/root/.ssh/id_rsa'
+ self.assertIsNone(ovs_obj.ssh_remote_machine())
+
+ def test_get_nic_details(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "eth0 eth1", ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.ovs = OVS
+ ovs_obj.connection = ssh_mock
+ self.assertIsNotNone(ovs_obj.get_nic_details())
+
+ def test_install_req_libs(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.first_run = True
+ ovs_obj.connection = ssh_mock
+ self.assertIsNone(ovs_obj.install_req_libs())
+
+ def test_setup_ovs(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ self.assertIsNone(ovs_obj.setup_ovs({"eth0 eth1"}))
+
+ def test_start_ovs_serverswitch(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ self.assertIsNone(ovs_obj.start_ovs_serverswitch())
+
+ def test_setup_ovs_bridge(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ self.assertIsNone(ovs_obj.setup_ovs_bridge())
+
+ def test_add_oflows(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ self.assertIsNone(ovs_obj.add_oflows())
+
+ def test_setup_ovs_context_vm_already_present(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ mock_ovs = mock.Mock()
+ ssh_mock.put = mock.Mock()
+ ovs_obj.check_output = mock.Mock(return_value=(0, "vm1"))
+ with mock.patch("yardstick.benchmark.contexts.ovsdpdk.time"):
+ self.assertIsNone(ovs_obj.setup_ovs_context(PCIS, NIC_DETAILS, DRIVER))
+
+ @mock.patch(
+ 'yardstick.benchmark.contexts.ovsdpdk',
+ return_value="Domain vm1 created from /tmp/vm_ovs.xml")
+    def test_is_vm_created(self, mock_ovsdpdk):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh_mock.put = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ mock_ovs = mock.Mock()
+ ret_create = mock.Mock()
+ pcis = NIC_DETAILS['pci']
+ driver = NIC_DETAILS['phy_driver']
+ self.assertIsNotNone(
+ mock_ovs.ovs_obj.setup_ovs_context(
+ pcis,
+ NIC_DETAILS,
+ driver))
+
+ def test_check_output(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ cmd = "command"
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ self.assertIsNotNone(ovs_obj.check_output(cmd, None))
+
+    def test_split_cpu_list_available(self):
+        with mock.patch("itertools.chain"):
+            ovs_obj = ovsdpdk.Ovsdpdk()
+            self.assertIsNotNone(ovs_obj.split_cpu_list('0,5'))
+
+    def test_split_cpu_list_null(self):
+        with mock.patch("itertools.chain"):
+            ovs_obj = ovsdpdk.Ovsdpdk()
+            self.assertEqual(ovs_obj.split_cpu_list([]), [])
+
+ def test_destroy_vm_successful(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ ovs_obj.check_output = mock.Mock(return_value=(0, "vm1"))
+            ssh_mock.execute = \
+                mock.Mock(return_value=(0, "0 i40e"))
+ self.assertIsNone(ovs_obj.destroy_vm())
+
+ def test_destroy_vm_unsuccessful(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ ovs_obj.connection = ssh_mock
+ ovs_obj.ovs = OVS
+ ovs_obj.check_output = mock.Mock(return_value=(1, {}))
+ self.assertIsNone(ovs_obj.destroy_vm())
+
+ def test_read_from_file(self):
+ CORRECT_FILE_PATH = self._get_file_abspath(self.NODES_SAMPLE_PASSWORD)
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ self.assertIsNotNone(ovs_obj.read_from_file(CORRECT_FILE_PATH))
+
+ def test_write_to_file(self):
+ ovs_obj = ovsdpdk.Ovsdpdk()
+ self.assertIsNone(ovs_obj.write_to_file(SAMPLE_FILE, "some content"))
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/benchmark/contexts/test_sriov.py b/tests/unit/benchmark/contexts/test_sriov.py
new file mode 100644
index 000000000..a8641a2eb
--- /dev/null
+++ b/tests/unit/benchmark/contexts/test_sriov.py
@@ -0,0 +1,421 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import mock
+import unittest
+
+from yardstick.benchmark.contexts import sriov
+
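+# Shared fixtures: canned NIC/VF details and node definitions reused by the
+# SriovTestCase methods below.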
+NIC_INPUT = {
+ 'interface': {},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+DRIVER = "i40e"
+NIC_DETAILS = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+
+CORRECT_FILE_PATH = "/etc/yardstick/nodes/pod_sriov.yaml"
+WRONG_FILE_PATH = "/etc/yardstick/wrong.yaml"
+SAMPLE_FILE = "sriov_sample_write_to_file.txt"
+
+SRIOV = [{
+ 'auth_type': 'ssh_key',
+ 'name': 'sriov',
+ 'ssh_port': 22,
+ 'ip': '10.10.10.11',
+ 'key_filename': '/root/.ssh/id_rsa',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+SRIOV_PASSWORD = [{
+ 'auth_type': 'password',
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.10.10.11',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+vfnic = "i40evf"
+PCIS = ['0000:06:00.0', '0000:06:00.1']
+
+
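+# Tests for the Sriov standalone context; SSH (and thereby virsh) interaction
+# is mocked in the same way as in the Ovsdpdk tests.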
+class SriovTestCase(unittest.TestCase):
+
+ NODES_SAMPLE_SSH = "sriov_sample_ssh_key.yaml"
+ NODES_SAMPLE_PASSWORD = "sriov_sample_password.yaml"
+
+ def setUp(self):
+ self.test_context = sriov.Sriov()
+
+ def test_construct(self):
+ self.assertIsNone(self.test_context.name)
+ self.assertIsNone(self.test_context.file_path)
+ self.assertEqual(self.test_context.nodes, [])
+ self.assertEqual(self.test_context.sriov, [])
+ self.assertFalse(self.test_context.vm_deploy)
+ self.assertTrue(self.test_context.first_run)
+ self.assertEqual(self.test_context.user, "")
+ self.assertEqual(self.test_context.ssh_ip, "")
+ self.assertEqual(self.test_context.passwd, "")
+ self.assertEqual(self.test_context.ssh_port, "")
+ self.assertEqual(self.test_context.auth_type, "")
+
+ def test_init(self):
+ self.test_context.parse_pod_and_get_data = mock.Mock()
+ self.test_context.file_path = CORRECT_FILE_PATH
+        self.assertIsNone(self.test_context.init())
+
+ def test_successful_init_with_ssh(self):
+ CORRECT_FILE_PATH = self._get_file_abspath(self.NODES_SAMPLE_SSH)
+ self.test_context.parse_pod_and_get_data(CORRECT_FILE_PATH)
+
+ def test_successful_init_with_password(self):
+ CORRECT_FILE_PATH = self._get_file_abspath(self.NODES_SAMPLE_PASSWORD)
+ self.test_context.parse_pod_and_get_data(CORRECT_FILE_PATH)
+
+ def test_unsuccessful_init(self):
+ self.assertRaises(
+ IOError,
+ lambda: self.test_context.parse_pod_and_get_data(WRONG_FILE_PATH))
+
+ @mock.patch('yardstick.network_services.utils.provision_tool', return_value="a")
+ def test_ssh_connection(self, mock_prov):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "a", ""))
+ ssh.return_value = ssh_mock
+ mock_prov.provision_tool = mock.Mock()
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.sriov = SRIOV_PASSWORD
+ self.assertIsNone(sriov_obj.ssh_remote_machine())
+
+ @mock.patch('yardstick.network_services.utils.provision_tool', return_value="a")
+ def test_ssh_connection_ssh_key(self, mock_prov):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "a", ""))
+ ssh.return_value = ssh_mock
+ mock_prov.provision_tool = mock.Mock()
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.sriov = SRIOV
+ sriov_obj.key_filename = '/root/.ssh/id_rsa'
+ self.assertIsNone(sriov_obj.ssh_remote_machine())
+
+ def test_get_nic_details(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "eth0 eth1", ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.sriov = SRIOV
+ sriov_obj.connection = ssh_mock
+ self.assertIsNotNone(sriov_obj.get_nic_details())
+
+ def test_install_req_libs(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.first_run = True
+ sriov_obj.connection = ssh_mock
+ self.assertIsNone(sriov_obj.install_req_libs())
+
+ def test_configure_nics_for_sriov(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ nic_details = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e',
+ 'vf_pci': [{}, {}]}
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock((DRIVER), return_value=(0, "0 driver", ""))
+ ssh.return_value = ssh_mock
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ for i in range(len(NIC_DETAILS['pci'])):
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ ssh_mock.execute = \
+ mock.Mock(return_value=(
+ 0,
+ "{'0':'06:02:00','1':'06:06:00'}",
+ ""))
+ sriov_obj.get_vf_datas = mock.Mock(return_value={
+ '0000:06:00.0': '0000:06:02.0'})
+ nic_details['vf_pci'][i] = sriov_obj.get_vf_datas.return_value
+ vf_pci = [[], []]
+ vf_pci[i] = sriov_obj.get_vf_datas.return_value
+ with mock.patch("yardstick.benchmark.contexts.sriov.time"):
+ self.assertIsNotNone(sriov_obj.configure_nics_for_sriov(DRIVER, NIC_DETAILS))
+
+ def test_setup_sriov_context(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ nic_details = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e',
+ 'vf_pci': [{'vf_pci': '06:02.00'}, {'vf_pci': '06:06.00'}]}
+ vf = [{'vf_pci': '06:02.00'}, {'vf_pci': '06:06.00'}]
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.sriov = SRIOV
+ blacklist = "/etc/modprobe.d/blacklist.conf"
+ self.assertEqual(vfnic, "i40evf")
+ mock_sriov = mock.Mock()
+ mock_sriov.sriov_obj.read_from_file(blacklist)
+ sriov_obj.read_from_file = mock.Mock(
+ return_value="some random text")
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ sriov_obj.configure_nics_for_sriov = mock.Mock(
+ return_value=nic_details)
+ nic_details = sriov_obj.configure_nics_for_sriov.return_value
+ self.assertEqual(vf, nic_details['vf_pci'])
+ vf = [
+ {'vf_pci': '06:02.00', 'mac': '00:00:00:00:00:0a'},
+ {'vf_pci': '06:06.00', 'mac': '00:00:00:00:00:0b'}]
+ sriov_obj.add_sriov_interface = mock.Mock()
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh_mock.put = mock.Mock()
+ sriov_obj.check_output = mock.Mock(return_value=(1, {}))
+ with mock.patch("yardstick.benchmark.contexts.sriov.time"):
+ self.assertIsNone(sriov_obj.setup_sriov_context(PCIS, nic_details, DRIVER))
+
+ def test_setup_sriov_context_vm_already_present(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ nic_details = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e',
+ 'vf_pci': [{'vf_pci': '06:02.00'}, {'vf_pci': '06:06.00'}]}
+ vf = [{'vf_pci': '06:02.00'}, {'vf_pci': '06:06.00'}]
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.sriov = SRIOV
+ blacklist = "/etc/modprobe.d/blacklist.conf"
+ self.assertEqual(vfnic, "i40evf")
+ mock_sriov = mock.Mock()
+ mock_sriov.sriov_obj.read_from_file(blacklist)
+ sriov_obj.read_from_file = mock.Mock(
+ return_value="some random text")
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ sriov_obj.configure_nics_for_sriov = mock.Mock(
+ return_value=nic_details)
+ nic_details = sriov_obj.configure_nics_for_sriov.return_value
+ self.assertEqual(vf, nic_details['vf_pci'])
+ vf = [
+ {'vf_pci': '06:02.00', 'mac': '00:00:00:00:00:0a'},
+ {'vf_pci': '06:06.00', 'mac': '00:00:00:00:00:0b'}]
+ sriov_obj.add_sriov_interface = mock.Mock()
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh_mock.put = mock.Mock()
+ sriov_obj.check_output = mock.Mock(return_value=(0, "vm1"))
+ with mock.patch("yardstick.benchmark.contexts.sriov.time"):
+ self.assertIsNone(sriov_obj.setup_sriov_context(PCIS, nic_details, DRIVER))
+
+ @mock.patch(
+ 'yardstick.benchmark.contexts.sriov',
+ return_value="Domain vm1 created from /tmp/vm_sriov.xml")
+    def test_is_vm_created(self, mock_sriov):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ mock_sriov = mock.Mock()
+ pcis = NIC_DETAILS['pci']
+ driver = NIC_DETAILS['phy_driver']
+ self.assertIsNotNone(
+ mock_sriov.sriov_obj.setup_sriov_context(
+ pcis,
+ NIC_DETAILS,
+ driver))
+
+ def test_add_sriov_interface(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+            with mock.patch("xml.etree.ElementTree.parse"), \
+                    mock.patch("re.search"), \
+                    mock.patch("xml.etree.ElementTree.SubElement"):
+                self.assertIsNone(sriov_obj.add_sriov_interface(
+                    0,
+                    "0000:06:02.0",
+                    "00:00:00:00:00:0a",
+                    "/tmp/vm_sriov.xml"))
+
+ def test_get_virtual_devices(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ pci_out = " \
+ PCI_CLASS=20000 \
+ PCI_ID=8086:154C \
+ PCI_SUBSYS_ID=8086:0000 \
+ PCI_SLOT_NAME=0000:06:02.0 \
+ MODALIAS= \
+ pci:v00008086d0000154Csv00008086sd00000000bc02sc00i00"
+ pci = "0000:06:00.0"
+ sriov_obj.check_output = mock.Mock(return_value=(0, pci_out))
+            with mock.patch("re.search"):
+                self.assertIsNotNone(sriov_obj.get_virtual_devices(pci))
+
+ def test_get_vf_datas(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.get_virtual_devices = mock.Mock(
+ return_value={'0000:06:00.0': '0000:06:02.0'})
+            with mock.patch("re.search"):
+                self.assertIsNotNone(sriov_obj.get_vf_datas(
+                    'vf_pci',
+                    {'0000:06:00.0': '0000:06:02.0'},
+                    "00:00:00:00:00:0a"))
+
+ def test_check_output(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ cmd = "command"
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ self.assertIsNotNone(sriov_obj.check_output(cmd, None))
+
+    def test_split_cpu_list_available(self):
+        with mock.patch("itertools.chain"):
+            sriov_obj = sriov.Sriov()
+            self.assertIsNotNone(sriov_obj.split_cpu_list('0,5'))
+
+    def test_split_cpu_list_null(self):
+        with mock.patch("itertools.chain"):
+            sriov_obj = sriov.Sriov()
+            self.assertEqual(sriov_obj.split_cpu_list([]), [])
+
+ def test_destroy_vm_successful(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.sriov = SRIOV
+ sriov_obj.check_output = mock.Mock(return_value=(0, "vm1"))
+            ssh_mock.execute = \
+                mock.Mock(return_value=(0, "0 i40e"))
+ self.assertIsNone(sriov_obj.destroy_vm())
+
+ def test_destroy_vm_unsuccessful(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, {}, ""))
+ ssh.return_value = ssh_mock
+ sriov_obj = sriov.Sriov()
+ sriov_obj.connection = ssh_mock
+ sriov_obj.sriov = SRIOV
+ sriov_obj.check_output = mock.Mock(return_value=(1, {}))
+ self.assertIsNone(sriov_obj.destroy_vm())
+
+ def test_read_from_file(self):
+ CORRECT_FILE_PATH = self._get_file_abspath(self.NODES_SAMPLE_PASSWORD)
+ sriov_obj = sriov.Sriov()
+ self.assertIsNotNone(sriov_obj.read_from_file(CORRECT_FILE_PATH))
+
+ def test_write_to_file(self):
+ sriov_obj = sriov.Sriov()
+ self.assertIsNone(sriov_obj.write_to_file(SAMPLE_FILE, "some content"))
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/unit/benchmark/contexts/test_standalone.py b/tests/unit/benchmark/contexts/test_standalone.py
index a6fd776e8..1fc740393 100644
--- a/tests/unit/benchmark/contexts/test_standalone.py
+++ b/tests/unit/benchmark/contexts/test_standalone.py
@@ -20,117 +20,624 @@
from __future__ import absolute_import
import os
import unittest
+import mock
from yardstick.benchmark.contexts import standalone
+from yardstick.benchmark.contexts import sriov
+from yardstick.benchmark.contexts import ovsdpdk
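+
+# Stand-in modules injected into sys.modules via mock.patch.dict() by the
+# get_context_impl tests below.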
+MOCKS = {
+ 'yardstick.benchmark.contexts': mock.MagicMock(),
+ 'yardstick.benchmark.contexts.sriov': mock.MagicMock(),
+ 'yardstick.benchmark.contexts.ovsdpdk': mock.MagicMock(),
+ 'yardstick.benchmark.contexts.standalone': mock.MagicMock(),
+}
+
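+# Patch the time module of all three context implementations at class level so
+# that any time.sleep() calls made during setup/teardown hit a mock and return
+# immediately.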
+@mock.patch('yardstick.benchmark.contexts.ovsdpdk.time')
+@mock.patch('yardstick.benchmark.contexts.standalone.time')
+@mock.patch('yardstick.benchmark.contexts.sriov.time')
class StandaloneContextTestCase(unittest.TestCase):
+ NODES_SAMPLE = "nodes_sample_new.yaml"
+ NODES_SAMPLE_SRIOV = "nodes_sample_new_sriov.yaml"
+ NODES_DUPLICATE_SAMPLE = "nodes_duplicate_sample_new.yaml"
- NODES_SAMPLE = "standalone_sample.yaml"
- NODES_DUPLICATE_SAMPLE = "standalone_duplicate_sample.yaml"
+ NODES_SAMPLE_OVSDPDK = "nodes_sample_ovs.yaml"
+ NODES_SAMPLE_OVSDPDK_ROLE = "nodes_sample_ovsdpdk.yaml"
+ NODES_DUPLICATE_OVSDPDK = "nodes_duplicate_sample_ovs.yaml"
def setUp(self):
self.test_context = standalone.StandaloneContext()
- def test_construct(self):
-
+ def test_construct(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
self.assertIsNone(self.test_context.name)
self.assertIsNone(self.test_context.file_path)
self.assertEqual(self.test_context.nodes, [])
self.assertEqual(self.test_context.nfvi_node, [])
- def test_unsuccessful_init(self):
-
+ def test_unsuccessful_init(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
attrs = {
'name': 'foo',
'file': self._get_file_abspath("error_file")
}
-
self.assertRaises(IOError, self.test_context.init, attrs)
- def test_successful_init(self):
-
- attrs = {
+ def test_successful_init_sriov(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs_sriov = {
+ 'name': 'sriov',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.Mock()
+ self.test_context.init(attrs_sriov)
+ self.assertEqual(self.test_context.name, "sriov")
+ self.assertEqual(len(self.test_context.nodes), 2)
+ self.assertEqual(len(self.test_context.nfvi_node), 2)
+ self.assertEqual(self.test_context.nfvi_node[0]["name"], "sriov")
+
+ def test_successful_init_ovs(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
+ attrs_ovs = {
+ 'name': 'ovs',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.Mock()
+ self.test_context.init(attrs_ovs)
+ self.assertEqual(self.test_context.name, "ovs")
+ self.assertEqual(len(self.test_context.nodes), 2)
+ self.assertEqual(len(self.test_context.nfvi_node), 2)
+ self.assertEqual(self.test_context.nfvi_node[0]["name"], "ovs")
+
+ def test__get_server_with_dic_attr_name_sriov(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs_sriov = {
'name': 'foo',
'file': self._get_file_abspath(self.NODES_SAMPLE)
}
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.init(attrs_sriov)
+ attr_name = {'name': 'foo.bar'}
+ result = self.test_context._get_server(attr_name)
+ self.assertEqual(result, None)
- self.test_context.init(attrs)
-
- self.assertEqual(self.test_context.name, "foo")
- self.assertEqual(len(self.test_context.nodes), 3)
- self.assertEqual(len(self.test_context.nfvi_node), 1)
- self.assertEqual(self.test_context.nfvi_node[0]["name"], "node2")
-
- def test__get_server_with_dic_attr_name(self):
+ def test__get_server_with_dic_attr_name_ovs(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs_ovs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.init(attrs_ovs)
+ attr_name = {'name': 'foo.bar'}
+ result = self.test_context._get_server(attr_name)
+ self.assertEqual(result, None)
+ def test__get_server_not_found_sriov(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
attrs = {
'name': 'foo',
'file': self._get_file_abspath(self.NODES_SAMPLE)
}
-
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
self.test_context.init(attrs)
-
- attr_name = {'name': 'foo.bar'}
+ attr_name = 'bar.foo'
result = self.test_context._get_server(attr_name)
-
self.assertEqual(result, None)
- def test__get_server_not_found(self):
-
+ def test__get_server_not_found_ovs(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
attrs = {
'name': 'foo',
- 'file': self._get_file_abspath(self.NODES_SAMPLE)
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
}
-
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
self.test_context.init(attrs)
-
attr_name = 'bar.foo'
result = self.test_context._get_server(attr_name)
-
self.assertEqual(result, None)
- def test__get_server_duplicate(self):
+
+ def test__get_server_duplicate_sriov(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
attrs = {
'name': 'foo',
'file': self._get_file_abspath(self.NODES_DUPLICATE_SAMPLE)
}
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.Mock(return_value="sriov")
+ self.test_context.init(attrs)
+ attr_name = 'sriov.foo'
+ # self.test_context.name = "sriov"
+ self.assertRaises(ValueError, self.test_context._get_server, attr_name)
+
+ def test__get_server_duplicate_ovs(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_DUPLICATE_OVSDPDK)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.get_nfvi_obj = mock.Mock(return_value="OvsDpdk")
+ self.test_context.init(attrs)
+ attr_name = 'ovs.foo'
+ self.assertRaises(
+ ValueError,
+ self.test_context._get_server,
+ attr_name)
+ def test__get_server_found_sriov(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_SRIOV)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.get_nfvi_obj = mock.Mock(return_value="OvsDpdk")
self.test_context.init(attrs)
+ attr_name = 'sriov.foo'
+ result = self.test_context._get_server(attr_name)
+ self.assertEqual(result['ip'], '10.123.123.122')
+ self.assertEqual(result['name'], 'sriov.foo')
+ self.assertEqual(result['user'], 'root')
- attr_name = 'node2.foo'
+ def test__get_server_found_ovs(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK_ROLE)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.Mock(return_value="OvsDpdk")
+ self.test_context.init(attrs)
+ attr_name = 'ovs.foo'
+ result = self.test_context._get_server(attr_name)
+ self.assertEqual(result['ip'], '10.223.197.222')
+ self.assertEqual(result['name'], 'ovs.foo')
+ self.assertEqual(result['user'], 'root')
- self.assertRaises(ValueError, self.test_context._get_server, attr_name)
+ def test__deploy_unsuccessful(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
+        self.test_context.vm_deploy = False
+        self.assertIsNone(self.test_context.deploy())
- def test__get_server_found(self):
+ def test__deploy_sriov_firsttime(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ MYSRIOV = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.vm_deploy = True
+
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.nfvi_obj.sriov = MYSRIOV
+ self.test_context.nfvi_obj.ssh_remote_machine = mock.Mock()
+ self.test_context.nfvi_obj.first_run = True
+ self.test_context.nfvi_obj.install_req_libs()
+ self.test_context.nfvi_obj.get_nic_details = mock.Mock()
+ PORTS = ['0000:06:00.0', '0000:06:00.1']
+ NIC_DETAILS = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+ DRIVER = 'i40e'
+ result = self.test_context.nfvi_obj.setup_sriov_context(
+ PORTS,
+ NIC_DETAILS,
+ DRIVER)
+ print("{0}".format(result))
+ self.assertIsNone(self.test_context.deploy())
+ def test__deploy_sriov_notfirsttime(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
attrs = {
'name': 'foo',
'file': self._get_file_abspath(self.NODES_SAMPLE)
}
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ MYSRIOV = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.vm_deploy = True
+ self.test_context.get_nfvi_obj = mock.MagicMock()
self.test_context.init(attrs)
+ self.test_context.nfvi_obj.sriov = MYSRIOV
+ self.test_context.nfvi_obj.ssh_remote_machine = mock.Mock()
+ self.test_context.nfvi_obj.first_run = False
+ self.test_context.nfvi_obj.get_nic_details = mock.Mock()
+ PORTS = ['0000:06:00.0', '0000:06:00.1']
+ NIC_DETAILS = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+ DRIVER = 'i40e'
+ result = self.test_context.nfvi_obj.setup_sriov_context(
+ PORTS,
+ NIC_DETAILS,
+ DRIVER)
+ print("{0}".format(result))
+ self.assertIsNone(self.test_context.deploy())
- attr_name = 'node1.foo'
- result = self.test_context._get_server(attr_name)
+ def test__deploy_ovs_firsttime(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
+ }
- self.assertEqual(result['ip'], '1.1.1.1')
- self.assertEqual(result['name'], 'node1.foo')
- self.assertEqual(result['user'], 'root')
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ MYOVS = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'flow': ['ovs-ofctl add-flow br0 in_port=1,action=output:3',
+                     'ovs-ofctl add-flow br0 in_port=3,action=output:1',
+                     'ovs-ofctl add-flow br0 in_port=4,action=output:2',
+ 'ovs-ofctl add-flow br0 in_port=2,action=output:4'],
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.vm_deploy = True
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.ovs = MYOVS
+ self.test_context.nfvi_obj.ssh_remote_machine = mock.Mock()
+ self.test_context.nfvi_obj.first_run = True
+ self.test_context.nfvi_obj.install_req_libs()
+ self.test_context.nfvi_obj.get_nic_details = mock.Mock()
+ PORTS = ['0000:06:00.0', '0000:06:00.1']
+ NIC_DETAILS = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vports_mac': ['00:00:00:00:00:05', '00:00:00:00:00:06'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+ DRIVER = 'i40e'
+
+ self.test_context.nfvi_obj.setup_ovs = mock.Mock()
+ self.test_context.nfvi_obj.start_ovs_serverswitch = mock.Mock()
+ self.test_context.nfvi_obj.setup_ovs_bridge = mock.Mock()
+ self.test_context.nfvi_obj.add_oflows = mock.Mock()
+
+ # self.test_context.nfvi_obj.setup_ovs(PORTS)
+ # self.test_context.nfvi_obj.start_ovs_serverswitch()
+ # self.test_context.nfvi_obj.setup_ovs_bridge()
+ # self.test_context.nfvi_obj.add_oflows()
+
+ result = self.test_context.nfvi_obj.setup_ovs_context(
+ PORTS,
+ NIC_DETAILS,
+ DRIVER)
+ print("{0}".format(result))
+ self.assertIsNone(self.test_context.deploy())
- def test_deploy(self):
+ def test__deploy_ovs_notfirsttime(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ MYOVS = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'flow': ['ovs-ofctl add-flow br0 in_port=1,action=output:3',
+                     'ovs-ofctl add-flow br0 in_port=3,action=output:1',
+                     'ovs-ofctl add-flow br0 in_port=4,action=output:2',
+ 'ovs-ofctl add-flow br0 in_port=2,action=output:4'],
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.vm_deploy = True
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.ovs = MYOVS
+ self.test_context.nfvi_obj.ssh_remote_machine = mock.Mock()
+ self.test_context.nfvi_obj.first_run = False
+ self.test_context.nfvi_obj.get_nic_details = mock.Mock()
+ PORTS = ['0000:06:00.0', '0000:06:00.1']
+ NIC_DETAILS = {
+ 'interface': {0: 'enp6s0f0', 1: 'enp6s0f1'},
+ 'vports_mac': ['00:00:00:00:00:05', '00:00:00:00:00:06'],
+ 'pci': ['0000:06:00.0', '0000:06:00.1'],
+ 'phy_driver': 'i40e'}
+ DRIVER = 'i40e'
+
+ self.test_context.nfvi_obj.setup_ovs(PORTS)
+ self.test_context.nfvi_obj.start_ovs_serverswitch()
+ self.test_context.nfvi_obj.setup_ovs_bridge()
+ self.test_context.nfvi_obj.add_oflows()
+
+ result = self.test_context.nfvi_obj.setup_ovs_context(
+ PORTS,
+ NIC_DETAILS,
+ DRIVER)
+ print("{0}".format(result))
self.assertIsNone(self.test_context.deploy())
- def test_undeploy(self):
+ def test_undeploy_sriov(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE)
+ }
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.nfvi_obj.destroy_vm = mock.Mock()
+ self.assertIsNone(self.test_context.undeploy())
+
+ def test_undeploy_ovs(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
+ attrs = {
+ 'name': 'foo',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
+ }
+
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.nfvi_obj.destroy_vm = mock.Mock()
self.assertIsNone(self.test_context.undeploy())
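+
+    # get_nfvi_obj() should hand back the role-specific implementation
+    # (sriov.Sriov or ovsdpdk.Ovsdpdk) chosen by get_context_impl().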
+ def test_get_nfvi_obj_sriov(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
+ with mock.patch('yardstick.benchmark.contexts.sriov'):
+ attrs = {
+ 'name': 'sriov',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE)
+ }
+ self.test_context.init(attrs)
+ self.test_context.nfvi_obj.file_path = self._get_file_abspath(
+ self.NODES_SAMPLE)
+ self.test_context.nfvi_node = [{
+ 'name': 'sriov',
+ 'vf_macs': ['00:00:00:71:7d:25', '00:00:00:71:7d:26'],
+ 'ip': '10.223.197.140',
+ 'role': 'Sriov',
+ 'user': 'root',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'intel123',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.get_context_impl = mock.Mock(
+ return_value=sriov.Sriov)
+ self.assertIsNotNone(self.test_context.get_nfvi_obj())
+
+ def test_get_nfvi_obj_ovs(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
+ with mock.patch('yardstick.benchmark.contexts.ovsdpdk'):
+ attrs = {
+ 'name': 'ovs',
+ 'file': self._get_file_abspath(self.NODES_SAMPLE_OVSDPDK)
+ }
+ self.test_context.init(attrs)
+ self.test_context.nfvi_obj.file_path = self._get_file_abspath(
+ self.NODES_SAMPLE)
+ self.test_context.nfvi_node = [{
+ 'name': 'ovs',
+ 'vports_mac': ['00:00:00:00:00:03', '00:00:00:00:00:04'],
+ 'ip': '10.223.197.140',
+ 'role': 'Ovsdpdk',
+ 'user': 'root',
+ 'vpath': '/usr/local/',
+ 'images': '/var/lib/libvirt/images/ubuntu1.img',
+ 'phy_driver': 'i40e',
+ 'password': 'password',
+ 'phy_ports': ['0000:06:00.0', '0000:06:00.1']}]
+ self.test_context.get_nfvi_obj = mock.MagicMock()
+ self.test_context.init(attrs)
+ self.test_context.get_context_impl = mock.Mock(
+ return_value=ovsdpdk.Ovsdpdk)
+ self.assertIsNotNone(self.test_context.get_nfvi_obj())
+
+ def test_get_context_impl_correct_obj(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ with mock.patch.dict("sys.modules", MOCKS):
+ self.assertIsNotNone(self.test_context.get_context_impl('Sriov'))
+
+ def test_get_context_impl_wrong_obj(self, mock_sriov_time, mock_standlalone_time,
+ mock_ovsdpdk_time):
+ with mock.patch.dict("sys.modules", MOCKS):
+ self.assertRaises(
+ ValueError,
+ lambda: self.test_context.get_context_impl('wrong_object'))
+
def _get_file_abspath(self, filename):
curr_path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(curr_path, filename)
return file_path
- def test__get_network(self):
+ def test__get_network(self, mock_sriov_time, mock_standlalone_time, mock_ovsdpdk_time):
network1 = {
'name': 'net_1',
'vld_id': 'vld111',
@@ -174,3 +681,6 @@ class StandaloneContextTestCase(unittest.TestCase):
expected = network1
result = self.test_context._get_network(attr_name)
self.assertDictEqual(result, expected)
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/tests/unit/benchmark/scenarios/availability/test_util.py b/tests/unit/benchmark/scenarios/availability/test_util.py
index bb0e6bc79..2e4fff417 100644
--- a/tests/unit/benchmark/scenarios/availability/test_util.py
+++ b/tests/unit/benchmark/scenarios/availability/test_util.py
@@ -19,6 +19,25 @@ from yardstick.benchmark.scenarios.availability import util
@mock.patch('yardstick.benchmark.scenarios.availability.util.subprocess')
class ExecuteShellTestCase(unittest.TestCase):
+ def setUp(self):
+ self.param_config = {'serviceName': '$serviceName', 'value': 1}
+ self.intermediate_variables = {'$serviceName': 'nova-api'}
+ self.std_output = '| id | 1 |'
+        self.cmd_config = {'cmd': 'ls', 'param': '-a'}
+
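+    # build_shell_command substitutes intermediate variables such as
+    # $serviceName, read_stdout_item extracts a value from tabular CLI output,
+    # and buildshellparams composes the '/bin/bash -s' parameter string.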
+    def test_util_build_command_shell(self, mock_subprocess):
+        result = util.build_shell_command(self.param_config, True,
+                                          self.intermediate_variables)
+        self.assertIn("nova-api", result)
+
+    def test_read_stdout_item(self, mock_subprocess):
+        result = util.read_stdout_item(self.std_output, 'id')
+        self.assertEqual('1', result)
+
+    def test_buildshellparams(self, mock_subprocess):
+        result = util.buildshellparams(self.cmd_config, True)
+        self.assertEqual('/bin/bash -s {0} {1}', result)
+
def test__fun_execute_shell_command_successful(self, mock_subprocess):
cmd = "env"
mock_subprocess.check_output.return_value = (0, 'unittest')
diff --git a/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index d4eb1246f..2914c8e02 100644
--- a/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -138,6 +138,7 @@ class PktgenTestCase(unittest.TestCase):
p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
+ expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
def test_pktgen_successful_sla(self, mock_ssh):
@@ -164,6 +165,7 @@ class PktgenTestCase(unittest.TestCase):
p.run(result)
expected_result = jsonutils.loads(sample_output)
expected_result["packets_received"] = 149300
+ expected_result["packetsize"] = 60
self.assertEqual(result, expected_result)
def test_pktgen_unsuccessful_sla(self, mock_ssh):
@@ -204,6 +206,538 @@ class PktgenTestCase(unittest.TestCase):
mock_ssh.SSH.from_node().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, p.run, result)
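+
+    # The tests below exercise the multiqueue support added to pktgen: vNIC
+    # driver detection, queue-count discovery, OVS multiqueue enablement,
+    # IRQ affinity mapping and the irqbalance toggle. SSH is mocked throughout.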
+ def test_pktgen_get_vnic_driver_name(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, 'ixgbevf', '')
+
+ vnic_driver_name = p._get_vnic_driver_name()
+ self.assertEqual(vnic_driver_name, 'ixgbevf')
+
+ def test_pktgen_unsuccessful_get_vnic_driver_name(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._get_vnic_driver_name)
+
+ def test_pktgen_get_sriov_queue_number(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+
+ p.queue_number = p._get_sriov_queue_number()
+ self.assertEqual(p.queue_number, 2)
+
+ def test_pktgen_unsuccessful_get_sriov_queue_number(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._get_sriov_queue_number)
+
+ def test_pktgen_get_available_queue_number(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+
+ p._get_available_queue_number()
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "sudo ethtool -l eth0 | grep Combined | head -1 |" \
+ "awk '{printf $2}'")
+
+ def test_pktgen_unsuccessful_get_available_queue_number(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._get_available_queue_number)
+
+ def test_pktgen_get_usable_queue_number(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+
+ p._get_usable_queue_number()
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "sudo ethtool -l eth0 | grep Combined | tail -1 |" \
+ "awk '{printf $2}'")
+
+ def test_pktgen_unsuccessful_get_usable_queue_number(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._get_usable_queue_number)
+
+ def test_pktgen_enable_ovs_multiqueue(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = 1
+ p._get_usable_queue_number = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = 4
+ p._get_available_queue_number = mock_result2
+
+ p.queue_number = p._enable_ovs_multiqueue()
+ self.assertEqual(p.queue_number, 4)
+
+ def test_pktgen_enable_ovs_multiqueue_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = 1
+ p._get_usable_queue_number = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = 1
+ p._get_available_queue_number = mock_result2
+
+ p.queue_number = p._enable_ovs_multiqueue()
+ self.assertEqual(p.queue_number, 1)
+
+ def test_pktgen_unsuccessful_enable_ovs_multiqueue(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = 1
+ p._get_usable_queue_number = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = 4
+ p._get_available_queue_number = mock_result2
+
+ self.assertRaises(RuntimeError, p._enable_ovs_multiqueue)
+
+ def test_pktgen_setup_irqmapping_ovs(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+
+ p._setup_irqmapping_ovs(4)
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "echo 8 | sudo tee /proc/irq/10/smp_affinity")
+
+ def test_pktgen_setup_irqmapping_ovs_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+
+ p._setup_irqmapping_ovs(1)
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "echo 1 | sudo tee /proc/irq/10/smp_affinity")
+
+ def test_pktgen_unsuccessful_setup_irqmapping_ovs(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._setup_irqmapping_ovs, 4)
+
+ def test_pktgen_unsuccessful_setup_irqmapping_ovs_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._setup_irqmapping_ovs, 1)
+
+ def test_pktgen_setup_irqmapping_sriov(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+
+ p._setup_irqmapping_sriov(2)
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "echo 2 | sudo tee /proc/irq/10/smp_affinity")
+
+ def test_pktgen_setup_irqmapping_sriov_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '10', '')
+
+ p._setup_irqmapping_sriov(1)
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "echo 1 | sudo tee /proc/irq/10/smp_affinity")
+
+ def test_pktgen_unsuccessful_setup_irqmapping_sriov(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._setup_irqmapping_sriov, 2)
+
+ def test_pktgen_unsuccessful_setup_irqmapping_sriov_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._setup_irqmapping_sriov, 1)
+
+ def test_pktgen_is_irqbalance_disabled(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ p._is_irqbalance_disabled()
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "grep ENABLED /etc/default/irqbalance")
+
+ def test_pktgen_unsuccessful_is_irqbalance_disabled(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._is_irqbalance_disabled)
+
+ def test_pktgen_disable_irqbalance(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
+
+ p._disable_irqbalance()
+
+ mock_ssh.SSH.from_node().execute.assert_called_with(
+ "sudo service irqbalance disable")
+
+ def test_pktgen_unsuccessful_disable_irqbalance(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+
+ self.assertRaises(RuntimeError, p._disable_irqbalance)
+
+ def test_pktgen_multiqueue_setup_ovs(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'multiqueue': True},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '4', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = False
+ p._is_irqbalance_disabled = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = "virtio_net"
+ p._get_vnic_driver_name = mock_result2
+
+ mock_result3 = mock.Mock()
+ mock_result3.return_value = 1
+ p._get_usable_queue_number = mock_result3
+
+ mock_result4 = mock.Mock()
+ mock_result4.return_value = 4
+ p._get_available_queue_number = mock_result4
+
+ p.multiqueue_setup()
+
+ self.assertEqual(p.queue_number, 4)
+
+ def test_pktgen_multiqueue_setup_ovs_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'multiqueue': True},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = False
+ p._is_irqbalance_disabled = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = "virtio_net"
+ p._get_vnic_driver_name = mock_result2
+
+ mock_result3 = mock.Mock()
+ mock_result3.return_value = 1
+ p._get_usable_queue_number = mock_result3
+
+ mock_result4 = mock.Mock()
+ mock_result4.return_value = 1
+ p._get_available_queue_number = mock_result4
+
+ p.multiqueue_setup()
+
+ self.assertEqual(p.queue_number, 1)
+
+ def test_pktgen_multiqueue_setup_sriov(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'multiqueue': True},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '2', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = False
+ p._is_irqbalance_disabled = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = "ixgbevf"
+ p._get_vnic_driver_name = mock_result2
+
+ p.multiqueue_setup()
+
+ self.assertEqual(p.queue_number, 2)
+
+ def test_pktgen_multiqueue_setup_sriov_1q(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'multiqueue': True},
+ }
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_ssh.SSH.from_node().execute.return_value = (0, '1', '')
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = False
+ p._is_irqbalance_disabled = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = "ixgbevf"
+ p._get_vnic_driver_name = mock_result2
+
+ p.multiqueue_setup()
+
+ self.assertEqual(p.queue_number, 1)
+
+ def test_pktgen_run_with_setup_done(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10, 'duration': 20, 'multiqueue': True},
+ 'sla': {'max_ppm': 1}
+ }
+ result = {}
+ p = pktgen.Pktgen(args, self.ctx)
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ p.setup_done = True
+ p.multiqueue_setup_done = True
+
+ mock_iptables_result = mock.Mock()
+ mock_iptables_result.return_value = 149300
+ p._iptables_get_result = mock_iptables_result
+
+ sample_output = '{"packets_per_second": 9753, "errors": 0, \
+ "packets_sent": 149300, "flows": 110}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ p.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ expected_result["packets_received"] = 149300
+ expected_result["packetsize"] = 60
+ self.assertEqual(result, expected_result)
+
+ def test_pktgen_run_with_ovs_multiqueue(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10, 'duration': 20, 'multiqueue': True},
+ 'sla': {'max_ppm': 1}
+ }
+ result = {}
+
+ p = pktgen.Pktgen(args, self.ctx)
+
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_result = mock.Mock()
+ mock_result.return_value = "virtio_net"
+ p._get_vnic_driver_name = mock_result
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = 1
+ p._get_usable_queue_number = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = 4
+ p._get_available_queue_number = mock_result2
+
+ mock_result3 = mock.Mock()
+ mock_result3.return_value = 4
+ p._enable_ovs_multiqueue = mock_result3
+
+ mock_result4 = mock.Mock()
+ p._setup_irqmapping_ovs = mock_result4
+
+ mock_iptables_result = mock.Mock()
+ mock_iptables_result.return_value = 149300
+ p._iptables_get_result = mock_iptables_result
+
+ sample_output = '{"packets_per_second": 9753, "errors": 0, \
+ "packets_sent": 149300, "flows": 110}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ p.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ expected_result["packets_received"] = 149300
+ expected_result["packetsize"] = 60
+ self.assertEqual(result, expected_result)
+
+ def test_pktgen_run_with_sriov_multiqueue(self, mock_ssh):
+ args = {
+ 'options': {'packetsize': 60, 'number_of_ports': 10, 'duration': 20, 'multiqueue': True},
+ 'sla': {'max_ppm': 1}
+ }
+ result = {}
+
+ p = pktgen.Pktgen(args, self.ctx)
+
+ p.server = mock_ssh.SSH.from_node()
+ p.client = mock_ssh.SSH.from_node()
+
+ mock_result1 = mock.Mock()
+ mock_result1.return_value = "ixgbevf"
+ p._get_vnic_driver_name = mock_result1
+
+ mock_result2 = mock.Mock()
+ mock_result2.return_value = 2
+ p._get_sriov_queue_number = mock_result2
+
+ mock_result3 = mock.Mock()
+ p._setup_irqmapping_sriov = mock_result3
+
+ mock_iptables_result = mock.Mock()
+ mock_iptables_result.return_value = 149300
+ p._iptables_get_result = mock_iptables_result
+
+ sample_output = '{"packets_per_second": 9753, "errors": 0, \
+ "packets_sent": 149300, "flows": 110}'
+ mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
+
+ p.run(result)
+ expected_result = jsonutils.loads(sample_output)
+ expected_result["packets_received"] = 149300
+ expected_result["packetsize"] = 60
+ self.assertEqual(result, expected_result)
def main():
unittest.main()
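The pktgen tests above all follow the same mocking pattern: the scenario module's ssh import is replaced by a MagicMock, so every remote call returns a canned (status, stdout, stderr) tuple. A minimal, self-contained sketch of that pattern, assuming the patch target yardstick.benchmark.scenarios.networking.pktgen.ssh and a hypothetical two-node context (the real tests build their context in setUp()):

import unittest
import mock

from yardstick.benchmark.scenarios.networking import pktgen


@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
class PktgenMockSketch(unittest.TestCase):
    # hypothetical context used only for this sketch
    CTX = {
        'host': {'ip': '172.16.0.137', 'user': 'root', 'key_filename': 'mykey.key'},
        'target': {'ip': '172.16.0.138', 'user': 'root', 'key_filename': 'mykey.key'},
    }

    def test_disable_irqbalance_success(self, mock_ssh):
        scenario = pktgen.Pktgen({'options': {'packetsize': 60}}, self.CTX)
        scenario.server = mock_ssh.SSH.from_node()
        scenario.client = mock_ssh.SSH.from_node()
        # every execute() call reports success with empty stdout/stderr
        mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
        scenario._disable_irqbalance()

if __name__ == '__main__':
    unittest.main()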
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 0a94dd976..d5349eab5 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -152,9 +152,12 @@ class HeatContext(Context):
template.add_network(network.stack_name,
network.physical_network,
network.provider,
- network.segmentation_id)
+ network.segmentation_id,
+ network.port_security_enabled)
template.add_subnet(network.subnet_stack_name, network.stack_name,
- network.subnet_cidr)
+ network.subnet_cidr,
+ network.enable_dhcp,
+ network.gateway_ip)
if network.router:
template.add_router(network.router.stack_name,
diff --git a/yardstick/benchmark/contexts/model.py b/yardstick/benchmark/contexts/model.py
index 06538d8a9..6601ecf3b 100644
--- a/yardstick/benchmark/contexts/model.py
+++ b/yardstick/benchmark/contexts/model.py
@@ -104,11 +104,24 @@ class Network(Object):
self.stack_name = context.name + "-" + self.name
self.subnet_stack_name = self.stack_name + "-subnet"
self.subnet_cidr = attrs.get('cidr', '10.0.1.0/24')
+ self.enable_dhcp = attrs.get('enable_dhcp', 'true')
self.router = None
self.physical_network = attrs.get('physical_network', 'physnet1')
self.provider = attrs.get('provider')
self.segmentation_id = attrs.get('segmentation_id')
self.network_type = attrs.get('network_type')
+ self.port_security_enabled = attrs.get('port_security_enabled', True)
+ self.allowed_address_pairs = attrs.get('allowed_address_pairs', [])
+ try:
+ # we require 'null' or '' to disable setting gateway_ip
+ self.gateway_ip = attrs['gateway_ip']
+ except KeyError:
+ # default to explicit None
+ self.gateway_ip = None
+ else:
+ # null is None in YAML, so we have to convert back to string
+ if self.gateway_ip is None:
+ self.gateway_ip = "null"
if "external_network" in attrs:
self.router = Router("router", self.name,
@@ -234,10 +247,16 @@ class Server(Object): # pragma: no cover
for network in networks:
port_name = server_name + "-" + network.name + "-port"
self.ports[network.name] = {"stack_name": port_name}
- template.add_port(port_name, network.stack_name,
- network.subnet_stack_name,
- sec_group_id=self.secgroup_name,
- provider=network.provider)
+ # we can't use secgroups if port_security_enabled is False
+ if network.port_security_enabled:
+ sec_group_id = self.secgroup_name
+ else:
+ sec_group_id = None
+ # don't refactor to pass in network object, that causes JSON
+ # circular ref encode errors
+ template.add_port(port_name, network.stack_name, network.subnet_stack_name,
+ sec_group_id=sec_group_id, provider=network.provider,
+ allowed_address_pairs=network.allowed_address_pairs)
port_name_list.append(port_name)
if self.floating_ip:
@@ -248,7 +267,7 @@ class Server(Object): # pragma: no cover
external_network,
port_name,
network.router.stack_if_name,
- self.secgroup_name)
+ sec_group_id)
self.floating_ip_assoc["stack_name"] = \
server_name + "-fip-assoc"
template.add_floating_ip_association(
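The Network changes above handle gateway_ip in three distinct ways: key absent (let Heat choose), key explicitly null in the YAML (disable the gateway), and key set to an address. A small standalone sketch of that decision, not part of the patch, with attrs standing in for the parsed context YAML:

def resolve_gateway_ip(attrs):
    try:
        gateway_ip = attrs['gateway_ip']   # key present in the YAML
    except KeyError:
        return None                        # key absent: keep the Heat default
    # YAML null parses to Python None; convert back to the string "null"
    # so the template explicitly disables the gateway
    return "null" if gateway_ip is None else gateway_ip

assert resolve_gateway_ip({}) is None
assert resolve_gateway_ip({'gateway_ip': None}) == "null"
assert resolve_gateway_ip({'gateway_ip': '10.0.1.1'}) == '10.0.1.1'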
diff --git a/yardstick/benchmark/contexts/ovsdpdk.py b/yardstick/benchmark/contexts/ovsdpdk.py
new file mode 100644
index 000000000..cf5529d89
--- /dev/null
+++ b/yardstick/benchmark/contexts/ovsdpdk.py
@@ -0,0 +1,369 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import yaml
+import time
+import glob
+import itertools
+import logging
+from yardstick import ssh
+from yardstick.benchmark.contexts.standalone import StandaloneContext
+
+BIN_PATH = "/opt/isb_bin/"
+DPDK_NIC_BIND = "dpdk_nic_bind.py"
+
+log = logging.getLogger(__name__)
+
+VM_TEMPLATE = """
+<domain type='kvm'>
+ <name>vm1</name>
+ <uuid>18230c0c-635d-4c50-b2dc-a213d30acb34</uuid>
+ <memory unit='KiB'>20971520</memory>
+ <currentMemory unit="KiB">20971520</currentMemory>
+ <memoryBacking>
+ <hugepages/>
+ </memoryBacking>
+ <vcpu placement='static'>20</vcpu>
+ <os>
+ <type arch='x86_64' machine='pc'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ </features>
+ <cpu match="exact" mode='host-model'>
+ <model fallback='allow'/>
+ <topology sockets='1' cores='10' threads='2'/>
+ </cpu>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>destroy</on_crash>
+ <devices>
+ <emulator>/usr/bin/qemu-system-x86_64</emulator>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none'/>
+ <source file="{vm_image}"/>
+ <target dev='vda' bus='virtio'/>
+ <address bus="0x00" domain="0x0000"
+ function="0x0" slot="0x04" type="pci" />
+ </disk>
+ <!--disk type='dir' device='disk'>
+ <driver name='qemu' type='fat'/>
+ <source dir='/opt/isb_bin/dpdk'/>
+ <target dev='vdb' bus='virtio'/>
+ <readonly/>
+ </disk-->
+ <interface type="bridge">
+ <mac address="00:00:00:ab:cd:ef" />
+ <source bridge="br-int" />
+ </interface>
+ <interface type='vhostuser'>
+ <mac address='00:00:00:00:00:01'/>
+ <source type='unix' path='/usr/local/var/run/openvswitch/dpdkvhostuser0' mode='client'/>
+ <model type='virtio'/>
+ <driver queues='4'>
+ <host mrg_rxbuf='off'/>
+ </driver>
+ </interface>
+ <interface type='vhostuser'>
+ <mac address='00:00:00:00:00:02'/>
+ <source type='unix' path='/usr/local/var/run/openvswitch/dpdkvhostuser1' mode='client'/>
+ <model type='virtio'/>
+ <driver queues='4'>
+ <host mrg_rxbuf='off'/>
+ </driver>
+ </interface>
+ <serial type='pty'>
+ <target port='0'/>
+ </serial>
+ <console type='pty'>
+ <target type='serial' port='0'/>
+ </console>
+ <graphics autoport="yes" listen="0.0.0.0" port="1" type="vnc" />
+ </devices>
+</domain>
+"""
+
+
+class Ovsdpdk(StandaloneContext):
+ def __init__(self):
+ self.name = None
+ self.file_path = None
+ self.nodes = []
+ self.vm_deploy = False
+ self.ovs = []
+ self.first_run = True
+ self.dpdk_nic_bind = BIN_PATH + DPDK_NIC_BIND
+ self.user = ""
+ self.ssh_ip = ""
+ self.passwd = ""
+ self.ssh_port = ""
+ self.auth_type = ""
+
+ def init(self):
+ '''initializes itself'''
+ log.debug("In init")
+ self.parse_pod_and_get_data(self.file_path)
+
+ def parse_pod_and_get_data(self, file_path):
+ self.file_path = file_path
+ print("parsing pod file: {0}".format(self.file_path))
+ try:
+ with open(self.file_path) as stream:
+ cfg = yaml.load(stream)
+ except IOError:
+ print("File {0} does not exist".format(self.file_path))
+ raise
+
+ self.ovs.extend([node for node in cfg["nodes"]
+ if node["role"] == "Ovsdpdk"])
+ self.user = self.ovs[0]['user']
+ self.ssh_ip = self.ovs[0]['ip']
+ if self.ovs[0]['auth_type'] == "password":
+ self.passwd = self.ovs[0]['password']
+ else:
+ self.ssh_port = self.ovs[0]['ssh_port']
+ self.key_filename = self.ovs[0]['key_filename']
+
+ def ssh_remote_machine(self):
+ if self.ovs[0]['auth_type'] == "password":
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ password=self.passwd)
+ self.connection.wait()
+ else:
+ if self.ssh_port is not None:
+ ssh_port = self.ssh_port
+ else:
+ ssh_port = ssh.DEFAULT_PORT
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ port=ssh_port,
+ key_filename=self.key_filename)
+ self.connection.wait()
+
+ def get_nic_details(self):
+ nic_details = {}
+ nic_details['interface'] = {}
+ nic_details['pci'] = self.ovs[0]['phy_ports']
+ nic_details['phy_driver'] = self.ovs[0]['phy_driver']
+ nic_details['vports_mac'] = self.ovs[0]['vports_mac']
+ # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
+ for i, _ in enumerate(nic_details['pci']):
+ err, out, _ = self.connection.execute(
+ "{dpdk_nic_bind} --force -b {driver} {port}".format(
+ dpdk_nic_bind=self.dpdk_nic_bind,
+ driver=self.ovs[0]['phy_driver'],
+ port=self.ovs[0]['phy_ports'][i]))
+ err, out, _ = self.connection.execute(
+ "lshw -c network -businfo | grep '{port}'".format(
+ port=self.ovs[0]['phy_ports'][i]))
+ a = out.split()[1]
+ err, out, _ = self.connection.execute(
+ "ip -s link show {interface}".format(
+ interface=out.split()[1]))
+ nic_details['interface'][i] = str(a)
+ print("{0}".format(nic_details))
+ return nic_details
+
+ def install_req_libs(self):
+ if self.first_run:
+ err, out, _ = self.connection.execute("apt-get update")
+ print("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install qemu-kvm libvirt-bin")
+ print("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install libvirt-dev bridge-utils numactl")
+ print("{0}".format(out))
+ self.first_run = False
+
+ def setup_ovs(self, vpcis):
+ self.connection.execute("/usr/bin/chmod 0666 /dev/vfio/*")
+ self.connection.execute("/usr/bin/chmod a+x /dev/vfio")
+ self.connection.execute("pkill -9 ovs")
+ self.connection.execute("ps -ef | grep ovs | grep -v grep | "
+ "awk '{print $2}' | xargs -r kill -9")
+ self.connection.execute("killall -r 'ovs*'")
+ self.connection.execute(
+ "mkdir -p {0}/etc/openvswitch".format(self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "mkdir -p {0}/var/run/openvswitch".format(self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "rm {0}/etc/openvswitch/conf.db".format(self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "ovsdb-tool create {0}/etc/openvswitch/conf.db "
+ "{0}/share/openvswitch/"
+ "vswitch.ovsschema".format(self.ovs[0]["vpath"]))
+ self.connection.execute("modprobe vfio-pci")
+ self.connection.execute("chmod a+x /dev/vfio")
+ self.connection.execute("chmod 0666 /dev/vfio/*")
+ for vpci in vpcis:
+ self.connection.execute(
+ "/opt/isb_bin/dpdk_nic_bind.py "
+ "--bind=vfio-pci {0}".format(vpci))
+
+ def start_ovs_serverswitch(self):
+ self.connection.execute("mkdir -p /usr/local/var/run/openvswitch")
+ self.connection.execute(
+ "ovsdb-server --remote=punix:"
+ "/usr/local/var/run/openvswitch/db.sock --pidfile --detach")
+ self.connection.execute(
+ "ovs-vsctl --no-wait set "
+ "Open_vSwitch . other_config:dpdk-init=true")
+ self.connection.execute(
+ "ovs-vsctl --no-wait set "
+ "Open_vSwitch . other_config:dpdk-lcore-mask=0x3")
+ self.connection.execute(
+ "ovs-vsctl --no-wait set "
+ "Open_vSwitch . other_config:dpdk-socket-mem='2048,0'")
+ self.connection.execute(
+ "ovs-vswitchd unix:{0}/"
+ "var/run/openvswitch/db.sock --pidfile --detach "
+ "--log-file=/var/log/openvswitch/"
+ "ovs-vswitchd.log".format(
+ self.ovs[0]["vpath"]))
+ self.connection.execute(
+ "ovs-vsctl set Open_vSwitch . other_config:pmd-cpu-mask=2C")
+
+ def setup_ovs_bridge(self):
+ self.connection.execute("ovs-vsctl del-br br0")
+ self.connection.execute(
+ "rm -rf /usr/local/var/run/openvswitch/dpdkvhostuser*")
+ self.connection.execute(
+ "ovs-vsctl add-br br0 -- set bridge br0 datapath_type=netdev")
+ self.connection.execute(
+ "ovs-vsctl add-port br0 dpdk0 -- set Interface dpdk0 type=dpdk")
+ self.connection.execute(
+ "ovs-vsctl add-port br0 dpdk1 -- set Interface dpdk1 type=dpdk")
+ self.connection.execute(
+ "ovs-vsctl add-port br0 dpdkvhostuser0 -- set Interface "
+ "dpdkvhostuser0 type=dpdkvhostuser")
+ self.connection.execute("ovs-vsctl add-port br0 dpdkvhostuser1 "
+ "-- set Interface dpdkvhostuser1 "
+ "type=dpdkvhostuser")
+ self.connection.execute(
+ "chmod 0777 {0}/var/run/"
+ "openvswitch/dpdkvhostuser*".format(self.ovs[0]["vpath"]))
+
+ def add_oflows(self):
+ self.connection.execute("ovs-ofctl del-flows br0")
+ for flow in self.ovs[0]["flow"]:
+ self.connection.execute(flow)
+ self.connection.execute("ovs-ofctl dump-flows br0")
+ self.connection.execute(
+ "ovs-vsctl set Interface dpdk0 options:n_rxq=4")
+ self.connection.execute(
+ "ovs-vsctl set Interface dpdk1 options:n_rxq=4")
+
+ def setup_ovs_context(self, pcis, nic_details, host_driver):
+
+ ''' 1: Setup vm_ovs.xml to launch VM.'''
+ cfg_ovs = '/tmp/vm_ovs.xml'
+ vm_ovs_xml = VM_TEMPLATE.format(vm_image=self.ovs[0]["images"])
+ with open(cfg_ovs, 'w') as f:
+ f.write(vm_ovs_xml)
+
+ ''' 2: Create and start the VM'''
+ self.connection.put(cfg_ovs, cfg_ovs)
+ time.sleep(10)
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ if out == "vm1":
+ print("VM is already present")
+ else:
+ ''' FIXME: launch through libvirt'''
+ print("virsh create ...")
+ err, out, _ = self.connection.execute(
+ "virsh create /tmp/vm_ovs.xml")
+ time.sleep(10)
+ print("err : {0}".format(err))
+ print("{0}".format(_))
+ print("out : {0}".format(out))
+
+ ''' 3: Tuning for better performance.'''
+ self.pin_vcpu(pcis)
+ self.connection.execute(
+ "echo 1 > /sys/module/kvm/parameters/"
+ "allow_unsafe_assigned_interrupts")
+ self.connection.execute(
+ "echo never > /sys/kernel/mm/transparent_hugepage/enabled")
+ print("After tuning performance ...")
+
+ ''' This is roughly compatible with the check_output function in the
+ subprocess module, which is only available from Python 2.7 onward.'''
+ def check_output(self, cmd, stderr=None):
+ '''Run a command and capture its output'''
+ err, out, _ = self.connection.execute(cmd)
+ return err, out
+
+ def read_from_file(self, filename):
+ data = ""
+ with open(filename, 'r') as the_file:
+ data = the_file.read()
+ return data
+
+ def write_to_file(self, filename, content):
+ with open(filename, 'w') as the_file:
+ the_file.write(content)
+
+ def pin_vcpu(self, pcis):
+ nodes = self.get_numa_nodes()
+ print("{0}".format(nodes))
+ num_nodes = len(nodes)
+ for i in range(0, 10):
+ self.connection.execute(
+ "virsh vcpupin vm1 {0} {1}".format(
+ i, nodes[str(num_nodes - 1)][i % len(nodes[str(num_nodes - 1)])]))
+
+ def get_numa_nodes(self):
+ nodes_sysfs = glob.iglob("/sys/devices/system/node/node*")
+ nodes = {}
+ for node_sysfs in nodes_sysfs:
+ num = os.path.basename(node_sysfs).replace("node", "")
+ with open(os.path.join(node_sysfs, "cpulist")) as cpulist_file:
+ cpulist = cpulist_file.read().strip()
+ print("cpulist: {0}".format(cpulist))
+ nodes[num] = self.split_cpu_list(cpulist)
+ print("nodes: {0}".format(nodes))
+ return nodes
+
+ def split_cpu_list(self, cpu_list):
+ if cpu_list:
+ ranges = cpu_list.split(',')
+ bounds = ([int(b) for b in r.split('-')] for r in ranges)
+ range_objects =\
+ (range(bound[0], bound[1] + 1 if len(bound) == 2
+ else bound[0] + 1) for bound in bounds)
+
+ return sorted(itertools.chain.from_iterable(range_objects))
+ else:
+ return []
+
+ def destroy_vm(self):
+ host_driver = self.ovs[0]['phy_driver']
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ print("{0}".format(out))
+ if err == 0:
+ self.connection.execute("virsh shutdown vm1")
+ self.connection.execute("virsh destroy vm1")
+ self.check_output("rmmod {0}".format(host_driver))[1].splitlines()
+ self.check_output("modprobe {0}".format(host_driver))[
+ 1].splitlines()
+ else:
+ print("error : ", err)
diff --git a/yardstick/benchmark/contexts/sriov.py b/yardstick/benchmark/contexts/sriov.py
new file mode 100644
index 000000000..fe27d2579
--- /dev/null
+++ b/yardstick/benchmark/contexts/sriov.py
@@ -0,0 +1,431 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import yaml
+import re
+import time
+import glob
+import uuid
+import random
+import logging
+import itertools
+import xml.etree.ElementTree as ET
+from yardstick import ssh
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.utils import provision_tool
+from yardstick.benchmark.contexts.standalone import StandaloneContext
+
+log = logging.getLogger(__name__)
+
+VM_TEMPLATE = """
+<domain type="kvm">
+ <name>vm1</name>
+ <uuid>{random_uuid}</uuid>
+ <memory unit="KiB">102400</memory>
+ <currentMemory unit="KiB">102400</currentMemory>
+ <memoryBacking>
+ <hugepages />
+ </memoryBacking>
+ <vcpu placement="static">20</vcpu>
+ <os>
+ <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
+ <boot dev="hd" />
+ </os>
+ <features>
+ <acpi />
+ <apic />
+ <pae />
+ </features>
+ <cpu match="exact" mode="custom">
+ <model fallback="allow">SandyBridge</model>
+ <topology cores="10" sockets="1" threads="2" />
+ </cpu>
+ <clock offset="utc">
+ <timer name="rtc" tickpolicy="catchup" />
+ <timer name="pit" tickpolicy="delay" />
+ <timer name="hpet" present="no" />
+ </clock>
+ <on_poweroff>destroy</on_poweroff>
+ <on_reboot>restart</on_reboot>
+ <on_crash>restart</on_crash>
+ <devices>
+ <emulator>/usr/bin/kvm-spice</emulator>
+ <disk device="disk" type="file">
+ <driver name="qemu" type="qcow2" />
+ <source file="{vm_image}"/>
+ <target bus="virtio" dev="vda" />
+ <address bus="0x00" domain="0x0000"
+function="0x0" slot="0x04" type="pci" />
+ </disk>
+ <controller index="0" model="ich9-ehci1" type="usb">
+ <address bus="0x00" domain="0x0000"
+function="0x7" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="ich9-uhci1" type="usb">
+ <master startport="0" />
+ <address bus="0x00" domain="0x0000" function="0x0"
+multifunction="on" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="ich9-uhci2" type="usb">
+ <master startport="2" />
+ <address bus="0x00" domain="0x0000"
+function="0x1" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="ich9-uhci3" type="usb">
+ <master startport="4" />
+ <address bus="0x00" domain="0x0000"
+function="0x2" slot="0x05" type="pci" />
+ </controller>
+ <controller index="0" model="pci-root" type="pci" />
+ <serial type="pty">
+ <target port="0" />
+ </serial>
+ <console type="pty">
+ <target port="0" type="serial" />
+ </console>
+ <input bus="usb" type="tablet" />
+ <input bus="ps2" type="mouse" />
+ <input bus="ps2" type="keyboard" />
+ <graphics autoport="yes" listen="0.0.0.0" port="-1" type="vnc" />
+ <video>
+ <model heads="1" type="cirrus" vram="16384" />
+ <address bus="0x00" domain="0x0000"
+function="0x0" slot="0x02" type="pci" />
+ </video>
+ <memballoon model="virtio">
+ <address bus="0x00" domain="0x0000"
+function="0x0" slot="0x06" type="pci" />
+ </memballoon>
+ <interface type="bridge">
+ <mac address="{mac_addr}" />
+ <source bridge="virbr0" />
+ </interface>
+ </devices>
+</domain>
+"""
+
+
+class Sriov(StandaloneContext):
+ def __init__(self):
+ self.name = None
+ self.file_path = None
+ self.nodes = []
+ self.vm_deploy = False
+ self.sriov = []
+ self.first_run = True
+ self.dpdk_nic_bind = ""
+ self.user = ""
+ self.ssh_ip = ""
+ self.passwd = ""
+ self.ssh_port = ""
+ self.auth_type = ""
+
+ def init(self):
+ log.debug("In init")
+ self.parse_pod_and_get_data(self.file_path)
+
+ def parse_pod_and_get_data(self, file_path):
+ self.file_path = file_path
+ log.debug("parsing pod file: {0}".format(self.file_path))
+ try:
+ with open(self.file_path) as stream:
+ cfg = yaml.load(stream)
+ except IOError:
+ log.error("File {0} does not exist".format(self.file_path))
+ raise
+
+ self.sriov.extend([node for node in cfg["nodes"]
+ if node["role"] == "Sriov"])
+ self.user = self.sriov[0]['user']
+ self.ssh_ip = self.sriov[0]['ip']
+ if self.sriov[0]['auth_type'] == "password":
+ self.passwd = self.sriov[0]['password']
+ else:
+ self.ssh_port = self.sriov[0]['ssh_port']
+ self.key_filename = self.sriov[0]['key_filename']
+
+ def ssh_remote_machine(self):
+ if self.sriov[0]['auth_type'] == "password":
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ password=self.passwd)
+ self.connection.wait()
+ else:
+ if self.ssh_port is not None:
+ ssh_port = self.ssh_port
+ else:
+ ssh_port = ssh.DEFAULT_PORT
+ self.connection = ssh.SSH(
+ self.user,
+ self.ssh_ip,
+ port=ssh_port,
+ key_filename=self.key_filename)
+ self.connection.wait()
+ self.dpdk_nic_bind = provision_tool(
+ self.connection,
+ os.path.join(get_nsb_option("bin_path"), "dpdk_nic_bind.py"))
+
+ def get_nic_details(self):
+ nic_details = {}
+ nic_details = {
+ 'interface': {},
+ 'pci': self.sriov[0]['phy_ports'],
+ 'phy_driver': self.sriov[0]['phy_driver'],
+ 'vf_macs': self.sriov[0]['vf_macs']
+ }
+ # Make sure that ports are bound to kernel drivers e.g. i40e/ixgbe
+ for i, _ in enumerate(nic_details['pci']):
+ err, out, _ = self.connection.execute(
+ "{dpdk_nic_bind} --force -b {driver} {port}".format(
+ dpdk_nic_bind=self.dpdk_nic_bind,
+ driver=self.sriov[0]['phy_driver'],
+ port=self.sriov[0]['phy_ports'][i]))
+ err, out, _ = self.connection.execute(
+ "lshw -c network -businfo | grep '{port}'".format(
+ port=self.sriov[0]['phy_ports'][i]))
+ a = out.split()[1]
+ err, out, _ = self.connection.execute(
+ "ip -s link show {interface}".format(
+ interface=out.split()[1]))
+ nic_details['interface'][i] = str(a)
+ log.info("{0}".format(nic_details))
+ return nic_details
+
+ def install_req_libs(self):
+ if self.first_run:
+ log.info("Installing required libraries...")
+ err, out, _ = self.connection.execute("apt-get update")
+ log.debug("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install qemu-kvm libvirt-bin")
+ log.debug("{0}".format(out))
+ err, out, _ = self.connection.execute(
+ "apt-get -y install libvirt-dev bridge-utils numactl")
+ log.debug("{0}".format(out))
+ self.first_run = False
+
+ def configure_nics_for_sriov(self, host_driver, nic_details):
+ vf_pci = [[], []]
+ self.connection.execute(
+ "rmmod {0}".format(host_driver))[1].splitlines()
+ self.connection.execute(
+ "modprobe {0} num_vfs=1".format(host_driver))[1].splitlines()
+ nic_details['vf_pci'] = {}
+ for i in range(len(nic_details['pci'])):
+ self.connection.execute(
+ "echo 1 > /sys/bus/pci/devices/{0}/sriov_numvfs".format(
+ nic_details['pci'][i]))
+ err, out, _ = self.connection.execute(
+ "ip link set {interface} vf 0 mac {mac}".format(
+ interface=nic_details['interface'][i],
+ mac=nic_details['vf_macs'][i]))
+ time.sleep(3)
+ vf_pci[i] = self.get_vf_datas(
+ 'vf_pci',
+ nic_details['pci'][i],
+ nic_details['vf_macs'][i])
+ nic_details['vf_pci'][i] = vf_pci[i]
+ log.debug("NIC DETAILS : {0}".format(nic_details))
+ return nic_details
+
+ def setup_sriov_context(self, pcis, nic_details, host_driver):
+ blacklist = "/etc/modprobe.d/blacklist.conf"
+
+ # 1 : Blacklist the vf driver in /etc/modprobe.d/blacklist.conf
+ vfnic = "{0}vf".format(host_driver)
+ lines = self.read_from_file(blacklist)
+ if vfnic not in lines:
+ vfblacklist = "blacklist {vfnic}".format(vfnic=vfnic)
+ self.connection.execute(
+ "echo {vfblacklist} >> {blacklist}".format(
+ vfblacklist=vfblacklist,
+ blacklist=blacklist))
+
+ # 2 : modprobe host_driver with num_vfs
+ nic_details = self.configure_nics_for_sriov(host_driver, nic_details)
+
+ # 3: Setup vm_sriov.xml to launch VM
+ cfg_sriov = '/tmp/vm_sriov.xml'
+ mac = [0x00, 0x24, 0x81,
+ random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ mac_address = ':'.join(map(lambda x: "%02x" % x, mac))
+ vm_sriov_xml = VM_TEMPLATE.format(
+ random_uuid=uuid.uuid4(),
+ mac_addr=mac_address,
+ vm_image=self.sriov[0]["images"])
+ with open(cfg_sriov, 'w') as f:
+ f.write(vm_sriov_xml)
+
+ vf = nic_details['vf_pci']
+ for index in range(len(nic_details['vf_pci'])):
+ self.add_sriov_interface(
+ index,
+ vf[index]['vf_pci'],
+ mac_address,
+ "/tmp/vm_sriov.xml")
+ self.connection.execute(
+ "ifconfig {interface} up".format(
+ interface=nic_details['interface'][index]))
+
+ # 4: Create and start the VM
+ self.connection.put(cfg_sriov, cfg_sriov)
+ time.sleep(10)
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ try:
+ if out == "vm1":
+ log.info("VM is already present")
+ else:
+ # FIXME: launch through libvirt
+ log.info("virsh create ...")
+ err, out, _ = self.connection.execute(
+ "virsh create /tmp/vm_sriov.xml")
+ time.sleep(10)
+ log.error("err : {0}".format(err))
+ log.error("{0}".format(_))
+ log.debug("out : {0}".format(out))
+ except ValueError:
+ raise
+
+ # 5: Tuning for better performance
+ self.pin_vcpu(pcis)
+ self.connection.execute(
+ "echo 1 > /sys/module/kvm/parameters/"
+ "allow_unsafe_assigned_interrupts")
+ self.connection.execute(
+ "echo never > /sys/kernel/mm/transparent_hugepage/enabled")
+
+ def add_sriov_interface(self, index, vf_pci, vfmac, xml):
+ root = ET.parse(xml)
+ pattern = "0000:(\d+):(\d+).(\d+)"
+ m = re.search(pattern, vf_pci, re.MULTILINE)
+ device = root.find('devices')
+
+ interface = ET.SubElement(device, 'interface')
+ interface.set('managed', 'yes')
+ interface.set('type', 'hostdev')
+
+ mac = ET.SubElement(interface, 'mac')
+ mac.set('address', vfmac)
+ source = ET.SubElement(interface, 'source')
+
+ addr = ET.SubElement(source, "address")
+ addr.set('domain', "0x0")
+ addr.set('bus', "{0}".format(m.group(1)))
+ addr.set('function', "{0}".format(m.group(3)))
+ addr.set('slot', "{0}".format(m.group(2)))
+ addr.set('type', "pci")
+
+ vf_pci = ET.SubElement(interface, 'address')
+ vf_pci.set('type', 'pci')
+ vf_pci.set('domain', '0x0000')
+ vf_pci.set('bus', '0x00')
+ vf_pci.set('slot', '0x0{0}'.format(index + 7))
+ vf_pci.set('function', '0x00')
+
+ root.write(xml)
+
+ # This is roughly compatible with the check_output function in the
+ # subprocess module, which is only available from Python 2.7 onward
+ def check_output(self, cmd, stderr=None):
+ # Run a command and capture its output
+ err, out, _ = self.connection.execute(cmd)
+ return err, out
+
+ def get_virtual_devices(self, pci):
+ pf_vfs = {}
+ err, extra_info = self.check_output(
+ "cat /sys/bus/pci/devices/{0}/virtfn0/uevent".format(pci))
+ pattern = "PCI_SLOT_NAME=(?P<name>[0-9:.\s.]+)"
+ m = re.search(pattern, extra_info, re.MULTILINE)
+
+ if m:
+ pf_vfs.update({pci: str(m.group(1).rstrip())})
+ log.info("pf_vfs : {0}".format(pf_vfs))
+ return pf_vfs
+
+ def get_vf_datas(self, key, value, vfmac):
+ vfret = {}
+ pattern = "0000:(\d+):(\d+).(\d+)"
+
+ vfret["mac"] = vfmac
+ vfs = self.get_virtual_devices(value)
+ log.info("vfs: {0}".format(vfs))
+ for k, v in vfs.items():
+ m = re.search(pattern, k, re.MULTILINE)
+ m1 = re.search(pattern, value, re.MULTILINE)
+ if m.group(1) == m1.group(1):
+ vfret["vf_pci"] = str(v)
+ break
+
+ return vfret
+
+ def read_from_file(self, filename):
+ data = ""
+ with open(filename, 'r') as the_file:
+ data = the_file.read()
+ return data
+
+ def write_to_file(self, filename, content):
+ with open(filename, 'w') as the_file:
+ the_file.write(content)
+
+ def pin_vcpu(self, pcis):
+ nodes = self.get_numa_nodes()
+ log.info("{0}".format(nodes))
+ num_nodes = len(nodes)
+ for i in range(0, 10):
+ self.connection.execute(
+ "virsh vcpupin vm1 {0} {1}".format(
+ i, nodes[str(num_nodes - 1)][i % len(nodes[str(num_nodes - 1)])]))
+
+ def get_numa_nodes(self):
+ nodes_sysfs = glob.iglob("/sys/devices/system/node/node*")
+ nodes = {}
+ for node_sysfs in nodes_sysfs:
+ num = os.path.basename(node_sysfs).replace("node", "")
+ with open(os.path.join(node_sysfs, "cpulist")) as cpulist_file:
+ cpulist = cpulist_file.read().strip()
+ nodes[num] = self.split_cpu_list(cpulist)
+ log.info("nodes: {0}".format(nodes))
+ return nodes
+
+ def split_cpu_list(self, cpu_list):
+ if cpu_list:
+ ranges = cpu_list.split(',')
+ bounds = ([int(b) for b in r.split('-')] for r in ranges)
+ range_objects =\
+ (range(bound[0], bound[1] + 1 if len(bound) == 2
+ else bound[0] + 1) for bound in bounds)
+
+ return sorted(itertools.chain.from_iterable(range_objects))
+ else:
+ return []
+
+ def destroy_vm(self):
+ host_driver = self.sriov[0]["phy_driver"]
+ err, out = self.check_output("virsh list --name | grep -i vm1")
+ log.info("{0}".format(out))
+ if err == 0:
+ self.connection.execute("virsh shutdown vm1")
+ self.connection.execute("virsh destroy vm1")
+ self.check_output("rmmod {0}".format(host_driver))[1].splitlines()
+ self.check_output("modprobe {0}".format(host_driver))[
+ 1].splitlines()
+ else:
+ log.error("error : {0}".format(err))
diff --git a/yardstick/benchmark/contexts/standalone.py b/yardstick/benchmark/contexts/standalone.py
index 8614f0cac..2bc1f3755 100644
--- a/yardstick/benchmark/contexts/standalone.py
+++ b/yardstick/benchmark/contexts/standalone.py
@@ -18,9 +18,11 @@ import logging
import errno
import collections
import yaml
+import time
from yardstick.benchmark.contexts.base import Context
from yardstick.common.constants import YARDSTICK_ROOT_PATH
+from yardstick.common.utils import import_modules_from_package, itersubclasses
LOG = logging.getLogger(__name__)
@@ -38,7 +40,8 @@ class StandaloneContext(Context):
self.nodes = []
self.networks = {}
self.nfvi_node = []
- super(StandaloneContext, self).__init__()
+ self.nfvi_obj = None
+ super(self.__class__, self).__init__()
def read_config_file(self):
"""Read from config file"""
@@ -48,6 +51,14 @@ class StandaloneContext(Context):
cfg = yaml.load(stream)
return cfg
+ def get_nfvi_obj(self):
+ print("{0}".format(self.nfvi_node[0]['role']))
+ context_type = self.get_context_impl(self.nfvi_node[0]['role'])
+ nfvi_obj = context_type()
+ nfvi_obj.__init__()
+ nfvi_obj.parse_pod_and_get_data(self.file_path)
+ return nfvi_obj
+
def init(self, attrs):
"""initializes itself from the supplied arguments"""
@@ -64,11 +75,24 @@ class StandaloneContext(Context):
else:
raise
- self.nodes.extend(cfg["nodes"])
- self.nfvi_node.extend([node for node in cfg["nodes"]
- if node["role"] == "nfvi_node"])
+ self.vm_deploy = attrs.get("vm_deploy", True)
+ self.nodes.extend([node for node in cfg["nodes"]
+ if str(node["role"]) != "Sriov" and
+ str(node["role"]) != "Ovsdpdk"])
+ for node in cfg["nodes"]:
+ if str(node["role"]) == "Sriov":
+ self.nfvi_node.extend([node for node in cfg["nodes"]
+ if str(node["role"]) == "Sriov"])
+ if str(node["role"]) == "Ovsdpdk":
+ self.nfvi_node.extend([node for node in cfg["nodes"]
+ if str(node["role"]) == "Ovsdpdk"])
+ LOG.info("{0}".format(node["role"]))
+ else:
+ LOG.debug("Node role is other than SRIOV and OVS")
+ self.nfvi_obj = self.get_nfvi_obj()
# add optional static network definition
self.networks.update(cfg.get("networks", {}))
+ self.nfvi_obj = self.get_nfvi_obj()
LOG.debug("Nodes: %r", self.nodes)
LOG.debug("NFVi Node: %r", self.nfvi_node)
LOG.debug("Networks: %r", self.networks)
@@ -77,13 +101,44 @@ class StandaloneContext(Context):
"""don't need to deploy"""
# Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
- pass
+ if not self.vm_deploy:
+ return
+
+ # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
+ self.nfvi_obj.ssh_remote_machine()
+ if self.nfvi_obj.first_run is True:
+ self.nfvi_obj.install_req_libs()
+
+ nic_details = self.nfvi_obj.get_nic_details()
+ print("{0}".format(nic_details))
+
+ if self.nfvi_node[0]["role"] == "Sriov":
+ self.nfvi_obj.setup_sriov_context(
+ self.nfvi_obj.sriov[0]['phy_ports'],
+ nic_details,
+ self.nfvi_obj.sriov[0]['phy_driver'])
+ if self.nfvi_node[0]["role"] == "Ovsdpdk":
+ self.nfvi_obj.setup_ovs(self.nfvi_obj.ovs[0]["phy_ports"])
+ self.nfvi_obj.start_ovs_serverswitch()
+ time.sleep(5)
+ self.nfvi_obj.setup_ovs_bridge()
+ self.nfvi_obj.add_oflows()
+ self.nfvi_obj.setup_ovs_context(
+ self.nfvi_obj.ovs[0]['phy_ports'],
+ nic_details,
+ self.nfvi_obj.ovs[0]['phy_driver'])
+ pass
def undeploy(self):
"""don't need to undeploy"""
+ if not self.vm_deploy:
+ return
# Todo: NFVi undeploy (sriov, vswitch, ovs etc) based on the config.
- super(StandaloneContext, self).undeploy()
+ # self.nfvi_obj = self.get_nfvi_obj()
+ self.nfvi_obj.ssh_remote_machine()
+ self.nfvi_obj.destroy_vm()
+ pass
def _get_server(self, attr_name):
"""lookup server info by name from context
@@ -91,16 +146,12 @@ class StandaloneContext(Context):
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
-
if isinstance(attr_name, collections.Mapping):
return None
-
- if self.name.split("-")[0] != attr_name.split(".")[1]:
+ if self.name != attr_name.split(".")[1]:
return None
-
node_name = attr_name.split(".")[0]
matching_nodes = (n for n in self.nodes if n["name"] == node_name)
-
try:
# A clone is created in order to avoid affecting the
# original one.
@@ -115,7 +166,6 @@ class StandaloneContext(Context):
else:
raise ValueError("Duplicate nodes!!! Nodes: %s %s",
(matching_nodes, duplicate))
-
node["name"] = attr_name
return node
@@ -146,3 +196,19 @@ class StandaloneContext(Context):
"physical_network": network.get("physical_network"),
}
return result
+
+ def get_context_impl(self, nfvi_type):
+ """ Find the implementing class from vnf_model["vnf"]["name"] field
+
+ :param vnf_model: dictionary containing a parsed vnfd
+ :return: subclass of GenericVNF
+ """
+ import_modules_from_package(
+ "yardstick.benchmark.contexts")
+ expected_name = nfvi_type
+ impl = [c for c in itersubclasses(StandaloneContext)
+ if c.__name__ == expected_name]
+ try:
+ return next(iter(impl))
+ except StopIteration:
+ raise ValueError("No implementation for %s", expected_name)
diff --git a/yardstick/benchmark/core/plugin.py b/yardstick/benchmark/core/plugin.py
index 7f67a04b3..c8d0865d1 100644
--- a/yardstick/benchmark/core/plugin.py
+++ b/yardstick/benchmark/core/plugin.py
@@ -84,8 +84,8 @@ class Plugin(object):
if deployment_ip == "local":
self.client = ssh.SSH.from_node(deployment, overrides={
- # host can't be None, fail if no INSTALLER_IP
- 'ip': os.environ["INSTALLER_IP"],
+ # host can't be None, fail if no JUMP_HOST_IP
+ 'ip': os.environ["JUMP_HOST_IP"],
})
else:
self.client = ssh.SSH.from_node(deployment)
@@ -107,8 +107,8 @@ class Plugin(object):
if deployment_ip == "local":
self.client = ssh.SSH.from_node(deployment, overrides={
- # host can't be None, fail if no INSTALLER_IP
- 'ip': os.environ["INSTALLER_IP"],
+ # host can't be None, fail if no JUMP_HOST_IP
+ 'ip': os.environ["JUMP_HOST_IP"],
})
else:
self.client = ssh.SSH.from_node(deployment)
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index b53d6446e..ede14b1c0 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -58,7 +58,12 @@ class Task(object): # pragma: no cover
check_environment()
- output_config = utils.parse_ini_file(config_file)
+ try:
+ output_config = utils.parse_ini_file(config_file)
+ except Exception:
+ # all errors will be ignored; output_config defaults to {}
+ output_config = {}
+
self._init_output_config(output_config)
self._set_output_config(output_config, args.output_file)
LOG.debug('Output configuration is: %s', output_config)
diff --git a/yardstick/benchmark/scenarios/availability/actionplayers.py b/yardstick/benchmark/scenarios/availability/actionplayers.py
index 420626413..c5e199ba6 100644
--- a/yardstick/benchmark/scenarios/availability/actionplayers.py
+++ b/yardstick/benchmark/scenarios/availability/actionplayers.py
@@ -29,8 +29,10 @@ class AttackerPlayer(ActionPlayer):
class OperationPlayer(ActionPlayer):
- def __init__(self, operation):
+ def __init__(self, operation, intermediate_variables):
self.underlyingOperation = operation
+ self.underlyingOperation.intermediate_variables \
+ = intermediate_variables
def action(self):
self.underlyingOperation.run()
diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
index b8c34ad44..aa144ab50 100644
--- a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml
@@ -16,6 +16,11 @@ kill-process:
inject_script: ha_tools/fault_process_kill.bash
recovery_script: ha_tools/start_service.bash
+kill-lxc-process:
+ check_script: ha_tools/check_lxc_process_python.bash
+ inject_script: ha_tools/fault_lxc_process_kill.bash
+ recovery_script: ha_tools/start_lxc_service.bash
+
bare-metal-down:
check_script: ha_tools/check_host_ping.bash
recovery_script: ha_tools/ipmi_power.bash
@@ -34,4 +39,4 @@ stress-cpu:
block-io:
inject_script: ha_tools/disk/block_io.bash
- recovery_script: ha_tools/disk/recovery_disk_io.bash \ No newline at end of file
+ recovery_script: ha_tools/disk/recovery_disk_io.bash
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index e0d05ebf5..c9187c34d 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -65,7 +65,9 @@ class Director(object):
self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
- def createActionPlayer(self, type, key):
+ def createActionPlayer(self, type, key, intermediate_variables=None):
+ if intermediate_variables is None:
+ intermediate_variables = {}
LOG.debug(
"the type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
@@ -76,7 +78,8 @@ class Director(object):
return actionplayers.ResultCheckerPlayer(
self.resultCheckerMgr[key])
if type == ActionType.OPERATION:
- return actionplayers.OperationPlayer(self.operationMgr[key])
+ return actionplayers.OperationPlayer(self.operationMgr[key],
+ intermediate_variables)
LOG.debug("something run when creatactionplayer")
def createActionRollbacker(self, type, key):
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash b/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash
new file mode 100755
index 000000000..6d2f4dd51
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/check_lxc_process_python.bash
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# check the status of a service
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+ lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+ container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+ container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+ echo $(($(lxc-attach -n "${container_1}" -- ps aux | grep -e "${NOVA_API_PROCESS_1}" | grep -v grep | grep -cv /bin/sh) + $(lxc-attach -n "${container_2}" -- ps aux | grep -e "${NOVA_API_PROCESS_2}" | grep -v grep | grep -cv /bin/sh)))
+else
+ container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+ if [ "${process_name}" = "haproxy" ]; then
+ ps aux | grep -e "/usr/.*/${process_name}" | grep -v grep | grep -cv /bin/sh
+ else
+ lxc-attach -n "${container}" -- ps aux | grep -e "${process_name}" | grep -v grep | grep -cv /bin/sh
+ fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash b/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash
new file mode 100755
index 000000000..b0b86ab65
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/fault_lxc_process_kill.bash
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Stop process by process name
+
+set -e
+
+NOVA_API_PROCESS_1="nova-api-os-compute"
+NOVA_API_PROCESS_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+process_name=$1
+
+lxc_filter=$(echo "${process_name}" | sed 's/-/_/g')
+
+if [ "${lxc_filter}" = "glance_api" ]; then
+ lxc_filter="glance"
+fi
+
+if [ "${process_name}" = "nova-api" ]; then
+ container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+ container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+ pids_1=$(lxc-attach -n "${container_1}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_1}")
+ for pid in ${pids_1};
+ do
+ lxc-attach -n "${container_1}" -- kill -9 "${pid}"
+ done
+
+ pids_2=$(lxc-attach -n "${container_2}" -- pgrep -f "/openstack/.*/${NOVA_API_PROCESS_2}")
+ for pid in ${pids_2};
+ do
+ lxc-attach -n "${container_2}" -- kill -9 "${pid}"
+ done
+else
+ container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+ if [ "${process_name}" = "haproxy" ]; then
+ for pid in $(pgrep -cf "/usr/.*/${process_name}");
+ do
+ kill -9 "${pid}"
+ done
+ elif [ "${process_name}" = "keystone" ]; then
+ pids=$(lxc-attach -n "${container}" -- ps aux | grep "keystone" | grep -iv heartbeat | grep -iv monitor | grep -v grep | grep -v /bin/sh | awk '{print $2}')
+ for pid in ${pids};
+ do
+ lxc-attach -n "${container}" -- kill -9 "${pid}"
+ done
+ else
+ pids=$(lxc-attach -n "${container}" -- pgrep -f "/openstack/.*/${process_name}")
+ for pid in ${pids};
+ do
+ lxc-attach -n "${container}" -- kill -9 "${pid}"
+ done
+ fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
index aee516ea9..7408409a9 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/create_flavor.bash
@@ -20,4 +20,4 @@ else
SECURE=""
fi
-openstack "${SECURE}" flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
+openstack ${SECURE} flavor create $1 --id $2 --ram $3 --disk $4 --vcpus $5
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
index d39926fc5..7240476f7 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/delete_flavor.bash
@@ -20,4 +20,4 @@ else
SECURE=""
fi
-openstack "${SECURE}" flavor delete $1
+openstack ${SECURE} flavor delete $1
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
index bd61ba9bb..e679fdb9e 100644
--- a/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/nova/show_flavors.bash
@@ -19,4 +19,4 @@ else
SECURE=""
fi
-openstack "${SECURE}" flavor list
+openstack ${SECURE} flavor list
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash
new file mode 100755
index 000000000..36a673977
--- /dev/null
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_lxc_service.bash
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Start a service and check the service is started
+
+set -e
+
+NOVA_API_SERVICE_1="nova-api-os-compute"
+NOVA_API_SERVICE_2="nova-api-metadata"
+NOVA_API_LXC_FILTER_1="nova_api_os_compute"
+NOVA_API_LXC_FILTER_2="nova_api_metadata"
+
+service_name=$1
+
+if [ "${service_name}" = "haproxy" ]; then
+ if which systemctl 2>/dev/null; then
+ systemctl start $service_name
+ else
+ service $service_name start
+ fi
+else
+ lxc_filter=${service_name//-/_}
+
+ if [ "${lxc_filter}" = "glance_api" ]; then
+ lxc_filter="glance"
+ fi
+
+ if [ "${service_name}" = "nova-api" ]; then
+ container_1=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_1}")
+ container_2=$(lxc-ls -1 --filter="${NOVA_API_LXC_FILTER_2}")
+
+ if lxc-attach -n "${container_1}" -- which systemctl 2>/dev/null; then
+ lxc-attach -n "${container_1}" -- systemctl start "${NOVA_API_SERVICE_1}"
+ else
+ lxc-attach -n "${container_1}" -- service "${NOVA_API_SERVICE_1}" start
+ fi
+
+ if lxc-attach -n "${container_2}" -- which systemctl 2>/dev/null; then
+ lxc-attach -n "${container_2}" -- systemctl start "${NOVA_API_SERVICE_2}"
+ else
+ lxc-attach -n "${container_2}" -- service "${NOVA_API_SERVICE_2}" start
+ fi
+ else
+ container=$(lxc-ls -1 --filter="${lxc_filter}")
+
+ Distributor=$(lxc-attach -n "${container}" -- lsb_release -a | grep "Distributor ID" | awk '{print $3}')
+
+ if [ "${Distributor}" != "Ubuntu" -a "${service_name}" != "keystone" -a "${service_name}" != "neutron-server" ]; then
+ service_name="openstack-"${service_name}
+ elif [ "${Distributor}" = "Ubuntu" -a "${service_name}" = "keystone" ]; then
+ service_name="apache2"
+ elif [ "${service_name}" = "keystone" ]; then
+ service_name="httpd"
+ fi
+
+ if lxc-attach -n "${container}" -- which systemctl 2>/dev/null; then
+ lxc-attach -n "${container}" -- systemctl start "${service_name}"
+ else
+ lxc-attach -n "${container}" -- service "${service_name}" start
+ fi
+ fi
+fi
diff --git a/yardstick/benchmark/scenarios/availability/monitor_conf.yaml b/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
index 511449221..a08347d2d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
+++ b/yardstick/benchmark/scenarios/availability/monitor_conf.yaml
@@ -13,6 +13,8 @@ schema: "yardstick:task:0.1"
process-status:
monitor_script: ha_tools/check_process_python.bash
+lxc_process-status:
+ monitor_script: ha_tools/check_lxc_process_python.bash
nova-image-list:
monitor_script: ha_tools/nova_image_list.bash
service-status:
diff --git a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
index be286b8fd..88ca9e2bb 100644
--- a/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
+++ b/yardstick/benchmark/scenarios/availability/operation/baseoperation.py
@@ -58,6 +58,7 @@ class BaseOperation(object):
self.key = ''
self._config = config
self._context = context
+ self.intermediate_variables = {}
@staticmethod
def get_operation_cls(type):
diff --git a/yardstick/benchmark/scenarios/availability/operation/operation_general.py b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
index 8fd387e47..af1ae7469 100644
--- a/yardstick/benchmark/scenarios/availability/operation/operation_general.py
+++ b/yardstick/benchmark/scenarios/availability/operation/operation_general.py
@@ -15,7 +15,8 @@ from yardstick.benchmark.scenarios.availability.operation.baseoperation \
import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.availability.util \
- import buildshellparams, execute_shell_command
+ import buildshellparams, execute_shell_command, \
+ read_stdout_item, build_shell_command
LOG = logging.getLogger(__name__)
@@ -39,11 +40,7 @@ class GeneralOperaion(BaseOperation):
self.operation_key = self._config['operation_key']
if "action_parameter" in self._config:
- actionParameter = self._config['action_parameter']
- str = buildshellparams(
- actionParameter, True if self.connection else False)
- l = list(item for item in actionParameter.values())
- self.action_param = str.format(*l)
+ self.actionParameter_config = self._config['action_parameter']
if "rollback_parameter" in self._config:
rollbackParameter = self._config['rollback_parameter']
@@ -61,6 +58,11 @@ class GeneralOperaion(BaseOperation):
def run(self):
if "action_parameter" in self._config:
+ self.action_param = \
+ build_shell_command(
+ self.actionParameter_config,
+ True if self.connection else False,
+ self.intermediate_variables)
if self.connection:
with open(self.action_script, "r") as stdin_file:
exit_status, stdout, stderr = self.connection.execute(
@@ -83,6 +85,12 @@ class GeneralOperaion(BaseOperation):
if exit_status == 0:
LOG.debug("success,the operation's output is: %s", stdout)
+ if "return_parameter" in self._config:
+ returnParameter = self._config['return_parameter']
+ for key, item in returnParameter.items():
+ value = read_stdout_item(stdout, key)
+ LOG.debug("intermediate variables %s: %s", item, value)
+ self.intermediate_variables[item] = value
else:
LOG.error(
"the operation's error, stdout:%s, stderr:%s",
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 28bec8aff..17ad79f29 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -25,6 +25,7 @@ class ScenarioGeneral(base.Scenario):
"scenario_cfg:%s context_cfg:%s", scenario_cfg, context_cfg)
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
+ self.intermediate_variables = {}
def setup(self):
self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -38,7 +39,8 @@ class ScenarioGeneral(base.Scenario):
orderedSteps.index(step) + 1)
try:
actionPlayer = self.director.createActionPlayer(
- step['actionType'], step['actionKey'])
+ step['actionType'], step['actionKey'],
+ self.intermediate_variables)
actionPlayer.action()
actionRollbacker = self.director.createActionRollbacker(
step['actionType'], step['actionKey'])
diff --git a/yardstick/benchmark/scenarios/availability/util.py b/yardstick/benchmark/scenarios/availability/util.py
index eadbfa53b..6fef622bd 100644
--- a/yardstick/benchmark/scenarios/availability/util.py
+++ b/yardstick/benchmark/scenarios/availability/util.py
@@ -14,13 +14,8 @@ LOG = logging.getLogger(__name__)
def buildshellparams(param, remote=True):
- i = 0
- values = []
result = '/bin/bash -s' if remote else ''
- for key in param.keys():
- values.append(param[key])
- result += " {%d}" % i
- i = i + 1
+ result += "".join(" {%d}" % i for i in range(len(param)))
return result
@@ -36,5 +31,29 @@ def execute_shell_command(command):
output = traceback.format_exc()
LOG.error("exec command '%s' error:\n ", command)
LOG.error(traceback.format_exc())
-
return exitcode, output
+
+PREFIX = '$'
+
+
+def build_shell_command(param_config, remote=True, intermediate_variables=None):
+ param_template = '/bin/bash -s' if remote else ''
+ if intermediate_variables:
+ for key, val in param_config.items():
+ if str(val).startswith(PREFIX):
+ try:
+ param_config[key] = intermediate_variables[val]
+ except KeyError:
+ pass
+ result = param_template + "".join(" {}".format(v) for v in param_config.values())
+ LOG.debug("THE RESULT OF build_shell_command IS: %s", result)
+ return result
+
+
+def read_stdout_item(stdout, key):
+ for item in stdout.splitlines():
+ if key in item:
+ attributes = item.split("|")
+ if attributes[1].lstrip().startswith(key):
+ return attributes[2].strip()
+ return None
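The two helpers added above cooperate: read_stdout_item() pulls a value out of a pipe-delimited command output, and build_shell_command() substitutes any "$"-prefixed parameter from the intermediate_variables dict before formatting the shell command. A standalone sketch of that flow, using a made-up two-row table as stdout (not part of the patch):

def build_shell_command(param_config, remote=True, intermediate_variables=None):
    param_template = '/bin/bash -s' if remote else ''
    if intermediate_variables:
        for key, val in param_config.items():
            if str(val).startswith('$'):
                # fall back to the raw "$name" string if the variable is unknown
                param_config[key] = intermediate_variables.get(val, val)
    return param_template + "".join(" {}".format(v) for v in param_config.values())

def read_stdout_item(stdout, key):
    for line in stdout.splitlines():
        if key in line:
            attributes = line.split("|")
            if attributes[1].lstrip().startswith(key):
                return attributes[2].strip()
    return None

stdout = "| id   | 42 |\n| name | m1.tiny |"
flavor_id = read_stdout_item(stdout, "id")                       # -> "42"
cmd = build_shell_command({'flavor': '$flavor_id'}, True, {'$flavor_id': flavor_id})
assert cmd == "/bin/bash -s 42"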
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 3135af9bd..a3d273750 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -50,6 +50,17 @@ For more info see http://software.es.net/iperf
type: int
unit: bytes
default: -
+ length - length of buffer to read or write,
+ (default 128 KB for TCP, 8 KB for UDP)
+ type: int
+ unit: k
+ default: -
+ window - set window size / socket buffer size
+ sets the TCP window size; for UDP tests this sets the receive buffer
+ size, limiting the maximum size of an acceptable data packet.
+ type: int
+ unit: k
+ default: -
"""
__scenario_type__ = "Iperf3"
@@ -122,6 +133,12 @@ For more info see http://software.es.net/iperf
elif "blockcount" in options:
cmd += " --blockcount %d" % options["blockcount"]
+ if "length" in options:
+ cmd += " --length %s" % options["length"]
+
+ if "window" in options:
+ cmd += " --window %s" % options["window"]
+
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.host.execute(cmd)
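The two new iperf3 options simply append flags to the command string before it is executed on the host. A tiny sketch of that assembly, with a hypothetical base command and option values (not part of the patch):

def append_buffer_options(cmd, options):
    # mirror the option handling added above
    if "length" in options:
        cmd += " --length %s" % options["length"]
    if "window" in options:
        cmd += " --window %s" % options["window"]
    return cmd

cmd = append_buffer_options("iperf3 -c 10.0.1.5 --json",
                            {"length": "8K", "window": "29200"})
assert cmd == "iperf3 -c 10.0.1.5 --json --length 8K --window 29200"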
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index e6aa7e5fb..8ca1ca60e 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -9,6 +9,7 @@
from __future__ import absolute_import
from __future__ import print_function
+import os
import logging
import pkg_resources
@@ -19,6 +20,9 @@ from yardstick.benchmark.scenarios import base
LOG = logging.getLogger(__name__)
+VNIC_TYPE_LIST = ["ovs", "sriov"]
+SRIOV_DRIVER_LIST = ["ixgbevf", "i40evf"]
+
class Pktgen(base.Scenario):
"""Execute pktgen between two hosts
@@ -44,7 +48,11 @@ class Pktgen(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
+ self.vnic_name = "eth0"
+ self.vnic_type = "ovs"
+ self.queue_number = 1
self.setup_done = False
+ self.multiqueue_setup_done = False
def setup(self):
"""scenario setup"""
@@ -67,6 +75,212 @@ class Pktgen(base.Scenario):
self.setup_done = True
+ def multiqueue_setup(self):
+ # one time setup stuff
+ cmd = "sudo sysctl -w net.core.netdev_budget=3000"
+ self.server.send_command(cmd)
+ self.client.send_command(cmd)
+
+ cmd = "sudo sysctl -w net.core.netdev_max_backlog=100000"
+ self.server.send_command(cmd)
+ self.client.send_command(cmd)
+
+ """multiqueue setup"""
+ if not self._is_irqbalance_disabled():
+ self._disable_irqbalance()
+
+ vnic_driver_name = self._get_vnic_driver_name()
+ if vnic_driver_name in SRIOV_DRIVER_LIST:
+ self.vnic_type = "sriov"
+
+ # one time setup stuff
+ cmd = "sudo ethtool -G %s rx 4096 tx 4096" % self.vnic_name
+ self.server.send_command(cmd)
+ self.client.send_command(cmd)
+
+ self.queue_number = self._get_sriov_queue_number()
+ self._setup_irqmapping_sriov(self.queue_number)
+ else:
+ self.vnic_type = "ovs"
+ self.queue_number = self._enable_ovs_multiqueue()
+ self._setup_irqmapping_ovs(self.queue_number)
+
+ self.multiqueue_setup_done = True
+
+ def _get_vnic_driver_name(self):
+ cmd = "readlink /sys/class/net/%s/device/driver" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return os.path.basename(stdout.strip())
+
+ def _is_irqbalance_disabled(self):
+ """Did we disable irqbalance already in the guest?"""
+ is_disabled = False
+ cmd = "grep ENABLED /etc/default/irqbalance"
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ if "0" in stdout:
+ is_disabled = True
+
+ return is_disabled
+
+ def _disable_irqbalance(self):
+ cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
+ "/etc/default/irqbalance"
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "sudo service irqbalance stop"
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "sudo service irqbalance disable"
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ def _setup_irqmapping_ovs(self, queue_number):
+ cmd = "grep 'virtio0-input.0' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'"
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "grep 'virtio0-output.0' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'"
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ if queue_number == 1:
+ return
+
+ for i in range(1, queue_number):
+ cmd = "grep 'virtio0-input.%s' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % (i)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
+ % (1 << i, int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "grep 'virtio0-output.%s' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % (i)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
+ % (1 << i, int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ def _setup_irqmapping_sriov(self, queue_number):
+ cmd = "grep '%s-TxRx-0' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % self.vnic_name
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ if queue_number == 1:
+ return
+
+ for i in range(1, queue_number):
+ cmd = "grep '%s-TxRx-%s' /proc/interrupts |" \
+ "awk '{match($0,/ +[0-9]+/)} " \
+ "{print substr($1,RSTART,RLENGTH-1)}'" % (self.vnic_name, i)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
+ % (1 << i, int(stdout))
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+
+ def _get_sriov_queue_number(self):
+ """Get queue number from server as both VMs are the same"""
+ cmd = "grep %s-TxRx- /proc/interrupts | wc -l" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return int(stdout)
+
+ def _get_available_queue_number(self):
+ """Get queue number from client as both VMs are the same"""
+ cmd = "sudo ethtool -l %s | grep Combined | head -1 |" \
+ "awk '{printf $2}'" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return int(stdout)
+
+ def _get_usable_queue_number(self):
+ """Get queue number from client as both VMs are the same"""
+ cmd = "sudo ethtool -l %s | grep Combined | tail -1 |" \
+ "awk '{printf $2}'" % self.vnic_name
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return int(stdout)
+
+ def _enable_ovs_multiqueue(self):
+ available_queue_number = self._get_available_queue_number()
+ usable_queue_number = self._get_usable_queue_number()
+ if available_queue_number > 1 and \
+ available_queue_number != usable_queue_number:
+ cmd = "sudo ethtool -L %s combined %s" % \
+ (self.vnic_name, available_queue_number)
+ LOG.debug("Executing command: %s", cmd)
+ status, stdout, stderr = self.server.execute(cmd)
+ status, stdout, stderr = self.client.execute(cmd)
+ if status:
+ raise RuntimeError(stderr)
+ return available_queue_number
+
def _iptables_setup(self):
"""Setup iptables on server to monitor for received packets"""
cmd = "sudo iptables -F; " \
@@ -99,6 +313,14 @@ class Pktgen(base.Scenario):
options = self.scenario_cfg['options']
packetsize = options.get("packetsize", 60)
self.number_of_ports = options.get("number_of_ports", 10)
+ self.vnic_name = options.get("vnic_name", "eth0")
+ ovs_dpdk = options.get("ovs_dpdk", False)
+ pps = options.get("pps", 1000000)
+ multiqueue = options.get("multiqueue", False)
+
+ if multiqueue and not self.multiqueue_setup_done:
+ self.multiqueue_setup()
+
# if run by a duration runner
duration_time = self.scenario_cfg["runner"].get("duration", None) \
if "runner" in self.scenario_cfg else None
@@ -114,8 +336,18 @@ class Pktgen(base.Scenario):
self._iptables_setup()
- cmd = "sudo bash pktgen.sh %s %s %s %s" \
- % (ipaddr, self.number_of_ports, packetsize, duration)
+ queue_number = self.queue_number
+
+ # For native OVS, half of vCPUs are used by vhost kernel threads
+ # hence set the queue_number to half number of vCPUs
+ # e.g. set queue_number to 2 if there are 4 vCPUs
+ if self.vnic_type == "ovs" and not ovs_dpdk and self.queue_number > 1:
+            queue_number = self.queue_number // 2
+
+ cmd = "sudo bash pktgen.sh %s %s %s %s %s %s" \
+ % (ipaddr, self.number_of_ports, packetsize,
+ duration, queue_number, pps)
+
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -131,12 +363,15 @@ class Pktgen(base.Scenario):
sent = result['packets_sent']
received = result['packets_received']
ppm = 1000000 * (sent - received) / sent
+        # add 1 ppm when any packets are lost, so 11 lost of 10 million (1.1 ppm) fails a 1 ppm SLA
+ ppm += (sent - received) % sent > 0
+ LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
% (ppm, sla_max_ppm)
-def _test():
+def _test(): # pragma: no cover
"""internal test function"""
key_filename = pkg_resources.resource_filename('yardstick.resources',
'files/yardstick_key')
@@ -165,6 +400,5 @@ def _test():
p.run(result)
print(result)
-
if __name__ == '__main__':
_test()
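
Editor's note: a short sketch (not part of the patch) of the two calculations the pktgen changes rely on: queue i is pinned to CPU i with a one-hot smp_affinity mask, and ppm is bumped by one whenever packets are lost so fractional losses count against the SLA.

    # one-hot CPU mask written to /proc/irq/<irq>/smp_affinity for queue i
    for i in range(4):
        print("queue %d -> smp_affinity mask %x" % (i, 1 << i))   # 1, 2, 4, 8

    # 11 lost out of 10 million is 1.1 ppm and must fail a 1 ppm SLA
    sent, received = 10000000, 10000000 - 11
    ppm = 1000000 * (sent - received) // sent   # integer division, as under Python 2
    ppm += (sent - received) % sent > 0         # +1 because packets were lost
    assert ppm == 2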
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash b/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash
index 4224c5abf..e338a1b09 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash
+++ b/yardstick/benchmark/scenarios/networking/pktgen_benchmark.bash
@@ -16,6 +16,8 @@ DST_IP=$1 # destination IP address
NUM_PORTS=$2 # number of source ports
PKT_SIZE=$3 # packet size
DURATION=$4 # test duration (seconds)
+TRXQUEUE=$5 # number of RX/TX queues to use
+PPS=$6 # packets per second to send
# Configuration
UDP_SRC_MIN=1000 # UDP source port min
@@ -37,62 +39,100 @@ pgset()
fi
}
+# remove all devices from thread
+pgclean()
+{
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ #
+ # Thread commands
+ #
+
+ PGDEV=/proc/net/pktgen/kpktgend_${COUNTER}
+
+ # Remove all devices from this thread
+ pgset "rem_device_all"
+ let COUNTER=COUNTER+1
+ done
+}
+
# configure pktgen (see pktgen doc for details)
pgconfig()
{
- #
- # Thread commands
- #
+ pps=$(( PPS / TRXQUEUE ))
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ #
+ # Thread commands
+ #
- PGDEV=/proc/net/pktgen/kpktgend_0
+ PGDEV=/proc/net/pktgen/kpktgend_${COUNTER}
- # Remove all devices from this thread
- pgset "rem_device_all"
+ # Add device to thread
+ pgset "add_device $DEV@${COUNTER}"
- # Add device to thread
- pgset "add_device $DEV"
+ #
+ # Device commands
+ #
- #
- # Device commands
- #
+ PGDEV=/proc/net/pktgen/$DEV@${COUNTER}
- PGDEV=/proc/net/pktgen/$DEV
+        # 0 means continuous sends until explicitly stopped
+ pgset "count 0"
- # 0 means continious sends untill explicitly stopped
- pgset "count 0"
+        # set an explicit pps rate to test with; if 0, send at full bandwidth
+ if [ ${pps} -gt 0 ]
+ then
+ pgset "ratep ${pps}"
+ fi
- # use single SKB for all transmits
- pgset "clone_skb 0"
+ pgset "clone_skb 10"
- # packet size, NIC adds 4 bytes CRC
- pgset "pkt_size $PKT_SIZE"
+ # use different queue per thread
+ pgset "queue_map_min ${COUNTER}"
+ pgset "queue_map_max ${COUNTER}"
- # random address within the min-max range
- pgset "flag IPDST_RND UDPSRC_RND UDPDST_RND"
+ # packet size, NIC adds 4 bytes CRC
+ pgset "pkt_size $PKT_SIZE"
- # destination IP
- pgset "dst_min $DST_IP"
- pgset "dst_max $DST_IP"
+ # random address within the min-max range
+ pgset "flag UDPDST_RND"
+ pgset "flag UDPSRC_RND"
+ pgset "flag IPDST_RND"
- # destination MAC address
- pgset "dst_mac $MAC"
+ # destination IP
+ pgset "dst_min $DST_IP"
+ pgset "dst_max $DST_IP"
+
+ # destination MAC address
+ pgset "dst_mac $MAC"
+
+ # source UDP port range
+ pgset "udp_src_min $UDP_SRC_MIN"
+ pgset "udp_src_max $UDP_SRC_MAX"
- # source UDP port range
- pgset "udp_src_min $UDP_SRC_MIN"
- pgset "udp_src_max $UDP_SRC_MAX"
+ # destination UDP port range
+ pgset "udp_dst_min $UDP_DST_MIN"
+ pgset "udp_dst_max $UDP_DST_MAX"
- # destination UDP port range
- pgset "udp_dst_min $UDP_DST_MIN"
- pgset "udp_dst_max $UDP_DST_MAX"
+ let COUNTER=COUNTER+1
+
+ done
}
# run pktgen
pgrun()
{
- # Time to run, result can be vieved in /proc/net/pktgen/$DEV
+ # Time to run, result can be viewed in /proc/net/pktgen/$DEV
PGDEV=/proc/net/pktgen/pgctrl
# Will hang, Ctrl-C or SIGINT to stop
pgset "start" start
+
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ taskset -c ${COUNTER} kpktgend_${COUNTER}
+ let COUNTER=COUNTER+1
+ done
}
# run pktgen for ${DURATION} seconds
@@ -111,19 +151,28 @@ run_test()
# write the result to stdout in json format
output_json()
{
- sent=$(awk '/^Result:/{print $5}' <$PGDEV)
- pps=$(awk 'match($0,/'\([0-9]+\)pps'/, a) {print a[1]}' <$PGDEV)
- errors=$(awk '/errors:/{print $5}' <$PGDEV)
+ sent=0
+ result_pps=0
+ errors=0
+ PGDEV=/proc/net/pktgen/$DEV@
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ sent=$(($sent + $(awk '/^Result:/{print $5}' <$PGDEV${COUNTER})))
+ result_pps=$(($result_pps + $(awk 'match($0,/'\([0-9]+\)pps'/, a) {print a[1]}' <$PGDEV${COUNTER})))
+ errors=$(($errors + $(awk '/errors:/{print $5}' <$PGDEV${COUNTER})))
+ let COUNTER=COUNTER+1
+ done
flows=$(( NUM_PORTS * (NUM_PORTS + 1) ))
- echo { '"packets_sent"':$sent , '"packets_per_second"':$pps, '"flows"':$flows, '"errors"':$errors }
+ echo '{ "packets_sent"':${sent} , '"packets_per_second"':${result_pps}, '"flows"':${flows}, '"errors"':${errors} '}'
}
# main entry
main()
{
modprobe pktgen
+ pgclean
ping -c 3 $DST_IP >/dev/null
@@ -137,16 +186,20 @@ main()
pgconfig
# run the test
- run_test >/dev/null
+ run_test
- PGDEV=/proc/net/pktgen/$DEV
+ PGDEV=/proc/net/pktgen/$DEV@
# check result
- result=$(cat $PGDEV | fgrep "Result: OK:")
- if [ "$result" = "" ]; then
- cat $PGDEV | fgrep Result: >/dev/stderr
- exit 1
- fi
+ COUNTER=0
+ while [ ${COUNTER} -lt ${TRXQUEUE} ]; do
+ result=$(cat $PGDEV${COUNTER} | fgrep "Result: OK:")
+ if [ "$result" = "" ]; then
+ cat $PGDEV${COUNTER} | fgrep Result: >/dev/stderr
+ exit 1
+ fi
+ let COUNTER=COUNTER+1
+ done
# output result
output_json
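
Editor's note: a small Python sketch (not part of the patch) of the arithmetic the reworked script performs: the requested PPS is divided evenly across the TX/RX queues, and the per-queue counters read from /proc/net/pktgen/<dev>@<n> are summed back for the JSON result; all numbers are hypothetical.

    PPS, TRXQUEUE, NUM_PORTS = 1000000, 4, 10
    per_queue_pps = PPS // TRXQUEUE                      # "ratep" given to each pktgen thread
    assert per_queue_pps == 250000

    per_queue_sent = [250000, 250000, 249998, 250000]    # hypothetical per-queue results
    packets_sent = sum(per_queue_sent)
    flows = NUM_PORTS * (NUM_PORTS + 1)
    print({"packets_sent": packets_sent, "flows": flows, "errors": 0})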
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index ad34817a7..b99e34270 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -40,10 +40,26 @@ class Fio(base.Scenario):
type: string
unit: na
default: write
+ rwmixwrite - percentage of a mixed workload that should be writes
+ type: int
+ unit: percentage
+ default: 50
ramp_time - run time before logging any performance
type: int
unit: seconds
default: 20
+      direct - whether to use non-buffered I/O or not
+ type: boolean
+ unit: na
+ default: 1
+ size - total size of I/O for this job.
+ type: string
+ unit: na
+ default: 1g
+ numjobs - number of clones (processes/threads performing the same workload) of this job
+ type: int
+ unit: na
+ default: 1
Read link below for more fio args description:
http://www.bluestop.org/fio/HOWTO.txt
@@ -74,8 +90,8 @@ class Fio(base.Scenario):
def run(self, result):
"""execute the benchmark"""
- default_args = "-ioengine=libaio -direct=1 -group_reporting " \
- "-numjobs=1 -time_based --output-format=json"
+ default_args = "-ioengine=libaio -group_reporting -time_based -time_based " \
+ "--output-format=json"
if not self.setup_done:
self.setup()
@@ -86,6 +102,10 @@ class Fio(base.Scenario):
iodepth = options.get("iodepth", "1")
rw = options.get("rw", "write")
ramp_time = options.get("ramp_time", 20)
+ size = options.get("size", "1g")
+ direct = options.get("direct", "1")
+ numjobs = options.get("numjobs", "1")
+ rwmixwrite = options.get("rwmixwrite", 50)
name = "yardstick-fio"
# if run by a duration runner
duration_time = self.scenario_cfg["runner"].get("duration", None) \
@@ -99,10 +119,10 @@ class Fio(base.Scenario):
else:
runtime = 30
- cmd_args = "-filename=%s -bs=%s -iodepth=%s -rw=%s -ramp_time=%s " \
- "-runtime=%s -name=%s %s" \
- % (filename, bs, iodepth, rw, ramp_time, runtime, name,
- default_args)
+ cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
+ "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
+ % (filename, direct, bs, iodepth, rw, rwmixwrite, size, ramp_time, numjobs,
+ runtime, name, default_args)
cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)
LOG.debug("Executing command: %s", cmd)
# Set timeout, so that the cmd execution does not exit incorrectly
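
Editor's note: a hedged sketch of the fio argument string the updated run() builds; the option values are hypothetical, the formatting mirrors the patch.

    options = {"bs": "4k", "iodepth": "32", "rw": "randrw", "rwmixwrite": 30,
               "size": "2g", "direct": "1", "numjobs": "4"}     # hypothetical
    filename, ramp_time, runtime, name = "/dev/vdb", 20, 30, "yardstick-fio"
    default_args = ("-ioengine=libaio -group_reporting -time_based "
                    "--output-format=json")
    cmd_args = "-filename=%s -direct=%s -bs=%s -iodepth=%s -rw=%s -rwmixwrite=%s " \
               "-size=%s -ramp_time=%s -numjobs=%s -runtime=%s -name=%s %s" \
               % (filename, options["direct"], options["bs"], options["iodepth"],
                  options["rw"], options["rwmixwrite"], options["size"], ramp_time,
                  options["numjobs"], runtime, name, default_args)
    cmd = "sudo bash fio.sh %s %s" % (filename, cmd_args)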
diff --git a/yardstick/benchmark/scenarios/storage/storperf.py b/yardstick/benchmark/scenarios/storage/storperf.py
index c10118ad1..f0b2361d6 100644
--- a/yardstick/benchmark/scenarios/storage/storperf.py
+++ b/yardstick/benchmark/scenarios/storage/storperf.py
@@ -87,8 +87,9 @@ class StorPerf(base.Scenario):
def setup(self):
"""Set the configuration."""
env_args = {}
- env_args_payload_list = ["agent_count", "public_network",
- "agent_image", "volume_size"]
+ env_args_payload_list = ["agent_count", "agent_flavor",
+ "public_network", "agent_image",
+ "volume_size"]
for env_argument in env_args_payload_list:
try:
@@ -206,7 +207,7 @@ class StorPerf(base.Scenario):
# terminate_res = requests.delete('http://%s:5000/api/v1.0
# /jobs' % self.target)
# else:
- # time.sleep(int(est_time)/2)
+ # time.sleep(int(esti_time)/2)
result_res = requests.get('http://%s:5000/api/v1.0/jobs?id=%s' %
(self.target, job_id))
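
Editor's note: a minimal sketch of how the extended payload list turns scenario options into the StorPerf environment arguments; the option values are hypothetical.

    options = {"agent_count": 2, "agent_flavor": "storperf.agent",   # hypothetical
               "public_network": "ext-net", "agent_image": "Ubuntu-16.04",
               "volume_size": 4}
    env_args = {}
    for key in ("agent_count", "agent_flavor", "public_network",
                "agent_image", "volume_size"):
        try:
            env_args[key] = options[key]
        except KeyError:
            pass
    # env_args is then passed to StorPerf during setup()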
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 69485a4e4..8e8114fbb 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -77,6 +77,7 @@ INFLUXDB_PASS = get_param('influxdb.password', 'root')
INFLUXDB_DB_NAME = get_param('influxdb.db_name', 'yardstick')
INFLUXDB_IMAGE = get_param('influxdb.image', 'tutum/influxdb')
INFLUXDB_TAG = get_param('influxdb.tag', '0.13')
+INFLUXDB_DASHBOARD_PORT = 8083
# grafana
GRAFANA_IP = get_param('grafana.ip', SERVER_IP)
@@ -85,6 +86,7 @@ GRAFANA_USER = get_param('grafana.username', 'admin')
GRAFANA_PASS = get_param('grafana.password', 'admin')
GRAFANA_IMAGE = get_param('grafana.image', 'grafana/grafana')
GRAFANA_TAG = get_param('grafana.tag', '3.1.1')
+GRAFANA_MAPPING_PORT = 1948
# api
API_PORT = 5000
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 92bb7b7d3..7a64b8ca2 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -172,7 +172,15 @@ def write_file(path, data, mode='w'):
def parse_ini_file(path):
parser = configparser.ConfigParser()
- parser.read(path)
+
+ try:
+ files = parser.read(path)
+ except configparser.MissingSectionHeaderError:
+ logger.exception('invalid file type')
+ raise
+ else:
+ if not files:
+            raise RuntimeError('file does not exist')
try:
default = {k: v for k, v in parser.items('DEFAULT')}
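
Editor's note: a hedged sketch (not part of the patch) of the new failure modes of parse_ini_file; the path below is hypothetical.

    import configparser

    parser = configparser.ConfigParser()
    # ConfigParser.read() returns the list of files it actually parsed;
    # an empty list now raises instead of silently yielding an empty config
    files = parser.read("/tmp/does_not_exist.ini")      # hypothetical path
    if not files:
        raise RuntimeError('file does not exist')
    # a file without any [section] header raises MissingSectionHeaderError,
    # which parse_ini_file() logs and re-raises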
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index 1d770f724..2df6037f3 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -96,7 +96,6 @@ class GenericVNF(object):
return address.version
def _ip_to_hex(self, ip_addr):
- ip_to_convert = ip_addr.split(".")
ip_x = ip_addr
if self.get_ip_version(ip_addr) == 4:
ip_to_convert = ip_addr.split(".")
diff --git a/yardstick/network_services/vnf_generic/vnfdgen.py b/yardstick/network_services/vnf_generic/vnfdgen.py
index 40cc14a49..b56a91915 100644
--- a/yardstick/network_services/vnf_generic/vnfdgen.py
+++ b/yardstick/network_services/vnf_generic/vnfdgen.py
@@ -48,7 +48,7 @@ def generate_vnfd(vnf_model, node):
rendered_vnfd = render(vnf_model, **node)
# This is done to get rid of issues with serializing node
del node["get"]
- filled_vnfd = yaml.load(rendered_vnfd)
+ filled_vnfd = yaml.safe_load(rendered_vnfd)
return filled_vnfd
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 2a907d124..57b23d393 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -231,13 +231,16 @@ name (i.e. %s).\
}
def add_network(self, name, physical_network='physnet1', provider=None,
- segmentation_id=None):
+ segmentation_id=None, port_security_enabled=True):
"""add to the template a Neutron Net"""
log.debug("adding Neutron::Net '%s'", name)
if provider is None:
self.resources[name] = {
'type': 'OS::Neutron::Net',
- 'properties': {'name': name}
+ 'properties': {
+ 'name': name,
+ 'port_security_enabled': port_security_enabled,
+ }
}
else:
self.resources[name] = {
@@ -245,12 +248,12 @@ name (i.e. %s).\
'properties': {
'name': name,
'network_type': 'vlan',
- 'physical_network': physical_network
- }
+ 'physical_network': physical_network,
+ 'port_security_enabled': port_security_enabled,
+ },
}
if segmentation_id:
- seg_id_dit = {'segmentation_id': segmentation_id}
- self.resources[name]["properties"].update(seg_id_dit)
+ self.resources[name]['properties']['segmentation_id'] = segmentation_id
def add_server_group(self, name, policies): # pragma: no cover
"""add to the template a ServerGroup"""
@@ -262,8 +265,9 @@ name (i.e. %s).\
'policies': policies}
}
- def add_subnet(self, name, network, cidr):
- """add to the template a Neutron Subnet"""
+ def add_subnet(self, name, network, cidr, enable_dhcp='true', gateway_ip=None):
+ """add to the template a Neutron Subnet
+ """
log.debug("adding Neutron::Subnet '%s' in network '%s', cidr '%s'",
name, network, cidr)
self.resources[name] = {
@@ -272,9 +276,12 @@ name (i.e. %s).\
'properties': {
'name': name,
'cidr': cidr,
- 'network_id': {'get_resource': network}
+ 'network_id': {'get_resource': network},
+ 'enable_dhcp': enable_dhcp,
}
}
+ if gateway_ip is not None:
+ self.resources[name]['properties']['gateway_ip'] = gateway_ip
self._template['outputs'][name] = {
'description': 'subnet %s ID' % name,
@@ -316,9 +323,10 @@ name (i.e. %s).\
}
}
- def add_port(self, name, network_name, subnet_name, sec_group_id=None,
- provider=None):
- """add to the template a named Neutron Port"""
+ def add_port(self, name, network_name, subnet_name, sec_group_id=None, provider=None,
+ allowed_address_pairs=None):
+ """add to the template a named Neutron Port
+ """
log.debug("adding Neutron::Port '%s', network:'%s', subnet:'%s', "
"secgroup:%s", name, network_name, subnet_name, sec_group_id)
self.resources[name] = {
@@ -341,6 +349,10 @@ name (i.e. %s).\
self.resources[name]['properties']['security_groups'] = \
[sec_group_id]
+ if allowed_address_pairs:
+ self.resources[name]['properties'][
+ 'allowed_address_pairs'] = allowed_address_pairs
+
self._template['outputs'][name] = {
'description': 'Address for interface %s' % name,
'value': {'get_attr': [name, 'fixed_ips', 0, 'ip_address']}
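
Editor's note: a hedged sketch of how the extended Heat template helpers could be called; 'template' stands for the orchestrator's template object and all argument values are illustrative, not taken from the patch.

    # disable port security on the network, pin the subnet gateway,
    # and allow extra address pairs on the port
    template.add_network("test-net", port_security_enabled=False)
    template.add_subnet("test-subnet", "test-net", "10.0.1.0/24",
                        enable_dhcp='true', gateway_ip="10.0.1.1")
    template.add_port("test-port", "test-net", "test-subnet",
                      allowed_address_pairs=[{"ip_address": "10.0.1.0/24"}])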