Diffstat (limited to 'yardstick')
-rw-r--r--  yardstick/benchmark/contexts/base.py | 29
-rw-r--r--  yardstick/benchmark/contexts/heat.py | 11
-rw-r--r--  yardstick/benchmark/contexts/kubernetes.py | 3
-rw-r--r--  yardstick/benchmark/contexts/node.py | 2
-rw-r--r--  yardstick/benchmark/contexts/standalone/model.py | 44
-rw-r--r--  yardstick/benchmark/contexts/standalone/ovs_dpdk.py | 9
-rw-r--r--  yardstick/benchmark/contexts/standalone/sriov.py | 15
-rw-r--r--  yardstick/benchmark/core/task.py | 62
-rwxr-xr-x  yardstick/benchmark/runners/arithmetic.py | 11
-rw-r--r--  yardstick/benchmark/runners/duration.py | 7
-rwxr-xr-x  yardstick/benchmark/runners/dynamictp.py | 7
-rw-r--r--  yardstick/benchmark/runners/iteration.py | 7
-rw-r--r--  yardstick/benchmark/runners/search.py | 9
-rw-r--r--  yardstick/benchmark/runners/sequence.py | 9
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/attacker_process.py | 15
-rw-r--r--  yardstick/benchmark/scenarios/availability/attacker/baseattacker.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/director.py | 22
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash | 13
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/basemonitor.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_command.py | 12
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py | 5
-rw-r--r--  yardstick/benchmark/scenarios/availability/monitor/monitor_process.py | 10
-rw-r--r--  yardstick/benchmark/scenarios/availability/scenario_general.py | 34
-rwxr-xr-x  yardstick/benchmark/scenarios/availability/serviceha.py | 34
-rw-r--r--  yardstick/benchmark/scenarios/base.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/compute/cyclictest.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/compute/lmbench.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/compute/perf.py | 20
-rw-r--r--  yardstick/benchmark/scenarios/compute/qemu_migrate.py | 4
-rw-r--r--  yardstick/benchmark/scenarios/compute/ramspeed.py | 6
-rw-r--r--  yardstick/benchmark/scenarios/compute/unixbench.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash | 2
-rw-r--r--  yardstick/benchmark/scenarios/lib/attach_volume.py | 35
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_image.py | 71
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_keypair.py | 49
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_sec_group.py | 40
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_server.py | 74
-rw-r--r--  yardstick/benchmark/scenarios/lib/create_volume.py | 49
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_image.py | 36
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_keypair.py | 29
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_network.py | 24
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_server.py | 33
-rw-r--r--  yardstick/benchmark/scenarios/lib/delete_volume.py | 30
-rw-r--r--  yardstick/benchmark/scenarios/lib/detach_volume.py | 33
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_flavor.py | 37
-rw-r--r--  yardstick/benchmark/scenarios/lib/get_server.py | 84
-rw-r--r--  yardstick/benchmark/scenarios/networking/iperf3.py | 17
-rw-r--r--  yardstick/benchmark/scenarios/networking/moongen_testpmd.py | 7
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf.py | 6
-rwxr-xr-x  yardstick/benchmark/scenarios/networking/netperf_node.py | 7
-rw-r--r--  yardstick/benchmark/scenarios/networking/nstat.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping.py | 20
-rw-r--r--  yardstick/benchmark/scenarios/networking/ping6.py | 13
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen.py | 46
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk.py | 2
-rw-r--r--  yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py | 8
-rw-r--r--  yardstick/benchmark/scenarios/networking/vnf_generic.py | 49
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf.py | 14
-rw-r--r--  yardstick/benchmark/scenarios/networking/vsperf_dpdk.py | 16
-rw-r--r--  yardstick/benchmark/scenarios/storage/fio.py | 2
-rw-r--r--  yardstick/common/ansible_common.py | 2
-rw-r--r--  yardstick/common/constants.py | 18
-rw-r--r--  yardstick/common/exceptions.py | 149
-rw-r--r--  yardstick/common/kubernetes_utils.py | 13
-rw-r--r--  yardstick/common/messaging/__init__.py | 36
-rw-r--r--  yardstick/common/messaging/consumer.py | 85
-rw-r--r--  yardstick/common/messaging/payloads.py | 53
-rw-r--r--  yardstick/common/messaging/producer.py | 70
-rw-r--r--  yardstick/common/openstack_utils.py | 739
-rw-r--r--  yardstick/common/utils.py | 61
-rw-r--r--  yardstick/dispatcher/__init__.py | 9
-rw-r--r--  yardstick/error.py | 48
-rw-r--r--  yardstick/network_services/collector/subscriber.py | 40
-rw-r--r--  yardstick/network_services/helpers/dpdkbindnic_helper.py | 28
-rw-r--r--  yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py | 344
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/__init__.py (renamed from yardstick/network_services/libs/ixia_libs/IxNet/__init__.py) | 0
-rw-r--r--  yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py | 470
-rw-r--r--  yardstick/network_services/nfvi/resource.py | 61
-rw-r--r--  yardstick/network_services/traffic_profile/base.py | 28
-rw-r--r--  yardstick/network_services/traffic_profile/http_ixload.py | 23
-rw-r--r--  yardstick/network_services/traffic_profile/ixia_rfc2544.py | 108
-rw-r--r--  yardstick/network_services/traffic_profile/prox_binsearch.py | 33
-rw-r--r--  yardstick/network_services/traffic_profile/rfc2544.py | 398
-rw-r--r--  yardstick/network_services/traffic_profile/trex_traffic_profile.py | 122
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/acl_vnf.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/base.py | 32
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py | 4
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_helpers.py | 46
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/prox_vnf.py | 24
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/sample_vnf.py | 160
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py | 96
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py | 87
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/tg_trex.py | 26
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vfw_vnf.py | 2
-rw-r--r--  yardstick/network_services/vnf_generic/vnf/vpe_vnf.py | 8
-rw-r--r--  yardstick/orchestrator/heat.py | 17
-rw-r--r--  yardstick/orchestrator/kubernetes.py | 2
-rw-r--r--  yardstick/ssh.py | 49
-rw-r--r--  yardstick/tests/functional/common/messaging/__init__.py | 0
-rw-r--r--  yardstick/tests/functional/common/messaging/test_messaging.py | 99
-rw-r--r--  yardstick/tests/integration/dummy-scenario-heat-context.yaml | 7
-rw-r--r--  yardstick/tests/unit/apiserver/resources/v2/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/apiserver/resources/v2/test_images.py | 46
-rw-r--r--  yardstick/tests/unit/apiserver/utils/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/apiserver/utils/test_influx.py | 86
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_model.py | 80
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py | 33
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_base.py | 56
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_dummy.py | 8
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_heat.py | 41
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_kubernetes.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/contexts/test_node.py | 9
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_report.py | 10
-rw-r--r--  yardstick/tests/unit/benchmark/core/test_task.py | 11
-rw-r--r--  yardstick/tests/unit/benchmark/runner/test_search.py | 50
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py | 18
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py | 44
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py | 36
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py | 7
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py | 7
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py | 5
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py | 52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py | 62
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py | 56
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py | 67
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py | 63
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py | 120
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py | 51
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py | 48
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py | 23
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py | 51
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py | 49
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py | 53
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py | 52
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py | 69
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py | 5
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py | 3
-rwxr-xr-x  yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py | 3
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py | 17
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py | 199
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py | 45
-rw-r--r--  yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py | 5
-rw-r--r--  yardstick/tests/unit/common/messaging/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/common/messaging/test_consumer.py | 54
-rw-r--r--  yardstick/tests/unit/common/messaging/test_payloads.py | 46
-rw-r--r--  yardstick/tests/unit/common/messaging/test_producer.py | 46
-rw-r--r--  yardstick/tests/unit/common/test_exceptions.py | 28
-rw-r--r--  yardstick/tests/unit/common/test_openstack_utils.py | 465
-rw-r--r--  yardstick/tests/unit/common/test_packages.py | 88
-rw-r--r--  yardstick/tests/unit/common/test_utils.py | 81
-rw-r--r--  yardstick/tests/unit/network_services/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/collector/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_publisher.py | 36
-rw-r--r--  yardstick/tests/unit/network_services/collector/test_subscriber.py | 78
-rw-r--r--  yardstick/tests/unit/network_services/helpers/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml | 50
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_cpu.py | 121
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py | 632
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_iniparser.py | 223
-rw-r--r--  yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py | 1104
-rw-r--r--  yardstick/tests/unit/network_services/libs/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py | 512
-rw-r--r--  yardstick/tests/unit/network_services/nfvi/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/nfvi/test_collectd.py | 150
-rw-r--r--  yardstick/tests/unit/network_services/nfvi/test_resource.py | 298
-rw-r--r--  yardstick/tests/unit/network_services/test_utils.py | 141
-rw-r--r--  yardstick/tests/unit/network_services/test_yang_model.py | 129
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_base.py | 88
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_fixed.py | 117
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_http.py | 39
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py | 269
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py | 623
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py | 74
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py | 184
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py | 128
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py | 95
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py | 288
-rw-r--r--  yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py | 277
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py | 277
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/__init__.py | 0
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml | 47
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml | 41
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py | 372
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py | 236
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py | 455
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py | 2335
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py | 455
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py | 260
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py | 1949
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py | 321
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py | 292
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py | 426
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py | 374
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py | 293
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py | 505
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py | 464
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py | 374
-rw-r--r--  yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py | 799
-rw-r--r--  yardstick/tests/unit/orchestrator/test_heat.py | 19
-rw-r--r--  yardstick/tests/unit/orchestrator/test_kubernetes.py | 2
-rw-r--r--  yardstick/tests/unit/service/test_environment.py | 20
-rw-r--r--  yardstick/tests/unit/test_cmd/test_NSBperf.py | 50
-rw-r--r--  yardstick/tests/unit/test_ssh.py | 48
214 files changed, 20731 insertions, 2686 deletions
diff --git a/yardstick/benchmark/contexts/base.py b/yardstick/benchmark/contexts/base.py
index ae8319e37..64cee8376 100644
--- a/yardstick/benchmark/contexts/base.py
+++ b/yardstick/benchmark/contexts/base.py
@@ -6,17 +6,20 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
import abc
import six
-import yardstick.common.utils as utils
+from yardstick.common import constants
+from yardstick.common import utils
class Flags(object):
"""Class to represent the status of the flags in a context"""
_FLAGS = {'no_setup': False,
- 'no_teardown': False}
+ 'no_teardown': False,
+ 'os_cloud_config': constants.OS_CLOUD_DEFAULT_CONFIG}
def __init__(self, **kwargs):
for name, value in self._FLAGS.items():
@@ -42,20 +45,12 @@ class Context(object):
list = []
SHORT_TASK_ID_LEN = 8
- @staticmethod
- def split_name(name, sep='.'):
- try:
- name_iter = iter(name.split(sep))
- except AttributeError:
- # name is not a string
- return None, None
- return next(name_iter), next(name_iter, None)
-
- def __init__(self):
+ def __init__(self, host_name_separator='.'):
Context.list.append(self)
self._flags = Flags()
self._name = None
self._task_id = None
+ self._host_name_separator = host_name_separator
def init(self, attrs):
"""Initiate context"""
@@ -65,6 +60,12 @@ class Context(object):
self._name_task_id = '{}-{}'.format(
self._name, self._task_id[:self.SHORT_TASK_ID_LEN])
+ def split_host_name(self, name):
+ if (isinstance(name, six.string_types)
+ and self._host_name_separator in name):
+ return tuple(name.split(self._host_name_separator, 1))
+ return None, None
+
@property
def name(self):
if self._flags.no_setup or self._flags.no_teardown:
@@ -76,6 +77,10 @@ class Context(object):
def assigned_name(self):
return self._name
+ @property
+ def host_name_separator(self):
+ return self._host_name_separator
+
@staticmethod
def get_cls(context_type):
"""Return class of specified type."""
diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py
index 0d1dfb86f..cc87176d5 100644
--- a/yardstick/benchmark/contexts/heat.py
+++ b/yardstick/benchmark/contexts/heat.py
@@ -7,9 +7,6 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
-from __future__ import print_function
-
import collections
import logging
import os
@@ -328,8 +325,10 @@ class HeatContext(Context):
if not os.path.exists(self.key_filename):
SSH.gen_keys(self.key_filename)
- heat_template = HeatTemplate(self.name, self.template_file,
- self.heat_parameters)
+ heat_template = HeatTemplate(
+ self.name, template_file=self.template_file,
+ heat_parameters=self.heat_parameters,
+ os_cloud_config=self._flags.os_cloud_config)
if self.template_file is None:
self._add_resources_to_template(heat_template)
@@ -466,7 +465,7 @@ class HeatContext(Context):
with attribute name mapping when using external heat templates
"""
if isinstance(attr_name, collections.Mapping):
- node_name, cname = self.split_name(attr_name['name'])
+ node_name, cname = self.split_host_name(attr_name['name'])
if cname is None or cname != self.name:
return None
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index 4bea991ea..82435d40c 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -33,8 +33,7 @@ class KubernetesContext(Context):
self.key_path = ''
self.public_key_path = ''
self.template = None
-
- super(KubernetesContext, self).__init__()
+ super(KubernetesContext, self).__init__(host_name_separator='-')
def init(self, attrs):
super(KubernetesContext, self).init(attrs)
diff --git a/yardstick/benchmark/contexts/node.py b/yardstick/benchmark/contexts/node.py
index fa619a9aa..93888ef41 100644
--- a/yardstick/benchmark/contexts/node.py
+++ b/yardstick/benchmark/contexts/node.py
@@ -139,7 +139,7 @@ class NodeContext(Context):
"""lookup server info by name from context
attr_name: a name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py
index f18d090d8..320c61c92 100644
--- a/yardstick/benchmark/contexts/standalone/model.py
+++ b/yardstick/benchmark/contexts/standalone/model.py
@@ -45,7 +45,7 @@ VM_TEMPLATE = """
<vcpu cpuset='{cpuset}'>{vcpu}</vcpu>
{cputune}
<os>
- <type arch="x86_64" machine="pc-i440fx-utopic">hvm</type>
+ <type arch="x86_64" machine="pc-i440fx-xenial">hvm</type>
<boot dev="hd" />
</os>
<features>
@@ -232,14 +232,40 @@ class Libvirt(object):
return ET.tostring(root)
@staticmethod
- def create_snapshot_qemu(connection, index, vm_image):
- # build snapshot image
- image = "/var/lib/libvirt/images/%s.qcow2" % index
- connection.execute("rm %s" % image)
- qemu_template = "qemu-img create -f qcow2 -o backing_file=%s %s"
- connection.execute(qemu_template % (vm_image, image))
-
- return image
+ def create_snapshot_qemu(connection, index, base_image):
+ """Create the snapshot image for a VM using a base image
+
+ :param connection: SSH connection to the remote host
+ :param index: index of the VM to be spawn
+ :param base_image: path of the VM base image in the remote host
+ :return: snapshot image path
+ """
+ vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+ connection.execute('rm -- "%s"' % vm_image)
+ status, _, _ = connection.execute('test -r %s' % base_image)
+ if status:
+ if not os.access(base_image, os.R_OK):
+ raise exceptions.LibvirtQemuImageBaseImageNotPresent(
+ vm_image=vm_image, base_image=base_image)
+ # NOTE(ralonsoh): done in two steps to avoid root permission
+ # issues.
+ LOG.info('Copy %s from execution host to remote host', base_image)
+ file_name = os.path.basename(os.path.normpath(base_image))
+ connection.put_file(base_image, '/tmp/%s' % file_name)
+ status, _, error = connection.execute(
+ 'mv -- "/tmp/%s" "%s"' % (file_name, base_image))
+ if status:
+ raise exceptions.LibvirtQemuImageCreateError(
+ vm_image=vm_image, base_image=base_image, error=error)
+
+ LOG.info('Convert image %s to %s', base_image, vm_image)
+ qemu_cmd = ('qemu-img create -f qcow2 -o backing_file=%s %s' %
+ (base_image, vm_image))
+ status, _, error = connection.execute(qemu_cmd)
+ if status:
+ raise exceptions.LibvirtQemuImageCreateError(
+ vm_image=vm_image, base_image=base_image, error=error)
+ return vm_image
@classmethod
def build_vm_xml(cls, connection, flavor, vm_name, index):
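
[Editor's note] A condensed sketch of the control flow create_snapshot_qemu() now follows: if the base image is not readable on the remote host, copy it over from the execution host first, then build the qcow2 snapshot backed by it. FakeConnection is a hypothetical stand-in for the SSH connection object, used only to make the sketch runnable.

import os


class FakeConnection(object):
    def execute(self, cmd):
        print('remote$ %s' % cmd)
        return 0, '', ''            # (status, stdout, stderr)

    def put_file(self, src, dst):
        print('scp %s -> %s' % (src, dst))


def create_snapshot(connection, index, base_image):
    vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
    connection.execute('rm -- "%s"' % vm_image)
    status, _, _ = connection.execute('test -r %s' % base_image)
    if status:                       # base image missing on the remote host
        if not os.access(base_image, os.R_OK):
            raise RuntimeError('base image %s not readable locally' % base_image)
        # done in two steps (put_file to /tmp, then mv) to avoid root
        # permission issues, as in the patch above
        file_name = os.path.basename(os.path.normpath(base_image))
        connection.put_file(base_image, '/tmp/%s' % file_name)
        connection.execute('mv -- "/tmp/%s" "%s"' % (file_name, base_image))
    connection.execute('qemu-img create -f qcow2 -o backing_file=%s %s'
                       % (base_image, vm_image))
    return vm_image


print(create_snapshot(FakeConnection(), 0, '/var/lib/images/yardstick.img'))
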
diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
index 30b685eec..8a1482c07 100644
--- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
+++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py
@@ -20,6 +20,7 @@ import re
import time
from yardstick import ssh
+from yardstick.network_services.utils import get_nsb_option
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.standalone import model
from yardstick.common import exceptions
@@ -55,7 +56,8 @@ class OvsDpdkContext(Context):
self.file_path = None
self.sriov = []
self.first_run = True
- self.dpdk_devbind = ''
+ self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
+ 'dpdk-devbind.py')
self.vm_names = []
self.nfvi_host = []
self.nodes = []
@@ -260,9 +262,6 @@ class OvsDpdkContext(Context):
return
self.connection = ssh.SSH.from_node(self.host_mgmt)
- self.dpdk_devbind = utils.provision_tool(
- self.connection,
- os.path.join(utils.get_nsb_option('bin_path'), 'dpdk-devbind.py'))
# Check dpdk/ovs version, if not present install
self.check_ovs_dpdk_env()
@@ -305,7 +304,7 @@ class OvsDpdkContext(Context):
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py
index 5db419e6a..e9f83b217 100644
--- a/yardstick/benchmark/contexts/standalone/sriov.py
+++ b/yardstick/benchmark/contexts/standalone/sriov.py
@@ -19,7 +19,6 @@ import collections
from yardstick import ssh
from yardstick.network_services.utils import get_nsb_option
-from yardstick.network_services.utils import provision_tool
from yardstick.benchmark.contexts.base import Context
from yardstick.benchmark.contexts.standalone import model
from yardstick.network_services.utils import PciAddress
@@ -38,7 +37,8 @@ class SriovContext(Context):
self.file_path = None
self.sriov = []
self.first_run = True
- self.dpdk_devbind = ''
+ self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
+ 'dpdk-devbind.py')
self.vm_names = []
self.nfvi_host = []
self.nodes = []
@@ -79,9 +79,6 @@ class SriovContext(Context):
return
self.connection = ssh.SSH.from_node(self.host_mgmt)
- self.dpdk_devbind = provision_tool(
- self.connection,
- os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py"))
# Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config.
model.StandaloneContextHelper.install_req_libs(self.connection)
@@ -115,7 +112,7 @@ class SriovContext(Context):
Keyword arguments:
attr_name -- A name for a server listed in nodes config file
"""
- node_name, name = self.split_name(attr_name)
+ node_name, name = self.split_host_name(attr_name)
if name is None or self.name != name:
return None
@@ -197,10 +194,10 @@ class SriovContext(Context):
slot = index + idx + 10
vf['vpci'] = \
"{}:{}:{:02x}.{}".format(vpci.domain, vpci.bus, slot, vpci.function)
- model.Libvirt.add_sriov_interfaces(
- vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg))
self.connection.execute("ifconfig %s up" % vf['interface'])
self.connection.execute(vf_spoofchk.format(vf['interface']))
+ return model.Libvirt.add_sriov_interfaces(
+ vf['vpci'], vf['vf_pci']['vf_pci'], vf['mac'], str(cfg))
def setup_sriov_context(self):
nodes = []
@@ -223,7 +220,7 @@ class SriovContext(Context):
network_ports = collections.OrderedDict(
{k: v for k, v in vnf["network_ports"].items() if k != 'mgmt'})
for idx, vfs in enumerate(network_ports.values()):
- self._enable_interfaces(index, idx, vfs, cfg)
+ xml_str = self._enable_interfaces(index, idx, vfs, xml_str)
# copy xml to target...
model.Libvirt.write_file(cfg, xml_str)
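
[Editor's note] The _enable_interfaces() change above matters because Python strings are immutable: add_sriov_interfaces() returns a new XML string, so setup_sriov_context() has to capture the return value on every iteration and write the file once at the end. A minimal illustration of that accumulation pattern, with add_interface() as a hypothetical stand-in for model.Libvirt.add_sriov_interfaces():

def add_interface(xml_str, vpci):
    # returns a *new* string instead of mutating anything in place
    return xml_str.replace('</devices>',
                           '<hostdev vpci="%s"/></devices>' % vpci)


xml_str = '<domain><devices></devices></domain>'
for vpci in ('0000:00:0a.0', '0000:00:0b.0'):
    xml_str = add_interface(xml_str, vpci)   # capture the returned XML
print(xml_str)
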
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 955b8cae2..f050e8d0f 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -112,9 +112,9 @@ class Task(object): # pragma: no cover
continue
try:
- data = self._run(tasks[i]['scenarios'],
- tasks[i]['run_in_parallel'],
- output_config)
+ success, data = self._run(tasks[i]['scenarios'],
+ tasks[i]['run_in_parallel'],
+ output_config)
except KeyboardInterrupt:
raise
except Exception: # pylint: disable=broad-except
@@ -123,9 +123,15 @@ class Task(object): # pragma: no cover
testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
'tc_data': []}
else:
- LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
- testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
- 'tc_data': data}
+ if success:
+ LOG.info('Testcase: "%s" SUCCESS!!!', tasks[i]['case_name'])
+ testcases[tasks[i]['case_name']] = {'criteria': 'PASS',
+ 'tc_data': data}
+ else:
+ LOG.error('Testcase: "%s" FAILED!!!', tasks[i]['case_name'],
+ exc_info=True)
+ testcases[tasks[i]['case_name']] = {'criteria': 'FAIL',
+ 'tc_data': data}
if args.keep_deploy:
# keep deployment, forget about stack
@@ -240,6 +246,7 @@ class Task(object): # pragma: no cover
background_runners = []
+ task_success = True
result = []
# Start all background scenarios
for scenario in filter(_is_background_scenario, scenarios):
@@ -258,8 +265,8 @@ class Task(object): # pragma: no cover
for runner in runners:
status = runner_join(runner, background_runners, self.outputs, result)
if status != 0:
- raise RuntimeError(
- "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.error("%s runner status %s", runner.__execution_type__, status)
+ task_success = False
LOG.info("Runner ended")
else:
# run serially
@@ -271,8 +278,8 @@ class Task(object): # pragma: no cover
LOG.error('Scenario NO.%s: "%s" ERROR!',
scenarios.index(scenario) + 1,
scenario.get('type'))
- raise RuntimeError(
- "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.error("%s runner status %s", runner.__execution_type__, status)
+ task_success = False
LOG.info("Runner ended")
# Abort background runners
@@ -289,7 +296,7 @@ class Task(object): # pragma: no cover
base_runner.Runner.release(runner)
print("Background task ended")
- return result
+ return task_success, result
def atexit_handler(self):
"""handler for process termination"""
@@ -612,27 +619,22 @@ class TaskParser(object): # pragma: no cover
nodes:
tg__0: tg_0.yardstick
vnf__0: vnf_0.yardstick
+
+ NOTE: in Kubernetes context, the separator character between the server
+ name and the context name is "-":
+ scenario:
+ host: host-k8s
+ target: target-k8s
"""
def qualified_name(name):
- try:
- # for openstack
- node_name, context_name = name.split('.')
- sep = '.'
- except ValueError:
- # for kubernetes, some kubernetes resources don't support
- # name format like 'xxx.xxx', so we use '-' instead
- # need unified later
- node_name, context_name = name.split('-')
- sep = '-'
-
- try:
- ctx = next((context for context in contexts
- if context.assigned_name == context_name))
- except StopIteration:
- raise y_exc.ScenarioConfigContextNameNotFound(
- context_name=context_name)
-
- return '{}{}{}'.format(node_name, sep, ctx.name)
+ for context in contexts:
+ host_name, ctx_name = context.split_host_name(name)
+ if context.assigned_name == ctx_name:
+ return '{}{}{}'.format(host_name,
+ context.host_name_separator,
+ context.name)
+
+ raise y_exc.ScenarioConfigContextNameNotFound(host_name=name)
if 'host' in scenario:
scenario['host'] = qualified_name(scenario['host'])
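
[Editor's note] A standalone sketch of the new qualified_name() resolution: each context splits the server name with its own separator, and the first context whose assigned name matches wins. MiniContext is a hypothetical stand-in; the real lookup raises ScenarioConfigContextNameNotFound when no context matches.

class MiniContext(object):
    def __init__(self, assigned_name, name, sep):
        self.assigned_name = assigned_name      # context name from the task file
        self.name = name                        # name with the short task-id suffix
        self.host_name_separator = sep

    def split_host_name(self, host):
        if self.host_name_separator in host:
            return tuple(host.split(self.host_name_separator, 1))
        return None, None


contexts = [MiniContext('yardstick', 'yardstick-a1b2c3d4', '.'),
            MiniContext('k8s', 'k8s-a1b2c3d4', '-')]


def qualified_name(name):
    for context in contexts:
        host_name, ctx_name = context.split_host_name(name)
        if context.assigned_name == ctx_name:
            return '{}{}{}'.format(host_name, context.host_name_separator,
                                   context.name)
    raise ValueError('context not found for %s' % name)


print(qualified_name('vnf__0.yardstick'))   # vnf__0.yardstick-a1b2c3d4
print(qualified_name('host-k8s'))           # host-k8s-a1b2c3d4
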
diff --git a/yardstick/benchmark/runners/arithmetic.py b/yardstick/benchmark/runners/arithmetic.py
index 6aaaed888..ecb59f960 100755
--- a/yardstick/benchmark/runners/arithmetic.py
+++ b/yardstick/benchmark/runners/arithmetic.py
@@ -37,6 +37,7 @@ import six
from six.moves import range
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -86,7 +87,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
loop_iter = six.moves.zip(*param_iters)
else:
LOG.warning("iter_type unrecognized: %s", iter_type)
- raise TypeError("iter_type unrecognized: %s", iter_type)
+ raise TypeError("iter_type unrecognized: %s" % iter_type)
# Populate options and run the requested method for each value combination
for comb_values in loop_iter:
@@ -105,14 +106,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
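
[Editor's note] The same try/except rewrite is repeated in the duration, dynamictp, iteration, search and sequence runners below. A reduced sketch of the shared SLA-handling shape (SLAValidationError here is a local stand-in for yardstick.common.exceptions.SLAValidationError):

import logging
import traceback

LOG = logging.getLogger(__name__)


class SLAValidationError(Exception):
    pass


def run_once(method, data, sla_action):
    errors = ''
    try:
        result = method(data)
    except SLAValidationError as error:
        # SLA validation failed in the scenario; behaviour depends on sla_action
        if sla_action == 'assert':
            raise                       # abort the run
        elif sla_action == 'monitor':
            LOG.warning('SLA validation failed: %s', error.args)
            errors = error.args         # record the failure and keep iterating
        result = None
    except Exception as e:              # pylint: disable=broad-except
        errors = traceback.format_exc()
        LOG.exception(e)
        result = None
    return result, errors


print(run_once(lambda d: {'ok': True}, {}, 'monitor'))   # ({'ok': True}, '')
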
diff --git a/yardstick/benchmark/runners/duration.py b/yardstick/benchmark/runners/duration.py
index 60b0348c3..60f1fa536 100644
--- a/yardstick/benchmark/runners/duration.py
+++ b/yardstick/benchmark/runners/duration.py
@@ -27,6 +27,7 @@ import traceback
import time
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -70,13 +71,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
# catch all exceptions because with multiprocessing we can have un-picklable exception
# problems https://bugs.python.org/issue9400
except Exception: # pylint: disable=broad-except
diff --git a/yardstick/benchmark/runners/dynamictp.py b/yardstick/benchmark/runners/dynamictp.py
index 63bfc823a..88d3c5704 100755
--- a/yardstick/benchmark/runners/dynamictp.py
+++ b/yardstick/benchmark/runners/dynamictp.py
@@ -27,6 +27,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -80,10 +81,10 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
method(data)
- except AssertionError as assertion:
- LOG.warning("SLA validation failed: %s" % assertion.args)
+ except y_exc.SLAValidationError as error:
+ LOG.warning("SLA validation failed: %s", error.args)
too_high = True
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/iteration.py b/yardstick/benchmark/runners/iteration.py
index 20d6da054..4c88f3671 100644
--- a/yardstick/benchmark/runners/iteration.py
+++ b/yardstick/benchmark/runners/iteration.py
@@ -29,6 +29,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -75,13 +76,13 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
elif sla_action == "rate-control":
try:
scenario_cfg['options']['rate']
diff --git a/yardstick/benchmark/runners/search.py b/yardstick/benchmark/runners/search.py
index 8037329b5..01a4292c7 100644
--- a/yardstick/benchmark/runners/search.py
+++ b/yardstick/benchmark/runners/search.py
@@ -33,6 +33,7 @@ from collections import Mapping
from six.moves import zip
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -119,14 +120,14 @@ If the scenario ends before the time has elapsed, it will be started again.
try:
self.worker_helper(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if self.sla_action == "assert":
raise
elif self.sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
diff --git a/yardstick/benchmark/runners/sequence.py b/yardstick/benchmark/runners/sequence.py
index d6e3f7109..0148a45b2 100644
--- a/yardstick/benchmark/runners/sequence.py
+++ b/yardstick/benchmark/runners/sequence.py
@@ -30,6 +30,7 @@ import traceback
import os
from yardstick.benchmark.runners import base
+from yardstick.common import exceptions as y_exc
LOG = logging.getLogger(__name__)
@@ -74,14 +75,14 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
try:
result = method(data)
- except AssertionError as assertion:
+ except y_exc.SLAValidationError as error:
# SLA validation failed in scenario, determine what to do now
if sla_action == "assert":
raise
elif sla_action == "monitor":
- LOG.warning("SLA validation failed: %s", assertion.args)
- errors = assertion.args
- except Exception as e:
+ LOG.warning("SLA validation failed: %s", error.args)
+ errors = error.args
+ except Exception as e: # pylint: disable=broad-except
errors = traceback.format_exc()
LOG.exception(e)
else:
diff --git a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
index cb171eafa..7f1136c08 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
@@ -42,29 +42,28 @@ class ProcessAttacker(BaseAttacker):
def check(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, stderr = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
if stdout:
- LOG.info("check the environment success!")
+ LOG.info("Check the environment success!")
return int(stdout.strip('\n'))
else:
- LOG.error(
- "the host environment is error, stdout:%s, stderr:%s",
- stdout, stderr)
+ LOG.error("Error checking the host environment, "
+ "stdout:%s, stderr:%s", stdout, stderr)
return False
def inject_fault(self):
with open(self.inject_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ self.connection.execute(
"sudo /bin/sh -s {0}".format(self.service_name),
stdin=stdin_file)
def recover(self):
with open(self.recovery_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ exit_status, _, _ = self.connection.execute(
"sudo /bin/bash -s {0} ".format(self.service_name),
stdin=stdin_file)
if exit_status:
- LOG.info("Fail to restart service!")
+ LOG.info("Failed to restart service: %s", self.recovery_script)
diff --git a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
index d03d04420..d67a16b98 100644
--- a/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
+++ b/yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
@@ -71,7 +71,7 @@ class BaseAttacker(object):
for attacker_cls in utils.itersubclasses(BaseAttacker):
if attacker_type == attacker_cls.__attacker_type__:
return attacker_cls
- raise RuntimeError("No such runner_type %s" % attacker_type)
+ raise RuntimeError("No such runner_type: %s" % attacker_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(attacker_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/director.py b/yardstick/benchmark/scenarios/availability/director.py
index 71690c135..6cc0cb286 100644
--- a/yardstick/benchmark/scenarios/availability/director.py
+++ b/yardstick/benchmark/scenarios/availability/director.py
@@ -40,7 +40,7 @@ class Director(object):
nodes = self.context_cfg.get("nodes", None)
# setup attackers
if "attackers" in self.scenario_cfg["options"]:
- LOG.debug("start init attackers...")
+ LOG.debug("Start init attackers...")
attacker_cfgs = self.scenario_cfg["options"]["attackers"]
self.attackerMgr = baseattacker.AttackerMgr()
self.data = self.attackerMgr.init_attackers(attacker_cfgs,
@@ -48,19 +48,19 @@ class Director(object):
# setup monitors
if "monitors" in self.scenario_cfg["options"]:
- LOG.debug("start init monitors...")
+ LOG.debug("Start init monitors...")
monitor_cfgs = self.scenario_cfg["options"]["monitors"]
self.monitorMgr = basemonitor.MonitorMgr(self.data)
self.monitorMgr.init_monitors(monitor_cfgs, nodes)
# setup operations
if "operations" in self.scenario_cfg["options"]:
- LOG.debug("start init operations...")
+ LOG.debug("Start init operations...")
operation_cfgs = self.scenario_cfg["options"]["operations"]
self.operationMgr = baseoperation.OperationMgr()
self.operationMgr.init_operations(operation_cfgs, nodes)
# setup result checker
if "resultCheckers" in self.scenario_cfg["options"]:
- LOG.debug("start init resultCheckers...")
+ LOG.debug("Start init resultCheckers...")
result_check_cfgs = self.scenario_cfg["options"]["resultCheckers"]
self.resultCheckerMgr = baseresultchecker.ResultCheckerMgr()
self.resultCheckerMgr.init_ResultChecker(result_check_cfgs, nodes)
@@ -69,7 +69,7 @@ class Director(object):
if intermediate_variables is None:
intermediate_variables = {}
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionplayers.AttackerPlayer(self.attackerMgr[key], intermediate_variables)
if type == ActionType.MONITOR:
@@ -80,17 +80,17 @@ class Director(object):
if type == ActionType.OPERATION:
return actionplayers.OperationPlayer(self.operationMgr[key],
intermediate_variables)
- LOG.debug("something run when creatactionplayer")
+ LOG.debug("The type is not recognized by createActionPlayer")
def createActionRollbacker(self, type, key):
LOG.debug(
- "the type of current action is %s, the key is %s", type, key)
+ "The type of current action is %s, the key is %s", type, key)
if type == ActionType.ATTACKER:
return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
if type == ActionType.OPERATION:
return actionrollbackers.OperationRollbacker(
self.operationMgr[key])
- LOG.debug("no rollbacker created for %s", key)
+ LOG.debug("No rollbacker created for key: %s", key)
def verify(self):
result = True
@@ -99,7 +99,7 @@ class Director(object):
if hasattr(self, 'resultCheckerMgr'):
result &= self.resultCheckerMgr.verify()
if result:
- LOG.debug("monitors are passed")
+ LOG.debug("Monitor results are passed")
return result
def stopMonitors(self):
@@ -107,12 +107,12 @@ class Director(object):
self.monitorMgr.wait_monitors()
def knockoff(self):
- LOG.debug("knock off ....")
+ LOG.debug("Knock off ....")
while self.executionSteps:
singleStep = self.executionSteps.pop()
singleStep.rollback()
def store_result(self, result):
- LOG.debug("store result ....")
+ LOG.debug("Store result ....")
if hasattr(self, 'monitorMgr'):
self.monitorMgr.store_result(result)
diff --git a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
index 858d86ca0..2388507d7 100755
--- a/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
+++ b/yardstick/benchmark/scenarios/availability/ha_tools/start_service.bash
@@ -9,24 +9,23 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-# Start a service and check the service is started
+# Start or restart a service and check the service is started
set -e
service_name=$1
+operation=${2-start} # values are "start" or "restart"
-Distributor=$(lsb_release -a | grep "Distributor ID" | awk '{print $3}')
-
-if [ "$Distributor" != "Ubuntu" -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" ]; then
+if [ -f /usr/bin/yum -a "$service_name" != "keystone" -a "$service_name" != "neutron-server" -a "$service_name" != "haproxy" -a "$service_name" != "openvswitch" ]; then
service_name="openstack-"${service_name}
-elif [ "$Distributor" = "Ubuntu" -a "$service_name" = "keystone" ]; then
+elif [ -f /usr/bin/apt -a "$service_name" = "keystone" ]; then
service_name="apache2"
elif [ "$service_name" = "keystone" ]; then
service_name="httpd"
fi
if which systemctl 2>/dev/null; then
- systemctl start $service_name
+ systemctl $operation $service_name
else
- service $service_name start
+ service $service_name $operation
fi
diff --git a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
index 50a63f53d..f6004c774 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
@@ -103,7 +103,7 @@ class BaseMonitor(multiprocessing.Process):
for monitor in utils.itersubclasses(BaseMonitor):
if monitor_type == monitor.__monitor_type__:
return monitor
- raise RuntimeError("No such monitor_type %s" % monitor_type)
+ raise RuntimeError("No such monitor_type: %s" % monitor_type)
def get_script_fullpath(self, path):
base_path = os.path.dirname(monitor_conf_path)
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
index d0551bf03..3b36c762d 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
output = []
try:
output = subprocess.check_output(command, shell=True)
- except Exception:
+ except Exception: # pylint: disable=broad-except
exitcode = -1
LOG.error("exec command '%s' error:\n ", command, exc_info=True)
@@ -45,7 +45,7 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host,
defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_openstack_cmd.bash")
@@ -61,22 +61,20 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
self.cmd = self.cmd + " --insecure"
def monitor_func(self):
- exit_status = 0
exit_status, stdout = _execute_shell_command(self.cmd)
- LOG.debug("Execute command '%s' and the stdout is:\n%s", self.cmd, stdout)
+ LOG.debug("Executed command '%s'. "
+ "The stdout is:\n%s", self.cmd, stdout)
if exit_status:
return False
return True
def verify_SLA(self):
outage_time = self._result.get('outage_time', None)
- LOG.debug("the _result:%s", self._result)
max_outage_time = self._config["sla"]["max_outage_time"]
if outage_time > max_outage_time:
LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -97,7 +95,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
index dce69f45f..971bae1e9 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_multi.py
@@ -29,7 +29,7 @@ class MultiMonitor(basemonitor.BaseMonitor):
monitor_cls = basemonitor.BaseMonitor.get_monitor_cls(monitor_type)
monitor_number = self._config.get("monitor_number", 1)
- for i in range(monitor_number):
+ for _ in range(monitor_number):
monitor_ins = monitor_cls(self._config, self._context,
self.monitor_data)
self.monitors.append(monitor_ins)
@@ -70,7 +70,8 @@ class MultiMonitor(basemonitor.BaseMonitor):
elif "max_recover_time" in self._config["sla"]:
max_outage_time = self._config["sla"]["max_recover_time"]
else:
- raise RuntimeError("monitor max_outage_time config is not found")
+ raise RuntimeError("'max_outage_time' or 'max_recover_time' "
+ "config is not found")
self._result = {"outage_time": outage_time}
if outage_time > max_outage_time:
diff --git a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
index b0f6f8e9d..8d2f2633c 100644
--- a/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
+++ b/yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
@@ -25,14 +25,14 @@ class MonitorProcess(basemonitor.BaseMonitor):
self.connection = ssh.SSH.from_node(host, defaults={"user": "root"})
self.connection.wait(timeout=600)
- LOG.debug("ssh host success!")
+ LOG.debug("ssh host (%s) success!", str(host))
self.check_script = self.get_script_fullpath(
"ha_tools/check_process_python.bash")
self.process_name = self._config["process_name"]
def monitor_func(self):
with open(self.check_script, "r") as stdin_file:
- exit_status, stdout, stderr = self.connection.execute(
+ _, stdout, _ = self.connection.execute(
"sudo /bin/sh -s {0}".format(self.process_name),
stdin=stdin_file)
@@ -45,14 +45,12 @@ class MonitorProcess(basemonitor.BaseMonitor):
return True
def verify_SLA(self):
- LOG.debug("the _result:%s", self._result)
outage_time = self._result.get('outage_time', None)
max_outage_time = self._config["sla"]["max_recover_time"]
if outage_time > max_outage_time:
- LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
+ LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
return False
else:
- LOG.info("the sla is passed")
return True
@@ -73,7 +71,7 @@ def _test(): # pragma: no cover
}
monitor_configs.append(config)
- p = basemonitor.MonitorMgr()
+ p = basemonitor.MonitorMgr({})
p.init_monitors(monitor_configs, context)
p.start_monitors()
p.wait_monitors()
diff --git a/yardstick/benchmark/scenarios/availability/scenario_general.py b/yardstick/benchmark/scenarios/availability/scenario_general.py
index 9ac55471d..e2db03a70 100644
--- a/yardstick/benchmark/scenarios/availability/scenario_general.py
+++ b/yardstick/benchmark/scenarios/availability/scenario_general.py
@@ -26,7 +26,6 @@ class ScenarioGeneral(base.Scenario):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.intermediate_variables = {}
- self.pass_flag = True
def setup(self):
self.director = Director(self.scenario_cfg, self.context_cfg)
@@ -47,7 +46,7 @@ class ScenarioGeneral(base.Scenario):
step['actionType'], step['actionKey'])
if actionRollbacker:
self.director.executionSteps.append(actionRollbacker)
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOG.exception("Exception")
LOG.debug(
"\033[91m exception when running step: %s .... \033[0m",
@@ -59,31 +58,20 @@ class ScenarioGeneral(base.Scenario):
self.director.stopMonitors()
verify_result = self.director.verify()
-
- self.director.store_result(result)
-
+ service_not_found = False
for k, v in self.director.data.items():
if v == 0:
- result['sla_pass'] = 0
verify_result = False
- self.pass_flag = False
- LOG.info(
- "\033[92m The service process not found in the host \
-envrioment, the HA test case NOT pass")
+ service_not_found = True
+ LOG.info("\033[92m The service process (%s) not found in the host environment", k)
- if verify_result:
- result['sla_pass'] = 1
- LOG.info(
- "\033[92m Congratulations, "
- "the HA test case PASS! \033[0m")
- else:
- result['sla_pass'] = 0
- self.pass_flag = False
- LOG.info(
- "\033[91m Aoh, the HA test case FAIL,"
- "please check the detail debug information! \033[0m")
+ result['sla_pass'] = 1 if verify_result else 0
+ self.director.store_result(result)
+
+ self.verify_SLA(
+ verify_result, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "Director.verify() failed"))
def teardown(self):
self.director.knockoff()
-
- assert self.pass_flag, "The HA test case NOT passed"
diff --git a/yardstick/benchmark/scenarios/availability/serviceha.py b/yardstick/benchmark/scenarios/availability/serviceha.py
index 6d0d812af..76721e38c 100755
--- a/yardstick/benchmark/scenarios/availability/serviceha.py
+++ b/yardstick/benchmark/scenarios/availability/serviceha.py
@@ -29,13 +29,12 @@ class ServiceHA(base.Scenario):
self.context_cfg = context_cfg
self.setup_done = False
self.data = {}
- self.pass_flag = True
def setup(self):
"""scenario setup"""
nodes = self.context_cfg.get("nodes", None)
if nodes is None:
- LOG.error("the nodes info is none")
+ LOG.error("The nodes info is none")
return
self.attackers = []
@@ -58,44 +57,39 @@ class ServiceHA(base.Scenario):
def run(self, result):
"""execute the benchmark"""
if not self.setup_done:
- LOG.error("The setup not finished!")
+ LOG.error("The setup is not finished!")
return
self.monitorMgr.start_monitors()
- LOG.info("HA monitor start!")
+ LOG.info("Monitor '%s' start!", self.__scenario_type__)
for attacker in self.attackers:
attacker.inject_fault()
self.monitorMgr.wait_monitors()
- LOG.info("HA monitor stop!")
+ LOG.info("Monitor '%s' stop!", self.__scenario_type__)
sla_pass = self.monitorMgr.verify_SLA()
+ service_not_found = False
for k, v in self.data.items():
if v == 0:
- result['sla_pass'] = 0
- self.pass_flag = False
- LOG.info("The service process not found in the host envrioment, \
-the HA test case NOT pass")
- return
+ sla_pass = False
+ service_not_found = True
+ LOG.info("The service process (%s) not found in the host envrioment", k)
+
+ result['sla_pass'] = 1 if sla_pass else 0
self.monitorMgr.store_result(result)
- if sla_pass:
- result['sla_pass'] = 1
- LOG.info("The HA test case PASS the SLA")
- else:
- result['sla_pass'] = 0
- self.pass_flag = False
- assert sla_pass is True, "The HA test case NOT pass the SLA"
- return
+ self.verify_SLA(
+ sla_pass, ("a service process was not found in the host "
+ "environment" if service_not_found
+ else "MonitorMgr.verify_SLA() failed"))
def teardown(self):
"""scenario teardown"""
for attacker in self.attackers:
attacker.recover()
- assert self.pass_flag, "The HA test case NOT passed"
-
def _test(): # pragma: no cover
"""internal test function"""
diff --git a/yardstick/benchmark/scenarios/base.py b/yardstick/benchmark/scenarios/base.py
index 58a02805c..30ac1bea9 100644
--- a/yardstick/benchmark/scenarios/base.py
+++ b/yardstick/benchmark/scenarios/base.py
@@ -20,6 +20,7 @@ import six
from stevedore import extension
import yardstick.common.utils as utils
+from yardstick.common import exceptions as y_exc
def _iter_scenario_classes(scenario_type=None):
@@ -61,6 +62,11 @@ class Scenario(object):
"""Time waited after executing the run method"""
time.sleep(time_seconds)
+ def verify_SLA(self, condition, error_msg):
+ if not condition:
+ raise y_exc.SLAValidationError(
+ case_name=self.__scenario_type__, error_msg=error_msg)
+
@staticmethod
def get_types():
"""return a list of known runner type (class) names"""
diff --git a/yardstick/benchmark/scenarios/compute/cyclictest.py b/yardstick/benchmark/scenarios/compute/cyclictest.py
index 998463ef6..413709f3b 100644
--- a/yardstick/benchmark/scenarios/compute/cyclictest.py
+++ b/yardstick/benchmark/scenarios/compute/cyclictest.py
@@ -100,7 +100,7 @@ class Cyclictest(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -195,7 +195,7 @@ class Cyclictest(base.Scenario):
if latency > sla_latency:
sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
(t, latency, t, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/lmbench.py b/yardstick/benchmark/scenarios/compute/lmbench.py
index 801f7fa80..2237e49e0 100644
--- a/yardstick/benchmark/scenarios/compute/lmbench.py
+++ b/yardstick/benchmark/scenarios/compute/lmbench.py
@@ -119,8 +119,8 @@ class Lmbench(base.Scenario):
cmd = "sudo bash lmbench_latency_for_cache.sh %d %d" % \
(repetition, warmup)
else:
- raise RuntimeError("No such test_type: %s for Lmbench scenario",
- test_type)
+ raise RuntimeError("No such test_type: %s for Lmbench scenario"
+ % test_type)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -157,7 +157,7 @@ class Lmbench(base.Scenario):
if sla_latency < cache_latency:
sla_error += "latency %f > sla:max_latency(%f); " \
% (cache_latency, sla_latency)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index 0b8ed9b28..b973211f1 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -93,7 +93,7 @@ class Perf(base.Scenario):
% (load, duration, events_string)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, stdout, _ = self.client.execute(cmd)
if status:
raise RuntimeError(stdout)
@@ -105,16 +105,14 @@ class Perf(base.Scenario):
exp_val = self.scenario_cfg['sla']['expected_value']
smaller_than_exp = 'smaller_than_expected' \
in self.scenario_cfg['sla']
-
- if metric not in result:
- assert False, "Metric (%s) not found." % metric
- else:
- if smaller_than_exp:
- assert result[metric] < exp_val, "%s %d >= %d (sla); " \
- % (metric, result[metric], exp_val)
- else:
- assert result[metric] >= exp_val, "%s %d < %d (sla); " \
- % (metric, result[metric], exp_val)
+ self.verify_SLA(metric in result,
+ "Metric (%s) not found." % metric)
+ self.verify_SLA(
+ not smaller_than_exp,
+ "%s %d >= %d (sla); " % (metric, result[metric], exp_val))
+ self.verify_SLA(
+ result[metric] >= exp_val,
+ "%s %d < %d (sla); " % (metric, result[metric], exp_val))
def _test():
diff --git a/yardstick/benchmark/scenarios/compute/qemu_migrate.py b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
index 2de1270ef..975c90b22 100644
--- a/yardstick/benchmark/scenarios/compute/qemu_migrate.py
+++ b/yardstick/benchmark/scenarios/compute/qemu_migrate.py
@@ -56,7 +56,7 @@ class QemuMigrate(base.Scenario):
def _run_setup_cmd(self, client, cmd):
LOG.debug("Run cmd: %s", cmd)
- status, stdout, stderr = client.execute(cmd)
+ status, _, stderr = client.execute(cmd)
if status:
if re.search(self.REBOOT_CMD_PATTERN, cmd):
LOG.debug("Error on reboot")
@@ -127,7 +127,7 @@ class QemuMigrate(base.Scenario):
if timevalue > sla_time:
sla_error += "%s timevalue %d > sla:max_%s(%d); " % \
(t, timevalue, t, sla_time)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/ramspeed.py b/yardstick/benchmark/scenarios/compute/ramspeed.py
index ca64935dd..4daf776ff 100644
--- a/yardstick/benchmark/scenarios/compute/ramspeed.py
+++ b/yardstick/benchmark/scenarios/compute/ramspeed.py
@@ -121,8 +121,8 @@ class Ramspeed(base.Scenario):
(test_id, load, block_size)
# only the test_id 1-6 will be used in this scenario
else:
- raise RuntimeError("No such type_id: %s for Ramspeed scenario",
- test_id)
+ raise RuntimeError("No such type_id: %s for Ramspeed scenario"
+ % test_id)
LOG.debug("Executing command: %s", cmd)
status, stdout, stderr = self.client.execute(cmd)
@@ -140,4 +140,4 @@ class Ramspeed(base.Scenario):
if bw < sla_min_bw:
sla_error += "Bandwidth %f < " \
"sla:min_bandwidth(%f)" % (bw, sla_min_bw)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/compute/unixbench.py b/yardstick/benchmark/scenarios/compute/unixbench.py
index cdb345717..3cea31694 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench.py
+++ b/yardstick/benchmark/scenarios/compute/unixbench.py
@@ -125,7 +125,7 @@ class Unixbench(base.Scenario):
if score < sla_score:
sla_error += "%s score %f < sla:%s_score(%f); " % \
(t, score, t, sla_score)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
index 5a5dbc394..9f1804819 100644
--- a/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
+++ b/yardstick/benchmark/scenarios/compute/unixbench_benchmark.bash
@@ -18,7 +18,7 @@ OUTPUT_FILE=/tmp/unixbench-out.log
# run unixbench test
run_unixbench()
{
- cd /opt/tempT/UnixBench/
+ cd /opt/tempT/UnixBench/UnixBench/
./Run $OPTIONS > $OUTPUT_FILE
}
diff --git a/yardstick/benchmark/scenarios/lib/attach_volume.py b/yardstick/benchmark/scenarios/lib/attach_volume.py
index 88124964b..96dd130b1 100644
--- a/yardstick/benchmark/scenarios/lib/attach_volume.py
+++ b/yardstick/benchmark/scenarios/lib/attach_volume.py
@@ -6,30 +6,31 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
class AttachVolume(base.Scenario):
- """Attach a volmeu to an instance"""
+ """Attach a volume to an instance"""
__scenario_type__ = "AttachVolume"
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.server_id = self.options.get("server_id", "TestServer")
- self.volume_id = self.options.get("volume_id", None)
+ self.server_name_or_id = self.options["server_name_or_id"]
+ self.volume_name_or_id = self.options["volume_name_or_id"]
+ self.device = self.options.get("device")
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,10 +45,14 @@ class AttachVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.attach_server_volume(self.server_id,
- self.volume_id)
+ status = openstack_utils.attach_volume_to_server(
+ self.shade_client, self.server_name_or_id, self.volume_name_or_id,
+ device=self.device, wait=self.wait, timeout=self.timeout)
+
+ if not status:
+ result.update({"attach_volume": 0})
+ LOG.error("Attach volume to server failed!")
+ raise exceptions.ScenarioAttachVolumeError
- if status:
- LOG.info("Attach volume to server successful!")
- else:
- LOG.info("Attach volume to server failed!")
+ result.update({"attach_volume": 1})
+ LOG.info("Attach volume to server successful!")
diff --git a/yardstick/benchmark/scenarios/lib/create_image.py b/yardstick/benchmark/scenarios/lib/create_image.py
index bcffc7452..d057894a9 100644
--- a/yardstick/benchmark/scenarios/lib/create_image.py
+++ b/yardstick/benchmark/scenarios/lib/create_image.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,20 +23,23 @@ class CreateImage(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
-
- self.image_name = self.options.get("image_name", "TestImage")
- self.file_path = self.options.get("file_path", None)
- self.disk_format = self.options.get("disk_format", "qcow2")
- self.container_format = self.options.get("container_format", "bare")
- self.min_disk = self.options.get("min_disk", 0)
- self.min_ram = self.options.get("min_ram", 0)
- self.protected = self.options.get("protected", False)
- self.public = self.options.get("public", "public")
- self.tags = self.options.get("tags", [])
- self.custom_property = self.options.get("property", {})
-
- self.glance_client = op_utils.get_glance_client()
+ self.options = self.scenario_cfg["options"]
+
+ self.name = self.options["image_name"]
+ self.file_name = self.options.get("file_name")
+ self.container = self.options.get("container", 'images')
+ self.md5 = self.options.get("md5")
+ self.sha256 = self.options.get("sha256")
+ self.disk_format = self.options.get("disk_format")
+        self.container_format = self.options.get("container_format")
+ self.disable_vendor_agent = self.options.get("disable_vendor_agent", True)
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout", 3600)
+ self.allow_duplicates = self.options.get("allow_duplicates", False)
+ self.meta = self.options.get("meta")
+ self.volume = self.options.get("volume")
+
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -54,19 +54,22 @@ class CreateImage(base.Scenario):
if not self.setup_done:
self.setup()
- image_id = op_utils.create_image(self.glance_client, self.image_name,
- self.file_path, self.disk_format,
- self.container_format, self.min_disk,
- self.min_ram, self.protected, self.tags,
- self.public, **self.custom_property)
-
- if image_id:
- LOG.info("Create image successful!")
- values = [image_id]
-
- else:
- LOG.info("Create image failed!")
- values = []
-
- keys = self.scenario_cfg.get('output', '').split()
+ image_id = openstack_utils.create_image(
+ self.shade_client, self.name, filename=self.file_name,
+ container=self.container, md5=self.md5, sha256=self.sha256,
+ disk_format=self.disk_format,
+ container_format=self.container_format,
+ disable_vendor_agent=self.disable_vendor_agent, wait=self.wait,
+ timeout=self.timeout, allow_duplicates=self.allow_duplicates,
+ meta=self.meta, volume=self.volume)
+
+ if not image_id:
+ result.update({"image_create": 0})
+ LOG.error("Create image failed!")
+ raise exceptions.ScenarioCreateImageError
+
+ result.update({"image_create": 1})
+ LOG.info("Create image successful!")
+ keys = self.scenario_cfg.get("output", '').split()
+ values = [image_id]
return self._push_to_outputs(keys, values)
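Editor's note: success is now recorded in result and the image id is pushed to the task outputs. For reference, a sketch of what _push_to_outputs() on the scenario base class essentially does, assuming the base-class helper is a plain zip of declared output keys with collected values:

    def _push_to_outputs(keys, values):
        # e.g. keys = ['image_id'], values = [image_id] -> {'image_id': image_id}
        return dict(zip(keys, values))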
diff --git a/yardstick/benchmark/scenarios/lib/create_keypair.py b/yardstick/benchmark/scenarios/lib/create_keypair.py
index f5b1fff7a..ee9bc440a 100644
--- a/yardstick/benchmark/scenarios/lib/create_keypair.py
+++ b/yardstick/benchmark/scenarios/lib/create_keypair.py
@@ -6,15 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
-import paramiko
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -27,10 +23,11 @@ class CreateKeypair(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.key_name = self.options.get("key_name", "yardstick_key")
- self.key_filename = self.options.get("key_path", "/tmp/yardstick_key")
+ self.name = self.options["key_name"]
+ self.public_key = self.options.get("public_key")
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,27 +42,17 @@ class CreateKeypair(base.Scenario):
if not self.setup_done:
self.setup()
- rsa_key = paramiko.RSAKey.generate(bits=2048, progress_func=None)
- rsa_key.write_private_key_file(self.key_filename)
- LOG.info("Writing key_file %s ...", self.key_filename)
- with open(self.key_filename + ".pub", "w") as pubkey_file:
- pubkey_file.write(
- "%s %s\n" % (rsa_key.get_name(), rsa_key.get_base64()))
- del rsa_key
-
- keypair = op_utils.create_keypair(self.key_name,
- self.key_filename + ".pub")
+ keypair = openstack_utils.create_keypair(
+ self.shade_client, self.name, public_key=self.public_key)
- if keypair:
- result.update({"keypair_create": 1})
- LOG.info("Create keypair successful!")
- else:
+ if not keypair:
result.update({"keypair_create": 0})
- LOG.info("Create keypair failed!")
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [keypair.id]
- return self._push_to_outputs(keys, values)
+ LOG.error("Create keypair failed!")
+ raise exceptions.ScenarioCreateKeypairError
+
+ result.update({"keypair_create": 1})
+ LOG.info("Create keypair successful!")
+ keys = self.scenario_cfg.get("output", '').split()
+ keypair_id = keypair["id"]
+ values = [keypair_id]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_sec_group.py b/yardstick/benchmark/scenarios/lib/create_sec_group.py
index 3d1aec9e8..1d2e36488 100644
--- a/yardstick/benchmark/scenarios/lib/create_sec_group.py
+++ b/yardstick/benchmark/scenarios/lib/create_sec_group.py
@@ -7,13 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,11 +24,12 @@ class CreateSecgroup(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.sg_name = self.options.get("sg_name", "yardstick_sec_group")
- self.description = self.options.get("description", None)
- self.neutron_client = op_utils.get_neutron_client()
+ self.sg_name = self.options["sg_name"]
+ self.description = self.options.get("description", "")
+ self.project_id = self.options.get("project_id")
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,21 +44,16 @@ class CreateSecgroup(base.Scenario):
if not self.setup_done:
self.setup()
- sg_id = op_utils.create_security_group_full(self.neutron_client,
- sg_name=self.sg_name,
- sg_description=self.description)
-
- if sg_id:
- result.update({"sg_create": 1})
- LOG.info("Create security group successful!")
- else:
+ sg_id = openstack_utils.create_security_group_full(
+ self.shade_client, self.sg_name, sg_description=self.description,
+ project_id=self.project_id)
+ if not sg_id:
result.update({"sg_create": 0})
LOG.error("Create security group failed!")
+ raise exceptions.ScenarioCreateSecurityGroupError
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [sg_id]
- return self._push_to_outputs(keys, values)
+ result.update({"sg_create": 1})
+ LOG.info("Create security group successful!")
+ keys = self.scenario_cfg.get("output", '').split()
+ values = [sg_id]
+ return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/create_server.py b/yardstick/benchmark/scenarios/lib/create_server.py
index 31ba18ed4..e2748aecf 100644
--- a/yardstick/benchmark/scenarios/lib/create_server.py
+++ b/yardstick/benchmark/scenarios/lib/create_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,15 +23,27 @@ class CreateServer(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
-
- self.image_name = self.options.get("image_name", None)
- self.flavor_name = self.options.get("flavor_name", None)
- self.openstack = self.options.get("openstack_paras", None)
-
- self.glance_client = op_utils.get_glance_client()
- self.neutron_client = op_utils.get_neutron_client()
- self.nova_client = op_utils.get_nova_client()
+ self.options = self.scenario_cfg["options"]
+
+ self.name = self.options["name"]
+ self.image = self.options["image"]
+ self.flavor = self.options["flavor"]
+ self.auto_ip = self.options.get("auto_ip", True)
+ self.ips = self.options.get("ips")
+ self.ip_pool = self.options.get("ip_pool")
+ self.root_volume = self.options.get("root_volume")
+ self.terminate_volume = self.options.get("terminate_volume", False)
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout", 180)
+ self.reuse_ips = self.options.get("reuse_ips", True)
+ self.network = self.options.get("network")
+ self.boot_from_volume = self.options.get("boot_from_volume", False)
+ self.volume_size = self.options.get("volume_size", "20")
+ self.boot_volume = self.options.get("boot_volume")
+ self.volumes = self.options.get("volumes")
+ self.nat_destination = self.options.get("nat_destination")
+
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -49,26 +58,23 @@ class CreateServer(base.Scenario):
if not self.setup_done:
self.setup()
- if self.image_name is not None:
- self.openstack['image'] = op_utils.get_image_id(self.glance_client,
- self.image_name)
- if self.flavor_name is not None:
- self.openstack['flavor'] = op_utils.get_flavor_id(self.nova_client,
- self.flavor_name)
-
- vm = op_utils.create_instance_and_wait_for_active(self.openstack)
-
- if vm:
- result.update({"instance_create": 1})
- LOG.info("Create server successful!")
- else:
+ server = openstack_utils.create_instance_and_wait_for_active(
+ self.shade_client, self.name, self.image,
+ self.flavor, auto_ip=self.auto_ip, ips=self.ips,
+ ip_pool=self.ip_pool, root_volume=self.root_volume,
+ terminate_volume=self.terminate_volume, wait=self.wait,
+ timeout=self.timeout, reuse_ips=self.reuse_ips,
+ network=self.network, boot_from_volume=self.boot_from_volume,
+ volume_size=self.volume_size, boot_volume=self.boot_volume,
+ volumes=self.volumes, nat_destination=self.nat_destination)
+
+ if not server:
result.update({"instance_create": 0})
LOG.error("Create server failed!")
+ raise exceptions.ScenarioCreateServerError
- try:
- keys = self.scenario_cfg.get('output', '').split()
- except KeyError:
- pass
- else:
- values = [vm.id]
- return self._push_to_outputs(keys, values)
+ result.update({"instance_create": 1})
+ LOG.info("Create instance successful!")
+ keys = self.scenario_cfg.get("output", '').split()
+ values = [server["id"]]
+ return self._push_to_outputs(keys, values)
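Editor's note: with the shade-based constructor, name, image and flavor become mandatory options and everything else keeps a default. A hypothetical scenario_cfg fragment exercising the new keys (all values are illustrative only):

    scenario_cfg = {
        "options": {
            "name": "vm-under-test",   # required
            "image": "cirros-0.4.0",   # required
            "flavor": "m1.small",      # required
            "network": "test-net",     # optional, see defaults in __init__
            "wait": True,
            "timeout": 180,
        },
        "output": "server_id",
    }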
diff --git a/yardstick/benchmark/scenarios/lib/create_volume.py b/yardstick/benchmark/scenarios/lib/create_volume.py
index df523a5ec..b66749026 100644
--- a/yardstick/benchmark/scenarios/lib/create_volume.py
+++ b/yardstick/benchmark/scenarios/lib/create_volume.py
@@ -7,14 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import time
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -27,15 +25,16 @@ class CreateVolume(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.volume_name = self.options.get("volume_name", "TestVolume")
- self.volume_size = self.options.get("size", 100)
- self.image_name = self.options.get("image", None)
- self.image_id = None
+ self.size = self.options["size_gb"]
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+ self.image = self.options.get("image")
+ self.name = self.options.get("name")
+ self.description = self.options.get("description")
- self.glance_client = op_utils.get_glance_client()
- self.cinder_client = op_utils.get_cinder_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,27 +43,29 @@ class CreateVolume(base.Scenario):
self.setup_done = True
- def run(self):
+ def run(self, result):
"""execute the test"""
if not self.setup_done:
self.setup()
- self.image_id = op_utils.get_image_id(self.glance_client,
- self.image_name)
+ volume = openstack_utils.create_volume(
+ self.shade_client, self.size, wait=self.wait, timeout=self.timeout,
+ image=self.image, name=self.name, description=self.description)
- volume = op_utils.create_volume(self.cinder_client, self.volume_name,
- self.volume_size, self.image_id)
+ if not volume:
+ result.update({"volume_create": 0})
+ LOG.error("Create volume failed!")
+ raise exceptions.ScenarioCreateVolumeError
- status = volume.status
- while(status == 'creating' or status == 'downloading'):
+ status = volume["status"]
+ while status == "creating" or status == "downloading":
LOG.info("Volume status is: %s", status)
time.sleep(5)
- volume = op_utils.get_volume_by_name(self.volume_name)
- status = volume.status
-
+ volume = openstack_utils.get_volume(self.shade_client, self.name)
+ status = volume["status"]
+ result.update({"volume_create": 1})
LOG.info("Create volume successful!")
-
- values = [volume.id]
- keys = self.scenario_cfg.get('output', '').split()
+ values = [volume["id"]]
+ keys = self.scenario_cfg.get("output", '').split()
return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/delete_image.py b/yardstick/benchmark/scenarios/lib/delete_image.py
index 0e3a853e5..008f104b2 100644
--- a/yardstick/benchmark/scenarios/lib/delete_image.py
+++ b/yardstick/benchmark/scenarios/lib/delete_image.py
@@ -7,13 +7,11 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,12 +24,14 @@ class DeleteImage(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.image_name = self.options.get("image_name", "TestImage")
- self.image_id = None
+ self.image_name_or_id = self.options["name_or_id"]
+ self.wait = self.options.get("wait", False)
+ self.timeout = self.options.get("timeout", 3600)
+ self.delete_objects = self.options.get("delete_objects", True)
- self.glance_client = op_utils.get_glance_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -46,16 +46,14 @@ class DeleteImage(base.Scenario):
if not self.setup_done:
self.setup()
- self.image_id = op_utils.get_image_id(self.glance_client, self.image_name)
- LOG.info("Deleting image: %s", self.image_name)
- status = op_utils.delete_image(self.glance_client, self.image_id)
+ status = openstack_utils.delete_image(
+ self.shade_client, self.image_name_or_id, wait=self.wait,
+ timeout=self.timeout, delete_objects=self.delete_objects)
- if status:
- LOG.info("Delete image successful!")
- values = [status]
- else:
- LOG.info("Delete image failed!")
- values = []
+ if not status:
+ result.update({"delete_image": 0})
+ LOG.error("Delete image failed!")
+ raise exceptions.ScenarioDeleteImageError
- keys = self.scenario_cfg.get('output', '').split()
- return self._push_to_outputs(keys, values)
+ result.update({"delete_image": 1})
+ LOG.info("Delete image successful!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_keypair.py b/yardstick/benchmark/scenarios/lib/delete_keypair.py
index 135139959..a52a38567 100644
--- a/yardstick/benchmark/scenarios/lib/delete_keypair.py
+++ b/yardstick/benchmark/scenarios/lib/delete_keypair.py
@@ -6,14 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
@@ -26,11 +24,11 @@ class DeleteKeypair(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.key_name = self.options.get("key_name", "yardstick_key")
+ self.key_name = self.options["key_name"]
- self.nova_client = op_utils.get_nova_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,12 +43,13 @@ class DeleteKeypair(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_keypair(self.nova_client,
- self.key_name)
+ status = openstack_utils.delete_keypair(self.shade_client,
+ self.key_name)
- if status:
- result.update({"delete_keypair": 1})
- LOG.info("Delete keypair successful!")
- else:
+ if not status:
result.update({"delete_keypair": 0})
- LOG.info("Delete keypair failed!")
+ LOG.error("Delete keypair failed!")
+ raise exceptions.ScenarioDeleteKeypairError
+
+ result.update({"delete_keypair": 1})
+ LOG.info("Delete keypair successful!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_network.py b/yardstick/benchmark/scenarios/lib/delete_network.py
index 2e8b595f9..8874e8b1e 100644
--- a/yardstick/benchmark/scenarios/lib/delete_network.py
+++ b/yardstick/benchmark/scenarios/lib/delete_network.py
@@ -10,7 +10,8 @@
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -24,11 +25,11 @@ class DeleteNetwork(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.network_id = self.options.get("network_id", None)
+ self.network_name_or_id = self.options["network_name_or_id"]
- self.shade_client = op_utils.get_shade_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -43,12 +44,13 @@ class DeleteNetwork(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_neutron_net(self.shade_client,
- network_id=self.network_id)
- if status:
- result.update({"delete_network": 1})
- LOG.info("Delete network successful!")
- else:
+ status = openstack_utils.delete_neutron_net(self.shade_client,
+ self.network_name_or_id)
+
+ if not status:
result.update({"delete_network": 0})
LOG.error("Delete network failed!")
- return status
+ raise exceptions.ScenarioDeleteNetworkError
+
+ result.update({"delete_network": 1})
+ LOG.info("Delete network successful!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_server.py b/yardstick/benchmark/scenarios/lib/delete_server.py
index bcd8faba7..46229ff04 100644
--- a/yardstick/benchmark/scenarios/lib/delete_server.py
+++ b/yardstick/benchmark/scenarios/lib/delete_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
LOG = logging.getLogger(__name__)
@@ -26,9 +23,13 @@ class DeleteServer(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
- self.server_id = self.options.get("server_id", None)
- self.nova_client = op_utils.get_nova_client()
+ self.options = self.scenario_cfg["options"]
+ self.server_name_or_id = self.options["name_or_id"]
+ self.wait = self.options.get("wait", False)
+ self.timeout = self.options.get("timeout", 180)
+ self.delete_ips = self.options.get("delete_ips", False)
+ self.delete_ip_retry = self.options.get("delete_ip_retry", 1)
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -43,9 +44,15 @@ class DeleteServer(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_instance(self.nova_client,
- instance_id=self.server_id)
- if status:
- LOG.info("Delete server successful!")
- else:
+ status = openstack_utils.delete_instance(
+ self.shade_client, self.server_name_or_id, wait=self.wait,
+ timeout=self.timeout, delete_ips=self.delete_ips,
+ delete_ip_retry=self.delete_ip_retry)
+
+ if not status:
+ result.update({"delete_server": 0})
LOG.error("Delete server failed!")
+ raise exceptions.ScenarioDeleteServerError
+
+ result.update({"delete_server": 1})
+ LOG.info("Delete server successful!")
diff --git a/yardstick/benchmark/scenarios/lib/delete_volume.py b/yardstick/benchmark/scenarios/lib/delete_volume.py
index ea2b85812..59e19dfdf 100644
--- a/yardstick/benchmark/scenarios/lib/delete_volume.py
+++ b/yardstick/benchmark/scenarios/lib/delete_volume.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
LOG = logging.getLogger(__name__)
@@ -26,11 +23,13 @@ class DeleteVolume(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.volume_id = self.options.get("volume_id", None)
+ self.volume_name_or_id = self.options.get("name_or_id")
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
- self.cinder_client = op_utils.get_cinder_client()
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -45,11 +44,14 @@ class DeleteVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.delete_volume(self.cinder_client, self.volume_id)
+ status = openstack_utils.delete_volume(
+ self.shade_client, name_or_id=self.volume_name_or_id,
+ wait=self.wait, timeout=self.timeout)
- if status:
- result.update({"delete_volume": 1})
- LOG.info("Delete volume successful!")
- else:
+ if not status:
result.update({"delete_volume": 0})
- LOG.info("Delete volume failed!")
+ LOG.error("Delete volume failed!")
+ raise exceptions.ScenarioDeleteVolumeError
+
+ result.update({"delete_volume": 1})
+ LOG.info("Delete volume successful!")
diff --git a/yardstick/benchmark/scenarios/lib/detach_volume.py b/yardstick/benchmark/scenarios/lib/detach_volume.py
index 0b02a3a81..76c0167bd 100644
--- a/yardstick/benchmark/scenarios/lib/detach_volume.py
+++ b/yardstick/benchmark/scenarios/lib/detach_volume.py
@@ -6,14 +6,12 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+
LOG = logging.getLogger(__name__)
@@ -26,10 +24,14 @@ class DetachVolume(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
+ self.options = self.scenario_cfg["options"]
- self.server_id = self.options.get("server_id", "TestServer")
- self.volume_id = self.options.get("volume_id", None)
+ self.server = self.options["server_name_or_id"]
+ self.volume = self.options["volume_name_or_id"]
+ self.wait = self.options.get("wait", True)
+ self.timeout = self.options.get("timeout")
+
+ self.shade_client = openstack_utils.get_shade_client()
self.setup_done = False
@@ -44,11 +46,14 @@ class DetachVolume(base.Scenario):
if not self.setup_done:
self.setup()
- status = op_utils.detach_volume(self.server_id, self.volume_id)
+ status = openstack_utils.detach_volume(
+ self.shade_client, self.server, self.volume,
+ wait=self.wait, timeout=self.timeout)
- if status:
- result.update({"detach_volume": 1})
- LOG.info("Detach volume from server successful!")
- else:
+ if not status:
result.update({"detach_volume": 0})
- LOG.info("Detach volume from server failed!")
+ LOG.error("Detach volume from server failed!")
+ raise exceptions.ScenarioDetachVolumeError
+
+ result.update({"detach_volume": 1})
+ LOG.info("Detach volume from server successful!")
diff --git a/yardstick/benchmark/scenarios/lib/get_flavor.py b/yardstick/benchmark/scenarios/lib/get_flavor.py
index d5e33947e..6727a7343 100644
--- a/yardstick/benchmark/scenarios/lib/get_flavor.py
+++ b/yardstick/benchmark/scenarios/lib/get_flavor.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -26,8 +23,12 @@ class GetFlavor(base.Scenario):
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg['options']
- self.flavor_name = self.options.get("flavor_name", "TestFlavor")
+ self.options = self.scenario_cfg["options"]
+ self.name_or_id = self.options["name_or_id"]
+ self.filters = self.options.get("filters")
+ self.get_extra = self.options.get("get_extra", True)
+ self.shade_client = openstack_utils.get_shade_client()
+
self.setup_done = False
def setup(self):
@@ -41,14 +42,18 @@ class GetFlavor(base.Scenario):
if not self.setup_done:
self.setup()
- LOG.info("Querying flavor: %s", self.flavor_name)
- flavor = op_utils.get_flavor_by_name(self.flavor_name)
- if flavor:
- LOG.info("Get flavor successful!")
- values = [self._change_obj_to_dict(flavor)]
- else:
- LOG.info("Get flavor: no flavor matched!")
- values = []
+ LOG.info("Querying flavor: %s", self.name_or_id)
+ flavor = openstack_utils.get_flavor(
+ self.shade_client, self.name_or_id, filters=self.filters,
+ get_extra=self.get_extra)
+
+ if not flavor:
+ result.update({"get_flavor": 0})
+ LOG.error("Get flavor failed!")
+ raise exceptions.ScenarioGetFlavorError
- keys = self.scenario_cfg.get('output', '').split()
+ result.update({"get_flavor": 1})
+ LOG.info("Get flavor successful!")
+ values = [flavor]
+ keys = self.scenario_cfg.get("output", '').split()
return self._push_to_outputs(keys, values)
diff --git a/yardstick/benchmark/scenarios/lib/get_server.py b/yardstick/benchmark/scenarios/lib/get_server.py
index fcf47c80d..f65fa9ebf 100644
--- a/yardstick/benchmark/scenarios/lib/get_server.py
+++ b/yardstick/benchmark/scenarios/lib/get_server.py
@@ -6,14 +6,11 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-from __future__ import print_function
-from __future__ import absolute_import
-
import logging
from yardstick.benchmark.scenarios import base
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
LOG = logging.getLogger(__name__)
@@ -21,63 +18,58 @@ LOG = logging.getLogger(__name__)
class GetServer(base.Scenario):
"""Get a server instance
- Parameters
- server_id - ID of the server
- type: string
- unit: N/A
- default: null
- server_name - name of the server
- type: string
- unit: N/A
- default: null
-
- Either server_id or server_name is required.
-
- Outputs
+ Parameters:
+ name_or_id - Name or ID of the server
+ type: string
+ filters - meta data to use for further filtering
+ type: dict
+    detailed - Whether or not to add detailed additional information.
+        type: bool
+    bare - Whether to skip adding any additional information to the server
+        record.
+        type: bool
+    all_projects - Whether to get server from all projects or just the current
+        auth scoped project.
+        type: bool
+
+ Outputs:
rc - response code of getting server instance
- 0 for success
- 1 for failure
+ 1 for success
+ 0 for failure
type: int
- unit: N/A
server - instance of the server
type: dict
- unit: N/A
+
"""
- __scenario_type__ = "GetServer"
+ __scenario_type__ = 'GetServer'
def __init__(self, scenario_cfg, context_cfg):
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
- self.options = self.scenario_cfg.get('options', {})
+ self.options = self.scenario_cfg['options']
- self.server_id = self.options.get("server_id")
- if self.server_id:
- LOG.debug('Server id is %s', self.server_id)
+ self.server_name_or_id = self.options.get('name_or_id')
+ self.filters = self.options.get('filters')
+ self.detailed = self.options.get('detailed', False)
+ self.bare = self.options.get('bare', False)
- default_name = self.scenario_cfg.get('host',
- self.scenario_cfg.get('target'))
- self.server_name = self.options.get('server_name', default_name)
- if self.server_name:
- LOG.debug('Server name is %s', self.server_name)
-
- self.nova_client = op_utils.get_nova_client()
+ self.shade_client = openstack_utils.get_shade_client()
def run(self, result):
"""execute the test"""
- if self.server_id:
- server = self.nova_client.servers.get(self.server_id)
- else:
- server = op_utils.get_server_by_name(self.server_name)
-
- keys = self.scenario_cfg.get('output', '').split()
+ server = openstack_utils.get_server(
+ self.shade_client, name_or_id=self.server_name_or_id,
+ filters=self.filters, detailed=self.detailed, bare=self.bare)
- if server:
- LOG.info("Get server successful!")
- values = [0, self._change_obj_to_dict(server)]
- else:
- LOG.info("Get server failed!")
- values = [1]
+ if not server:
+ result.update({'get_server': 0})
+ LOG.error('Get Server failed!')
+ raise exceptions.ScenarioGetServerError
+ result.update({'get_server': 1})
+ LOG.info('Get Server successful!')
+ keys = self.scenario_cfg.get('output', '').split()
+ values = [server]
return self._push_to_outputs(keys, values)
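Editor's note: with the shade client the lookup takes a single name_or_id plus optional filters, and the whole server record is pushed to the declared output. A hypothetical options fragment for the new interface (values illustrative only):

    scenario_cfg = {
        'options': {
            'name_or_id': 'vm-under-test',
            'filters': {'status': 'ACTIVE'},  # optional metadata filter
            'detailed': False,
            'bare': False,
        },
        'output': 'server',  # run() then returns {'server': <server dict>}
    }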
diff --git a/yardstick/benchmark/scenarios/networking/iperf3.py b/yardstick/benchmark/scenarios/networking/iperf3.py
index 98c45990e..51e044e7b 100644
--- a/yardstick/benchmark/scenarios/networking/iperf3.py
+++ b/yardstick/benchmark/scenarios/networking/iperf3.py
@@ -92,7 +92,7 @@ For more info see http://software.es.net/iperf
def teardown(self):
LOG.debug("teardown")
self.host.close()
- status, stdout, stderr = self.target.execute("pkill iperf3")
+ status, _, stderr = self.target.execute("pkill iperf3")
if status:
LOG.warning(stderr)
self.target.close()
@@ -145,7 +145,7 @@ For more info see http://software.es.net/iperf
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.host.execute(cmd)
+ status, stdout, _ = self.host.execute(cmd)
if status:
# error cause in json dict on stdout
raise RuntimeError(stdout)
@@ -165,16 +165,17 @@ For more info see http://software.es.net/iperf
bit_per_second = \
int(iperf_result["end"]["sum_received"]["bits_per_second"])
bytes_per_second = bit_per_second / 8
- assert bytes_per_second >= sla_bytes_per_second, \
- "bytes_per_second %d < sla:bytes_per_second (%d); " % \
- (bytes_per_second, sla_bytes_per_second)
+ self.verify_SLA(
+ bytes_per_second >= sla_bytes_per_second,
+ "bytes_per_second %d < sla:bytes_per_second (%d); "
+ % (bytes_per_second, sla_bytes_per_second))
else:
sla_jitter = float(sla_iperf["jitter"])
jitter_ms = float(iperf_result["end"]["sum"]["jitter_ms"])
- assert jitter_ms <= sla_jitter, \
- "jitter_ms %f > sla:jitter %f; " % \
- (jitter_ms, sla_jitter)
+ self.verify_SLA(jitter_ms <= sla_jitter,
+ "jitter_ms %f > sla:jitter %f; "
+ % (jitter_ms, sla_jitter))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
index 86173c9da..e3bd7af46 100644
--- a/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
+++ b/yardstick/benchmark/scenarios/networking/moongen_testpmd.py
@@ -367,9 +367,10 @@ ports = {0,1},
throughput_rx_mpps = int(
self.scenario_cfg["sla"]["throughput_rx_mpps"])
- assert throughput_rx_mpps <= moongen_result["tx_mpps"], \
- "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); " % \
- (throughput_rx_mpps, moongen_result["tx_mpps"])
+ self.verify_SLA(
+ throughput_rx_mpps <= moongen_result["tx_mpps"],
+ "sla_throughput_rx_mpps %f > throughput_rx_mpps(%f); "
+ % (throughput_rx_mpps, moongen_result["tx_mpps"]))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/netperf.py b/yardstick/benchmark/scenarios/networking/netperf.py
index 33c02d409..9f1a81413 100755
--- a/yardstick/benchmark/scenarios/networking/netperf.py
+++ b/yardstick/benchmark/scenarios/networking/netperf.py
@@ -138,9 +138,9 @@ class Netperf(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def _test():
diff --git a/yardstick/benchmark/scenarios/networking/netperf_node.py b/yardstick/benchmark/scenarios/networking/netperf_node.py
index d52e6b9e1..0ad2ecff5 100755
--- a/yardstick/benchmark/scenarios/networking/netperf_node.py
+++ b/yardstick/benchmark/scenarios/networking/netperf_node.py
@@ -156,9 +156,10 @@ class NetperfNode(base.Scenario):
sla_max_mean_latency = int(
self.scenario_cfg["sla"]["mean_latency"])
- assert mean_latency <= sla_max_mean_latency, \
- "mean_latency %f > sla_max_mean_latency(%f); " % \
- (mean_latency, sla_max_mean_latency)
+ self.verify_SLA(
+ mean_latency <= sla_max_mean_latency,
+ "mean_latency %f > sla_max_mean_latency(%f); "
+ % (mean_latency, sla_max_mean_latency))
def teardown(self):
"""remove netperf from nodes after test"""
diff --git a/yardstick/benchmark/scenarios/networking/nstat.py b/yardstick/benchmark/scenarios/networking/nstat.py
index 10c560769..ea067f8ab 100644
--- a/yardstick/benchmark/scenarios/networking/nstat.py
+++ b/yardstick/benchmark/scenarios/networking/nstat.py
@@ -121,4 +121,4 @@ class Nstat(base.Scenario):
if rate > sla_rate:
sla_error += "%s rate %f > sla:%s_rate(%f); " % \
(i, rate, i, sla_rate)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
diff --git a/yardstick/benchmark/scenarios/networking/ping.py b/yardstick/benchmark/scenarios/networking/ping.py
index e7d9beea8..6caeab5ef 100644
--- a/yardstick/benchmark/scenarios/networking/ping.py
+++ b/yardstick/benchmark/scenarios/networking/ping.py
@@ -91,9 +91,10 @@ class Ping(base.Scenario):
result.update(utils.flatten_dict_key(ping_result))
if sla_max_rtt is not None:
sla_max_rtt = float(sla_max_rtt)
- assert rtt_result[target_vm_name] <= sla_max_rtt,\
- "rtt %f > sla: max_rtt(%f); " % \
- (rtt_result[target_vm_name], sla_max_rtt)
+ self.verify_SLA(
+ rtt_result[target_vm_name] <= sla_max_rtt,
+ "rtt %f > sla: max_rtt(%f); "
+ % (rtt_result[target_vm_name], sla_max_rtt))
else:
LOG.error("ping '%s' '%s' timeout", options, target_vm)
# we need to specify a result to satisfy influxdb schema
@@ -102,13 +103,12 @@ class Ping(base.Scenario):
rtt_result[target_vm_name] = float(self.PING_ERROR_RTT)
# store result before potential AssertionError
result.update(utils.flatten_dict_key(ping_result))
- if sla_max_rtt is not None:
- raise AssertionError("packet dropped rtt {:f} > sla: max_rtt({:f})".format(
- rtt_result[target_vm_name], sla_max_rtt))
-
- else:
- raise AssertionError(
- "packet dropped rtt {:f}".format(rtt_result[target_vm_name]))
+            if sla_max_rtt is not None:
+                self.verify_SLA(False,
+                                "packet dropped rtt %f > sla: max_rtt(%f)"
+                                % (rtt_result[target_vm_name], sla_max_rtt))
+            self.verify_SLA(False, "packet dropped rtt %f"
+                            % rtt_result[target_vm_name])
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/ping6.py b/yardstick/benchmark/scenarios/networking/ping6.py
index 74855a10f..377278004 100644
--- a/yardstick/benchmark/scenarios/networking/ping6.py
+++ b/yardstick/benchmark/scenarios/networking/ping6.py
@@ -59,8 +59,7 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.pre_setup_script, '~/pre_setup.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash pre_setup.sh")
+ self.client.execute("sudo bash pre_setup.sh")
def _get_controller_node(self, host_list):
for host_name in host_list:
@@ -122,7 +121,7 @@ class Ping6(base.Scenario): # pragma: no cover
cmd = "sudo bash %s %s %s" % \
(setup_bash_file, self.openrc, self.external_network)
LOG.debug("Executing setup command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ self.client.execute(cmd)
self.setup_done = True
@@ -171,8 +170,9 @@ class Ping6(base.Scenario): # pragma: no cover
result["rtt"] = float(stdout)
if "sla" in self.scenario_cfg:
sla_max_rtt = int(self.scenario_cfg["sla"]["max_rtt"])
- assert result["rtt"] <= sla_max_rtt, \
- "rtt %f > sla:max_rtt(%f); " % (result["rtt"], sla_max_rtt)
+ self.verify_SLA(result["rtt"] <= sla_max_rtt,
+ "rtt %f > sla:max_rtt(%f); "
+ % (result["rtt"], sla_max_rtt))
else:
LOG.error("ping6 timeout!!!")
self.run_done = True
@@ -216,5 +216,4 @@ class Ping6(base.Scenario): # pragma: no cover
self._ssh_host(node_name)
self.client._put_file_shell(
self.post_teardown_script, '~/post_teardown.sh')
- status, stdout, stderr = self.client.execute(
- "sudo bash post_teardown.sh")
+ self.client.execute("sudo bash post_teardown.sh")
diff --git a/yardstick/benchmark/scenarios/networking/pktgen.py b/yardstick/benchmark/scenarios/networking/pktgen.py
index b79b91539..d1d500ff6 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen.py
@@ -87,7 +87,7 @@ class Pktgen(base.Scenario):
self.server.send_command(cmd)
self.client.send_command(cmd)
- """multiqueue setup"""
+ # multiqueue setup
if not self._is_irqbalance_disabled():
self._disable_irqbalance()
@@ -132,20 +132,20 @@ class Pktgen(base.Scenario):
def _disable_irqbalance(self):
cmd = "sudo sed -i -e 's/ENABLED=\"1\"/ENABLED=\"0\"/g' " \
"/etc/default/irqbalance"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
cmd = "sudo service irqbalance stop"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
cmd = "sudo service irqbalance disable"
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -158,8 +158,8 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -171,8 +171,8 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -192,8 +192,8 @@ class Pktgen(base.Scenario):
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -206,8 +206,8 @@ class Pktgen(base.Scenario):
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -220,8 +220,8 @@ class Pktgen(base.Scenario):
raise RuntimeError(stderr)
cmd = "echo 1 | sudo tee /proc/irq/%s/smp_affinity" % (int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -240,8 +240,8 @@ class Pktgen(base.Scenario):
cmd = "echo %s | sudo tee /proc/irq/%s/smp_affinity" \
% (smp_affinity_mask, int(stdout))
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
@@ -282,8 +282,8 @@ class Pktgen(base.Scenario):
cmd = "sudo ethtool -L %s combined %s" % \
(self.vnic_name, available_queue_number)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.server.execute(cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ status, _, stderr = self.server.execute(cmd)
+ status, _, stderr = self.client.execute(cmd)
if status:
raise RuntimeError(stderr)
return available_queue_number
@@ -374,8 +374,8 @@ class Pktgen(base.Scenario):
if "sla" in self.scenario_cfg:
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm,
+ "ppm %d > sla_max_ppm %d; " % (ppm, sla_max_ppm))
def _test(): # pragma: no cover
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
index 9a7b975a2..1b018f52a 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk.py
@@ -135,4 +135,4 @@ cat ~/result.log -vT \
LOG.info("sla_max_latency: %d", sla_max_latency)
debug_info = "avg_latency %d > sla_max_latency %d" \
% (avg_latency, sla_max_latency)
- assert avg_latency <= sla_max_latency, debug_info
+ self.verify_SLA(avg_latency <= sla_max_latency, debug_info)
diff --git a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
index 497e59ee8..97b9cf73f 100644
--- a/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
+++ b/yardstick/benchmark/scenarios/networking/pktgen_dpdk_throughput.py
@@ -143,11 +143,11 @@ class PktgenDPDK(base.Scenario):
cmd = "ip a | grep eth1 2>/dev/null"
LOG.debug("Executing command: %s in %s", cmd, host)
if "server" in host:
- status, stdout, stderr = self.server.execute(cmd)
+ _, stdout, _ = self.server.execute(cmd)
if stdout:
is_run = False
else:
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
@@ -222,5 +222,5 @@ class PktgenDPDK(base.Scenario):
ppm += (sent - received) % sent > 0
LOG.debug("Lost packets %d - Lost ppm %d", (sent - received), ppm)
sla_max_ppm = int(self.scenario_cfg["sla"]["max_ppm"])
- assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; " \
- % (ppm, sla_max_ppm)
+ self.verify_SLA(ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d; "
+ % (ppm, sla_max_ppm))
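Editor's note: the SLA here is expressed as lost packets per million (ppm), computed with integer arithmetic; the final increment adds one ppm whenever any packets were lost at all, so small losses never round down to zero. A quick worked example, assuming the ratio is first scaled as in the surrounding code:

    sent, received = 2000000, 1999997          # 3 packets lost
    ppm = 1000000 * (sent - received) // sent  # 1  (1.5 ppm, truncated)
    ppm += (sent - received) % sent > 0        # any loss at all adds 1 -> 2
    assert ppm == 2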
diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py
index be2fa3f3b..4d7c4f9be 100644
--- a/yardstick/benchmark/scenarios/networking/vnf_generic.py
+++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py
@@ -13,20 +13,19 @@
# limitations under the License.
import copy
-import logging
-import time
-
import ipaddress
from itertools import chain
+import logging
import os
import sys
+import time
import six
import yaml
from yardstick.benchmark.scenarios import base as scenario_base
-from yardstick.error import IncorrectConfig
from yardstick.common.constants import LOG_DIR
+from yardstick.common import exceptions
from yardstick.common.process import terminate_children
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
@@ -134,11 +133,10 @@ class NetworkServiceTestCase(scenario_base.Scenario):
with utils.open_relative_file(profile, path) as infile:
return infile.read()
- def _get_topology(self):
- topology = self.scenario_cfg["topology"]
- path = self.scenario_cfg["task_path"]
- with utils.open_relative_file(topology, path) as infile:
- return infile.read()
+ def _get_duration(self):
+ options = self.scenario_cfg.get('options', {})
+ return options.get('duration',
+ tprofile_base.TrafficProfileConfig.DEFAULT_DURATION)
def _fill_traffic_profile(self):
tprofile = self._get_traffic_profile()
@@ -148,12 +146,17 @@ class NetworkServiceTestCase(scenario_base.Scenario):
'imix': self._get_traffic_imix(),
tprofile_base.TrafficProfile.UPLINK: {},
tprofile_base.TrafficProfile.DOWNLINK: {},
- 'extra_args': extra_args
- }
-
+ 'extra_args': extra_args,
+ 'duration': self._get_duration()}
traffic_vnfd = vnfdgen.generate_vnfd(tprofile, tprofile_data)
self.traffic_profile = tprofile_base.TrafficProfile.get(traffic_vnfd)
+ def _get_topology(self):
+ topology = self.scenario_cfg["topology"]
+ path = self.scenario_cfg["task_path"]
+ with utils.open_relative_file(topology, path) as infile:
+ return infile.read()
+
def _render_topology(self):
topology = self._get_topology()
topology_args = self.scenario_cfg.get('extra_args', {})
@@ -190,8 +193,9 @@ class NetworkServiceTestCase(scenario_base.Scenario):
try:
node0_data, node1_data = vld["vnfd-connection-point-ref"]
except (ValueError, TypeError):
- raise IncorrectConfig("Topology file corrupted, "
- "wrong endpoint count for connection")
+ raise exceptions.IncorrectConfig(
+ error_msg='Topology file corrupted, wrong endpoint count '
+ 'for connection')
node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
@@ -237,15 +241,17 @@ class NetworkServiceTestCase(scenario_base.Scenario):
except KeyError:
LOG.exception("")
- raise IncorrectConfig("Required interface not found, "
- "topology file corrupted")
+ raise exceptions.IncorrectConfig(
+ error_msg='Required interface not found, topology file '
+ 'corrupted')
for vld in self.topology['vld']:
try:
node0_data, node1_data = vld["vnfd-connection-point-ref"]
except (ValueError, TypeError):
- raise IncorrectConfig("Topology file corrupted, "
- "wrong endpoint count for connection")
+ raise exceptions.IncorrectConfig(
+ error_msg='Topology file corrupted, wrong endpoint count '
+ 'for connection')
node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])
@@ -330,8 +336,9 @@ class NetworkServiceTestCase(scenario_base.Scenario):
except StopIteration:
pass
- raise IncorrectConfig("No implementation for %s found in %s" %
- (expected_name, classes_found))
+ message = ('No implementation for %s found in %s'
+ % (expected_name, classes_found))
+ raise exceptions.IncorrectConfig(error_msg=message)
@staticmethod
def create_interfaces_from_node(vnfd, node):
@@ -441,7 +448,7 @@ class NetworkServiceTestCase(scenario_base.Scenario):
traffic_gen.listen_traffic(self.traffic_profile)
# register collector with yardstick for KPI collection.
- self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
+ self.collector = Collector(self.vnfs)
self.collector.start()
# Start the actual traffic
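Editor's note: the traffic profile is now rendered with a 'duration' value taken from the scenario options, defaulting to TrafficProfileConfig.DEFAULT_DURATION. A minimal sketch of the lookup; the stand-in default below is illustrative only, since the constant's value is defined elsewhere:

    class TrafficProfileConfig(object):
        DEFAULT_DURATION = 30  # stand-in value for illustration

    def _get_duration(scenario_cfg):
        options = scenario_cfg.get('options', {})
        return options.get('duration', TrafficProfileConfig.DEFAULT_DURATION)

    # {'options': {'duration': 120}} -> 120; {} -> DEFAULT_DURATION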
diff --git a/yardstick/benchmark/scenarios/networking/vsperf.py b/yardstick/benchmark/scenarios/networking/vsperf.py
index 705544c41..2b3474070 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf.py
@@ -215,15 +215,15 @@ class Vsperf(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
index 454587829..27bf40dcb 100644
--- a/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
+++ b/yardstick/benchmark/scenarios/networking/vsperf_dpdk.py
@@ -231,7 +231,7 @@ class VsperfDPDK(base.Scenario):
is_run = True
cmd = "ip a | grep %s 2>/dev/null" % (self.tg_port1)
LOG.debug("Executing command: %s", cmd)
- status, stdout, stderr = self.client.execute(cmd)
+ _, stdout, _ = self.client.execute(cmd)
if stdout:
is_run = False
return is_run
@@ -325,15 +325,15 @@ class VsperfDPDK(base.Scenario):
if 'sla' in self.scenario_cfg and \
'metrics' in self.scenario_cfg['sla']:
for metric in self.scenario_cfg['sla']['metrics'].split(','):
- assert metric in result, \
- '%s is not collected by VSPERF' % (metric)
- assert metric in self.scenario_cfg['sla'], \
- '%s is not defined in SLA' % (metric)
+ self.verify_SLA(metric in result,
+ '%s was not collected by VSPERF' % metric)
+ self.verify_SLA(metric in self.scenario_cfg['sla'],
+ '%s is not defined in SLA' % metric)
vs_res = float(result[metric])
sla_res = float(self.scenario_cfg['sla'][metric])
- assert vs_res >= sla_res, \
- 'VSPERF_%s(%f) < SLA_%s(%f)' % \
- (metric, vs_res, metric, sla_res)
+ self.verify_SLA(vs_res >= sla_res,
+ 'VSPERF_%s(%f) < SLA_%s(%f)'
+ % (metric, vs_res, metric, sla_res))
def teardown(self):
"""cleanup after the test execution"""
diff --git a/yardstick/benchmark/scenarios/storage/fio.py b/yardstick/benchmark/scenarios/storage/fio.py
index d3ed840d8..c57c6edf2 100644
--- a/yardstick/benchmark/scenarios/storage/fio.py
+++ b/yardstick/benchmark/scenarios/storage/fio.py
@@ -223,7 +223,7 @@ class Fio(base.Scenario):
sla_error += "%s %d < " \
"sla:%s(%d); " % (k, v, k, min_v)
- assert sla_error == "", sla_error
+ self.verify_SLA(sla_error == "", sla_error)
def _test():
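The assert-to-verify_SLA conversions above rely on a helper on the scenario base class; a minimal sketch of what such a helper looks like, assuming it wraps the new SLAValidationError (illustrative, not the verbatim base.Scenario code):

    from yardstick.common import exceptions as y_exc

    def verify_SLA(self, condition, error_msg):
        # Raise the SLA validation exception when the condition fails.
        if not condition:
            raise y_exc.SLAValidationError(
                case_name=self.__scenario_type__, error_msg=error_msg)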
diff --git a/yardstick/common/ansible_common.py b/yardstick/common/ansible_common.py
index 38d2dd7c2..ca5a110e2 100644
--- a/yardstick/common/ansible_common.py
+++ b/yardstick/common/ansible_common.py
@@ -514,7 +514,7 @@ class AnsibleCommon(object):
parser.add_section('defaults')
parser.set('defaults', 'host_key_checking', 'False')
- cfg_path = os.path.join(directory, 'setup.cfg')
+ cfg_path = os.path.join(directory, 'ansible.cfg')
with open(cfg_path, 'w') as f:
parser.write(f)
diff --git a/yardstick/common/constants.py b/yardstick/common/constants.py
index 153bd4bf4..f6e4ab7e9 100644
--- a/yardstick/common/constants.py
+++ b/yardstick/common/constants.py
@@ -145,6 +145,21 @@ BASE_URL = 'http://localhost:5000'
ENV_ACTION_API = BASE_URL + '/yardstick/env/action'
ASYNC_TASK_API = BASE_URL + '/yardstick/asynctask'
+API_ERRORS = {
+ 'UploadOpenrcError': {
+ 'message': "Upload openrc ERROR!",
+ 'status': API_ERROR,
+ },
+ 'UpdateOpenrcError': {
+ 'message': "Update openrc ERROR!",
+ 'status': API_ERROR,
+ },
+ 'ApiServerError': {
+ 'message': "An unkown exception happened to Api Server!",
+ 'status': API_ERROR,
+ },
+}
+
# flags
IS_EXISTING = 'is_existing'
IS_PUBLIC = 'is_public'
@@ -152,3 +167,6 @@ IS_PUBLIC = 'is_public'
# general
TESTCASE_PRE = 'opnfv_yardstick_'
TESTSUITE_PRE = 'opnfv_'
+
+# OpenStack cloud default config parameters
+OS_CLOUD_DEFAULT_CONFIG = {'verify': False}
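As an illustration, the new constants could be consumed along these lines; the error-lookup pattern is an assumption, only the constants themselves are part of this change:

    # Hypothetical lookup of an API error entry by exception class name.
    entry = constants.API_ERRORS['UploadOpenrcError']
    status, message = entry['status'], entry['message']

    # get_shade_client() (see openstack_utils below) merges OS_CLOUD_DEFAULT_CONFIG,
    # so 'verify': False applies unless overridden per call.
    cloud = openstack_utils.get_shade_client(verify=True)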
diff --git a/yardstick/common/exceptions.py b/yardstick/common/exceptions.py
index 439b9cb1b..954d655cb 100644
--- a/yardstick/common/exceptions.py
+++ b/yardstick/common/exceptions.py
@@ -21,6 +21,16 @@ class ProcessExecutionError(RuntimeError):
self.returncode = returncode
+class ErrorClass(object):
+
+ def __init__(self, *args, **kwargs):
+ if 'test' not in kwargs:
+ raise RuntimeError
+
+ def __getattr__(self, item):
+ raise AttributeError
+
+
class YardstickException(Exception):
"""Base Yardstick Exception.
@@ -54,16 +64,30 @@ class YardstickException(Exception):
return False
+class ResourceCommandError(YardstickException):
+ message = 'Command: "%(command)s" Failed, stderr: "%(stderr)s"'
+
+
class FunctionNotImplemented(YardstickException):
message = ('The function "%(function_name)s" is not implemented in '
'"%(class_name)" class.')
+class InfluxDBConfigurationMissing(YardstickException):
+ message = ('InfluxDB configuration is not available. Add "influxdb" as '
+ 'a dispatcher and the configuration section')
+
+
class YardstickBannedModuleImported(YardstickException):
# pragma: no cover
message = 'Module "%(module)s" cannot be imported. Reason: "%(reason)s"'
+class PayloadMissingAttributes(YardstickException):
+ message = ('Error instantiating a Payload class, missing attributes: '
+ '%(missing_attributes)s')
+
+
class HeatTemplateError(YardstickException):
"""Error in Heat during the stack deployment"""
message = ('Error in Heat during the creation of the OpenStack stack '
@@ -112,8 +136,39 @@ class LibvirtCreateError(YardstickException):
message = 'Error creating the virtual machine. Error: %(error)s.'
+class LibvirtQemuImageBaseImageNotPresent(YardstickException):
+ message = ('Error creating the qemu image for %(vm_image)s. Base image: '
+ '%(base_image)s. Base image not present in execution host or '
+ 'remote host.')
+
+
+class LibvirtQemuImageCreateError(YardstickException):
+ message = ('Error creating the qemu image for %(vm_image)s. Base image: '
+ '%(base_image)s. Error: %(error)s.')
+
+
+class SSHError(YardstickException):
+ message = '%(error_msg)s'
+
+
+class SSHTimeout(SSHError):
+ pass
+
+
+class IncorrectConfig(YardstickException):
+ message = '%(error_msg)s'
+
+
+class IncorrectSetup(YardstickException):
+ message = '%(error_msg)s'
+
+
+class IncorrectNodeSetup(IncorrectSetup):
+ pass
+
+
class ScenarioConfigContextNameNotFound(YardstickException):
- message = 'Context name "%(context_name)s" not found'
+ message = 'Context for host name "%(host_name)s" not found'
class StackCreationInterrupt(YardstickException):
@@ -132,6 +187,14 @@ class TaskRenderError(YardstickException):
message = 'Failed to render template:\n%(input_task)s'
+class TimerTimeout(YardstickException):
+ message = 'Timer timeout expired, %(timeout)s seconds'
+
+
+class WaitTimeout(YardstickException):
+ message = 'Wait timeout while waiting for condition'
+
+
class ScenarioCreateNetworkError(YardstickException):
message = 'Create Neutron Network Scenario failed'
@@ -166,3 +229,87 @@ class ScenarioCreateFloatingIPError(YardstickException):
class ScenarioDeleteFloatingIPError(YardstickException):
message = 'Delete Neutron Floating IP Scenario failed'
+
+
+class ScenarioCreateSecurityGroupError(YardstickException):
+ message = 'Create Neutron Security Group Scenario failed'
+
+
+class ScenarioDeleteNetworkError(YardstickException):
+ message = 'Delete Neutron Network Scenario failed'
+
+
+class ScenarioCreateServerError(YardstickException):
+ message = 'Nova Create Server Scenario failed'
+
+
+class ScenarioDeleteServerError(YardstickException):
+ message = 'Delete Server Scenario failed'
+
+
+class ScenarioCreateKeypairError(YardstickException):
+ message = 'Nova Create Keypair Scenario failed'
+
+
+class ScenarioDeleteKeypairError(YardstickException):
+ message = 'Nova Delete Keypair Scenario failed'
+
+
+class ScenarioAttachVolumeError(YardstickException):
+ message = 'Nova Attach Volume Scenario failed'
+
+
+class ScenarioGetServerError(YardstickException):
+ message = 'Nova Get Server Scenario failed'
+
+
+class ScenarioGetFlavorError(YardstickException):
+ message = 'Nova Get Flavor Scenario failed'
+
+
+class ScenarioCreateVolumeError(YardstickException):
+ message = 'Cinder Create Volume Scenario failed'
+
+
+class ScenarioDeleteVolumeError(YardstickException):
+ message = 'Cinder Delete Volume Scenario failed'
+
+
+class ScenarioDetachVolumeError(YardstickException):
+ message = 'Cinder Detach Volume Scenario failed'
+
+
+class ApiServerError(YardstickException):
+ message = 'An unknown exception happened to Api Server!'
+
+
+class UploadOpenrcError(ApiServerError):
+ message = 'Upload openrc ERROR!'
+
+
+class UpdateOpenrcError(ApiServerError):
+ message = 'Update openrc ERROR!'
+
+
+class ScenarioCreateImageError(YardstickException):
+ message = 'Glance Create Image Scenario failed'
+
+
+class ScenarioDeleteImageError(YardstickException):
+ message = 'Glance Delete Image Scenario failed'
+
+
+class IxNetworkClientNotConnected(YardstickException):
+ message = 'IxNetwork client not connected to a TCL server'
+
+
+class IxNetworkFlowNotPresent(YardstickException):
+ message = 'Flow Group "%(flow_group)s" is not present'
+
+
+class IxNetworkFieldNotPresentInStackItem(YardstickException):
+ message = 'Field "%(field_name)s" not present in stack item %(stack_item)s'
+
+
+class SLAValidationError(YardstickException):
+ message = '%(case_name)s SLA validation failed. Error: %(error_msg)s'
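All of the new exceptions follow the YardstickException pattern: keyword arguments fill the message template when the exception is raised. For example (the argument values below are illustrative):

    from yardstick.common import exceptions

    raise exceptions.IncorrectConfig(
        error_msg='Topology file corrupted, wrong endpoint count for connection')

    raise exceptions.SLAValidationError(
        case_name='opnfv_yardstick_tc005', error_msg='IOPS 90 < SLA IOPS 100')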
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index 0cf7b9eab..d60c9b23a 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -41,6 +41,7 @@ def create_service(template,
namespace='default',
wait=False,
**kwargs): # pragma: no cover
+ # pylint: disable=unused-argument
core_v1_api = get_core_api()
metadata = client.V1ObjectMeta(**template.get('metadata', {}))
@@ -63,7 +64,8 @@ def delete_service(name,
**kwargs): # pragma: no cover
core_v1_api = get_core_api()
try:
- core_v1_api.delete_namespaced_service(name, namespace, **kwargs)
+ body = client.V1DeleteOptions()
+ core_v1_api.delete_namespaced_service(name, namespace, body, **kwargs)
except ApiException:
LOG.exception('Delete Service failed')
@@ -86,7 +88,7 @@ def create_replication_controller(template,
namespace='default',
wait=False,
**kwargs): # pragma: no cover
-
+ # pylint: disable=unused-argument
core_v1_api = get_core_api()
try:
core_v1_api.create_namespaced_replication_controller(namespace,
@@ -101,7 +103,7 @@ def delete_replication_controller(name,
namespace='default',
wait=False,
**kwargs): # pragma: no cover
-
+ # pylint: disable=unused-argument
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -119,7 +121,7 @@ def delete_pod(name,
namespace='default',
wait=False,
**kwargs): # pragma: no cover
-
+ # pylint: disable=unused-argument
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
@@ -147,6 +149,7 @@ def read_pod(name,
def read_pod_status(name, namespace='default', **kwargs): # pragma: no cover
+ # pylint: disable=unused-argument
return read_pod(name).status.phase
@@ -155,6 +158,7 @@ def create_config_map(name,
namespace='default',
wait=False,
**kwargs): # pragma: no cover
+ # pylint: disable=unused-argument
core_v1_api = get_core_api()
metadata = client.V1ObjectMeta(name=name)
body = client.V1ConfigMap(data=data, metadata=metadata)
@@ -169,6 +173,7 @@ def delete_config_map(name,
namespace='default',
wait=False,
**kwargs): # pragma: no cover
+ # pylint: disable=unused-argument
core_v1_api = get_core_api()
body = kwargs.get('body', client.V1DeleteOptions())
kwargs.pop('body', None)
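The delete helpers now pass an explicit V1DeleteOptions body; a usage sketch against the kubernetes Python client mirroring the delete_service call above ('my-service' is a placeholder name):

    from kubernetes import client
    from yardstick.common import kubernetes_utils

    core_v1_api = kubernetes_utils.get_core_api()
    body = client.V1DeleteOptions()
    core_v1_api.delete_namespaced_service('my-service', 'default', body)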
diff --git a/yardstick/common/messaging/__init__.py b/yardstick/common/messaging/__init__.py
new file mode 100644
index 000000000..f0f012ec3
--- /dev/null
+++ b/yardstick/common/messaging/__init__.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MQ is statically configured:
+# - MQ service: RabbitMQ
+# - user/password: yardstick/yardstick
+# - host:port: localhost:5672
+MQ_USER = 'yardstick'
+MQ_PASS = 'yardstick'
+MQ_SERVICE = 'rabbit'
+SERVER = 'localhost'
+PORT = 5672
+TRANSPORT_URL = (MQ_SERVICE + '://' + MQ_USER + ':' + MQ_PASS + '@' + SERVER +
+ ':' + str(PORT) + '/')
+
+# RPC server.
+RPC_SERVER_EXECUTOR = 'threading'
+
+# Topics.
+RUNNER = 'runner'
+
+# Methods.
+# RUNNER methods:
+RUNNER_INFO = 'runner_info'
+RUNNER_LOOP = 'runner_loop'
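With the values above, the transport URL resolves to:

    >>> from yardstick.common import messaging
    >>> messaging.TRANSPORT_URL
    'rabbit://yardstick:yardstick@localhost:5672/'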
diff --git a/yardstick/common/messaging/consumer.py b/yardstick/common/messaging/consumer.py
new file mode 100644
index 000000000..24ec6f184
--- /dev/null
+++ b/yardstick/common/messaging/consumer.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import logging
+
+from oslo_config import cfg
+import oslo_messaging
+import six
+
+from yardstick.common import messaging
+
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class NotificationHandler(object):
+ """Abstract class to define a endpoint object for a MessagingConsumer"""
+
+ def __init__(self, _id, ctx_pids, queue):
+ self._id = _id
+ self._ctx_pids = ctx_pids
+ self._queue = queue
+
+
+@six.add_metaclass(abc.ABCMeta)
+class MessagingConsumer(object):
+ """Abstract class to implement a MQ consumer
+
+ This abstract class allows a class implementing this interface to receive
+ the messages published by a `MessagingNotifier`.
+ """
+
+ def __init__(self, topic, pids, endpoints, fanout=True):
+ """Init function.
+
+ :param topic: (string) MQ exchange topic
+ :param pids: (list of int) list of PIDs of the processes implementing
+ the MQ Notifier which will be in the message context
+ :param endpoints: (list of class) list of classes implementing the
+ methods (see `MessagingNotifier.send_message`) used by
+ the Notifier
+ :param fanout: (bool) MQ clients may request that a copy of the message
+ be delivered to all servers listening on a topic by
+ setting fanout to ``True``, rather than just one of them
+ :returns: `MessagingConsumer` class object
+ """
+
+ self._pids = pids
+ self._endpoints = endpoints
+ self._transport = oslo_messaging.get_rpc_transport(
+ cfg.CONF, url=messaging.TRANSPORT_URL)
+ self._target = oslo_messaging.Target(topic=topic, fanout=fanout,
+ server=messaging.SERVER)
+ self._server = oslo_messaging.get_rpc_server(
+ self._transport, self._target, self._endpoints,
+ executor=messaging.RPC_SERVER_EXECUTOR,
+ access_policy=oslo_messaging.DefaultRPCAccessPolicy)
+
+ def start_rpc_server(self):
+ """Start the RPC server."""
+ if self._server:
+ self._server.start()
+
+ def stop_rpc_server(self):
+ """Stop the RPC server."""
+ if self._server:
+ self._server.stop()
+
+ def wait(self):
+ """Wait for message processing to complete (blocking)."""
+ if self._server:
+ self._server.wait()
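A minimal sketch of how a consumer could be wired to the 'runner' topic; the handler and class names below are illustrative and not part of this change:

    from yardstick.common import messaging
    from yardstick.common.messaging import consumer

    class RunnerInfoHandler(consumer.NotificationHandler):
        # Endpoint method name matches messaging.RUNNER_INFO.
        def runner_info(self, ctxt, **payload_data):
            if ctxt['pid'] in self._ctx_pids:
                self._queue.put(payload_data)

    class RunnerConsumer(consumer.MessagingConsumer):
        def __init__(self, ctx_pids, queue):
            endpoints = [RunnerInfoHandler(id(self), ctx_pids, queue)]
            super(RunnerConsumer, self).__init__(
                messaging.RUNNER, ctx_pids, endpoints)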
diff --git a/yardstick/common/messaging/payloads.py b/yardstick/common/messaging/payloads.py
new file mode 100644
index 000000000..d29d79808
--- /dev/null
+++ b/yardstick/common/messaging/payloads.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+
+import six
+
+from yardstick.common import exceptions
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Payload(object):
+ """Base Payload class to transfer data through the MQ service"""
+
+ REQUIRED_FIELDS = {'version'}
+
+ def __init__(self, **kwargs):
+ """Init method
+
+ :param kwargs: (dictionary) attributes and values of the object
+ :returns: Payload object
+ """
+
+ if not all(req_field in kwargs for req_field in self.REQUIRED_FIELDS):
+ _attrs = self.REQUIRED_FIELDS - set(kwargs)
+ missing_attributes = ', '.join(str(_attr) for _attr in _attrs)
+ raise exceptions.PayloadMissingAttributes(
+ missing_attributes=missing_attributes)
+
+ for name, value in kwargs.items():
+ setattr(self, name, value)
+
+ self._fields = set(kwargs.keys())
+
+ def obj_to_dict(self):
+ """Returns a dictionary with the attributes of the object"""
+ return {field: getattr(self, field) for field in self._fields}
+
+ @classmethod
+ def dict_to_obj(cls, _dict):
+ """Returns a Payload object built from the dictionary elements"""
+ return cls(**_dict)
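A concrete payload is a subclass that extends REQUIRED_FIELDS; the class and field names below are illustrative:

    class RunnerPayload(payloads.Payload):
        REQUIRED_FIELDS = {'version', 'data', 'errors'}

    pl = RunnerPayload(version=1, data={'tput': 1000}, errors='')
    # Round-trip through the dict representation.
    assert RunnerPayload.dict_to_obj(pl.obj_to_dict()).data == {'tput': 1000}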
diff --git a/yardstick/common/messaging/producer.py b/yardstick/common/messaging/producer.py
new file mode 100644
index 000000000..b6adc0c17
--- /dev/null
+++ b/yardstick/common/messaging/producer.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import logging
+import os
+
+from oslo_config import cfg
+import oslo_messaging
+import six
+
+from yardstick.common import messaging
+
+
+LOG = logging.getLogger(__name__)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class MessagingProducer(object):
+ """Abstract class to implement a MQ producer
+
+ This abstract class allows a class implementing this interface to publish
+ messages in a message queue.
+ """
+
+ def __init__(self, topic, pid=os.getpid(), fanout=True):
+ """Init function.
+
+ :param topic: (string) MQ exchange topic
+ :param pid: (int) PID of the process implementing this MQ Notifier
+ :param fanout: (bool) MQ clients may request that a copy of the message
+ be delivered to all servers listening on a topic by
+ setting fanout to ``True``, rather than just one of them
+ :returns: `MessagingProducer` class object
+ """
+ self._topic = topic
+ self._pid = pid
+ self._fanout = fanout
+ self._transport = oslo_messaging.get_rpc_transport(
+ cfg.CONF, url=messaging.TRANSPORT_URL)
+ self._target = oslo_messaging.Target(topic=topic, fanout=fanout,
+ server=messaging.SERVER)
+ self._notifier = oslo_messaging.RPCClient(self._transport,
+ self._target)
+
+ def send_message(self, method, payload):
+ """Send a cast message, that will invoke a method without blocking.
+
+ The cast() method is used to invoke an RPC method that does not return
+ a value. cast() RPC requests may be broadcast to all Servers listening
+ on a given topic by setting the fanout Target property to ``True``.
+
+ :param method: (string) method name that must be implemented in the
+ consumer endpoints
+ :param payload: (subclass `Payload`) payload content
+ """
+ self._notifier.cast({'pid': self._pid},
+ method,
+ **payload.obj_to_dict())
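And the matching notifier side, again an illustrative sketch that reuses the RunnerPayload sketch shown for payloads.py:

    import os

    from yardstick.common import messaging
    from yardstick.common.messaging import producer

    class RunnerProducer(producer.MessagingProducer):
        def __init__(self, pid):
            super(RunnerProducer, self).__init__(messaging.RUNNER, pid=pid)

    notifier = RunnerProducer(os.getpid())
    notifier.send_message(messaging.RUNNER_INFO,
                          RunnerPayload(version=1, data={}, errors=''))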
diff --git a/yardstick/common/openstack_utils.py b/yardstick/common/openstack_utils.py
index 2785230c0..6ff6617a9 100644
--- a/yardstick/common/openstack_utils.py
+++ b/yardstick/common/openstack_utils.py
@@ -7,20 +7,20 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import os
-import time
-import sys
+import copy
import logging
+import os
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
from keystoneauth1 import loading
from keystoneauth1 import session
+from neutronclient.neutron import client as neutronclient
import shade
from shade import exc
-from cinderclient import client as cinderclient
-from novaclient import client as novaclient
-from glanceclient import client as glanceclient
-from neutronclient.neutron import client as neutronclient
+from yardstick.common import constants
log = logging.getLogger(__name__)
@@ -156,204 +156,205 @@ def get_glance_client(): # pragma: no cover
return glanceclient.Client(get_glance_client_version(), session=sess)
-def get_shade_client():
- return shade.openstack_cloud()
+def get_shade_client(**os_cloud_config):
+ """Get Shade OpenStack cloud client
+
+ By default, the input parameters given to "shade.openstack_cloud" method
+ are stored in "constants.OS_CLOUD_DEFAULT_CONFIG". The input parameters
+ passed in this function, "os_cloud_config", will overwrite the default
+ ones.
+
+ :param os_cloud_config: (kwargs) input arguments for
+ "shade.openstack_cloud" method.
+ :return: ``shade.OpenStackCloud`` object.
+ """
+ params = copy.deepcopy(constants.OS_CLOUD_DEFAULT_CONFIG)
+ params.update(os_cloud_config)
+ return shade.openstack_cloud(**params)
# *********************************************
# NOVA
# *********************************************
-def get_instances(nova_client):
- try:
- return nova_client.servers.list(search_opts={'all_tenants': 1})
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_instances(nova_client)]")
-
-
-def get_instance_status(nova_client, instance): # pragma: no cover
- try:
- return nova_client.servers.get(instance.id).status
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_instance_status(nova_client)]")
-
-
-def get_instance_by_name(nova_client, instance_name): # pragma: no cover
- try:
- return nova_client.servers.find(name=instance_name)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_instance_by_name(nova_client, '%s')]",
- instance_name)
-
-
-def get_aggregates(nova_client): # pragma: no cover
- try:
- return nova_client.aggregates.list()
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_aggregates(nova_client)]")
+def create_keypair(shade_client, name, public_key=None):
+ """Create a new keypair.
+ :param name: Name of the keypair being created.
+ :param public_key: Public key for the new keypair.
-def get_availability_zones(nova_client): # pragma: no cover
- try:
- return nova_client.availability_zones.list()
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_availability_zones(nova_client)]")
-
-
-def get_availability_zone_names(nova_client): # pragma: no cover
+ :return: Created keypair.
+ """
try:
- return [az.zoneName for az in get_availability_zones(nova_client)]
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_availability_zone_names(nova_client)]")
+ return shade_client.create_keypair(name, public_key=public_key)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [create_keypair(shade_client)]. "
+ "Exception message, '%s'", o_exc.orig_message)
-def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover
+def create_instance_and_wait_for_active(shade_client, name, image,
+ flavor, auto_ip=True, ips=None,
+ ip_pool=None, root_volume=None,
+ terminate_volume=False, wait=True,
+ timeout=180, reuse_ips=True,
+ network=None, boot_from_volume=False,
+ volume_size='20', boot_volume=None,
+ volumes=None, nat_destination=None,
+ **kwargs):
+ """Create a virtual server instance.
+
+ :param name:(string) Name of the server.
+ :param image:(dict) Image dict, name or ID to boot with. Image is required
+ unless boot_volume is given.
+ :param flavor:(dict) Flavor dict, name or ID to boot onto.
+ :param auto_ip: Whether to take actions to find a routable IP for
+ the server.
+ :param ips: List of IPs to attach to the server.
+ :param ip_pool:(string) Name of the network or floating IP pool to get an
+ address from.
+ :param root_volume:(string) Name or ID of a volume to boot from.
+ (defaults to None - deprecated, use boot_volume)
+ :param boot_volume:(string) Name or ID of a volume to boot from.
+ :param terminate_volume:(bool) If booting from a volume, whether it should
+ be deleted when the server is destroyed.
+ :param volumes:(optional) A list of volumes to attach to the server.
+ :param wait:(optional) Wait for the address to appear as assigned to the server.
+ :param timeout: Seconds to wait, defaults to 180.
+ :param reuse_ips:(bool)Whether to attempt to reuse pre-existing
+ floating ips should a floating IP be needed.
+ :param network:(dict) Network dict or name or ID to attach the server to.
+ Mutually exclusive with the nics parameter. Can also be
+ a list of network names or IDs or network dicts.
+ :param boot_from_volume:(bool) Whether to boot from volume. 'boot_volume'
+ implies True, but boot_from_volume=True with
+ no boot_volume is valid and will create a
+ volume from the image and use that.
+ :param volume_size: When booting an image from volume, how big should
+ the created volume be?
+ :param nat_destination: Which network should a created floating IP
+ be attached to, if it's not possible to infer from
+ the cloud's configuration.
+ :param meta:(optional) A dict of arbitrary key/value metadata to store for
+ this server. Both keys and values must be <=255 characters.
+ :param reservation_id: A UUID for the set of servers being requested.
+ :param min_count:(optional extension) The minimum number of servers to
+ launch.
+ :param max_count:(optional extension) The maximum number of servers to
+ launch.
+ :param security_groups: A list of security group names.
+ :param userdata: User data to pass to be exposed by the metadata server
+ this can be a file type object as well or a string.
+ :param key_name:(optional extension) Name of previously created keypair to
+ inject into the instance.
+ :param availability_zone: Name of the availability zone for instance
+ placement.
+ :param block_device_mapping:(optional) A dict of block device mappings for
+ this server.
+ :param block_device_mapping_v2:(optional) A dict of block device mappings
+ for this server.
+ :param nics:(optional extension) An ordered list of nics to be added to
+ this server, with information about connected networks, fixed
+ IPs, port etc.
+ :param scheduler_hints:(optional extension) Arbitrary key-value pairs
+ specified by the client to help boot an instance.
+ :param config_drive:(optional extension) Value for config drive either
+ boolean, or volume-id.
+ :param disk_config:(optional extension) Control how the disk is partitioned
+ when the server is created. Possible values are 'AUTO'
+ or 'MANUAL'.
+ :param admin_pass:(optional extension) Add a user supplied admin password.
+
+ :returns: The created server.
+ """
try:
- nova_client.aggregates.create(aggregate_name, av_zone)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_aggregate(nova_client, %s, %s)]",
- aggregate_name, av_zone)
- return False
- else:
- return True
+ return shade_client.create_server(
+ name, image, flavor, auto_ip=auto_ip, ips=ips, ip_pool=ip_pool,
+ root_volume=root_volume, terminate_volume=terminate_volume,
+ wait=wait, timeout=timeout, reuse_ips=reuse_ips, network=network,
+ boot_from_volume=boot_from_volume, volume_size=volume_size,
+ boot_volume=boot_volume, volumes=volumes,
+ nat_destination=nat_destination, **kwargs)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [create_instance(shade_client)]. "
+ "Exception message, '%s'", o_exc.orig_message)
-def get_aggregate_id(nova_client, aggregate_name): # pragma: no cover
- try:
- aggregates = get_aggregates(nova_client)
- _id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
- except Exception: # pylint: disable=broad-except
- log.exception("Error [get_aggregate_id(nova_client, %s)]",
- aggregate_name)
- else:
- return _id
+def attach_volume_to_server(shade_client, server_name_or_id, volume_name_or_id,
+ device=None, wait=True, timeout=None):
+ """Attach a volume to a server.
+ This will attach a volume, described by the passed in volume
+ dict, to the server described by the passed in server dict on the named
+ device on the server.
-def add_host_to_aggregate(nova_client, aggregate_name,
- compute_host): # pragma: no cover
- try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.add_host(aggregate_id, compute_host)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
- aggregate_name, compute_host)
- return False
- else:
- return True
+ If the volume is already attached to the server, or generally not
+ available, then an exception is raised. To re-attach to a server,
+ but under a different device, the user must detach it first.
+ :param server_name_or_id:(string) The server name or id to attach to.
+ :param volume_name_or_id:(string) The volume name or id to attach.
+ :param device:(string) The device name where the volume will attach.
+ :param wait:(bool) If true, waits for volume to be attached.
+ :param timeout: Seconds to wait for volume attachment. None is forever.
-def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
- compute_host): # pragma: no cover
+ :returns: True if the attach succeeded, False otherwise.
+ """
try:
- create_aggregate(nova_client, aggregate_name, av_zone)
- add_host_to_aggregate(nova_client, aggregate_name, compute_host)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_aggregate_with_host("
- "nova_client, %s, %s, %s)]",
- aggregate_name, av_zone, compute_host)
- return False
- else:
+ server = shade_client.get_server(name_or_id=server_name_or_id)
+ volume = shade_client.get_volume(volume_name_or_id)
+ shade_client.attach_volume(
+ server, volume, device=device, wait=wait, timeout=timeout)
return True
-
-
-def create_keypair(name, key_path=None): # pragma: no cover
- try:
- with open(key_path) as fpubkey:
- keypair = get_nova_client().keypairs.create(
- name=name, public_key=fpubkey.read())
- return keypair
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_keypair(nova_client)]")
-
-
-def create_instance(json_body): # pragma: no cover
- try:
- return get_nova_client().servers.create(**json_body)
- except Exception: # pylint: disable=broad-except
- log.exception("Error create instance failed")
- return None
-
-
-def create_instance_and_wait_for_active(json_body): # pragma: no cover
- SLEEP = 3
- VM_BOOT_TIMEOUT = 180
- nova_client = get_nova_client()
- instance = create_instance(json_body)
- for _ in range(int(VM_BOOT_TIMEOUT / SLEEP)):
- status = get_instance_status(nova_client, instance)
- if status.lower() == "active":
- return instance
- elif status.lower() == "error":
- log.error("The instance went to ERROR status.")
- return None
- time.sleep(SLEEP)
- log.error("Timeout booting the instance.")
- return None
-
-
-def attach_server_volume(server_id, volume_id,
- device=None): # pragma: no cover
- try:
- get_nova_client().volumes.create_server_volume(server_id,
- volume_id, device)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
- server_id, volume_id)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [attach_volume_to_server(shade_client)]. "
+ "Exception message: %s", o_exc.orig_message)
return False
- else:
- return True
-def delete_instance(nova_client, instance_id): # pragma: no cover
- try:
- nova_client.servers.force_delete(instance_id)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_instance(nova_client, '%s')]",
- instance_id)
- return False
- else:
- return True
-
+def delete_instance(shade_client, name_or_id, wait=False, timeout=180,
+ delete_ips=False, delete_ip_retry=1):
+ """Delete a server instance.
-def remove_host_from_aggregate(nova_client, aggregate_name,
- compute_host): # pragma: no cover
+ :param name_or_id: name or ID of the server to delete
+ :param wait:(bool) If true, waits for server to be deleted.
+ :param timeout:(int) Seconds to wait for server deletion.
+ :param delete_ips:(bool) If true, deletes any floating IPs associated with
+ the instance.
+ :param delete_ip_retry:(int) Number of times to retry deleting
+ any floating ips, should the first try be
+ unsuccessful.
+ :returns: True if delete succeeded, False otherwise.
+ """
try:
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- nova_client.aggregates.remove_host(aggregate_id, compute_host)
- except Exception: # pylint: disable=broad-except
- log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
- aggregate_name, compute_host)
+ return shade_client.delete_server(
+ name_or_id, wait=wait, timeout=timeout, delete_ips=delete_ips,
+ delete_ip_retry=delete_ip_retry)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [delete_instance(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id,
+ o_exc.orig_message)
return False
- else:
- return True
-def remove_hosts_from_aggregate(nova_client,
- aggregate_name): # pragma: no cover
- aggregate_id = get_aggregate_id(nova_client, aggregate_name)
- hosts = nova_client.aggregates.get(aggregate_id).hosts
- assert(
- all(remove_host_from_aggregate(nova_client, aggregate_name, host)
- for host in hosts))
+def get_server(shade_client, name_or_id=None, filters=None, detailed=False,
+ bare=False):
+ """Get a server by name or ID.
+ :param name_or_id: Name or ID of the server.
+ :param filters:(dict) A dictionary of meta data to use for further
+ filtering.
+ :param detailed:(bool) Whether or not to add detailed additional
+ information.
+ :param bare:(bool) Whether to skip adding any additional information to the
+ server record.
-def delete_aggregate(nova_client, aggregate_name): # pragma: no cover
- try:
- remove_hosts_from_aggregate(nova_client, aggregate_name)
- nova_client.aggregates.delete(aggregate_name)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_aggregate(nova_client, %s)]",
- aggregate_name)
- return False
- else:
- return True
-
-
-def get_server_by_name(name): # pragma: no cover
+ :returns: A server ``munch.Munch`` or None if no matching server is found.
+ """
try:
- return get_nova_client().servers.list(search_opts={'name': name})[0]
- except IndexError:
- log.exception('Failed to get nova client')
- raise
+ return shade_client.get_server(name_or_id=name_or_id, filters=filters,
+ detailed=detailed, bare=bare)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [get_server(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
@@ -366,14 +367,6 @@ def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
return None
-def get_image_by_name(name): # pragma: no cover
- images = get_nova_client().images.list()
- try:
- return next((a for a in images if a.name == name))
- except StopIteration:
- log.exception('No image matched')
-
-
def get_flavor_id(nova_client, flavor_name): # pragma: no cover
flavors = nova_client.flavors.list(detailed=True)
flavor_id = ''
@@ -384,27 +377,22 @@ def get_flavor_id(nova_client, flavor_name): # pragma: no cover
return flavor_id
-def get_flavor_by_name(name): # pragma: no cover
- flavors = get_nova_client().flavors.list()
- try:
- return next((a for a in flavors if a.name == name))
- except StopIteration:
- log.exception('No flavor matched')
-
+def get_flavor(shade_client, name_or_id, filters=None, get_extra=True):
+ """Get a flavor by name or ID.
-def check_status(status, name, iterations, interval): # pragma: no cover
- for _ in range(iterations):
- try:
- server = get_server_by_name(name)
- except IndexError:
- log.error('Cannot found %s server', name)
- raise
+ :param name_or_id: Name or ID of the flavor.
+ :param filters: A dictionary of meta data to use for further filtering.
+ :param get_extra: Whether or not the list_flavors call should get the extra
+ flavor specs.
- if server.status == status:
- return True
-
- time.sleep(interval)
- return False
+ :returns: A flavor ``munch.Munch`` or None if no matching flavor is found.
+ """
+ try:
+ return shade_client.get_flavor(name_or_id, filters=filters,
+ get_extra=get_extra)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [get_flavor(shade_client, '%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
def delete_flavor(flavor_id): # pragma: no cover
@@ -417,12 +405,18 @@ def delete_flavor(flavor_id): # pragma: no cover
return True
-def delete_keypair(nova_client, key): # pragma: no cover
+def delete_keypair(shade_client, name):
+ """Delete a keypair.
+
+ :param name: Name of the keypair to delete.
+
+ :returns: True if delete succeeded, False otherwise.
+ """
try:
- nova_client.keypairs.delete(key=key)
- return True
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_keypair(nova_client)]")
+ return shade_client.delete_keypair(name)
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [delete_neutron_router(shade_client, '%s')]. "
+ "Exception message: %s", name, o_exc.orig_message)
return False
@@ -625,39 +619,6 @@ def delete_floating_ip(shade_client, floating_ip_id, retry=1):
return False
-def get_security_groups(neutron_client): # pragma: no cover
- try:
- security_groups = neutron_client.list_security_groups()[
- 'security_groups']
- return security_groups
- except Exception: # pylint: disable=broad-except
- log.error("Error [get_security_groups(neutron_client)]")
- return None
-
-
-def get_security_group_id(neutron_client, sg_name): # pragma: no cover
- security_groups = get_security_groups(neutron_client)
- id = ''
- for sg in security_groups:
- if sg['name'] == sg_name:
- id = sg['id']
- break
- return id
-
-
-def create_security_group(neutron_client, sg_name,
- sg_description): # pragma: no cover
- json_body = {'security_group': {'name': sg_name,
- 'description': sg_description}}
- try:
- secgroup = neutron_client.create_security_group(json_body)
- return secgroup['security_group']
- except Exception: # pylint: disable=broad-except
- log.error("Error [create_security_group(neutron_client, '%s', "
- "'%s')]", sg_name, sg_description)
- return None
-
-
def create_security_group_rule(shade_client, secgroup_name_or_id,
port_range_min=None, port_range_max=None,
protocol=None, remote_ip_prefix=None,
@@ -712,142 +673,216 @@ def create_security_group_rule(shade_client, secgroup_name_or_id,
return False
-def create_security_group_full(neutron_client, sg_name,
- sg_description): # pragma: no cover
- sg_id = get_security_group_id(neutron_client, sg_name)
- if sg_id != '':
+def create_security_group_full(shade_client, sg_name,
+ sg_description, project_id=None):
+ security_group = shade_client.get_security_group(sg_name)
+
+ if security_group:
log.info("Using existing security group '%s'...", sg_name)
- else:
- log.info("Creating security group '%s'...", sg_name)
- SECGROUP = create_security_group(neutron_client,
- sg_name,
- sg_description)
- if not SECGROUP:
- log.error("Failed to create the security group...")
- return None
-
- sg_id = SECGROUP['id']
-
- log.debug("Security group '%s' with ID=%s created successfully.",
- SECGROUP['name'], sg_id)
-
- log.debug("Adding ICMP rules in security group '%s'...", sg_name)
- if not create_security_group_rule(neutron_client, sg_id,
- 'ingress', 'icmp'):
- log.error("Failed to create the security group rule...")
- return None
-
- log.debug("Adding SSH rules in security group '%s'...", sg_name)
- if not create_security_group_rule(
- neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
- log.error("Failed to create the security group rule...")
- return None
-
- if not create_security_group_rule(
- neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
- log.error("Failed to create the security group rule...")
- return None
- return sg_id
+ return security_group['id']
+
+ log.info("Creating security group '%s'...", sg_name)
+ try:
+ security_group = shade_client.create_security_group(
+ sg_name, sg_description, project_id=project_id)
+ except (exc.OpenStackCloudException,
+ exc.OpenStackCloudUnavailableFeature) as op_exc:
+ log.error("Error [create_security_group(shade_client, %s, %s)]. "
+ "Exception message: %s", sg_name, sg_description,
+ op_exc.orig_message)
+ return
+
+ log.debug("Security group '%s' with ID=%s created successfully.",
+ security_group['name'], security_group['id'])
+
+ log.debug("Adding ICMP rules in security group '%s'...", sg_name)
+ if not create_security_group_rule(shade_client, security_group['id'],
+ direction='ingress', protocol='icmp'):
+ log.error("Failed to create the security group rule...")
+ shade_client.delete_security_group(sg_name)
+ return
+
+ log.debug("Adding SSH rules in security group '%s'...", sg_name)
+ if not create_security_group_rule(shade_client, security_group['id'],
+ direction='ingress', protocol='tcp',
+ port_range_min='22',
+ port_range_max='22'):
+ log.error("Failed to create the security group rule...")
+ shade_client.delete_security_group(sg_name)
+ return
+
+ if not create_security_group_rule(shade_client, security_group['id'],
+ direction='egress', protocol='tcp',
+ port_range_min='22',
+ port_range_max='22'):
+ log.error("Failed to create the security group rule...")
+ shade_client.delete_security_group(sg_name)
+ return
+ return security_group['id']
# *********************************************
# GLANCE
# *********************************************
-def get_image_id(glance_client, image_name): # pragma: no cover
- images = glance_client.images.list()
- return next((i.id for i in images if i.name == image_name), None)
-
-
-def create_image(glance_client, image_name, file_path, disk_format,
- container_format, min_disk, min_ram, protected, tag,
- public, **kwargs): # pragma: no cover
- if not os.path.isfile(file_path):
- log.error("Error: file %s does not exist.", file_path)
- return None
+def create_image(shade_client, name, filename=None, container='images',
+ md5=None, sha256=None, disk_format=None,
+ container_format=None, disable_vendor_agent=True,
+ wait=False, timeout=3600, allow_duplicates=False, meta=None,
+ volume=None, **kwargs):
+ """Upload an image.
+
+ :param name:(str) Name of the image to create. If it is a pathname of an
+ image, the name will be constructed from the extensionless
+ basename of the path.
+ :param filename:(str) The path to the file to upload, if needed.
+ :param container:(str) Name of the container in swift where images should
+ be uploaded for import if the cloud requires such a thing.
+ :param md5:(str) md5 sum of the image file. If not given, an md5 will
+ be calculated.
+ :param sha256:(str) sha256 sum of the image file. If not given, a sha256
+ will be calculated.
+ :param disk_format:(str) The disk format the image is in.
+ :param container_format:(str) The container format the image is in.
+ :param disable_vendor_agent:(bool) Whether or not to append metadata
+ flags to the image to inform the cloud in
+ question to not expect a vendor agent to be running.
+ :param wait:(bool) If true, waits for image to be created.
+ :param timeout:(str) Seconds to wait for image creation.
+ :param allow_duplicates:(bool) If true, skips checks that enforce unique
+ image name.
+ :param meta:(dict) A dict of key/value pairs to use for metadata that
+ bypasses automatic type conversion.
+ :param volume:(str) Name or ID or volume object of a volume to create an
+ image from.
+ Additional kwargs will be passed to the image creation as additional
+ metadata for the image and will have all values converted to string
+ except for min_disk, min_ram, size and virtual_size which will be
+ converted to int.
+ If you are sure you have all of your data types correct or have an
+ advanced need to be explicit, use meta. If you are just a normal
+ consumer, using kwargs is likely the right choice.
+ If a value is in meta and kwargs, meta wins.
+ :returns: Image id
+ """
try:
- image_id = get_image_id(glance_client, image_name)
+ image_id = shade_client.get_image_id(name)
if image_id is not None:
- log.info("Image %s already exists.", image_name)
- else:
- log.info("Creating image '%s' from '%s'...", image_name, file_path)
-
- image = glance_client.images.create(
- name=image_name, visibility=public, disk_format=disk_format,
- container_format=container_format, min_disk=min_disk,
- min_ram=min_ram, tags=tag, protected=protected, **kwargs)
- image_id = image.id
- with open(file_path) as image_data:
- glance_client.images.upload(image_id, image_data)
+ log.info("Image %s already exists.", name)
+ return image_id
+ log.info("Creating image '%s'", name)
+ image = shade_client.create_image(
+ name, filename=filename, container=container, md5=md5, sha256=sha256,
+ disk_format=disk_format, container_format=container_format,
+ disable_vendor_agent=disable_vendor_agent, wait=wait, timeout=timeout,
+ allow_duplicates=allow_duplicates, meta=meta, volume=volume, **kwargs)
+ image_id = image["id"]
return image_id
- except Exception: # pylint: disable=broad-except
- log.error(
- "Error [create_glance_image(glance_client, '%s', '%s', '%s')]",
- image_name, file_path, public)
- return None
+ except exc.OpenStackCloudException as op_exc:
+ log.error("Failed to create_image(shade_client). "
+ "Exception message: %s", op_exc.orig_message)
-def delete_image(glance_client, image_id): # pragma: no cover
+def delete_image(shade_client, name_or_id, wait=False, timeout=3600,
+ delete_objects=True):
try:
- glance_client.images.delete(image_id)
+ return shade_client.delete_image(name_or_id, wait=wait,
+ timeout=timeout,
+ delete_objects=delete_objects)
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_flavor(glance_client, %s)]", image_id)
+ except exc.OpenStackCloudException as op_exc:
+ log.error("Failed to delete_image(shade_client). "
+ "Exception message: %s", op_exc.orig_message)
+ return False
+
+
+def list_images(shade_client=None):
+ if shade_client is None:
+ shade_client = get_shade_client()
+
+ try:
+ return shade_client.list_images()
+ except exc.OpenStackCloudException as o_exc:
+ log.error("Error [list_images(shade_client)]."
+ "Exception message, '%s'", o_exc.orig_message)
return False
- else:
- return True
# *********************************************
# CINDER
# *********************************************
-def get_volume_id(volume_name): # pragma: no cover
- volumes = get_cinder_client().volumes.list()
- return next((v.id for v in volumes if v.name == volume_name), None)
-
-
-def create_volume(cinder_client, volume_name, volume_size,
- volume_image=False): # pragma: no cover
- try:
- if volume_image:
- volume = cinder_client.volumes.create(name=volume_name,
- size=volume_size,
- imageRef=volume_image)
- else:
- volume = cinder_client.volumes.create(name=volume_name,
- size=volume_size)
- return volume
- except Exception: # pylint: disable=broad-except
- log.exception("Error [create_volume(cinder_client, %s)]",
- (volume_name, volume_size))
- return None
+def get_volume_id(shade_client, volume_name):
+ return shade_client.get_volume_id(volume_name)
-def delete_volume(cinder_client, volume_id,
- forced=False): # pragma: no cover
- try:
- if forced:
- try:
- cinder_client.volumes.detach(volume_id)
- except Exception: # pylint: disable=broad-except
- log.error(sys.exc_info()[0])
- cinder_client.volumes.force_delete(volume_id)
- else:
- while True:
- volume = get_cinder_client().volumes.get(volume_id)
- if volume.status.lower() == 'available':
- break
- cinder_client.volumes.delete(volume_id)
- return True
- except Exception: # pylint: disable=broad-except
- log.exception("Error [delete_volume(cinder_client, '%s')]", volume_id)
+def get_volume(shade_client, name_or_id, filters=None):
+ """Get a volume by name or ID.
+
+ :param name_or_id: Name or ID of the volume.
+ :param filters: A dictionary of meta data to use for further filtering.
+
+ :returns: A volume ``munch.Munch`` or None if no matching volume is found.
+ """
+ return shade_client.get_volume(name_or_id, filters=filters)
+
+
+def create_volume(shade_client, size, wait=True, timeout=None,
+ image=None, **kwargs):
+ """Create a volume.
+
+ :param size: Size, in GB of the volume to create.
+ :param name: (optional) Name for the volume.
+ :param description: (optional) Name for the volume.
+ :param wait: If true, waits for volume to be created.
+ :param timeout: Seconds to wait for volume creation. None is forever.
+ :param image: (optional) Image name, ID or object from which to create
+ the volume.
+
+ :returns: The created volume object.
+
+ """
+ try:
+ return shade_client.create_volume(size, wait=wait, timeout=timeout,
+ image=image, **kwargs)
+ except (exc.OpenStackCloudException, exc.OpenStackCloudTimeout) as op_exc:
+ log.error("Failed to create_volume(shade_client). "
+ "Exception message: %s", op_exc.orig_message)
+
+
+def delete_volume(shade_client, name_or_id=None, wait=True, timeout=None):
+ """Delete a volume.
+
+ :param name_or_id:(string) Name or unique ID of the volume.
+ :param wait:(bool) If true, waits for volume to be deleted.
+ :param timeout:(string) Seconds to wait for volume deletion. None is forever.
+
+ :return: True on success, False otherwise.
+ """
+ try:
+ return shade_client.delete_volume(name_or_id=name_or_id,
+ wait=wait, timeout=timeout)
+ except (exc.OpenStackCloudException, exc.OpenStackCloudTimeout) as o_exc:
+ log.error("Error [delete_volume(shade_client,'%s')]. "
+ "Exception message: %s", name_or_id, o_exc.orig_message)
return False
-def detach_volume(server_id, volume_id): # pragma: no cover
+def detach_volume(shade_client, server_name_or_id, volume_name_or_id,
+ wait=True, timeout=None):
+ """Detach a volume from a server.
+
+ :param server_name_or_id: The server name or id to detach from.
+ :param volume_name_or_id: The volume name or id to detach.
+ :param wait: If true, waits for volume to be detached.
+ :param timeout: Seconds to wait for volume detachment. None is forever.
+
+ :return: True on success.
+ """
try:
- get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+ volume = shade_client.get_volume(volume_name_or_id)
+ server = get_server(shade_client, name_or_id=server_name_or_id)
+ shade_client.detach_volume(server, volume, wait=wait, timeout=timeout)
return True
- except Exception: # pylint: disable=broad-except
- log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
- server_id, volume_id)
+ except (exc.OpenStackCloudException, exc.OpenStackCloudTimeout) as o_exc:
+ log.error("Error [detach_volume(shade_client)]. "
+ "Exception message: %s", o_exc.orig_message)
return False
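Taken together, the refactored helpers are all driven through a single shade client; a usage sketch with placeholder resource, image and flavor names:

    from yardstick.common import openstack_utils

    shade_client = openstack_utils.get_shade_client()
    openstack_utils.create_keypair(shade_client, 'yardstick-key')
    server = openstack_utils.create_instance_and_wait_for_active(
        shade_client, 'yardstick-vm', 'cirros-0.4.0', 'm1.tiny',
        network='ext-net')
    openstack_utils.attach_volume_to_server(
        shade_client, 'yardstick-vm', 'yardstick-volume')
    openstack_utils.delete_instance(shade_client, 'yardstick-vm',
                                    wait=True, delete_ips=True)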
diff --git a/yardstick/common/utils.py b/yardstick/common/utils.py
index 357f66be8..5b44ce0e2 100644
--- a/yardstick/common/utils.py
+++ b/yardstick/common/utils.py
@@ -23,9 +23,11 @@ import logging
import os
import random
import re
+import signal
import socket
import subprocess
import sys
+import time
import six
from flask import jsonify
@@ -34,6 +36,8 @@ from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import yardstick
+from yardstick.common import exceptions
+
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@@ -136,6 +140,11 @@ def source_env(env_file):
p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
shell=True)
output = p.communicate()[0]
+
+ # The output may be of binary_type, which has no splitlines()
+ # method, so decode it first
+ if isinstance(output, six.binary_type):
+ output = encodeutils.safe_decode(output)
env = dict(line.split('=', 1) for line in output.splitlines() if '=' in line)
os.environ.update(env)
return env
@@ -400,20 +409,51 @@ class ErrorClass(object):
class Timer(object):
- def __init__(self):
+ def __init__(self, timeout=None, raise_exception=True):
super(Timer, self).__init__()
self.start = self.delta = None
+ self._timeout = int(timeout) if timeout else None
+ self._timeout_flag = False
+ self._raise_exception = raise_exception
+
+ def _timeout_handler(self, *args):
+ self._timeout_flag = True
+ if self._raise_exception:
+ raise exceptions.TimerTimeout(timeout=self._timeout)
+ self.__exit__()
def __enter__(self):
self.start = datetime.datetime.now()
+ if self._timeout:
+ signal.signal(signal.SIGALRM, self._timeout_handler)
+ signal.alarm(self._timeout)
return self
def __exit__(self, *_):
+ if self._timeout:
+ signal.alarm(0)
self.delta = datetime.datetime.now() - self.start
def __getattr__(self, item):
return getattr(self.delta, item)
+ def __iter__(self):
+ self._raise_exception = False
+ return self.__enter__()
+
+ def next(self): # pragma: no cover
+ # NOTE(ralonsoh): Python 2 support.
+ if not self._timeout_flag:
+ return datetime.datetime.now()
+ raise StopIteration()
+
+ def __next__(self): # pragma: no cover
+ # NOTE(ralonsoh): Python 3 support.
+ return self.next()
+
+ def __del__(self): # pragma: no cover
+ signal.alarm(0)
+
def read_meminfo(ssh_client):
"""Read "/proc/meminfo" file and parse all keys and values"""
@@ -455,3 +495,22 @@ def open_relative_file(path, task_path):
if e.errno == errno.ENOENT:
return open(os.path.join(task_path, path))
raise
+
+
+def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
+ """Wait until callable predicate is evaluated as True
+
+ :param predicate: (func) callable deciding whether waiting should continue
+ :param timeout: (int) timeout in seconds how long should function wait
+ :param sleep: (int) polling interval for results in seconds
+ :param exception: exception instance to raise on timeout. If None is passed
+ (default) then WaitTimeout exception is raised.
+ """
+ try:
+ with Timer(timeout=timeout):
+ while not predicate():
+ time.sleep(sleep)
+ except exceptions.TimerTimeout:
+ if exception and issubclass(exception, Exception):
+ raise exception # pylint: disable=raising-bad-type
+ raise exceptions.WaitTimeout
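Usage of the extended Timer and the new wait_until_true helper, for illustration; heavy_operation and check_link_up are placeholder callables:

    from yardstick.common import utils

    # Raises exceptions.TimerTimeout if the block takes longer than 10 seconds.
    with utils.Timer(timeout=10) as timer:
        heavy_operation()  # placeholder for the guarded call
    print(timer.delta.total_seconds())

    # Polls every second, raises exceptions.WaitTimeout after 60 seconds.
    utils.wait_until_true(check_link_up, timeout=60, sleep=1)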
diff --git a/yardstick/dispatcher/__init__.py b/yardstick/dispatcher/__init__.py
index dfb130760..837a4397c 100644
--- a/yardstick/dispatcher/__init__.py
+++ b/yardstick/dispatcher/__init__.py
@@ -7,12 +7,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-from __future__ import absolute_import
from oslo_config import cfg
import yardstick.common.utils as utils
-utils.import_modules_from_package("yardstick.dispatcher")
+utils.import_modules_from_package('yardstick.dispatcher')
+
CONF = cfg.CONF
OPTS = [
@@ -21,3 +21,8 @@ OPTS = [
help='Dispatcher to store data.'),
]
CONF.register_opts(OPTS)
+
+# Dispatchers
+FILE = 'file'
+HTTP = 'http'
+INFLUXDB = 'influxdb'
diff --git a/yardstick/error.py b/yardstick/error.py
deleted file mode 100644
index 9b84de1af..000000000
--- a/yardstick/error.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class SSHError(Exception):
- """Class handles ssh connection error exception"""
- pass
-
-
-class SSHTimeout(SSHError):
- """Class handles ssh connection timeout exception"""
- pass
-
-
-class IncorrectConfig(Exception):
- """Class handles incorrect configuration during setup"""
- pass
-
-
-class IncorrectSetup(Exception):
- """Class handles incorrect setup during setup"""
- pass
-
-
-class IncorrectNodeSetup(IncorrectSetup):
- """Class handles incorrect setup during setup"""
- pass
-
-
-class ErrorClass(object):
-
- def __init__(self, *args, **kwargs):
- if 'test' not in kwargs:
- raise RuntimeError
-
- def __getattr__(self, item):
- raise AttributeError
diff --git a/yardstick/network_services/collector/subscriber.py b/yardstick/network_services/collector/subscriber.py
index 7e18302eb..322b3f5a2 100644
--- a/yardstick/network_services/collector/subscriber.py
+++ b/yardstick/network_services/collector/subscriber.py
@@ -14,42 +14,29 @@
"""This module implements stub for publishing results in yardstick format."""
import logging
-from yardstick.network_services.nfvi.resource import ResourceProfile
-from yardstick.network_services.utils import get_nsb_option
-
LOG = logging.getLogger(__name__)
class Collector(object):
"""Class that handles dictionary of results in yardstick-plot format."""
- def __init__(self, vnfs, nodes, traffic_profile, timeout=3600):
+ def __init__(self, vnfs):
super(Collector, self).__init__()
- self.traffic_profile = traffic_profile
self.vnfs = vnfs
- self.nodes = nodes
- self.timeout = timeout
- self.bin_path = get_nsb_option('bin_path', '')
- self.resource_profiles = {node_name: ResourceProfile.make_from_node(node, self.timeout)
- for node_name, node in self.nodes.items()
- if node.get("collectd")}
def start(self):
- """Nothing to do, yet"""
- for resource in self.resource_profiles.values():
- resource.initiate_systemagent(self.bin_path)
- resource.start()
- resource.amqp_process_for_nfvi_kpi()
+ for vnf in self.vnfs:
+ vnf.start_collect()
def stop(self):
- """Nothing to do, yet"""
- for resource in self.resource_profiles.values():
- resource.stop()
+ for vnf in self.vnfs:
+ vnf.stop_collect()
def get_kpi(self):
"""Returns dictionary of results in yardstick-plot format
- :return:
+ :return: (dict) dictionary of kpis collected from the VNFs;
+ the keys are the names of the VNFs.
"""
results = {}
for vnf in self.vnfs:
@@ -58,17 +45,4 @@ class Collector(object):
LOG.debug("collect KPI for %s", vnf.name)
results[vnf.name] = vnf.collect_kpi()
- for node_name, resource in self.resource_profiles.items():
- # Result example:
- # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
- LOG.debug("collect KPI for %s", node_name)
- if resource.check_if_system_agent_running("collectd")[0] != 0:
- continue
-
- try:
- results[node_name] = {"core": resource.amqp_collect_nfvi_kpi()}
- LOG.debug("%s collect KPIs %s", node_name, results[node_name]['core'])
- # NOTE(elfoley): catch a more specific error
- except Exception as exc: # pylint: disable=broad-except
- LOG.exception(exc)
return results
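The slimmed-down Collector now only drives the VNF objects themselves, so any object exposing start_collect(), stop_collect() and collect_kpi() can be plugged in. A minimal sketch of that contract, assuming the patched yardstick package is importable; FakeVNF and its KPI values are illustrative, not a real yardstick VNF class.

    from yardstick.network_services.collector.subscriber import Collector

    class FakeVNF(object):
        def __init__(self, name):
            self.name = name
        def start_collect(self):
            pass                                            # start KPI collection
        def stop_collect(self):
            pass                                            # stop KPI collection
        def collect_kpi(self):
            return {'packets_in': 100, 'packets_fwd': 99}   # made-up numbers

    collector = Collector([FakeVNF('vnf_0'), FakeVNF('vnf_1')])
    collector.start()
    kpis = collector.get_kpi()    # {'vnf_0': {...}, 'vnf_1': {...}}
    collector.stop()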
diff --git a/yardstick/network_services/helpers/dpdkbindnic_helper.py b/yardstick/network_services/helpers/dpdkbindnic_helper.py
index 05b822c2e..1c74355ef 100644
--- a/yardstick/network_services/helpers/dpdkbindnic_helper.py
+++ b/yardstick/network_services/helpers/dpdkbindnic_helper.py
@@ -18,12 +18,9 @@ import re
from collections import defaultdict
from itertools import chain
+from yardstick.common import exceptions
from yardstick.common.utils import validate_non_string_sequence
-from yardstick.error import IncorrectConfig
-from yardstick.error import IncorrectSetup
-from yardstick.error import IncorrectNodeSetup
-from yardstick.error import SSHTimeout
-from yardstick.error import SSHError
+
NETWORK_KERNEL = 'network_kernel'
NETWORK_DPDK = 'network_dpdk'
@@ -51,7 +48,7 @@ class DpdkInterface(object):
try:
assert self.local_mac
except (AssertionError, KeyError):
- raise IncorrectConfig
+ raise exceptions.IncorrectConfig(error_msg='')
@property
def local_mac(self):
@@ -98,10 +95,12 @@ class DpdkInterface(object):
# if we don't find all the keys then don't update
pass
- except (IncorrectNodeSetup, SSHError, SSHTimeout):
- raise IncorrectConfig(
- "Unable to probe missing interface fields '%s', on node %s "
- "SSH Error" % (', '.join(self.missing_fields), self.dpdk_node.node_key))
+ except (exceptions.IncorrectNodeSetup, exceptions.SSHError,
+ exceptions.SSHTimeout):
+ message = ('Unable to probe missing interface fields "%s", on '
+ 'node %s SSH Error' % (', '.join(self.missing_fields),
+ self.dpdk_node.node_key))
+ raise exceptions.IncorrectConfig(error_msg=message)
class DpdkNode(object):
@@ -118,11 +117,12 @@ class DpdkNode(object):
try:
self.dpdk_interfaces = {intf['name']: DpdkInterface(self, intf['virtual-interface'])
for intf in self.interfaces}
- except IncorrectConfig:
+ except exceptions.IncorrectConfig:
template = "MAC address is required for all interfaces, missing on: {}"
errors = (intf['name'] for intf in self.interfaces if
'local_mac' not in intf['virtual-interface'])
- raise IncorrectSetup(template.format(", ".join(errors)))
+ raise exceptions.IncorrectSetup(
+ error_msg=template.format(", ".join(errors)))
@property
def dpdk_helper(self):
@@ -176,7 +176,7 @@ class DpdkNode(object):
self._probe_netdevs()
try:
self._probe_missing_values()
- except IncorrectConfig:
+ except exceptions.IncorrectConfig:
# ignore for now
pass
@@ -193,7 +193,7 @@ class DpdkNode(object):
missing_fields)
errors = "\n".join(errors)
if errors:
- raise IncorrectSetup(errors)
+ raise exceptions.IncorrectSetup(error_msg=errors)
finally:
self._dpdk_helper = None
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py b/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
deleted file mode 100644
index 70ce4ff03..000000000
--- a/yardstick/network_services/libs/ixia_libs/IxNet/IxNet.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# Copyright (c) 2016-2017 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import print_function
-import sys
-import logging
-
-import re
-from itertools import product
-
-log = logging.getLogger(__name__)
-
-IP_VERSION_4 = 4
-IP_VERSION_6 = 6
-
-
-class TrafficStreamHelper(object):
-
- TEMPLATE = '{0.traffic_item}/{0.stream}:{0.param_id}/{1}'
-
- def __init__(self, traffic_item, stream, param_id):
- super(TrafficStreamHelper, self).__init__()
- self.traffic_item = traffic_item
- self.stream = stream
- self.param_id = param_id
-
- def __getattr__(self, item):
- return self.TEMPLATE.format(self, item)
-
-
-class FramesizeHelper(object):
-
- def __init__(self):
- super(FramesizeHelper, self).__init__()
- self.weighted_pairs = []
- self.weighted_range_pairs = []
-
- @property
- def weighted_pairs_arg(self):
- return '-weightedPairs', self.weighted_pairs
-
- @property
- def weighted_range_pairs_arg(self):
- return '-weightedRangePairs', self.weighted_range_pairs
-
- def make_args(self, *args):
- return self.weighted_pairs_arg + self.weighted_range_pairs_arg + args
-
- def populate_data(self, framesize_data):
- for key, value in framesize_data.items():
- if value == '0':
- continue
-
- replaced = re.sub('[Bb]', '', key)
- self.weighted_pairs.extend([
- replaced,
- value,
- ])
- pairs = [
- replaced,
- replaced,
- value,
- ]
- self.weighted_range_pairs.append(pairs)
-
-
-class IxNextgen(object):
-
- STATS_NAME_MAP = {
- "traffic_item": 'Traffic Item',
- "Tx_Frames": 'Tx Frames',
- "Rx_Frames": 'Rx Frames',
- "Tx_Frame_Rate": 'Tx Frame Rate',
- "Rx_Frame_Rate": 'Tx Frame Rate',
- "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
- "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
- "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
- }
-
- PORT_STATS_NAME_MAP = {
- "stat_name": 'Stat Name',
- "Frames_Tx": 'Frames Tx.',
- "Valid_Frames_Rx": 'Valid Frames Rx.',
- "Frames_Tx_Rate": 'Frames Tx. Rate',
- "Valid_Frames_Rx_Rate": 'Valid Frames Rx. Rate',
- "Tx_Rate_Kbps": 'Tx. Rate (Kbps)',
- "Rx_Rate_Kbps": 'Rx. Rate (Kbps)',
- "Tx_Rate_Mbps": 'Tx. Rate (Mbps)',
- "Rx_Rate_Mbps": 'Rx. Rate (Mbps)',
- }
-
- LATENCY_NAME_MAP = {
- "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
- "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
- "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
- }
-
- RANDOM_MASK_MAP = {
- IP_VERSION_4: '0.0.0.255',
- IP_VERSION_6: '0:0:0:0:0:0:0:ff',
- }
-
- MODE_SEEDS_MAP = {
- 0: ('uplink', ['256', '2048']),
- }
-
- MODE_SEEDS_DEFAULT = 'downlink', ['2048', '256']
-
- @staticmethod
- def find_view_obj(view_name, views):
- edited_view_name = '::ixNet::OBJ-/statistics/view:"{}"'.format(view_name)
- return next((view for view in views if edited_view_name == view), '')
-
- @staticmethod
- def get_config(tg_cfg):
- card = []
- port = []
- external_interface = tg_cfg["vdu"][0]["external-interface"]
- for intf in external_interface:
- card_port0 = intf["virtual-interface"]["vpci"]
- card0, port0 = card_port0.split(':')[:2]
- card.append(card0)
- port.append(port0)
-
- cfg = {
- 'py_lib_path': tg_cfg["mgmt-interface"]["tg-config"]["py_lib_path"],
- 'machine': tg_cfg["mgmt-interface"]["ip"],
- 'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
- 'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
- 'cards': card,
- 'ports': port,
- 'output_dir': tg_cfg["mgmt-interface"]["tg-config"]["dut_result_dir"],
- 'version': tg_cfg["mgmt-interface"]["tg-config"]["version"],
- 'bidir': True,
- }
-
- return cfg
-
- def __init__(self, ixnet=None):
- self.ixnet = ixnet
- self._objRefs = dict()
- self._cfg = None
- self._logger = logging.getLogger(__name__)
- self._params = None
- self._bidir = None
-
- def iter_over_get_lists(self, x1, x2, y2, offset=0):
- for x in self.ixnet.getList(x1, x2):
- y_list = self.ixnet.getList(x, y2)
- for i, y in enumerate(y_list, offset):
- yield x, y, i
-
- def set_random_ip_multi_attribute(self, ipv4, seed, fixed_bits, random_mask, l3_count):
- self.ixnet.setMultiAttribute(
- ipv4,
- '-seed', str(seed),
- '-fixedBits', str(fixed_bits),
- '-randomMask', str(random_mask),
- '-valueType', 'random',
- '-countValue', str(l3_count))
-
- def set_random_ip_multi_attributes(self, ip, version, seeds, l3):
- try:
- random_mask = self.RANDOM_MASK_MAP[version]
- except KeyError:
- raise ValueError('Unknown version %s' % version)
-
- l3_count = l3['count']
- if "srcIp" in ip:
- fixed_bits = l3['srcip4']
- self.set_random_ip_multi_attribute(ip, seeds[0], fixed_bits, random_mask, l3_count)
- if "dstIp" in ip:
- fixed_bits = l3['dstip4']
- self.set_random_ip_multi_attribute(ip, seeds[1], fixed_bits, random_mask, l3_count)
-
- def add_ip_header(self, params, version):
- for it, ep, i in self.iter_over_get_lists('/traffic', 'trafficItem', "configElement", 1):
- iter1 = (v['outer_l3'] for v in params.values() if str(v['id']) == str(i))
- try:
- l3 = next(iter1, {})
- seeds = self.MODE_SEEDS_MAP.get(i, self.MODE_SEEDS_DEFAULT)[1]
- except (KeyError, IndexError):
- continue
-
- for ip, ip_bits, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
- self.set_random_ip_multi_attributes(ip_bits, version, seeds, l3)
-
- self.ixnet.commit()
-
- def _connect(self, tg_cfg):
- self._cfg = self.get_config(tg_cfg)
-
- sys.path.append(self._cfg["py_lib_path"])
- # Import IxNetwork after getting ixia lib path
- try:
- import IxNetwork
- except ImportError:
- raise
-
- self.ixnet = IxNetwork.IxNet()
-
- machine = self._cfg['machine']
- port = str(self._cfg['port'])
- version = str(self._cfg['version'])
- result = self.ixnet.connect(machine, '-port', port, '-version', version)
- return result
-
- def clear_ixia_config(self):
- self.ixnet.execute('newConfig')
-
- def load_ixia_profile(self, profile):
- self.ixnet.execute('loadConfig', self.ixnet.readFrom(profile))
-
- def ix_load_config(self, profile):
- self.clear_ixia_config()
- self.load_ixia_profile(profile)
-
- def ix_assign_ports(self):
- vports = self.ixnet.getList(self.ixnet.getRoot(), 'vport')
- ports = []
-
- chassis = self._cfg['chassis']
- ports = [(chassis, card, port) for card, port in
- zip(self._cfg['cards'], self._cfg['ports'])]
-
- vport_list = self.ixnet.getList("/", "vport")
- self.ixnet.execute('assignPorts', ports, [], vport_list, True)
- self.ixnet.commit()
-
- for vport in vports:
- if self.ixnet.getAttribute(vport, '-state') != 'up':
- log.error("Both thr ports are down...")
-
- def ix_update_frame(self, params):
- streams = ["configElement"]
-
- for param in params.values():
- framesize_data = FramesizeHelper()
- traffic_items = self.ixnet.getList('/traffic', 'trafficItem')
- param_id = param['id']
- for traffic_item, stream in product(traffic_items, streams):
- helper = TrafficStreamHelper(traffic_item, stream, param_id)
-
- self.ixnet.setMultiAttribute(helper.transmissionControl,
- '-type', '{0}'.format(param.get('traffic_type',
- 'continuous')),
- '-duration', '{0}'.format(param.get('duration',
- "30")))
-
- stream_frame_rate_path = helper.frameRate
- self.ixnet.setMultiAttribute(stream_frame_rate_path, '-rate', param['iload'])
- if param['outer_l2']['framesPerSecond']:
- self.ixnet.setMultiAttribute(stream_frame_rate_path,
- '-type', 'framesPerSecond')
-
- framesize_data.populate_data(param['outer_l2']['framesize'])
-
- make_attr_args = framesize_data.make_args('-incrementFrom', '66',
- '-randomMin', '66',
- '-quadGaussian', [],
- '-type', 'weightedPairs',
- '-presetDistribution', 'cisco',
- '-incrementTo', '1518')
-
- self.ixnet.setMultiAttribute(helper.frameSize, *make_attr_args)
-
- self.ixnet.commit()
-
- def update_ether_multi_attribute(self, ether, mac_addr):
- self.ixnet.setMultiAttribute(ether,
- '-singleValue', mac_addr,
- '-fieldValue', mac_addr,
- '-valueType', 'singleValue')
-
- def update_ether_multi_attributes(self, ether, l2):
- if "ethernet.header.destinationAddress" in ether:
- self.update_ether_multi_attribute(ether, str(l2.get('dstmac', "00:00:00:00:00:02")))
-
- if "ethernet.header.sourceAddress" in ether:
- self.update_ether_multi_attribute(ether, str(l2.get('srcmac', "00:00:00:00:00:01")))
-
- def ix_update_ether(self, params):
- for ti, ep, index in self.iter_over_get_lists('/traffic', 'trafficItem',
- "configElement", 1):
- iter1 = (v['outer_l2'] for v in params.values() if str(v['id']) == str(index))
- try:
- l2 = next(iter1, {})
- except KeyError:
- continue
-
- for ip, ether, _ in self.iter_over_get_lists(ep, 'stack', 'field'):
- self.update_ether_multi_attributes(ether, l2)
-
- self.ixnet.commit()
-
- def ix_update_udp(self, params):
- pass
-
- def ix_update_tcp(self, params):
- pass
-
- def ix_start_traffic(self):
- tis = self.ixnet.getList('/traffic', 'trafficItem')
- for ti in tis:
- self.ixnet.execute('generate', [ti])
- self.ixnet.execute('apply', '/traffic')
- self.ixnet.execute('start', '/traffic')
-
- def ix_stop_traffic(self):
- tis = self.ixnet.getList('/traffic', 'trafficItem')
- for _ in tis:
- self.ixnet.execute('stop', '/traffic')
-
- def build_stats_map(self, view_obj, name_map):
- return {kl: self.execute_get_column_values(view_obj, kr) for kl, kr in name_map.items()}
-
- def execute_get_column_values(self, view_obj, name):
- return self.ixnet.execute('getColumnValues', view_obj, name)
-
- def ix_get_statistics(self):
- views = self.ixnet.getList('/statistics', 'view')
- stats = {}
- view_obj = self.find_view_obj("Traffic Item Statistics", views)
- stats = self.build_stats_map(view_obj, self.STATS_NAME_MAP)
-
- view_obj = self.find_view_obj("Port Statistics", views)
- ports_stats = self.build_stats_map(view_obj, self.PORT_STATS_NAME_MAP)
-
- view_obj = self.find_view_obj("Flow Statistics", views)
- stats["latency"] = self.build_stats_map(view_obj, self.LATENCY_NAME_MAP)
-
- return stats, ports_stats
diff --git a/yardstick/network_services/libs/ixia_libs/IxNet/__init__.py b/yardstick/network_services/libs/ixia_libs/ixnet/__init__.py
index e69de29bb..e69de29bb 100644
--- a/yardstick/network_services/libs/ixia_libs/IxNet/__init__.py
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/__init__.py
diff --git a/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
new file mode 100644
index 000000000..393f60f7c
--- /dev/null
+++ b/yardstick/network_services/libs/ixia_libs/ixnet/ixnet_api.py
@@ -0,0 +1,470 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+import IxNetwork
+
+from yardstick.common import exceptions
+from yardstick.common import utils
+
+
+log = logging.getLogger(__name__)
+
+IP_VERSION_4 = 4
+IP_VERSION_6 = 6
+
+PROTO_ETHERNET = 'ethernet'
+PROTO_IPV4 = 'ipv4'
+PROTO_IPV6 = 'ipv6'
+PROTO_UDP = 'udp'
+PROTO_TCP = 'tcp'
+PROTO_VLAN = 'vlan'
+
+IP_VERSION_4_MASK = '0.0.0.255'
+IP_VERSION_6_MASK = '0:0:0:0:0:0:0:ff'
+
+TRAFFIC_STATUS_STARTED = 'started'
+TRAFFIC_STATUS_STOPPED = 'stopped'
+
+
+# NOTE(ralonsoh): this pragma will be removed in the last patch of this series
+class IxNextgen(object): # pragma: no cover
+
+ PORT_STATS_NAME_MAP = {
+ "stat_name": 'Stat Name',
+ "Frames_Tx": 'Frames Tx.',
+ "Valid_Frames_Rx": 'Valid Frames Rx.',
+ "Frames_Tx_Rate": 'Frames Tx. Rate',
+ "Valid_Frames_Rx_Rate": 'Valid Frames Rx. Rate',
+ "Tx_Rate_Kbps": 'Tx. Rate (Kbps)',
+ "Rx_Rate_Kbps": 'Rx. Rate (Kbps)',
+ "Tx_Rate_Mbps": 'Tx. Rate (Mbps)',
+ "Rx_Rate_Mbps": 'Rx. Rate (Mbps)',
+ }
+
+ LATENCY_NAME_MAP = {
+ "Store-Forward_Avg_latency_ns": 'Store-Forward Avg Latency (ns)',
+ "Store-Forward_Min_latency_ns": 'Store-Forward Min Latency (ns)',
+ "Store-Forward_Max_latency_ns": 'Store-Forward Max Latency (ns)',
+ }
+
+ @staticmethod
+ def get_config(tg_cfg):
+ card = []
+ port = []
+ external_interface = tg_cfg["vdu"][0]["external-interface"]
+ for intf in external_interface:
+ card_port0 = intf["virtual-interface"]["vpci"]
+ card0, port0 = card_port0.split(':')[:2]
+ card.append(card0)
+ port.append(port0)
+
+ cfg = {
+ 'machine': tg_cfg["mgmt-interface"]["ip"],
+ 'port': tg_cfg["mgmt-interface"]["tg-config"]["tcl_port"],
+ 'chassis': tg_cfg["mgmt-interface"]["tg-config"]["ixchassis"],
+ 'cards': card,
+ 'ports': port,
+ 'output_dir': tg_cfg["mgmt-interface"]["tg-config"]["dut_result_dir"],
+ 'version': tg_cfg["mgmt-interface"]["tg-config"]["version"],
+ 'bidir': True,
+ }
+
+ return cfg
+
+ def __init__(self): # pragma: no cover
+ self._ixnet = None
+ self._cfg = None
+ self._params = None
+ self._bidir = None
+
+ @property
+ def ixnet(self): # pragma: no cover
+ if self._ixnet:
+ return self._ixnet
+ raise exceptions.IxNetworkClientNotConnected()
+
+ def _get_config_element_by_flow_group_name(self, flow_group_name):
+ """Get a config element using the flow group name
+
+ Each named flow group contains one config element (by configuration).
+ According to the documentation, "configElements" is a list and "each
+ item in this list is aligned to the sequential order of your endpoint
+ list".
+
+ :param flow_group_name: (str) flow group name; this parameter is
+ always a number (converted to string) starting
+ from "1".
+ :return: (str) config element reference ID or None.
+ """
+ traffic_item = self.ixnet.getList(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')[0]
+ flow_groups = self.ixnet.getList(traffic_item, 'endpointSet')
+ for flow_group in flow_groups:
+ if (str(self.ixnet.getAttribute(flow_group, '-name')) ==
+ flow_group_name):
+ return traffic_item + '/configElement:' + flow_group_name
+
+ def _get_stack_item(self, flow_group_name, protocol_name):
+ """Return the stack item given the flow group name and the proto name
+
+ :param flow_group_name: (str) flow group name
+ :param protocol_name: (str) protocol name, referred to PROTO_*
+ constants
+ :return: list of stack item descriptors
+ """
+ celement = self._get_config_element_by_flow_group_name(flow_group_name)
+ if not celement:
+ raise exceptions.IxNetworkFlowNotPresent(
+ flow_group=flow_group_name)
+ stack_items = self.ixnet.getList(celement, 'stack')
+ return [s_i for s_i in stack_items if protocol_name in s_i]
+
+ def _get_field_in_stack_item(self, stack_item, field_name):
+ """Return the field in a stack item given the name
+
+ :param stack_item: (str) stack item descriptor
+ :param field_name: (str) field name
+ :return: (str) field descriptor
+ """
+ fields = self.ixnet.getList(stack_item, 'field')
+ for field in (field for field in fields if field_name in field):
+ return field
+ raise exceptions.IxNetworkFieldNotPresentInStackItem(
+ field_name=field_name, stack_item=stack_item)
+
+ def _get_traffic_state(self):
+ """Get traffic state"""
+ return self.ixnet.getAttribute(self.ixnet.getRoot() + 'traffic',
+ '-state')
+
+ def is_traffic_running(self):
+ """Returns true if traffic state == TRAFFIC_STATUS_STARTED"""
+ return self._get_traffic_state() == TRAFFIC_STATUS_STARTED
+
+ def is_traffic_stopped(self):
+ """Returns true if traffic state == TRAFFIC_STATUS_STOPPED"""
+ return self._get_traffic_state() == TRAFFIC_STATUS_STOPPED
+
+ @staticmethod
+ def _parse_framesize(framesize):
+ """Parse "framesize" config param. to return a list of weighted pairs
+
+ :param framesize: dictionary of frame sizes and weights
+ :return: list of paired frame sizes and weights
+ """
+ weighted_range_pairs = []
+ for size, weight in framesize.items():
+ weighted_range_pairs.append(int(size.upper().replace('B', '')))
+ weighted_range_pairs.append(int(weight))
+ return weighted_range_pairs
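For reference, the flattening done by _parse_framesize() can be shown with a couple of illustrative values (the sizes and weights below are made up); the resulting flat list is what update_frame() later passes as '-weightedRangePairs'.

    framesize = {'64B': '10', '128B': '30'}        # illustrative test-case input
    weighted_range_pairs = []
    for size, weight in framesize.items():
        weighted_range_pairs.append(int(size.upper().replace('B', '')))
        weighted_range_pairs.append(int(weight))
    # weighted_range_pairs is now [64, 10, 128, 30]
    # (ordering follows dict iteration order)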
+
+ def iter_over_get_lists(self, x1, x2, y2, offset=0):
+ for x in self.ixnet.getList(x1, x2):
+ y_list = self.ixnet.getList(x, y2)
+ for i, y in enumerate(y_list, offset):
+ yield x, y, i
+
+ def connect(self, tg_cfg):
+ self._cfg = self.get_config(tg_cfg)
+ self._ixnet = IxNetwork.IxNet()
+
+ machine = self._cfg['machine']
+ port = str(self._cfg['port'])
+ version = str(self._cfg['version'])
+ return self.ixnet.connect(machine, '-port', port,
+ '-version', version)
+
+ def clear_config(self):
+ """Wipe out any possible configuration present in the client"""
+ self.ixnet.execute('newConfig')
+
+ def assign_ports(self):
+ """Create and assign vports for each physical port defined in config
+
+ This configuration is present in the IXIA profile file. E.g.:
+ name: trafficgen_1
+ role: IxNet
+ interfaces:
+ xe0:
+ vpci: "2:15" # Card:port
+ driver: "none"
+ dpdk_port_num: 0
+ local_ip: "152.16.100.20"
+ netmask: "255.255.0.0"
+ local_mac: "00:98:10:64:14:00"
+ xe1:
+ ...
+ """
+ chassis_ip = self._cfg['chassis']
+ ports = [(chassis_ip, card, port) for card, port in
+ zip(self._cfg['cards'], self._cfg['ports'])]
+
+ log.info('Create and assign vports: %s', ports)
+ for port in ports:
+ vport = self.ixnet.add(self.ixnet.getRoot(), 'vport')
+ self.ixnet.commit()
+ self.ixnet.execute('assignPorts', [port], [], [vport], True)
+ self.ixnet.commit()
+ if self.ixnet.getAttribute(vport, '-state') != 'up':
+ log.warning('Port %s is down', vport)
+
+ def _create_traffic_item(self):
+ """Create the traffic item to hold the flow groups
+
+ The traffic item tracking by "Traffic Item" is enabled to retrieve the
+ latency statistics.
+ """
+ log.info('Create the traffic item "RFC2544"')
+ traffic_item = self.ixnet.add(self.ixnet.getRoot() + '/traffic',
+ 'trafficItem')
+ self.ixnet.setMultiAttribute(traffic_item, '-name', 'RFC2544',
+ '-trafficType', 'raw')
+ self.ixnet.commit()
+
+ traffic_item_id = self.ixnet.remapIds(traffic_item)[0]
+ self.ixnet.setAttribute(traffic_item_id + '/tracking',
+ '-trackBy', 'trafficGroupId0')
+ self.ixnet.commit()
+
+ def _create_flow_groups(self):
+ """Create the flow groups between the assigned ports"""
+ traffic_item_id = self.ixnet.getList(self.ixnet.getRoot() + 'traffic',
+ 'trafficItem')[0]
+ log.info('Create the flow groups')
+ vports = self.ixnet.getList(self.ixnet.getRoot(), 'vport')
+ uplink_ports = vports[::2]
+ downlink_ports = vports[1::2]
+ index = 0
+ for up, down in zip(uplink_ports, downlink_ports):
+ log.info('FGs: %s <--> %s', up, down)
+ endpoint_set_1 = self.ixnet.add(traffic_item_id, 'endpointSet')
+ endpoint_set_2 = self.ixnet.add(traffic_item_id, 'endpointSet')
+ self.ixnet.setMultiAttribute(
+ endpoint_set_1, '-name', str(index + 1),
+ '-sources', [up + '/protocols'],
+ '-destinations', [down + '/protocols'])
+ self.ixnet.setMultiAttribute(
+ endpoint_set_2, '-name', str(index + 2),
+ '-sources', [down + '/protocols'],
+ '-destinations', [up + '/protocols'])
+ self.ixnet.commit()
+ index += 2
+
+ def _append_procotol_to_stack(self, protocol_name, previous_element):
+ """Append a new element in the packet definition stack"""
+ protocol = (self.ixnet.getRoot() +
+ '/traffic/protocolTemplate:"{}"'.format(protocol_name))
+ self.ixnet.execute('append', previous_element, protocol)
+
+ def _setup_config_elements(self):
+ """Setup the config elements
+
+ The traffic item is configured to allow individual configurations per
+ config element. The default frame configuration is applied:
+ Ethernet II: added by default
+ IPv4: element to add
+ UDP: element to add
+ Payload: added by default
+ Ethernet II (Trailer): added by default
+ :return:
+ """
+ traffic_item_id = self.ixnet.getList(self.ixnet.getRoot() + 'traffic',
+ 'trafficItem')[0]
+ log.info('Split the frame rate distribution per config element')
+ config_elements = self.ixnet.getList(traffic_item_id, 'configElement')
+ for config_element in config_elements:
+ self.ixnet.setAttribute(config_element + '/frameRateDistribution',
+ '-portDistribution', 'splitRateEvenly')
+ self.ixnet.setAttribute(config_element + '/frameRateDistribution',
+ '-streamDistribution', 'splitRateEvenly')
+ self.ixnet.commit()
+ self._append_procotol_to_stack(
+ PROTO_UDP, config_element + '/stack:"ethernet-1"')
+ self._append_procotol_to_stack(
+ PROTO_IPV4, config_element + '/stack:"ethernet-1"')
+
+ def create_traffic_model(self):
+ """Create a traffic item and the needed flow groups
+
+ Each flow group inside the traffic item (only one is present)
+ represents the traffic between two ports:
+ (uplink) (downlink)
+ FlowGroup1: port1 -> port2
+ FlowGroup2: port1 <- port2
+ FlowGroup3: port3 -> port4
+ FlowGroup4: port3 <- port4
+ """
+ self._create_traffic_item()
+ self._create_flow_groups()
+ self._setup_config_elements()
+
+ def _update_frame_mac(self, ethernet_descriptor, field, mac_address):
+ """Set the MAC address in a config element stack Ethernet field
+
+ :param ethernet_descriptor: (str) ethernet descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ethernet-1"
+ :param field: (str) field name, e.g.: destinationAddress
+ :param mac_address: (str) MAC address
+ """
+ field_descriptor = self._get_field_in_stack_item(ethernet_descriptor,
+ field)
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-singleValue', mac_address,
+ '-fieldValue', mac_address,
+ '-valueType', 'singleValue')
+ self.ixnet.commit()
+
+ def update_frame(self, traffic):
+ """Update the L2 frame
+
+ This function updates the L2 frame options:
+ - Traffic type: "continuous", "fixedDuration".
+ - Duration: in case of traffic_type="fixedDuration", number of seconds
+ to inject traffic.
+ - Rate: in frames per second or percentage.
+ - Type of rate: "framesPerSecond" ("bitsPerSecond" and
+ "percentLineRate" not used)
+ - Frame size: custom IMIX [1] definition; a list of packet size in
+ bytes and the weight. E.g.:
+ [64, 10, 128, 15, 512, 5]
+
+ [1] https://en.wikipedia.org/wiki/Internet_Mix
+
+ :param traffic: (dict) of traffic elements; each traffic element contains
+ the injection parameters for its flow group.
+ """
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ config_element = self._get_config_element_by_flow_group_name(fg_id)
+ if not config_element:
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ type = traffic_param.get('traffic_type', 'fixedDuration')
+ duration = traffic_param.get('duration', 30)
+ rate = traffic_param['iload']
+ weighted_range_pairs = self._parse_framesize(
+ traffic_param['outer_l2']['framesize'])
+ srcmac = str(traffic_param.get('srcmac', '00:00:00:00:00:01'))
+ dstmac = str(traffic_param.get('dstmac', '00:00:00:00:00:02'))
+ # NOTE(ralonsoh): add QinQ tagging when
+ # traffic_param['outer_l2']['QinQ'] exists.
+ # s_vlan = traffic_param['outer_l2']['QinQ']['S-VLAN']
+ # c_vlan = traffic_param['outer_l2']['QinQ']['C-VLAN']
+
+ self.ixnet.setMultiAttribute(
+ config_element + '/transmissionControl',
+ '-type', type, '-duration', duration)
+ self.ixnet.setMultiAttribute(
+ config_element + '/frameRate',
+ '-rate', rate, '-type', 'framesPerSecond')
+ self.ixnet.setMultiAttribute(
+ config_element + '/frameSize',
+ '-type', 'weightedPairs',
+ '-weightedRangePairs', weighted_range_pairs)
+ self.ixnet.commit()
+
+ self._update_frame_mac(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'destinationAddress', dstmac)
+ self._update_frame_mac(
+ self._get_stack_item(fg_id, PROTO_ETHERNET)[0],
+ 'sourceAddress', srcmac)
+
+ def _update_ipv4_address(self, ip_descriptor, field, ip_address, seed,
+ mask, count):
+ """Set the IPv4 address in a config element stack IP field
+
+ :param ip_descriptor: (str) IP descriptor, e.g.:
+ /traffic/trafficItem:1/configElement:1/stack:"ipv4-2"
+ :param field: (str) field name, e.g.: srcIp, dstIp
+ :param ip_address: (str) IP address
+ :param seed: (int) seed for the random IP address generation
+ :param mask: (str) IP address mask
+ :param count: (int) number of random IPs to generate
+ """
+ field_descriptor = self._get_field_in_stack_item(ip_descriptor,
+ field)
+ self.ixnet.setMultiAttribute(field_descriptor,
+ '-seed', seed,
+ '-fixedBits', ip_address,
+ '-randomMask', mask,
+ '-valueType', 'random',
+ '-countValue', count)
+ self.ixnet.commit()
+
+ def update_ip_packet(self, traffic):
+ """Update the IP packet
+
+ NOTE: Only IPv4 is currently supported.
+ :param traffic: (dict) of traffic elements; each traffic element contains
+ the injection parameters for its flow group.
+ """
+ # NOTE(ralonsoh): L4 configuration is not set.
+ for traffic_param in traffic.values():
+ fg_id = str(traffic_param['id'])
+ if not self._get_config_element_by_flow_group_name(fg_id):
+ raise exceptions.IxNetworkFlowNotPresent(flow_group=fg_id)
+
+ count = traffic_param['outer_l3']['count']
+ srcip4 = str(traffic_param['outer_l3']['srcip4'])
+ dstip4 = str(traffic_param['outer_l3']['dstip4'])
+
+ self._update_ipv4_address(
+ self._get_stack_item(fg_id, PROTO_IPV4)[0],
+ 'srcIp', srcip4, 1, IP_VERSION_4_MASK, count)
+ self._update_ipv4_address(
+ self._get_stack_item(fg_id, PROTO_IPV4)[0],
+ 'dstIp', dstip4, 1, IP_VERSION_4_MASK, count)
+
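For readability, the element layout that update_frame() and update_ip_packet() expect can be summarised with an illustrative entry built only from the keys the two methods read; all values below are made up, and "ixnextgen" stands for an already-connected IxNextgen instance.

    traffic = {
        'uplink_0': {
            'id': 1,                          # flow group name (used as a string)
            'traffic_type': 'fixedDuration',  # or 'continuous'
            'duration': 30,
            'iload': '100',                   # injection rate
            'srcmac': '00:00:00:00:00:01',
            'dstmac': '00:00:00:00:00:02',
            'outer_l2': {'framesize': {'64B': '100'}},
            'outer_l3': {'count': 512,
                         'srcip4': '10.0.0.1',
                         'dstip4': '10.0.0.2'},
        },
    }
    # ixnextgen.update_frame(traffic)
    # ixnextgen.update_ip_packet(traffic)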
+ def _build_stats_map(self, view_obj, name_map):
+ return {data_yardstick: self.ixnet.execute(
+ 'getColumnValues', view_obj, data_ixia)
+ for data_yardstick, data_ixia in name_map.items()}
+
+ def get_statistics(self):
+ """Retrieve port and flow statistics
+
+ "Port Statistics" parameters are stored in self.PORT_STATS_NAME_MAP.
+ "Flow Statistics" parameters are stored in self.LATENCY_NAME_MAP.
+
+ :return: dictionary with the statistics; the keys of this dictionary
+ are PORT_STATS_NAME_MAP and LATENCY_NAME_MAP keys.
+ """
+ port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
+ flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
+ stats = self._build_stats_map(port_statistics,
+ self.PORT_STATS_NAME_MAP)
+ stats.update(self._build_stats_map(flow_statistics,
+ self.LATENCY_NAME_MAP))
+ return stats
+
+ def start_traffic(self):
+ """Start the traffic injection in the traffic item
+
+ By configuration, there is only one traffic item. This function returns
+ when the traffic state is TRAFFIC_STATUS_STARTED.
+ """
+ traffic_items = self.ixnet.getList('/traffic', 'trafficItem')
+ if self.is_traffic_running():
+ self.ixnet.execute('stop', '/traffic')
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.is_traffic_stopped())
+
+ self.ixnet.execute('generate', traffic_items)
+ self.ixnet.execute('apply', '/traffic')
+ self.ixnet.execute('start', '/traffic')
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.is_traffic_running())
diff --git a/yardstick/network_services/nfvi/resource.py b/yardstick/network_services/nfvi/resource.py
index dc5c46a86..0c0bf223a 100644
--- a/yardstick/network_services/nfvi/resource.py
+++ b/yardstick/network_services/nfvi/resource.py
@@ -27,6 +27,7 @@ from oslo_config import cfg
from oslo_utils.encodeutils import safe_decode
from yardstick import ssh
+from yardstick.common.exceptions import ResourceCommandError
from yardstick.common.task_template import finalize_for_yaml
from yardstick.common.utils import validate_non_string_sequence
from yardstick.network_services.nfvi.collectd import AmqpConsumer
@@ -249,45 +250,46 @@ class ResourceProfile(object):
if status != 0:
LOG.error("cannot find OVS socket %s", socket_path)
+ def _start_rabbitmq(self, connection):
+ # Reset amqp queue
+ LOG.debug("reset and setup amqp to collect data from collectd")
+ # ensure collectd.conf.d exists to avoid error/warning
+ cmd_list = ["sudo mkdir -p /etc/collectd/collectd.conf.d",
+ "sudo service rabbitmq-server restart",
+ "sudo rabbitmqctl stop_app",
+ "sudo rabbitmqctl reset",
+ "sudo rabbitmqctl start_app",
+ "sudo rabbitmqctl add_user admin admin",
+ "sudo rabbitmqctl authenticate_user admin admin",
+ "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'"
+ ]
+ for cmd in cmd_list:
+ exit_status, stdout, stderr = connection.execute(cmd)
+ if exit_status != 0:
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
+ # check stdout for "sudo rabbitmqctl status" command
+ cmd = "sudo rabbitmqctl status"
+ _, stdout, stderr = connection.execute(cmd)
+ if not re.search("RabbitMQ", stdout):
+ LOG.error("rabbitmqctl status don't have RabbitMQ in running apps")
+ raise ResourceCommandError(command=cmd, stderr=stderr)
+
def _start_collectd(self, connection, bin_path):
LOG.debug("Starting collectd to collect NFVi stats")
- connection.execute('sudo pkill -x -9 collectd')
collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
config_file_path = os.path.join(bin_path, "collectd", "etc")
+ self._prepare_collectd_conf(config_file_path)
+
+ connection.execute('sudo pkill -x -9 collectd')
exit_status = connection.execute("which %s > /dev/null 2>&1" % collectd_path)[0]
if exit_status != 0:
LOG.warning("%s is not present disabling", collectd_path)
- # disable auto-provisioning because it requires Internet access
- # collectd_installer = os.path.join(bin_path, "collectd.sh")
- # provision_tool(connection, collectd)
- # http_proxy = os.environ.get('http_proxy', '')
- # https_proxy = os.environ.get('https_proxy', '')
- # connection.execute("sudo %s '%s' '%s'" % (
- # collectd_installer, http_proxy, https_proxy))
return
if "ovs_stats" in self.plugins:
self._setup_ovs_stats(connection)
LOG.debug("Starting collectd to collect NFVi stats")
- # ensure collectd.conf.d exists to avoid error/warning
- connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
- self._prepare_collectd_conf(config_file_path)
-
- # Reset amqp queue
- LOG.debug("reset and setup amqp to collect data from collectd")
- connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
- connection.execute("sudo service rabbitmq-server start")
- connection.execute("sudo rabbitmqctl stop_app")
- connection.execute("sudo rabbitmqctl reset")
- connection.execute("sudo rabbitmqctl start_app")
- connection.execute("sudo service rabbitmq-server restart")
-
- LOG.debug("Creating admin user for rabbitmq in order to collect data from collectd")
- connection.execute("sudo rabbitmqctl delete_user guest")
- connection.execute("sudo rabbitmqctl add_user admin admin")
- connection.execute("sudo rabbitmqctl authenticate_user admin admin")
- connection.execute("sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
-
LOG.debug("Start collectd service..... %s second timeout", self.timeout)
# intel_pmu plug requires large numbers of files open, so try to set
# ulimit -n to a large value
@@ -299,9 +301,10 @@ class ResourceProfile(object):
""" Start system agent for NFVi collection on host """
if self.enable:
try:
+ self._start_rabbitmq(self.connection)
self._start_collectd(self.connection, bin_path)
- except Exception:
- LOG.exception("Exception during collectd start")
+ except ResourceCommandError as e:
+ LOG.exception("Exception during collectd and rabbitmq start: %s", str(e))
raise
def start(self):
diff --git a/yardstick/network_services/traffic_profile/base.py b/yardstick/network_services/traffic_profile/base.py
index 162bab2bc..f4b5b178c 100644
--- a/yardstick/network_services/traffic_profile/base.py
+++ b/yardstick/network_services/traffic_profile/base.py
@@ -16,6 +16,31 @@ from yardstick.common import exceptions
from yardstick.common import utils
+class TrafficProfileConfig(object):
+ """Class to contain the TrafficProfile class information
+
+ This object will parse and validate the traffic profile information.
+ """
+
+ DEFAULT_SCHEMA = 'nsb:traffic_profile:0.1'
+ DEFAULT_FRAME_RATE = 100
+ DEFAULT_DURATION = 30
+
+ def __init__(self, tp_config):
+ self.schema = tp_config.get('schema', self.DEFAULT_SCHEMA)
+ self.name = tp_config.get('name')
+ self.description = tp_config.get('description')
+ tprofile = tp_config['traffic_profile']
+ self.traffic_type = tprofile.get('traffic_type')
+ self.frame_rate = tprofile.get('frame_rate', self.DEFAULT_FRAME_RATE)
+ self.test_precision = tprofile.get('test_precision')
+ self.packet_sizes = tprofile.get('packet_sizes')
+ self.duration = tprofile.get('duration', self.DEFAULT_DURATION)
+ self.lower_bound = tprofile.get('lower_bound')
+ self.upper_bound = tprofile.get('upper_bound')
+ self.step_interval = tprofile.get('step_interval')
+
+
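The dictionary TrafficProfileConfig expects mirrors the traffic-profile YAML. A minimal illustrative input, assuming the patched yardstick package is importable; the field values are examples, not taken from a real test case.

    from yardstick.network_services.traffic_profile.base import TrafficProfileConfig

    tp_config = {
        'schema': 'nsb:traffic_profile:0.1',
        'name': 'rfc2544',
        'description': 'example traffic profile',
        'traffic_profile': {
            'traffic_type': 'RFC2544Profile',
            'frame_rate': 100,     # falls back to DEFAULT_FRAME_RATE if omitted
            'duration': 30,        # falls back to DEFAULT_DURATION if omitted
        },
    }
    config = TrafficProfileConfig(tp_config)
    assert config.frame_rate == 100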
class TrafficProfile(object):
"""
This class defines the behavior
@@ -43,8 +68,9 @@ class TrafficProfile(object):
# e.g. RFC2544 start_ip, stop_ip, drop_rate,
# IMIX = {"10K": 0.1, "100M": 0.5}
self.params = tp_config
+ self.config = TrafficProfileConfig(tp_config)
- def execute_traffic(self, traffic_generator):
+ def execute_traffic(self, traffic_generator, **kwargs):
""" This methods defines the behavior of the traffic generator.
It will be called in a loop until the traffic generator exits.
diff --git a/yardstick/network_services/traffic_profile/http_ixload.py b/yardstick/network_services/traffic_profile/http_ixload.py
index 348056551..6cbdb8ab2 100644
--- a/yardstick/network_services/traffic_profile/http_ixload.py
+++ b/yardstick/network_services/traffic_profile/http_ixload.py
@@ -12,9 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-from __future__ import print_function
-
import sys
import os
import logging
@@ -27,22 +24,14 @@ try:
except ImportError:
import json as jsonutils
-
-class ErrorClass(object):
-
- def __init__(self, *args, **kwargs):
- if 'test' not in kwargs:
- raise RuntimeError
-
- def __getattr__(self, item):
- raise AttributeError
-
+from yardstick.common import exceptions
try:
from IxLoad import IxLoad, StatCollectorUtils
except ImportError:
- IxLoad = ErrorClass
- StatCollectorUtils = ErrorClass
+ IxLoad = exceptions.ErrorClass
+ StatCollectorUtils = exceptions.ErrorClass
+
LOG = logging.getLogger(__name__)
CSV_FILEPATH_NAME = 'IxL_statResults.csv'
@@ -93,7 +82,7 @@ def validate_non_string_sequence(value, default=None, raise_exc=None):
if isinstance(value, collections.Sequence) and not isinstance(value, str):
return value
if raise_exc:
- raise raise_exc
+ raise raise_exc # pylint: disable=raising-bad-type
return default
@@ -218,7 +207,7 @@ class IXLOADHttpTest(object):
# ---- Remap ports ----
try:
self.reassign_ports(test, repository, self.ports_to_reassign)
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOG.exception("Exception occurred during reassign_ports")
# -----------------------------------------------------------------------
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index 7f047226b..e105c2f55 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
import logging
from yardstick.network_services.traffic_profile.trex_traffic_profile import \
@@ -82,13 +81,10 @@ class IXIARFC2544Profile(TrexProfile):
def _ixia_traffic_generate(self, traffic, ixia_obj):
for key, value in traffic.items():
if key.startswith((self.UPLINK, self.DOWNLINK)):
- value["iload"] = str(self.rate)
- ixia_obj.ix_update_frame(traffic)
- ixia_obj.ix_update_ether(traffic)
- ixia_obj.add_ip_header(traffic, 4)
- ixia_obj.ix_start_traffic()
- self.tmp_drop = 0
- self.tmp_throughput = 0
+ value['iload'] = str(self.rate)
+ ixia_obj.update_frame(traffic)
+ ixia_obj.update_ip_packet(traffic)
+ ixia_obj.start_traffic()
def update_traffic_profile(self, traffic_generator):
def port_generator():
@@ -99,85 +95,65 @@ class IXIARFC2544Profile(TrexProfile):
if not profile_data:
continue
self.profile_data = profile_data
- self.get_streams(self.profile_data)
self.full_profile.update({vld_id: self.profile_data})
for intf in intfs:
yield traffic_generator.vnfd_helper.port_num(intf)
self.ports = [port for port in port_generator()]
- def execute_traffic(self, traffic_generator, ixia_obj, mac=None):
- if mac is None:
- mac = {}
+ def execute_traffic(self, traffic_generator, ixia_obj=None, mac=None):
+ mac = {} if mac is None else mac
+ first_run = self.first_run
if self.first_run:
+ self.first_run = False
self.full_profile = {}
self.pg_id = 0
self.update_traffic_profile(traffic_generator)
- traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac)
self.max_rate = self.rate
self.min_rate = 0
- self.get_multiplier()
- self._ixia_traffic_generate(traffic, ixia_obj)
-
- def get_multiplier(self):
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
+ else:
+ self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 2)
- def start_ixia_latency(self, traffic_generator, ixia_obj, mac=None):
- if mac is None:
- mac = {}
- self.update_traffic_profile(traffic_generator)
- traffic = \
- self._get_ixia_traffic_profile(self.full_profile, mac)
+ traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
self._ixia_traffic_generate(traffic, ixia_obj)
+ return first_run
- def get_drop_percentage(self, samples, tol_min, tolerance, ixia_obj,
- mac=None):
- if mac is None:
- mac = {}
- status = 'Running'
+ def get_drop_percentage(self, samples, tol_min, tolerance, duration=30.0,
+ first_run=False):
+ completed = False
drop_percent = 100
- in_packets = sum([samples[iface]['in_packets'] for iface in samples])
- out_packets = sum([samples[iface]['out_packets'] for iface in samples])
- rx_throughput = \
- sum([samples[iface]['RxThroughput'] for iface in samples])
- tx_throughput = \
- sum([samples[iface]['TxThroughput'] for iface in samples])
- packet_drop = abs(out_packets - in_packets)
+ num_ifaces = len(samples)
+ in_packets_sum = sum(
+ [samples[iface]['in_packets'] for iface in samples])
+ out_packets_sum = sum(
+ [samples[iface]['out_packets'] for iface in samples])
+ rx_throughput = sum(
+ [samples[iface]['RxThroughput'] for iface in samples])
+ rx_throughput = round(float(rx_throughput), 2)
+ tx_throughput = sum(
+ [samples[iface]['TxThroughput'] for iface in samples])
+ tx_throughput = round(float(tx_throughput), 2)
+ packet_drop = abs(out_packets_sum - in_packets_sum)
+
try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 2)
+ drop_percent = round(
+ (packet_drop / float(out_packets_sum)) * 100, 2)
except ZeroDivisionError:
LOG.info('No traffic is flowing')
- samples['TxThroughput'] = round(tx_throughput / 1.0, 2)
- samples['RxThroughput'] = round(rx_throughput / 1.0, 2)
- samples['CurrentDropPercentage'] = drop_percent
- samples['Throughput'] = self.tmp_throughput
- samples['DropPercentage'] = self.tmp_drop
- if drop_percent > tolerance and self.tmp_throughput == 0:
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
- if self.first_run:
- max_supported_rate = out_packets / 30.0
- self.rate = max_supported_rate
- self.first_run = False
- if drop_percent <= tolerance:
- status = 'Completed'
+
+ samples['TxThroughput'] = tx_throughput
+ samples['RxThroughput'] = rx_throughput
+ samples['DropPercentage'] = drop_percent
+
+ if first_run:
+ self.rate = out_packets_sum / duration / num_ifaces
+ completed = drop_percent <= tolerance
+
if drop_percent > tolerance:
self.max_rate = self.rate
elif drop_percent < tol_min:
self.min_rate = self.rate
- if drop_percent >= self.tmp_drop:
- self.tmp_drop = drop_percent
- self.tmp_throughput = round((rx_throughput / 1.0), 2)
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
else:
- samples['Throughput'] = round(rx_throughput / 1.0, 2)
- samples['DropPercentage'] = drop_percent
- return status, samples
- self.get_multiplier()
- traffic = self._get_ixia_traffic_profile(self.full_profile, mac)
- self._ixia_traffic_generate(traffic, ixia_obj)
- return status, samples
+ completed = True
+
+ return completed, samples
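The control flow that execute_traffic() and get_drop_percentage() now implement together is a binary search on the injection rate: the first run measures the maximum supported rate, and each following iteration halves the [min_rate, max_rate] interval depending on the measured drop percentage. A simplified, self-contained sketch of that loop; the tolerances, step count and measure() helper are illustrative, not part of the patch.

    def search_rate(measure, max_rate=100.0, tol_min=0.0, tolerance=1.0, steps=10):
        """Bisect the injection rate until the drop percentage fits the tolerance.

        measure(rate) is assumed to return the drop percentage observed at 'rate'.
        """
        min_rate, rate = 0.0, max_rate
        for _ in range(steps):
            drop_percent = measure(rate)
            if tol_min <= drop_percent <= tolerance:
                return rate                      # converged
            if drop_percent > tolerance:
                max_rate = rate                  # too much loss, go lower
            else:
                min_rate = rate                  # loss below tol_min, go higher
            rate = round(float(max_rate + min_rate) / 2.0, 2)
        return rate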
diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py
index c3277fb12..225ee4356 100644
--- a/yardstick/network_services/traffic_profile/prox_binsearch.py
+++ b/yardstick/network_services/traffic_profile/prox_binsearch.py
@@ -90,10 +90,10 @@ class ProxBinSearchProfile(ProxProfile):
# Store one time only value in influxdb
single_samples = {
- "test_duration" : traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
- "test_precision" : self.params["traffic_profile"]["test_precision"],
- "tolerated_loss" : self.params["traffic_profile"]["tolerated_loss"],
- "duration" : duration
+ "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"],
+ "test_precision": self.params["traffic_profile"]["test_precision"],
+ "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"],
+ "duration": duration
}
self.queue.put(single_samples)
self.prev_time = time.time()
@@ -108,7 +108,6 @@ class ProxBinSearchProfile(ProxProfile):
self.tolerated_loss,
line_speed)
self.curr_time = time.time()
- diff_time = self.curr_time - self.prev_time
self.prev_time = self.curr_time
if result.success:
@@ -116,15 +115,15 @@ class ProxBinSearchProfile(ProxProfile):
self.current_lower = test_value
successful_pkt_loss = result.pkt_loss
samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
- samples["TxThroughput"] = samples["TxThroughput"] * 1000 * 1000
# store results with success tag in influxdb
success_samples = {'Success_' + key: value for key, value in samples.items()}
- success_samples["Success_rx_total"] = int(result.rx_total / diff_time)
- success_samples["Success_tx_total"] = int(result.tx_total / diff_time)
- success_samples["Success_can_be_lost"] = int(result.can_be_lost / diff_time)
- success_samples["Success_drop_total"] = int(result.drop_total / diff_time)
+ # Store number of packets based statistics (we already have throughput)
+ success_samples["Success_rx_total"] = int(result.rx_total)
+ success_samples["Success_tx_total"] = int(result.tx_total)
+ success_samples["Success_can_be_lost"] = int(result.can_be_lost)
+ success_samples["Success_drop_total"] = int(result.drop_total)
self.queue.put(success_samples)
# Store Actual throughput for result samples
@@ -134,20 +133,16 @@ class ProxBinSearchProfile(ProxProfile):
LOG.debug("Failure... Decreasing upper bound")
self.current_upper = test_value
samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples)
+ # samples contains data such as Latency, Throughput, number of packets
+ # Hence they should not be divided by the time difference
- for k in samples:
- tmp = samples[k]
- if isinstance(tmp, dict):
- for k2 in tmp:
- samples[k][k2] = int(samples[k][k2] / diff_time)
-
- if theor_max_thruput < samples["TxThroughput"]:
- theor_max_thruput = samples['TxThroughput']
+ if theor_max_thruput < samples["RequestedTxThroughput"]:
+ theor_max_thruput = samples['RequestedTxThroughput']
self.queue.put({'theor_max_throughput': theor_max_thruput})
LOG.debug("Collect TG KPIs %s %s", datetime.datetime.now(), samples)
self.queue.put(samples)
result_samples["Result_pktSize"] = pkt_size
- result_samples["Result_theor_max_throughput"] = theor_max_thruput/ (1000 * 1000)
+ result_samples["Result_theor_max_throughput"] = theor_max_thruput
self.queue.put(result_samples)
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
index 83020c85c..c24e2f65a 100644
--- a/yardstick/network_services/traffic_profile/rfc2544.py
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -11,190 +11,288 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" RFC2544 Throughput implemenation """
-from __future__ import absolute_import
-from __future__ import division
import logging
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
+
+from yardstick.network_services.traffic_profile import trex_traffic_profile
-from yardstick.network_services.traffic_profile.trex_traffic_profile \
- import TrexProfile
LOGGING = logging.getLogger(__name__)
+SRC_PORT = 'sport'
+DST_PORT = 'dport'
+
+
+class PortPgIDMap(object):
+ """Port and pg_id mapping class
+
+ "pg_id" is the identification STL library gives to each stream. In the
+ RFC2544Profile class, the traffic has a STLProfile per port, which contains
+ one or several streams, one per packet size defined in the IMIX test case
+ description.
+
+ Example of port <-> pg_id map:
+ self._port_pg_id_map = {
+ 0: [1, 2, 3, 4],
+ 1: [5, 6, 7, 8]
+ }
+ """
+
+ def __init__(self):
+ self._pg_id = 0
+ self._last_port = None
+ self._port_pg_id_map = {}
+
+ def add_port(self, port):
+ self._last_port = port
+ self._port_pg_id_map[port] = []
+
+ def get_pg_ids(self, port):
+ return self._port_pg_id_map.get(port)
+
+ def increase_pg_id(self, port=None):
+ port = self._last_port if not port else port
+ if port is None:
+ return
+ pg_id_list = self._port_pg_id_map.get(port)
+ if not pg_id_list:
+ self.add_port(port)
+ pg_id_list = self._port_pg_id_map[port]
+ self._pg_id += 1
+ pg_id_list.append(self._pg_id)
+ return self._pg_id
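The mapping behaviour of PortPgIDMap can be seen with a short usage example, assuming the patched yardstick package is importable; the port numbers are arbitrary.

    from yardstick.network_services.traffic_profile.rfc2544 import PortPgIDMap

    port_pg_id = PortPgIDMap()
    port_pg_id.add_port(0)
    port_pg_id.increase_pg_id()       # -> 1, appended under port 0
    port_pg_id.increase_pg_id()       # -> 2, still under the last port (0)
    port_pg_id.add_port(1)
    port_pg_id.increase_pg_id()       # -> 3, appended under port 1
    assert port_pg_id.get_pg_ids(0) == [1, 2]
    assert port_pg_id.get_pg_ids(1) == [3]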
-class RFC2544Profile(TrexProfile):
- """ This class handles rfc2544 implemenation. """
+class RFC2544Profile(trex_traffic_profile.TrexProfile):
+ """TRex RFC2544 traffic profile"""
+
+ TOLERANCE_LIMIT = 0.05
def __init__(self, traffic_generator):
super(RFC2544Profile, self).__init__(traffic_generator)
self.generator = None
- self.max_rate = None
- self.min_rate = None
- self.ports = None
- self.rate = 100
- self.drop_percent_at_max_tx = None
- self.throughput_max = None
+ self.rate = self.config.frame_rate
+ self.max_rate = self.config.frame_rate
+ self.min_rate = 0
+ self.drop_percent_max = 0
def register_generator(self, generator):
self.generator = generator
- def execute_traffic(self, traffic_generator=None):
- """ Generate the stream and run traffic on the given ports """
+ def stop_traffic(self, traffic_generator=None):
+ """"Stop traffic injection, reset counters and remove streams"""
if traffic_generator is not None and self.generator is None:
self.generator = traffic_generator
- if self.ports is not None:
- return
+ self.generator.client.stop()
+ self.generator.client.reset()
+ self.generator.client.remove_all_streams()
+
+ def execute_traffic(self, traffic_generator=None):
+ """Generate the stream and run traffic on the given ports
+
+ :param traffic_generator: (TrexTrafficGenRFC) traffic generator
+ :return ports: (list of int) indexes of ports
+ port_pg_id: (PortPgIDMap) port indexes and pg_id [1] map
+ [1] https://trex-tgn.cisco.com/trex/doc/cp_stl_docs/api/
+ profile_code.html#stlstream-modes
+ """
+ if traffic_generator is not None and self.generator is None:
+ self.generator = traffic_generator
- self.ports = []
+ port_pg_id = PortPgIDMap()
+ ports = []
for vld_id, intfs in sorted(self.generator.networks.items()):
profile_data = self.params.get(vld_id)
- # no profile for this port
if not profile_data:
continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
+ if (vld_id.startswith(self.DOWNLINK) and
+ self.generator.rfc2544_helper.correlated_traffic):
continue
for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.max_rate = self.rate
- self.min_rate = 0
- self.generator.client.start(ports=self.ports, mult=self.get_multiplier(),
- duration=30, force=True)
- self.drop_percent_at_max_tx = 0
- self.throughput_max = 0
-
- def get_multiplier(self):
- """ Get the rate at which next iteration to run """
- self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
- multiplier = round(self.rate / self.pps, 2)
- return str(multiplier)
-
- def get_drop_percentage(self, generator=None):
- """ Calculate the drop percentage and run the traffic """
- if generator is None:
- generator = self.generator
- run_duration = self.generator.RUN_DURATION
- samples = self.generator.generate_samples(self.ports)
-
- in_packets = sum([value['in_packets'] for value in samples.values()])
- out_packets = sum([value['out_packets'] for value in samples.values()])
-
- packet_drop = abs(out_packets - in_packets)
- drop_percent = 100.0
- try:
- drop_percent = round((packet_drop / float(out_packets)) * 100, 5)
- except ZeroDivisionError:
- LOGGING.info('No traffic is flowing')
+ port_num = int(self.generator.port_num(intf))
+ ports.append(port_num)
+ port_pg_id.add_port(port_num)
+ profile = self._create_profile(profile_data,
+ self.rate, port_pg_id)
+ self.generator.client.add_streams(profile, ports=[port_num])
+
+ self.generator.client.start(ports=ports,
+ duration=self.config.duration,
+ force=True)
+ return ports, port_pg_id
+
+ def _create_profile(self, profile_data, rate, port_pg_id):
+ """Create a STL profile (list of streams) for a port"""
+ streams = []
+ for packet_name in profile_data:
+ imix = (profile_data[packet_name].
+ get('outer_l2', {}).get('framesize'))
+ imix_data = self._create_imix_data(imix)
+ self._create_vm(profile_data[packet_name])
+ _streams = self._create_streams(imix_data, rate, port_pg_id)
+ streams.extend(_streams)
+ return trex_stl_streams.STLProfile(streams)
+
+ def _create_imix_data(self, imix):
+ """Generate the IMIX distribution for a STL profile
+
+ The input information is the framesize dictionary in a test case
+ traffic profile definition. E.g.:
+ downlink_0:
+ ipv4:
+ id: 2
+ outer_l2:
+ framesize:
+ 64B: 10
+ 128B: 20
+ ...
+
+ This function normalizes the sum of framesize weights to 100 and
+ returns a dictionary of frame sizes in bytes and weight in percentage.
+ E.g.:
+ imix_count = {64: 25, 128: 75}
+
+ :param imix: (dict) IMIX size and weight
+ """
+ imix_count = {}
+ if not imix:
+ return imix_count
+
+ imix_count = {size.upper().replace('B', ''): int(weight)
+ for size, weight in imix.items()}
+ imix_sum = sum(imix_count.values())
+ if imix_sum <= 0:
+ imix_count = {64: 100}
+ imix_sum = 100
+
+ weight_normalize = float(imix_sum) / 100
+ return {size: float(weight) / weight_normalize
+ for size, weight in imix_count.items()}
+
+ def _create_vm(self, packet_definition):
+ """Create the STL Raw instructions"""
+ self.ether_packet = Pkt.Ether()
+ self.ip_packet = Pkt.IP()
+ self.ip6_packet = None
+ self.udp_packet = Pkt.UDP()
+ self.udp[DST_PORT] = 'UDP.dport'
+ self.udp[SRC_PORT] = 'UDP.sport'
+ self.qinq = False
+ self.vm_flow_vars = []
+ outer_l2 = packet_definition.get('outer_l2')
+ outer_l3v4 = packet_definition.get('outer_l3v4')
+ outer_l3v6 = packet_definition.get('outer_l3v6')
+ outer_l4 = packet_definition.get('outer_l4')
+ if outer_l2:
+ self._set_outer_l2_fields(outer_l2)
+ if outer_l3v4:
+ self._set_outer_l3v4_fields(outer_l3v4)
+ if outer_l3v6:
+ self._set_outer_l3v6_fields(outer_l3v6)
+ if outer_l4:
+ self._set_outer_l4_fields(outer_l4)
+ self.trex_vm = trex_stl_packet_builder_scapy.STLScVmRaw(
+ self.vm_flow_vars)
+
+ def _create_single_packet(self, size=64):
+ size -= 4
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ udp_packet = self.udp_packet
+ if self.qinq:
+ qinq_packet = self.qinq_packet
+ base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
+ else:
+ base_pkt = ether_packet / ip_packet / udp_packet
+ pad = max(0, size - len(base_pkt)) * 'x'
+ return trex_stl_packet_builder_scapy.STLPktBuilder(
+ pkt=base_pkt / pad, vm=self.trex_vm)
+
+ def _create_streams(self, imix_data, rate, port_pg_id):
+ """Create a list of streams per packet size
+
+ The STL TX mode speed of the generated streams will depend on the frame
+ weight and the frame rate. Both the frame weight and the total frame
+ rate are normalized to 100. The STL TX mode speed, defined in
+        percentage, is the combination of both percentages. E.g.:
+ frame weight = 100
+ rate = 90
+                --> STLTXmode percentage = 90 (%)
+
+ frame weight = 80
+ rate = 50
+ --> STLTXmode percentage = 40 (%)
+
+ :param imix_data: (dict) IMIX size and weight
+ :param rate: (float) normalized [0..100] total weight
+        :param port_pg_id: (PortPgIDMap) port / pg_id (list) map
+ """
+ streams = []
+ for size, weight in ((int(size), float(weight)) for (size, weight)
+ in imix_data.items() if float(weight) > 0):
+ packet = self._create_single_packet(size)
+ pg_id = port_pg_id.increase_pg_id()
+ stl_flow = trex_stl_streams.STLFlowLatencyStats(pg_id=pg_id)
+ mode = trex_stl_streams.STLTXCont(percentage=weight * rate / 100)
+ streams.append(trex_stl_client.STLStream(
+ packet=packet, flow_stats=stl_flow, mode=mode))
+ return streams
+
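The STLTXCont speed used in _create_streams() is simply the product of the two normalized percentages, as the docstring examples above show; a one-line sanity check:

    def stl_tx_percentage(frame_weight, rate):
        """STL TX mode speed (%) for a normalized frame weight and total rate."""
        return frame_weight * rate / 100.0

    assert stl_tx_percentage(100, 90) == 90.0
    assert stl_tx_percentage(80, 50) == 40.0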
+ def get_drop_percentage(self, samples, tol_low, tol_high,
+ correlated_traffic):
+        """Calculate the drop percentage from the collected samples"""
+ tx_rate_fps = 0
+ rx_rate_fps = 0
+ for sample in samples:
+ tx_rate_fps += sum(
+ port['tx_throughput_fps'] for port in sample.values())
+ rx_rate_fps += sum(
+ port['rx_throughput_fps'] for port in sample.values())
+ tx_rate_fps = round(float(tx_rate_fps) / len(samples), 2)
+ rx_rate_fps = round(float(rx_rate_fps) / len(samples), 2)
# TODO(esm): RFC2544 doesn't tolerate packet loss, why do we?
- tolerance_low = generator.rfc2544_helper.tolerance_low
- tolerance_high = generator.rfc2544_helper.tolerance_high
-
- tx_rate = out_packets / run_duration
- rx_rate = in_packets / run_duration
-
- throughput_max = self.throughput_max
- drop_percent_at_max_tx = self.drop_percent_at_max_tx
+ out_packets = sum(port['out_packets'] for port in samples[-1].values())
+ in_packets = sum(port['in_packets'] for port in samples[-1].values())
+ drop_percent = 100.0
- if self.drop_percent_at_max_tx is None:
- self.rate = tx_rate
- self.first_run = False
+ # https://tools.ietf.org/html/rfc2544#section-26.3
+ if out_packets:
+ drop_percent = round(
+ (float(abs(out_packets - in_packets)) / out_packets) * 100, 5)
- if drop_percent > tolerance_high:
- # TODO(esm): why don't we discard results that are out of tolerance?
+        tol_high = tol_high if tol_high > self.TOLERANCE_LIMIT else self.TOLERANCE_LIMIT
+        tol_low = tol_low if tol_low > self.TOLERANCE_LIMIT else self.TOLERANCE_LIMIT
+ if drop_percent > tol_high:
self.max_rate = self.rate
- if throughput_max == 0:
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= tolerance_low:
- # TODO(esm): why do we update the samples dict in this case
- # and not update our tracking values?
- throughput_max = rx_rate
- drop_percent_at_max_tx = drop_percent
-
- elif drop_percent >= self.drop_percent_at_max_tx:
- # TODO(esm): why don't we discard results that are out of tolerance?
+ elif drop_percent < tol_low:
self.min_rate = self.rate
- self.drop_percent_at_max_tx = drop_percent_at_max_tx = drop_percent
- self.throughput_max = throughput_max = rx_rate
+ # else:
+ # NOTE(ralonsoh): the test should finish here
+ # pass
+ last_rate = self.rate
+ self.rate = round(float(self.max_rate + self.min_rate) / 2.0, 5)
- else:
- # TODO(esm): why don't we discard results that are out of tolerance?
- self.min_rate = self.rate
+ throughput = rx_rate_fps * 2 if correlated_traffic else rx_rate_fps
- generator.clear_client_stats(self.ports)
- generator.start_client(self.ports, mult=self.get_multiplier(),
- duration=run_duration, force=True)
+ if drop_percent > self.drop_percent_max:
+ self.drop_percent_max = drop_percent
- # if correlated traffic update the Throughput
- if generator.rfc2544_helper.correlated_traffic:
- throughput_max *= 2
+ latency = {port_num: value['latency']
+ for port_num, value in samples[-1].items()}
- samples.update({
- 'TxThroughput': tx_rate,
- 'RxThroughput': rx_rate,
+ output = {
+ 'TxThroughput': tx_rate_fps,
+ 'RxThroughput': rx_rate_fps,
'CurrentDropPercentage': drop_percent,
- 'Throughput': throughput_max,
- 'DropPercentage': drop_percent_at_max_tx,
- })
-
- return samples
-
- def execute_latency(self, generator=None, samples=None):
- if generator is not None and self.generator is None:
- self.generator = generator
-
- if samples is None:
- samples = self.generator.generate_samples()
-
- self.pps, multiplier = self.calculate_pps(samples)
- self.ports = []
- self.pg_id = self.params['traffic_profile'].get('pg_id', 1)
- for vld_id, intfs in sorted(self.generator.networks.items()):
- profile_data = self.params.get(vld_id)
- if not profile_data:
- continue
- # correlated traffic doesn't use public traffic?
- if vld_id.startswith(self.DOWNLINK) and \
- self.generator.rfc2544_helper.correlated_traffic:
- continue
- for intf in intfs:
- port = self.generator.port_num(intf)
- self.ports.append(port)
- self.generator.client.add_streams(self.get_streams(profile_data), ports=port)
-
- self.generator.start_client(ports=self.ports, mult=str(multiplier),
- duration=120, force=True)
- self.first_run = False
-
- def calculate_pps(self, samples):
- pps = round(samples['Throughput'] / 2, 2)
- multiplier = round(self.rate / self.pps, 2)
- return pps, multiplier
-
- def create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if pps:
- stl_mode = STLTXCont(pps=pps)
- else:
- stl_mode = STLTXCont(pps=self.pps)
- if self.pg_id:
- LOGGING.debug("pg_id: %s", self.pg_id)
- stl_flow_stats = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode,
- flow_stats=stl_flow_stats)
- self.pg_id += 1
- else:
- stream = STLStream(isg=isg, packet=packet, mode=stl_mode)
- return stream
+ 'Throughput': throughput,
+ 'DropPercentage': self.drop_percent_max,
+ 'Rate': last_rate,
+ 'Latency': latency
+ }
+ return output
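The rate search above halves the interval between min_rate and max_rate on every call, using the RFC 2544 (section 26.3) drop percentage to decide which bound to move; a condensed sketch of one iteration (tolerance clamping and sample averaging omitted, names local to this example):

    def next_rate(rate, min_rate, max_rate, out_packets, in_packets,
                  tol_low, tol_high):
        """Return (new_rate, min_rate, max_rate, drop_percent) for one step."""
        drop_percent = 100.0
        if out_packets:
            drop_percent = round(
                abs(out_packets - in_packets) * 100.0 / out_packets, 5)
        if drop_percent > tol_high:      # too much loss: lower the upper bound
            max_rate = rate
        elif drop_percent < tol_low:     # negligible loss: raise the lower bound
            min_rate = rate
        return (round((max_rate + min_rate) / 2.0, 5),
                min_rate, max_rate, drop_percent)

    # 0.5% loss at 100% rate with a [0.0, 0.05] tolerance window -> retry at 50%.
    assert next_rate(100, 0, 100, 1000000, 995000, 0.0, 0.05) == (50.0, 0, 100, 0.5)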
diff --git a/yardstick/network_services/traffic_profile/trex_traffic_profile.py b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
index f5e3923d5..ed0355fa5 100644
--- a/yardstick/network_services/traffic_profile/trex_traffic_profile.py
+++ b/yardstick/network_services/traffic_profile/trex_traffic_profile.py
@@ -19,21 +19,16 @@ from random import SystemRandom
import ipaddress
import six
-
-from yardstick.common import exceptions as y_exc
-from yardstick.network_services.traffic_profile import base
-from trex_stl_lib.trex_stl_client import STLStream
-from trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
-from trex_stl_lib.trex_stl_streams import STLTXCont
-from trex_stl_lib.trex_stl_streams import STLProfile
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVarRepeatableRandom
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
-from trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
from trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
from trex_stl_lib import api as Pkt
+from yardstick.common import exceptions as y_exc
+from yardstick.network_services.traffic_profile import base
+
+
SRC = 'src'
DST = 'dst'
ETHERNET = 'Ethernet'
@@ -342,115 +337,6 @@ class TrexProfile(base.TrafficProfile):
if 'dstport' in outer_l4:
self._set_proto_addr(UDP, DST_PORT, outer_l4['dstport'], outer_l4['count'])
- def generate_imix_data(self, packet_definition):
- """ generate packet size for a given traffic profile """
- imix_count = {}
- imix_data = {}
- if not packet_definition:
- return imix_count
- imix = packet_definition.get('framesize')
- if imix:
- for size in imix:
- data = imix[size]
- imix_data[int(size[:-1])] = int(data)
- imix_sum = sum(imix_data.values())
- if imix_sum > 100:
- raise SystemExit("Error in IMIX data")
- elif imix_sum < 100:
- imix_data[64] = imix_data.get(64, 0) + (100 - imix_sum)
-
- avg_size = 0.0
- for size in imix_data:
- count = int(imix_data[size])
- if count:
- avg_size += round(size * count / 100, 2)
- pps = round(self.pps * count / 100, 0)
- imix_count[size] = pps
- self.rate = round(1342177280 / avg_size, 0) * 2
- logging.debug("Imax: %s rate: %s", imix_count, self.rate)
- return imix_count
-
- def get_streams(self, profile_data):
- """ generate trex stream
- :param profile_data:
- :type profile_data:
- """
- self.streams = []
- self.pps = self.params['traffic_profile'].get('frame_rate', 100)
- for packet_name in profile_data:
- outer_l2 = profile_data[packet_name].get('outer_l2')
- imix_data = self.generate_imix_data(outer_l2)
- if not imix_data:
- imix_data = {64: self.pps}
- self.generate_vm(profile_data[packet_name])
- for size in imix_data:
- self._generate_streams(size, imix_data[size])
- self._generate_profile()
- return self.profile
-
- def generate_vm(self, packet_definition):
- """ generate trex vm with flows setup """
- self.ether_packet = Pkt.Ether()
- self.ip_packet = Pkt.IP()
- self.ip6_packet = None
- self.udp_packet = Pkt.UDP()
- self.udp[DST_PORT] = 'UDP.dport'
- self.udp[SRC_PORT] = 'UDP.sport'
- self.qinq = False
- self.vm_flow_vars = []
- outer_l2 = packet_definition.get('outer_l2', None)
- outer_l3v4 = packet_definition.get('outer_l3v4', None)
- outer_l3v6 = packet_definition.get('outer_l3v6', None)
- outer_l4 = packet_definition.get('outer_l4', None)
- if outer_l2:
- self._set_outer_l2_fields(outer_l2)
- if outer_l3v4:
- self._set_outer_l3v4_fields(outer_l3v4)
- if outer_l3v6:
- self._set_outer_l3v6_fields(outer_l3v6)
- if outer_l4:
- self._set_outer_l4_fields(outer_l4)
- self.trex_vm = STLScVmRaw(self.vm_flow_vars)
-
- def generate_packets(self):
- """ generate packets from trex TG """
- base_pkt = self.base_pkt
- size = self.fsize - 4
- pad = max(0, size - len(base_pkt)) * 'x'
- self.packets = [STLPktBuilder(pkt=base_pkt / pad,
- vm=vm) for vm in self.vms]
-
- def _create_single_packet(self, size=64):
- size = size - 4
- ether_packet = self.ether_packet
- ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
- udp_packet = self.udp_packet
- if self.qinq:
- qinq_packet = self.qinq_packet
- base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
- else:
- base_pkt = ether_packet / ip_packet / udp_packet
- pad = max(0, size - len(base_pkt)) * 'x'
- packet = STLPktBuilder(pkt=base_pkt / pad, vm=self.trex_vm)
- return packet
-
- def _create_single_stream(self, packet_size, pps, isg=0):
- packet = self._create_single_packet(packet_size)
- if self.pg_id:
- self.pg_id += 1
- stl_flow = STLFlowLatencyStats(pg_id=self.pg_id)
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps),
- flow_stats=stl_flow)
- else:
- stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps))
- return stream
-
- def _generate_streams(self, packet_size, pps):
- self.streams.append(self._create_single_stream(packet_size, pps))
-
- def _generate_profile(self):
- self.profile = STLProfile(self.streams)
-
@classmethod
def _count_ip(cls, start_ip, end_ip):
start = ipaddress.ip_address(six.u(start_ip))
diff --git a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
index f3cafef7a..d9719eb4e 100644
--- a/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/acl_vnf.py
@@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__)
# ACL should work the same on all systems, we can provide the binary
ACL_PIPELINE_COMMAND = \
- 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
+ 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}'
ACL_COLLECT_KPI = r"""\
ACL TOTAL:[^p]+pkts_processed"?:\s(\d+),[^p]+pkts_drop"?:\s(\d+),[^p]+pkts_received"?:\s(\d+),"""
diff --git a/yardstick/network_services/vnf_generic/vnf/base.py b/yardstick/network_services/vnf_generic/vnf/base.py
index a776b0989..9ceac3167 100644
--- a/yardstick/network_services/vnf_generic/vnf/base.py
+++ b/yardstick/network_services/vnf_generic/vnf/base.py
@@ -195,6 +195,18 @@ class GenericVNF(object):
:return: {"kpi": value, "kpi2": value}
"""
+ @abc.abstractmethod
+ def start_collect(self):
+ """Start KPI collection
+ :return: None
+ """
+
+ @abc.abstractmethod
+ def stop_collect(self):
+ """Stop KPI collection
+ :return: None
+ """
+
@six.add_metaclass(abc.ABCMeta)
class GenericTrafficGen(GenericVNF):
@@ -254,3 +266,23 @@ class GenericTrafficGen(GenericVNF):
:return: True/False
"""
pass
+
+ def start_collect(self):
+ """Start KPI collection.
+
+ Traffic measurements are always collected during injection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
+
+ def stop_collect(self):
+ """Stop KPI collection.
+
+ Optional.
+
+ :return: True/False
+ """
+ pass
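These new hooks let the caller start and stop KPI collection explicitly instead of tying it to VNF instantiation; the sketch below shows one possible driving sequence for any object implementing this interface (the helper name and polling parameters are invented for illustration, they are not part of the patch).

    import time

    def run_with_kpi_collection(vnf, run_traffic, poll_interval=1, polls=3):
        """Illustrative driver: collect KPIs only while traffic is flowing."""
        kpis = []
        vnf.start_collect()
        try:
            run_traffic()
            for _ in range(polls):
                kpis.append(vnf.collect_kpi())
                time.sleep(poll_interval)
        finally:
            vnf.stop_collect()
        return kpis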
diff --git a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
index 53f73b4d7..bfe628f09 100644
--- a/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/cgnapt_vnf.py
@@ -21,10 +21,10 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, Dpd
LOG = logging.getLogger(__name__)
# CGNAPT should work the same on all systems, we can provide the binary
-CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}'
+CGNAPT_PIPELINE_COMMAND = 'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}'
WAIT_FOR_STATIC_NAPT = 4
-CGNAPT_COLLECT_KPI = """\
+CGNAPT_COLLECT_KPI = r"""\
CG-NAPT(.*\n)*\
Received\s(\d+),\
Missed\s(\d+),\
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
index 31ed30140..7816c6d91 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_helpers.py
@@ -44,6 +44,8 @@ SECTION_CONTENTS = 1
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.DEBUG)
+LOG_RESULT = logging.getLogger('yardstick')
+LOG_RESULT.setLevel(logging.DEBUG)
BITS_PER_BYTE = 8
RETRY_SECONDS = 60
@@ -123,7 +125,8 @@ class TotStatsTuple(namedtuple('TotStats', 'rx,tx,tsc,hz')):
class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_rx,'
'delta_tx,delta_tsc,'
- 'latency,rx_total,tx_total,pps')):
+ 'latency,rx_total,tx_total,'
+ 'requested_pps')):
@property
def pkt_loss(self):
try:
@@ -132,11 +135,16 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
return 100.0
@property
- def mpps(self):
+ def tx_mpps(self):
# calculate the effective throughput in Mpps
return float(self.delta_tx) * self.tsc_hz / self.delta_tsc / 1e6
@property
+ def rx_mpps(self):
+ # calculate the effective throughput in Mpps
+ return float(self.delta_rx) * self.tsc_hz / self.delta_tsc / 1e6
+
+ @property
def can_be_lost(self):
return int(self.tx_total * self.tolerated / 1e2)
@@ -162,11 +170,12 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
]
samples = {
- "Throughput": self.mpps,
+ "Throughput": self.rx_mpps,
+ "RxThroughput": self.rx_mpps,
"DropPackets": pkt_loss,
"CurrentDropPackets": pkt_loss,
- "TxThroughput": self.pps / 1e6,
- "RxThroughput": self.mpps,
+ "RequestedTxThroughput": self.requested_pps / 1e6,
+ "TxThroughput": self.tx_mpps,
"PktSize": pkt_size,
}
if port_samples:
@@ -177,11 +186,12 @@ class ProxTestDataTuple(namedtuple('ProxTestDataTuple', 'tolerated,tsc_hz,delta_
def log_data(self, logger=None):
if logger is None:
- logger = LOG
+ logger = LOG_RESULT
template = "RX: %d; TX: %d; dropped: %d (tolerated: %d)"
- logger.debug(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
- logger.debug("Mpps configured: %f; Mpps effective %f", self.pps / 1e6, self.mpps)
+ logger.info(template, self.rx_total, self.tx_total, self.drop_total, self.can_be_lost)
+ logger.info("Mpps configured: %f; Mpps generated %f; Mpps received %f",
+ self.requested_pps / 1e6, self.tx_mpps, self.rx_mpps)
class PacketDump(object):
@@ -288,7 +298,7 @@ class ProxSocketHelper(object):
if mode != 'pktdump':
# Regular 1-line message. Stop reading from the socket.
LOG.debug("Regular response read")
- return ret_str
+ return ret_str, True
LOG.debug("Packet dump header read: [%s]", ret_str)
@@ -309,11 +319,11 @@ class ProxSocketHelper(object):
# Return boolean instead of string to signal
# successful reception of the packet dump.
LOG.debug("Packet dump stored, returning")
- return True
+ return True, False
index = data_end + 1
- return ret_str
+ return ret_str, False
def get_data(self, pkt_dump_only=False, timeout=1):
""" read data from the socket """
@@ -352,7 +362,9 @@ class ProxSocketHelper(object):
ret_str = ""
for status in iter(is_ready, False):
decoded_data = self._sock.recv(256).decode('utf-8')
- ret_str = self._parse_socket_data(decoded_data, pkt_dump_only)
+ ret_str, done = self._parse_socket_data(decoded_data, pkt_dump_only)
+ if (done):
+ break
LOG.debug("Received data from socket: [%s]", ret_str)
return ret_str if status else ''
@@ -1001,8 +1013,8 @@ class ProxDataHelper(object):
def totals_and_pps(self):
if self._totals_and_pps is None:
rx_total, tx_total = self.sut.port_stats(range(self.port_count))[6:8]
- pps = self.value / 100.0 * self.line_rate_to_pps()
- self._totals_and_pps = rx_total, tx_total, pps
+ requested_pps = self.value / 100.0 * self.line_rate_to_pps()
+ self._totals_and_pps = rx_total, tx_total, requested_pps
return self._totals_and_pps
@property
@@ -1014,7 +1026,7 @@ class ProxDataHelper(object):
return self.totals_and_pps[1]
@property
- def pps(self):
+ def requested_pps(self):
return self.totals_and_pps[2]
@property
@@ -1055,7 +1067,7 @@ class ProxDataHelper(object):
self.latency,
self.rx_total,
self.tx_total,
- self.pps,
+ self.requested_pps,
)
self.result_tuple.log_data()
@@ -1134,6 +1146,7 @@ class ProxProfileHelper(object):
self.sut.set_pkt_size(self.test_cores, pkt_size)
self.sut.set_speed(self.test_cores, value)
self.sut.start_all()
+ time.sleep(1)
yield
finally:
self.sut.stop_all()
@@ -1246,6 +1259,7 @@ class ProxMplsProfileHelper(ProxProfileHelper):
ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
self.sut.set_speed(self.plain_cores, value * ratio)
self.sut.start_all()
+ time.sleep(1)
yield
finally:
self.sut.stop_all()
diff --git a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
index 285e08659..36f1a19d0 100644
--- a/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/prox_vnf.py
@@ -15,8 +15,6 @@
import errno
import logging
import datetime
-import time
-
from yardstick.common.process import check_if_process_failed
from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
@@ -44,7 +42,8 @@ class ProxApproxVnf(SampleVNF):
self.prev_packets_in = 0
self.prev_packets_sent = 0
- self.prev_time = time.time()
+ self.prev_tsc = 0
+ self.tsc_hz = 0
super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
resource_helper_type)
@@ -68,8 +67,7 @@ class ProxApproxVnf(SampleVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
- check_if_process_failed(self._vnf_process)
-
+ check_if_process_failed(self._vnf_process, 0.01)
if self.resource_helper is None:
result = {
"packets_in": 0,
@@ -79,6 +77,12 @@ class ProxApproxVnf(SampleVNF):
}
return result
+ if (self.tsc_hz == 0):
+ self.tsc_hz = float(self.resource_helper.sut.hz())
+ LOG.debug("TSC = %f", self.tsc_hz)
+ if (self.tsc_hz == 0):
+ raise RuntimeError("Unable to retrieve TSC")
+
# use all_ports so we only use ports matched in topology
port_count = len(self.vnfd_helper.port_pairs.all_ports)
if port_count not in {1, 2, 4}:
@@ -86,10 +90,10 @@ class ProxApproxVnf(SampleVNF):
"1, 2 or 4 ports only supported at this time")
self.port_stats = self.vnf_execute('port_stats', range(port_count))
- curr_time = time.time()
try:
rx_total = self.port_stats[6]
tx_total = self.port_stats[7]
+ tsc = self.port_stats[10]
except IndexError:
LOG.debug("port_stats parse fail ")
# return empty dict so we don't mess up existing KPIs
@@ -103,15 +107,17 @@ class ProxApproxVnf(SampleVNF):
# collectd KPIs here and not TG KPIs, so use a different method name
"collect_stats": self.resource_helper.collect_collectd_kpi(),
}
- curr_packets_in = int((rx_total - self.prev_packets_in) / (curr_time - self.prev_time))
- curr_packets_fwd = int((tx_total - self.prev_packets_sent) / (curr_time - self.prev_time))
+ curr_packets_in = int(((rx_total - self.prev_packets_in) * self.tsc_hz)
+ / (tsc - self.prev_tsc) * port_count)
+ curr_packets_fwd = int(((tx_total - self.prev_packets_sent) * self.tsc_hz)
+ / (tsc - self.prev_tsc) * port_count)
result["curr_packets_in"] = curr_packets_in
result["curr_packets_fwd"] = curr_packets_fwd
self.prev_packets_in = rx_total
self.prev_packets_sent = tx_total
- self.prev_time = curr_time
+ self.prev_tsc = tsc
LOG.debug("%s collect KPIs %s %s", self.APP_NAME, datetime.datetime.now(), result)
return result
diff --git a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
index 77488c479..8e0e29675 100644
--- a/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/sample_vnf.py
@@ -11,20 +11,17 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Base class implementation for generic vnf implementation """
-from collections import Mapping
import logging
from multiprocessing import Queue, Value, Process
import os
import posixpath
import re
+import six
import subprocess
import time
-import six
-
from trex_stl_lib.trex_stl_client import LoggerApi
from trex_stl_lib.trex_stl_client import STLClient
from trex_stl_lib.trex_stl_exceptions import STLError
@@ -35,7 +32,6 @@ from yardstick.common import utils
from yardstick.network_services import constants
from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode
from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig
-from yardstick.network_services.helpers.samplevnf_helper import PortPairs
from yardstick.network_services.nfvi.resource import ResourceProfile
from yardstick.network_services.utils import get_nsb_option
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
@@ -60,6 +56,7 @@ class SetupEnvHelper(object):
self.vnfd_helper = vnfd_helper
self.ssh_helper = ssh_helper
self.scenario_helper = scenario_helper
+ self.collectd_options = {}
def build_config(self):
raise NotImplementedError
@@ -193,11 +190,20 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
port_nums = self.vnfd_helper.port_nums(ports)
# create mask from all the dpdk port numbers
ports_mask_hex = hex(sum(2 ** num for num in port_nums))
+
+ vnf_cfg = self.scenario_helper.vnf_cfg
+ lb_config = vnf_cfg.get('lb_config', 'SW')
+ worker_threads = vnf_cfg.get('worker_threads', 3)
+ hwlb = ''
+ if lb_config == 'HW':
+ hwlb = ' --hwlb %s' % worker_threads
+
self.pipeline_kwargs = {
'cfg_file': self.CFG_CONFIG,
'script': self.CFG_SCRIPT,
'port_mask_hex': ports_mask_hex,
'tool_path': tool_path,
+ 'hwlb': hwlb,
}
def setup_vnf_environment(self):
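With the new {hwlb} slot, one pipeline command template serves both the software and the hardware load-balancer configurations; a quick formatting check with made-up paths (only the " --hwlb <worker_threads>" suffix comes from the code above):

    PIPELINE_COMMAND = \
        'sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}'

    hw = PIPELINE_COMMAND.format(tool_path='/opt/nsb_bin/vACL', port_mask_hex='0x3',
                                 cfg_file='/tmp/acl_config', script='/tmp/acl_script',
                                 hwlb=' --hwlb 3')
    sw = PIPELINE_COMMAND.format(tool_path='/opt/nsb_bin/vACL', port_mask_hex='0x3',
                                 cfg_file='/tmp/acl_config', script='/tmp/acl_script',
                                 hwlb='')
    # hw -> "sudo /opt/nsb_bin/vACL -p 0x3 -f /tmp/acl_config -s /tmp/acl_script  --hwlb 3"
    # sw -> "sudo /opt/nsb_bin/vACL -p 0x3 -f /tmp/acl_config -s /tmp/acl_script "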
@@ -225,12 +231,6 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
if exit_status == 0:
return
- def get_collectd_options(self):
- options = self.scenario_helper.all_options.get("collectd", {})
- # override with specific node settings
- options.update(self.scenario_helper.options.get("collectd", {}))
- return options
-
def _setup_resources(self):
# what is this magic? how do we know which socket is for which port?
# what about quad-socket?
@@ -243,11 +243,11 @@ class DpdkVnfSetupEnvHelper(SetupEnvHelper):
# this won't work because we don't have DPDK port numbers yet
ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num)
port_names = (intf["name"] for intf in ports)
- collectd_options = self.get_collectd_options()
- plugins = collectd_options.get("plugins", {})
+ plugins = self.collectd_options.get("plugins", {})
+ interval = self.collectd_options.get("interval")
# we must set timeout to be the same as the VNF otherwise KPIs will die before VNF
return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names,
- plugins=plugins, interval=collectd_options.get("interval"),
+ plugins=plugins, interval=interval,
timeout=self.scenario_helper.timeout)
def _check_interface_fields(self):
@@ -372,39 +372,14 @@ class ClientResourceHelper(ResourceHelper):
LOG.error('TRex client not connected')
return {}
- def generate_samples(self, ports, key=None, default=None):
- # needs to be used ports
- last_result = self.get_stats(ports)
- key_value = last_result.get(key, default)
-
- if not isinstance(last_result, Mapping): # added for mock unit test
- self._terminated.value = 1
- return {}
-
- samples = {}
- # recalculate port for interface and see if it matches ports provided
- for intf in self.vnfd_helper.interfaces:
- name = intf["name"]
- port = self.vnfd_helper.port_num(name)
- if port in ports:
- xe_value = last_result.get(port, {})
- samples[name] = {
- "rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
- "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
- "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
- "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
- "in_packets": int(xe_value.get("ipackets", 0)),
- "out_packets": int(xe_value.get("opackets", 0)),
- }
- if key:
- samples[name][key] = key_value
- return samples
+ def _get_samples(self, ports, port_pg_id=False):
+ raise NotImplementedError()
def _run_traffic_once(self, traffic_profile):
traffic_profile.execute_traffic(self)
self.client_started.value = 1
time.sleep(self.RUN_DURATION)
- samples = self.generate_samples(traffic_profile.ports)
+ samples = self._get_samples(traffic_profile.ports)
time.sleep(self.QUEUE_WAIT_TIME)
self._queue.put(samples)
@@ -657,49 +632,6 @@ class SampleVNF(GenericVNF):
self.vnf_port_pairs = None
self._vnf_process = None
- def _build_ports(self):
- self._port_pairs = PortPairs(self.vnfd_helper.interfaces)
- self.networks = self._port_pairs.networks
- self.uplink_ports = self.vnfd_helper.port_nums(self._port_pairs.uplink_ports)
- self.downlink_ports = self.vnfd_helper.port_nums(self._port_pairs.downlink_ports)
- self.my_ports = self.vnfd_helper.port_nums(self._port_pairs.all_ports)
-
- def _get_route_data(self, route_index, route_type):
- route_iter = iter(self.vnfd_helper.vdu0.get('nd_route_tbl', []))
- for _ in range(route_index):
- next(route_iter, '')
- return next(route_iter, {}).get(route_type, '')
-
- def _get_port0localip6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0localip6 : %s", return_value)
- return return_value
-
- def _get_port1localip6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1localip6 : %s", return_value)
- return return_value
-
- def _get_port0prefixlen6(self):
- return_value = self._get_route_data(0, 'netmask')
- LOG.info("_get_port0prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port1prefixlen6(self):
- return_value = self._get_route_data(1, 'netmask')
- LOG.info("_get_port1prefixlen6 : %s", return_value)
- return return_value
-
- def _get_port0gateway6(self):
- return_value = self._get_route_data(0, 'network')
- LOG.info("_get_port0gateway6 : %s", return_value)
- return return_value
-
- def _get_port1gateway6(self):
- return_value = self._get_route_data(1, 'network')
- LOG.info("_get_port1gateway6 : %s", return_value)
- return return_value
-
def _start_vnf(self):
self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT)
name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid())
@@ -710,6 +642,7 @@ class SampleVNF(GenericVNF):
pass
def instantiate(self, scenario_cfg, context_cfg):
+ self._update_collectd_options(scenario_cfg, context_cfg)
self.scenario_helper.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
self.nfvi_context = Context.get_context_from_server(self.scenario_helper.nodes[self.name])
@@ -721,6 +654,54 @@ class SampleVNF(GenericVNF):
self.resource_helper.setup()
self._start_vnf()
+ def _update_collectd_options(self, scenario_cfg, context_cfg):
+ """Update collectd configuration options
+        This function retrieves all collectd options contained in the test
+        case definition and builds a single dictionary combining them. The
+        following fragment represents a test case with the collectd options
+        and priorities (1 highest, 3 lowest):
+ ---
+ schema: yardstick:task:0.1
+ scenarios:
+ - type: NSPerf
+ nodes:
+ tg__0: trafficgen_1.yardstick
+ vnf__0: vnf.yardstick
+ options:
+ collectd:
+ <options> # COLLECTD priority 3
+ vnf__0:
+ collectd:
+ plugins:
+ load
+ <options> # COLLECTD priority 2
+ context:
+ type: Node
+ name: yardstick
+ nfvi_type: baremetal
+ file: /etc/yardstick/nodes/pod_ixia.yaml # COLLECTD priority 1
+ """
+ scenario_options = scenario_cfg.get('options', {})
+ generic_options = scenario_options.get('collectd', {})
+ scenario_node_options = scenario_options.get(self.name, {})\
+ .get('collectd', {})
+ context_node_options = context_cfg.get('nodes', {})\
+ .get(self.name, {}).get('collectd', {})
+
+ options = generic_options
+ self._update_options(options, scenario_node_options)
+ self._update_options(options, context_node_options)
+
+ self.setup_helper.collectd_options = options
+
+ def _update_options(self, options, additional_options):
+ """Update collectd options and plugins dictionary"""
+ for k, v in additional_options.items():
+ if isinstance(v, dict) and k in options:
+ options[k].update(v)
+ else:
+ options[k] = v
+
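The three collectd option sources described in the docstring are merged in ascending priority, with nested dictionaries (e.g. "plugins") updated rather than replaced; a standalone sketch with invented option values:

    def merge_collectd_options(generic, scenario_node, context_node):
        """Combine collectd options; later (higher-priority) sources win."""
        options = {key: dict(value) if isinstance(value, dict) else value
                   for key, value in generic.items()}
        for source in (scenario_node, context_node):
            for key, value in source.items():
                if isinstance(value, dict) and key in options:
                    options[key].update(value)
                else:
                    options[key] = value
        return options

    assert merge_collectd_options(
        {'interval': 25, 'plugins': {'load': {}}},   # priority 3: scenario options
        {'plugins': {'ovs_stats': {}}},              # priority 2: scenario node
        {'interval': 5}                              # priority 1: context node
    ) == {'interval': 5, 'plugins': {'load': {}, 'ovs_stats': {}}}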
def wait_for_instantiate(self):
buf = []
time.sleep(self.WAIT_TIME) # Give some time for config to load
@@ -736,7 +717,6 @@ class SampleVNF(GenericVNF):
LOG.info("%s VNF is up and running.", self.APP_NAME)
self._vnf_up_post()
self.queue_wrapper.clear()
- self.resource_helper.start_collect()
return self._vnf_process.exitcode
if "PANIC" in message:
@@ -749,6 +729,12 @@ class SampleVNF(GenericVNF):
# by other VNF output
self.q_in.put('\r\n')
+ def start_collect(self):
+ self.resource_helper.start_collect()
+
+ def stop_collect(self):
+ self.resource_helper.stop_collect()
+
def _build_run_kwargs(self):
self.run_kwargs = {
'stdin': self.queue_wrapper,
@@ -811,7 +797,7 @@ class SampleVNF(GenericVNF):
def collect_kpi(self):
# we can't get KPIs if the VNF is down
- check_if_process_failed(self._vnf_process)
+ check_if_process_failed(self._vnf_process, 0.01)
stats = self.get_stats()
m = re.search(self.COLLECT_KPI, stats, re.MULTILINE)
if m:
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 265d0b7a9..2010546e7 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -12,19 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from __future__ import absolute_import
-
-import time
import os
import logging
import sys
+from yardstick.common import exceptions
from yardstick.common import utils
-from yardstick import error
from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
+
LOG = logging.getLogger(__name__)
WAIT_AFTER_CFG_LOAD = 10
@@ -36,7 +34,7 @@ sys.path.append(IXNET_LIB)
try:
from IxNet import IxNextgen
except ImportError:
- IxNextgen = error.ErrorClass
+ IxNextgen = exceptions.ErrorClass
class IxiaRfc2544Helper(Rfc2544ResourceHelper):
@@ -64,10 +62,10 @@ class IxiaResourceHelper(ClientResourceHelper):
self._connect()
def _connect(self, client=None):
- self.client._connect(self.vnfd_helper)
+ self.client.connect(self.vnfd_helper)
def get_stats(self, *args, **kwargs):
- return self.client.ix_get_statistics()
+ return self.client.get_statistics()
def stop_collect(self):
self._terminated.value = 1
@@ -76,8 +74,6 @@ class IxiaResourceHelper(ClientResourceHelper):
def generate_samples(self, ports, key=None, default=None):
stats = self.get_stats()
- last_result = stats[1]
- latency = stats[0]
samples = {}
# this is not DPDK port num, but this is whatever number we gave
@@ -88,19 +84,21 @@ class IxiaResourceHelper(ClientResourceHelper):
intf = self.vnfd_helper.find_interface_by_port(port_num)
port_name = intf["name"]
samples[port_name] = {
- "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][port_num]),
- "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][port_num]),
- "rx_throughput_mbps": float(last_result["Rx_Rate_Mbps"][port_num]),
- "tx_throughput_mbps": float(last_result["Tx_Rate_Mbps"][port_num]),
- "in_packets": int(last_result["Valid_Frames_Rx"][port_num]),
- "out_packets": int(last_result["Frames_Tx"][port_num]),
- "RxThroughput": int(last_result["Valid_Frames_Rx"][port_num]) / 30,
- "TxThroughput": int(last_result["Frames_Tx"][port_num]) / 30,
+ "rx_throughput_kps": float(stats["Rx_Rate_Kbps"][port_num]),
+ "tx_throughput_kps": float(stats["Tx_Rate_Kbps"][port_num]),
+ "rx_throughput_mbps": float(stats["Rx_Rate_Mbps"][port_num]),
+ "tx_throughput_mbps": float(stats["Tx_Rate_Mbps"][port_num]),
+ "in_packets": int(stats["Valid_Frames_Rx"][port_num]),
+ "out_packets": int(stats["Frames_Tx"][port_num]),
+ # NOTE(ralonsoh): we need to make the traffic injection
+ # time variable.
+ "RxThroughput": int(stats["Valid_Frames_Rx"][port_num]) / 30,
+ "TxThroughput": int(stats["Frames_Tx"][port_num]) / 30,
}
if key:
- avg_latency = latency["Store-Forward_Avg_latency_ns"][port_num]
- min_latency = latency["Store-Forward_Min_latency_ns"][port_num]
- max_latency = latency["Store-Forward_Max_latency_ns"][port_num]
+ avg_latency = stats["Store-Forward_Avg_latency_ns"][port_num]
+ min_latency = stats["Store-Forward_Min_latency_ns"][port_num]
+ max_latency = stats["Store-Forward_Max_latency_ns"][port_num]
samples[port_name][key] = \
{"Store-Forward_Avg_latency_ns": avg_latency,
"Store-Forward_Min_latency_ns": min_latency,
@@ -110,6 +108,12 @@ class IxiaResourceHelper(ClientResourceHelper):
return samples
+ def _initialize_client(self):
+ """Initialize the IXIA IxNetwork client and configure the server"""
+ self.client.clear_config()
+ self.client.assign_ports()
+ self.client.create_traffic_model()
+
def run_traffic(self, traffic_profile):
if self._terminated.value:
return
@@ -119,16 +123,7 @@ class IxiaResourceHelper(ClientResourceHelper):
default = "00:00:00:00:00:00"
self._build_ports()
-
- # we don't know client_file_name until runtime as instantiate
- client_file_name = \
- utils.find_relative_file(
- self.scenario_helper.scenario_cfg['ixia_profile'],
- self.scenario_helper.scenario_cfg["task_path"])
- self.client.ix_load_config(client_file_name)
- time.sleep(WAIT_AFTER_CFG_LOAD)
-
- self.client.ix_assign_ports()
+ self._initialize_client()
mac = {}
for port_name in self.vnfd_helper.port_pairs.all_ports:
@@ -140,43 +135,28 @@ class IxiaResourceHelper(ClientResourceHelper):
mac["src_mac_{}".format(port_num)] = virt_intf.get("local_mac", default)
mac["dst_mac_{}".format(port_num)] = virt_intf.get("dst_mac", default)
- samples = {}
- # Generate ixia traffic config...
try:
while not self._terminated.value:
- traffic_profile.execute_traffic(self, self.client, mac)
+ first_run = traffic_profile.execute_traffic(
+ self, self.client, mac)
self.client_started.value = 1
- time.sleep(WAIT_FOR_TRAFFIC)
- self.client.ix_stop_traffic()
+ # pylint: disable=unnecessary-lambda
+ utils.wait_until_true(lambda: self.client.is_traffic_stopped())
samples = self.generate_samples(traffic_profile.ports)
+
+ # NOTE(ralonsoh): the traffic injection duration is fixed to 30
+ # seconds. This parameter is configurable and must be retrieved
+ # from the traffic_profile.full_profile information.
+ # Every flow must have the same duration.
+ completed, samples = traffic_profile.get_drop_percentage(
+ samples, min_tol, max_tol, first_run=first_run)
self._queue.put(samples)
- status, samples = traffic_profile.get_drop_percentage(samples, min_tol,
- max_tol, self.client, mac)
- current = samples['CurrentDropPercentage']
- if min_tol <= current <= max_tol or status == 'Completed':
+ if completed:
self._terminated.value = 1
- self.client.ix_stop_traffic()
- self._queue.put(samples)
-
- if not self.rfc_helper.is_done():
- self._terminated.value = 1
- return
-
- traffic_profile.execute_traffic(self, self.client, mac)
- for _ in range(5):
- time.sleep(self.LATENCY_TIME_SLEEP)
- self.client.ix_stop_traffic()
- samples = self.generate_samples(traffic_profile.ports, 'latency', {})
- self._queue.put(samples)
- traffic_profile.start_ixia_latency(self, self.client, mac)
- if self._terminated.value:
- break
-
- self.client.ix_stop_traffic()
except Exception: # pylint: disable=broad-except
- LOG.exception("Run Traffic terminated")
+ LOG.exception('Run Traffic terminated')
self._terminated.value = 1
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
index 4e9f4bdc1..07cec6745 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -11,74 +11,45 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-""" Trex traffic generation definitions which implements rfc2544 """
-from __future__ import absolute_import
-from __future__ import print_function
-import time
import logging
-from collections import Mapping
-
-from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexTrafficGen
-from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
-from yardstick.network_services.vnf_generic.vnf.tg_trex import TrexResourceHelper
-
-LOGGING = logging.getLogger(__name__)
+import time
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_trex
-class TrexRfc2544ResourceHelper(Rfc2544ResourceHelper):
- def is_done(self):
- return self.latency and self.iteration.value > 10
+LOGGING = logging.getLogger(__name__)
-class TrexRfcResourceHelper(TrexResourceHelper):
+class TrexRfcResourceHelper(tg_trex.TrexResourceHelper):
- LATENCY_TIME_SLEEP = 120
- RUN_DURATION = 30
- WAIT_TIME = 3
+ SAMPLING_PERIOD = 2
+ TRANSIENT_PERIOD = 10
- def __init__(self, setup_helper, rfc_helper_type=None):
+ def __init__(self, setup_helper):
super(TrexRfcResourceHelper, self).__init__(setup_helper)
-
- if rfc_helper_type is None:
- rfc_helper_type = TrexRfc2544ResourceHelper
-
- self.rfc2544_helper = rfc_helper_type(self.scenario_helper)
+ self.rfc2544_helper = sample_vnf.Rfc2544ResourceHelper(
+ self.scenario_helper)
def _run_traffic_once(self, traffic_profile):
- if self._terminated.value:
- return
-
- traffic_profile.execute_traffic(self)
self.client_started.value = 1
- time.sleep(self.RUN_DURATION)
- self.client.stop(traffic_profile.ports)
- time.sleep(self.WAIT_TIME)
- samples = traffic_profile.get_drop_percentage(self)
- self._queue.put(samples)
-
- if not self.rfc2544_helper.is_done():
- return
-
- self.client.stop(traffic_profile.ports)
- self.client.reset(ports=traffic_profile.ports)
- self.client.remove_all_streams(traffic_profile.ports)
- traffic_profile.execute_traffic_latency(samples=samples)
- multiplier = traffic_profile.calculate_pps(samples)[1]
- for _ in range(5):
- time.sleep(self.LATENCY_TIME_SLEEP)
- self.client.stop(traffic_profile.ports)
- time.sleep(self.WAIT_TIME)
- last_res = self.client.get_stats(traffic_profile.ports)
- if not isinstance(last_res, Mapping):
- self._terminated.value = 1
- continue
- self.generate_samples(traffic_profile.ports, 'latency', {})
- self._queue.put(samples)
- self.client.start(mult=str(multiplier),
- ports=traffic_profile.ports,
- duration=120, force=True)
+ ports, port_pg_id = traffic_profile.execute_traffic(self)
+
+ samples = []
+ timeout = int(traffic_profile.config.duration) - self.TRANSIENT_PERIOD
+ time.sleep(self.TRANSIENT_PERIOD)
+ for _ in utils.Timer(timeout=timeout):
+ samples.append(self._get_samples(ports, port_pg_id=port_pg_id))
+ time.sleep(self.SAMPLING_PERIOD)
+
+ traffic_profile.stop_traffic(self)
+ output = traffic_profile.get_drop_percentage(
+ samples, self.rfc2544_helper.tolerance_low,
+ self.rfc2544_helper.tolerance_high,
+ self.rfc2544_helper.correlated_traffic)
+ self._queue.put(output)
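The loop above skips the first TRANSIENT_PERIOD seconds of each injection and then polls the statistics every SAMPLING_PERIOD seconds until the profile duration is used up (utils.Timer is assumed to iterate until its timeout expires); for a 30 second run the sampling instants are therefore roughly:

    def sampling_instants(duration=30, transient=10, period=2):
        """Approximate times (s, from traffic start) at which samples are taken."""
        return list(range(transient, duration, period))

    assert sampling_instants() == [10, 12, 14, 16, 18, 20, 22, 24, 26, 28]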
def start_client(self, ports, mult=None, duration=None, force=True):
self.client.start(ports=ports, mult=mult, duration=duration, force=force)
@@ -86,12 +57,8 @@ class TrexRfcResourceHelper(TrexResourceHelper):
def clear_client_stats(self, ports):
self.client.clear_stats(ports=ports)
- def collect_kpi(self):
- self.rfc2544_helper.iteration.value += 1
- return super(TrexRfcResourceHelper, self).collect_kpi()
-
-class TrexTrafficGenRFC(TrexTrafficGen):
+class TrexTrafficGenRFC(tg_trex.TrexTrafficGen):
"""
This class handles mapping traffic profile and generating
traffic for rfc2544 testcase.
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
index 0084a124c..80b42e22d 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_trex.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -13,7 +13,6 @@
# limitations under the License.
""" Trex acts as traffic generation and vnf definitions based on IETS Spec """
-from __future__ import absolute_import
import logging
import os
@@ -25,6 +24,7 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTraff
from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+
LOG = logging.getLogger(__name__)
@@ -165,6 +165,30 @@ class TrexResourceHelper(ClientResourceHelper):
cmd = "sudo fuser -n tcp %s %s -k > /dev/null 2>&1"
self.ssh_helper.execute(cmd % (self.SYNC_PORT, self.ASYNC_PORT))
+ def _get_samples(self, ports, port_pg_id=None):
+ stats = self.get_stats(ports)
+ samples = {}
+ for pname in (intf['name'] for intf in self.vnfd_helper.interfaces):
+ port_num = self.vnfd_helper.port_num(pname)
+ port_stats = stats.get(port_num, {})
+ samples[pname] = {
+ 'rx_throughput_fps': float(port_stats.get('rx_pps', 0.0)),
+ 'tx_throughput_fps': float(port_stats.get('tx_pps', 0.0)),
+ 'rx_throughput_bps': float(port_stats.get('rx_bps', 0.0)),
+ 'tx_throughput_bps': float(port_stats.get('tx_bps', 0.0)),
+ 'in_packets': int(port_stats.get('ipackets', 0)),
+ 'out_packets': int(port_stats.get('opackets', 0)),
+ }
+
+ pg_id_list = port_pg_id.get_pg_ids(port_num)
+ samples[pname]['latency'] = {}
+ for pg_id in pg_id_list:
+ latency_global = stats.get('latency', {})
+ pg_latency = latency_global.get(pg_id, {}).get('latency')
+ samples[pname]['latency'][pg_id] = pg_latency
+
+ return samples
+
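For reference, _get_samples() returns one entry per interface with the TRex per-port counters plus a latency sub-dictionary keyed by pg_id; the example below shows only the resulting shape (all figures and the inner latency fields are placeholders, not real TRex output):

    EXAMPLE_SAMPLES = {
        'xe0': {
            'rx_throughput_fps': 1200000.0,
            'tx_throughput_fps': 1250000.0,
            'rx_throughput_bps': 614400000.0,
            'tx_throughput_bps': 640000000.0,
            'in_packets': 36000000,
            'out_packets': 37500000,
            'latency': {1: {'average': 12.3, 'total_max': 45.6, 'total_min': 3.2}},
        },
    }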
class TrexTrafficGen(SampleVNFTrafficGen):
"""
diff --git a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
index 61e99855f..3ba1f91b7 100644
--- a/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vfw_vnf.py
@@ -21,7 +21,7 @@ from yardstick.network_services.yang_model import YangModel
LOG = logging.getLogger(__name__)
# vFW should work the same on all systems, we can provide the binary
-FW_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
+FW_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}"
FW_COLLECT_KPI = (r"""VFW TOTAL:[^p]+pkts_received"?:\s(\d+),[^p]+pkts_fw_forwarded"?:\s(\d+),"""
r"""[^p]+pkts_drop_fw"?:\s(\d+),\s""")
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index 077ce2385..9deef5cfa 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -31,7 +31,7 @@ from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, Dpd
LOG = logging.getLogger(__name__)
-VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
+VPE_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}"
VPE_COLLECT_KPI = """\
Pkts in:\\s(\\d+)\r\n\
@@ -115,7 +115,8 @@ class ConfigCreate(object):
pktq = "SWQ{0}{1}".format(self.sw_q, sink)
return pktq
- def vpe_upstream(self, vnf_cfg, index=0):
+ def vpe_upstream(self, vnf_cfg, index=0): # pragma: no cover
+ # NOTE(ralonsoh): this function must be covered in UTs.
parser = configparser.ConfigParser()
parser.read(os.path.join(vnf_cfg, 'vpe_upstream'))
@@ -147,7 +148,8 @@ class ConfigCreate(object):
self.n_pipeline += 1
return parser
- def vpe_downstream(self, vnf_cfg, index):
+ def vpe_downstream(self, vnf_cfg, index): # pragma: no cover
+ # NOTE(ralonsoh): this function must be covered in UTs.
parser = configparser.ConfigParser()
parser.read(os.path.join(vnf_cfg, 'vpe_downstream'))
for pipeline in parser.sections():
diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py
index 5afa4151e..e0c0db262 100644
--- a/yardstick/orchestrator/heat.py
+++ b/yardstick/orchestrator/heat.py
@@ -22,13 +22,13 @@ import time
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
-import shade
from shade._heat import event_utils
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import constants as consts
from yardstick.common import exceptions
from yardstick.common import template_format
-from yardstick.common import constants as consts
+from yardstick.common import openstack_utils as op_utils
+
log = logging.getLogger(__name__)
@@ -41,10 +41,11 @@ _DEPLOYED_STACKS = {}
class HeatStack(object):
"""Represents a Heat stack (deployed template) """
- def __init__(self, name):
+ def __init__(self, name, os_cloud_config=None):
self.name = name
self.outputs = {}
- self._cloud = shade.openstack_cloud()
+ os_cloud_config = {} if not os_cloud_config else os_cloud_config
+ self._cloud = op_utils.get_shade_client(**os_cloud_config)
self._stack = None
def _update_stack_tracking(self):
@@ -152,10 +153,12 @@ name (i.e. %s).
# short hand for resources part of template
self.resources = self._template['resources']
- def __init__(self, name, template_file=None, heat_parameters=None):
+ def __init__(self, name, template_file=None, heat_parameters=None,
+ os_cloud_config=None):
self.name = name
self.keystone_client = None
self.heat_parameters = {}
+ self._os_cloud_config = {} if not os_cloud_config else os_cloud_config
# heat_parameters is passed to heat in stack create, empty dict when
# yardstick creates the template (no get_param in resources part)
@@ -622,7 +625,7 @@ name (i.e. %s).
log.info("Creating stack '%s' START", self.name)
start_time = time.time()
- stack = HeatStack(self.name)
+ stack = HeatStack(self.name, os_cloud_config=self._os_cloud_config)
stack.create(self._template, self.heat_parameters, block, timeout)
if not block:
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 198eeac6d..ac3a09ed1 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -74,7 +74,7 @@ class KubernetesObject(object):
def _add_container(self):
container_name = '{}-container'.format(self.name)
- ssh_key_mount_path = "/root/.ssh/"
+ ssh_key_mount_path = '/tmp/.ssh/'
container = {
"args": self.args,
diff --git a/yardstick/ssh.py b/yardstick/ssh.py
index d7adc0d05..6b5e6faf4 100644
--- a/yardstick/ssh.py
+++ b/yardstick/ssh.py
@@ -62,15 +62,13 @@ Eventlet:
sshclient = eventlet.import_patched("yardstick.ssh")
"""
-from __future__ import absolute_import
-import os
import io
+import logging
+import os
+import re
import select
import socket
import time
-import re
-
-import logging
import paramiko
from chainmap import ChainMap
@@ -78,6 +76,7 @@ from oslo_utils import encodeutils
from scp import SCPClient
import six
+from yardstick.common import exceptions
from yardstick.common.utils import try_int, NON_NONE_DEFAULT, make_dict_from_map
from yardstick.network_services.utils import provision_tool
@@ -90,12 +89,12 @@ def convert_key_to_str(key):
return k.getvalue()
-class SSHError(Exception):
- pass
-
-
-class SSHTimeout(SSHError):
- pass
+# class SSHError(Exception):
+# pass
+#
+#
+# class SSHTimeout(SSHError):
+# pass
class SSH(object):
@@ -193,7 +192,7 @@ class SSH(object):
return key_class.from_private_key(key)
except paramiko.SSHException as e:
errors.append(e)
- raise SSHError("Invalid pkey: %s" % errors)
+ raise exceptions.SSHError(error_msg='Invalid pkey: %s' % errors)
@property
def is_connected(self):
@@ -214,10 +213,10 @@ class SSH(object):
return self._client
except Exception as e:
message = ("Exception %(exception_type)s was raised "
- "during connect. Exception value is: %(exception)r")
+ "during connect. Exception value is: %(exception)r" %
+ {"exception": e, "exception_type": type(e)})
self._client = False
- raise SSHError(message % {"exception": e,
- "exception_type": type(e)})
+ raise exceptions.SSHError(error_msg=message)
def _make_dict(self):
return {
@@ -334,11 +333,11 @@ class SSH(object):
break
if timeout and (time.time() - timeout) > start_time:
- args = {"cmd": cmd, "host": self.host}
- raise SSHTimeout("Timeout executing command "
- "'%(cmd)s' on host %(host)s" % args)
+ message = ('Timeout executing command %(cmd)s on host %(host)s'
+ % {"cmd": cmd, "host": self.host})
+ raise exceptions.SSHTimeout(error_msg=message)
if e:
- raise SSHError("Socket error.")
+ raise exceptions.SSHError(error_msg='Socket error')
exit_status = session.recv_exit_status()
if exit_status != 0 and raise_on_error:
@@ -346,7 +345,7 @@ class SSH(object):
details = fmt % {"cmd": cmd, "status": exit_status}
if stderr_data:
details += " Last stderr data: '%s'." % stderr_data
- raise SSHError(details)
+ raise exceptions.SSHError(error_msg=details)
return exit_status
def execute(self, cmd, stdin=None, timeout=3600):
@@ -377,11 +376,12 @@ class SSH(object):
while True:
try:
return self.execute("uname")
- except (socket.error, SSHError) as e:
+ except (socket.error, exceptions.SSHError) as e:
self.log.debug("Ssh is still unavailable: %r", e)
time.sleep(interval)
if time.time() > end_time:
- raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
def put(self, files, remote_path=b'.', recursive=False):
client = self._get_client()
@@ -486,11 +486,12 @@ class AutoConnectSSH(SSH):
while True:
try:
return self._get_client()
- except (socket.error, SSHError) as e:
+ except (socket.error, exceptions.SSHError) as e:
self.log.debug("Ssh is still unavailable: %r", e)
time.sleep(interval)
if time.time() > end_time:
- raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+ raise exceptions.SSHTimeout(
+ error_msg='Timeout waiting for "%s"' % self.host)
def drop_connection(self):
""" Don't close anything, just force creation of a new client """
diff --git a/yardstick/tests/functional/common/messaging/__init__.py b/yardstick/tests/functional/common/messaging/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/functional/common/messaging/__init__.py
diff --git a/yardstick/tests/functional/common/messaging/test_messaging.py b/yardstick/tests/functional/common/messaging/test_messaging.py
new file mode 100644
index 000000000..99874343b
--- /dev/null
+++ b/yardstick/tests/functional/common/messaging/test_messaging.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import multiprocessing
+import time
+
+from yardstick.common.messaging import consumer
+from yardstick.common.messaging import payloads
+from yardstick.common.messaging import producer
+from yardstick.tests.functional import base
+
+
+TOPIC = 'topic_MQ'
+METHOD_INFO = 'info'
+
+
+class DummyPayload(payloads.Payload):
+ REQUIRED_FIELDS = {'version', 'data'}
+
+
+class DummyEndpoint(consumer.NotificationHandler):
+
+ def info(self, ctxt, **kwargs):
+ if ctxt['pid'] in self._ctx_pids:
+ self._queue.put('ID {}, data: {}, pid: {}'.format(
+ self._id, kwargs['data'], ctxt['pid']))
+
+
+class DummyConsumer(consumer.MessagingConsumer):
+
+ def __init__(self, _id, ctx_pids, queue):
+ self._id = _id
+ endpoints = [DummyEndpoint(_id, ctx_pids, queue)]
+ super(DummyConsumer, self).__init__(TOPIC, ctx_pids, endpoints)
+
+
+class DummyProducer(producer.MessagingProducer):
+ pass
+
+
+def _run_consumer(_id, ctx_pids, queue):
+ _consumer = DummyConsumer(_id, ctx_pids, queue)
+ _consumer.start_rpc_server()
+ _consumer.wait()
+
+
+class MessagingTestCase(base.BaseFunctionalTestCase):
+
+ @staticmethod
+ def _terminate_consumers(num_consumers, processes):
+ for i in range(num_consumers):
+ processes[i].terminate()
+
+ def test_run_five_consumers(self):
+ output_queue = multiprocessing.Queue()
+ num_consumers = 10
+ ctx_1 = 100001
+ ctx_2 = 100002
+ producers = [DummyProducer(TOPIC, pid=ctx_1),
+ DummyProducer(TOPIC, pid=ctx_2)]
+
+ processes = []
+ for i in range(num_consumers):
+ processes.append(multiprocessing.Process(
+ name='consumer_{}'.format(i),
+ target=_run_consumer,
+ args=(i, [ctx_1, ctx_2], output_queue)))
+ processes[i].start()
+ self.addCleanup(self._terminate_consumers, num_consumers, processes)
+
+        time.sleep(2)  # Let the consumers create their listeners
+ for producer in producers:
+ for message in ['message 0', 'message 1']:
+ producer.send_message(METHOD_INFO,
+ DummyPayload(version=1, data=message))
+
+        time.sleep(2)  # Let the consumers handle the calls
+ output = []
+ while not output_queue.empty():
+ output.append(output_queue.get(True, 1))
+
+ self.assertEqual(num_consumers * 4, len(output))
+ msg_template = 'ID {}, data: {}, pid: {}'
+ for i in range(num_consumers):
+ for ctx in [ctx_1, ctx_2]:
+ for message in ['message 0', 'message 1']:
+ msg = msg_template.format(i, message, ctx)
+ self.assertIn(msg, output)
diff --git a/yardstick/tests/integration/dummy-scenario-heat-context.yaml b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
index 7c980b412..45a39951a 100644
--- a/yardstick/tests/integration/dummy-scenario-heat-context.yaml
+++ b/yardstick/tests/integration/dummy-scenario-heat-context.yaml
@@ -6,6 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{% set context_name = context_name or "demo" %}
---
# Sample Heat context config with Dummy context
@@ -22,9 +23,9 @@ scenarios:
context:
name: {{ context_name }}
- image: cirros-0.3.5
- flavor: cirros256
- user: cirros
+ image: yardstick-image
+ flavor: yardstick-flavor
+ user: ubuntu
servers:
athena:
diff --git a/yardstick/tests/unit/apiserver/resources/v2/__init__.py b/yardstick/tests/unit/apiserver/resources/v2/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/apiserver/resources/v2/__init__.py
diff --git a/yardstick/tests/unit/apiserver/resources/v2/test_images.py b/yardstick/tests/unit/apiserver/resources/v2/test_images.py
new file mode 100644
index 000000000..ab131eec5
--- /dev/null
+++ b/yardstick/tests/unit/apiserver/resources/v2/test_images.py
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2017 Huawei Technologies Co.,Ltd.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import mock
+
+import unittest
+
+from yardstick.tests.unit.apiserver import APITestCase
+from api.resources.v2.images import format_image_info
+
+
+class V2ImagesTestCase(APITestCase):
+ @mock.patch('yardstick.common.openstack_utils.list_images')
+ @mock.patch('yardstick.common.utils.source_env')
+ def test_get(self, _, mock_list_images):
+ if self.app is None:
+ self.skipTest('host config error')
+
+ single_image = mock.MagicMock()
+ single_image.name = 'yardstick-image'
+ single_image.size = 16384
+ single_image.status = 'active'
+ single_image.updated_at = '2018-04-08'
+
+ mock_list_images.return_value = [single_image]
+ url = 'api/v2/yardstick/images'
+ resp = self._get(url)
+ self.assertEqual(resp.get('status'), 1)
+
+
+class FormatImageInfoTestCase(unittest.TestCase):
+ def test_format_image_info(self):
+ image = mock.MagicMock()
+ image.name = 'yardstick-image'
+ image.size = 1048576
+ image.status = 'active'
+ image.updated_at = '2018-04-08'
+
+ image_dict = format_image_info(image)
+ self.assertEqual(image_dict.get('size'), 1)
diff --git a/yardstick/tests/unit/apiserver/utils/__init__.py b/yardstick/tests/unit/apiserver/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/apiserver/utils/__init__.py
diff --git a/yardstick/tests/unit/apiserver/utils/test_influx.py b/yardstick/tests/unit/apiserver/utils/test_influx.py
index dce6c1cec..95105d8ae 100644
--- a/yardstick/tests/unit/apiserver/utils/test_influx.py
+++ b/yardstick/tests/unit/apiserver/utils/test_influx.py
@@ -6,28 +6,48 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
+
+from influxdb import client as influxdb_client
import mock
+from six.moves import configparser
from api.utils import influx
-from six.moves import configparser as ConfigParser
+from yardstick.common import constants
+from yardstick.common import exceptions
+from yardstick import dispatcher
+from yardstick.tests.unit import base
+
+class GetDataDbClientTestCase(base.BaseUnitTestCase):
-class GetDataDbClientTestCase(unittest.TestCase):
+ @mock.patch.object(influx, '_get_influxdb_client',
+ return_value='fake_client')
+ @mock.patch.object(influx.ConfigParser, 'ConfigParser')
+ def test_get_data_db_client(self, mock_parser, mock_get_client):
+ _mock_parser = mock.Mock()
+ mock_parser.return_value = _mock_parser
- @mock.patch('api.utils.influx.ConfigParser')
- def test_get_data_db_client_dispatcher_not_influxdb(self, mock_parser):
- mock_parser.ConfigParser().get.return_value = 'file'
- # reset exception to avoid
- # TypeError: catching classes that do not inherit from BaseException
- mock_parser.NoOptionError = ConfigParser.NoOptionError
- try:
+ self.assertEqual('fake_client', influx.get_data_db_client())
+ _mock_parser.read.assert_called_once_with(constants.CONF_FILE)
+ mock_get_client.assert_called_once_with(_mock_parser)
+
+ @mock.patch.object(influx, '_get_influxdb_client',
+ return_value='fake_client')
+ @mock.patch.object(influx.ConfigParser, 'ConfigParser')
+ def test_get_data_db_client_parsing_error(self, mock_parser,
+ mock_get_client):
+ _mock_parser = mock.Mock()
+ mock_parser.return_value = _mock_parser
+ mock_parser.NoOptionError = configparser.NoOptionError
+ mock_get_client.side_effect = configparser.NoOptionError('option', 'section')
+ with self.assertRaises(configparser.NoOptionError):
influx.get_data_db_client()
- except Exception as e: # pylint: disable=broad-except
- self.assertIsInstance(e, RuntimeError)
+
+ _mock_parser.read.assert_called_once_with(constants.CONF_FILE)
+ mock_get_client.assert_called_once_with(_mock_parser)
-class GetIpTestCase(unittest.TestCase):
+class GetIpTestCase(base.BaseUnitTestCase):
def test_get_url(self):
url = 'http://localhost:8086/hello'
@@ -37,16 +57,32 @@ class GetIpTestCase(unittest.TestCase):
self.assertEqual(result, output)
-class QueryTestCase(unittest.TestCase):
+class GetInfluxdbTestCase(base.BaseUnitTestCase):
+
+ @mock.patch.object(influxdb_client, 'InfluxDBClient',
+ return_value='idb_client')
+ @mock.patch.object(influx, '_get_ip', return_value='fake_ip')
+ def test_get_influxdb_client(self, mock_get_ip, mock_client):
+ mock_parser = mock.Mock()
+ mock_parser.get.side_effect = [dispatcher.INFLUXDB, 'target', 'user',
+ 'pass', 'db_name']
+
+ self.assertEqual('idb_client',
+ influx._get_influxdb_client(mock_parser))
+ mock_client.assert_called_once_with('fake_ip', constants.INFLUXDB_PORT,
+ 'user', 'pass', 'db_name')
+ mock_get_ip.assert_called_once_with('target')
+ mock_parser.get.assert_has_calls([
+ mock.call('DEFAULT', 'dispatcher'),
+ mock.call('dispatcher_influxdb', 'target'),
+ mock.call('dispatcher_influxdb', 'username'),
+ mock.call('dispatcher_influxdb', 'password'),
+ mock.call('dispatcher_influxdb', 'db_name')])
+
+ def test_get_influxdb_client_no_influxdb_client(self):
+ mock_parser = mock.Mock()
+ mock_parser.get.return_value = dispatcher.FILE
- @mock.patch('api.utils.influx.ConfigParser')
- def test_query_dispatcher_not_influxdb(self, mock_parser):
- mock_parser.ConfigParser().get.return_value = 'file'
- # reset exception to avoid
- # TypeError: catching classes that do not inherit from BaseException
- mock_parser.NoOptionError = ConfigParser.NoOptionError
- try:
- sql = 'select * form tasklist'
- influx.query(sql)
- except Exception as e: # pylint: disable=broad-except
- self.assertIsInstance(e, RuntimeError)
+ with self.assertRaises(exceptions.InfluxDBConfigurationMissing):
+ influx._get_influxdb_client(mock_parser)
+ mock_parser.get.assert_called_once_with('DEFAULT', 'dispatcher')
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
index b1dcee209..246a5b2b9 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py
@@ -13,11 +13,11 @@
# limitations under the License.
import copy
-import mock
import os
-import unittest
import uuid
+import mock
+import unittest
from xml.etree import ElementTree
from yardstick import ssh
@@ -172,14 +172,70 @@ class ModelLibvirtTestCase(unittest.TestCase):
interface_address.get('function'))
def test_create_snapshot_qemu(self):
- result = "/var/lib/libvirt/images/0.qcow2"
- with mock.patch("yardstick.ssh.SSH") as ssh:
- ssh_mock = mock.Mock(autospec=ssh.SSH)
- ssh_mock.execute = \
- mock.Mock(return_value=(0, "a", ""))
- ssh.return_value = ssh_mock
- image = model.Libvirt.create_snapshot_qemu(ssh_mock, "0", "ubuntu.img")
- self.assertEqual(image, result)
+ self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0))
+ index = 1
+ vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+ base_image = '/tmp/base_image'
+
+ model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, base_image)
+ self.mock_ssh.execute.assert_has_calls([
+ mock.call('rm -- "%s"' % vm_image),
+ mock.call('test -r %s' % base_image),
+ mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' %
+ (base_image, vm_image))
+ ])
+
+ @mock.patch.object(os.path, 'basename', return_value='base_image')
+ @mock.patch.object(os.path, 'normpath')
+ @mock.patch.object(os, 'access', return_value=True)
+ def test_create_snapshot_qemu_no_image_remote(self,
+ mock_os_access, mock_normpath, mock_basename):
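+ # The second 'execute' call ('test -r') fails, so the base image is copied to the remote host first.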
+ self.mock_ssh.execute = mock.Mock(
+ side_effect=[(0, 0, 0), (1, 0, 0), (0, 0, 0), (0, 0, 0)])
+ index = 1
+ vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+ base_image = '/tmp/base_image'
+ mock_normpath.return_value = base_image
+
+ model.Libvirt.create_snapshot_qemu(self.mock_ssh, index, base_image)
+ self.mock_ssh.execute.assert_has_calls([
+ mock.call('rm -- "%s"' % vm_image),
+ mock.call('test -r %s' % base_image),
+ mock.call('mv -- "/tmp/%s" "%s"' % ('base_image', base_image)),
+ mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' %
+ (base_image, vm_image))
+ ])
+ mock_os_access.assert_called_once_with(base_image, os.R_OK)
+ mock_normpath.assert_called_once_with(base_image)
+ mock_basename.assert_has_calls([mock.call(base_image)])
+ self.mock_ssh.put_file.assert_called_once_with(base_image,
+ '/tmp/base_image')
+
+ @mock.patch.object(os, 'access', return_value=False)
+ def test_create_snapshot_qemu_no_image_local(self, mock_os_access):
+ self.mock_ssh.execute = mock.Mock(side_effect=[(0, 0, 0), (1, 0, 0)])
+ base_image = '/tmp/base_image'
+
+ with self.assertRaises(exceptions.LibvirtQemuImageBaseImageNotPresent):
+ model.Libvirt.create_snapshot_qemu(self.mock_ssh, 3, base_image)
+ mock_os_access.assert_called_once_with(base_image, os.R_OK)
+
+ def test_create_snapshot_qemu_error_qemuimg_command(self):
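+ # The third 'execute' call (qemu-img create) fails, which should raise LibvirtQemuImageCreateError.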
+ self.mock_ssh.execute = mock.Mock(
+ side_effect=[(0, 0, 0), (0, 0, 0), (1, 0, 0)])
+ index = 1
+ vm_image = '/var/lib/libvirt/images/%s.qcow2' % index
+ base_image = '/tmp/base_image'
+
+ with self.assertRaises(exceptions.LibvirtQemuImageCreateError):
+ model.Libvirt.create_snapshot_qemu(self.mock_ssh, index,
+ base_image)
+ self.mock_ssh.execute.assert_has_calls([
+ mock.call('rm -- "%s"' % vm_image),
+ mock.call('test -r %s' % base_image),
+ mock.call('qemu-img create -f qcow2 -o backing_file=%s %s' %
+ (base_image, vm_image))
+ ])
@mock.patch.object(model.Libvirt, 'pin_vcpu_for_perf', return_value='4,5')
@mock.patch.object(model.Libvirt, 'create_snapshot_qemu',
@@ -422,7 +478,7 @@ class OvsDeployTestCase(unittest.TestCase):
def setUp(self):
self._mock_ssh = mock.patch.object(ssh, 'SSH')
- self.mock_ssh = self._mock_ssh .start()
+ self.mock_ssh = self._mock_ssh.start()
self.ovs_deploy = model.OvsDeploy(self.mock_ssh,
'/tmp/dpdk-devbind.py',
self.OVS_DETAILS)
@@ -494,4 +550,4 @@ class OvsDeployTestCase(unittest.TestCase):
'dpdk_version': dpdk_version,
'proxy': 'test_proxy'})
mock_execute.assert_called_once_with(cmd)
- mock_env_get.assert_called_once_with('http_proxy', '')
+ mock_env_get.assert_has_calls([mock.call('http_proxy', '')])
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
index bc3bb73cd..6eb438cb1 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py
@@ -19,6 +19,7 @@ import mock
import six
import unittest
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.benchmark.contexts.standalone import ovs_dpdk
from yardstick.common import exceptions
@@ -59,9 +60,11 @@ class OvsDpdkContextTestCase(unittest.TestCase):
self.ovs_dpdk = ovs_dpdk.OvsDpdkContext()
self.addCleanup(self._remove_contexts)
- def _remove_contexts(self):
- if self.ovs_dpdk in self.ovs_dpdk.list:
- self.ovs_dpdk._delete_context()
+ @staticmethod
+ def _remove_contexts():
+ for context in base.Context.list:
+ context._delete_context()
+ base.Context.list = []
@mock.patch('yardstick.benchmark.contexts.standalone.model.Server')
@mock.patch('yardstick.benchmark.contexts.standalone.model.StandaloneContextHelper')
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index e70ab0ae8..de748e285 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -18,6 +18,7 @@ import mock
import unittest
from yardstick import ssh
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts.standalone import model
from yardstick.benchmark.contexts.standalone import sriov
@@ -66,9 +67,11 @@ class SriovContextTestCase(unittest.TestCase):
self.sriov = sriov.SriovContext()
self.addCleanup(self._remove_contexts)
- def _remove_contexts(self):
- if self.sriov in self.sriov.list:
- self.sriov._delete_context()
+ @staticmethod
+ def _remove_contexts():
+ for context in base.Context.list:
+ context._delete_context()
+ base.Context.list = []
@mock.patch.object(model, 'StandaloneContextHelper')
@mock.patch.object(model, 'Libvirt')
@@ -242,18 +245,19 @@ class SriovContextTestCase(unittest.TestCase):
self.assertIsNone(self.sriov.configure_nics_for_sriov())
@mock.patch.object(ssh, 'SSH', return_value=(0, "a", ""))
- @mock.patch.object(model, 'Libvirt')
- def test__enable_interfaces(self, mock_libvirt, mock_ssh):
- # pylint: disable=unused-argument
- # NOTE(ralonsoh): the pylint exception should be removed.
+ @mock.patch.object(model.Libvirt, 'add_sriov_interfaces',
+ return_value='out_xml')
+ def test__enable_interfaces(self, mock_add_sriov, mock_ssh):
self.sriov.vm_deploy = True
self.sriov.connection = mock_ssh
self.sriov.vm_names = ['vm_0', 'vm_1']
self.sriov.drivers = []
self.sriov.networks = self.NETWORKS
- self.sriov.get_vf_data = mock.Mock(return_value="")
- self.assertIsNone(self.sriov._enable_interfaces(
- 0, 0, ["private_0"], 'test'))
+ self.assertEqual(
+ 'out_xml',
+ self.sriov._enable_interfaces(0, 0, ['private_0'], 'test'))
+ mock_add_sriov.assert_called_once_with(
+ '0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test')
@mock.patch.object(model.Libvirt, 'build_vm_xml')
@mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
@@ -282,7 +286,9 @@ class SriovContextTestCase(unittest.TestCase):
mock_build_vm_xml.return_value = (xml_out, '00:00:00:00:00:01')
with mock.patch.object(self.sriov, 'vnf_node') as mock_vnf_node, \
- mock.patch.object(self.sriov, '_enable_interfaces'):
+ mock.patch.object(self.sriov, '_enable_interfaces') as \
+ mock_enable_interfaces:
+ mock_enable_interfaces.return_value = 'out_xml'
mock_vnf_node.generate_vnf_instance = mock.Mock(
return_value='node')
nodes_out = self.sriov.setup_sriov_context()
@@ -294,7 +300,10 @@ class SriovContextTestCase(unittest.TestCase):
connection, 'flavor', vm_name, 0)
mock_create_vm.assert_called_once_with(connection, cfg)
mock_check.assert_called_once_with(vm_name, connection)
- mock_write_file.assert_called_once_with(cfg, xml_out)
+ mock_write_file.assert_called_once_with(cfg, 'out_xml')
+ mock_enable_interfaces.assert_has_calls([
+ mock.call(0, mock.ANY, ['private_0'], mock.ANY),
+ mock.call(0, mock.ANY, ['public_0'], mock.ANY)], any_order=True)
def test__get_vf_data(self):
with mock.patch("yardstick.ssh.SSH") as ssh:
diff --git a/yardstick/tests/unit/benchmark/contexts/test_base.py b/yardstick/tests/unit/benchmark/contexts/test_base.py
index 153c6a527..81267cf98 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_base.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_base.py
@@ -12,12 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import unittest
-
from yardstick.benchmark.contexts import base
+from yardstick.tests.unit import base as ut_base
+
+
+class DummyContextClass(base.Context):
+
+ def _get_network(self, *args):
+ pass
+
+ def _get_server(self, *args):
+ pass
+
+ def deploy(self):
+ pass
+
+ def undeploy(self):
+ pass
-class FlagsTestCase(unittest.TestCase):
+class FlagsTestCase(ut_base.BaseUnitTestCase):
def setUp(self):
self.flags = base.Flags()
@@ -25,6 +39,7 @@ class FlagsTestCase(unittest.TestCase):
def test___init__(self):
self.assertFalse(self.flags.no_setup)
self.assertFalse(self.flags.no_teardown)
+ self.assertEqual({'verify': False}, self.flags.os_cloud_config)
def test___init__with_flags(self):
flags = base.Flags(no_setup=True)
@@ -32,12 +47,43 @@ class FlagsTestCase(unittest.TestCase):
self.assertFalse(flags.no_teardown)
def test_parse(self):
- self.flags.parse(no_setup=True, no_teardown="False")
+ self.flags.parse(no_setup=True, no_teardown='False',
+ os_cloud_config={'verify': True})
self.assertTrue(self.flags.no_setup)
- self.assertEqual(self.flags.no_teardown, "False")
+ self.assertEqual('False', self.flags.no_teardown)
+ self.assertEqual({'verify': True}, self.flags.os_cloud_config)
def test_parse_forbidden_flags(self):
self.flags.parse(foo=42)
with self.assertRaises(AttributeError):
_ = self.flags.foo
+
+
+class ContextTestCase(ut_base.BaseUnitTestCase):
+
+ @staticmethod
+ def _remove_ctx(ctx_obj):
+ if ctx_obj in base.Context.list:
+ base.Context.list.remove(ctx_obj)
+
+ def test_split_host_name(self):
+ ctx_obj = DummyContextClass()
+ self.addCleanup(self._remove_ctx, ctx_obj)
+ config_name = 'host_name.ctx_name'
+ self.assertEqual(('host_name', 'ctx_name'),
+ ctx_obj.split_host_name(config_name))
+
+ def test_split_host_name_wrong_separator(self):
+ ctx_obj = DummyContextClass()
+ self.addCleanup(self._remove_ctx, ctx_obj)
+ config_name = 'host_name-ctx_name'
+ self.assertEqual((None, None),
+ ctx_obj.split_host_name(config_name))
+
+ def test_split_host_name_other_separator(self):
+ ctx_obj = DummyContextClass(host_name_separator='-')
+ self.addCleanup(self._remove_ctx, ctx_obj)
+ config_name = 'host_name-ctx_name'
+ self.assertEqual(('host_name', 'ctx_name'),
+ ctx_obj.split_host_name(config_name))
diff --git a/yardstick/tests/unit/benchmark/contexts/test_dummy.py b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
index e393001a1..c4113be41 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_dummy.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_dummy.py
@@ -9,6 +9,7 @@
import unittest
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import dummy
@@ -20,7 +21,12 @@ class DummyContextTestCase(unittest.TestCase):
'task_id': '1234567890',
}
self.test_context = dummy.DummyContext()
- self.addCleanup(self.test_context._delete_context)
+ self.addCleanup(self._delete_contexts)
+
+ @staticmethod
+ def _delete_contexts():
+ for context in base.Context.list:
+ context._delete_context()
def test___init__(self):
self.assertFalse(self.test_context._flags.no_setup)
diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index 625f97bf4..9c822b3a7 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -8,38 +8,34 @@
##############################################################################
from collections import OrderedDict
-from itertools import count
import logging
import os
import mock
-import unittest
+from yardstick import ssh
from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import heat
from yardstick.benchmark.contexts import model
from yardstick.common import constants as consts
from yardstick.common import exceptions as y_exc
-from yardstick import ssh
+from yardstick.tests.unit import base as ut_base
LOG = logging.getLogger(__name__)
-class HeatContextTestCase(unittest.TestCase):
-
- def __init__(self, *args, **kwargs):
- super(HeatContextTestCase, self).__init__(*args, **kwargs)
- self.name_iter = ('vnf{:03}'.format(x) for x in count(0, step=3))
+class HeatContextTestCase(ut_base.BaseUnitTestCase):
def setUp(self):
self.test_context = heat.HeatContext()
self.addCleanup(self._remove_contexts)
- self.mock_context = mock.Mock(spec=heat.HeatContext())
- def _remove_contexts(self):
- if self.test_context in self.test_context.list:
- self.test_context._delete_context()
+ @staticmethod
+ def _remove_contexts():
+ for context in base.Context.list:
+ context._delete_context()
+ base.Context.list = []
def test___init__(self):
self.assertIsNone(self.test_context._name)
@@ -229,12 +225,12 @@ class HeatContextTestCase(unittest.TestCase):
self.assertRaises(y_exc.HeatTemplateError,
self.test_context.deploy)
- mock_path_exists.assert_called_once()
+ mock_path_exists.assert_called()
mock_resources_template.assert_called_once()
@mock.patch.object(os.path, 'exists', return_value=False)
@mock.patch.object(ssh.SSH, 'gen_keys')
- @mock.patch('yardstick.benchmark.contexts.heat.HeatTemplate')
+ @mock.patch.object(heat, 'HeatTemplate')
def test_deploy(self, mock_template, mock_genkeys, mock_path_exists):
self.test_context._name = 'foo'
self.test_context._task_id = '1234567890'
@@ -245,16 +241,17 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context.get_neutron_info = mock.MagicMock()
self.test_context.deploy()
- mock_template.assert_called_with('foo-12345678',
- '/bar/baz/some-heat-file',
- {'image': 'cirros'})
+ mock_template.assert_called_with(
+ 'foo-12345678', template_file='/bar/baz/some-heat-file',
+ heat_parameters={'image': 'cirros'},
+ os_cloud_config=self.test_context._flags.os_cloud_config)
self.assertIsNotNone(self.test_context.stack)
key_filename = ''.join(
[consts.YARDSTICK_ROOT_PATH,
'yardstick/resources/files/yardstick_key-',
self.test_context._name_task_id])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_once_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
@mock.patch.object(heat, 'HeatTemplate')
@mock.patch.object(os.path, 'exists', return_value=False)
@@ -280,7 +277,7 @@ class HeatContextTestCase(unittest.TestCase):
'yardstick/resources/files/yardstick_key-',
self.test_context._name])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_once_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
@mock.patch.object(heat, 'HeatTemplate')
@mock.patch.object(os.path, 'exists', return_value=False)
@@ -296,7 +293,6 @@ class HeatContextTestCase(unittest.TestCase):
self.test_context._flags.no_setup = True
self.test_context.template_file = '/bar/baz/some-heat-file'
self.test_context.get_neutron_info = mock.MagicMock()
-
self.test_context.deploy()
mock_retrieve_stack.assert_called_once_with(self.test_context._name)
@@ -306,7 +302,7 @@ class HeatContextTestCase(unittest.TestCase):
'yardstick/resources/files/yardstick_key-',
self.test_context._name])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_once_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
@mock.patch.object(heat, 'HeatTemplate', return_value='heat_template')
@mock.patch.object(heat.HeatContext, '_add_resources_to_template')
@@ -334,7 +330,7 @@ class HeatContextTestCase(unittest.TestCase):
'yardstick/resources/files/yardstick_key-',
self.test_context._name_task_id])
mock_genkeys.assert_called_once_with(key_filename)
- mock_path_exists.assert_called_with(key_filename)
+ mock_path_exists.assert_any_call(key_filename)
mock_call_gen_keys = mock.call.gen_keys(key_filename)
mock_call_add_resources = (
@@ -658,6 +654,7 @@ class HeatContextTestCase(unittest.TestCase):
baz3_server.public_ip = None
baz3_server.context.user = 'zab'
+ self.mock_context = mock.Mock(spec=heat.HeatContext())
self.mock_context._name = 'bar1'
self.test_context.stack = mock.Mock()
self.mock_context.stack.outputs = {
diff --git a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
index 4dd9d40d1..0e11a53e1 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -10,6 +10,7 @@
import mock
import unittest
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import kubernetes
@@ -43,9 +44,11 @@ class KubernetesTestCase(unittest.TestCase):
self.addCleanup(self._remove_contexts)
self.k8s_context.init(context_cfg)
- def _remove_contexts(self):
- if self.k8s_context in self.k8s_context.list:
- self.k8s_context._delete_context()
+ @staticmethod
+ def _remove_contexts():
+ for context in base.Context.list:
+ context._delete_context()
+ base.Context.list = []
@mock.patch.object(kubernetes.KubernetesContext, '_delete_services')
@mock.patch.object(kubernetes.KubernetesContext, '_delete_ssh_key')
diff --git a/yardstick/tests/unit/benchmark/contexts/test_node.py b/yardstick/tests/unit/benchmark/contexts/test_node.py
index 8b232481b..b67be3758 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_node.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_node.py
@@ -13,6 +13,7 @@ import errno
import mock
from yardstick.common import constants as consts
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import node
@@ -33,9 +34,11 @@ class NodeContextTestCase(unittest.TestCase):
'file': self._get_file_abspath(self.NODES_SAMPLE)
}
- def _remove_contexts(self):
- if self.test_context in self.test_context.list:
- self.test_context._delete_context()
+ @staticmethod
+ def _remove_contexts():
+ for context in base.Context.list:
+ context._delete_context()
+ base.Context.list = []
def _get_file_abspath(self, filename):
curr_path = os.path.dirname(os.path.abspath(__file__))
diff --git a/yardstick/tests/unit/benchmark/core/test_report.py b/yardstick/tests/unit/benchmark/core/test_report.py
index a684ad750..524302f92 100644
--- a/yardstick/tests/unit/benchmark/core/test_report.py
+++ b/yardstick/tests/unit/benchmark/core/test_report.py
@@ -42,16 +42,16 @@ class ReportTestCase(unittest.TestCase):
self.param.task_id = [FAKE_TASK_ID]
self.rep = report.Report()
- @mock.patch('yardstick.benchmark.core.report.Report._get_tasks')
- @mock.patch('yardstick.benchmark.core.report.Report._get_fieldkeys')
- @mock.patch('yardstick.benchmark.core.report.Report._validate')
+ @mock.patch.object(report.Report, '_get_tasks')
+ @mock.patch.object(report.Report, '_get_fieldkeys')
+ @mock.patch.object(report.Report, '_validate')
def test_generate_success(self, mock_valid, mock_keys, mock_tasks):
mock_tasks.return_value = FAKE_DB_TASK
mock_keys.return_value = FAKE_DB_FIELDKEYS
self.rep.generate(self.param)
mock_valid.assert_called_once_with(FAKE_YAML_NAME, FAKE_TASK_ID)
- self.assertEqual(1, mock_tasks.call_count)
- self.assertEqual(1, mock_keys.call_count)
+ mock_tasks.assert_called_once_with()
+ mock_keys.assert_called_once_with()
# pylint: disable=deprecated-method
def test_invalid_yaml_name(self):
diff --git a/yardstick/tests/unit/benchmark/core/test_task.py b/yardstick/tests/unit/benchmark/core/test_task.py
index 9e8e4e9f7..7468368df 100644
--- a/yardstick/tests/unit/benchmark/core/test_task.py
+++ b/yardstick/tests/unit/benchmark/core/test_task.py
@@ -17,6 +17,7 @@ import six
import unittest
import uuid
+from yardstick.benchmark.contexts import base
from yardstick.benchmark.contexts import dummy
from yardstick.benchmark.core import task
from yardstick.common import constants as consts
@@ -357,6 +358,12 @@ key2:
}
}
+ @staticmethod
+ def _remove_contexts():
+ for context in base.Context.list:
+ context._delete_context()
+ base.Context.list = []
+
def test__change_node_names(self):
ctx_attrs = {
@@ -371,6 +378,7 @@ key2:
}
my_context = dummy.DummyContext()
+ self.addCleanup(self._remove_contexts)
my_context.init(ctx_attrs)
expected_scenario = {
@@ -413,6 +421,7 @@ key2:
}
my_context = dummy.DummyContext()
+ self.addCleanup(self._remove_contexts)
my_context.init(ctx_attrs)
scenario = copy.deepcopy(self.scenario)
@@ -428,6 +437,7 @@ key2:
}
my_context = dummy.DummyContext()
+ self.addCleanup(self._remove_contexts)
my_context.init(ctx_attrs)
scenario = copy.deepcopy(self.scenario)
scenario['options'] = None
@@ -442,6 +452,7 @@ key2:
}
my_context = dummy.DummyContext()
+ self.addCleanup(self._remove_contexts)
my_context.init(ctx_attrs)
scenario = copy.deepcopy(self.scenario)
scenario['options']['server_name'] = None
diff --git a/yardstick/tests/unit/benchmark/runner/test_search.py b/yardstick/tests/unit/benchmark/runner/test_search.py
index 4e5b4fe77..d5d1b8ded 100644
--- a/yardstick/tests/unit/benchmark/runner/test_search.py
+++ b/yardstick/tests/unit/benchmark/runner/test_search.py
@@ -19,36 +19,33 @@ import unittest
from yardstick.benchmark.runners.search import SearchRunner
from yardstick.benchmark.runners.search import SearchRunnerHelper
+from yardstick.common import exceptions as y_exc
class TestSearchRunnerHelper(unittest.TestCase):
def test___call__(self):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
- benchmark = cls()
- method = getattr(benchmark, 'my_method')
+ benchmark = mock.Mock()
+ method = getattr(benchmark(), 'my_method')
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ benchmark, 'my_method', scenario_cfg, {}, mock.Mock())
with helper.get_benchmark_instance():
helper()
- self.assertEqual(method.call_count, 1)
+ method.assert_called_once()
def test___call___error(self):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
with self.assertRaises(RuntimeError):
helper()
@@ -56,8 +53,6 @@ class TestSearchRunnerHelper(unittest.TestCase):
@mock.patch.object(time, 'sleep')
@mock.patch.object(time, 'time')
def test_is_not_done(self, mock_time, *args):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
@@ -65,7 +60,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
mock_time.side_effect = range(1000)
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
index = -1
for index in helper.is_not_done():
@@ -76,8 +71,6 @@ class TestSearchRunnerHelper(unittest.TestCase):
@mock.patch.object(time, 'sleep')
def test_is_not_done_immediate_stop(self, *args):
- cls = mock.MagicMock()
- aborted = mock.MagicMock()
scenario_cfg = {
'runner': {
'run_step': '',
@@ -85,7 +78,7 @@ class TestSearchRunnerHelper(unittest.TestCase):
}
helper = SearchRunnerHelper(
- cls, 'my_method', scenario_cfg, {}, aborted)
+ mock.Mock(), 'my_method', scenario_cfg, {}, mock.Mock())
index = -1
for index in helper.is_not_done():
@@ -112,7 +105,7 @@ class TestSearchRunner(unittest.TestCase):
}
runner = SearchRunner({})
- runner.worker_helper = mock.MagicMock(side_effect=update)
+ runner.worker_helper = mock.Mock(side_effect=update)
self.assertFalse(runner._worker_run_once('sequence 1'))
@@ -136,51 +129,49 @@ class TestSearchRunner(unittest.TestCase):
}
runner = SearchRunner({})
- runner.worker_helper = mock.MagicMock(side_effect=update)
+ runner.worker_helper = mock.Mock(side_effect=update)
self.assertTrue(runner._worker_run_once('sequence 1'))
def test__worker_run_once_assertion_error_assert(self):
runner = SearchRunner({})
runner.sla_action = 'assert'
- runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+ runner.worker_helper = mock.Mock(side_effect=y_exc.SLAValidationError)
- with self.assertRaises(AssertionError):
+ with self.assertRaises(y_exc.SLAValidationError):
runner._worker_run_once('sequence 1')
def test__worker_run_once_assertion_error_monitor(self):
runner = SearchRunner({})
runner.sla_action = 'monitor'
- runner.worker_helper = mock.MagicMock(side_effect=AssertionError)
+ runner.worker_helper = mock.Mock(side_effect=y_exc.SLAValidationError)
self.assertFalse(runner._worker_run_once('sequence 1'))
def test__worker_run_once_non_assertion_error_none(self):
runner = SearchRunner({})
- runner.worker_helper = mock.MagicMock(side_effect=RuntimeError)
+ runner.worker_helper = mock.Mock(side_effect=RuntimeError)
self.assertTrue(runner._worker_run_once('sequence 1'))
def test__worker_run_once_non_assertion_error(self):
runner = SearchRunner({})
runner.sla_action = 'monitor'
- runner.worker_helper = mock.MagicMock(side_effect=RuntimeError)
+ runner.worker_helper = mock.Mock(side_effect=RuntimeError)
self.assertFalse(runner._worker_run_once('sequence 1'))
def test__worker_run(self):
- cls = mock.MagicMock()
scenario_cfg = {
'runner': {'interval': 0, 'timeout': 1},
}
runner = SearchRunner({})
- runner._worker_run_once = mock.MagicMock(side_effect=[0, 0, 1])
+ runner._worker_run_once = mock.Mock(side_effect=[0, 0, 1])
- runner._worker_run(cls, 'my_method', scenario_cfg, {})
+ runner._worker_run(mock.Mock(), 'my_method', scenario_cfg, {})
def test__worker_run_immediate_stop(self):
- cls = mock.MagicMock()
scenario_cfg = {
'runner': {
'run_step': '',
@@ -188,15 +179,14 @@ class TestSearchRunner(unittest.TestCase):
}
runner = SearchRunner({})
- runner._worker_run(cls, 'my_method', scenario_cfg, {})
+ runner._worker_run(mock.Mock(), 'my_method', scenario_cfg, {})
@mock.patch('yardstick.benchmark.runners.search.multiprocessing')
def test__run_benchmark(self, mock_multi_process):
- cls = mock.MagicMock()
scenario_cfg = {
'runner': {},
}
runner = SearchRunner({})
- runner._run_benchmark(cls, 'my_method', scenario_cfg, {})
- self.assertEqual(mock_multi_process.Process.call_count, 1)
+ runner._run_benchmark(mock.Mock(), 'my_method', scenario_cfg, {})
+ mock_multi_process.Process.assert_called_once()
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
index ce972779d..8d042c406 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_basemonitor.py
@@ -7,6 +7,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+import time
+
import mock
import unittest
@@ -86,13 +88,19 @@ class BaseMonitorTestCase(unittest.TestCase):
'sla': {'max_outage_time': 5}
}
+ def _close_queue(self, instance):
+ time.sleep(0.1)
+ instance._queue.close()
+
def test__basemonitor_start_wait_successful(self):
ins = basemonitor.BaseMonitor(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.start_monitor()
ins.wait_monitor()
def test__basemonitor_all_successful(self):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.setup()
ins.run()
ins.verify_SLA()
@@ -100,16 +108,12 @@ class BaseMonitorTestCase(unittest.TestCase):
@mock.patch.object(basemonitor, 'multiprocessing')
def test__basemonitor_func_false(self, mock_multiprocess):
ins = self.MonitorSimple(self.monitor_cfg, None, {"nova-api": 10})
+ self.addCleanup(self._close_queue, ins)
ins.setup()
mock_multiprocess.Event().is_set.return_value = False
ins.run()
ins.verify_SLA()
- # TODO(elfoley): fix this test to not throw an error
def test__basemonitor_getmonitorcls_successfule(self):
- cls = None
- try:
- cls = basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
- except Exception: # pylint: disable=broad-except
- pass
- self.assertIsNone(cls)
+ with self.assertRaises(RuntimeError):
+ basemonitor.BaseMonitor.get_monitor_cls(self.monitor_cfg)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
index 45840d569..cd065c961 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_scenario_general.py
@@ -11,10 +11,12 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability import scenario_general
+from yardstick.common import exceptions as y_exc
class ScenarioGeneralTestCase(unittest.TestCase):
- def setUp(self):
+ @mock.patch.object(scenario_general, 'Director')
+ def setUp(self, *args):
self.scenario_cfg = {
'type': "general_scenario",
'options': {
@@ -36,32 +38,36 @@ class ScenarioGeneralTestCase(unittest.TestCase):
}
}
self.instance = scenario_general.ScenarioGeneral(self.scenario_cfg, None)
-
- self._mock_director = mock.patch.object(scenario_general, 'Director')
- self.mock_director = self._mock_director.start()
- self.addCleanup(self._stop_mock)
-
- def _stop_mock(self):
- self._mock_director.stop()
+ self.instance.setup()
+ self.instance.director.verify.return_value = True
def test_scenario_general_all_successful(self):
- self.instance.setup()
- self.instance.run({})
+
+ ret = {}
+ self.instance.run(ret)
self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 1)
def test_scenario_general_exception(self):
- mock_obj = mock.Mock()
- mock_obj.createActionPlayer.side_effect = KeyError('Wrong')
- self.instance.director = mock_obj
+ self.instance.director.createActionPlayer.side_effect = KeyError('Wrong')
self.instance.director.data = {}
- self.instance.run({})
+ ret = {}
+ self.instance.run(ret)
self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 1)
def test_scenario_general_case_fail(self):
- mock_obj = mock.Mock()
- mock_obj.verify.return_value = False
- self.instance.director = mock_obj
+ self.instance.director.verify.return_value = False
self.instance.director.data = {}
- self.instance.run({})
- self.instance.pass_flag = True
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
+ self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 0)
+
+ def test_scenario_general_case_service_not_found_fail(self):
+ self.instance.director.verify.return_value = True
+ self.instance.director.data = {"general-attacker": 0}
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, self.instance.run, ret)
self.instance.teardown()
+ self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
index 6bb3ec63b..cf1e76d7a 100644
--- a/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
+++ b/yardstick/tests/unit/benchmark/scenarios/availability/test_serviceha.py
@@ -11,6 +11,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.availability import serviceha
+from yardstick.common import exceptions as y_exc
class ServicehaTestCase(unittest.TestCase):
@@ -60,15 +61,32 @@ class ServicehaTestCase(unittest.TestCase):
p.setup()
self.assertTrue(p.setup_done)
- # def test__serviceha_run_sla_error(self, mock_attacker, mock_monitor):
- # p = serviceha.ServiceHA(self.args, self.ctx)
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_run_sla_error(self, mock_monitor, *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+
+ p.setup()
+ self.assertEqual(p.setup_done, True)
+
+ mock_monitor.MonitorMgr().verify_SLA.return_value = False
+
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, p.run, ret)
+ self.assertEqual(ret['sla_pass'], 0)
- # p.setup()
- # self.assertEqual(p.setup_done, True)
+ @mock.patch.object(serviceha, 'baseattacker')
+ @mock.patch.object(serviceha, 'basemonitor')
+ def test__serviceha_run_service_not_found_sla_error(self, mock_monitor,
+ *args):
+ p = serviceha.ServiceHA(self.args, self.ctx)
+
+ p.setup()
+ self.assertTrue(p.setup_done)
+ p.data["kill-process"] = 0
- # result = {}
- # result["outage_time"] = 10
- # mock_monitor.Monitor().get_result.return_value = result
+ mock_monitor.MonitorMgr().verify_SLA.return_value = True
- # ret = {}
- # self.assertRaises(AssertionError, p.run, ret)
+ ret = {}
+ self.assertRaises(y_exc.SLAValidationError, p.run, ret)
+ self.assertEqual(ret['sla_pass'], 0)
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
index f24ec24ec..4fadde4dc 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_cyclictest.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import cyclictest
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
@@ -122,7 +123,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
@@ -136,7 +137,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
@@ -150,7 +151,7 @@ class CyclictestTestCase(unittest.TestCase):
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, c.run, result)
+ self.assertRaises(y_exc.SLAValidationError, c.run, result)
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
index 9640ce000..c4ac347f4 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_lmbench.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import lmbench
+from yardstick.common import exceptions as y_exc
# pylint: disable=unused-argument
@@ -144,7 +145,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '[{"latency": 37.5, "size": 0.00049}]'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, l.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
def test_unsuccessful_bandwidth_run_sla(self, mock_ssh):
@@ -162,7 +163,7 @@ class LmbenchTestCase(unittest.TestCase):
sample_output = '{"size(MB)": 0.262144, "bandwidth(MBps)": 9925.5}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, l.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, l.run, self.result)
def test_successful_latency_for_cache_run_sla(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
index 03003d01f..02040ca01 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_qemumigrate.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import qemu_migrate
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.qemu_migrate.ssh')
@@ -116,7 +117,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_sla_downtime(self, mock_ssh):
@@ -129,7 +130,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_sla_setuptime(self, mock_ssh):
@@ -142,7 +143,7 @@ class QemuMigrateTestCase(unittest.TestCase):
sample_output = '{"totaltime": 15, "downtime": 2, "setuptime": 1}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, q.run, result)
+ self.assertRaises(y_exc.SLAValidationError, q.run, result)
def test_qemu_migrate_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
index dcc0e810d..9e055befe 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_ramspeed.py
@@ -18,6 +18,7 @@ from oslo_serialization import jsonutils
from yardstick.common import utils
from yardstick.benchmark.scenarios.compute import ramspeed
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.ramspeed.ssh')
@@ -146,7 +147,7 @@ class RamspeedTestCase(unittest.TestCase):
"Block_size(kb)": 16384, "Bandwidth(MBps)": 14128.94}, {"Test_type":\
"INTEGER & WRITING", "Block_size(kb)": 32768, "Bandwidth(MBps)": 8340.85}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, r.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
def test_ramspeed_unsuccessful_script_error(self, mock_ssh):
options = {
@@ -219,7 +220,7 @@ class RamspeedTestCase(unittest.TestCase):
"Bandwidth(MBps)": 1300.27}, {"Test_type": "INTEGER AVERAGE:",\
"Bandwidth(MBps)": 2401.58}]}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, r.run, self.result)
+ self.assertRaises(y_exc.SLAValidationError, r.run, self.result)
def test_ramspeed_unsuccessful_unknown_type_run(self, mock_ssh):
options = {
diff --git a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
index 6339a2dcd..e4a8d6e26 100644
--- a/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
+++ b/yardstick/tests/unit/benchmark/scenarios/compute/test_unixbench.py
@@ -17,6 +17,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import unixbench
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.compute.unixbench.ssh')
@@ -122,7 +123,7 @@ class UnixbenchTestCase(unittest.TestCase):
sample_output = '{"single_score":"200.7","parallel_score":"4395.9"}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, u.run, result)
+ self.assertRaises(y_exc.SLAValidationError, u.run, result)
def test_unixbench_unsuccessful_sla_parallel_score(self, mock_ssh):
@@ -137,7 +138,7 @@ class UnixbenchTestCase(unittest.TestCase):
sample_output = '{"signle_score":"2251.7","parallel_score":"3395.9"}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, u.run, result)
+ self.assertRaises(y_exc.SLAValidationError, u.run, result)
def test_unixbench_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
index 2964ecc14..bb7fa4536 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_attach_volume.py
@@ -6,21 +6,51 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.attach_volume import AttachVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import attach_volume
class AttachVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.attach_server_volume')
- def test_attach_volume(self, mock_attach_server_volume):
- options = {
- 'volume_id': '123-456-000',
- 'server_id': '000-123-456'
- }
- args = {"options": options}
- obj = AttachVolume(args, {})
- obj.run({})
- mock_attach_server_volume.assert_called_once()
+ def setUp(self):
+
+ self._mock_attach_volume_to_server = mock.patch.object(
+ openstack_utils, 'attach_volume_to_server')
+ self.mock_attach_volume_to_server = (
+ self._mock_attach_volume_to_server.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(attach_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ _uuid = uuidutils.generate_uuid()
+ self.args = {'options': {'server_name_or_id': _uuid,
+ 'volume_name_or_id': _uuid}}
+ self.result = {}
+ self.addCleanup(self._stop_mock)
+ self.attachvol_obj = attach_volume.AttachVolume(self.args, mock.ANY)
+
+ def _stop_mock(self):
+ self._mock_attach_volume_to_server.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_attach_volume_to_server.return_value = True
+ self.assertIsNone(self.attachvol_obj.run(self.result))
+ self.assertEqual({'attach_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Attach volume to server successful!')
+
+ def test_run_fail(self):
+ self.mock_attach_volume_to_server.return_value = False
+ with self.assertRaises(exceptions.ScenarioAttachVolumeError):
+ self.attachvol_obj.run(self.result)
+ self.assertEqual({'attach_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with(
+ 'Attach volume to server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
index 639cf2906..aebd1dfe8 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_image.py
@@ -6,30 +6,50 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
+
import mock
+from oslo_utils import uuidutils
+import unittest
-from yardstick.benchmark.scenarios.lib import create_image
from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_image
+
-# NOTE(elfoley): There should be more tests here.
class CreateImageTestCase(unittest.TestCase):
- @mock.patch.object(openstack_utils, 'create_image')
- @mock.patch.object(openstack_utils, 'get_glance_client')
- def test_create_image(self, mock_get_glance_client, mock_create_image):
- options = {
- 'image_name': 'yardstick_test_image_01',
- 'disk_format': 'qcow2',
- 'container_format': 'bare',
- 'min_disk': '1',
- 'min_ram': '512',
- 'protected': 'False',
- 'tags': '["yardstick automatic test image"]',
- 'file_path': '/home/opnfv/images/cirros-0.3.5-x86_64-disk.img'
- }
- args = {"options": options}
- obj = create_image.CreateImage(args, {})
- obj.run({})
- mock_create_image.assert_called_once()
- mock_get_glance_client.assert_called_once()
+ def setUp(self):
+ self._mock_create_image = mock.patch.object(
+ openstack_utils, 'create_image')
+ self.mock_create_image = (
+ self._mock_create_image.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_image, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'image_name': 'yardstick_image'}}
+ self.result = {}
+ self.cimage_obj = create_image.CreateImage(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_create_image.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.cimage_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_image.return_value = _uuid
+ output = self.cimage_obj.run(self.result)
+ self.assertEqual({'image_create': 1}, self.result)
+ self.assertEqual({'id': _uuid}, output)
+ self.mock_log.info.assert_called_once_with('Create image successful!')
+
+ def test_run_fail(self):
+ self.mock_create_image.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateImageError):
+ self.cimage_obj.run(self.result)
+ self.assertEqual({'image_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create image failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
index 1c3d6cebc..a7b683f47 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_keypair.py
@@ -6,22 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-
-import mock
+from oslo_utils import uuidutils
import unittest
+import mock
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios.lib import create_keypair
class CreateKeypairTestCase(unittest.TestCase):
- @mock.patch.object(create_keypair, 'paramiko')
- @mock.patch.object(create_keypair, 'op_utils')
- def test_create_keypair(self, mock_op_utils, *args):
- options = {
- 'key_name': 'yardstick_key',
- 'key_path': '/tmp/yardstick_key'
- }
- args = {"options": options}
- obj = create_keypair.CreateKeypair(args, {})
- obj.run({})
- mock_op_utils.create_keypair.assert_called_once()
+
+ def setUp(self):
+
+ self._mock_create_keypair = mock.patch.object(
+ openstack_utils, 'create_keypair')
+ self.mock_create_keypair = (
+ self._mock_create_keypair.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_keypair, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'key_name': 'yardstick_key'}}
+ self.result = {}
+
+ self.ckeypair_obj = create_keypair.CreateKeypair(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_create_keypair.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.ckeypair_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_keypair.return_value = {
+ 'name': 'key-name', 'type': 'ssh', 'id': _uuid}
+ output = self.ckeypair_obj.run(self.result)
+ self.assertDictEqual({'keypair_create': 1}, self.result)
+ self.assertDictEqual({'id': _uuid}, output)
+ self.mock_log.info.assert_called_once_with('Create keypair successful!')
+
+ def test_run_fail(self):
+ self.mock_create_keypair.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateKeypairError):
+ self.ckeypair_obj.run(self.result)
+ self.assertDictEqual({'keypair_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create keypair failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
index 21158ab17..0477a49d4 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_sec_group.py
@@ -6,25 +6,54 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.create_sec_group import CreateSecgroup
-
-
-class CreateSecGroupTestCase(unittest.TestCase):
-
- @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
- @mock.patch('yardstick.common.openstack_utils.create_security_group_full')
- def test_create_sec_group(self, mock_get_neutron_client, mock_create_security_group_full):
- options = {
- 'openstack_paras': {
- 'sg_name': 'yardstick_sec_group',
- 'description': 'security group for yardstick manual VM'
- }
- }
- args = {"options": options}
- obj = CreateSecgroup(args, {})
- obj.run({})
- mock_get_neutron_client.assert_called_once()
- mock_create_security_group_full.assert_called_once()
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_sec_group
+
+
+class CreateSecurityGroupTestCase(unittest.TestCase):
+
+ def setUp(self):
+
+ self._mock_create_security_group_full = mock.patch.object(
+ openstack_utils, 'create_security_group_full')
+ self.mock_create_security_group_full = (
+ self._mock_create_security_group_full.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_sec_group, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'sg_name': 'yardstick_sg'}}
+ self.result = {}
+
+ self.csecgp_obj = create_sec_group.CreateSecgroup(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_create_security_group_full.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.csecgp_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_security_group_full.return_value = _uuid
+ output = self.csecgp_obj.run(self.result)
+ self.assertEqual({'sg_create': 1}, self.result)
+ self.assertEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with(
+            'Create security group successful!')
+
+ def test_run_fail(self):
+ self.mock_create_security_group_full.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateSecurityGroupError):
+ self.csecgp_obj.run(self.result)
+ self.assertEqual({'sg_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with(
+ 'Create security group failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
index 9d6d8cb1b..b58785112 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_server.py
@@ -6,29 +6,54 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.create_server import CreateServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import create_server
class CreateServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.create_instance_and_wait_for_active')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
- @mock.patch('yardstick.common.openstack_utils.get_neutron_client')
- def test_create_server(self, mock_get_nova_client, mock_get_neutron_client,
- mock_get_glance_client, mock_create_instance_and_wait_for_active):
- scenario_cfg = {
- 'options': {
- 'openstack_paras': 'example'
- },
- 'output': 'server'
- }
- obj = CreateServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_get_glance_client.assert_called_once()
- mock_get_neutron_client.assert_called_once()
- mock_create_instance_and_wait_for_active.assert_called_once()
+ def setUp(self):
+
+ self._mock_create_instance_and_wait_for_active = mock.patch.object(
+ openstack_utils, 'create_instance_and_wait_for_active')
+ self.mock_create_instance_and_wait_for_active = (
+ self._mock_create_instance_and_wait_for_active.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {
+ 'options': {'name': 'server-name', 'image': 'image-name',
+ 'flavor': 'flavor-name'}}
+ self.result = {}
+
+ self.addCleanup(self._stop_mock)
+ self.cserver_obj = create_server.CreateServer(self.args, mock.ANY)
+
+ def _stop_mock(self):
+ self._mock_create_instance_and_wait_for_active.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.cserver_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_instance_and_wait_for_active.return_value = (
+ {'name': 'server-name', 'flavor': 'flavor-name', 'id': _uuid})
+ output = self.cserver_obj.run(self.result)
+ self.assertEqual({'instance_create': 1}, self.result)
+ self.assertEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with('Create server successful!')
+
+ def test_run_fail(self):
+ self.mock_create_instance_and_wait_for_active.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateServerError):
+ self.cserver_obj.run(self.result)
+ self.assertEqual({'instance_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
index 30333dda8..f91d2c3f4 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_create_volume.py
@@ -6,95 +6,53 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import mock
+from oslo_utils import uuidutils
import unittest
+import mock
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios.lib import create_volume
class CreateVolumeTestCase(unittest.TestCase):
def setUp(self):
- self._mock_cinder_client = mock.patch(
- 'yardstick.common.openstack_utils.get_cinder_client')
- self.mock_cinder_client = self._mock_cinder_client.start()
- self._mock_glance_client = mock.patch(
- 'yardstick.common.openstack_utils.get_glance_client')
- self.mock_glance_client = self._mock_glance_client.start()
- self.addCleanup(self._stop_mock)
-
- self.scenario_cfg = {
- "options" :
- {
- 'volume_name': 'yardstick_test_volume_01',
- 'size': '256',
- 'image': 'cirros-0.3.5'
- }
- }
- self.scenario = create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={})
+ self._mock_create_volume = mock.patch.object(
+ openstack_utils, 'create_volume')
+ self.mock_create_volume = (
+ self._mock_create_volume.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(create_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'size_gb': 1}}
+ self.result = {}
+
+ self.cvolume_obj = create_volume.CreateVolume(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
def _stop_mock(self):
- self._mock_cinder_client.stop()
- self._mock_glance_client.stop()
-
- def test_init(self):
- self.mock_cinder_client.return_value = "All volumes are equal"
- self.mock_glance_client.return_value = "Images are more equal"
-
- expected_vol_name = self.scenario_cfg["options"]["volume_name"]
- expected_vol_size = self.scenario_cfg["options"]["size"]
- expected_im_name = self.scenario_cfg["options"]["image"]
- expected_im_id = None
-
- scenario = create_volume.CreateVolume(
- scenario_cfg=self.scenario_cfg,
- context_cfg={})
-
- self.assertEqual(expected_vol_name, scenario.volume_name)
- self.assertEqual(expected_vol_size, scenario.volume_size)
- self.assertEqual(expected_im_name, scenario.image_name)
- self.assertEqual(expected_im_id, scenario.image_id)
- self.assertEqual("All volumes are equal", scenario.cinder_client)
- self.assertEqual("Images are more equal", scenario.glance_client)
-
- def test_setup(self):
- self.assertFalse(self.scenario.setup_done)
- self.scenario.setup()
- self.assertTrue(self.scenario.setup_done)
-
- @mock.patch('yardstick.common.openstack_utils.create_volume')
- @mock.patch('yardstick.common.openstack_utils.get_image_id')
- def test_run(self, mock_image_id, mock_create_volume):
- self.scenario.run()
-
- mock_image_id.assert_called_once()
- mock_create_volume.assert_called_once()
-
- @mock.patch.object(create_volume.CreateVolume, 'setup')
- def test_run_no_setup(self, scenario_setup):
- self.scenario.setup_done = False
- self.scenario.run()
- scenario_setup.assert_called_once()
-
- @mock.patch('yardstick.common.openstack_utils.create_volume')
- @mock.patch('yardstick.common.openstack_utils.get_image_id')
- @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
- def test_create_volume(self, mock_get_glance_client,
- mock_get_cinder_client, mock_image_id,
- mock_create_volume):
- options = {
- 'volume_name': 'yardstick_test_volume_01',
- 'size': '256',
- 'image': 'cirros-0.3.5'
- }
- args = {"options": options}
- scenario = create_volume.CreateVolume(args, {})
- scenario.run()
- mock_create_volume.assert_called_once()
- mock_image_id.assert_called_once()
- mock_get_glance_client.assert_called_once()
- mock_get_cinder_client.assert_called_once()
+ self._mock_create_volume.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.cvolume_obj.scenario_cfg = {'output': 'id'}
+ self.mock_create_volume.return_value = {'name': 'yardstick_volume',
+ 'id': _uuid,
+ 'status': 'available'}
+ output = self.cvolume_obj.run(self.result)
+ self.assertDictEqual({'volume_create': 1}, self.result)
+ self.assertDictEqual({'id': _uuid}, output)
+        self.mock_log.info.assert_called_once_with('Create volume successful!')
+
+ def test_run_fail(self):
+ self.mock_create_volume.return_value = None
+ with self.assertRaises(exceptions.ScenarioCreateVolumeError):
+ self.cvolume_obj.run(self.result)
+ self.assertDictEqual({'volume_create': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Create volume failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
index e382d46fa..8a1d6d695 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_image.py
@@ -9,21 +9,44 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_image import DeleteImage
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_image
class DeleteImageTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.delete_image')
- @mock.patch('yardstick.common.openstack_utils.get_image_id')
- @mock.patch('yardstick.common.openstack_utils.get_glance_client')
- def test_delete_image(self, mock_get_glance_client, mock_image_id, mock_delete_image):
- options = {
- 'image_name': 'yardstick_test_image_01'
- }
- args = {"options": options}
- obj = DeleteImage(args, {})
- obj.run({})
- mock_delete_image.assert_called_once()
- mock_image_id.assert_called_once()
- mock_get_glance_client.assert_called_once()
+ def setUp(self):
+ self._mock_delete_image = mock.patch.object(
+ openstack_utils, 'delete_image')
+ self.mock_delete_image = (
+ self._mock_delete_image.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_image, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_image'}}
+ self.result = {}
+
+ self.delimg_obj = delete_image.DeleteImage(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_image.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_image.return_value = True
+ self.assertIsNone(self.delimg_obj.run(self.result))
+ self.assertEqual({'delete_image': 1}, self.result)
+ self.mock_log.info.assert_called_once_with('Delete image successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_image.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteImageError):
+ self.delimg_obj.run(self.result)
+ self.assertEqual({'delete_image': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Delete image failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
index 6e790ba90..c7940251e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_keypair.py
@@ -9,19 +9,43 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_keypair import DeleteKeypair
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_keypair
class DeleteKeypairTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- @mock.patch('yardstick.common.openstack_utils.delete_keypair')
- def test_detach_volume(self, mock_get_nova_client, mock_delete_keypair):
- options = {
- 'key_name': 'yardstick_key'
- }
- args = {"options": options}
- obj = DeleteKeypair(args, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_delete_keypair.assert_called_once()
+ def setUp(self):
+ self._mock_delete_keypair = mock.patch.object(
+ openstack_utils, 'delete_keypair')
+ self.mock_delete_keypair = self._mock_delete_keypair.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_keypair, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'key_name': 'yardstick_key'}}
+ self.result = {}
+ self.delkey_obj = delete_keypair.DeleteKeypair(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_keypair.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_keypair.return_value = True
+ self.assertIsNone(self.delkey_obj.run(self.result))
+ self.assertEqual({'delete_keypair': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Delete keypair successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_keypair.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteKeypairError):
+ self.delkey_obj.run(self.result)
+ self.assertEqual({'delete_keypair': 0}, self.result)
+ self.mock_log.error.assert_called_once_with("Delete keypair failed!")
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py
index aef99ee94..b6dbf4791 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_network.py
@@ -11,7 +11,8 @@ from oslo_utils import uuidutils
import unittest
import mock
-import yardstick.common.openstack_utils as op_utils
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
from yardstick.benchmark.scenarios.lib import delete_network
@@ -19,16 +20,17 @@ class DeleteNetworkTestCase(unittest.TestCase):
def setUp(self):
self._mock_delete_neutron_net = mock.patch.object(
- op_utils, 'delete_neutron_net')
+ openstack_utils, "delete_neutron_net")
self.mock_delete_neutron_net = self._mock_delete_neutron_net.start()
self._mock_get_shade_client = mock.patch.object(
- op_utils, 'get_shade_client')
+ openstack_utils, "get_shade_client")
self.mock_get_shade_client = self._mock_get_shade_client.start()
- self._mock_log = mock.patch.object(delete_network, 'LOG')
+ self._mock_log = mock.patch.object(delete_network, "LOG")
self.mock_log = self._mock_log.start()
- _uuid = uuidutils.generate_uuid()
- self.args = {'options': {'network_id': _uuid}}
- self._del_obj = delete_network.DeleteNetwork(self.args, mock.ANY)
+ self.args = {"options": {"network_name_or_id": (
+ uuidutils.generate_uuid())}}
+ self.result = {}
+ self.del_obj = delete_network.DeleteNetwork(self.args, mock.ANY)
self.addCleanup(self._stop_mock)
@@ -39,11 +41,14 @@ class DeleteNetworkTestCase(unittest.TestCase):
def test_run(self):
self.mock_delete_neutron_net.return_value = True
- self.assertTrue(self._del_obj.run({}))
+ self.assertIsNone(self.del_obj.run(self.result))
+ self.assertEqual({"delete_network": 1}, self.result)
self.mock_log.info.assert_called_once_with(
"Delete network successful!")
def test_run_fail(self):
self.mock_delete_neutron_net.return_value = False
- self.assertFalse(self._del_obj.run({}))
+ with self.assertRaises(exceptions.ScenarioDeleteNetworkError):
+ self.del_obj.run(self.result)
+ self.assertEqual({"delete_network": 0}, self.result)
self.mock_log.error.assert_called_once_with("Delete network failed!")
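The delete_* tests from here on assert a common run() contract: the scenario records a 0/1 counter in the result dict and raises a scenario-specific exception when the underlying shade call reports failure. The following sketch is inferred purely from those assertions, with hypothetical names; it is not the yardstick scenario implementation:

class ScenarioDeleteNetworkError(Exception):
    """Stand-in for the exception the test expects on failure."""


class DeleteNetworkSketch(object):
    """Hypothetical scenario mirroring what the tests above assert."""

    def __init__(self, options, shade_client, utils):
        self._options = options
        self._client = shade_client
        self._utils = utils

    def run(self, result):
        ok = self._utils.delete_neutron_net(
            self._client, self._options['network_name_or_id'])
        if not ok:
            # counter is written before raising, matching test_run_fail
            result.update({'delete_network': 0})
            raise ScenarioDeleteNetworkError('Delete network failed!')
        # success path returns None, matching assertIsNone in test_run
        result.update({'delete_network': 1})


class _FakeUtils(object):
    @staticmethod
    def delete_neutron_net(client, name_or_id):
        return True


result = {}
DeleteNetworkSketch(
    {'network_name_or_id': 'net-1'}, None, _FakeUtils).run(result)
assert result == {'delete_network': 1}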
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
index eee565de7..55fe53df8 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_server.py
@@ -6,22 +6,49 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_server import DeleteServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_server
class DeleteServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.delete_instance')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_delete_server(self, mock_get_nova_client, mock_delete_instance):
- options = {
- 'server_id': '1234-4567-0000'
- }
- args = {"options": options}
- obj = DeleteServer(args, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_delete_instance.assert_called_once()
+ def setUp(self):
+ self._mock_delete_instance = mock.patch.object(
+ openstack_utils, 'delete_instance')
+ self.mock_delete_instance = (
+ self._mock_delete_instance.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+        self.args = {'options': {
+            'name_or_id': uuidutils.generate_uuid()}}
+ self.result = {}
+
+ self.delserver_obj = delete_server.DeleteServer(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_instance.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_instance.return_value = True
+ self.assertIsNone(self.delserver_obj.run(self.result))
+ self.assertEqual({'delete_server': 1}, self.result)
+ self.mock_log.info.assert_called_once_with('Delete server successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_instance.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteServerError):
+ self.delserver_obj.run(self.result)
+ self.assertEqual({'delete_server': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Delete server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
index 93f76e819..0db16f396 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_delete_volume.py
@@ -9,19 +9,44 @@
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.delete_volume import DeleteVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import delete_volume
class DeleteVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_cinder_client')
- @mock.patch('yardstick.common.openstack_utils.delete_volume')
- def test_delete_volume(self, mock_get_cinder_client, mock_delete_volume):
- options = {
- 'volume_id': '123-123-123'
- }
- args = {"options": options}
- obj = DeleteVolume(args, {})
- obj.run({})
- mock_get_cinder_client.assert_called_once()
- mock_delete_volume.assert_called_once()
+ def setUp(self):
+ self._mock_delete_volume = mock.patch.object(
+ openstack_utils, 'delete_volume')
+ self.mock_delete_volume = (
+ self._mock_delete_volume.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(delete_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_volume'}}
+ self.result = {}
+
+ self.delvol_obj = delete_volume.DeleteVolume(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_delete_volume.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_delete_volume.return_value = True
+ self.assertIsNone(self.delvol_obj.run(self.result))
+ self.assertEqual({'delete_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with('Delete volume successful!')
+
+ def test_run_fail(self):
+ self.mock_delete_volume.return_value = False
+ with self.assertRaises(exceptions.ScenarioDeleteVolumeError):
+ self.delvol_obj.run(self.result)
+ self.assertEqual({'delete_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Delete volume failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
index 9794d2129..2bc57f495 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_detach_volume.py
@@ -6,21 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.detach_volume import DetachVolume
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import detach_volume
class DetachVolumeTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.detach_volume')
- def test_detach_volume(self, mock_detach_volume):
- options = {
- 'server_id': '321-321-321',
- 'volume_id': '123-123-123'
- }
- args = {"options": options}
- obj = DetachVolume(args, {})
- obj.run({})
- mock_detach_volume.assert_called_once()
+ def setUp(self):
+ self._mock_detach_volume = mock.patch.object(
+ openstack_utils, 'detach_volume')
+ self.mock_detach_volume = (
+ self._mock_detach_volume.start())
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(detach_volume, 'LOG')
+ self.mock_log = self._mock_log.start()
+ _uuid = uuidutils.generate_uuid()
+ self.args = {'options': {'server_name_or_id': _uuid,
+ 'volume_name_or_id': _uuid}}
+ self.result = {}
+
+ self.detachvol_obj = detach_volume.DetachVolume(self.args, mock.ANY)
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_detach_volume.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ self.mock_detach_volume.return_value = True
+ self.assertIsNone(self.detachvol_obj.run(self.result))
+ self.assertEqual({'detach_volume': 1}, self.result)
+ self.mock_log.info.assert_called_once_with(
+ 'Detach volume from server successful!')
+
+ def test_run_fail(self):
+ self.mock_detach_volume.return_value = False
+ with self.assertRaises(exceptions.ScenarioDetachVolumeError):
+ self.detachvol_obj.run(self.result)
+ self.assertEqual({'detach_volume': 0}, self.result)
+ self.mock_log.error.assert_called_once_with(
+ 'Detach volume from server failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
index 15a6f7c8f..1c1364348 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_flavor.py
@@ -6,20 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.get_flavor import GetFlavor
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_flavor
class GetFlavorTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_flavor_by_name')
- def test_get_flavor(self, mock_get_flavor_by_name):
- options = {
- 'flavor_name': 'yardstick_test_flavor'
- }
- args = {"options": options}
- obj = GetFlavor(args, {})
- obj.run({})
- mock_get_flavor_by_name.assert_called_once()
+ def setUp(self):
+
+ self._mock_get_flavor = mock.patch.object(
+ openstack_utils, 'get_flavor')
+ self.mock_get_flavor = self._mock_get_flavor.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(get_flavor, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_flavor'}}
+ self.result = {}
+
+ self.getflavor_obj = get_flavor.GetFlavor(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_flavor.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.getflavor_obj.scenario_cfg = {'output': 'flavor'}
+ self.mock_get_flavor.return_value = (
+ {'name': 'flavor-name', 'id': _uuid})
+ output = self.getflavor_obj.run(self.result)
+ self.assertDictEqual({'get_flavor': 1}, self.result)
+ self.assertDictEqual({'flavor': {'name': 'flavor-name', 'id': _uuid}},
+ output)
+        self.mock_log.info.assert_called_once_with('Get flavor successful!')
+
+ def test_run_fail(self):
+ self.mock_get_flavor.return_value = None
+ with self.assertRaises(exceptions.ScenarioGetFlavorError):
+ self.getflavor_obj.run(self.result)
+ self.assertDictEqual({'get_flavor': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Get flavor failed!')
diff --git a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
index 83ec903bc..5b5329cb0 100644
--- a/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
+++ b/yardstick/tests/unit/benchmark/scenarios/lib/test_get_server.py
@@ -6,37 +6,52 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+from oslo_utils import uuidutils
import unittest
import mock
-from yardstick.benchmark.scenarios.lib.get_server import GetServer
+from yardstick.common import openstack_utils
+from yardstick.common import exceptions
+from yardstick.benchmark.scenarios.lib import get_server
class GetServerTestCase(unittest.TestCase):
- @mock.patch('yardstick.common.openstack_utils.get_server_by_name')
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_get_server_with_name(self, mock_get_nova_client, mock_get_server_by_name):
- scenario_cfg = {
- 'options': {
- 'server_name': 'yardstick_server'
- },
- 'output': 'status server'
- }
- obj = GetServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called_once()
- mock_get_server_by_name.assert_called_once()
-
- @mock.patch('yardstick.common.openstack_utils.get_nova_client')
- def test_get_server_with_id(self, mock_get_nova_client):
- scenario_cfg = {
- 'options': {
- 'server_id': '1'
- },
- 'output': 'status server'
- }
- mock_get_nova_client().servers.get.return_value = None
- obj = GetServer(scenario_cfg, {})
- obj.run({})
- mock_get_nova_client.assert_called()
+ def setUp(self):
+
+ self._mock_get_server = mock.patch.object(
+ openstack_utils, 'get_server')
+ self.mock_get_server = self._mock_get_server.start()
+ self._mock_get_shade_client = mock.patch.object(
+ openstack_utils, 'get_shade_client')
+ self.mock_get_shade_client = self._mock_get_shade_client.start()
+ self._mock_log = mock.patch.object(get_server, 'LOG')
+ self.mock_log = self._mock_log.start()
+ self.args = {'options': {'name_or_id': 'yardstick_key'}}
+ self.result = {}
+
+ self.getserver_obj = get_server.GetServer(self.args, mock.ANY)
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_get_server.stop()
+ self._mock_get_shade_client.stop()
+ self._mock_log.stop()
+
+ def test_run(self):
+ _uuid = uuidutils.generate_uuid()
+ self.getserver_obj.scenario_cfg = {'output': 'server'}
+ self.mock_get_server.return_value = (
+ {'name': 'server-name', 'id': _uuid})
+ output = self.getserver_obj.run(self.result)
+ self.assertDictEqual({'get_server': 1}, self.result)
+ self.assertDictEqual({'server': {'name': 'server-name', 'id': _uuid}},
+ output)
+        self.mock_log.info.assert_called_once_with('Get Server successful!')
+
+ def test_run_fail(self):
+ self.mock_get_server.return_value = None
+ with self.assertRaises(exceptions.ScenarioGetServerError):
+ self.getserver_obj.run(self.result)
+ self.assertDictEqual({'get_server': 0}, self.result)
+ self.mock_log.error.assert_called_once_with('Get Server failed!')
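Several of the create_*/get_* tests above set scenario_cfg = {'output': ...} and expect run() to return a dict keyed by those output names. A standalone illustration of that mapping; the helper name here is hypothetical, not the yardstick base-class API:

def push_to_outputs(output_spec, *values):
    """Pair the space-separated names in 'output' with the produced values."""
    keys = output_spec.split()
    return dict(zip(keys, values))


keypair = {'name': 'key-name', 'type': 'ssh', 'id': 'fake-uuid'}
# 'output': 'id' -> a single-key result, as asserted in test_run above
assert push_to_outputs('id', keypair['id']) == {'id': 'fake-uuid'}
# 'output': 'status server' -> two named outputs, as in the removed test
assert push_to_outputs('status server', 'ACTIVE', keypair) == {
    'status': 'ACTIVE', 'server': keypair}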
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
index 74144afd5..2190e9337 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_iperf3.py
@@ -19,6 +19,7 @@ from oslo_serialization import jsonutils
from yardstick.common import utils
from yardstick.benchmark.scenarios.networking import iperf3
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.iperf3.ssh')
@@ -118,7 +119,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_tcp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_iperf_successful_sla_jitter(self, mock_ssh):
options = {"protocol": "udp", "bandwidth": "20m"}
@@ -152,7 +153,7 @@ class IperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.output_name_udp)
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_iperf_successful_tcp_protocal(self, mock_ssh):
options = {"protocol": "tcp", "nodelay": "yes"}
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
index 5907562c2..a7abcd98a 100755
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf.py
@@ -18,6 +18,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.netperf.ssh')
@@ -98,7 +99,7 @@ class NetperfTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_netperf_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
index 956a9c078..a577dba59 100755
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py
@@ -19,6 +19,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import netperf_node
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
@@ -98,7 +99,7 @@ class NetperfNodeTestCase(unittest.TestCase):
sample_output = self._read_sample_output()
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
index 4adfab120..559e0599e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping.py
@@ -14,6 +14,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import ping
+from yardstick.common import exceptions as y_exc
class PingTestCase(unittest.TestCase):
@@ -74,7 +75,7 @@ class PingTestCase(unittest.TestCase):
p = ping.Ping(args, self.ctx)
mock_ssh.SSH.from_node().execute.return_value = (0, '100', '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
index 4662c8537..ad5217a14 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_ping6.py
@@ -14,6 +14,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import ping6
+from yardstick.common import exceptions as y_exc
class PingTestCase(unittest.TestCase):
@@ -98,7 +99,7 @@ class PingTestCase(unittest.TestCase):
p = ping6.Ping6(args, self.ctx)
p.client = mock_ssh.SSH.from_node()
mock_ssh.SSH.from_node().execute.side_effect = [(0, 'host1', ''), (0, 100, '')]
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
@mock.patch('yardstick.benchmark.scenarios.networking.ping6.ssh')
def test_ping_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
index 6aea03aee..ea0deab3e 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen.py
@@ -13,6 +13,7 @@ import unittest
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.networking import pktgen
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.networking.pktgen.ssh')
@@ -176,7 +177,7 @@ class PktgenTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "packetsize": 60, "flows": 110}'
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
index 976087148..b141591f7 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk.py
@@ -12,6 +12,7 @@ import unittest
import yardstick.common.utils as utils
from yardstick.benchmark.scenarios.networking import pktgen_dpdk
+from yardstick.common import exceptions as y_exc
class PktgenDPDKLatencyTestCase(unittest.TestCase):
@@ -162,7 +163,7 @@ class PktgenDPDKLatencyTestCase(unittest.TestCase):
sample_output = '100\n110\n112\n130\n149\n150\n90\n150\n200\n162\n'
self.mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_dpdk_unsuccessful_script_error(self):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
index e90fb07c7..39392e4bb 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_pktgen_dpdk_throughput.py
@@ -16,6 +16,7 @@ from oslo_serialization import jsonutils
import mock
from yardstick.benchmark.scenarios.networking import pktgen_dpdk_throughput
+from yardstick.common import exceptions as y_exc
# pylint: disable=unused-argument
@@ -131,7 +132,7 @@ class PktgenDPDKTestCase(unittest.TestCase):
sample_output = '{"packets_per_second": 9753, "errors": 0, \
"packets_sent": 149776, "flows": 110}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_pktgen_dpdk_throughput_unsuccessful_script_error(
self, mock_ssh):
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 9bfbf0752..bb1a7aaca 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -20,11 +20,11 @@ import mock
import unittest
from yardstick import tests
+from yardstick.common import exceptions
from yardstick.common import utils
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.traffic_profile import base
from yardstick.network_services.vnf_generic import vnfdgen
-from yardstick.error import IncorrectConfig
from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
@@ -423,7 +423,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
with mock.patch.dict(sys.modules, tests.STL_MOCKS):
self.assertIsNotNone(self.s.get_vnf_impl(vnfd))
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s.get_vnf_impl('NonExistentClass')
exc_str = str(raised.exception)
@@ -465,7 +465,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
cfg_patch = mock.patch.object(self.s, 'context_cfg', cfg)
with cfg_patch:
- with self.assertRaises(IncorrectConfig):
+ with self.assertRaises(exceptions.IncorrectConfig):
self.s.map_topology_to_infrastructure()
def test_map_topology_to_infrastructure_config_invalid(self):
@@ -482,7 +482,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
config_patch = mock.patch.object(self.s, 'context_cfg', cfg)
with config_patch:
- with self.assertRaises(IncorrectConfig):
+ with self.assertRaises(exceptions.IncorrectConfig):
self.s.map_topology_to_infrastructure()
def test__resolve_topology_invalid_config(self):
@@ -496,7 +496,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
for interface in self.tg__1['interfaces'].values():
del interface['local_mac']
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s._resolve_topology()
self.assertIn('not found', str(raised.exception))
@@ -509,7 +509,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.topology["vld"][0]['vnfd-connection-point-ref'].append(
self.s.topology["vld"][0]['vnfd-connection-point-ref'][0])
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s._resolve_topology()
self.assertIn('wrong endpoint count', str(raised.exception))
@@ -518,7 +518,7 @@ class TestNetworkServiceTestCase(unittest.TestCase):
self.s.topology["vld"][0]['vnfd-connection-point-ref'] = \
self.s.topology["vld"][0]['vnfd-connection-point-ref'][:1]
- with self.assertRaises(vnf_generic.IncorrectConfig) as raised:
+ with self.assertRaises(exceptions.IncorrectConfig) as raised:
self.s._resolve_topology()
self.assertIn('wrong endpoint count', str(raised.exception))
@@ -628,7 +628,8 @@ class TestNetworkServiceTestCase(unittest.TestCase):
'extra_args': {'arg1': 'value1', 'arg2': 'value2'},
'flow': {'flow': {}},
'imix': {'imix': {'64B': 100}},
- 'uplink': {}}
+ 'uplink': {},
+ 'duration': 30}
)
mock_tprofile_get.assert_called_once_with(fake_vnfd)
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
index 419605b26..a606543e5 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf.py
@@ -12,31 +12,26 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Unittest for yardstick.benchmark.scenarios.networking.vsperf.Vsperf
-
-from __future__ import absolute_import
-try:
- from unittest import mock
-except ImportError:
- import mock
+import mock
import unittest
+import subprocess
+import yardstick.ssh as ssh
from yardstick.benchmark.scenarios.networking import vsperf
+from yardstick import exceptions as y_exc
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.subprocess')
-@mock.patch('yardstick.benchmark.scenarios.networking.vsperf.ssh')
class VsperfTestCase(unittest.TestCase):
def setUp(self):
- self.ctx = {
+ self.context_cfg = {
"host": {
"ip": "10.229.47.137",
"user": "ubuntu",
"password": "ubuntu",
},
}
- self.args = {
+ self.scenario_cfg = {
'options': {
'testname': 'p2p_rfc2544_continuous',
'traffic_type': 'continuous',
@@ -57,70 +52,154 @@ class VsperfTestCase(unittest.TestCase):
}
}
- def test_vsperf_setup(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ self._mock_SSH = mock.patch.object(ssh, 'SSH')
+ self.mock_SSH = self._mock_SSH.start()
+ self.mock_SSH.from_node().execute.return_value = (0, '', '')
+
+ self._mock_subprocess_call = mock.patch.object(subprocess, 'call')
+ self.mock_subprocess_call = self._mock_subprocess_call.start()
+ self.mock_subprocess_call.return_value = None
+
+ self.addCleanup(self._stop_mock)
+
+ self.scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+
+ def _stop_mock(self):
+ self._mock_SSH.stop()
+ self._mock_subprocess_call.stop()
+
+ def test_setup(self):
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ def test_setup_tg_port_not_set(self):
+ del self.scenario_cfg['options']['trafficgen_port1']
+ del self.scenario_cfg['options']['trafficgen_port2']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+
+ self.mock_subprocess_call.assert_called_once_with(
+ 'setup_yardstick.sh setup', shell=True)
+ self.assertIsNone(scenario.tg_port1)
+ self.assertIsNone(scenario.tg_port2)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
+
+ def test_setup_no_setup_script(self):
+ del self.scenario_cfg['options']['setup_script']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
+
+ self.mock_subprocess_call.assert_has_calls(
+ (mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth1"',
+ shell=True),
+ mock.call('sudo bash -c "ovs-vsctl add-port br-ex eth3"',
+ shell=True)))
+ self.assertEqual(2, self.mock_subprocess_call.call_count)
+ self.assertIsNone(scenario.setup_script)
+ self.assertIsNotNone(scenario.client)
+ self.assertTrue(scenario.setup_done)
+
+ def test_run_ok(self):
+ self.scenario.setup()
+
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ result = {}
+ self.scenario.run(result)
- def test_vsperf_teardown(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ def test_run_ok_setup_not_done(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- p.setup()
- self.assertIsNotNone(p.client)
- self.assertTrue(p.setup_done)
+ result = {}
+ self.scenario.run(result)
- p.teardown()
- self.assertFalse(p.setup_done)
+ self.assertTrue(self.scenario.setup_done)
+ self.assertEqual(result['throughput_rx_fps'], '14797660.000')
- def test_vsperf_run_ok(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ def test_run_failed_vsperf_execution(self):
+ self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+ (1, '', ''))
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ with self.assertRaises(RuntimeError):
+ self.scenario.run({})
+ self.assertEqual(self.mock_SSH.from_node().execute.call_count, 2)
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (
- 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+ def test_run_failed_csv_report(self):
+ self.mock_SSH.from_node().execute.side_effect = ((0, '', ''),
+ (0, '', ''),
+ (1, '', ''))
- result = {}
- p.run(result)
+ with self.assertRaises(RuntimeError):
+ self.scenario.run({})
+ self.assertEqual(self.mock_SSH.from_node().execute.call_count, 3)
- self.assertEqual(result['throughput_rx_fps'], '14797660.000')
+ def test_run_sla_fail(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n123456.000\r\n', '')
- def test_vsperf_run_falied_vsperf_execution(self, mock_ssh,
- mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ self.assertTrue('VSPERF_throughput_rx_fps(123456.000000) < '
+ 'SLA_throughput_rx_fps(500000.000000)'
+ in str(raised.exception))
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ def test_run_sla_fail_metric_not_collected(self):
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n14797660.000\r\n', '')
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
- def test_vsperf_run_falied_csv_report(self, mock_ssh, mock_subprocess):
- p = vsperf.Vsperf(self.args, self.ctx)
+ self.assertTrue('throughput_rx_fps was not collected by VSPERF'
+ in str(raised.exception))
- # setup() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_subprocess.call().execute.return_value = None
+ def test_run_sla_fail_metric_not_defined_in_sla(self):
+ del self.scenario_cfg['sla']['throughput_rx_fps']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.setup()
- # run() specific mocks
- mock_ssh.SSH.from_node().execute.return_value = (0, '', '')
- mock_ssh.SSH.from_node().execute.return_value = (1, '', '')
+ self.mock_SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
- result = {}
- self.assertRaises(RuntimeError, p.run, result)
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ scenario.run({})
+ self.assertTrue('throughput_rx_fps is not defined in SLA'
+ in str(raised.exception))
+
+ def test_teardown(self):
+ self.scenario.setup()
+ self.assertIsNotNone(self.scenario.client)
+ self.assertTrue(self.scenario.setup_done)
+
+ self.scenario.teardown()
+ self.assertFalse(self.scenario.setup_done)
+
+ def test_teardown_tg_port_not_set(self):
+ del self.scenario_cfg['options']['trafficgen_port1']
+ del self.scenario_cfg['options']['trafficgen_port2']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.teardown()
+
+ self.mock_subprocess_call.assert_called_once_with(
+ 'setup_yardstick.sh teardown', shell=True)
+ self.assertFalse(scenario.setup_done)
+
+ def test_teardown_no_setup_script(self):
+ del self.scenario_cfg['options']['setup_script']
+ scenario = vsperf.Vsperf(self.scenario_cfg, self.context_cfg)
+ scenario.teardown()
+
+ self.mock_subprocess_call.assert_has_calls(
+ (mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth1"',
+ shell=True),
+ mock.call('sudo bash -c "ovs-vsctl del-port br-ex eth3"',
+ shell=True)))
+ self.assertEqual(2, self.mock_subprocess_call.call_count)
+ self.assertFalse(scenario.setup_done)
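The new vsperf SLA tests pin down three failure modes through their asserted messages: metric below the SLA threshold, metric missing from the collected results, and metric absent from the SLA section. A minimal sketch of such a check, reconstructed only from those messages; the exception class is a stand-in, not yardstick's:

class SLAValidationError(Exception):
    """Stand-in for the SLA validation exception the tests expect."""


def check_throughput_sla(result, sla):
    metric = 'throughput_rx_fps'
    if metric not in sla:
        raise SLAValidationError('%s is not defined in SLA' % metric)
    if metric not in result:
        raise SLAValidationError('%s was not collected by VSPERF' % metric)
    measured = float(result[metric])
    minimum = float(sla[metric])
    if measured < minimum:
        raise SLAValidationError(
            'VSPERF_%s(%f) < SLA_%s(%f)'
            % (metric, measured, metric, minimum))


# e.g. check_throughput_sla({'throughput_rx_fps': '123456.000'},
#                           {'throughput_rx_fps': 500000})
# raises the message asserted in test_run_sla_fail.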
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
index 1d2278e21..c05d2ced2 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vsperf_dpdk.py
@@ -19,6 +19,7 @@ import mock
import unittest
from yardstick.benchmark.scenarios.networking import vsperf_dpdk
+from yardstick import exceptions as y_exc
class VsperfDPDKTestCase(unittest.TestCase):
@@ -211,3 +212,47 @@ class VsperfDPDKTestCase(unittest.TestCase):
result = {}
self.assertRaises(RuntimeError, self.scenario.run, result)
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('VSPERF_throughput_rx_fps(123456.000000) < '
+ 'SLA_throughput_rx_fps(500000.000000)',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_metric_not_collected(self, *args):
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'nonexisting_metric\r\n123456.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps was not collected by VSPERF',
+ str(raised.exception))
+
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(subprocess, 'check_output')
+ def test_vsperf_run_sla_fail_sla_not_defined(self, *args):
+ del self.scenario.scenario_cfg['sla']['throughput_rx_fps']
+ self.scenario.setup()
+
+ self.mock_ssh.SSH.from_node().execute.return_value = (
+ 0, 'throughput_rx_fps\r\n14797660.000\r\n', '')
+
+ with self.assertRaises(y_exc.SLAValidationError) as raised:
+ self.scenario.run({})
+
+ self.assertIn('throughput_rx_fps is not defined in SLA',
+ str(raised.exception))
diff --git a/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
index f149cee69..6e69ddc6d 100644
--- a/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
+++ b/yardstick/tests/unit/benchmark/scenarios/storage/test_fio.py
@@ -18,6 +18,7 @@ import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.storage import fio
+from yardstick.common import exceptions as y_exc
@mock.patch('yardstick.benchmark.scenarios.storage.fio.ssh')
@@ -203,7 +204,7 @@ class FioTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_fio_successful_bw_iops_sla(self, mock_ssh):
@@ -252,7 +253,7 @@ class FioTestCase(unittest.TestCase):
sample_output = self._read_sample_output(self.sample_output['rw'])
mock_ssh.SSH.from_node().execute.return_value = (0, sample_output, '')
- self.assertRaises(AssertionError, p.run, result)
+ self.assertRaises(y_exc.SLAValidationError, p.run, result)
def test_fio_unsuccessful_script_error(self, mock_ssh):
diff --git a/yardstick/tests/unit/common/messaging/__init__.py b/yardstick/tests/unit/common/messaging/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/common/messaging/__init__.py
diff --git a/yardstick/tests/unit/common/messaging/test_consumer.py b/yardstick/tests/unit/common/messaging/test_consumer.py
new file mode 100644
index 000000000..612dcaecd
--- /dev/null
+++ b/yardstick/tests/unit/common/messaging/test_consumer.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from oslo_config import cfg
+import oslo_messaging
+
+from yardstick.common import messaging
+from yardstick.common.messaging import consumer
+from yardstick.tests.unit import base as ut_base
+
+
+class TestEndPoint(object):
+ def action_1(self):
+ pass
+
+
+class _MessagingConsumer(consumer.MessagingConsumer):
+ pass
+
+
+class MessagingConsumerTestCase(ut_base.BaseUnitTestCase):
+
+ def test__init(self):
+ with mock.patch.object(oslo_messaging, 'get_rpc_server') as \
+ mock_get_rpc_server, \
+ mock.patch.object(oslo_messaging, 'get_rpc_transport') as \
+ mock_get_rpc_transport, \
+ mock.patch.object(oslo_messaging, 'Target') as \
+ mock_Target:
+ mock_get_rpc_transport.return_value = 'test_rpc_transport'
+ mock_Target.return_value = 'test_Target'
+
+ _MessagingConsumer('test_topic', 'test_pid', [TestEndPoint],
+ fanout=True)
+ mock_get_rpc_transport.assert_called_once_with(
+ cfg.CONF, url=messaging.TRANSPORT_URL)
+ mock_Target.assert_called_once_with(
+ topic='test_topic', fanout=True, server=messaging.SERVER)
+ mock_get_rpc_server.assert_called_once_with(
+ 'test_rpc_transport', 'test_Target', [TestEndPoint],
+ executor=messaging.RPC_SERVER_EXECUTOR,
+ access_policy=oslo_messaging.DefaultRPCAccessPolicy)
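test__init above stacks several mock.patch.object context managers to isolate the oslo_messaging entry points. The same pattern, shown against a self-contained hypothetical ExampleBus class rather than oslo_messaging:

from unittest import mock


class ExampleBus(object):
    """Hypothetical messaging entry points, standing in for oslo_messaging."""

    @staticmethod
    def get_transport(url):
        raise RuntimeError('no real transport in unit tests')

    @staticmethod
    def make_target(topic):
        raise RuntimeError('no real target in unit tests')


def build(topic, url):
    # mirrors a constructor that wires a transport and a target together
    return ExampleBus.get_transport(url), ExampleBus.make_target(topic)


with mock.patch.object(ExampleBus, 'get_transport') as mock_transport, \
        mock.patch.object(ExampleBus, 'make_target') as mock_target:
    mock_transport.return_value = 'fake_transport'
    mock_target.return_value = 'fake_target'
    assert build('test_topic', 'fake://') == ('fake_transport', 'fake_target')
    mock_transport.assert_called_once_with('fake://')
    mock_target.assert_called_once_with('test_topic')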
diff --git a/yardstick/tests/unit/common/messaging/test_payloads.py b/yardstick/tests/unit/common/messaging/test_payloads.py
new file mode 100644
index 000000000..00ec220c9
--- /dev/null
+++ b/yardstick/tests/unit/common/messaging/test_payloads.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.common.messaging import payloads
+from yardstick.tests.unit import base as ut_base
+
+
+class _DummyPayload(payloads.Payload):
+ REQUIRED_FIELDS = {'version', 'key1', 'key2'}
+
+
+class PayloadTestCase(ut_base.BaseUnitTestCase):
+
+ def test__init(self):
+ payload = _DummyPayload(version=1, key1='value1', key2='value2')
+ self.assertEqual(1, payload.version)
+ self.assertEqual('value1', payload.key1)
+ self.assertEqual('value2', payload.key2)
+ self.assertEqual(3, len(payload._fields))
+
+ def test__init_missing_required_fields(self):
+ with self.assertRaises(exceptions.PayloadMissingAttributes):
+ _DummyPayload(key1='value1', key2='value2')
+
+ def test_obj_to_dict(self):
+ payload = _DummyPayload(version=1, key1='value1', key2='value2')
+ payload_dict = payload.obj_to_dict()
+ self.assertEqual({'version': 1, 'key1': 'value1', 'key2': 'value2'},
+ payload_dict)
+
+ def test_dict_to_obj(self):
+ _dict = {'version': 2, 'key1': 'value100', 'key2': 'value200'}
+ payload = _DummyPayload.dict_to_obj(_dict)
+ self.assertEqual(set(_dict.keys()), payload._fields)
diff --git a/yardstick/tests/unit/common/messaging/test_producer.py b/yardstick/tests/unit/common/messaging/test_producer.py
new file mode 100644
index 000000000..0289689dc
--- /dev/null
+++ b/yardstick/tests/unit/common/messaging/test_producer.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from oslo_config import cfg
+import oslo_messaging
+
+from yardstick.common import messaging
+from yardstick.common.messaging import producer
+from yardstick.tests.unit import base as ut_base
+
+
+class _MessagingProducer(producer.MessagingProducer):
+ pass
+
+
+class MessagingProducerTestCase(ut_base.BaseUnitTestCase):
+
+ def test__init(self):
+ with mock.patch.object(oslo_messaging, 'RPCClient') as \
+ mock_RPCClient, \
+ mock.patch.object(oslo_messaging, 'get_rpc_transport') as \
+ mock_get_rpc_transport, \
+ mock.patch.object(oslo_messaging, 'Target') as \
+ mock_Target:
+ mock_get_rpc_transport.return_value = 'test_rpc_transport'
+ mock_Target.return_value = 'test_Target'
+
+ _MessagingProducer('test_topic', 'test_pid', fanout=True)
+ mock_get_rpc_transport.assert_called_once_with(
+ cfg.CONF, url=messaging.TRANSPORT_URL)
+ mock_Target.assert_called_once_with(
+ topic='test_topic', fanout=True, server=messaging.SERVER)
+ mock_RPCClient.assert_called_once_with('test_rpc_transport',
+ 'test_Target')
diff --git a/yardstick/tests/unit/common/test_exceptions.py b/yardstick/tests/unit/common/test_exceptions.py
new file mode 100644
index 000000000..884015536
--- /dev/null
+++ b/yardstick/tests/unit/common/test_exceptions.py
@@ -0,0 +1,28 @@
+# Copyright 2018 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from yardstick.common import exceptions
+from yardstick.tests.unit import base as ut_base
+
+
+class ErrorClassTestCase(ut_base.BaseUnitTestCase):
+
+ def test_init(self):
+ with self.assertRaises(RuntimeError):
+ exceptions.ErrorClass()
+
+ def test_getattr(self):
+ error_instance = exceptions.ErrorClass(test='')
+ with self.assertRaises(AttributeError):
+ error_instance.get_name()
diff --git a/yardstick/tests/unit/common/test_openstack_utils.py b/yardstick/tests/unit/common/test_openstack_utils.py
index 3b7e8eaa1..9361a97f2 100644
--- a/yardstick/tests/unit/common/test_openstack_utils.py
+++ b/yardstick/tests/unit/common/test_openstack_utils.py
@@ -10,8 +10,10 @@
from oslo_utils import uuidutils
import unittest
import mock
-
+import shade
from shade import exc
+
+from yardstick.common import constants
from yardstick.common import openstack_utils
@@ -35,22 +37,44 @@ class GetHeatApiVersionTestCase(unittest.TestCase):
self.assertEqual(api_version, expected_result)
+class GetShadeClientTestCase(unittest.TestCase):
+
+ @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+ def test_get_shade_client(self, mock_openstack_cloud):
+ os_cloud_config = {'param1': True, 'param2': 'value2'}
+ self.assertEqual('os_client',
+ openstack_utils.get_shade_client(**os_cloud_config))
+ os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
+ mock_openstack_cloud.reset_mock()
+ os_cloud_config = {'verify': True, 'param2': 'value2'}
+ self.assertEqual('os_client',
+ openstack_utils.get_shade_client(**os_cloud_config))
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
+ @mock.patch.object(shade, 'openstack_cloud', return_value='os_client')
+ def test_get_shade_client_no_parameters(self, mock_openstack_cloud):
+ self.assertEqual('os_client', openstack_utils.get_shade_client())
+ mock_openstack_cloud.assert_called_once_with(
+ **constants.OS_CLOUD_DEFAULT_CONFIG)
+
+
class DeleteNeutronNetTestCase(unittest.TestCase):
def setUp(self):
self.mock_shade_client = mock.Mock()
- self.mock_shade_client.delete_network = mock.Mock()
def test_delete_neutron_net(self):
self.mock_shade_client.delete_network.return_value = True
output = openstack_utils.delete_neutron_net(self.mock_shade_client,
- 'network_id')
+ 'network_name_or_id')
self.assertTrue(output)
def test_delete_neutron_net_fail(self):
self.mock_shade_client.delete_network.return_value = False
output = openstack_utils.delete_neutron_net(self.mock_shade_client,
- 'network_id')
+ 'network_name_or_id')
self.assertFalse(output)
@mock.patch.object(openstack_utils, 'log')
@@ -58,7 +82,7 @@ class DeleteNeutronNetTestCase(unittest.TestCase):
self.mock_shade_client.delete_network.side_effect = (
exc.OpenStackCloudException('error message'))
output = openstack_utils.delete_neutron_net(self.mock_shade_client,
- 'network_id')
+ 'network_name_or_id')
self.assertFalse(output)
mock_logger.error.assert_called_once()
@@ -264,3 +288,434 @@ class CreateSecurityGroupRuleTestCase(unittest.TestCase):
self.mock_shade_client, self.secgroup_name_or_id)
mock_logger.error.assert_called_once()
self.assertFalse(output)
+
+
+class ListImageTestCase(unittest.TestCase):
+
+ def test_list_images(self):
+ mock_shade_client = mock.MagicMock()
+ mock_shade_client.list_images.return_value = []
+ openstack_utils.list_images(mock_shade_client)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_list_images_exception(self, mock_logger):
+ mock_shade_client = mock.MagicMock()
+ mock_shade_client.list_images = mock.MagicMock()
+ mock_shade_client.list_images.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ images = openstack_utils.list_images(mock_shade_client)
+ mock_logger.error.assert_called_once()
+ self.assertFalse(images)
+
+
+class SecurityGroupTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.sg_name = 'sg_name'
+ self.sg_description = 'sg_description'
+ self._uuid = uuidutils.generate_uuid()
+
+ def test_create_security_group_full_existing_security_group(self):
+ self.mock_shade_client.get_security_group.return_value = (
+ {'name': 'name', 'id': self._uuid})
+ output = openstack_utils.create_security_group_full(
+ self.mock_shade_client, self.sg_name, self.sg_description)
+ self.mock_shade_client.get_security_group.assert_called_once()
+ self.assertEqual(self._uuid, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_security_group_full_non_existing_security_group(
+ self, mock_logger):
+ self.mock_shade_client.get_security_group.return_value = None
+ self.mock_shade_client.create_security_group.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_security_group_full(
+ self.mock_shade_client, self.sg_name, self.sg_description)
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+ @mock.patch.object(openstack_utils, 'create_security_group_rule')
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_security_group_full_create_rule_fail(
+ self, mock_logger, mock_create_security_group_rule):
+ self.mock_shade_client.get_security_group.return_value = None
+ self.mock_shade_client.create_security_group.return_value = (
+ {'name': 'name', 'id': self._uuid})
+ mock_create_security_group_rule.return_value = False
+ output = openstack_utils.create_security_group_full(
+ self.mock_shade_client, self.sg_name, self.sg_description)
+ mock_create_security_group_rule.assert_called()
+        self.mock_shade_client.delete_security_group.assert_called_once()
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+ @mock.patch.object(openstack_utils, 'create_security_group_rule')
+ def test_create_security_group_full(
+ self, mock_create_security_group_rule):
+ self.mock_shade_client.get_security_group.return_value = None
+ self.mock_shade_client.create_security_group.return_value = (
+ {'name': 'name', 'id': self._uuid})
+ mock_create_security_group_rule.return_value = True
+ output = openstack_utils.create_security_group_full(
+ self.mock_shade_client, self.sg_name, self.sg_description)
+ mock_create_security_group_rule.assert_called()
+        self.mock_shade_client.delete_security_group.assert_not_called()
+ self.assertEqual(self._uuid, output)
+
+# *********************************************
+# NOVA
+# *********************************************
+
+
+class CreateInstanceTestCase(unittest.TestCase):
+
+ def test_create_instance_and_wait_for_active(self):
+ self.mock_shade_client = mock.Mock()
+ name = 'server_name'
+ image = 'image_name'
+ flavor = 'flavor_name'
+ self.mock_shade_client.create_server.return_value = (
+ {'name': name, 'image': image, 'flavor': flavor})
+ output = openstack_utils.create_instance_and_wait_for_active(
+ self.mock_shade_client, name, image, flavor)
+ self.assertEqual(
+ {'name': name, 'image': image, 'flavor': flavor}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_instance_and_wait_for_active_fail(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.create_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_instance_and_wait_for_active(
+ self.mock_shade_client, 'server_name', 'image_name', 'flavor_name')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteInstanceTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+
+ def test_delete_instance(self):
+ self.mock_shade_client.delete_server.return_value = True
+ output = openstack_utils.delete_instance(self.mock_shade_client,
+ 'instance_name_id')
+ self.assertTrue(output)
+
+ def test_delete_instance_fail(self):
+ self.mock_shade_client.delete_server.return_value = False
+ output = openstack_utils.delete_instance(self.mock_shade_client,
+ 'instance_name_id')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_instance_exception(self, mock_logger):
+ self.mock_shade_client.delete_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_instance(self.mock_shade_client,
+ 'instance_name_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class CreateKeypairTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.name = 'key_name'
+
+ def test_create_keypair(self):
+ self.mock_shade_client.create_keypair.return_value = (
+ {'name': 'key-name', 'type': 'ssh'})
+ output = openstack_utils.create_keypair(
+ self.mock_shade_client, self.name)
+ self.assertEqual(
+ {'name': 'key-name', 'type': 'ssh'},
+ output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_keypair_fail(self, mock_logger):
+ self.mock_shade_client.create_keypair.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_keypair(
+ self.mock_shade_client, self.name)
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteKeypairTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+
+ def test_delete_keypair(self):
+ self.mock_shade_client.delete_keypair.return_value = True
+ output = openstack_utils.delete_keypair(self.mock_shade_client,
+ 'key_name')
+ self.assertTrue(output)
+
+ def test_delete_keypair_fail(self):
+ self.mock_shade_client.delete_keypair.return_value = False
+ output = openstack_utils.delete_keypair(self.mock_shade_client,
+ 'key_name')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_keypair_exception(self, mock_logger):
+ self.mock_shade_client.delete_keypair.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_keypair(self.mock_shade_client,
+ 'key_name')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class AttachVolumeToServerTestCase(unittest.TestCase):
+
+ def test_attach_volume_to_server(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ self.mock_shade_client.attach_volume.return_value = True
+ output = openstack_utils.attach_volume_to_server(
+ self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+ self.assertTrue(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_attach_volume_to_server_fail(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.attach_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.attach_volume_to_server(
+ self.mock_shade_client, 'server_name_or_id', 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class GetServerTestCase(unittest.TestCase):
+
+ def test_get_server(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_server.return_value = {
+ 'name': 'server_name', 'id': _uuid}
+ output = openstack_utils.get_server(self.mock_shade_client,
+ 'server_name_or_id')
+ self.assertEqual({'name': 'server_name', 'id': _uuid}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_get_server_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_server.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.get_server(self.mock_shade_client,
+ 'server_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class GetFlavorTestCase(unittest.TestCase):
+
+ def test_get_flavor(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_flavor.return_value = {
+ 'name': 'flavor_name', 'id': _uuid}
+ output = openstack_utils.get_flavor(self.mock_shade_client,
+ 'flavor_name_or_id')
+ self.assertEqual({'name': 'flavor_name', 'id': _uuid}, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_get_flavor_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_flavor.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.get_flavor(self.mock_shade_client,
+ 'flavor_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+# *********************************************
+# CINDER
+# *********************************************
+
+
+class GetVolumeIDTestCase(unittest.TestCase):
+
+ def test_get_volume_id(self):
+ self.mock_shade_client = mock.Mock()
+ _uuid = uuidutils.generate_uuid()
+ self.mock_shade_client.get_volume_id.return_value = _uuid
+ output = openstack_utils.get_volume_id(self.mock_shade_client,
+ 'volume_name')
+ self.assertEqual(_uuid, output)
+
+ def test_get_volume_id_None(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_volume_id.return_value = None
+ output = openstack_utils.get_volume_id(self.mock_shade_client,
+ 'volume_name')
+ self.assertIsNone(output)
+
+
+class GetVolumeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.get_volume = mock.Mock()
+
+ def test_get_volume(self):
+ self.mock_shade_client.get_volume.return_value = {'volume'}
+ output = openstack_utils.get_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertEqual({'volume'}, output)
+
+ def test_get_volume_None(self):
+ self.mock_shade_client.get_volume.return_value = None
+ output = openstack_utils.get_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertIsNone(output)
+
+
+class CreateVolumeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self.size = 1
+
+ def test_create_volume(self):
+ self.mock_shade_client.create_volume.return_value = (
+ {'name': 'volume-name', 'size': self.size})
+ output = openstack_utils.create_volume(
+ self.mock_shade_client, self.size)
+ self.assertEqual(
+ {'name': 'volume-name', 'size': self.size},
+ output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_volume_fail(self, mock_logger):
+ self.mock_shade_client.create_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.create_volume(self.mock_shade_client,
+ self.size)
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteVolumeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+
+ def test_delete_volume(self):
+ self.mock_shade_client.delete_volume.return_value = True
+ output = openstack_utils.delete_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertTrue(output)
+
+ def test_delete_volume_fail(self):
+ self.mock_shade_client.delete_volume.return_value = False
+ output = openstack_utils.delete_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_volume_exception(self, mock_logger):
+ self.mock_shade_client.delete_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_volume(self.mock_shade_client,
+ 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+class DetachVolumeTestCase(unittest.TestCase):
+
+ @mock.patch.object(openstack_utils, 'get_server')
+ def test_detach_volume(self, mock_get_server):
+ self.mock_shade_client = mock.Mock()
+ mock_get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ output = openstack_utils.detach_volume(self.mock_shade_client,
+ 'server_name_or_id',
+ 'volume_name_or_id')
+ self.assertTrue(output)
+
+ @mock.patch.object(openstack_utils, 'get_server')
+ @mock.patch.object(openstack_utils, 'log')
+ def test_detach_volume_exception(self, mock_logger, mock_get_server):
+ self.mock_shade_client = mock.Mock()
+ mock_get_server.return_value = {'server_dict'}
+ self.mock_shade_client.get_volume.return_value = {'volume_dict'}
+ self.mock_shade_client.detach_volume.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.detach_volume(self.mock_shade_client,
+ 'server_name_or_id',
+ 'volume_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
+
+
+# *********************************************
+# GLANCE
+# *********************************************
+
+class CreateImageTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_shade_client = mock.Mock()
+ self._uuid = uuidutils.generate_uuid()
+ self.name = 'image_name'
+
+ @mock.patch.object(openstack_utils, 'log')
+    def test_create_image_already_exists(self, mock_logger):
+ self.mock_shade_client.get_image_id.return_value = self._uuid
+ output = openstack_utils.create_image(self.mock_shade_client, self.name)
+ mock_logger.info.assert_called_once()
+ self.assertEqual(self._uuid, output)
+
+ def test_create_image(self):
+ self.mock_shade_client.get_image_id.return_value = None
+ self.mock_shade_client.create_image.return_value = {'id': self._uuid}
+ output = openstack_utils.create_image(self.mock_shade_client, self.name)
+ self.assertEqual(self._uuid, output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_create_image_exception(self, mock_logger):
+ self.mock_shade_client.get_image_id.return_value = None
+ self.mock_shade_client.create_image.side_effect = (
+ exc.OpenStackCloudException('error message'))
+
+ output = openstack_utils.create_image(self.mock_shade_client,
+ self.name)
+ mock_logger.error.assert_called_once()
+ self.assertIsNone(output)
+
+
+class DeleteImageTestCase(unittest.TestCase):
+
+ def test_delete_image(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.delete_image.return_value = True
+ output = openstack_utils.delete_image(self.mock_shade_client,
+ 'image_name_or_id')
+ self.assertTrue(output)
+
+ def test_delete_image_fail(self):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.delete_image.return_value = False
+ output = openstack_utils.delete_image(self.mock_shade_client,
+ 'image_name_or_id')
+ self.assertFalse(output)
+
+ @mock.patch.object(openstack_utils, 'log')
+ def test_delete_image_exception(self, mock_logger):
+ self.mock_shade_client = mock.Mock()
+ self.mock_shade_client.delete_image.side_effect = (
+ exc.OpenStackCloudException('error message'))
+ output = openstack_utils.delete_image(self.mock_shade_client,
+ 'image_name_or_id')
+ mock_logger.error.assert_called_once()
+ self.assertFalse(output)
diff --git a/yardstick/tests/unit/common/test_packages.py b/yardstick/tests/unit/common/test_packages.py
new file mode 100644
index 000000000..ba59a3015
--- /dev/null
+++ b/yardstick/tests/unit/common/test_packages.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+from pip import exceptions as pip_exceptions
+from pip.operations import freeze
+import unittest
+
+from yardstick.common import packages
+
+
+class PipExecuteActionTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_pip_main = mock.patch.object(packages, '_pip_main')
+ self.mock_pip_main = self._mock_pip_main.start()
+ self.mock_pip_main.return_value = 0
+ self._mock_freeze = mock.patch.object(freeze, 'freeze')
+ self.mock_freeze = self._mock_freeze.start()
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_pip_main.stop()
+ self._mock_freeze.stop()
+
+ def test_pip_execute_action(self):
+ self.assertEqual(0, packages._pip_execute_action('test_package'))
+
+ def test_remove(self):
+ self.assertEqual(0, packages._pip_execute_action('test_package',
+ action='uninstall'))
+
+ def test_install(self):
+ self.assertEqual(0, packages._pip_execute_action(
+ 'test_package', action='install', target='temp_dir'))
+
+ def test_pip_execute_action_error(self):
+ self.mock_pip_main.return_value = 1
+ self.assertEqual(1, packages._pip_execute_action('test_package'))
+
+ def test_pip_execute_action_exception(self):
+ self.mock_pip_main.side_effect = pip_exceptions.PipError
+ self.assertEqual(1, packages._pip_execute_action('test_package'))
+
+ def test_pip_list(self):
+ pkg_input = [
+ 'XStatic-Rickshaw==1.5.0.0',
+ 'xvfbwrapper==0.2.9',
+ '-e git+https://git.opnfv.org/yardstick@50773a24afc02c9652b662ecca'
+ '2fc5621ea6097a#egg=yardstick',
+ 'zope.interface==4.4.3'
+ ]
+ pkg_dict = {
+ 'XStatic-Rickshaw': '1.5.0.0',
+ 'xvfbwrapper': '0.2.9',
+ 'yardstick': '50773a24afc02c9652b662ecca2fc5621ea6097a',
+ 'zope.interface': '4.4.3'
+ }
+ self.mock_freeze.return_value = pkg_input
+
+ pkg_output = packages.pip_list()
+ for pkg_name, pkg_version in pkg_output.items():
+ self.assertEqual(pkg_dict.get(pkg_name), pkg_version)
+
+ def test_pip_list_single_package(self):
+ pkg_input = [
+ 'XStatic-Rickshaw==1.5.0.0',
+ 'xvfbwrapper==0.2.9',
+ '-e git+https://git.opnfv.org/yardstick@50773a24afc02c9652b662ecca'
+ '2fc5621ea6097a#egg=yardstick',
+ 'zope.interface==4.4.3'
+ ]
+ self.mock_freeze.return_value = pkg_input
+
+ pkg_output = packages.pip_list(pkg_name='xvfbwrapper')
+ self.assertEqual(1, len(pkg_output))
+ self.assertEqual(pkg_output.get('xvfbwrapper'), '0.2.9')
diff --git a/yardstick/tests/unit/common/test_utils.py b/yardstick/tests/unit/common/test_utils.py
index 9540a39e8..31b10e6da 100644
--- a/yardstick/tests/unit/common/test_utils.py
+++ b/yardstick/tests/unit/common/test_utils.py
@@ -16,13 +16,14 @@ import mock
import os
import six
from six.moves import configparser
+import time
import unittest
import yardstick
from yardstick import ssh
-import yardstick.error
-from yardstick.common import utils
from yardstick.common import constants
+from yardstick.common import utils
+from yardstick.common import exceptions
class IterSubclassesTestCase(unittest.TestCase):
@@ -893,7 +894,7 @@ class TestUtils(unittest.TestCase):
os.environ.clear()
os.environ.update(base_env)
- @mock.patch('yardstick.common.utils.configparser.ConfigParser')
+ @mock.patch.object(configparser, 'ConfigParser')
def test_parse_ini_file(self, mock_config_parser_type):
defaults = {
'default1': 'value1',
@@ -925,23 +926,26 @@ class TestUtils(unittest.TestCase):
result = utils.parse_ini_file('my_path')
self.assertDictEqual(result, expected)
- @mock.patch('yardstick.common.utils.configparser.ConfigParser')
- def test_parse_ini_file_missing_section_header(self, mock_config_parser_type):
+ @mock.patch.object(utils, 'logger')
+ @mock.patch.object(configparser, 'ConfigParser')
+ def test_parse_ini_file_missing_section_header(
+ self, mock_config_parser_type, *args):
mock_config_parser = mock_config_parser_type()
- mock_config_parser.read.side_effect = \
- configparser.MissingSectionHeaderError(mock.Mock(), 321, mock.Mock())
+ mock_config_parser.read.side_effect = (
+ configparser.MissingSectionHeaderError(mock.Mock(), 321,
+ mock.Mock()))
with self.assertRaises(configparser.MissingSectionHeaderError):
utils.parse_ini_file('my_path')
- @mock.patch('yardstick.common.utils.configparser.ConfigParser')
+ @mock.patch.object(configparser, 'ConfigParser')
def test_parse_ini_file_no_file(self, mock_config_parser_type):
mock_config_parser = mock_config_parser_type()
mock_config_parser.read.return_value = False
with self.assertRaises(RuntimeError):
utils.parse_ini_file('my_path')
- @mock.patch('yardstick.common.utils.configparser.ConfigParser')
+ @mock.patch.object(configparser, 'ConfigParser')
def test_parse_ini_file_no_default_section_header(self, mock_config_parser_type):
s1 = {
'key1': 'value11',
@@ -987,14 +991,6 @@ class TestUtils(unittest.TestCase):
with self.assertRaises(RuntimeError):
utils.validate_non_string_sequence(1, raise_exc=RuntimeError)
- def test_error_class(self):
- with self.assertRaises(RuntimeError):
- yardstick.error.ErrorClass()
-
- error_instance = yardstick.error.ErrorClass(test='')
- with self.assertRaises(AttributeError):
- error_instance.get_name()
-
class TestUtilsIpAddrMethods(unittest.TestCase):
@@ -1158,3 +1154,54 @@ class ReadMeminfoTestCase(unittest.TestCase):
output = utils.read_meminfo(ssh_client)
mock_get_client.assert_called_once_with('/proc/meminfo', mock.ANY)
self.assertEqual(self.MEMINFO_DICT, output)
+
+
+class TimerTestCase(unittest.TestCase):
+
+ def test__getattr(self):
+ with utils.Timer() as timer:
+ time.sleep(1)
+ self.assertEqual(1, round(timer.total_seconds(), 0))
+ self.assertEqual(1, timer.delta.seconds)
+
+ def test__enter_with_timeout(self):
+ with utils.Timer(timeout=10) as timer:
+ time.sleep(1)
+ self.assertEqual(1, round(timer.total_seconds(), 0))
+
+ def test__enter_with_timeout_exception(self):
+ with self.assertRaises(exceptions.TimerTimeout):
+ with utils.Timer(timeout=1):
+ time.sleep(2)
+
+ def test__enter_with_timeout_no_exception(self):
+ with utils.Timer(timeout=1, raise_exception=False):
+ time.sleep(2)
+
+ def test__iter(self):
+ iterations = []
+ for i in utils.Timer(timeout=2):
+ iterations.append(i)
+ time.sleep(1.1)
+ self.assertEqual(2, len(iterations))
+
+
+class WaitUntilTrueTestCase(unittest.TestCase):
+
+ def test_no_timeout(self):
+ self.assertIsNone(utils.wait_until_true(lambda: True,
+ timeout=1, sleep=1))
+
+ def test_timeout_generic_exception(self):
+ with self.assertRaises(exceptions.WaitTimeout):
+ self.assertIsNone(utils.wait_until_true(lambda: False,
+ timeout=1, sleep=1))
+
+ def test_timeout_given_exception(self):
+ class MyTimeoutException(exceptions.YardstickException):
+ message = 'My timeout exception'
+
+ with self.assertRaises(MyTimeoutException):
+ self.assertIsNone(
+ utils.wait_until_true(lambda: False, timeout=1, sleep=1,
+ exception=MyTimeoutException))
diff --git a/yardstick/tests/unit/network_services/__init__.py b/yardstick/tests/unit/network_services/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/__init__.py
diff --git a/yardstick/tests/unit/network_services/collector/__init__.py b/yardstick/tests/unit/network_services/collector/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/collector/__init__.py
diff --git a/yardstick/tests/unit/network_services/collector/test_publisher.py b/yardstick/tests/unit/network_services/collector/test_publisher.py
new file mode 100644
index 000000000..145441ddd
--- /dev/null
+++ b/yardstick/tests/unit/network_services/collector/test_publisher.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+
+from yardstick.network_services.collector import publisher
+
+
+class PublisherTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.test_publisher = publisher.Publisher()
+
+ def test_successful_init(self):
+ pass
+
+ def test_unsuccessful_init(self):
+ pass
+
+ def test_start(self):
+ self.assertIsNone(self.test_publisher.start())
+
+ def test_stop(self):
+ self.assertIsNone(self.test_publisher.stop())
diff --git a/yardstick/tests/unit/network_services/collector/test_subscriber.py b/yardstick/tests/unit/network_services/collector/test_subscriber.py
new file mode 100644
index 000000000..14e26f7fe
--- /dev/null
+++ b/yardstick/tests/unit/network_services/collector/test_subscriber.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.network_services.collector import subscriber
+from yardstick import ssh
+
+
+class MockVnfApprox(object):
+
+ def __init__(self):
+ self.result = {}
+ self.name = "vnf__1"
+
+ def collect_kpi(self):
+ self.result = {
+ 'pkt_in_up_stream': 100,
+ 'pkt_drop_up_stream': 5,
+ 'pkt_in_down_stream': 50,
+ 'pkt_drop_down_stream': 40
+ }
+ return self.result
+
+
+class CollectorTestCase(unittest.TestCase):
+
+ def setUp(self):
+        vnf = MockVnfApprox()
+ vnf.start_collect = mock.Mock()
+ vnf.stop_collect = mock.Mock()
+ self.ssh_patch = mock.patch.object(ssh, 'AutoConnectSSH')
+ mock_ssh = self.ssh_patch.start()
+ mock_instance = mock.Mock()
+ mock_instance.execute.return_value = 0, '', ''
+ mock_ssh.from_node.return_value = mock_instance
+ self.collector = subscriber.Collector([vnf])
+
+ def tearDown(self):
+ self.ssh_patch.stop()
+
+ def test___init__(self, *_):
+        vnf = MockVnfApprox()
+ collector = subscriber.Collector([vnf])
+ self.assertEqual(len(collector.vnfs), 1)
+
+ def test_start(self, *_):
+ self.assertIsNone(self.collector.start())
+ for vnf in self.collector.vnfs:
+ vnf.start_collect.assert_called_once()
+
+ def test_stop(self, *_):
+ self.assertIsNone(self.collector.stop())
+ for vnf in self.collector.vnfs:
+ vnf.stop_collect.assert_called_once()
+
+ def test_get_kpi(self, *_):
+ result = self.collector.get_kpi()
+
+ self.assertEqual(1, len(result))
+ self.assertEqual(4, len(result["vnf__1"]))
+ self.assertEqual(result["vnf__1"]["pkt_in_up_stream"], 100)
+ self.assertEqual(result["vnf__1"]["pkt_drop_up_stream"], 5)
+ self.assertEqual(result["vnf__1"]["pkt_in_down_stream"], 50)
+ self.assertEqual(result["vnf__1"]["pkt_drop_down_stream"], 40)
diff --git a/yardstick/tests/unit/network_services/helpers/__init__.py b/yardstick/tests/unit/network_services/helpers/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/helpers/__init__.py
diff --git a/yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml b/yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml
new file mode 100644
index 000000000..f60834fbd
--- /dev/null
+++ b/yardstick/tests/unit/network_services/helpers/acl_vnf_topology_ixia.yaml
@@ -0,0 +1,50 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+nsd:nsd-catalog:
+ nsd:
+ - id: VACL
+ name: VACL
+ short-name: VACL
+ description: scenario with VACL,L3fwd and VNF
+ constituent-vnfd:
+ - member-vnf-index: '1'
+ vnfd-id-ref: tg__1
+ VNF model: ../../vnf_descriptors/ixia_rfc2544_tpl.yaml
+ - member-vnf-index: '2'
+ vnfd-id-ref: vnf__1
+ VNF model: ../../vnf_descriptors/acl_vnf.yaml
+
+ vld:
+ - id: uplink_1
+ name: tg__1 to vnf__1 link 1
+ type: ELAN
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: xe0
+ vnfd-id-ref: tg__1 #TREX
+ - member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: xe0
+ vnfd-id-ref: vnf__1 #VNF
+
+ - id: downlink_1
+ name: vnf__1 to tg__1 link 2
+ type: ELAN
+ vnfd-connection-point-ref:
+ - member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: xe1
+ vnfd-id-ref: vnf__1 #L3fwd
+ - member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: xe1
+ vnfd-id-ref: tg__1 #VACL VNF
diff --git a/yardstick/tests/unit/network_services/helpers/test_cpu.py b/yardstick/tests/unit/network_services/helpers/test_cpu.py
new file mode 100644
index 000000000..871fbf8c9
--- /dev/null
+++ b/yardstick/tests/unit/network_services/helpers/test_cpu.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import division
+import unittest
+import mock
+import subprocess
+
+from yardstick.network_services.helpers.cpu import \
+ CpuSysCores
+
+
+class TestCpuSysCores(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_ssh = mock.patch("yardstick.ssh.SSH")
+ self.mock_ssh = self._mock_ssh.start()
+
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_ssh.stop()
+
+ def test___init__(self):
+ self.mock_ssh.execute.return_value = 1, "", ""
+ self.mock_ssh.put.return_value = 1, "", ""
+ cpu_topo = CpuSysCores(self.mock_ssh)
+ self.assertIsNotNone(cpu_topo.connection)
+
+ def test__get_core_details(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "", ""))
+ ssh_mock.put = \
+ mock.Mock(return_value=(1, "", ""))
+ cpu_topo = CpuSysCores(ssh_mock)
+ subprocess.check_output = mock.Mock(return_value=0)
+ lines = ["cpu:1", "topo:2", ""]
+ self.assertEqual([{'topo': '2', 'cpu': '1'}],
+ cpu_topo._get_core_details(lines))
+
+ def test_get_core_socket(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ ssh_mock.put = \
+ mock.Mock(return_value=(1, "", ""))
+ cpu_topo = CpuSysCores(ssh_mock)
+ subprocess.check_output = mock.Mock(return_value=0)
+ cpu_topo._get_core_details = \
+ mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
+ [{'physical id': '2', 'processor': '1'}]])
+ self.assertEqual({'thread_per_core': '1', '2': ['1'],
+ 'cores_per_socket': '2'},
+ cpu_topo.get_core_socket())
+
+ def test_validate_cpu_cfg(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ ssh_mock.put = \
+ mock.Mock(return_value=(1, "", ""))
+ cpu_topo = CpuSysCores(ssh_mock)
+ subprocess.check_output = mock.Mock(return_value=0)
+ cpu_topo._get_core_details = \
+ mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
+ [{'physical id': '2', 'processor': '1'}]])
+ cpu_topo.core_map = \
+ {'thread_per_core': '1', '2': ['1'], 'cores_per_socket': '2'}
+ self.assertEqual(-1, cpu_topo.validate_cpu_cfg())
+
+ def test_validate_cpu_cfg_2t(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ ssh_mock.put = \
+ mock.Mock(return_value=(1, "", ""))
+ cpu_topo = CpuSysCores(ssh_mock)
+ subprocess.check_output = mock.Mock(return_value=0)
+ cpu_topo._get_core_details = \
+ mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
+ [{'physical id': '2', 'processor': '1'}]])
+ cpu_topo.core_map = \
+ {'thread_per_core': 1, '2': ['1'], 'cores_per_socket': '2'}
+ vnf_cfg = {'lb_config': 'SW', 'lb_count': 1, 'worker_config':
+ '1C/2T', 'worker_threads': 1}
+ self.assertEqual(-1, cpu_topo.validate_cpu_cfg(vnf_cfg))
+
+ def test_validate_cpu_cfg_fail(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, "cpu:1\ntest:2\n \n", ""))
+ ssh_mock.put = \
+ mock.Mock(return_value=(1, "", ""))
+ cpu_topo = CpuSysCores(ssh_mock)
+ subprocess.check_output = mock.Mock(return_value=0)
+ cpu_topo._get_core_details = \
+ mock.Mock(side_effect=[[{'Core(s) per socket': '2', 'Thread(s) per core': '1'}],
+ [{'physical id': '2', 'processor': '1'}]])
+ cpu_topo.core_map = \
+ {'thread_per_core': 1, '2': [1], 'cores_per_socket': 2}
+ vnf_cfg = {'lb_config': 'SW', 'lb_count': 1, 'worker_config':
+ '1C/1T', 'worker_threads': 1}
+ self.assertEqual(-1, cpu_topo.validate_cpu_cfg(vnf_cfg))
diff --git a/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py b/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
new file mode 100644
index 000000000..e19311613
--- /dev/null
+++ b/yardstick/tests/unit/network_services/helpers/test_dpdkbindnic_helper.py
@@ -0,0 +1,632 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+import unittest
+
+import os
+
+from yardstick.common import exceptions
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkInterface
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkNode
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper
+from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelperException
+from yardstick.network_services.helpers.dpdkbindnic_helper import NETWORK_KERNEL
+from yardstick.network_services.helpers.dpdkbindnic_helper import NETWORK_DPDK
+from yardstick.network_services.helpers.dpdkbindnic_helper import CRYPTO_KERNEL
+from yardstick.network_services.helpers.dpdkbindnic_helper import CRYPTO_DPDK
+from yardstick.network_services.helpers.dpdkbindnic_helper import NETWORK_OTHER
+from yardstick.network_services.helpers.dpdkbindnic_helper import CRYPTO_OTHER
+
+
+NAME = "tg_0"
+
+
+class TestDpdkInterface(unittest.TestCase):
+
+ SAMPLE_NETDEVS = {
+ 'enp11s0': {
+ 'address': '0a:de:ad:be:ef:f5',
+ 'device': '0x1533',
+ 'driver': 'igb',
+ 'ifindex': '2',
+ 'interface_name': 'enp11s0',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:0b:00.0',
+ 'subsystem_device': '0x1533',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ },
+ 'lan': {
+ 'address': '0a:de:ad:be:ef:f4',
+ 'device': '0x153a',
+ 'driver': 'e1000e',
+ 'ifindex': '3',
+ 'interface_name': 'lan',
+ 'operstate': 'up',
+ 'pci_bus_id': '0000:00:19.0',
+ 'subsystem_device': '0x153a',
+ 'subsystem_vendor': '0x15d9',
+ 'vendor': '0x8086'
+ }
+ }
+
+ SAMPLE_VM_NETDEVS = {
+ 'eth1': {
+ 'address': 'fa:de:ad:be:ef:5b',
+ 'device': '0x0001',
+ 'driver': 'virtio_net',
+ 'ifindex': '3',
+ 'interface_name': 'eth1',
+ 'operstate': 'down',
+ 'pci_bus_id': '0000:00:04.0',
+ 'vendor': '0x1af4'
+ }
+ }
+
+ def test_parse_netdev_info(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/ifindex:2
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/address:0a:de:ad:be:ef:f5
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/operstate:down
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/device/subsystem_device:0x1533
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/driver:igb
+/sys/devices/pci0000:00/0000:00:1c.3/0000:0b:00.0/net/enp11s0/pci_bus_id:0000:0b:00.0
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/ifindex:3
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/address:0a:de:ad:be:ef:f4
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/operstate:up
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/vendor:0x8086
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_vendor:0x15d9
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/device/subsystem_device:0x153a
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/driver:e1000e
+/sys/devices/pci0000:00/0000:00:19.0/net/lan/pci_bus_id:0000:00:19.0
+"""
+ res = DpdkBindHelper.parse_netdev_info(output)
+ self.assertDictEqual(res, self.SAMPLE_NETDEVS)
+
+ def test_parse_netdev_info_virtio(self):
+ output = """\
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/ifindex:3
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/address:fa:de:ad:be:ef:5b
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/operstate:down
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/vendor:0x1af4
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/device/device:0x0001
+/sys/devices/pci0000:00/0000:00:04.0/virtio1/net/eth1/driver:virtio_net
+"""
+ res = DpdkBindHelper.parse_netdev_info(output)
+ self.assertDictEqual(res, self.SAMPLE_VM_NETDEVS)
+
+ def test_probe_missing_values(self):
+ mock_dpdk_node = mock.Mock()
+ mock_dpdk_node.netdevs = self.SAMPLE_NETDEVS.copy()
+
+ interface = {'local_mac': '0a:de:ad:be:ef:f5'}
+ dpdk_intf = DpdkInterface(mock_dpdk_node, interface)
+
+ dpdk_intf.probe_missing_values()
+ self.assertEqual(interface['vpci'], '0000:0b:00.0')
+
+ interface['local_mac'] = '0a:de:ad:be:ef:f4'
+ dpdk_intf.probe_missing_values()
+ self.assertEqual(interface['vpci'], '0000:00:19.0')
+
+ def test_probe_missing_values_no_update(self):
+ mock_dpdk_node = mock.Mock()
+ mock_dpdk_node.netdevs = self.SAMPLE_NETDEVS.copy()
+ del mock_dpdk_node.netdevs['enp11s0']['driver']
+ del mock_dpdk_node.netdevs['lan']['driver']
+
+ interface = {'local_mac': '0a:de:ad:be:ef:f5'}
+ dpdk_intf = DpdkInterface(mock_dpdk_node, interface)
+
+ dpdk_intf.probe_missing_values()
+ self.assertNotIn('vpci', interface)
+ self.assertNotIn('driver', interface)
+
+ def test_probe_missing_values_negative(self):
+ mock_dpdk_node = mock.Mock()
+ mock_dpdk_node.netdevs.values.side_effect = (
+ exceptions.IncorrectNodeSetup(error_msg=''))
+
+ interface = {'local_mac': '0a:de:ad:be:ef:f5'}
+ dpdk_intf = DpdkInterface(mock_dpdk_node, interface)
+
+ with self.assertRaises(exceptions.IncorrectConfig):
+ dpdk_intf.probe_missing_values()
+
+
+class TestDpdkNode(unittest.TestCase):
+
+ INTERFACES = [
+ {'name': 'name1',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci10',
+ }},
+ {'name': 'name2',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci2',
+ }},
+ {'name': 'name3',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'some-pci1',
+ }},
+ ]
+
+ def test_probe_dpdk_drivers(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ interfaces = [
+ {'name': 'name1',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci10',
+ }},
+ {'name': 'name2',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'pci2',
+ }},
+ {'name': 'name3',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ 'vpci': 'some-pci1',
+ }},
+ ]
+
+ dpdk_node = DpdkNode(NAME, interfaces, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+
+ dpdk_helper.probe_real_kernel_drivers = mock.Mock()
+ dpdk_helper.real_kernel_interface_driver_map = {
+ 'pci1': 'driver1',
+ 'pci2': 'driver2',
+ 'pci3': 'driver3',
+ 'pci4': 'driver1',
+ 'pci6': 'driver3',
+ }
+
+ dpdk_node._probe_dpdk_drivers()
+ self.assertNotIn('driver', interfaces[0]['virtual-interface'])
+ self.assertEqual(interfaces[1]['virtual-interface']['driver'], 'driver2')
+ self.assertEqual(interfaces[2]['virtual-interface']['driver'], 'driver1')
+
+ def test_check(self):
+ def update():
+ if not mock_force_rebind.called:
+ raise exceptions.IncorrectConfig(error_msg='')
+
+ interfaces[0]['virtual-interface'].update({
+ 'vpci': '0000:01:02.1',
+ 'local_ip': '10.20.30.40',
+ 'netmask': '255.255.0.0',
+ 'driver': 'ixgbe',
+ })
+
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ interfaces = [
+ {'name': 'name1',
+ 'virtual-interface': {
+ 'local_mac': 404,
+ }},
+ ]
+
+ dpdk_node = DpdkNode(NAME, interfaces, mock_ssh_helper)
+ dpdk_node._probe_missing_values = mock_probe_missing = mock.Mock(side_effect=update)
+ dpdk_node._force_rebind = mock_force_rebind = mock.Mock()
+
+ self.assertIsNone(dpdk_node.check())
+ self.assertEqual(mock_probe_missing.call_count, 2)
+
+ @mock.patch('yardstick.network_services.helpers.dpdkbindnic_helper.DpdkInterface')
+ def test_check_negative(self, mock_intf_type):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ mock_intf_type().check.side_effect = exceptions.SSHError
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+
+ with self.assertRaises(exceptions.IncorrectSetup):
+ dpdk_node.check()
+
+ def test_probe_netdevs(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ expected = {'key1': 500, 'key2': 'hello world'}
+ update = {'key1': 1000, 'key3': []}
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+ dpdk_helper.find_net_devices = mock.Mock(side_effect=[expected, update])
+
+ self.assertDictEqual(dpdk_node.netdevs, {})
+ dpdk_node._probe_netdevs()
+ self.assertDictEqual(dpdk_node.netdevs, expected)
+
+ expected = {'key1': 1000, 'key2': 'hello world', 'key3': []}
+ dpdk_node._probe_netdevs()
+ self.assertDictEqual(dpdk_node.netdevs, expected)
+
+ def test_probe_netdevs_setup_negative(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+ dpdk_helper.find_net_devices = mock.Mock(side_effect=DpdkBindHelperException)
+
+ with self.assertRaises(DpdkBindHelperException):
+ dpdk_node._probe_netdevs()
+
+ def test_force_rebind(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_node = DpdkNode(NAME, self.INTERFACES, mock_ssh_helper)
+ dpdk_helper = dpdk_node.dpdk_helper
+ dpdk_helper.force_dpdk_rebind = mock_helper_func = mock.Mock()
+
+ dpdk_node._force_rebind()
+ mock_helper_func.assert_called_once()
+
+
+class TestDpdkBindHelper(unittest.TestCase):
+ bin_path = "/opt/nsb_bin"
+ EXAMPLE_OUTPUT = """
+
+Network devices using DPDK-compatible driver
+============================================
+0000:00:04.0 'Virtio network device' drv=igb_uio unused=
+0000:00:05.0 'Virtio network device' drv=igb_uio unused=
+
+Network devices using kernel driver
+===================================
+0000:00:03.0 'Virtio network device' if=ens3 drv=virtio-pci unused=igb_uio *Active*
+
+Other network devices
+=====================
+<none>
+
+Crypto devices using DPDK-compatible driver
+===========================================
+<none>
+
+Crypto devices using kernel driver
+==================================
+<none>
+
+Other crypto devices
+====================
+<none>
+"""
+
+ PARSED_EXAMPLE = {
+ NETWORK_DPDK: [
+ {'active': False,
+ 'dev_type': 'Virtio network device',
+ 'driver': 'igb_uio',
+ 'iface': None,
+ 'unused': '',
+ 'vpci': '0000:00:04.0',
+ },
+ {'active': False,
+ 'dev_type': 'Virtio network device',
+ 'driver': 'igb_uio',
+ 'iface': None,
+ 'unused': '',
+ 'vpci': '0000:00:05.0',
+ }
+ ],
+ NETWORK_KERNEL: [
+ {'active': True,
+ 'dev_type': 'Virtio network device',
+ 'driver': 'virtio-pci',
+ 'iface': 'ens3',
+ 'unused': 'igb_uio',
+ 'vpci': '0000:00:03.0',
+ }
+ ],
+ CRYPTO_KERNEL: [],
+ CRYPTO_DPDK: [],
+ NETWORK_OTHER: [],
+ CRYPTO_OTHER: [],
+ }
+
+ CLEAN_STATUS = {
+ NETWORK_KERNEL: [],
+ NETWORK_DPDK: [],
+ CRYPTO_KERNEL: [],
+ CRYPTO_DPDK: [],
+ NETWORK_OTHER: [],
+ CRYPTO_OTHER: [],
+ }
+
+ ONE_INPUT_LINE = ("0000:00:03.0 'Virtio network device' if=ens3 "
+ "drv=virtio-pci unused=igb_uio *Active*")
+
+ ONE_INPUT_LINE_PARSED = [{
+ 'vpci': '0000:00:03.0',
+ 'dev_type': 'Virtio network device',
+ 'iface': 'ens3',
+ 'driver': 'virtio-pci',
+ 'unused': 'igb_uio',
+ 'active': True,
+ }]
+
+ def test___init__(self):
+ conn = mock.Mock()
+ conn.provision_tool = mock.Mock(return_value='path_to_tool')
+ conn.join_bin_path.return_value = os.path.join(self.bin_path, DpdkBindHelper.DPDK_DEVBIND)
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ self.assertEqual(conn, dpdk_bind_helper.ssh_helper)
+ self.assertEqual(self.CLEAN_STATUS, dpdk_bind_helper.dpdk_status)
+ self.assertIsNone(dpdk_bind_helper.status_nic_row_re)
+ self.assertEqual(dpdk_bind_helper.dpdk_devbind,
+ os.path.join(self.bin_path, dpdk_bind_helper.DPDK_DEVBIND))
+ self.assertIsNone(dpdk_bind_helper._status_cmd_attr)
+
+ def test__dpdk_execute(self):
+ conn = mock.Mock()
+ conn.execute = mock.Mock(return_value=(0, 'output', 'error'))
+ conn.provision_tool = mock.Mock(return_value='tool_path')
+ dpdk_bind_helper = DpdkBindHelper(conn)
+ self.assertEqual((0, 'output', 'error'), dpdk_bind_helper._dpdk_execute('command'))
+
+ def test__dpdk_execute_failure(self):
+ conn = mock.Mock()
+ conn.execute = mock.Mock(return_value=(1, 'output', 'error'))
+ conn.provision_tool = mock.Mock(return_value='tool_path')
+ dpdk_bind_helper = DpdkBindHelper(conn)
+ with self.assertRaises(DpdkBindHelperException):
+ dpdk_bind_helper._dpdk_execute('command')
+
+ def test__addline(self):
+ conn = mock.Mock()
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ dpdk_bind_helper._add_line(NETWORK_KERNEL, self.ONE_INPUT_LINE)
+
+ self.assertIsNotNone(dpdk_bind_helper.dpdk_status)
+ self.assertEqual(self.ONE_INPUT_LINE_PARSED, dpdk_bind_helper.dpdk_status[NETWORK_KERNEL])
+
+ def test__switch_active_dict_by_header(self):
+ line = "Crypto devices using DPDK-compatible driver"
+ olddict = 'olddict'
+ self.assertEqual(CRYPTO_DPDK, DpdkBindHelper._switch_active_dict(line, olddict))
+
+ def test__switch_active_dict_by_header_empty(self):
+ line = "<none>"
+ olddict = 'olddict'
+ self.assertEqual(olddict, DpdkBindHelper._switch_active_dict(line, olddict))
+
+ def test_parse_dpdk_status_output(self):
+ conn = mock.Mock()
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ dpdk_bind_helper._parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
+
+ self.maxDiff = None
+ self.assertEqual(self.PARSED_EXAMPLE, dpdk_bind_helper.dpdk_status)
+
+ def test_kernel_bound_pci_addresses(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ expected = ['a', 'b', 3]
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+ dpdk_helper.dpdk_status = {
+ NETWORK_DPDK: [{'vpci': 4}, {'vpci': 5}, {'vpci': 'g'}],
+ NETWORK_KERNEL: [{'vpci': 'a'}, {'vpci': 'b'}, {'vpci': 3}],
+ CRYPTO_DPDK: [],
+ }
+
+ result = dpdk_helper.kernel_bound_pci_addresses
+ self.assertEqual(result, expected)
+
+ def test_find_net_devices_negative(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 1, 'error', 'debug'
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+
+ self.assertDictEqual(dpdk_helper.find_net_devices(), {})
+
+ def test_read_status(self):
+ conn = mock.Mock()
+ conn.execute = mock.Mock(return_value=(0, self.EXAMPLE_OUTPUT, ''))
+ conn.provision_tool = mock.Mock(return_value='path_to_tool')
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ self.assertEqual(self.PARSED_EXAMPLE, dpdk_bind_helper.read_status())
+
+ def test__get_bound_pci_addresses(self):
+ conn = mock.Mock()
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ dpdk_bind_helper._parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
+
+ self.assertEqual(['0000:00:04.0', '0000:00:05.0'],
+ dpdk_bind_helper._get_bound_pci_addresses(NETWORK_DPDK))
+ self.assertEqual(['0000:00:03.0'],
+ dpdk_bind_helper._get_bound_pci_addresses(NETWORK_KERNEL))
+
+ def test_interface_driver_map(self):
+ conn = mock.Mock()
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ dpdk_bind_helper._parse_dpdk_status_output(self.EXAMPLE_OUTPUT)
+
+ self.assertEqual({'0000:00:04.0': 'igb_uio',
+ '0000:00:03.0': 'virtio-pci',
+ '0000:00:05.0': 'igb_uio',
+ },
+ dpdk_bind_helper.interface_driver_map)
+
+ def test_bind(self):
+ conn = mock.Mock()
+ conn.execute = mock.Mock(return_value=(0, '', ''))
+ conn.join_bin_path.return_value = os.path.join(self.bin_path, DpdkBindHelper.DPDK_DEVBIND)
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+ dpdk_bind_helper.read_status = mock.Mock()
+
+ dpdk_bind_helper.bind(['0000:00:03.0', '0000:00:04.0'], 'my_driver')
+
+ conn.execute.assert_called_with('sudo /opt/nsb_bin/dpdk-devbind.py --force '
+ '-b my_driver 0000:00:03.0 0000:00:04.0')
+ dpdk_bind_helper.read_status.assert_called_once()
+
+ def test_bind_single_pci(self):
+ conn = mock.Mock()
+ conn.execute = mock.Mock(return_value=(0, '', ''))
+ conn.join_bin_path.return_value = os.path.join(self.bin_path, DpdkBindHelper.DPDK_DEVBIND)
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+ dpdk_bind_helper.read_status = mock.Mock()
+
+ dpdk_bind_helper.bind('0000:00:03.0', 'my_driver')
+
+ conn.execute.assert_called_with('sudo /opt/nsb_bin/dpdk-devbind.py --force '
+ '-b my_driver 0000:00:03.0')
+ dpdk_bind_helper.read_status.assert_called_once()
+
+ def test_rebind_drivers(self):
+ conn = mock.Mock()
+
+ dpdk_bind_helper = DpdkBindHelper(conn)
+
+ dpdk_bind_helper.bind = mock.Mock()
+ dpdk_bind_helper.used_drivers = {
+ 'd1': ['0000:05:00.0'],
+ 'd3': ['0000:05:01.0', '0000:05:02.0'],
+ }
+
+ dpdk_bind_helper.rebind_drivers()
+
+ dpdk_bind_helper.bind.assert_any_call(['0000:05:00.0'], 'd1', True)
+ dpdk_bind_helper.bind.assert_any_call(['0000:05:01.0', '0000:05:02.0'], 'd3', True)
+
+ def test_save_used_drivers(self):
+ conn = mock.Mock()
+ dpdk_bind_helper = DpdkBindHelper(conn)
+ dpdk_bind_helper.dpdk_status = self.PARSED_EXAMPLE
+
+ dpdk_bind_helper.save_used_drivers()
+
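+ # used_drivers is expected to group the PCI addresses from PARSED_EXAMPLE
+ # by the driver they are currently bound to.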
+ expected = {
+ 'igb_uio': ['0000:00:04.0', '0000:00:05.0'],
+ 'virtio-pci': ['0000:00:03.0'],
+ }
+
+ self.assertDictEqual(expected, dpdk_bind_helper.used_drivers)
+
+ def test_force_dpdk_rebind(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper, 'driver2')
+ dpdk_helper.dpdk_status = {
+ NETWORK_DPDK: [
+ {
+ 'vpci': 'pci1',
+ },
+ {
+ 'vpci': 'pci3',
+ },
+ {
+ 'vpci': 'pci6',
+ },
+ {
+ 'vpci': 'pci3',
+ },
+ ]
+ }
+ dpdk_helper.real_kernel_interface_driver_map = {
+ 'pci1': 'real_driver1',
+ 'pci2': 'real_driver2',
+ 'pci3': 'real_driver1',
+ 'pci4': 'real_driver4',
+ 'pci6': 'real_driver6',
+ }
+ dpdk_helper.load_dpdk_driver = mock.Mock()
+ dpdk_helper.read_status = mock.Mock()
+ dpdk_helper.save_real_kernel_interface_driver_map = mock.Mock()
+ dpdk_helper.save_used_drivers = mock.Mock()
+ dpdk_helper.bind = mock_bind = mock.Mock()
+
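+ # The DPDK-bound devices resolve to two distinct real kernel drivers
+ # (real_driver1 for pci1/pci3, real_driver6 for pci6), so bind is
+ # expected to be called once per driver.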
+ dpdk_helper.force_dpdk_rebind()
+ self.assertEqual(mock_bind.call_count, 2)
+
+ def test_save_real_kernel_drivers(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.return_value = 0, '', ''
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+ dpdk_helper.real_kernel_drivers = {
+ 'abc': '123',
+ }
+ dpdk_helper.real_kernel_interface_driver_map = {
+ 'abc': 'AAA',
+ 'def': 'DDD',
+ 'abs': 'AAA',
+ 'ghi': 'GGG',
+ }
+
+ # save_used_drivers must be called before save_real_kernel_drivers can
+ # be called
+ with self.assertRaises(AttributeError):
+ dpdk_helper.save_real_kernel_drivers()
+
+ dpdk_helper.save_used_drivers()
+
+ expected_used_drivers = {
+ 'AAA': ['abc', 'abs'],
+ 'DDD': ['def'],
+ 'GGG': ['ghi'],
+ }
+ dpdk_helper.save_real_kernel_drivers()
+ self.assertDictEqual(dpdk_helper.used_drivers, expected_used_drivers)
+ self.assertDictEqual(dpdk_helper.real_kernel_drivers, {})
+
+ def test_get_real_kernel_driver(self):
+ mock_ssh_helper = mock.Mock()
+ mock_ssh_helper.execute.side_effect = [
+ (0, 'non-matching text', ''),
+ (0, 'pre Kernel modules: real_driver1', ''),
+ (0, 'before Ethernet middle Virtio network device after', ''),
+ ]
+
+ dpdk_helper = DpdkBindHelper(mock_ssh_helper)
+
+ self.assertIsNone(dpdk_helper.get_real_kernel_driver('abc'))
+ self.assertEqual(dpdk_helper.get_real_kernel_driver('abc'), 'real_driver1')
+ self.assertEqual(dpdk_helper.get_real_kernel_driver('abc'), DpdkBindHelper.VIRTIO_DRIVER)
diff --git a/yardstick/tests/unit/network_services/helpers/test_iniparser.py b/yardstick/tests/unit/network_services/helpers/test_iniparser.py
new file mode 100644
index 000000000..1a09f0761
--- /dev/null
+++ b/yardstick/tests/unit/network_services/helpers/test_iniparser.py
@@ -0,0 +1,223 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+from contextlib import contextmanager
+import mock
+
+from yardstick.tests import STL_MOCKS
+
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.helpers.iniparser import ParseError
+ from yardstick.network_services.helpers.iniparser import LineParser
+ from yardstick.network_services.helpers.iniparser import BaseParser
+ from yardstick.network_services.helpers.iniparser import ConfigParser
+
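+# PARSE_TEXT_1 exercises the main parser features: key=value and key: value
+# assignments, multi-line continuations, quoted values, inline and full-line
+# comments, keys without a value, empty values, and a section ([section1])
+# that is reopened later in the file.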
+PARSE_TEXT_1 = """\
+
+[section1]
+key1=value1
+list1: value2
+ value3
+ value4
+key3='single quote value' ; comment here
+key4=
+
+[section2] ; comment with #2 other symbol
+# here is a comment line
+list2: value5
+key with no value # mixed comment ; symbols
+; another comment line
+key5=
+
+[section1] ; reopen a section!
+key2="double quote value"
+"""
+
+PARSE_TEXT_2 = """\
+[section1]
+list1 = item1
+ item2
+ ended by eof"""
+
+PARSE_TEXT_BAD_1 = """\
+key1=value1
+"""
+
+PARSE_TEXT_BAD_2 = """\
+[section1
+"""
+
+PARSE_TEXT_BAD_3 = """\
+[]
+"""
+
+PARSE_TEXT_BAD_4 = """\
+[section1]
+ bad continuation
+"""
+
+PARSE_TEXT_BAD_5 = """\
+[section1]
+=value with no key
+"""
+
+
+class TestParseError(unittest.TestCase):
+
+ def test___str__(self):
+ error = ParseError('a', 2, 'c')
+ self.assertEqual(str(error), "at line 2, a: 'c'")
+
+
+class TestLineParser(unittest.TestCase):
+
+ def test___repr__(self):
+ line_parser = LineParser('', 101)
+ self.assertIsNotNone(repr(line_parser))
+
+ def test_error_invalid_assignment(self):
+ line_parser = LineParser('', 101)
+ self.assertIsNotNone(line_parser.error_invalid_assignment())
+
+
+class TestBaseParser(unittest.TestCase):
+
+ @staticmethod
+ def make_open(text_blob):
+ @contextmanager
+ def internal_open(*args):
+ yield text_blob.split('\n')
+
+ return internal_open
+
+ def test_parse(self):
+ parser = BaseParser()
+ parser.parse()
+
+ def test_parse_empty_string(self):
+ parser = BaseParser()
+ self.assertIsNone(parser.parse(''))
+
+ def test_not_implemented_methods(self):
+ parser = BaseParser()
+
+ with self.assertRaises(NotImplementedError):
+ parser.assignment('key', 'value', LineParser('', 100))
+
+ with self.assertRaises(NotImplementedError):
+ parser.new_section('section')
+
+ with self.assertRaises(NotImplementedError):
+ parser.comment('comment')
+
+
+class TestConfigParser(unittest.TestCase):
+
+ @staticmethod
+ def make_open(text_blob):
+ @contextmanager
+ def internal_open(*args):
+ yield text_blob.split('\n')
+
+ return internal_open
+
+ @mock.patch('yardstick.network_services.helpers.iniparser.open')
+ def test_parse(self, mock_open):
+ mock_open.side_effect = self.make_open(PARSE_TEXT_1)
+
+ existing_data = [['section0', [['key0', 'value0']]]]
+ config_parser = ConfigParser('my_file', existing_data)
+ config_parser.parse()
+
+ expected = [
+ [
+ 'section0',
+ [
+ ['key0', 'value0'],
+ ],
+ ],
+ [
+ 'section1',
+ [
+ ['key1', 'value1'],
+ ['list1', 'value2\nvalue3\nvalue4'],
+ ['key3', 'single quote value'],
+ ['key4', ''],
+ ['key2', 'double quote value'],
+ ],
+ ],
+ [
+ 'section2',
+ [
+ ['list2', 'value5'],
+ ['key with no value', '@'],
+ ['key5', ''],
+ ],
+ ],
+ ]
+
+ self.assertEqual(config_parser.sections, expected)
+ self.assertIsNotNone(config_parser.find_section('section1'))
+ self.assertIsNone(config_parser.find_section('section3'))
+ self.assertEqual(config_parser.find_section_index('section1'), 1)
+ self.assertEqual(config_parser.find_section_index('section3'), -1)
+
+ @mock.patch('yardstick.network_services.helpers.iniparser.open')
+ def test_parse_2(self, mock_open):
+ mock_open.side_effect = self.make_open(PARSE_TEXT_2)
+
+ config_parser = ConfigParser('my_file')
+ config_parser.parse()
+
+ expected = [
+ [
+ 'section1',
+ [
+ ['list1', 'item1\nitem2\nended by eof'],
+ ],
+ ],
+ ]
+
+ self.assertEqual(config_parser.sections, expected)
+
+ @mock.patch('yardstick.network_services.helpers.iniparser.open')
+ def test_parse_negative(self, mock_open):
+ bad_text_dict = {
+ 'no section': PARSE_TEXT_BAD_1,
+ 'incomplete section': PARSE_TEXT_BAD_2,
+ 'empty section name': PARSE_TEXT_BAD_3,
+ 'bad_continuation': PARSE_TEXT_BAD_4,
+ 'value with no key': PARSE_TEXT_BAD_5,
+ }
+
+ for bad_reason, bad_text in bad_text_dict.items():
+ mock_open.side_effect = self.make_open(bad_text)
+
+ config_parser = ConfigParser('my_file', [])
+
+ try:
+ # TODO: replace with assertRaises, when the UT framework supports
+ # advanced messages when exceptions fail to occur
+ config_parser.parse()
+ except ParseError:
+ pass
+ else:
+ self.fail('\n'.join([bad_reason, bad_text, str(config_parser.sections)]))
diff --git a/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py b/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
new file mode 100644
index 000000000..6d5e1da60
--- /dev/null
+++ b/yardstick/tests/unit/network_services/helpers/test_samplevnf_helper.py
@@ -0,0 +1,1104 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import mock
+import os
+import six
+import unittest
+
+from yardstick.network_services.helpers import samplevnf_helper
+from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
+
+
+class TestPortPairs(unittest.TestCase):
+ def test_port_pairs_list(self):
+ vnfd = TestMultiPortConfig.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ interfaces = vnfd['vdu'][0]['external-interface']
+ port_pairs = samplevnf_helper.PortPairs(interfaces)
+ self.assertEqual(port_pairs.port_pair_list, [("xe0", "xe1")])
+
+ def test_valid_networks(self):
+ vnfd = TestMultiPortConfig.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ interfaces = vnfd['vdu'][0]['external-interface']
+ port_pairs = samplevnf_helper.PortPairs(interfaces)
+ self.assertEqual(port_pairs.valid_networks, [
+ ("uplink_0", "downlink_0")])
+
+ def test_all_ports(self):
+ vnfd = TestMultiPortConfig.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ interfaces = vnfd['vdu'][0]['external-interface']
+ port_pairs = samplevnf_helper.PortPairs(interfaces)
+ self.assertEqual(set(port_pairs.all_ports), {"xe0", "xe1"})
+
+ def test_uplink_ports(self):
+ vnfd = TestMultiPortConfig.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ interfaces = vnfd['vdu'][0]['external-interface']
+ port_pairs = samplevnf_helper.PortPairs(interfaces)
+ self.assertEqual(port_pairs.uplink_ports, ["xe0"])
+
+ def test_downlink_ports(self):
+ vnfd = TestMultiPortConfig.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ interfaces = vnfd['vdu'][0]['external-interface']
+ port_pairs = samplevnf_helper.PortPairs(interfaces)
+ self.assertEqual(port_pairs.downlink_ports, ["xe1"])
+
+
+class TestMultiPortConfig(unittest.TestCase):
+
+ VNFD_0 = {'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [
+ {'virtual-interface':
+ {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'ifname': 'xe0',
+ 'local_iface_name': 'eth0',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'uplink_0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'ifname': 'xe1',
+ 'local_iface_name': 'eth1',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'downlink_0',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}
+ ]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'AclApproxVnf', 'name': 'VPEVnfSsh'}
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ]
+ }
+ }
+
+ def setUp(self):
+ self._mock_open = mock.patch.object(six.moves.builtins, 'open')
+ self.mock_open = self._mock_open.start()
+ self._mock_config_parser = mock.patch.object(
+ samplevnf_helper, 'ConfigParser')
+ self.mock_config_parser = self._mock_config_parser.start()
+
+ self.addCleanup(self._cleanup)
+
+ def _cleanup(self):
+ self._mock_open.stop()
+ self._mock_config_parser.stop()
+
+ def test_validate_ip_and_prefixlen(self):
+ ip_addr, prefix_len = (
+ samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
+ '10.20.30.40', '16'))
+ self.assertEqual(ip_addr, '10.20.30.40')
+ self.assertEqual(prefix_len, 16)
+
+ ip_addr, prefix_len = (
+ samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
+ '::1', '40'))
+ self.assertEqual(ip_addr, '0000:0000:0000:0000:0000:0000:0000:0001')
+ self.assertEqual(prefix_len, 40)
+
+ def test_validate_ip_and_prefixlen_negative(self):
+ with self.assertRaises(AttributeError):
+ samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen('', '')
+
+ with self.assertRaises(AttributeError):
+ samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
+ '10.20.30.400', '16')
+
+ with self.assertRaises(AttributeError):
+ samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
+ '10.20.30.40', '33')
+
+ with self.assertRaises(AttributeError):
+ samplevnf_helper.MultiPortConfig.validate_ip_and_prefixlen(
+ '::1', '129')
+
+ @mock.patch.object(os.path, 'isfile', return_value=False)
+ def test___init__(self, *args):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ self.assertEqual(0, opnfv_vnf.swq)
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ self.assertEqual(0, opnfv_vnf.swq)
+
+ def test_update_timer(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ self.assertIsNone(opnfv_vnf.update_timer())
+
+ def test_generate_script(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = VnfdHelper(self.VNFD_0)
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'arp_route_tbl': '', 'arp_route_tbl6': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ self.assertIsNotNone(opnfv_vnf.generate_script(self.VNFD))
+ opnfv_vnf.lb_config = 'HW'
+ self.assertIsNotNone(opnfv_vnf.generate_script(self.VNFD))
+
+ def test_generate_script_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.vnf_type = 'ACL'
+ opnfv_vnf.generate_link_config = mock.Mock()
+ opnfv_vnf.generate_arp_config = mock.Mock()
+ opnfv_vnf.generate_arp_config6 = mock.Mock()
+ opnfv_vnf.generate_action_config = mock.Mock()
+ opnfv_vnf.generate_rule_config = mock.Mock()
+ self.assertIsNotNone(opnfv_vnf.generate_script_data())
+
+ def test_generate_rule_config(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'ACL'
+ opnfv_vnf.get_ports_gateway = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway6 = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.rules = ''
+ self.assertIsNotNone(opnfv_vnf.generate_rule_config())
+ opnfv_vnf.rules = 'new'
+ self.assertIsNotNone(opnfv_vnf.generate_rule_config())
+
+ def test_generate_action_config(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.get_ports_gateway = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway6 = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ self.assertIsNotNone(opnfv_vnf.generate_action_config())
+
+ def test_generate_arp_config6(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.get_ports_gateway = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway6 = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.interfaces = mock.MagicMock()
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ self.assertIsNotNone(opnfv_vnf.generate_arp_config6())
+
+ def test_generate_arp_config(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.get_ports_gateway = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.get_ports_gateway6 = mock.Mock(return_value=u'1.1.1.1')
+ opnfv_vnf.get_netmask_gateway6 = mock.Mock(
+ return_value=u'255.255.255.0')
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.interfaces = mock.MagicMock()
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ self.assertIsNotNone(opnfv_vnf.generate_arp_config())
+
+ def test_get_ports_gateway(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.interfaces = mock.MagicMock()
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.assertIsNotNone(opnfv_vnf.get_ports_gateway('xe0'))
+
+ def test_get_ports_gateway6(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.interfaces = mock.MagicMock()
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.assertIsNotNone(opnfv_vnf.get_ports_gateway6('xe0'))
+
+ def test_get_netmask_gateway(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.interfaces = mock.MagicMock()
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.assertIsNotNone(opnfv_vnf.get_netmask_gateway('xe0'))
+
+ def test_get_netmask_gateway6(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.interfaces = mock.MagicMock()
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.assertIsNotNone(opnfv_vnf.get_netmask_gateway6('xe0'))
+
+ def test_generate_link_config(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.get_port_pairs = mock.Mock()
+ opnfv_vnf.vnf_type = 'VFW'
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.all_ports = ['32', '1', '987']
+ opnfv_vnf.validate_ip_and_prefixlen = mock.Mock(
+ return_value=('10.20.30.40', 16))
+
+ result = opnfv_vnf.generate_link_config()
+ self.assertEqual(len(result.splitlines()), 9)
+
+ def test_generate_config(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.get_config_tpl_data = mock.MagicMock()
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.update_write_parser = mock.MagicMock()
+ opnfv_vnf.generate_script_data = \
+ mock.Mock(return_value={'link_config': 0, 'arp_config': '',
+ 'arp_config6': '', 'actions': '',
+ 'rules': ''})
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.get_ports_gateway6 = mock.Mock()
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.generate_lb_to_port_pair_mapping = mock.Mock()
+ opnfv_vnf.generate_config_data = mock.Mock()
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.is_openstack = True
+ self.assertIsNone(opnfv_vnf.generate_config())
+ opnfv_vnf.is_openstack = False
+ self.assertIsNone(opnfv_vnf.generate_config())
+
+ def test_get_config_tpl_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=True)
+ opnfv_vnf.read_parser.get = mock.Mock(return_value='filename')
+
+ self.assertIsNotNone(opnfv_vnf.get_config_tpl_data('filename'))
+
+ def test_get_txrx_tpl_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=True)
+ opnfv_vnf.read_parser.get = mock.Mock(return_value='filename')
+
+ self.assertIsNotNone(opnfv_vnf.get_txrx_tpl_data('filename'))
+
+ def test_init_write_parser_template(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=True)
+ opnfv_vnf.read_parser.get = mock.Mock(return_value='filename')
+
+ self.assertIsNone(opnfv_vnf.init_write_parser_template('filename'))
+ opnfv_vnf.write_parser.add_section = mock.MagicMock()
+ opnfv_vnf.read_parser.item = mock.Mock(return_value=[1, 2, 3])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=False)
+ opnfv_vnf.write_parser.set = mock.Mock()
+ self.assertIsNone(opnfv_vnf.init_write_parser_template('filename'))
+
+ def test_init_write_parser_template_2(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ self.assertIsNone(opnfv_vnf.init_write_parser_template('filename'))
+
+ def test_update_write_parser(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ self.assertIsNone(opnfv_vnf.update_write_parser({'filename': 1}))
+
+ def test_get_worker_threads(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ result = opnfv_vnf.get_worker_threads(1)
+ self.assertEqual(1, result)
+ opnfv_vnf.worker_config = '2t'
+ result = opnfv_vnf.get_worker_threads(2)
+ self.assertEqual(2, result)
+ opnfv_vnf.worker_config = '2t'
+ result = opnfv_vnf.get_worker_threads(3)
+ self.assertEqual(2, result)
+
+ # TODO(elfoley): Split this test into smaller tests
+ def test_generate_next_core_id(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ result = opnfv_vnf.generate_next_core_id()
+ self.assertIsNone(result)
+ opnfv_vnf.worker_config = '2t'
+ opnfv_vnf.start_core = 'a'
+ self.assertRaises(ValueError, opnfv_vnf.generate_next_core_id)
+ opnfv_vnf.worker_config = '2t'
+ opnfv_vnf.start_core = 1
+ result = opnfv_vnf.generate_next_core_id()
+ self.assertIsNone(result)
+
+ def test_generate_lb_to_port_pair_mapping(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = VnfdHelper(self.VNFD_0)
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf._port_pairs = samplevnf_helper.PortPairs(vnfd_mock.interfaces)
+ opnfv_vnf.port_pair_list = opnfv_vnf._port_pairs.port_pair_list
+ result = opnfv_vnf.generate_lb_to_port_pair_mapping()
+ self.assertIsNone(result)
+ result = opnfv_vnf.set_priv_to_pub_mapping()
+ self.assertEqual('(0,1)', result)
+
+ def test_set_priv_que_handler(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = VnfdHelper(self.VNFD_0)
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ result = opnfv_vnf.set_priv_que_handler()
+ self.assertIsNone(result)
+
+ def test_generate_arp_route_tbl(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = ""
+ vnfd_mock = mock.MagicMock()
+ vnfd_mock.port_num.side_effect = ['32', '1', '987']
+ vnfd_mock.find_interface.side_effect = [
+ {
+ 'virtual-interface': {
+ 'dst_ip': '10.20.30.40',
+ 'netmask': '20',
+ },
+ },
+ {
+ 'virtual-interface': {
+ 'dst_ip': '10.200.30.40',
+ 'netmask': '24',
+ },
+ },
+ {
+ 'virtual-interface': {
+ 'dst_ip': '10.20.3.40',
+ 'netmask': '8',
+ },
+ },
+ ]
+
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.all_ports = [3, 2, 5]
+
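+ # Each expected routeadd entry pairs a mocked port number with the
+ # interface's dst_ip; the netmask prefix length is rendered as a hex mask
+ # (/20 -> 0xfffff000, /24 -> 0xffffff00, /8 -> 0xff000000).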
+ expected = 'routeadd net 32 10.20.30.40 0xfffff000\n' \
+ 'routeadd net 1 10.200.30.40 0xffffff00\n' \
+ 'routeadd net 987 10.20.3.40 0xff000000'
+ result = opnfv_vnf.generate_arp_route_tbl()
+ self.assertEqual(result, expected)
+
+ def test_generate_arpicmp_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ result = opnfv_vnf.generate_arpicmp_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.nfv_type = 'ovs'
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ result = opnfv_vnf.generate_arpicmp_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.nfv_type = 'openstack'
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ result = opnfv_vnf.generate_arpicmp_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.lb_config = 'HW'
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ result = opnfv_vnf.generate_arpicmp_data()
+ self.assertIsNotNone(result)
+
+ def test_generate_final_txrx_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.ports_len = 2
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.pktq_out_os = [1, 2]
+ result = opnfv_vnf.generate_final_txrx_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.nfv_type = 'openstack'
+ opnfv_vnf.pktq_out_os = [1, 2]
+ opnfv_vnf.lb_index = 1
+ result = opnfv_vnf.generate_final_txrx_data()
+ self.assertIsNotNone(result)
+
+ def test_generate_initial_txrx_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.ports_len = 2
+ result = opnfv_vnf.generate_initial_txrx_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.nfv_type = 'openstack'
+ opnfv_vnf.pktq_out_os = [1, 2]
+ result = opnfv_vnf.generate_initial_txrx_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.nfv_type = 'ovs'
+ opnfv_vnf.init_ovs = False
+ opnfv_vnf.ovs_pktq_out = ''
+ opnfv_vnf.pktq_out_os = [1, 2]
+ opnfv_vnf.lb_index = 1
+ result = opnfv_vnf.generate_initial_txrx_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.nfv_type = 'ovs'
+ opnfv_vnf.init_ovs = True
+ opnfv_vnf.pktq_out_os = [1, 2]
+ opnfv_vnf.ovs_pktq_out = ''
+ opnfv_vnf.lb_index = 1
+ result = opnfv_vnf.generate_initial_txrx_data()
+ self.assertIsNotNone(result)
+
+ def test_generate_lb_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.ports_len = 2
+ opnfv_vnf.prv_que_handler = 0
+ result = opnfv_vnf.generate_lb_data()
+ self.assertIsNotNone(result)
+
+ def test_generate_vnf_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.ports_len = 1
+ opnfv_vnf.pktq_out = ['1', '2']
+ opnfv_vnf.vnf_tpl = {'public_ip_port_range': '98164810',
+ 'vnf_set': '(2,4,5)'}
+ opnfv_vnf.prv_que_handler = 0
+ result = opnfv_vnf.generate_vnf_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.lb_config = 'HW'
+ opnfv_vnf.mul = 0.1
+ result = opnfv_vnf.generate_vnf_data()
+ self.assertIsNotNone(result)
+ opnfv_vnf.lb_config = 'HW'
+ opnfv_vnf.mul = 0.1
+ opnfv_vnf.vnf_type = 'ACL'
+ result = opnfv_vnf.generate_vnf_data()
+ self.assertIsNotNone(result)
+
+ def test_generate_config_data(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = VnfdHelper(self.VNFD_0)
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.ports_len = 1
+ opnfv_vnf.pktq_out = ['1', '2']
+ opnfv_vnf.prv_que_handler = 0
+ opnfv_vnf.init_write_parser_template = mock.Mock()
+ opnfv_vnf.arpicmp_tpl = mock.MagicMock()
+ opnfv_vnf.txrx_tpl = mock.MagicMock()
+ opnfv_vnf.loadb_tpl = mock.MagicMock()
+ opnfv_vnf.vnf_tpl = {'public_ip_port_range': '98164810 (1,65535)',
+ 'vnf_set': "(2,4,5)"}
+ opnfv_vnf.generate_vnf_data = mock.Mock(return_value={})
+ opnfv_vnf.update_write_parser = mock.Mock()
+ result = opnfv_vnf.generate_config_data()
+ self.assertIsNone(result)
+ opnfv_vnf.generate_final_txrx_data = mock.Mock()
+ opnfv_vnf.update_write_parser = mock.Mock()
+ result = opnfv_vnf.generate_config_data()
+ self.assertIsNone(result)
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.ports_len = 1
+ opnfv_vnf.pktq_out = ['1', '2']
+ opnfv_vnf.prv_que_handler = 0
+ opnfv_vnf.init_write_parser_template = mock.Mock()
+ opnfv_vnf.arpicmp_tpl = mock.MagicMock()
+ opnfv_vnf.txrx_tpl = mock.MagicMock()
+ opnfv_vnf.loadb_tpl = mock.MagicMock()
+ opnfv_vnf.vnf_type = 'CGNAPT'
+ opnfv_vnf.update_timer = mock.Mock()
+ opnfv_vnf.port_pair_list = [("xe0", "xe1"), ("xe0", "xe2")]
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.generate_arpicmp_data = mock.Mock()
+ result = opnfv_vnf.generate_config_data()
+ self.assertIsNone(result)
+
+ def test_init_eal(self):
+ topology_file = mock.Mock()
+ config_tpl = mock.Mock()
+ tmp_file = mock.Mock()
+ vnfd_mock = mock.MagicMock()
+ opnfv_vnf = samplevnf_helper.MultiPortConfig(
+ topology_file, config_tpl, tmp_file, vnfd_mock)
+ opnfv_vnf.socket = 0
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.port_pair_list = [("xe0", "xe1")]
+ opnfv_vnf.port_pairs = [("xe0", "xe1")]
+ opnfv_vnf.txrx_pipeline = ''
+ opnfv_vnf.rules = ''
+ opnfv_vnf.write_parser = mock.MagicMock()
+ opnfv_vnf.read_parser = mock.MagicMock()
+ opnfv_vnf.read_parser.sections = mock.Mock(return_value=['MASTER'])
+ opnfv_vnf.read_parser.has_option = mock.Mock(return_value=[])
+ opnfv_vnf.write_parser.set = mock.Mock()
+ opnfv_vnf.write_parser.add_section = mock.Mock()
+ opnfv_vnf.read_parser.items = mock.MagicMock()
+ opnfv_vnf.pipeline_counter = 0
+ opnfv_vnf.worker_config = '1t'
+ opnfv_vnf.start_core = 0
+ opnfv_vnf.lb_count = 1
+ opnfv_vnf.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ opnfv_vnf.interfaces = opnfv_vnf.vnfd['vdu'][0]['external-interface']
+ opnfv_vnf.lb_to_port_pair_mapping = [0, 1]
+ opnfv_vnf.lb_index = 1
+ opnfv_vnf.ports_len = 1
+ opnfv_vnf.pktq_out = ['1', '2']
+ opnfv_vnf.prv_que_handler = 0
+ opnfv_vnf.init_write_parser_template = mock.Mock()
+ opnfv_vnf.arpicmp_tpl = mock.MagicMock()
+ opnfv_vnf.txrx_tpl = mock.MagicMock()
+ opnfv_vnf.loadb_tpl = mock.MagicMock()
+ opnfv_vnf.vnf_tpl = {'public_ip_port_range': '98164810 (1,65535)'}
+ opnfv_vnf.generate_vnf_data = mock.Mock(return_value={})
+ opnfv_vnf.update_write_parser = mock.Mock()
+ opnfv_vnf.tmp_file = "/tmp/config"
+ result = opnfv_vnf.init_eal()
+ self.assertIsNone(result)
diff --git a/yardstick/tests/unit/network_services/libs/__init__.py b/yardstick/tests/unit/network_services/libs/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/libs/__init__.py
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/__init__.py
diff --git a/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
new file mode 100644
index 000000000..34afa3d5b
--- /dev/null
+++ b/yardstick/tests/unit/network_services/libs/ixia_libs/test_ixnet_api.py
@@ -0,0 +1,512 @@
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+import IxNetwork
+import unittest
+
+from yardstick.common import exceptions
+from yardstick.network_services.libs.ixia_libs.ixnet import ixnet_api
+
+
+UPLINK = 'uplink'
+DOWNLINK = 'downlink'
+
+TRAFFIC_PARAMETERS = {
+ UPLINK: {
+ 'id': 1,
+ 'bidir': 'False',
+ 'duration': 60,
+ 'iload': '100',
+ 'outer_l2': {
+ 'framesize': {'64B': '25', '256B': '75'}
+ },
+ 'outer_l3': {
+ 'count': 512,
+ 'dscp': 0,
+ 'dstip4': '152.16.40.20',
+ 'proto': 'udp',
+ 'srcip4': '152.16.100.20',
+ 'ttl': 32
+ },
+ 'outer_l3v4': {
+ 'dscp': 0,
+ 'dstip4': '152.16.40.20',
+ 'proto': 'udp',
+ 'srcip4': '152.16.100.20',
+ 'ttl': 32
+ },
+ 'outer_l3v6': {
+ 'count': 1024,
+ 'dscp': 0,
+ 'dstip4': '152.16.100.20',
+ 'proto': 'udp',
+ 'srcip4': '152.16.40.20',
+ 'ttl': 32
+ },
+ 'outer_l4': {
+ 'dstport': '2001',
+ 'srcport': '1234'
+ },
+ 'traffic_type': 'continuous'
+ },
+ DOWNLINK: {
+ 'id': 2,
+ 'bidir': 'False',
+ 'duration': 60,
+ 'iload': '100',
+ 'outer_l2': {
+ 'framesize': {'128B': '35', '1024B': '65'}
+ },
+ 'outer_l3': {
+ 'count': 1024,
+ 'dscp': 0,
+ 'dstip4': '152.16.100.20',
+ 'proto': 'udp',
+ 'srcip4': '152.16.40.20',
+ 'ttl': 32
+ },
+ 'outer_l3v4': {
+ 'count': 1024,
+ 'dscp': 0,
+ 'dstip4': '152.16.100.20',
+ 'proto': 'udp',
+ 'srcip4': '152.16.40.20',
+ 'ttl': 32
+ },
+ 'outer_l3v6': {
+ 'count': 1024,
+ 'dscp': 0,
+ 'dstip4': '152.16.100.20',
+ 'proto': 'udp',
+ 'srcip4': '152.16.40.20',
+ 'ttl': 32
+ },
+ 'outer_l4': {
+ 'dstport': '1234',
+ 'srcport': '2001'
+ },
+ 'traffic_type': 'continuous'
+ }
+}
+
+
+class TestIxNextgen(unittest.TestCase):
+
+ def setUp(self):
+ self.ixnet = mock.Mock()
+ self.ixnet.execute = mock.Mock()
+ self.ixnet.getRoot.return_value = 'my_root'
+
+ def test_get_config(self):
+ tg_cfg = {
+ 'vdu': [
+ {
+ 'external-interface': [
+ {'virtual-interface': {'vpci': '0000:07:00.1'}},
+ {'virtual-interface': {'vpci': '0001:08:01.2'}}
+ ]
+ },
+ ],
+ 'mgmt-interface': {
+ 'ip': 'test1',
+ 'tg-config': {
+ 'dut_result_dir': 'test2',
+ 'version': 'test3',
+ 'ixchassis': 'test4',
+ 'tcl_port': 'test5',
+ },
+ }
+ }
+
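+ # The cards and ports lists appear to be derived from the interface vpci
+ # values ('0000:07:00.1' -> card '0000', port '07'); the remaining fields
+ # come from the mgmt-interface and tg-config entries.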
+ expected = {
+ 'machine': 'test1',
+ 'port': 'test5',
+ 'chassis': 'test4',
+ 'cards': ['0000', '0001'],
+ 'ports': ['07', '08'],
+ 'output_dir': 'test2',
+ 'version': 'test3',
+ 'bidir': True,
+ }
+
+ result = ixnet_api.IxNextgen.get_config(tg_cfg)
+ self.assertEqual(result, expected)
+
+ def test__get_config_element_by_flow_group_name(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_01'
+ output = ixnet_gen._get_config_element_by_flow_group_name(
+ 'flow_group_01')
+ self.assertEqual('traffic_item/configElement:flow_group_01', output)
+
+ def test__get_config_element_by_flow_group_name_no_match(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.side_effect = [['traffic_item'],
+ ['fg_01']]
+ ixnet_gen._ixnet.getAttribute.return_value = 'flow_group_02'
+ output = ixnet_gen._get_config_element_by_flow_group_name(
+ 'flow_group_01')
+ self.assertIsNone(output)
+
+ def test__get_stack_item(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = ['tcp1', 'tcp2', 'udp']
+ with mock.patch.object(
+ ixnet_gen, '_get_config_element_by_flow_group_name') as \
+ mock_get_cfg_element:
+ mock_get_cfg_element.return_value = 'cfg_element'
+ output = ixnet_gen._get_stack_item(mock.ANY, ixnet_api.PROTO_TCP)
+ self.assertEqual(['tcp1', 'tcp2'], output)
+
+ def test__get_stack_item_no_config_element(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(
+ ixnet_gen, '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ ixnet_gen._get_stack_item(mock.ANY, mock.ANY)
+
+ def test__get_field_in_stack_item(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ output = ixnet_gen._get_field_in_stack_item(mock.ANY, 'field2')
+ self.assertEqual('field2', output)
+
+ def test__get_field_in_stack_item_no_field_present(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = ['field1', 'field2']
+ with self.assertRaises(exceptions.IxNetworkFieldNotPresentInStackItem):
+ ixnet_gen._get_field_in_stack_item(mock.ANY, 'field3')
+
+ def test__parse_framesize(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ framesize = {'64B': '75', '512b': '25'}
+ output = ixnet_gen._parse_framesize(framesize)
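+        # _parse_framesize returns a flat [size, weight, size, weight, ...]
+        # list, so the loop checks the weight that follows each known size.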
+ for idx in range(len(framesize)):
+ if output[idx * 2] == 64:
+ self.assertEqual(75, output[idx * 2 + 1])
+ elif output[idx * 2] == 512:
+ self.assertEqual(25, output[idx * 2 + 1])
+ else:
+ raise self.failureException('Framesize (64, 512) not present')
+
+ @mock.patch.object(IxNetwork, 'IxNet')
+ def test_connect(self, mock_ixnet):
+ mock_ixnet.return_value = self.ixnet
+ ixnet_gen = ixnet_api.IxNextgen()
+ with mock.patch.object(ixnet_gen, 'get_config') as mock_config:
+ mock_config.return_value = {'machine': 'machine_fake',
+ 'port': 'port_fake',
+ 'version': 12345}
+ ixnet_gen.connect(mock.ANY)
+
+ self.ixnet.connect.assert_called_once_with(
+ 'machine_fake', '-port', 'port_fake', '-version', '12345')
+ mock_config.assert_called_once()
+
+ def test_connect_invalid_config_no_machine(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.get_config = mock.Mock(return_value={
+ 'port': 'port_fake',
+ 'version': '12345'})
+ self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet.connect.assert_not_called()
+
+ def test_connect_invalid_config_no_port(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.get_config = mock.Mock(return_value={
+ 'machine': 'machine_fake',
+ 'version': '12345'})
+ self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet.connect.assert_not_called()
+
+ def test_connect_invalid_config_no_version(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.get_config = mock.Mock(return_value={
+ 'machine': 'machine_fake',
+ 'port': 'port_fake'})
+ self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet.connect.assert_not_called()
+
+ def test_connect_no_config(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.get_config = mock.Mock(return_value={})
+ self.assertRaises(KeyError, ixnet_gen.connect, mock.ANY)
+ self.ixnet.connect.assert_not_called()
+
+ def test_clear_config(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.clear_config()
+ self.ixnet.execute.assert_called_once_with('newConfig')
+
+ @mock.patch.object(ixnet_api, 'log')
+ def test_assign_ports_2_ports(self, *args):
+ self.ixnet.getAttribute.side_effect = ['up', 'down']
+ config = {
+ 'chassis': '1.1.1.1',
+ 'cards': ['1', '2'],
+ 'ports': ['2', '2']}
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._cfg = config
+
+ self.assertIsNone(ixnet_gen.assign_ports())
+ self.assertEqual(self.ixnet.execute.call_count, 2)
+ self.assertEqual(self.ixnet.commit.call_count, 4)
+ self.assertEqual(self.ixnet.getAttribute.call_count, 2)
+
+ @mock.patch.object(ixnet_api, 'log')
+ def test_assign_ports_port_down(self, mock_log):
+ self.ixnet.getAttribute.return_value = 'down'
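+        # Every port reports link state 'down', which should only log a warning.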
+ config = {
+ 'chassis': '1.1.1.1',
+ 'cards': ['1', '2'],
+ 'ports': ['3', '4']}
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._cfg = config
+ ixnet_gen.assign_ports()
+ mock_log.warning.assert_called()
+
+ def test_assign_ports_no_config(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._cfg = {}
+ self.assertRaises(KeyError, ixnet_gen.assign_ports)
+
+ def test__create_traffic_item(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ self.ixnet.add.return_value = 'my_new_traffic_item'
+ self.ixnet.remapIds.return_value = ['my_traffic_item_id']
+
+ ixnet_gen._create_traffic_item()
+ self.ixnet.add.assert_called_once_with(
+ 'my_root/traffic', 'trafficItem')
+ self.ixnet.setMultiAttribute.assert_called_once_with(
+ 'my_new_traffic_item', '-name', 'RFC2544', '-trafficType', 'raw')
+ self.assertEqual(2, self.ixnet.commit.call_count)
+ self.ixnet.remapIds.assert_called_once_with('my_new_traffic_item')
+        self.ixnet.setAttribute.assert_called_once_with(
+            'my_traffic_item_id/tracking', '-trackBy', 'trafficGroupId0')
+
+ def test__create_flow_groups(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.ixnet.getList.side_effect = [['traffic_item'], ['1', '2']]
+ ixnet_gen.ixnet.add.side_effect = ['endp1', 'endp2']
+ ixnet_gen._create_flow_groups()
+ ixnet_gen.ixnet.add.assert_has_calls([
+ mock.call('traffic_item', 'endpointSet'),
+ mock.call('traffic_item', 'endpointSet')])
+ ixnet_gen.ixnet.setMultiAttribute.assert_has_calls([
+ mock.call('endp1', '-name', '1', '-sources', ['1/protocols'],
+ '-destinations', ['2/protocols']),
+ mock.call('endp2', '-name', '2', '-sources', ['2/protocols'],
+ '-destinations', ['1/protocols'])])
+
+ def test__append_protocol_to_stack(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+
+ ixnet_gen._append_procotol_to_stack('my_protocol', 'prev_element')
+ self.ixnet.execute.assert_called_with(
+ 'append', 'prev_element',
+ 'my_root/traffic/protocolTemplate:"my_protocol"')
+
+ def test__setup_config_elements(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen.ixnet.getList.side_effect = [['traffic_item'],
+ ['cfg_element']]
+ with mock.patch.object(ixnet_gen, '_append_procotol_to_stack') as \
+ mock_append_proto:
+ ixnet_gen._setup_config_elements()
+ mock_append_proto.assert_has_calls([
+ mock.call(ixnet_api.PROTO_UDP, 'cfg_element/stack:"ethernet-1"'),
+ mock.call(ixnet_api.PROTO_IPV4, 'cfg_element/stack:"ethernet-1"')])
+ ixnet_gen.ixnet.setAttribute.assert_has_calls([
+ mock.call('cfg_element/frameRateDistribution', '-portDistribution',
+ 'splitRateEvenly'),
+ mock.call('cfg_element/frameRateDistribution',
+ '-streamDistribution', 'splitRateEvenly')])
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_create_traffic_item')
+ @mock.patch.object(ixnet_api.IxNextgen, '_create_flow_groups')
+ @mock.patch.object(ixnet_api.IxNextgen, '_setup_config_elements')
+ def test_create_traffic_model(self, mock__setup_config_elements,
+ mock__create_flow_groups,
+ mock__create_traffic_item):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+
+ ixnet_gen.create_traffic_model()
+ mock__create_traffic_item.assert_called_once()
+ mock__create_flow_groups.assert_called_once()
+ mock__setup_config_elements.assert_called_once()
+
+ def test__update_frame_mac(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(ixnet_gen, '_get_field_in_stack_item') as \
+ mock_get_field:
+ mock_get_field.return_value = 'field_descriptor'
+ ixnet_gen._update_frame_mac('ethernet_descriptor', 'field', 'mac')
+ mock_get_field.assert_called_once_with('ethernet_descriptor', 'field')
+        ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+            'field_descriptor', '-singleValue', 'mac', '-fieldValue', 'mac',
+            '-valueType', 'singleValue')
+ ixnet_gen.ixnet.commit.assert_called_once()
+
+ def test_update_frame(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(
+ ixnet_gen, '_get_config_element_by_flow_group_name',
+ return_value='cfg_element'), \
+ mock.patch.object(ixnet_gen, '_update_frame_mac') as \
+ mock_update_frame, \
+ mock.patch.object(ixnet_gen, '_get_stack_item') as \
+ mock_get_stack_item:
+ mock_get_stack_item.side_effect = [['item1'], ['item2'],
+ ['item3'], ['item4']]
+ ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+
+ self.assertEqual(6, len(ixnet_gen.ixnet.setMultiAttribute.mock_calls))
+ self.assertEqual(4, len(mock_update_frame.mock_calls))
+
+ def test_update_frame_flow_not_present(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(
+ ixnet_gen, '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ ixnet_gen.update_frame(TRAFFIC_PARAMETERS)
+
+ def test_get_statistics(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ port_statistics = '::ixNet::OBJ-/statistics/view:"Port Statistics"'
+ flow_statistics = '::ixNet::OBJ-/statistics/view:"Flow Statistics"'
+ with mock.patch.object(ixnet_gen, '_build_stats_map') as \
+ mock_build_stats:
+ ixnet_gen.get_statistics()
+
+ mock_build_stats.assert_has_calls([
+ mock.call(port_statistics, ixnet_gen.PORT_STATS_NAME_MAP),
+ mock.call(flow_statistics, ixnet_gen.LATENCY_NAME_MAP)])
+
+ def test__update_ipv4_address(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(ixnet_gen, '_get_field_in_stack_item',
+ return_value='field_desc'):
+ ixnet_gen._update_ipv4_address(mock.ANY, mock.ANY, '192.168.1.1',
+ 100, '255.255.255.0', 25)
+ ixnet_gen.ixnet.setMultiAttribute.assert_called_once_with(
+ 'field_desc', '-seed', 100, '-fixedBits', '192.168.1.1',
+ '-randomMask', '255.255.255.0', '-valueType', 'random',
+ '-countValue', 25)
+
+ def test_update_ip_packet(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(ixnet_gen, '_update_ipv4_address') as \
+ mock_update_add, \
+ mock.patch.object(ixnet_gen, '_get_stack_item'), \
+ mock.patch.object(ixnet_gen,
+ '_get_config_element_by_flow_group_name', return_value='celm'):
+ ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+
+ self.assertEqual(4, len(mock_update_add.mock_calls))
+
+ def test_update_ip_packet_exception_no_config_element(self):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ with mock.patch.object(ixnet_gen,
+ '_get_config_element_by_flow_group_name',
+ return_value=None):
+ with self.assertRaises(exceptions.IxNetworkFlowNotPresent):
+ ixnet_gen.update_ip_packet(TRAFFIC_PARAMETERS)
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
+ def test_start_traffic(self, mock_ixnextgen_get_traffic_state):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = [0]
+
+ mock_ixnextgen_get_traffic_state.side_effect = [
+ 'stopped', 'started', 'started', 'started']
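+        # Traffic starts out 'stopped', so no stop command is needed before
+        # starting; the remaining polls report 'started' (3 execute calls).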
+
+ result = ixnet_gen.start_traffic()
+ self.assertIsNone(result)
+ self.ixnet.getList.assert_called_once()
+ self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
+ def test_start_traffic_traffic_running(
+ self, mock_ixnextgen_get_traffic_state):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = [0]
+ mock_ixnextgen_get_traffic_state.side_effect = [
+ 'started', 'stopped', 'started']
+
+ result = ixnet_gen.start_traffic()
+ self.assertIsNone(result)
+ self.ixnet.getList.assert_called_once()
+ self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
+ def test_start_traffic_wait_for_traffic_to_stop(
+ self, mock_ixnextgen_get_traffic_state):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = [0]
+ mock_ixnextgen_get_traffic_state.side_effect = [
+ 'started', 'started', 'started', 'stopped', 'started']
+
+ result = ixnet_gen.start_traffic()
+ self.assertIsNone(result)
+ self.ixnet.getList.assert_called_once()
+ self.assertEqual(4, ixnet_gen._ixnet.execute.call_count)
+
+ @mock.patch.object(ixnet_api.IxNextgen, '_get_traffic_state')
+ def test_start_traffic_wait_for_traffic_start(
+ self, mock_ixnextgen_get_traffic_state):
+ ixnet_gen = ixnet_api.IxNextgen()
+ ixnet_gen._ixnet = self.ixnet
+ ixnet_gen._ixnet.getList.return_value = [0]
+ mock_ixnextgen_get_traffic_state.side_effect = [
+ 'stopped', 'stopped', 'stopped', 'started']
+
+ result = ixnet_gen.start_traffic()
+ self.assertIsNone(result)
+ self.ixnet.getList.assert_called_once()
+ self.assertEqual(3, ixnet_gen._ixnet.execute.call_count)
diff --git a/yardstick/tests/unit/network_services/nfvi/__init__.py b/yardstick/tests/unit/network_services/nfvi/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/nfvi/__init__.py
diff --git a/yardstick/tests/unit/network_services/nfvi/test_collectd.py b/yardstick/tests/unit/network_services/nfvi/test_collectd.py
new file mode 100644
index 000000000..fe59aecfb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/nfvi/test_collectd.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import multiprocessing
+import mock
+
+from yardstick.network_services.nfvi.collectd import AmqpConsumer
+
+
+class TestAmqpConsumer(unittest.TestCase):
+ def setUp(self):
+ self.queue = multiprocessing.Queue()
+ self.url = 'amqp://admin:admin@127.0.0.1:5672/%2F'
+ self.amqp_consumer = AmqpConsumer(self.url, self.queue)
+
+ def test___init__(self):
+ self.assertEqual(self.url, self.amqp_consumer._url)
+
+ def test_on_connection_open(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.add_on_close_callback = \
+ mock.Mock(return_value=0)
+ self.amqp_consumer._connection.channel = mock.Mock(return_value=0)
+ self.assertIsNone(self.amqp_consumer.on_connection_open(10))
+
+ def test_on_connection_closed(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.ioloop = mock.Mock()
+ self.amqp_consumer._connection.ioloop.stop = mock.Mock(return_value=0)
+ self.amqp_consumer._connection.add_timeout = mock.Mock(return_value=0)
+ self.amqp_consumer._closing = True
+ self.assertIsNone(
+ self.amqp_consumer.on_connection_closed("", 404, "Not Found"))
+ self.amqp_consumer._closing = False
+ self.assertIsNone(
+ self.amqp_consumer.on_connection_closed("", 404, "Not Found"))
+
+ def test_reconnect(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.ioloop = mock.Mock()
+ self.amqp_consumer._connection.ioloop.stop = mock.Mock(return_value=0)
+ self.amqp_consumer.connect = mock.Mock(return_value=0)
+ self.amqp_consumer._closing = True
+ self.assertIsNone(self.amqp_consumer.reconnect())
+
+ def test_on_channel_open(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.add_on_close_callback = \
+ mock.Mock(return_value=0)
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer.add_on_channel_close_callback = mock.Mock()
+ self.amqp_consumer._channel.exchange_declare = \
+ mock.Mock(return_value=0)
+ self.assertIsNone(
+ self.amqp_consumer.on_channel_open(self.amqp_consumer._channel))
+
+ def test_add_on_channel_close_callback(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.add_on_close_callback = \
+ mock.Mock(return_value=0)
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.add_on_close_callback = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.add_on_channel_close_callback())
+
+ def test_on_channel_closed(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.close = mock.Mock(return_value=0)
+ _channel = mock.Mock()
+ self.assertIsNone(
+ self.amqp_consumer.on_channel_closed(_channel, "", ""))
+
+    def test_on_exchange_declareok(self):
+ self.amqp_consumer.setup_queue = mock.Mock(return_value=0)
+ self.assertIsNone(self.amqp_consumer.on_exchange_declareok(10))
+
+ def test_setup_queue(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.add_on_close_callback = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.setup_queue("collectd"))
+
+ def test_on_queue_declareok(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.queue_bind = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.on_queue_declareok(10))
+
+ def test__on_bindok(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.basic_consume = mock.Mock()
+ self.amqp_consumer.add_on_cancel_callback = mock.Mock()
+ self.assertIsNone(self.amqp_consumer._on_bindok(10))
+
+ def test_add_on_cancel_callback(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.add_on_cancel_callback = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.add_on_cancel_callback())
+
+ def test_on_consumer_cancelled(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.close = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.on_consumer_cancelled(10))
+
+ def test_on_message(self):
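+        # Minimal collectd-style AMQP payload, just enough to exercise
+        # on_message() and the message acknowledgement path.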
+ body = "msg {} cpu/cpu-0/ipc 101010:10"
+ properties = ""
+ basic_deliver = mock.Mock()
+ basic_deliver.delivery_tag = mock.Mock(return_value=0)
+ self.amqp_consumer.ack_message = mock.Mock()
+ self.assertIsNone(
+ self.amqp_consumer.on_message(10, basic_deliver, properties, body))
+
+ def test_ack_message(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.basic_ack = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.ack_message(10))
+
+ def test_on_cancelok(self):
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.close = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.on_cancelok(10))
+
+ def test_run(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer.connect = mock.Mock()
+ self.amqp_consumer._connection.ioloop.start = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.run())
+
+ def test_stop(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer.connect = mock.Mock()
+ self.amqp_consumer._connection.ioloop.start = mock.Mock()
+ self.amqp_consumer._channel = mock.Mock()
+ self.amqp_consumer._channel.basic_cancel = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.stop())
+
+ def test_close_connection(self):
+ self.amqp_consumer._connection = mock.Mock(autospec=AmqpConsumer)
+ self.amqp_consumer._connection.close = mock.Mock()
+ self.assertIsNone(self.amqp_consumer.close_connection())
diff --git a/yardstick/tests/unit/network_services/nfvi/test_resource.py b/yardstick/tests/unit/network_services/nfvi/test_resource.py
new file mode 100644
index 000000000..de9679456
--- /dev/null
+++ b/yardstick/tests/unit/network_services/nfvi/test_resource.py
@@ -0,0 +1,298 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import errno
+
+import mock
+import unittest
+
+from yardstick.common import exceptions
+from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.nfvi import resource, collectd
+
+
+class TestResourceProfile(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '172.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '172.16.100.20',
+ 'if': 'xe0'},
+ {'network': '172.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '172.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '3c:fd:fe:9e:64:38',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '172.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '172.16.100.20',
+ 'local_mac': '3c:fd:fe:a1:2b:80'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:1e:67:d0:60:5c',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '172.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '172.16.40.20',
+ 'local_mac': '3c:fd:fe:a1:2b:81'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '127.0.0.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '127.0.0.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ def setUp(self):
+ with mock.patch("yardstick.ssh.AutoConnectSSH") as ssh:
+ self.ssh_mock = mock.Mock(autospec=ssh.SSH)
+ self.ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = self.ssh_mock
+
+ mgmt = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['mgmt-interface']
+ # interfaces = \
+ # self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface']
+ port_names = \
+ self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]['vdu'][0]['external-interface']
+ self.resource_profile = \
+ ResourceProfile(mgmt, port_names)
+ self.resource_profile.connection = self.ssh_mock
+
+ def test___init__(self):
+ self.assertTrue(self.resource_profile.enable)
+
+ def test_check_if_system_agent_running(self):
+ self.assertEqual(self.resource_profile.check_if_system_agent_running("collectd"),
+ (0, ""))
+
+    def test_check_if_system_agent_running_exception(self):
+ with mock.patch.object(self.resource_profile.connection, "execute") as mock_execute:
+ mock_execute.side_effect = OSError(errno.ECONNRESET, "error")
+ self.assertEqual(
+ self.resource_profile.check_if_system_agent_running("collectd"),
+ (1, None))
+
+ def test_get_cpu_data(self):
+ reskey = ["", "cpufreq", "cpufreq-0"]
+ value = "metric:10"
+ val = self.resource_profile.get_cpu_data(reskey[1], reskey[2], value)
+ self.assertIsNotNone(val)
+
+ def test_get_cpu_data_error(self):
+ reskey = ["", "", ""]
+ value = "metric:10"
+ val = self.resource_profile.get_cpu_data(reskey[0], reskey[1], value)
+ self.assertEqual(val, ('error', 'Invalid', '', ''))
+
+ def test__start_collectd(self):
+ ssh_mock = mock.Mock()
+ ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+ self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+ "/opt/nsb_bin"))
+
+ ssh_mock.execute = mock.Mock(side_effect=exceptions.SSHError)
+ with self.assertRaises(exceptions.SSHError):
+ self.resource_profile._start_collectd(ssh_mock, "/opt/nsb_bin")
+
+ ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+ self.assertIsNone(self.resource_profile._start_collectd(ssh_mock,
+ "/opt/nsb_bin"))
+
+ def test__start_rabbitmq(self):
+ ssh_mock = mock.Mock()
+ ssh_mock.execute = mock.Mock(return_value=(0, "RabbitMQ", ""))
+ self.assertIsNone(self.resource_profile._start_rabbitmq(ssh_mock))
+
+ ssh_mock.execute = mock.Mock(return_value=(0, "", ""))
+ with self.assertRaises(exceptions.ResourceCommandError):
+ self.resource_profile._start_rabbitmq(ssh_mock)
+
+ ssh_mock.execute = mock.Mock(return_value=(1, "", ""))
+ with self.assertRaises(exceptions.ResourceCommandError):
+ self.resource_profile._start_rabbitmq(ssh_mock)
+
+ def test__prepare_collectd_conf(self):
+ self.assertIsNone(
+ self.resource_profile._prepare_collectd_conf("/opt/nsb_bin"))
+
+ def test__setup_ovs_stats(self):
+ # TODO(elfoley): This method doesn't actually return anything, the side
+ # effects should be checked
+ self.assertIsNone(
+ self.resource_profile._setup_ovs_stats(self.ssh_mock))
+
+    def test__provide_config_file(self):
+ loadplugin = range(5)
+ port_names = range(5)
+ kwargs = {
+ "interval": '25',
+ "loadplugin": loadplugin,
+ "port_names": port_names,
+ }
+ self.resource_profile._provide_config_file("/opt/nsb_bin", "collectd.conf", kwargs)
+ self.ssh_mock.execute.assert_called_once()
+
+ def test_initiate_systemagent(self):
+ self.resource_profile._start_collectd = mock.Mock()
+ self.resource_profile._start_rabbitmq = mock.Mock()
+ self.assertIsNone(
+ self.resource_profile.initiate_systemagent("/opt/nsb_bin"))
+
+ def test_initiate_systemagent_raise(self):
+ self.resource_profile._start_rabbitmq = mock.Mock(side_effect=RuntimeError)
+ with self.assertRaises(RuntimeError):
+ self.resource_profile.initiate_systemagent("/opt/nsb_bin")
+
+ def test__parse_hugepages(self):
+ reskey = ["cpu", "cpuFreq"]
+ value = "timestamp:12345"
+ res = self.resource_profile.parse_hugepages(reskey, value)
+ self.assertEqual({'cpu/cpuFreq': '12345'}, res)
+
+ def test__parse_dpdkstat(self):
+ reskey = ["dpdk0", "0"]
+ value = "tx:12345"
+ res = self.resource_profile.parse_dpdkstat(reskey, value)
+ self.assertEqual({'dpdk0/0': '12345'}, res)
+
+ def test__parse_virt(self):
+ reskey = ["vm0", "cpu"]
+ value = "load:45"
+ res = self.resource_profile.parse_virt(reskey, value)
+ self.assertEqual({'vm0/cpu': '45'}, res)
+
+ def test__parse_ovs_stats(self):
+ reskey = ["ovs", "stats"]
+ value = "tx:45"
+ res = self.resource_profile.parse_ovs_stats(reskey, value)
+ self.assertEqual({'ovs/stats': '45'}, res)
+
+ def test_parse_collectd_result(self):
+ res = self.resource_profile.parse_collectd_result({})
+ expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {},
+ 'memory': {}, 'ovs_stats': {}, 'timestamp': '',
+ 'virt': {}}
+ self.assertDictEqual(res, expected_result)
+
+ def test_parse_collectd_result_cpu(self):
+ metric = {"nsb_stats/cpu/0/ipc": "101"}
+ self.resource_profile.get_cpu_data = mock.Mock(return_value=[1,
+ "ipc",
+ "1234",
+ ""])
+ res = self.resource_profile.parse_collectd_result(metric)
+ expected_result = {'cpu': {1: {'ipc': '1234'}}, 'dpdkstat': {}, 'hugepages': {},
+ 'memory': {}, 'ovs_stats': {}, 'timestamp': '',
+ 'virt': {}}
+ self.assertDictEqual(res, expected_result)
+
+ def test_parse_collectd_result_memory(self):
+ metric = {"nsb_stats/memory/bw": "101"}
+ res = self.resource_profile.parse_collectd_result(metric)
+ expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {},
+ 'memory': {'bw': '101'}, 'ovs_stats': {}, 'timestamp': '',
+ 'virt': {}}
+ self.assertDictEqual(res, expected_result)
+
+ def test_parse_collectd_result_hugepage(self):
+ # amqp returns bytes
+ metric = {b"nsb_stats/hugepages/free": b"101"}
+ self.resource_profile.parse_hugepages = mock.Mock(return_value={"free": "101"})
+ res = self.resource_profile.parse_collectd_result(metric)
+ expected_result = {'cpu': {}, 'dpdkstat': {}, 'hugepages': {'free': '101'},
+ 'memory': {}, 'ovs_stats': {}, 'timestamp': '',
+ 'virt': {}}
+ self.assertDictEqual(res, expected_result)
+
+ def test_parse_collectd_result_dpdk_virt_ovs(self):
+ metric = {b"nsb_stats/dpdkstat/tx": b"101",
+ b"nsb_stats/ovs_stats/tx": b"101",
+ b"nsb_stats/virt/virt/memory": b"101"}
+ self.resource_profile.parse_dpdkstat = \
+ mock.Mock(return_value={"tx": "101"})
+ self.resource_profile.parse_virt = \
+ mock.Mock(return_value={"memory": "101"})
+ self.resource_profile.parse_ovs_stats = \
+ mock.Mock(return_value={"tx": "101"})
+ res = self.resource_profile.parse_collectd_result(metric)
+ expected_result = {'cpu': {}, 'dpdkstat': {'tx': '101'}, 'hugepages': {},
+ 'memory': {}, 'ovs_stats': {'tx': '101'}, 'timestamp': '',
+ 'virt': {'memory': '101'}}
+ self.assertDictEqual(res, expected_result)
+
+ def test_amqp_process_for_nfvi_kpi(self):
+ self.resource_profile.amqp_client = \
+ mock.MagicMock(side_effect=[None, mock.MagicMock()])
+ self.resource_profile.run_collectd_amqp = \
+ mock.Mock(return_value=0)
+ res = self.resource_profile.amqp_process_for_nfvi_kpi()
+ self.assertIsNone(res)
+
+ def test_amqp_collect_nfvi_kpi(self):
+ self.resource_profile.amqp_client = \
+ mock.MagicMock(side_effect=[None, mock.MagicMock()])
+ self.resource_profile.run_collectd_amqp = \
+ mock.Mock(return_value=0)
+ self.resource_profile.parse_collectd_result = mock.Mock()
+ res = self.resource_profile.amqp_collect_nfvi_kpi()
+ self.assertIsNotNone(res)
+
+ def test_run_collectd_amqp(self):
+ resource.AmqpConsumer = mock.Mock(autospec=collectd)
+ self.assertIsNone(self.resource_profile.run_collectd_amqp())
+
+ def test_start(self):
+ self.assertIsNone(self.resource_profile.start())
+
+ def test_stop(self):
+ self.assertIsNone(self.resource_profile.stop())
+
+ def test_stop_amqp_not_running(self):
+ self.resource_profile.amqp_client = mock.MagicMock()
+ # TODO(efoley): Fix this incorrect test.
+ # Should check that we don't try to stop amqp when it's not running
+ self.assertIsNone(self.resource_profile.stop())
diff --git a/yardstick/tests/unit/network_services/test_utils.py b/yardstick/tests/unit/network_services/test_utils.py
new file mode 100644
index 000000000..2b2eb7109
--- /dev/null
+++ b/yardstick/tests/unit/network_services/test_utils.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import unittest
+import mock
+
+from yardstick.network_services import utils
+
+
+class UtilsTestCase(unittest.TestCase):
+ """Test all VNF helper methods."""
+
+ DPDK_PATH = os.path.join(utils.NSB_ROOT, "dpdk-devbind.py")
+
+ def setUp(self):
+ super(UtilsTestCase, self).setUp()
+
+ def test_get_nsb_options(self):
+ result = utils.get_nsb_option("bin_path", None)
+ self.assertEqual(result, utils.NSB_ROOT)
+
+ def test_get_nsb_option_is_invalid_key(self):
+ result = utils.get_nsb_option("bin", None)
+        self.assertIsNone(result)
+
+ def test_get_nsb_option_default(self):
+ default = object()
+ result = utils.get_nsb_option("nosuch", default)
+ self.assertIs(result, default)
+
+ def test_provision_tool(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, self.DPDK_PATH, ""))
+ ssh.return_value = ssh_mock
+ tool_path = utils.provision_tool(ssh_mock, self.DPDK_PATH)
+ self.assertEqual(tool_path, self.DPDK_PATH)
+
+ def test_provision_tool_no_path(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(1, self.DPDK_PATH, ""))
+ ssh.return_value = ssh_mock
+ tool_path = utils.provision_tool(ssh_mock, self.DPDK_PATH)
+ self.assertEqual(tool_path, self.DPDK_PATH)
+
+
+class PciAddressTestCase(unittest.TestCase):
+
+ PCI_ADDRESS_DBSF = '000A:07:03.2'
+ PCI_ADDRESS_BSF = '06:02.1'
+ PCI_ADDRESS_DBSF_MULTILINE_1 = '0001:08:04.3\nother text\n'
+ PCI_ADDRESS_DBSF_MULTILINE_2 = 'first line\n 0001:08:04.3 \nother text\n'
+ # Will match and return the first address found.
+ PCI_ADDRESS_DBSF_MULTILINE_3 = ' 0001:08:04.1 \n 05:03.1 \nother\n'
+ PCI_ADDRESS_BSF_MULTILINE_1 = 'first line\n 08:04.3 \n 0002:05:03.1\n'
+ BAD_INPUT_1 = 'no address found'
+ BAD_INPUT_2 = '001:08:04.1'
+ BAD_INPUT_3 = '08:4.1'
+
+ def test_pciaddress_dbsf(self):
+ pci_address = utils.PciAddress(PciAddressTestCase.PCI_ADDRESS_DBSF)
+ self.assertEqual('000a', pci_address.domain)
+ self.assertEqual('07', pci_address.bus)
+ self.assertEqual('03', pci_address.slot)
+ self.assertEqual('2', pci_address.function)
+
+ def test_pciaddress_bsf(self):
+ pci_address = utils.PciAddress(PciAddressTestCase.PCI_ADDRESS_BSF)
+ self.assertEqual('0000', pci_address.domain)
+ self.assertEqual('06', pci_address.bus)
+ self.assertEqual('02', pci_address.slot)
+ self.assertEqual('1', pci_address.function)
+
+ def test_pciaddress_dbsf_multiline_1(self):
+ pci_address = utils.PciAddress(
+ PciAddressTestCase.PCI_ADDRESS_DBSF_MULTILINE_1)
+ self.assertEqual('0001', pci_address.domain)
+ self.assertEqual('08', pci_address.bus)
+ self.assertEqual('04', pci_address.slot)
+ self.assertEqual('3', pci_address.function)
+
+ def test_pciaddress_dbsf_multiline_2(self):
+ pci_address = utils.PciAddress(
+ PciAddressTestCase.PCI_ADDRESS_DBSF_MULTILINE_2)
+ self.assertEqual('0001', pci_address.domain)
+ self.assertEqual('08', pci_address.bus)
+ self.assertEqual('04', pci_address.slot)
+ self.assertEqual('3', pci_address.function)
+
+ def test_pciaddress_dbsf_multiline_3(self):
+ pci_address = utils.PciAddress(
+ PciAddressTestCase.PCI_ADDRESS_DBSF_MULTILINE_3)
+ self.assertEqual('0001', pci_address.domain)
+ self.assertEqual('08', pci_address.bus)
+ self.assertEqual('04', pci_address.slot)
+ self.assertEqual('1', pci_address.function)
+
+ def test_pciaddress_bsf_multiline_1(self):
+ pci_address = utils.PciAddress(
+ PciAddressTestCase.PCI_ADDRESS_BSF_MULTILINE_1)
+ self.assertEqual('0000', pci_address.domain)
+ self.assertEqual('08', pci_address.bus)
+ self.assertEqual('04', pci_address.slot)
+ self.assertEqual('3', pci_address.function)
+
+ def test_pciaddress_bad_input_no_address(self):
+ with self.assertRaises(ValueError) as exception:
+ utils.PciAddress(PciAddressTestCase.BAD_INPUT_1)
+ self.assertEqual('Invalid PCI address: {}'.format(
+ PciAddressTestCase.BAD_INPUT_1), str(exception.exception))
+
+ def test_pciaddress_bad_input_dbsf_bad_formatted(self):
+ # In this test case, the domain has only 3 characters instead of 4.
+ pci_address = utils.PciAddress(
+ PciAddressTestCase.BAD_INPUT_2)
+ self.assertEqual('0000', pci_address.domain)
+ self.assertEqual('08', pci_address.bus)
+ self.assertEqual('04', pci_address.slot)
+ self.assertEqual('1', pci_address.function)
+
+ def test_pciaddress_bad_input_bsf_bad_formatted(self):
+ with self.assertRaises(ValueError) as exception:
+ utils.PciAddress(PciAddressTestCase.BAD_INPUT_3)
+ self.assertEqual('Invalid PCI address: {}'.format(
+ PciAddressTestCase.BAD_INPUT_3), str(exception.exception))
diff --git a/yardstick/tests/unit/network_services/test_yang_model.py b/yardstick/tests/unit/network_services/test_yang_model.py
new file mode 100644
index 000000000..cbeb3a1f2
--- /dev/null
+++ b/yardstick/tests/unit/network_services/test_yang_model.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import mock
+import unittest
+
+from yardstick.network_services.yang_model import YangModel
+
+
+class YangModelTestCase(unittest.TestCase):
+ """Test all Yang Model methods."""
+
+ ENTRIES = {
+ 'access-list1': {
+ 'acl': {
+ 'access-list-entries': [{
+ 'ace': {
+ 'ace-oper-data': {
+ 'match-counter': 0},
+ 'actions': 'drop,count',
+ 'matches': {
+ 'destination-ipv4-network':
+ '152.16.40.20/24',
+ 'destination-port-range': {
+ 'lower-port': 0,
+ 'upper-port': 65535},
+ 'source-ipv4-network': '0.0.0.0/0',
+ 'source-port-range': {
+ 'lower-port': 0,
+ 'upper-port': 65535}},
+ 'rule-name': 'rule1588'}},
+ {
+ 'ace': {
+ 'ace-oper-data': {
+ 'match-counter': 0},
+ 'actions': 'drop,count',
+ 'matches': {
+ 'destination-ipv4-network':
+ '0.0.0.0/0',
+ 'destination-port-range': {
+ 'lower-port': 0,
+ 'upper-port': 65535},
+ 'source-ipv4-network':
+ '152.16.100.20/24',
+ 'source-port-range': {
+ 'lower-port': 0,
+ 'upper-port': 65535}},
+ 'rule-name': 'rule1589'}}],
+ 'acl-name': 'sample-ipv4-acl',
+ 'acl-type': 'ipv4-acl'}
+ }
+ }
+
+ def test__init__(self):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ self.assertEqual(y.config_file, cfg)
+
+ def test_config_file_setter(self):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ self.assertEqual(y.config_file, cfg)
+ cfg2 = "yang2.yaml"
+ y.config_file = cfg2
+ self.assertEqual(y.config_file, cfg2)
+
+ def test__get_entries(self):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ y._options = self.ENTRIES
+ y._get_entries()
+ self.assertIn("p acl add", y._rules)
+
+ def test__get_entries_no_options(self):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ y._get_entries()
+ self.assertEqual(y._rules, '')
+
+ @mock.patch('yardstick.network_services.yang_model.open')
+ @mock.patch('yardstick.network_services.yang_model.yaml_load')
+ def test__read_config(self, mock_safe_load, *args):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ mock_safe_load.return_value = expected = {'key1': 'value1', 'key2': 'value2'}
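+        # With yaml_load mocked, _read_config should store the parsed dict
+        # in _options.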
+ y._read_config()
+ self.assertDictEqual(y._options, expected)
+
+ @mock.patch('yardstick.network_services.yang_model.open')
+ def test__read_config_open_error(self, mock_open):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ mock_open.side_effect = IOError('my error')
+
+ self.assertEqual(y._options, {})
+ with self.assertRaises(IOError) as raised:
+ y._read_config()
+
+ self.assertIn('my error', str(raised.exception))
+ self.assertEqual(y._options, {})
+
+ def test_get_rules(self):
+ cfg = "yang.yaml"
+ y = YangModel(cfg)
+ y._read_config = read_mock = mock.Mock()
+ y._get_entries = get_mock = mock.Mock()
+
+ y._rules = None
+ self.assertIsNone(y.get_rules())
+ read_mock.assert_called_once()
+ get_mock.assert_called_once()
+
+        # A truthy _rules value should prevent read and get from being
+        # called again
+ y._rules = 999
+ self.assertEqual(y.get_rules(), 999)
+ read_mock.assert_called_once()
+ get_mock.assert_called_once()
diff --git a/yardstick/tests/unit/network_services/traffic_profile/__init__.py b/yardstick/tests/unit/network_services/traffic_profile/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/__init__.py
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_base.py b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
new file mode 100644
index 000000000..55276af58
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_base.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+import mock
+import unittest
+
+from yardstick.common import exceptions
+from yardstick.network_services import traffic_profile as tprofile_package
+from yardstick.network_services.traffic_profile import base
+from yardstick import tests as y_tests
+
+
+class TestTrafficProfile(unittest.TestCase):
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64}}
+
+ def _get_res_mock(self, **kw):
+ _mock = mock.MagicMock()
+ for k, v in kw.items():
+ setattr(_mock, k, v)
+ return _mock
+
+ def test___init__(self):
+ traffic_profile = base.TrafficProfile(self.TRAFFIC_PROFILE)
+ self.assertEqual(self.TRAFFIC_PROFILE, traffic_profile.params)
+
+ def test_execute_traffic(self):
+ traffic_profile = base.TrafficProfile(self.TRAFFIC_PROFILE)
+ self.assertRaises(NotImplementedError,
+ traffic_profile.execute_traffic, {})
+
+ def test_get_existing_traffic_profile(self):
+ traffic_profile_list = [
+ 'RFC2544Profile', 'FixedProfile', 'TrafficProfileGenericHTTP',
+ 'IXIARFC2544Profile', 'ProxACLProfile', 'ProxBinSearchProfile',
+ 'ProxProfile', 'ProxRampProfile']
+ with mock.patch.dict(sys.modules, y_tests.STL_MOCKS):
+ tprofile_package.register_modules()
+
+ for tp in traffic_profile_list:
+ traffic_profile = base.TrafficProfile.get(
+ {'traffic_profile': {'traffic_type': tp}})
+ self.assertEqual(tp, traffic_profile.__class__.__name__)
+
+ def test_get_non_existing_traffic_profile(self):
+ self.assertRaises(exceptions.TrafficProfileNotImplemented,
+ base.TrafficProfile.get, self.TRAFFIC_PROFILE)
+
+
+class TestDummyProfile(unittest.TestCase):
+ def test_execute(self):
+ tp_config = {'traffic_profile': {'duration': 15}}
+ dummy_profile = base.DummyProfile(tp_config)
+ self.assertIsNone(dummy_profile.execute({}))
+
+
+class TrafficProfileConfigTestCase(unittest.TestCase):
+
+ def test__init(self):
+ tp_config = {'traffic_profile': {'packet_sizes': {'64B': 100}}}
+ tp_config_obj = base.TrafficProfileConfig(tp_config)
+ self.assertEqual({'64B': 100}, tp_config_obj.packet_sizes)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_SCHEMA,
+ tp_config_obj.schema)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_FRAME_RATE,
+ tp_config_obj.frame_rate)
+ self.assertEqual(base.TrafficProfileConfig.DEFAULT_DURATION,
+ tp_config_obj.duration)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py b/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
new file mode 100644
index 000000000..2f6713760
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_fixed.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import mock
+import unittest
+
+from yardstick.tests import STL_MOCKS
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.traffic_profile.base import TrafficProfile
+ from yardstick.network_services.traffic_profile.fixed import FixedProfile
+
+
+class TestFixedProfile(unittest.TestCase):
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64}}
+
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ def test___init__(self):
+ fixed_profile = FixedProfile(self.TRAFFIC_PROFILE)
+ self.assertIsNotNone(fixed_profile)
+
+ def test_execute(self):
+ traffic_generator = mock.Mock(autospec=TrafficProfile)
+ traffic_generator.my_ports = [0, 1]
+ traffic_generator.vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ traffic_generator.client = \
+ mock.Mock(return_value=True)
+ fixed_profile = FixedProfile(self.TRAFFIC_PROFILE)
+ fixed_profile.params = self.TRAFFIC_PROFILE
+ fixed_profile.first_run = True
+ self.assertIsNone(fixed_profile.execute(traffic_generator))
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_http.py b/yardstick/tests/unit/network_services/traffic_profile/test_http.py
new file mode 100644
index 000000000..d44fab2b5
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_http.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from yardstick.network_services.traffic_profile import http
+
+
+class TestTrafficProfileGenericHTTP(unittest.TestCase):
+
+ TP_CONFIG = {'traffic_profile': {'duration': 10}}
+
+ def test___init__(self):
+ tp_generic_http = http.TrafficProfileGenericHTTP(
+ self.TP_CONFIG)
+ self.assertIsNotNone(tp_generic_http)
+
+ def test_execute(self):
+ tp_generic_http = http.TrafficProfileGenericHTTP(
+ self.TP_CONFIG)
+ traffic_generator = {}
+ self.assertIsNone(tp_generic_http.execute(traffic_generator))
+
+ def test__send_http_request(self):
+ tp_generic_http = http.TrafficProfileGenericHTTP(
+ self.TP_CONFIG)
+ self.assertIsNone(tp_generic_http._send_http_request(
+ '10.1.1.1', '250', '/req'))
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
new file mode 100644
index 000000000..57de6602d
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_http_ixload.py
@@ -0,0 +1,269 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import mock
+
+from oslo_serialization import jsonutils
+
+from yardstick.network_services.traffic_profile import http_ixload
+from yardstick.network_services.traffic_profile.http_ixload import \
+ join_non_strings, validate_non_string_sequence
+
+
+class TestJoinNonStrings(unittest.TestCase):
+
+ def test_validate_non_string_sequence(self):
+ self.assertEqual(validate_non_string_sequence([1, 2, 3]), [1, 2, 3])
+ self.assertIsNone(validate_non_string_sequence('123'))
+ self.assertIsNone(validate_non_string_sequence(1))
+
+ self.assertEqual(validate_non_string_sequence(1, 2), 2)
+ self.assertEqual(validate_non_string_sequence(1, default=2), 2)
+
+ with self.assertRaises(RuntimeError):
+ validate_non_string_sequence(1, raise_exc=RuntimeError)
+
+ def test_join_non_strings(self):
+ self.assertEqual(join_non_strings(':'), '')
+ self.assertEqual(join_non_strings(':', 'a'), 'a')
+ self.assertEqual(join_non_strings(':', 'a', 2, 'c'), 'a:2:c')
+ self.assertEqual(join_non_strings(':', ['a', 2, 'c']), 'a:2:c')
+ self.assertEqual(join_non_strings(':', 'abc'), 'abc')
+
+
+class TestIxLoadTrafficGen(unittest.TestCase):
+
+ def test_parse_run_test(self):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
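+        # IXLOADHttpTest receives its test specification as a JSON string.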
+ j = jsonutils.dump_as_bytes(test_input)
+ ixload = http_ixload.IXLOADHttpTest(j)
+ self.assertDictEqual(ixload.test_input, test_input)
+ self.assertIsNone(ixload.parse_run_test())
+ self.assertEqual(ixload.ports_to_reassign, [
+ ["IXIA_CHASSIS", "CARD", 1],
+ ["IXIA_CHASSIS", "CARD", 2],
+ ["IXIA_CHASSIS", "CARD", 3],
+ ])
+
+ def test_format_ports_for_reassignment(self):
+ ports = [
+ ["IXIA_CHASSIS", "CARD", 1],
+ ["IXIA_CHASSIS", "CARD", 2],
+ ["IXIA_CHASSIS", "CARD", 3],
+ ]
+ formatted = http_ixload.IXLOADHttpTest.format_ports_for_reassignment(ports)
+ self.assertEqual(formatted, [
+ "IXIA_CHASSIS;CARD;1",
+ "IXIA_CHASSIS;CARD;2",
+ "IXIA_CHASSIS;CARD;3",
+ ])
+
+ def test_reassign_ports(self):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+ j = jsonutils.dump_as_bytes(test_input)
+ ixload = http_ixload.IXLOADHttpTest(j)
+ repository = mock.Mock()
+ test = mock.MagicMock()
+ test.setPorts = mock.Mock()
+ ports_to_reassign = [(1, 2, 3), (1, 2, 4)]
+ ixload.format_ports_for_reassignment = mock.Mock(return_value=["1;2;3"])
+ self.assertIsNone(ixload.reassign_ports(test, repository, ports_to_reassign))
+
+ def test_reassign_ports_error(self):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+ j = jsonutils.dump_as_bytes(test_input)
+ ixload = http_ixload.IXLOADHttpTest(j)
+ repository = mock.Mock()
+ test = "test"
+ ports_to_reassign = [(1, 2, 3), (1, 2, 4)]
+ ixload.format_ports_for_reassignment = mock.Mock(return_value=["1;2;3"])
+ ixload.ix_load = mock.MagicMock()
+ ixload.ix_load.delete = mock.Mock()
+ ixload.ix_load.disconnect = mock.Mock()
+ with self.assertRaises(Exception):
+ ixload.reassign_ports(test, repository, ports_to_reassign)
+
+ def test_stat_collector(self):
+ args = [0, 1]
+ self.assertIsNone(
+ http_ixload.IXLOADHttpTest.stat_collector(*args))
+
+ def test_IxL_StatCollectorCommand(self):
+ args = [[0, 1, 2, 3], [0, 1, 2, 3]]
+ self.assertIsNone(
+ http_ixload.IXLOADHttpTest.IxL_StatCollectorCommand(*args))
+
+ def test_set_results_dir(self):
+ test_stat_collector = mock.MagicMock()
+ test_stat_collector.setResultDir = mock.Mock()
+ results_on_windows = "c:/Results"
+ self.assertIsNone(
+ http_ixload.IXLOADHttpTest.set_results_dir(test_stat_collector,
+ results_on_windows))
+
+ def test_set_results_dir_error(self):
+ test_stat_collector = ""
+ results_on_windows = "c:/Results"
+ with self.assertRaises(Exception):
+ http_ixload.IXLOADHttpTest.set_results_dir(test_stat_collector, results_on_windows)
+
+ def test_load_config_file(self):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+ j = jsonutils.dump_as_bytes(test_input)
+ ixload = http_ixload.IXLOADHttpTest(j)
+ ixload.ix_load = mock.MagicMock()
+ ixload.ix_load.new = mock.Mock(return_value="")
+ self.assertIsNotNone(ixload.load_config_file("ixload.cfg"))
+
+ def test_load_config_file_error(self):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+ j = jsonutils.dump_as_bytes(test_input)
+ ixload = http_ixload.IXLOADHttpTest(j)
+ ixload.ix_load = "test"
+ with self.assertRaises(Exception):
+ ixload.load_config_file("ixload.cfg")
+
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
+ def test_start_http_test_connect_error(self, mock_ixload_type, *args):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+
+ j = jsonutils.dump_as_bytes(test_input)
+
+ mock_ixload_type.return_value.connect.side_effect = RuntimeError
+
+ ixload = http_ixload.IXLOADHttpTest(j)
+ ixload.results_on_windows = 'windows_result_dir'
+ ixload.result_dir = 'my_result_dir'
+
+ with self.assertRaises(RuntimeError):
+ ixload.start_http_test()
+
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
+ def test_start_http_test(self, *args):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+
+ j = jsonutils.dump_as_bytes(test_input)
+
+ ixload = http_ixload.IXLOADHttpTest(j)
+ ixload.results_on_windows = 'windows_result_dir'
+ ixload.result_dir = 'my_result_dir'
+ ixload.load_config_file = mock.MagicMock()
+
+ self.assertIsNone(ixload.start_http_test())
+
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.IxLoad')
+ @mock.patch('yardstick.network_services.traffic_profile.http_ixload.StatCollectorUtils')
+ def test_start_http_test_reassign_error(self, *args):
+ ports = [1, 2, 3]
+ test_input = {
+ "remote_server": "REMOTE_SERVER",
+ "ixload_cfg": "IXLOAD_CFG",
+ "result_dir": "RESULT_DIR",
+ "ixia_chassis": "IXIA_CHASSIS",
+ "IXIA": {
+ "card": "CARD",
+ "ports": ports,
+ },
+ }
+
+ j = jsonutils.dump_as_bytes(test_input)
+
+ ixload = http_ixload.IXLOADHttpTest(j)
+ ixload.load_config_file = mock.MagicMock()
+
+ reassign_ports = mock.Mock(side_effect=RuntimeError)
+ ixload.reassign_ports = reassign_ports
+ ixload.results_on_windows = 'windows_result_dir'
+ ixload.result_dir = 'my_result_dir'
+
+ ixload.start_http_test()
+ reassign_ports.assert_called_once()
+
+ @mock.patch("yardstick.network_services.traffic_profile.http_ixload.IXLOADHttpTest")
+ def test_main(self, *args):
+ args = ["1", "2", "3"]
+ http_ixload.main(args)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
new file mode 100644
index 000000000..6b3532fa2
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -0,0 +1,623 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import deepcopy
+
+import mock
+import unittest
+
+from yardstick.network_services.traffic_profile import ixia_rfc2544
+from yardstick.network_services.traffic_profile import trex_traffic_profile
+
+
+class TestIXIARFC2544Profile(unittest.TestCase):
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ PROFILE = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {
+ 'traffic_type': 'IXIARFC2544Profile',
+ 'frame_rate': 100},
+ ixia_rfc2544.IXIARFC2544Profile.DOWNLINK: {
+ 'ipv4': {
+ 'outer_l2': {
+ 'framesize': {
+ '64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4': {
+ 'dstip4': '1.1.1.1-1.15.255.255',
+ 'proto': 'udp',
+ 'count': '1',
+ 'srcip4': '90.90.1.1-90.105.255.255',
+ 'dscp': 0,
+ 'ttl': 32},
+ 'outer_l4': {
+ 'srcport': '2001',
+                    'dstport': '1234'}}},
+ ixia_rfc2544.IXIARFC2544Profile.UPLINK: {
+ 'ipv4': {
+ 'outer_l2': {
+ 'framesize': {
+ '64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4': {
+ 'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'count': '1',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0,
+ 'ttl': 32},
+ 'outer_l4': {
+ 'dstport': '2001',
+ 'srcport': '1234'}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+
+ def test_get_ixia_traffic_profile_error(self):
+ traffic_generator = mock.Mock(
+ autospec=trex_traffic_profile.TrexProfile)
+ traffic_generator.my_ports = [0, 1]
+ traffic_generator.uplink_ports = [-1]
+ traffic_generator.downlink_ports = [1]
+ traffic_generator.client = \
+ mock.Mock(return_value=True)
+ STATIC_TRAFFIC = {
+ ixia_rfc2544.IXIARFC2544Profile.UPLINK: {
+ "id": 1,
+ "bidir": "False",
+ "duration": 60,
+ "iload": "100",
+ "outer_l2": {
+ "dstmac": "00:00:00:00:00:03",
+ "framesPerSecond": True,
+ "framesize": 64,
+ "srcmac": "00:00:00:00:00:01"
+ },
+ "outer_l3": {
+ "dscp": 0,
+ "dstip4": "152.16.40.20",
+ "proto": "udp",
+ "srcip4": "152.16.100.20",
+ "ttl": 32
+ },
+ "outer_l3v4": {
+ "dscp": 0,
+ "dstip4": "152.16.40.20",
+ "proto": "udp",
+ "srcip4": "152.16.100.20",
+ "ttl": 32
+ },
+ "outer_l3v6": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l4": {
+ "dstport": "2001",
+ "srcport": "1234"
+ },
+ "traffic_type": "continuous"
+ },
+ ixia_rfc2544.IXIARFC2544Profile.DOWNLINK: {
+ "id": 2,
+ "bidir": "False",
+ "duration": 60,
+ "iload": "100",
+ "outer_l2": {
+ "dstmac": "00:00:00:00:00:04",
+ "framesPerSecond": True,
+ "framesize": 64,
+ "srcmac": "00:00:00:00:00:01"
+ },
+ "outer_l3": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l3v4": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l3v6": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l4": {
+ "dstport": "1234",
+ "srcport": "2001"
+ },
+ "traffic_type": "continuous"
+ }
+ }
+ ixia_rfc2544.STATIC_TRAFFIC = STATIC_TRAFFIC
+
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ r_f_c2544_profile.rate = 100
+ mac = {"src_mac_0": "00:00:00:00:00:01",
+ "src_mac_1": "00:00:00:00:00:02",
+ "src_mac_2": "00:00:00:00:00:02",
+ "dst_mac_0": "00:00:00:00:00:03",
+ "dst_mac_1": "00:00:00:00:00:04",
+ "dst_mac_2": "00:00:00:00:00:04"}
+ result = r_f_c2544_profile._get_ixia_traffic_profile(self.PROFILE, mac)
+ self.assertIsNotNone(result)
+
+ def test_get_ixia_traffic_profile(self):
+ traffic_generator = mock.Mock(
+ autospec=trex_traffic_profile.TrexProfile)
+ traffic_generator.my_ports = [0, 1]
+ traffic_generator.uplink_ports = [-1]
+ traffic_generator.downlink_ports = [1]
+ traffic_generator.client = \
+ mock.Mock(return_value=True)
+ STATIC_TRAFFIC = {
+ ixia_rfc2544.IXIARFC2544Profile.UPLINK: {
+ "id": 1,
+ "bidir": "False",
+ "duration": 60,
+ "iload": "100",
+ "outer_l2": {
+ "dstmac": "00:00:00:00:00:03",
+ "framesPerSecond": True,
+ "framesize": 64,
+ "srcmac": "00:00:00:00:00:01"
+ },
+ "outer_l3": {
+ "dscp": 0,
+ "dstip4": "152.16.40.20",
+ "proto": "udp",
+ "srcip4": "152.16.100.20",
+ "ttl": 32
+ },
+ "outer_l3v4": {
+ "dscp": 0,
+ "dstip4": "152.16.40.20",
+ "proto": "udp",
+ "srcip4": "152.16.100.20",
+ "ttl": 32,
+ "count": "1"
+ },
+ "outer_l3v6": {
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32,
+ },
+ "outer_l4": {
+ "dstport": "2001",
+ "srcport": "1234",
+ "count": "1"
+ },
+ "traffic_type": "continuous"
+ },
+ ixia_rfc2544.IXIARFC2544Profile.DOWNLINK: {
+ "id": 2,
+ "bidir": "False",
+ "duration": 60,
+ "iload": "100",
+ "outer_l2": {
+ "dstmac": "00:00:00:00:00:04",
+ "framesPerSecond": True,
+ "framesize": 64,
+ "srcmac": "00:00:00:00:00:01"
+ },
+ "outer_l3": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l3v4": {
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32,
+ },
+ "outer_l3v6": {
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32,
+ },
+ "outer_l4": {
+ "dstport": "1234",
+ "srcport": "2001",
+ "count": "1"
+ },
+ "traffic_type": "continuous"
+ }
+ }
+ ixia_rfc2544.STATIC_TRAFFIC = STATIC_TRAFFIC
+
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ r_f_c2544_profile.rate = 100
+ mac = {"src_mac_0": "00:00:00:00:00:01",
+ "src_mac_1": "00:00:00:00:00:02",
+ "src_mac_2": "00:00:00:00:00:02",
+ "dst_mac_0": "00:00:00:00:00:03",
+ "dst_mac_1": "00:00:00:00:00:04",
+ "dst_mac_2": "00:00:00:00:00:04"}
+ result = r_f_c2544_profile._get_ixia_traffic_profile(self.PROFILE, mac)
+ self.assertIsNotNone(result)
+
+ @mock.patch("yardstick.network_services.traffic_profile.ixia_rfc2544.open")
+ def test_get_ixia_traffic_profile_v6(self, *args):
+ traffic_generator = mock.Mock(
+ autospec=trex_traffic_profile.TrexProfile)
+ traffic_generator.my_ports = [0, 1]
+ traffic_generator.uplink_ports = [-1]
+ traffic_generator.downlink_ports = [1]
+ traffic_generator.client = \
+ mock.Mock(return_value=True)
+ STATIC_TRAFFIC = {
+ ixia_rfc2544.IXIARFC2544Profile.UPLINK: {
+ "id": 1,
+ "bidir": "False",
+ "duration": 60,
+ "iload": "100",
+ "outer_l2": {
+ "dstmac": "00:00:00:00:00:03",
+ "framesPerSecond": True,
+ "framesize": 64,
+ "srcmac": "00:00:00:00:00:01"
+ },
+ "outer_l3": {
+ "dscp": 0,
+ "dstip4": "152.16.40.20",
+ "proto": "udp",
+ "srcip4": "152.16.100.20",
+ "ttl": 32
+ },
+ "outer_l3v4": {
+ "dscp": 0,
+ "dstip4": "152.16.40.20",
+ "proto": "udp",
+ "srcip4": "152.16.100.20",
+ "ttl": 32
+ },
+ "outer_l3v6": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l4": {
+ "dstport": "2001",
+ "srcport": "1234"
+ },
+ "traffic_type": "continuous"
+ },
+ ixia_rfc2544.IXIARFC2544Profile.DOWNLINK: {
+ "id": 2,
+ "bidir": "False",
+ "duration": 60,
+ "iload": "100",
+ "outer_l2": {
+ "dstmac": "00:00:00:00:00:04",
+ "framesPerSecond": True,
+ "framesize": 64,
+ "srcmac": "00:00:00:00:00:01"
+ },
+ "outer_l3": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l3v4": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l3v6": {
+ "count": 1024,
+ "dscp": 0,
+ "dstip4": "152.16.100.20",
+ "proto": "udp",
+ "srcip4": "152.16.40.20",
+ "ttl": 32
+ },
+ "outer_l4": {
+ "dstport": "1234",
+ "srcport": "2001"
+ },
+ "traffic_type": "continuous"
+ }
+ }
+ ixia_rfc2544.STATIC_TRAFFIC = STATIC_TRAFFIC
+
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ r_f_c2544_profile.rate = 100
+ mac = {"src_mac_0": "00:00:00:00:00:01",
+ "src_mac_1": "00:00:00:00:00:02",
+ "src_mac_2": "00:00:00:00:00:02",
+ "dst_mac_0": "00:00:00:00:00:03",
+ "dst_mac_1": "00:00:00:00:00:04",
+ "dst_mac_2": "00:00:00:00:00:04"}
+ profile_data = {'description': 'Traffic profile to run RFC2544',
+ 'name': 'rfc2544',
+ 'traffic_profile':
+ {'traffic_type': 'IXIARFC2544Profile',
+ 'frame_rate': 100},
+ ixia_rfc2544.IXIARFC2544Profile.DOWNLINK:
+ {'ipv4':
+ {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4': {'dstip4': '1.1.1.1-1.15.255.255',
+ 'proto': 'udp', 'count': '1',
+ 'srcip4': '90.90.1.1-90.105.255.255',
+ 'dscp': 0, 'ttl': 32},
+ 'outer_l3v6': {'dstip6': '1.1.1.1-1.15.255.255',
+ 'proto': 'udp', 'count': '1',
+ 'srcip6': '90.90.1.1-90.105.255.255',
+ 'dscp': 0, 'ttl': 32},
+ 'outer_l4': {'srcport': '2001',
+                                     'dstport': '1234'}}},
+ ixia_rfc2544.IXIARFC2544Profile.UPLINK: {'ipv4':
+ {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4':
+ {'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp', 'count': '1',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32},
+ 'outer_l3v6':
+ {'dstip6': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp', 'count': '1',
+ 'srcip6': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32},
+
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234'}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+ result = r_f_c2544_profile._get_ixia_traffic_profile(profile_data, mac)
+ self.assertIsNotNone(result)
+
+ def test__get_ixia_traffic_profile_default_args(self):
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+
+ expected = {}
+ result = r_f_c2544_profile._get_ixia_traffic_profile({})
+ self.assertDictEqual(result, expected)
+
+ def test__ixia_traffic_generate(self):
+ traffic_generator = mock.Mock(
+ autospec=trex_traffic_profile.TrexProfile)
+ traffic_generator.networks = {
+ "uplink_0": ["xe0"],
+ "downlink_0": ["xe1"],
+ }
+ traffic_generator.client = \
+ mock.Mock(return_value=True)
+ traffic = {ixia_rfc2544.IXIARFC2544Profile.DOWNLINK: {'iload': 10},
+ ixia_rfc2544.IXIARFC2544Profile.UPLINK: {'iload': 10}}
+ ixia_obj = mock.MagicMock()
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(
+ self.TRAFFIC_PROFILE)
+ r_f_c2544_profile.rate = 100
+ result = r_f_c2544_profile._ixia_traffic_generate(traffic, ixia_obj)
+ self.assertIsNone(result)
+
+ def test_execute_traffic_first_run(self):
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.first_run = True
+ rfc2544_profile.rate = 50
+ with mock.patch.object(rfc2544_profile, '_get_ixia_traffic_profile') \
+ as mock_get_tp, \
+ mock.patch.object(rfc2544_profile, '_ixia_traffic_generate') \
+ as mock_tgenerate, \
+ mock.patch.object(rfc2544_profile, 'update_traffic_profile') \
+ as mock_update_tp:
+ mock_get_tp.return_value = 'fake_tprofile'
+ output = rfc2544_profile.execute_traffic(mock.ANY,
+ ixia_obj=mock.ANY)
+
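+        # First run seeds the binary search bounds: max_rate <- rate (50), min_rate <- 0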
+ self.assertTrue(output)
+ self.assertFalse(rfc2544_profile.first_run)
+ self.assertEqual(50, rfc2544_profile.max_rate)
+ self.assertEqual(0, rfc2544_profile.min_rate)
+ mock_get_tp.assert_called_once()
+ mock_tgenerate.assert_called_once()
+ mock_update_tp.assert_called_once()
+
+ def test_execute_traffic_not_first_run(self):
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.first_run = False
+ rfc2544_profile.max_rate = 70
+ rfc2544_profile.min_rate = 0
+ with mock.patch.object(rfc2544_profile, '_get_ixia_traffic_profile') \
+ as mock_get_tp, \
+ mock.patch.object(rfc2544_profile, '_ixia_traffic_generate') \
+ as mock_tgenerate:
+ mock_get_tp.return_value = 'fake_tprofile'
+ rfc2544_profile.full_profile = mock.ANY
+ output = rfc2544_profile.execute_traffic(mock.ANY,
+ ixia_obj=mock.ANY)
+
+ self.assertFalse(output)
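+        # Subsequent runs move the rate to the midpoint of [min_rate, max_rate]: (0 + 70) / 2 = 35.0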
+ self.assertEqual(35.0, rfc2544_profile.rate)
+ mock_get_tp.assert_called_once()
+ mock_tgenerate.assert_called_once()
+
+ def test_update_traffic_profile(self):
+ traffic_generator = mock.Mock(
+ autospec=trex_traffic_profile.TrexProfile)
+ traffic_generator.networks = {
+ "uplink_0": ["xe0"], # private, one value for intfs
+ "downlink_0": ["xe1", "xe2"], # public, two values for intfs
+ "downlink_1": ["xe3"], # not in TRAFFIC PROFILE
+ "tenant_0": ["xe4"], # not public or private
+ }
+
+ ports_expected = [8, 3, 5]
+ traffic_generator.vnfd_helper.port_num.side_effect = ports_expected
+ traffic_generator.client.return_value = True
+
+ traffic_profile = deepcopy(self.TRAFFIC_PROFILE)
+ traffic_profile.update({
+ "uplink_0": ["xe0"],
+ "downlink_0": ["xe1", "xe2"],
+ })
+
+ r_f_c2544_profile = ixia_rfc2544.IXIARFC2544Profile(traffic_profile)
+ r_f_c2544_profile.full_profile = {}
+ r_f_c2544_profile.get_streams = mock.Mock()
+
+ self.assertIsNone(
+ r_f_c2544_profile.update_traffic_profile(traffic_generator))
+ self.assertEqual(r_f_c2544_profile.ports, ports_expected)
+
+ def test_get_drop_percentage_completed(self):
+ samples = {'iface_name_1':
+ {'RxThroughput': 10, 'TxThroughput': 10,
+ 'in_packets': 1000, 'out_packets': 1000},
+ 'iface_name_2':
+ {'RxThroughput': 11, 'TxThroughput': 13,
+ 'in_packets': 1005, 'out_packets': 1007}
+ }
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
+ completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
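+        # Aggregated over both interfaces: Tx = 10 + 13, Rx = 10 + 11,
+        # drop = (2007 - 2005) / 2007 * 100 ~= 0.1 %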
+ self.assertTrue(completed)
+ self.assertEqual(23.0, samples['TxThroughput'])
+ self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(0.1, samples['DropPercentage'])
+
+ def test_get_drop_percentage_over_drop_percentage(self):
+ samples = {'iface_name_1':
+ {'RxThroughput': 10, 'TxThroughput': 10,
+ 'in_packets': 1000, 'out_packets': 1000},
+ 'iface_name_2':
+ {'RxThroughput': 11, 'TxThroughput': 13,
+ 'in_packets': 1005, 'out_packets': 1007}
+ }
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.rate = 1000
+ completed, samples = rfc2544_profile.get_drop_percentage(
+ samples, 0, 0.05)
+ self.assertFalse(completed)
+ self.assertEqual(23.0, samples['TxThroughput'])
+ self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
+
+ def test_get_drop_percentage_under_drop_percentage(self):
+ samples = {'iface_name_1':
+ {'RxThroughput': 10, 'TxThroughput': 10,
+ 'in_packets': 1000, 'out_packets': 1000},
+ 'iface_name_2':
+ {'RxThroughput': 11, 'TxThroughput': 13,
+ 'in_packets': 1005, 'out_packets': 1007}
+ }
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.rate = 1000
+ completed, samples = rfc2544_profile.get_drop_percentage(
+ samples, 0.2, 1)
+ self.assertFalse(completed)
+ self.assertEqual(23.0, samples['TxThroughput'])
+ self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(rfc2544_profile.rate, rfc2544_profile.min_rate)
+
+ @mock.patch.object(ixia_rfc2544.LOG, 'info')
+ def test_get_drop_percentage_not_flow(self, *args):
+ samples = {'iface_name_1':
+ {'RxThroughput': 0, 'TxThroughput': 10,
+ 'in_packets': 1000, 'out_packets': 0},
+ 'iface_name_2':
+ {'RxThroughput': 0, 'TxThroughput': 13,
+ 'in_packets': 1005, 'out_packets': 0}
+ }
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.rate = 1000
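+        # No packets were received, so the drop is reported as 100 % and the
+        # current rate becomes the new upper bound (max_rate)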
+ completed, samples = rfc2544_profile.get_drop_percentage(
+ samples, 0.2, 1)
+ self.assertFalse(completed)
+ self.assertEqual(23.0, samples['TxThroughput'])
+ self.assertEqual(0, samples['RxThroughput'])
+ self.assertEqual(100, samples['DropPercentage'])
+ self.assertEqual(rfc2544_profile.rate, rfc2544_profile.max_rate)
+
+ def test_get_drop_percentage_first_run(self):
+ samples = {'iface_name_1':
+ {'RxThroughput': 10, 'TxThroughput': 10,
+ 'in_packets': 1000, 'out_packets': 1000},
+ 'iface_name_2':
+ {'RxThroughput': 11, 'TxThroughput': 13,
+ 'in_packets': 1005, 'out_packets': 1007}
+ }
+ rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
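+        # The first run also re-seeds the transmission rate to 33.45, which
+        # matches the 2007 measured Tx packets over an (assumed) 60 s duration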
+ completed, samples = rfc2544_profile.get_drop_percentage(
+ samples, 0, 1, first_run=True)
+ self.assertTrue(completed)
+ self.assertEqual(23.0, samples['TxThroughput'])
+ self.assertEqual(21.0, samples['RxThroughput'])
+ self.assertEqual(0.1, samples['DropPercentage'])
+ self.assertEqual(33.45, rfc2544_profile.rate)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py
new file mode 100644
index 000000000..48c449b20
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_acl.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.tests import STL_MOCKS
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.traffic_profile.prox_ACL import ProxACLProfile
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple
+
+
+class TestProxACLProfile(unittest.TestCase):
+
+ def test_run_test_with_pkt_size(self):
+ def target(*args):
+ runs.append(args[2])
+ if args[2] < 0 or args[2] > 100:
+ raise RuntimeError(' '.join([str(args), str(runs)]))
+ if args[2] > 75.0:
+ return fail_tuple, {}
+ return success_tuple, {}
+
+ tp_config = {
+ 'traffic_profile': {
+ 'upper_bound': 100.0,
+ 'lower_bound': 0.0,
+ 'tolerated_loss': 50.0,
+ 'attempts': 20
+ },
+ }
+
+ runs = []
+ success_tuple = ProxTestDataTuple(
+ 10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(
+ 10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ traffic_gen = mock.MagicMock()
+
+ profile_helper = mock.MagicMock()
+ profile_helper.run_test = target
+
+ profile = ProxACLProfile(tp_config)
+ profile.init(mock.MagicMock())
+
+ profile.prox_config["attempts"] = 20
+ profile.queue = mock.MagicMock()
+ profile.tolerated_loss = 50.0
+ profile.pkt_size = 128
+ profile.duration = 30
+ profile.test_value = 100.0
+ profile.tolerated_loss = 100.0
+ profile._profile_helper = profile_helper
+
+ profile.run_test_with_pkt_size(
+ traffic_gen, profile.pkt_size, profile.duration)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
new file mode 100644
index 000000000..7bfd67fe0
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -0,0 +1,184 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.tests import STL_MOCKS
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple
+ from yardstick.network_services.traffic_profile.prox_binsearch import ProxBinSearchProfile
+
+
+class TestProxBinSearchProfile(unittest.TestCase):
+
+ def test_execute_1(self):
+ def target(*args, **_):
+ runs.append(args[2])
+ if args[2] < 0 or args[2] > 100:
+ raise RuntimeError(' '.join([str(args), str(runs)]))
+ if args[2] > 75.0:
+ return fail_tuple, {}
+ return success_tuple, {}
+
+ tp_config = {
+ 'traffic_profile': {
+ 'packet_sizes': [200],
+ 'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
+ },
+ }
+
+ runs = []
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ traffic_generator = mock.MagicMock()
+
+ profile_helper = mock.MagicMock()
+ profile_helper.run_test = target
+
+ profile = ProxBinSearchProfile(tp_config)
+ profile.init(mock.MagicMock())
+ profile._profile_helper = profile_helper
+
+ profile.execute_traffic(traffic_generator)
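+        # With 2.0 % precision and failures above 75.0, the binary search
+        # narrows to [74.69, 76.09] in 7 iterations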
+ self.assertEqual(round(profile.current_lower, 2), 74.69)
+ self.assertEqual(round(profile.current_upper, 2), 76.09)
+ self.assertEqual(len(runs), 7)
+
+        # Result samples, including theor_max
+ result_tuple = {'Result_Actual_throughput': 5e-07,
+ 'Result_theor_max_throughput': 0.00012340000000000002,
+ 'Result_pktSize': 200}
+
+ profile.queue.put.assert_called_with(result_tuple)
+
+ success_result_tuple = {"Success_CurrentDropPackets": 0.5,
+ "Success_DropPackets": 0.5,
+ "Success_LatencyAvg": 5.3,
+ "Success_LatencyMax": 5.2,
+ "Success_LatencyMin": 5.1,
+ "Success_PktSize": 200,
+ "Success_RxThroughput": 7.5e-07,
+ "Success_Throughput": 7.5e-07,
+ "Success_TxThroughput": 0.00012340000000000002}
+
+        profile.queue.put.assert_any_call(success_result_tuple)
+
+ success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
+ "Success_DropPackets": 0.5,
+ "Success_LatencyAvg": 5.3,
+ "Success_LatencyMax": 5.2,
+ "Success_LatencyMin": 5.1,
+ "Success_PktSize": 200,
+ "Success_RxThroughput": 7.5e-07,
+ "Success_Throughput": 7.5e-07,
+ "Success_TxThroughput": 123.4,
+ "Success_can_be_lost": 409600,
+ "Success_drop_total": 20480,
+ "Success_rx_total": 4075520,
+ "Success_tx_total": 4096000}
+
+        profile.queue.put.assert_any_call(success_result_tuple2)
+
+ def test_execute_2(self):
+ def target(*args, **_):
+ runs.append(args[2])
+ if args[2] < 0 or args[2] > 100:
+ raise RuntimeError(' '.join([str(args), str(runs)]))
+ if args[2] > 25.0:
+ return fail_tuple, {}
+ return success_tuple, {}
+
+ tp_config = {
+ 'traffic_profile': {
+ 'packet_sizes': [200],
+ 'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
+ },
+ }
+
+ runs = []
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ traffic_generator = mock.MagicMock()
+
+ profile_helper = mock.MagicMock()
+ profile_helper.run_test = target
+
+ profile = ProxBinSearchProfile(tp_config)
+ profile.init(mock.MagicMock())
+ profile._profile_helper = profile_helper
+
+ profile.execute_traffic(traffic_generator)
+ self.assertEqual(round(profile.current_lower, 2), 24.06)
+ self.assertEqual(round(profile.current_upper, 2), 25.47)
+ self.assertEqual(len(runs), 7)
+
+ def test_execute_3(self):
+ def target(*args, **_):
+ runs.append(args[2])
+ if args[2] < 0 or args[2] > 100:
+ raise RuntimeError(' '.join([str(args), str(runs)]))
+ if args[2] > 75.0:
+ return fail_tuple, {}
+ return success_tuple, {}
+
+ tp_config = {
+ 'traffic_profile': {
+ 'packet_sizes': [200],
+ 'test_precision': 2.0,
+ 'tolerated_loss': 0.001,
+ },
+ }
+
+ runs = []
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ traffic_generator = mock.MagicMock()
+
+ profile_helper = mock.MagicMock()
+ profile_helper.run_test = target
+
+ profile = ProxBinSearchProfile(tp_config)
+ profile.init(mock.MagicMock())
+ profile._profile_helper = profile_helper
+
+ profile.upper_bound = 100.0
+ profile.lower_bound = 99.0
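+        # The whole [99, 100] window lies above the 75.0 failure threshold,
+        # so every attempt fails and only Result_ samples are reported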
+ profile.execute_traffic(traffic_generator)
+
+        # Result Samples
+        result_tuple = {"Result_theor_max_throughput": 0, "Result_pktSize": 200}
+        profile.queue.put.assert_called_with(result_tuple)
+
+        # Check that no "Success_" samples were pushed (none are expected here)
+        calls = profile.queue.put.mock_calls
+        for call in calls:
+            for call_detail in call[1]:
+                for k in call_detail:
+                    self.assertNotIn("Success_", k)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
new file mode 100644
index 000000000..cf31cc27c
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_profile.py
@@ -0,0 +1,128 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.tests import STL_MOCKS
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.traffic_profile.prox_profile import ProxProfile
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
+
+
+class TestProxProfile(unittest.TestCase):
+
+ def test_sort_vpci(self):
+ traffic_generator = mock.Mock()
+ interface_1 = {'virtual-interface': {'vpci': 'id1'}, 'name': 'name1'}
+ interface_2 = {'virtual-interface': {'vpci': 'id2'}, 'name': 'name2'}
+ interface_3 = {'virtual-interface': {'vpci': 'id3'}, 'name': 'name3'}
+ interfaces = [interface_2, interface_3, interface_1]
+ traffic_generator.vnfd_helper = {
+ 'vdu': [{'external-interface': interfaces}]}
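+        # sort_vpci orders the external interfaces by their vpci value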
+ output = ProxProfile.sort_vpci(traffic_generator)
+ self.assertEqual([interface_1, interface_2, interface_3], output)
+
+ def test_fill_samples(self):
+ samples = {}
+
+ traffic_generator = mock.MagicMock()
+ interfaces = [
+ ['id1', 'name1'],
+ ['id2', 'name2']
+ ]
+ traffic_generator.resource_helper.sut.port_stats.side_effect = [
+ list(range(12)),
+ list(range(10, 22)),
+ ]
+
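+        # port_stats returns 12 counters per port; indices 6 and 7 are the
+        # in/out packet counts picked up by fill_samples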
+ expected = {
+ 'name1': {
+ 'in_packets': 6,
+ 'out_packets': 7,
+ },
+ 'name2': {
+ 'in_packets': 16,
+ 'out_packets': 17,
+ },
+ }
+ with mock.patch.object(ProxProfile, 'sort_vpci', return_value=interfaces):
+ ProxProfile.fill_samples(samples, traffic_generator)
+
+ self.assertDictEqual(samples, expected)
+
+ def test_init(self):
+ tp_config = {
+ 'traffic_profile': {},
+ }
+
+ profile = ProxProfile(tp_config)
+ queue = mock.Mock()
+ profile.init(queue)
+ self.assertIs(profile.queue, queue)
+
+ def test_execute_traffic(self):
+ packet_sizes = [
+ 10,
+ 100,
+ 1000,
+ ]
+ tp_config = {
+ 'traffic_profile': {
+ 'packet_sizes': packet_sizes,
+ },
+ }
+
+ traffic_generator = mock.MagicMock()
+
+ setup_helper = traffic_generator.setup_helper
+ setup_helper.find_in_section.return_value = None
+
+ prox_resource_helper = ProxResourceHelper(setup_helper)
+ traffic_generator.resource_helper = prox_resource_helper
+
+ profile = ProxProfile(tp_config)
+
+ self.assertFalse(profile.done)
+ for _ in packet_sizes:
+ with self.assertRaises(NotImplementedError):
+ profile.execute_traffic(traffic_generator)
+
+ self.assertIsNone(profile.execute_traffic(traffic_generator))
+ self.assertTrue(profile.done)
+
+ def test_bounds_iterator(self):
+ tp_config = {
+ 'traffic_profile': {},
+ }
+
+ profile = ProxProfile(tp_config)
+ value = 0.0
+ for value in profile.bounds_iterator():
+ pass
+
+ self.assertEqual(value, 100.0)
+
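+        # When a logger is passed, the iterator logs a single debug line and
+        # one info line per step (10 steps)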
+ mock_logger = mock.MagicMock()
+ for _ in profile.bounds_iterator(mock_logger):
+ pass
+
+ mock_logger.debug.assert_called_once()
+ self.assertEqual(mock_logger.info.call_count, 10)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py
new file mode 100644
index 000000000..7a77e3295
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_ramp.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.tests import STL_MOCKS
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.traffic_profile.prox_ramp import ProxRampProfile
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple
+
+
+class TestProxRampProfile(unittest.TestCase):
+
+ def test_run_test_with_pkt_size(self):
+ tp_config = {
+ 'traffic_profile': {
+ 'lower_bound': 10.0,
+ 'upper_bound': 100.0,
+ 'step_value': 10.0,
+ },
+ }
+
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+
+ traffic_gen = mock.MagicMock()
+ traffic_gen._test_type = 'Generic'
+
+ profile_helper = ProxProfileHelper(traffic_gen.resource_helper)
+ profile_helper.run_test = run_test = mock.MagicMock(return_value=success_tuple)
+
+ profile = ProxRampProfile(tp_config)
+ profile.fill_samples = fill_samples = mock.MagicMock()
+ profile.queue = mock.MagicMock()
+ profile._profile_helper = profile_helper
+
+ profile.run_test_with_pkt_size(traffic_gen, 128, 30)
+ self.assertEqual(run_test.call_count, 10)
+ self.assertEqual(fill_samples.call_count, 10)
+
+ def test_run_test_with_pkt_size_with_fail(self):
+ tp_config = {
+ 'traffic_profile': {
+ 'lower_bound': 10.0,
+ 'upper_bound': 100.0,
+ 'step_value': 10.0,
+ },
+ }
+
+ success_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.1, 5.2, 5.3], 995, 1000, 123.4)
+ fail_tuple = ProxTestDataTuple(10.0, 1, 2, 3, 4, [5.6, 5.7, 5.8], 850, 1000, 123.4)
+
+ result_list = [
+ success_tuple,
+ success_tuple,
+ success_tuple,
+ fail_tuple,
+ success_tuple,
+ fail_tuple,
+ fail_tuple,
+ fail_tuple,
+ ]
+
+ traffic_gen = mock.MagicMock()
+ traffic_gen._test_type = 'Generic'
+
+ profile_helper = ProxProfileHelper(traffic_gen.resource_helper)
+ profile_helper.run_test = run_test = mock.MagicMock(side_effect=result_list)
+
+ profile = ProxRampProfile(tp_config)
+ profile.fill_samples = fill_samples = mock.MagicMock()
+ profile.queue = mock.MagicMock()
+ profile._profile_helper = profile_helper
+
+ profile.run_test_with_pkt_size(traffic_gen, 128, 30)
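+        # The ramp stops at the first failing step: three successful runs are
+        # sampled before the fourth attempt fails and ends the test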
+ self.assertEqual(run_test.call_count, 4)
+ self.assertEqual(fill_samples.call_count, 3)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
new file mode 100644
index 000000000..0cf93f9ae
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_rfc2544.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from trex_stl_lib import api as Pkt
+from trex_stl_lib import trex_stl_client
+from trex_stl_lib import trex_stl_packet_builder_scapy
+from trex_stl_lib import trex_stl_streams
+
+from yardstick.network_services.traffic_profile import rfc2544
+from yardstick.tests.unit import base
+
+
+class TestRFC2544Profile(base.BaseUnitTestCase):
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100,
+ "flow_number": 10,
+ "frame_size": 64}}
+
+ PROFILE = {'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ 'downlink_0':
+ {'ipv4':
+ {'outer_l2':
+ {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4':
+ {'dstip4': '1.1.1.1-1.15.255.255',
+ 'proto': 'udp',
+ 'srcip4': '90.90.1.1-90.105.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4':
+ {'srcport': '2001',
+                     'dstport': '1234', 'count': 1}}},
+ 'uplink_0':
+ {'ipv4':
+ {'outer_l2':
+ {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'}},
+ 'outer_l3v4':
+ {'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4':
+ {'dstport': '2001',
+ 'srcport': '1234', 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+
+ def test___init__(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ self.assertEqual(rfc2544_profile.max_rate, rfc2544_profile.rate)
+ self.assertEqual(0, rfc2544_profile.min_rate)
+
+ def test_stop_traffic(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ mock_generator = mock.Mock()
+ rfc2544_profile.stop_traffic(traffic_generator=mock_generator)
+ mock_generator.client.stop.assert_called_once()
+ mock_generator.client.reset.assert_called_once()
+ mock_generator.client.remove_all_streams.assert_called_once()
+
+ def test_execute_traffic(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ mock_generator = mock.Mock()
+ mock_generator.networks = {
+ 'downlink_0': ['xe0', 'xe1'],
+ 'uplink_0': ['xe2', 'xe3'],
+ 'downlink_1': []}
+ mock_generator.port_num.side_effect = [10, 20, 30, 40]
+ mock_generator.rfc2544_helper.correlated_traffic = False
+ rfc2544_profile.params = {
+ 'downlink_0': 'profile1',
+ 'uplink_0': 'profile2'}
+
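+        # correlated_traffic is disabled, so a profile is created and its
+        # streams added once for every port (4 ports in total)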
+ with mock.patch.object(rfc2544_profile, '_create_profile') as \
+ mock_create_profile:
+ rfc2544_profile.execute_traffic(traffic_generator=mock_generator)
+ mock_create_profile.assert_has_calls([
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY),
+ mock.call('profile1', rfc2544_profile.rate, mock.ANY),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY),
+ mock.call('profile2', rfc2544_profile.rate, mock.ANY)])
+ mock_generator.client.add_streams.assert_has_calls([
+ mock.call(mock.ANY, ports=[10]),
+ mock.call(mock.ANY, ports=[20]),
+ mock.call(mock.ANY, ports=[30]),
+ mock.call(mock.ANY, ports=[40])])
+            mock_generator.client.start.assert_called_once_with(
+                ports=[10, 20, 30, 40],
+                duration=rfc2544_profile.config.duration,
+                force=True)
+
+ @mock.patch.object(trex_stl_streams, 'STLProfile')
+ def test__create_profile(self, mock_stl_profile):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ port_pg_id = mock.ANY
+ profile_data = {'packetid_1': {'outer_l2': {'framesize': 'imix_info'}}}
+ rate = 100
+ with mock.patch.object(rfc2544_profile, '_create_imix_data') as \
+ mock_create_imix, \
+ mock.patch.object(rfc2544_profile, '_create_vm') as \
+ mock_create_vm, \
+ mock.patch.object(rfc2544_profile, '_create_streams') as \
+ mock_create_streams:
+ mock_create_imix.return_value = 'imix_data'
+ mock_create_streams.return_value = ['stream1']
+ rfc2544_profile._create_profile(profile_data, rate, port_pg_id)
+
+ mock_create_imix.assert_called_once_with('imix_info')
+ mock_create_vm.assert_called_once_with(
+ {'outer_l2': {'framesize': 'imix_info'}})
+ mock_create_streams.assert_called_once_with('imix_data', 100,
+ port_pg_id)
+ mock_stl_profile.assert_called_once_with(['stream1'])
+
+ def test__create_imix_data(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ data = {'64B': 50, '128B': 50}
+ self.assertEqual({'64': 50.0, '128': 50.0},
+ rfc2544_profile._create_imix_data(data))
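+        # Mixed-case size keys are accepted and the weights are normalized to
+        # percentages: 1:3 -> 25 % / 75 %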
+ data = {'64B': 1, '128b': 3}
+ self.assertEqual({'64': 25.0, '128': 75.0},
+ rfc2544_profile._create_imix_data(data))
+ data = {}
+ self.assertEqual({}, rfc2544_profile._create_imix_data(data))
+
+ def test__create_vm(self):
+ packet = {'outer_l2': 'l2_definition'}
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ with mock.patch.object(rfc2544_profile, '_set_outer_l2_fields') as \
+                mock_l2_fields:
+ rfc2544_profile._create_vm(packet)
+        mock_l2_fields.assert_called_once_with('l2_definition')
+
+ @mock.patch.object(trex_stl_packet_builder_scapy, 'STLPktBuilder',
+ return_value='packet')
+ def test__create_single_packet(self, mock_pktbuilder):
+ size = 128
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.ether_packet = Pkt.Eth()
+ rfc2544_profile.ip_packet = Pkt.IP()
+ rfc2544_profile.udp_packet = Pkt.UDP()
+ rfc2544_profile.trex_vm = 'trex_vm'
+ base_pkt = (rfc2544_profile.ether_packet / rfc2544_profile.ip_packet /
+ rfc2544_profile.udp_packet)
+ pad = (size - len(base_pkt)) * 'x'
+ output = rfc2544_profile._create_single_packet(size=size)
+ mock_pktbuilder.assert_called_once_with(pkt=base_pkt / pad,
+ vm='trex_vm')
+ self.assertEqual(output, 'packet')
+
+ @mock.patch.object(trex_stl_packet_builder_scapy, 'STLPktBuilder',
+ return_value='packet')
+ def test__create_single_packet_qinq(self, mock_pktbuilder):
+ size = 128
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ rfc2544_profile.ether_packet = Pkt.Eth()
+ rfc2544_profile.ip_packet = Pkt.IP()
+ rfc2544_profile.udp_packet = Pkt.UDP()
+ rfc2544_profile.trex_vm = 'trex_vm'
+ rfc2544_profile.qinq = True
+ rfc2544_profile.qinq_packet = Pkt.Dot1Q(vlan=1) / Pkt.Dot1Q(vlan=2)
+ base_pkt = (rfc2544_profile.ether_packet /
+ rfc2544_profile.qinq_packet / rfc2544_profile.ip_packet /
+ rfc2544_profile.udp_packet)
+ pad = (size - len(base_pkt)) * 'x'
+ output = rfc2544_profile._create_single_packet(size=size)
+ mock_pktbuilder.assert_called_once_with(pkt=base_pkt / pad,
+ vm='trex_vm')
+ self.assertEqual(output, 'packet')
+
+ @mock.patch.object(trex_stl_streams, 'STLFlowLatencyStats')
+ @mock.patch.object(trex_stl_streams, 'STLTXCont')
+ @mock.patch.object(trex_stl_client, 'STLStream')
+ def test__create_streams(self, mock_stream, mock_txcont, mock_latency):
+ imix_data = {'64': 25, '512': 75}
+ rate = 35
+ port_pg_id = rfc2544.PortPgIDMap()
+ port_pg_id.add_port(10)
+ mock_stream.side_effect = ['stream1', 'stream2']
+ mock_txcont.side_effect = ['txcont1', 'txcont2']
+ mock_latency.side_effect = ['latency1', 'latency2']
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ with mock.patch.object(rfc2544_profile, '_create_single_packet'):
+ output = rfc2544_profile._create_streams(imix_data, rate,
+ port_pg_id)
+ self.assertEqual(['stream1', 'stream2'], output)
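+        # One latency-enabled stream is built per IMIX entry; each stream's
+        # rate is weight * rate / 100 (25 % and 75 % of 35)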
+ mock_latency.assert_has_calls([
+ mock.call(pg_id=1), mock.call(pg_id=2)])
+ mock_txcont.assert_has_calls([
+ mock.call(percentage=float(25 * 35) / 100),
+ mock.call(percentage=float(75 * 35) / 100)], any_order=True)
+
+ def test_get_drop_percentage(self):
+ rfc2544_profile = rfc2544.RFC2544Profile(self.TRAFFIC_PROFILE)
+ samples = [
+ {'xe1': {'tx_throughput_fps': 100,
+ 'rx_throughput_fps': 101,
+ 'out_packets': 2000,
+ 'in_packets': 2010},
+ 'xe2': {'tx_throughput_fps': 200,
+ 'rx_throughput_fps': 201,
+ 'out_packets': 4000,
+ 'in_packets': 4010}},
+ {'xe1': {'tx_throughput_fps': 106,
+ 'rx_throughput_fps': 108,
+ 'out_packets': 2031,
+ 'in_packets': 2040,
+ 'latency': 'Latency1'},
+ 'xe2': {'tx_throughput_fps': 203,
+ 'rx_throughput_fps': 215,
+ 'out_packets': 4025,
+ 'in_packets': 4040,
+ 'latency': 'Latency2'}}
+ ]
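+        # The expected throughputs are the mean of the per-sample sums:
+        # Tx (100 + 200 + 106 + 203) / 2 = 304.5, Rx (101 + 201 + 108 + 215) / 2 = 312.5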
+ output = rfc2544_profile.get_drop_percentage(samples, 0, 0, False)
+ expected = {'DropPercentage': 0.3963,
+ 'Latency': {'xe1': 'Latency1', 'xe2': 'Latency2'},
+ 'RxThroughput': 312.5,
+ 'TxThroughput': 304.5,
+ 'CurrentDropPercentage': 0.3963,
+ 'Rate': 100,
+ 'Throughput': 312.5}
+ self.assertEqual(expected, output)
+
+
+class PortPgIDMapTestCase(base.BaseUnitTestCase):
+
+ def test_add_port(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ self.assertEqual(10, port_pg_id_map._last_port)
+ self.assertEqual([], port_pg_id_map._port_pg_id_map[10])
+
+ def test_get_pg_ids(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ port_pg_id_map.increase_pg_id()
+ port_pg_id_map.increase_pg_id()
+ port_pg_id_map.add_port(20)
+ port_pg_id_map.increase_pg_id()
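+        # pg ids grow globally across ports: port 10 collected [1, 2] and
+        # port 20 collected [3]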
+ self.assertEqual([1, 2], port_pg_id_map.get_pg_ids(10))
+ self.assertEqual([3], port_pg_id_map.get_pg_ids(20))
+
+ def test_increase_pg_id_no_port(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ self.assertIsNone(port_pg_id_map.increase_pg_id())
+
+ def test_increase_pg_id_last_port(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ self.assertEqual(1, port_pg_id_map.increase_pg_id())
+ self.assertEqual([1], port_pg_id_map.get_pg_ids(10))
+ self.assertEqual(10, port_pg_id_map._last_port)
+
+ def test_increase_pg_id(self):
+ port_pg_id_map = rfc2544.PortPgIDMap()
+ port_pg_id_map.add_port(10)
+ port_pg_id_map.increase_pg_id()
+ self.assertEqual(2, port_pg_id_map.increase_pg_id(port=20))
+ self.assertEqual([1], port_pg_id_map.get_pg_ids(10))
+ self.assertEqual([2], port_pg_id_map.get_pg_ids(20))
+ self.assertEqual(20, port_pg_id_map._last_port)
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py b/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
new file mode 100644
index 000000000..628e85459
--- /dev/null
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_trex_traffic_profile.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ipaddress
+
+import six
+import unittest
+
+from yardstick.common import exceptions as y_exc
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import trex_traffic_profile
+
+
+class TestTrexProfile(unittest.TestCase):
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64}}
+
+ EXAMPLE_ETHERNET_ADDR = "00:00:00:00:00:01"
+ EXAMPLE_IP_ADDR = "10.0.0.1"
+ EXAMPLE_IPv6_ADDR = "0064:ff9b:0:0:0:0:9810:6414"
+
+ PROFILE = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ tp_base.TrafficProfile.DOWNLINK: {
+ 'ipv4': {'outer_l2': {'framesize': {'64B': '100',
+ '1518B': '0',
+ '128B': '0',
+ '1400B': '0',
+ '256B': '0',
+ '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:02",
+ "dstmac": "00:00:00:00:00:01"},
+ 'outer_l3v4': {'dstip4': '1.1.1.1-1.1.2.2',
+ 'proto': 'udp',
+ 'srcip4': '9.9.1.1-90.1.2.2',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'srcport': '2001',
+                             'dstport': '1234',
+ 'count': 1}}},
+ tp_base.TrafficProfile.UPLINK: {
+ 'ipv4':
+ {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:01",
+ "dstmac": "00:00:00:00:00:02"},
+ 'outer_l3v4': {'dstip4': '9.9.1.1-90.105.255.255',
+ 'proto': 'udp',
+ 'srcip4': '1.1.1.1-1.15.255.255',
+ 'dscp': 0, 'ttl': 32, 'count': 1},
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234',
+ 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+ PROFILE_v6 = {
+ 'description': 'Traffic profile to run RFC2544 latency',
+ 'name': 'rfc2544',
+ 'traffic_profile': {'traffic_type': 'RFC2544Profile',
+ 'frame_rate': 100},
+ tp_base.TrafficProfile.DOWNLINK: {
+ 'ipv6': {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:02",
+ "dstmac": "00:00:00:00:00:01"},
+ 'outer_l3v4': {
+ 'dstip6':
+ '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
+ 'proto': 'udp',
+ 'srcip6':
+ '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'srcport': '2001',
+                             'dstport': '1234',
+ 'count': 1}}},
+ tp_base.TrafficProfile.UPLINK: {
+ 'ipv6': {'outer_l2': {'framesize':
+ {'64B': '100', '1518B': '0',
+ '128B': '0', '1400B': '0',
+ '256B': '0', '373b': '0',
+ '570B': '0'},
+ "srcmac": "00:00:00:00:00:01",
+ "dstmac": "00:00:00:00:00:02"},
+ 'outer_l3v4': {
+ 'dstip6':
+ '0064:ff9b:0:0:0:0:9810:2814-0064:ff9b:0:0:0:0:9810:2820',
+ 'proto': 'udp',
+ 'srcip6':
+ '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420',
+ 'dscp': 0, 'ttl': 32,
+ 'count': 1},
+ 'outer_l4': {'dstport': '2001',
+ 'srcport': '1234',
+ 'count': 1}}},
+ 'schema': 'isb:traffic_profile:0.1'}
+
+ def test___init__(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ self.assertEqual(trex_profile.pps, 100)
+
+ def test_qinq(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ qinq = {"S-VLAN": {"id": 128, "priority": 0, "cfi": 0},
+ "C-VLAN": {"id": 512, "priority": 0, "cfi": 0}}
+
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ self.assertIsNone(trex_profile.set_qinq(qinq))
+
+ qinq = {"S-VLAN": {"id": "128-130", "priority": 0, "cfi": 0},
+ "C-VLAN": {"id": "512-515", "priority": 0, "cfi": 0}}
+ self.assertIsNone(trex_profile.set_qinq(qinq))
+
+ def test__set_outer_l2_fields(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ qinq = {"S-VLAN": {"id": 128, "priority": 0, "cfi": 0},
+ "C-VLAN": {"id": 512, "priority": 0, "cfi": 0}}
+ outer_l2 = self.PROFILE[
+ tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l2']
+ outer_l2['QinQ'] = qinq
+ self.assertIsNone(trex_profile._set_outer_l2_fields(outer_l2))
+
+ def test__set_outer_l3v4_fields(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ outer_l3v4 = self.PROFILE[
+ tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l3v4']
+ outer_l3v4['proto'] = 'tcp'
+ self.assertIsNone(trex_profile._set_outer_l3v4_fields(outer_l3v4))
+
+ def test__set_outer_l3v6_fields(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ outer_l3v6 = self.PROFILE_v6[
+ tp_base.TrafficProfile.UPLINK]['ipv6']['outer_l3v4']
+ outer_l3v6['proto'] = 'tcp'
+ outer_l3v6['tc'] = 1
+ outer_l3v6['hlim'] = 10
+ self.assertIsNone(trex_profile._set_outer_l3v6_fields(outer_l3v6))
+
+ def test__set_outer_l4_fields(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ outer_l4 = self.PROFILE[
+ tp_base.TrafficProfile.UPLINK]['ipv4']['outer_l4']
+ self.assertIsNone(trex_profile._set_outer_l4_fields(outer_l4))
+
+ def test__count_ip_ipv4(self):
+ start, end, count = trex_traffic_profile.TrexProfile._count_ip(
+ '1.1.1.1', '1.2.3.4')
+ self.assertEqual('1.1.1.1', str(start))
+ self.assertEqual('1.2.3.4', str(end))
+ diff = (int(ipaddress.IPv4Address(six.u('1.2.3.4'))) -
+ int(ipaddress.IPv4Address(six.u('1.1.1.1'))))
+ self.assertEqual(diff, count)
+
+ def test__count_ip_ipv6(self):
+ start_ip = '0064:ff9b:0:0:0:0:9810:6414'
+ end_ip = '0064:ff9b:0:0:0:0:9810:6420'
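+        # Only the low 32 bits of the IPv6 addresses are compared:
+        # 0x98106414 .. 0x98106420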
+ start, end, count = trex_traffic_profile.TrexProfile._count_ip(
+ start_ip, end_ip)
+ self.assertEqual(0x98106414, start)
+ self.assertEqual(0x98106420, end)
+ self.assertEqual(0x98106420 - 0x98106414, count)
+
+ def test__count_ip_ipv6_exception(self):
+ start_ip = '0064:ff9b:0:0:0:0:9810:6420'
+ end_ip = '0064:ff9b:0:0:0:0:9810:6414'
+ with self.assertRaises(y_exc.IPv6RangeError):
+ trex_traffic_profile.TrexProfile._count_ip(start_ip, end_ip)
+
+ def test__dscp_range_action_partial_actual_count_zero(self):
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ dscp_partial = traffic_profile._dscp_range_action_partial()
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ dscp_partial('1', '1', 'unneeded')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__dscp_range_action_partial_count_greater_than_actual(self):
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ dscp_partial = traffic_profile._dscp_range_action_partial()
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ dscp_partial('1', '10', '100')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__udp_range_action_partial_actual_count_zero(self):
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ traffic_profile.udp['field1'] = 'value1'
+ udp_partial = traffic_profile._udp_range_action_partial('field1')
+
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ udp_partial('1', '1', 'unneeded')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__udp_range_action_partial_count_greater_than_actual(self):
+ traffic_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ traffic_profile.udp['field1'] = 'value1'
+ udp_partial = traffic_profile._udp_range_action_partial(
+ 'field1', 'not_used_count')
+ flow_vars_initial_length = len(traffic_profile.vm_flow_vars)
+ udp_partial('1', '10', '100')
+ self.assertEqual(len(traffic_profile.vm_flow_vars), flow_vars_initial_length + 2)
+
+ def test__general_single_action_partial(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+ trex_profile._general_single_action_partial(
+ trex_traffic_profile.ETHERNET)(trex_traffic_profile.SRC)(
+ self.EXAMPLE_ETHERNET_ADDR)
+ self.assertEqual(self.EXAMPLE_ETHERNET_ADDR,
+ trex_profile.ether_packet.src)
+
+ trex_profile._general_single_action_partial(trex_traffic_profile.IP)(
+ trex_traffic_profile.DST)(self.EXAMPLE_IP_ADDR)
+ self.assertEqual(self.EXAMPLE_IP_ADDR, trex_profile.ip_packet.dst)
+
+ trex_profile._general_single_action_partial(trex_traffic_profile.IPv6)(
+ trex_traffic_profile.DST)(self.EXAMPLE_IPv6_ADDR)
+ self.assertEqual(self.EXAMPLE_IPv6_ADDR, trex_profile.ip6_packet.dst)
+
+ trex_profile._general_single_action_partial(trex_traffic_profile.UDP)(
+ trex_traffic_profile.SRC_PORT)(5060)
+ self.assertEqual(5060, trex_profile.udp_packet.sport)
+
+ trex_profile._general_single_action_partial(trex_traffic_profile.IP)(
+ trex_traffic_profile.TYPE_OF_SERVICE)(0)
+ self.assertEqual(0, trex_profile.ip_packet.tos)
+
+ def test__set_proto_addr(self):
+ trex_profile = trex_traffic_profile.TrexProfile(self.PROFILE)
+
+ ether_range = "00:00:00:00:00:01-00:00:00:00:00:02"
+ ip_range = "1.1.1.2-1.1.1.10"
+ ipv6_range = '0064:ff9b:0:0:0:0:9810:6414-0064:ff9b:0:0:0:0:9810:6420'
+
+ trex_profile._set_proto_addr(trex_traffic_profile.ETHERNET,
+ trex_traffic_profile.SRC, ether_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.ETHERNET,
+ trex_traffic_profile.DST, ether_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IP,
+ trex_traffic_profile.SRC, ip_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IP,
+ trex_traffic_profile.DST, ip_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IPv6,
+ trex_traffic_profile.SRC, ipv6_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.IPv6,
+ trex_traffic_profile.DST, ipv6_range)
+ trex_profile._set_proto_addr(trex_traffic_profile.UDP,
+ trex_traffic_profile.SRC_PORT,
+ '5060-5090')
+ trex_profile._set_proto_addr(trex_traffic_profile.UDP,
+ trex_traffic_profile.DST_PORT, '5060')
diff --git a/yardstick/tests/unit/network_services/vnf_generic/__init__.py b/yardstick/tests/unit/network_services/vnf_generic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/__init__.py
diff --git a/yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py b/yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py
new file mode 100644
index 000000000..55b1955bc
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/test_vnfdgen.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from six.moves import range
+import unittest
+
+from yardstick.common.yaml_loader import yaml_load
+from yardstick.network_services.vnf_generic import vnfdgen
+
+
+UPLINK = "uplink"
+DOWNLINK = "downlink"
+
+TREX_VNFD_TEMPLATE = """
+vnfd:vnfd-catalog:
+ vnfd:
+ - id: TrexTrafficGen # ISB class mapping
+ name: trexgen
+ short-name: trexgen
+ description: TRex stateless traffic generator for RFC2544
+ mgmt-interface:
+ vdu-id: trexgen-baremetal
+ user: {{user}} # Value filled by vnfdgen
+ password: {{password}} # Value filled by vnfdgen
+ ip: {{ip}} # Value filled by vnfdgen
+ connection-point:
+ - name: xe0
+ type: VPORT
+ - name: xe1
+ type: VPORT
+ vdu:
+ - id: trexgen-baremetal
+ name: trexgen-baremetal
+ description: TRex stateless traffic generator for RFC2544
+ external-interface:
+ - name: xe0
+ virtual-interface:
+ type: PCI-PASSTHROUGH
+ vpci: '{{ interfaces.xe0.vpci}}'
+ local_ip: '{{ interfaces.xe0.local_ip }}'
+ dst_ip: '{{ interfaces.xe0.dst_ip }}'
+ local_mac: '{{ interfaces.xe0.local_mac }}'
+ dst_mac: '{{ interfaces.xe0.dst_mac }}'
+ bandwidth: 10 Gbps
+ vnfd-connection-point-ref: xe0
+ - name: xe1
+ virtual-interface:
+ type: PCI-PASSTHROUGH
+ vpci: '{{ interfaces.xe1.vpci }}'
+ local_ip: '{{ interfaces.xe1.local_ip }}'
+ dst_ip: '{{ interfaces.xe1.dst_ip }}'
+ local_mac: '{{ interfaces.xe1.local_mac }}'
+ dst_mac: '{{ interfaces.xe1.dst_mac }}'
+ bandwidth: 10 Gbps
+ vnfd-connection-point-ref: xe1
+ routing_table: {{ routing_table }}
+ nd_route_tbl: {{ nd_route_tbl }}
+
+ benchmark:
+ kpi:
+ - rx_throughput_fps
+ - tx_throughput_fps
+ - tx_throughput_mbps
+ - rx_throughput_mbps
+ - tx_throughput_pc_linerate
+ - rx_throughput_pc_linerate
+ - min_latency
+ - max_latency
+ - avg_latency
+"""
+
+COMPLETE_TREX_VNFD = \
+ {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'benchmark':
+ {'kpi':
+ ['rx_throughput_fps',
+ 'tx_throughput_fps',
+ 'tx_throughput_mbps',
+ 'rx_throughput_mbps',
+ 'tx_throughput_pc_linerate',
+ 'rx_throughput_pc_linerate',
+ 'min_latency',
+ 'max_latency',
+ 'avg_latency']},
+ 'connection-point': [{'name': 'xe0',
+ 'type': 'VPORT'},
+ {'name': 'xe1',
+ 'type': 'VPORT'}],
+ 'description': 'TRex stateless traffic generator for RFC2544',
+ 'id': 'TrexTrafficGen',
+ 'mgmt-interface': {'ip': '1.1.1.1',
+ 'password': 'berta',
+ 'user': 'berta',
+ 'vdu-id': 'trexgen-baremetal'},
+ 'name': 'trexgen',
+ 'short-name': 'trexgen',
+ 'vdu': [{'description': 'TRex stateless traffic generator for RFC2544',
+ 'external-interface':
+ [{'name': 'xe0',
+ 'virtual-interface': {'bandwidth': '10 Gbps',
+ 'dst_ip': '1.1.1.1',
+ 'dst_mac': '00:01:02:03:04:05',
+ 'local_ip': '1.1.1.2',
+ 'local_mac': '00:01:02:03:05:05',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vpci': '0000:00:10.2'},
+ 'vnfd-connection-point-ref': 'xe0'},
+ {'name': 'xe1',
+ 'virtual-interface': {'bandwidth': '10 Gbps',
+ 'dst_ip': '2.1.1.1',
+ 'dst_mac': '00:01:02:03:04:06',
+ 'local_ip': '2.1.1.2',
+ 'local_mac': '00:01:02:03:05:06',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vpci': '0000:00:10.1'},
+ 'vnfd-connection-point-ref': 'xe1'}],
+ 'id': 'trexgen-baremetal',
+ 'nd_route_tbl': [{'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ 'netmask': '112',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414'},
+ {'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ 'netmask': '112',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814'}],
+ 'routing_table': [{'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ 'netmask': '255.255.255.0',
+ 'network': '152.16.100.20'},
+ {'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ 'netmask': '255.255.255.0',
+ 'network': '152.16.40.20'}],
+ 'name': 'trexgen-baremetal'}]}]}}
+
+NODE_CFG = {'ip': '1.1.1.1',
+ 'name': 'demeter',
+ 'password': 'berta',
+ 'role': 'TrafficGen',
+ 'user': 'berta',
+ 'interfaces': {'xe0': {'dpdk_port_num': 1,
+ 'dst_ip': '1.1.1.1',
+ 'dst_mac': '00:01:02:03:04:05',
+ 'local_ip': '1.1.1.2',
+ 'local_mac': '00:01:02:03:05:05',
+ 'vpci': '0000:00:10.2'},
+ 'xe1': {'dpdk_port_num': 0,
+ 'dst_ip': '2.1.1.1',
+ 'dst_mac': '00:01:02:03:04:06',
+ 'local_ip': '2.1.1.2',
+ 'local_mac': '00:01:02:03:05:06',
+ 'vpci': '0000:00:10.1'}},
+ 'nd_route_tbl': [{u'gateway': u'0064:ff9b:0:0:0:0:9810:6414',
+ u'if': u'xe0',
+ u'netmask': u'112',
+ u'network': u'0064:ff9b:0:0:0:0:9810:6414'},
+ {u'gateway': u'0064:ff9b:0:0:0:0:9810:2814',
+ u'if': u'xe1',
+ u'netmask': u'112',
+ u'network': u'0064:ff9b:0:0:0:0:9810:2814'}],
+ 'routing_table': [{u'gateway': u'152.16.100.20',
+ u'if': u'xe0',
+ u'netmask': u'255.255.255.0',
+ u'network': u'152.16.100.20'},
+ {u'gateway': u'152.16.40.20',
+ u'if': u'xe1',
+ u'netmask': u'255.255.255.0',
+ u'network': u'152.16.40.20'}],
+ }
+
+
+# This needs templating, but "{}" formatting would clash with the Jinja "{{ }}" markers below, so use %s substitution.
+TRAFFIC_PROFILE_TPL = """
+%(0)s:
+ - ipv4:
+ outer_l2:
+ framesize:
+ 64B: "{{ get(imix, '%(0)s.imix_small', 10) }}"
+ 128B: "{{ get(imix, '%(0)s.imix_128B', 10) }}"
+ 256B: "{{ get(imix, '%(0)s.imix_256B', 10) }}"
+ 373B: "{{ get(imix, '%(0)s.imix_373B', 10) }}"
+ 570B: "{{get(imix, '%(0)s.imix_570B', 10) }}"
+ 1400B: "{{get(imix, '%(0)s.imix_1400B', 10) }}"
+ 1518B: "{{get(imix, '%(0)s.imix_1500B', 40) }}"
+""" % {"0": UPLINK}
+
+TRAFFIC_PROFILE = {
+ UPLINK: [{"ipv4": {"outer_l2":
+ {"framesize": {"64B": '10', "128B": '10',
+ "256B": '10', "373B": '10',
+ "570B": '10', "1400B": '10',
+ "1518B": '40'}}}}]}
+
+
+class TestRender(unittest.TestCase):
+
+ def test_render_none(self):
+
+ tmpl = "{{ routing_table }}"
+ self.assertEqual(vnfdgen.render(tmpl, routing_table=None), u'~')
+ self.assertIsNone(
+ yaml_load(vnfdgen.render(tmpl, routing_table=None)))
+
+ def test_render_unicode_dict(self):
+
+ tmpl = "{{ routing_table }}"
+ self.assertEqual(yaml_load(vnfdgen.render(
+ tmpl, **NODE_CFG)), NODE_CFG["routing_table"])
+
+
+class TestVnfdGen(unittest.TestCase):
+ """ Class to verify VNFS testcases """
+
+ def test_generate_vnfd(self):
+ """ Function to verify vnfd generation based on template """
+ self.maxDiff = None
+ generated_vnfd = vnfdgen.generate_vnfd(TREX_VNFD_TEMPLATE, NODE_CFG)
+ self.assertDictEqual(COMPLETE_TREX_VNFD, generated_vnfd)
+
+ def test_generate_tp_no_vars(self):
+ """ Function to verify traffic profile generation without imix """
+
+ self.maxDiff = None
+ generated_tp = vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL, {"imix": {}})
+ self.assertDictEqual(TRAFFIC_PROFILE, generated_tp)
+
+ def test_deepgetitem(self):
+ d = {'a': 1, 'b': 2}
+ self.assertEqual(vnfdgen.deepgetitem(d, "a"), 1)
+
+ def test_dict_flatten_int(self):
+ d = {'a': 1, 'b': 2}
+ self.assertEqual(vnfdgen.deepgetitem(d, "a"), 1)
+
+ def test_dict_flatten_str_int_key_first(self):
+ d = {'0': 1, 0: 24, 'b': 2}
+ self.assertEqual(vnfdgen.deepgetitem(d, "0"), 1)
+
+ def test_dict_flatten_int_key_fallback(self):
+ d = {0: 1, 'b': 2}
+ self.assertEqual(vnfdgen.deepgetitem(d, "0"), 1)
+
+ def test_dict_flatten_list(self):
+ d = {'a': 1, 'b': list(range(2))}
+ self.assertEqual(vnfdgen.deepgetitem(d, "b.0"), 0)
+
+ def test_dict_flatten_dict(self):
+ d = {'a': 1, 'b': {x: x for x in list(range(2))}}
+ self.assertEqual(vnfdgen.deepgetitem(d, "b.0"), 0)
+
+ def test_dict_flatten_only_str_key(self):
+ d = {'0': 1, 0: 24, 'b': 2}
+ self.assertRaises(AttributeError, vnfdgen.deepgetitem, d, 0)
+
+ def test_generate_tp_single_var(self):
+ """ Function to verify traffic profile generation with imix """
+
+ generated_tp = \
+ vnfdgen.generate_vnfd(TRAFFIC_PROFILE_TPL,
+ {"imix": {UPLINK: {"imix_small": '20'}}})
+ self.maxDiff = None
+ tp2 = dict(TRAFFIC_PROFILE)
+ tp2[UPLINK][0]["ipv4"]["outer_l2"]["framesize"]["64B"] = '20'
+ self.assertDictEqual(tp2, generated_tp)
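The test_vnfdgen.py cases above exercise vnfdgen.deepgetitem, which resolves a dotted key path such as "b.0" against nested dicts and lists, preferring string keys and falling back to integer indices. The sketch below reproduces only that asserted behaviour; the name deep_get and the exact fallback order are illustrative assumptions, not the yardstick implementation.

# Sketch only: mirrors the behaviour asserted above for vnfdgen.deepgetitem.
def deep_get(data, key_path):
    """Resolve a dotted key path against nested dicts and lists."""
    current = data
    for segment in key_path.split('.'):
        try:
            current = current[segment]        # try the plain string key first
        except (KeyError, TypeError, IndexError):
            current = current[int(segment)]   # fall back to an integer index/key
    return current

if __name__ == '__main__':
    d = {'a': 1, 'b': [10, {'c': 'x'}], '0': 'str-key-wins', 0: 'int-key'}
    assert deep_get(d, 'a') == 1
    assert deep_get(d, 'b.0') == 10            # list index, as in test_dict_flatten_list
    assert deep_get(d, 'b.1.c') == 'x'
    assert deep_get(d, '0') == 'str-key-wins'  # string key takes precedence over int key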
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/__init__.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/__init__.py
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml b/yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml
new file mode 100644
index 000000000..b184a29e2
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/acl_1rule.yaml
@@ -0,0 +1,47 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+access-list1:
+ acl:
+ access-list-entries:
+ - ace:
+ ace-oper-data:
+ match-counter: 0
+ actions: drop,count
+ matches:
+ destination-ipv4-network: 152.16.40.20/24
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 0.0.0.0/0
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1588
+ - ace:
+ ace-oper-data:
+ match-counter: 0
+ actions: drop,count
+ matches:
+ destination-ipv4-network: 0.0.0.0/0
+ destination-port-range:
+ lower-port: 0
+ upper-port: 65535
+ source-ipv4-network: 152.16.100.20/24
+ source-port-range:
+ lower-port: 0
+ upper-port: 65535
+ rule-name: rule1589
+ acl-name: sample-ipv4-acl
+ acl-type: ipv4-acl
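The acl_1rule.yaml fixture above describes two IETF-style ACL entries (rule1588 and rule1589) under a single ipv4-acl. A hedged loading sketch follows, using plain PyYAML rather than yardstick's own yaml_load wrapper; the relative file path is an assumption about the working directory.

# Sketch: load the acl_1rule.yaml fixture above and list its rule names.
# The real tests hand this file to the ACL VNF instead of inspecting it.
import yaml

with open('acl_1rule.yaml') as f:
    acl = yaml.safe_load(f)

entries = acl['access-list1']['acl']['access-list-entries']
rule_names = [entry['ace']['rule-name'] for entry in entries]
assert rule_names == ['rule1588', 'rule1589']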
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml b/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml
new file mode 100644
index 000000000..fb1be35c1
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/tc_baremetal_rfc2544_ipv4_1flow_64B.yaml
@@ -0,0 +1,41 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+schema: yardstick:task:0.1
+scenarios:
+- type: NSPerf
+ traffic_profile: "../../traffic_profiles/ipv4_throughput_vpe.yaml"
+ topology: vpe_vnf_topology.yaml
+ nodes:
+ tg__1: trafficgen_1.yardstick
+ vnf__1: vnf.yardstick
+ tc_options:
+ rfc2544:
+ allowed_drop_rate: 0.8 - 1
+ vnf_options:
+ vpe:
+ cfg: vpe_config
+ runner:
+ type: Duration
+ duration: 400
+ interval: 35
+ traffic_options:
+ flow: "../../traffic_profiles/ipv4_1flow_Packets_vpe.yaml"
+ imix: "../../traffic_profiles/imix_voice.yaml"
+context:
+ type: Node
+ name: yardstick
+ nfvi_type: baremetal
+ file: /etc/yardstick/nodes/pod.yaml
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
new file mode 100644
index 000000000..f75fa226a
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_acl_vnf.py
@@ -0,0 +1,372 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+import os
+
+from yardstick.tests import STL_MOCKS
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from yardstick.common import utils
+
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxVnf
+ from yardstick.network_services.nfvi.resource import ResourceProfile
+ from yardstick.network_services.vnf_generic.vnf.acl_vnf import AclApproxSetupEnvSetupEnvHelper
+
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+
+name = 'vnf__1'
+
+
+@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process")
+class TestAclApproxVnf(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'AclApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ scenario_cfg = {'options': {'packetsize': 64, 'traffic_type': 4,
+ 'rfc2544': {'allowed_drop_rate': '0.8 - 1'},
+ 'vnf__1': {'rules': 'acl_1rule.yaml',
+ 'vnf_config': {'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config':
+ '1C/1T',
+ 'worker_threads': 1}}
+ },
+ 'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
+ 'task_path': '/tmp',
+ 'tc': 'tc_ipv4_1Mflow_64B_packetsize',
+ 'runner': {'object': 'NetworkServiceTestCase',
+ 'interval': 35,
+ 'output_filename': '/tmp/yardstick.out',
+ 'runner_id': 74476, 'duration': 400,
+ 'type': 'Duration'},
+ 'traffic_profile': 'ipv4_throughput_acl.yaml',
+ 'traffic_options': {'flow': 'ipv4_Packets_acl.yaml',
+ 'imix': 'imix_voice.yaml'},
+ 'type': 'ISB',
+ 'nodes': {'tg__2': 'trafficgen_2.yardstick',
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'topology': 'vpe-tg-topology-baremetal.yaml'}
+
+ context_cfg = {'nodes': {'tg__2':
+ {'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens513f0',
+ 'vld_id': AclApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root'},
+ 'tg__1':
+ {'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens785f0',
+ 'vld_id': AclApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root'},
+ 'vnf__1':
+ {'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens786f0',
+ 'vld_id': AclApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens786f1',
+ 'vld_id': AclApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'routing_table':
+ [{'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0'},
+ {'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl':
+ [{'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'password': 'r00t',
+ 'VNF model': 'acl_vnf.yaml'}}}
+
+ def test___init__(self, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ self.assertIsNone(acl_approx_vnf._vnf_process)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ acl_approx_vnf.q_in = mock.MagicMock()
+ acl_approx_vnf.q_out = mock.MagicMock()
+ acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ acl_approx_vnf.resource = mock.Mock(autospec=ResourceProfile)
+ acl_approx_vnf.vnf_execute = mock.Mock(return_value="")
+ result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0}
+ self.assertEqual(result, acl_approx_vnf.collect_kpi())
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_vnf_execute_command(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ acl_approx_vnf.q_in = mock.MagicMock()
+ acl_approx_vnf.q_out = mock.MagicMock()
+ acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ cmd = "quit"
+ self.assertEqual("", acl_approx_vnf.vnf_execute(cmd))
+
+ @mock.patch(SSH_HELPER)
+ def test_get_stats(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ acl_approx_vnf.q_in = mock.MagicMock()
+ acl_approx_vnf.q_out = mock.MagicMock()
+ acl_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ result = "ACL TOTAL: pkts_processed: 100, pkts_drop: 0, spkts_received: 100"
+ acl_approx_vnf.vnf_execute = mock.Mock(return_value=result)
+ self.assertEqual(result, acl_approx_vnf.get_stats())
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.acl_vnf.hex")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.acl_vnf.eval")
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.acl_vnf.open')
+ @mock.patch(SSH_HELPER)
+ def test_run_acl(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ acl_approx_vnf._build_config = mock.MagicMock()
+ acl_approx_vnf.queue_wrapper = mock.MagicMock()
+ acl_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
+ acl_approx_vnf.vnf_cfg = {'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1}
+ acl_approx_vnf.all_options = {'traffic_type': '4',
+ 'topology': 'nsb_test_case.yaml'}
+ acl_approx_vnf._run()
+ acl_approx_vnf.ssh_helper.run.assert_called_once()
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.acl_vnf.YangModel")
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test_instantiate(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ acl_approx_vnf.deploy_helper = mock.MagicMock()
+ acl_approx_vnf.resource_helper = mock.MagicMock()
+ acl_approx_vnf._build_config = mock.MagicMock()
+ self.scenario_cfg['vnf_options'] = {'acl': {'cfg': "",
+ 'rules': ""}}
+ acl_approx_vnf.q_out.put("pipeline>")
+ acl_approx_vnf.WAIT_TIME = 0
+ self.scenario_cfg.update({"nodes": {"vnf__1": ""}})
+ self.assertIsNone(acl_approx_vnf.instantiate(self.scenario_cfg,
+ self.context_cfg))
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ acl_approx_vnf = AclApproxVnf(name, vnfd)
+ acl_approx_vnf._vnf_process = mock.MagicMock()
+ acl_approx_vnf._vnf_process.terminate = mock.Mock()
+ acl_approx_vnf.used_drivers = {"01:01.0": "i40e",
+ "01:01.1": "i40e"}
+ acl_approx_vnf.vnf_execute = mock.MagicMock()
+ acl_approx_vnf.dpdk_devbind = "dpdk-devbind.py"
+ acl_approx_vnf._resource_collect_stop = mock.Mock()
+ self.assertIsNone(acl_approx_vnf.terminate())
+
+
+class TestAclApproxSetupEnvSetupEnvHelper(unittest.TestCase):
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
+ @mock.patch.object(utils, 'open_relative_file')
+ def test_build_config(self, *args):
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+ scenario_helper.all_options = {}
+
+ acl_approx_setup_helper = AclApproxSetupEnvSetupEnvHelper(vnfd_helper,
+ ssh_helper,
+ scenario_helper)
+
+ acl_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
+ acl_approx_setup_helper.ssh_helper.all_ports = mock.Mock()
+ acl_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1])
+ expected = 'sudo tool_path -p 0x3 -f /tmp/acl_config -s /tmp/acl_script --hwlb 3'
+ self.assertEqual(acl_approx_setup_helper.build_config(), expected)
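test_acl_vnf.py starts by patching sys.modules with STL_MOCKS before importing the VNF classes, so modules that import the TRex STL client can still be loaded on machines without it. The standalone sketch below shows the same mock.patch.dict pattern; the module name fake_sdk is a made-up placeholder, not a real dependency.

# Sketch of the sys.modules patching pattern used at the top of the test module above.
import mock

FAKE_MODULES = {
    'fake_sdk': mock.MagicMock(),
    'fake_sdk.client': mock.MagicMock(),
}

patcher = mock.patch.dict('sys.modules', FAKE_MODULES)
patcher.start()

# Imports executed after start() resolve against the MagicMock stand-ins,
# so "import fake_sdk" succeeds even though no such package is installed.
import fake_sdk  # noqa: E402
assert isinstance(fake_sdk, mock.MagicMock)

patcher.stop()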
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py
new file mode 100644
index 000000000..ebedcb451
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_base.py
@@ -0,0 +1,236 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import multiprocessing
+import os
+
+import mock
+import unittest
+
+from yardstick.network_services.vnf_generic.vnf import base
+from yardstick.ssh import SSH
+
+
+IP_PIPELINE_CFG_FILE_TPL = ("arp_route_tbl = ({port0_local_ip_hex},"
+ "{port0_netmask_hex},1,{port1_local_ip_hex}) "
+ "({port1_local_ip_hex},{port1_netmask_hex},0,"
+ "{port0_local_ip_hex})")
+
+IP_PIPELINE_ND_CFG_FILE_TPL = (
+    "\nnd_route_tbl = ({port1_dst_ip_hex6},"
+    "{port1_dst_netmask_hex6},1,{port1_dst_ip_hex6})")
+
+_LOCAL_OBJECT = object()
+
+VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+}
+
+VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ]
+ }
+}
+
+
+class FileAbsPath(object):
+ def __init__(self, module_file):
+ super(FileAbsPath, self).__init__()
+ self.module_path = os.path.dirname(os.path.abspath(module_file))
+
+ def get_path(self, filename):
+ file_path = os.path.join(self.module_path, filename)
+ return file_path
+
+
+def mock_ssh(mock_ssh_type, spec=None, exec_result=_LOCAL_OBJECT, run_result=_LOCAL_OBJECT):
+ if spec is None:
+ spec = SSH
+
+ if exec_result is _LOCAL_OBJECT:
+ exec_result = 0, "", ""
+
+ if run_result is _LOCAL_OBJECT:
+ run_result = 0, "", ""
+
+ mock_ssh_instance = mock.Mock(autospec=spec)
+ mock_ssh_instance._get_client.return_value = mock.Mock()
+ mock_ssh_instance.execute.return_value = exec_result
+ mock_ssh_instance.run.return_value = run_result
+ mock_ssh_type.from_node.return_value = mock_ssh_instance
+ return mock_ssh_instance
+
+
+class TestQueueFileWrapper(unittest.TestCase):
+ def setUp(self):
+ self.prompt = "pipeline>"
+ self.q_in = multiprocessing.Queue()
+ self.q_out = multiprocessing.Queue()
+
+ def test___init__(self):
+ queue_file_wrapper = \
+ base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
+ self.assertEqual(queue_file_wrapper.prompt, self.prompt)
+
+ def test_clear(self):
+ queue_file_wrapper = \
+ base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
+ queue_file_wrapper.bufsize = 5
+ queue_file_wrapper.write("pipeline>")
+ queue_file_wrapper.close()
+ self.assertIsNone(queue_file_wrapper.clear())
+ self.assertIsNotNone(queue_file_wrapper.q_out.empty())
+
+ def test_close(self):
+ queue_file_wrapper = \
+ base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
+ self.assertIsNone(queue_file_wrapper.close())
+
+ def test_read(self):
+ queue_file_wrapper = \
+ base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
+ queue_file_wrapper.q_in.put("pipeline>")
+ self.assertEqual("pipeline>", queue_file_wrapper.read(20))
+
+ def test_write(self):
+ queue_file_wrapper = \
+ base.QueueFileWrapper(self.q_in, self.q_out, self.prompt)
+ queue_file_wrapper.write("pipeline>")
+ self.assertIsNotNone(queue_file_wrapper.q_out.empty())
+
+
+class TestGenericVNF(unittest.TestCase):
+
+ def test_definition(self):
+ """Make sure that the abstract class cannot be instantiated"""
+ with self.assertRaises(TypeError) as exc:
+ # pylint: disable=abstract-class-instantiated
+ base.GenericVNF('vnf1', VNFD['vnfd:vnfd-catalog']['vnfd'][0])
+
+ msg = ("Can't instantiate abstract class GenericVNF with abstract methods "
+ "collect_kpi, instantiate, scale, start_collect, "
+ "stop_collect, terminate, wait_for_instantiate")
+
+ self.assertEqual(msg, str(exc.exception))
+
+
+class TestGenericTrafficGen(unittest.TestCase):
+
+ def test_definition(self):
+ """Make sure that the abstract class cannot be instantiated"""
+ vnfd = VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ name = 'vnf1'
+ with self.assertRaises(TypeError) as exc:
+ # pylint: disable=abstract-class-instantiated
+ base.GenericTrafficGen(name, vnfd)
+ msg = ("Can't instantiate abstract class GenericTrafficGen with "
+ "abstract methods collect_kpi, instantiate, run_traffic, "
+ "scale, terminate")
+ self.assertEqual(msg, str(exc.exception))
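Several of the new test modules patch the VnfSshHelper class (via the SSH_HELPER string) and hand the resulting mock class to the mock_ssh() helper defined in test_base.py above, which wires from_node() to a Mock whose execute() and run() return (0, "", "") by default. A short usage sketch follows; the test class and method names are illustrative, while the patch target is the same SSH_HELPER path the new tests use.

# Usage sketch for the mock_ssh() helper defined above.
import mock
import unittest

from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh

SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'


class ExampleSshTest(unittest.TestCase):

    @mock.patch(SSH_HELPER)
    def test_command_over_mocked_ssh(self, ssh):
        # from_node() now returns a Mock whose execute() yields our tuple.
        ssh_instance = mock_ssh(ssh, exec_result=(0, 'stdout', ''))
        self.assertEqual(ssh_instance.execute('ls'), (0, 'stdout', ''))
        self.assertIs(ssh.from_node.return_value, ssh_instance)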
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
new file mode 100644
index 000000000..bd8f53e21
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_cgnapt_vnf.py
@@ -0,0 +1,455 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from copy import deepcopy
+import os
+
+import mock
+import unittest
+
+
+from yardstick.common import utils
+
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+
+
+
+from yardstick.network_services.vnf_generic.vnf.cgnapt_vnf import CgnaptApproxVnf, \
+ CgnaptApproxSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf import cgnapt_vnf
+from yardstick.network_services.nfvi.resource import ResourceProfile
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+
+name = 'vnf__0'
+
+
+class TestCgnaptApproxSetupEnvHelper(unittest.TestCase):
+
+ def test__generate_ip_from_pool(self):
+
+ ip = CgnaptApproxSetupEnvHelper._generate_ip_from_pool("1.2.3.4")
+ self.assertEqual(next(ip), '1.2.3.4')
+ self.assertEqual(next(ip), '1.2.4.4')
+ self.assertEqual(next(ip), '1.2.5.4')
+
+ def test__update_cgnat_script_file(self):
+
+ sample = """\
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+link 0 down
+link 0 config {port0_local_ip} {port0_prefixlen}
+link 0 up
+link 1 down
+link 1 config {port1_local_ip} {port1_prefixlen}
+link 1 up
+"""
+ header = "This is a header"
+
+ out = CgnaptApproxSetupEnvHelper._update_cgnat_script_file(header, sample.splitlines())
+ self.assertNotIn("This is a header", out)
+
+ def test__get_cgnapt_config(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.uplink_ports = [{"name": 'a'}, {"name": "b"}, {"name": "c"}]
+
+ helper = CgnaptApproxSetupEnvHelper(vnfd_helper, mock.Mock(), mock.Mock())
+ result = helper._get_cgnapt_config()
+ self.assertIsNotNone(result)
+
+ def test_scale(self):
+ helper = CgnaptApproxSetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock())
+ with self.assertRaises(NotImplementedError):
+ helper.scale()
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
+ @mock.patch.object(utils, 'open_relative_file')
+ def test_build_config(self, *args):
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+ scenario_helper.all_options = {}
+
+ cgnat_approx_setup_helper = CgnaptApproxSetupEnvHelper(vnfd_helper,
+ ssh_helper,
+ scenario_helper)
+
+ cgnat_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
+ cgnat_approx_setup_helper.ssh_helper.all_ports = mock.Mock()
+ cgnat_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1])
+ expected = 'sudo tool_path -p 0x3 -f /tmp/cgnapt_config -s /tmp/cgnapt_script --hwlb 3'
+ self.assertEqual(cgnat_approx_setup_helper.build_config(), expected)
+
+
+@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process")
+class TestCgnaptApproxVnf(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'CgnaptApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ SCENARIO_CFG = {
+ 'options': {
+ 'packetsize': 64,
+ 'traffic_type': 4,
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ },
+ 'vnf__0': {
+ 'napt': 'dynamic',
+ 'vnf_config': {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config':
+ '1C/1T',
+ 'worker_threads': 1,
+ },
+ },
+ 'flow': {'count': 1,
+ 'dst_ip': [{'tg__1': 'xe0'}],
+ 'public_ip': [''],
+ 'src_ip': [{'tg__0': 'xe0'}]},
+ },
+ 'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
+ 'task_path': '/tmp',
+ 'tc': 'tc_ipv4_1Mflow_64B_packetsize',
+ 'runner': {
+ 'object': 'NetworkServiceTestCase',
+ 'interval': 35,
+ 'output_filename': '/tmp/yardstick.out',
+ 'runner_id': 74476,
+ 'duration': 400,
+ 'type': 'Duration',
+ },
+ 'traffic_profile': 'ipv4_throughput_acl.yaml',
+ 'type': 'NSPerf',
+ 'nodes': {
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'tg__0': 'trafficgen_0.yardstick',
+ 'vnf__0': 'vnf.yardstick',
+ },
+ 'topology': 'vpe-tg-topology-baremetal.yaml',
+ }
+
+ context_cfg = {'nodes': {'tg__2':
+ {'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens513f0',
+ 'vld_id': CgnaptApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root'},
+ 'tg__1':
+ {'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens785f0',
+ 'vld_id': CgnaptApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root'},
+ 'vnf__0':
+ {'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__0',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens786f0',
+ 'vld_id': CgnaptApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens786f1',
+ 'vld_id': CgnaptApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'routing_table':
+ [{'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0'},
+ {'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl':
+ [{'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'password': 'r00t',
+ 'VNF model': 'cgnapt_vnf.yaml'}}}
+
+ def setUp(self):
+ self.scenario_cfg = deepcopy(self.SCENARIO_CFG)
+
+ def test___init__(self, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ self.assertIsNone(cgnapt_approx_vnf._vnf_process)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf._vnf_process = mock.MagicMock(
+ **{"is_alive.return_value": True, "exitcode": None})
+ cgnapt_approx_vnf.q_in = mock.MagicMock()
+ cgnapt_approx_vnf.q_out = mock.MagicMock()
+ cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ cgnapt_approx_vnf.resource = mock.Mock(autospec=ResourceProfile)
+ result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0}
+ self.assertEqual(result, cgnapt_approx_vnf.collect_kpi())
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ @mock.patch(SSH_HELPER)
+ def test_vnf_execute_command(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf.q_in = mock.MagicMock()
+ cgnapt_approx_vnf.q_out = mock.MagicMock()
+ cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ cmd = "quit"
+ self.assertEqual("", cgnapt_approx_vnf.vnf_execute(cmd))
+
+ @mock.patch(SSH_HELPER)
+ def test_get_stats(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf.q_in = mock.MagicMock()
+ cgnapt_approx_vnf.q_out = mock.MagicMock()
+ cgnapt_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ result = \
+ "CG-NAPT(.*\n)*Received 100, Missed 0, Dropped 0,Translated 100,ingress"
+ cgnapt_approx_vnf.vnf_execute = mock.Mock(return_value=result)
+ self.assertListEqual(list(result), list(cgnapt_approx_vnf.get_stats()))
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.cgnapt_vnf.hex")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.cgnapt_vnf.eval")
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.cgnapt_vnf.open')
+ @mock.patch(SSH_HELPER)
+ def test_run_vcgnapt(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf._build_config = mock.MagicMock()
+ cgnapt_approx_vnf.queue_wrapper = mock.MagicMock()
+ cgnapt_approx_vnf.ssh_helper = mock.MagicMock()
+ cgnapt_approx_vnf.ssh_helper.run = mock.MagicMock()
+ cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
+ cgnapt_approx_vnf._run()
+ cgnapt_approx_vnf.ssh_helper.run.assert_called_once()
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test_instantiate(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf.deploy_helper = mock.MagicMock()
+ cgnapt_approx_vnf.resource_helper = mock.MagicMock()
+ cgnapt_approx_vnf._build_config = mock.MagicMock()
+ self.scenario_cfg['vnf_options'] = {'acl': {'cfg': "",
+ 'rules': ""}}
+ cgnapt_approx_vnf.q_out.put("pipeline>")
+ cgnapt_vnf.WAIT_TIME = 3
+ self.scenario_cfg.update({"nodes": {"vnf__0": ""}})
+ self.assertIsNone(cgnapt_approx_vnf.instantiate(self.scenario_cfg,
+ self.context_cfg))
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf._vnf_process = mock.MagicMock()
+ cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
+ cgnapt_approx_vnf.used_drivers = {"01:01.0": "i40e",
+ "01:01.1": "i40e"}
+ cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
+ cgnapt_approx_vnf.dpdk_nic_bind = "dpdk_nic_bind.py"
+ cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
+ self.assertIsNone(cgnapt_approx_vnf.terminate())
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test__vnf_up_post(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.scenario_cfg['options'][name]['napt'] = 'static'
+
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf._vnf_process = mock.MagicMock()
+ cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
+ cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
+ cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
+ cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
+ cgnapt_approx_vnf._vnf_up_post()
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test__vnf_up_post_short(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ cgnapt_approx_vnf = CgnaptApproxVnf(name, vnfd)
+ cgnapt_approx_vnf._vnf_process = mock.MagicMock()
+ cgnapt_approx_vnf._vnf_process.terminate = mock.Mock()
+ cgnapt_approx_vnf.vnf_execute = mock.MagicMock()
+ cgnapt_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
+ cgnapt_approx_vnf._resource_collect_stop = mock.Mock()
+ cgnapt_approx_vnf._vnf_up_post()
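test__generate_ip_from_pool above pins down the behaviour of CgnaptApproxSetupEnvHelper._generate_ip_from_pool: an endless generator that starts at the given address and steps the third octet on each iteration. The standalone generator below reproduces only that asserted behaviour and is an assumption, not the cgnapt_vnf.py implementation.

# Sketch: reproduces exactly what test__generate_ip_from_pool asserts.
def generate_ip_from_pool(ip):
    octets = ip.split('.')
    while True:
        yield '.'.join(octets)
        octets[2] = str(int(octets[2]) + 1)   # step the third octet


if __name__ == '__main__':
    pool = generate_ip_from_pool('1.2.3.4')
    assert next(pool) == '1.2.3.4'
    assert next(pool) == '1.2.4.4'
    assert next(pool) == '1.2.5.4'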
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
new file mode 100644
index 000000000..cc695a5bf
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_helpers.py
@@ -0,0 +1,2335 @@
+
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from itertools import repeat, chain
+import os
+import socket
+import time
+
+import mock
+import unittest
+
+from yardstick.tests import STL_MOCKS
+from yardstick.common import utils
+from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
+from yardstick.network_services import constants
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxSocketHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import PacketDump
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import CoreSocketTuple
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxTestDataTuple
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import TotStatsTuple
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDataHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxProfileHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxMplsProfileHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxBngProfileHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxVpeProfileHelper
+ from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxlwAFTRProfileHelper
+
+class TestCoreTuple(unittest.TestCase):
+ def test___init__(self):
+ core_tuple = CoreSocketTuple('core 5s6')
+ self.assertEqual(core_tuple.core_id, 5)
+ self.assertEqual(core_tuple.socket_id, 6)
+ self.assertFalse(core_tuple.is_hyperthread())
+
+ core_tuple = CoreSocketTuple('core 5s6h')
+ self.assertEqual(core_tuple.core_id, 5)
+ self.assertEqual(core_tuple.socket_id, 6)
+ self.assertTrue(core_tuple.is_hyperthread())
+
+ def test___init__negative(self):
+ bad_inputs = [
+ '',
+ '5',
+ '5s',
+ '6h',
+ '5s6',
+ 'core',
+ 'core h',
+ 'core 5s',
+ 'core 5 6',
+ 'core 5 6h',
+ 'core 5d6',
+ 'core 5d6h',
+ 1,
+ 2.3,
+ [],
+ {},
+ object(),
+ ]
+
+ for bad_input in bad_inputs:
+ with self.assertRaises(ValueError):
+ CoreSocketTuple(bad_input)
+
+ def test_find_in_topology(self):
+ topology_in = {
+ 6: {
+ 5: {
+ 'key1': ['a', 'b'],
+ 'key2': ['c', 'd'],
+ },
+ },
+ }
+
+ core_tuple = CoreSocketTuple('core 5s6')
+
+ expected = 'a'
+ result = core_tuple.find_in_topology(topology_in)
+ self.assertEqual(result, expected)
+
+ core_tuple = CoreSocketTuple('core 5s6h')
+
+ expected = 'c'
+ result = core_tuple.find_in_topology(topology_in)
+ self.assertEqual(result, expected)
+
+ def test_find_in_topology_negative(self):
+ core_tuple = CoreSocketTuple('core 6s5')
+ with self.assertRaises(ValueError):
+ # no socket key
+ core_tuple.find_in_topology({})
+
+ with self.assertRaises(ValueError):
+ # no core key
+ core_tuple.find_in_topology({5: {}})
+
+ with self.assertRaises(ValueError):
+ # no first value (as needed by non-hyperthread core)
+ core_tuple.find_in_topology({5: {6: {'key1': []}}})
+
+ core_tuple = CoreSocketTuple('core 6s5h')
+ with self.assertRaises(ValueError):
+ # no second value (as needed by hyperthread core)
+ core_tuple.find_in_topology({5: {6: {'key1': ['e']}}})
+
+
+class TestTotStatsTuple(unittest.TestCase):
+ def test___new___negative(self):
+ with self.assertRaises(TypeError):
+ # no values
+ TotStatsTuple()
+
+ with self.assertRaises(TypeError):
+ # one, non-integer value
+ TotStatsTuple('a')
+
+ with self.assertRaises(TypeError):
+ # too many values
+ TotStatsTuple(3, 4, 5, 6, 7)
+
+
+class TestProxTestDataTuple(unittest.TestCase):
+ def test___init__(self):
+ prox_test_data = ProxTestDataTuple(1, 2, 3, 4, 5, 6, 7, 8, 9)
+ self.assertEqual(prox_test_data.tolerated, 1)
+ self.assertEqual(prox_test_data.tsc_hz, 2)
+ self.assertEqual(prox_test_data.delta_rx, 3)
+ self.assertEqual(prox_test_data.delta_tx, 4)
+ self.assertEqual(prox_test_data.delta_tsc, 5)
+ self.assertEqual(prox_test_data.latency, 6)
+ self.assertEqual(prox_test_data.rx_total, 7)
+ self.assertEqual(prox_test_data.tx_total, 8)
+ self.assertEqual(prox_test_data.requested_pps, 9)
+
+ def test_properties(self):
+ prox_test_data = ProxTestDataTuple(1, 2, 3, 4, 5, 6, 7, 8, 9)
+ self.assertEqual(prox_test_data.pkt_loss, 12.5)
+ self.assertEqual(prox_test_data.tx_mpps, 1.6 / 1e6)
+ self.assertEqual(prox_test_data.can_be_lost, 0)
+ self.assertEqual(prox_test_data.drop_total, 1)
+ self.assertFalse(prox_test_data.success)
+
+ prox_test_data = ProxTestDataTuple(10, 2, 3, 4, 5, 6, 997, 998, 9)
+ self.assertTrue(prox_test_data.success)
+
+ def test_pkt_loss_zero_division(self):
+ prox_test_data = ProxTestDataTuple(1, 2, 3, 4, 5, 6, 7, 0, 9)
+ self.assertEqual(prox_test_data.pkt_loss, 100.0)
+
+ def test_get_samples(self):
+ prox_test_data = ProxTestDataTuple(1, 2, 3, 4, 5, [6.1, 6.9, 6.4], 7, 8, 9)
+
+ expected = {
+ "Throughput": 1.2 / 1e6,
+ "DropPackets": 12.5,
+ "CurrentDropPackets": 12.5,
+ "RequestedTxThroughput": 9 / 1e6,
+ "TxThroughput": 1.6 / 1e6,
+ "RxThroughput": 1.2 / 1e6,
+ "PktSize": 64,
+ "PortSample": 1,
+ "LatencyMin": 6.1,
+ "LatencyMax": 6.9,
+ "LatencyAvg": 6.4,
+ }
+ result = prox_test_data.get_samples(64, port_samples={"PortSample": 1})
+ self.assertDictEqual(result, expected)
+
+ expected = {
+ "Throughput": 1.2 / 1e6,
+ "DropPackets": 0.123,
+ "CurrentDropPackets": 0.123,
+ "RequestedTxThroughput": 9 / 1e6,
+ "TxThroughput": 1.6 / 1e6,
+ "RxThroughput": 1.2 / 1e6,
+ "PktSize": 64,
+ "LatencyMin": 6.1,
+ "LatencyMax": 6.9,
+ "LatencyAvg": 6.4,
+ }
+ result = prox_test_data.get_samples(64, 0.123)
+ self.assertDictEqual(result, expected)
+
+ @mock.patch('yardstick.LOG_RESULT', create=True)
+ def test_log_data(self, mock_logger):
+ my_mock_logger = mock.MagicMock()
+ prox_test_data = ProxTestDataTuple(1, 2, 3, 4, 5, [6.1, 6.9, 6.4], 7, 8, 9)
+ prox_test_data.log_data()
+
+ my_mock_logger.debug.assert_not_called()
+ mock_logger.debug.assert_not_called()
+
+ mock_logger.debug.reset_mock()
+ prox_test_data.log_data(my_mock_logger)
+ my_mock_logger.assert_not_called()
+ mock_logger.debug.assert_not_called()
+
+
+class TestPacketDump(unittest.TestCase):
+ PAYLOAD = "payload"
+
+ def test__init__(self):
+ PacketDump("port_id", len(self.PAYLOAD), self.PAYLOAD)
+
+ def test___str__(self):
+ expected = '<PacketDump port: port_id payload: {}>'.format(self.PAYLOAD)
+ dump1 = PacketDump("port_id", len(self.PAYLOAD), self.PAYLOAD)
+ self.assertEqual(str(dump1), expected)
+
+ def test_port_id(self):
+ p = PacketDump("port_id", len(self.PAYLOAD), self.PAYLOAD)
+ self.assertEqual(p.port_id, "port_id")
+
+ def test_data_len(self):
+ p = PacketDump("port_id", len(self.PAYLOAD), self.PAYLOAD)
+ self.assertEqual(p.data_len, len(self.PAYLOAD))
+
+ def test_payload(self):
+ p = PacketDump("port_id", len(self.PAYLOAD), self.PAYLOAD)
+ self.assertEqual(p.payload(), self.PAYLOAD)
+
+ self.assertEqual(p.payload(3), self.PAYLOAD[3:])
+
+ self.assertEqual(p.payload(end=3), self.PAYLOAD[:4])
+
+ self.assertEqual(p.payload(2, 4), self.PAYLOAD[2:5])
+
+
+PACKET_DUMP_1 = """\
+pktdump,3,11
+hello world
+"""
+
+PACKET_DUMP_2 = """\
+pktdump,3,11
+hello world
+pktdump,2,9
+brown fox jumped over
+pktdump,4,8
+lazy
+dog
+"""
+
+PACKET_DUMP_NON_1 = """\
+not_a_dump,1,2
+other data
+"""
+
+PACKET_DUMP_MIXED_1 = """\
+pktdump,3,11
+hello world
+not_a_dump,1,2
+other data
+"""
+
+PACKET_DUMP_BAD_1 = """\
+pktdump,one,12
+bad port id
+"""
+
+PACKET_DUMP_BAD_2 = """\
+pktdump,3,twelve
+bad data length
+"""
+
+PACKET_DUMP_BAD_3 = """\
+pktdump,3
+no data length value
+"""
+
+
+class TestProxSocketHelper(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_time_sleep.stop()
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
+ def test___init__(self, mock_socket):
+ expected = mock_socket.socket()
+ prox = ProxSocketHelper()
+ result = prox._sock
+ self.assertEqual(result, expected)
+
+ def test_connect(self):
+ mock_sock = mock.MagicMock()
+ prox = ProxSocketHelper(mock_sock)
+ prox.connect('10.20.30.40', 23456)
+ mock_sock.connect.assert_called_once()
+
+ def test_get_sock(self):
+ mock_sock = mock.MagicMock()
+ prox = ProxSocketHelper(mock_sock)
+ result = prox.get_socket()
+ self.assertIs(result, mock_sock)
+
+ # TODO(elfoley): Split this into three tests
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.select')
+ def test_get_data(self, mock_select):
+ mock_select.select.side_effect = [[1], [0]]
+ mock_socket = mock.MagicMock()
+ mock_recv = mock_socket.recv()
+ mock_recv.decode.return_value = ""
+ prox = ProxSocketHelper(mock_socket)
+ ret = prox.get_data()
+ self.assertEqual(ret, "")
+ self.assertEqual(len(prox._pkt_dumps), 0)
+
+ mock_select.select.reset_mock()
+ mock_select.select.side_effect = chain([['a'], ['']], repeat([1], 3))
+ mock_recv.decode.return_value = PACKET_DUMP_1
+ ret = prox.get_data()
+ self.assertEqual(mock_select.select.call_count, 2)
+ self.assertEqual(ret, 'pktdump,3,11')
+ self.assertEqual(len(prox._pkt_dumps), 1)
+
+ mock_select.select.reset_mock()
+ mock_select.select.side_effect = chain([[object()], [None]], repeat([1], 3))
+ mock_recv.decode.return_value = PACKET_DUMP_2
+ ret = prox.get_data()
+ self.assertEqual(mock_select.select.call_count, 1)
+ self.assertEqual(ret, 'jumped over')
+ self.assertEqual(len(prox._pkt_dumps), 3)
+
+ def test__parse_socket_data_mixed_data(self):
+ prox = ProxSocketHelper(mock.MagicMock())
+ ret, _ = prox._parse_socket_data(PACKET_DUMP_NON_1, False)
+ self.assertEqual(ret, 'not_a_dump,1,2')
+ self.assertEqual(len(prox._pkt_dumps), 0)
+
+ ret, _ = prox._parse_socket_data(PACKET_DUMP_MIXED_1, False)
+ self.assertEqual(ret, 'not_a_dump,1,2')
+ self.assertEqual(len(prox._pkt_dumps), 1)
+
+ def test__parse_socket_data_bad_data(self):
+ prox = ProxSocketHelper(mock.MagicMock())
+ with self.assertRaises(ValueError):
+ prox._parse_socket_data(PACKET_DUMP_BAD_1, False)
+
+ with self.assertRaises(ValueError):
+ prox._parse_socket_data(PACKET_DUMP_BAD_2, False)
+
+ ret, _ = prox._parse_socket_data(PACKET_DUMP_BAD_3, False)
+ self.assertEqual(ret, 'pktdump,3')
+
+ def test__parse_socket_data_pkt_dump_only(self):
+ prox = ProxSocketHelper(mock.MagicMock())
+ ret, _ = prox._parse_socket_data('', True)
+ self.assertFalse(ret)
+
+ ret, _ = prox._parse_socket_data(PACKET_DUMP_1, True)
+ self.assertTrue(ret)
+
+ ret, _ = prox._parse_socket_data(PACKET_DUMP_2, True)
+ self.assertTrue(ret)
+
+ def test_put_command(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.put_command("data")
+ mock_socket.sendall.assert_called_once()
+
+ def test_put_command_socket_error(self):
+ mock_socket = mock.MagicMock()
+ mock_socket.sendall.side_effect = OSError
+ prox = ProxSocketHelper(mock_socket)
+ prox.put_command("data")
+ mock_socket.sendall.assert_called_once()
+
+ def test_get_packet_dump(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox._pkt_dumps = []
+ self.assertIsNone(prox.get_packet_dump())
+
+ prox._pkt_dumps = [234]
+ self.assertEqual(prox.get_packet_dump(), 234)
+ self.assertEqual(prox._pkt_dumps, [])
+
+ def test_stop_all_reset(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.stop_all_reset()
+ mock_socket.sendall.assert_called()
+
+ def test_stop_all(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.stop_all()
+ mock_socket.sendall.assert_called()
+
+ def test_stop(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.stop([3, 4, 5], 16)
+ mock_socket.sendall.assert_called()
+
+ def test_start_all(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.start_all()
+ mock_socket.sendall.assert_called()
+
+ def test_start(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.start([3, 4, 5])
+ mock_socket.sendall.assert_called()
+
+ def test_reset_stats(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.reset_stats()
+ mock_socket.sendall.assert_called()
+
+ def test_set_pkt_size(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.set_pkt_size([3, 4, 5], 1024)
+ self.assertEqual(mock_socket.sendall.call_count, 3)
+
+ def test_set_value(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.set_value([3, 4, 5], 10, 20, 30)
+ self.assertEqual(mock_socket.sendall.call_count, 3)
+
+ def test_reset_values(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.reset_values([3, 4, 5])
+ self.assertEqual(mock_socket.sendall.call_count, 3)
+
+ def test_set_speed(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.set_speed([3, 4, 5], 1000)
+ self.assertEqual(mock_socket.sendall.call_count, 3)
+
+ def test_slope_speed(self):
+ core_data = [
+ {
+ 'cores': [3, 4, 5],
+ 'speed': 1000,
+ },
+ {
+ 'cores': [9, 10, 11],
+ 'speed': '500.5',
+ },
+ ]
+
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.set_speed = set_speed = mock.MagicMock()
+ prox.slope_speed(core_data, 5)
+ self.assertEqual(set_speed.call_count, 20)
+
+ set_speed.reset_mock()
+ prox.slope_speed(core_data, 5, 5)
+ self.assertEqual(set_speed.call_count, 10)
+
+ def test_set_pps(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.set_pps([3, 4, 5], 1000, 512)
+ self.assertEqual(mock_socket.sendall.call_count, 3)
+
+ def test_lat_stats(self):
+ latency_output = [
+ '1, 2 , 3', # has white space
+ '4,5', # too short
+ '7,8,9,10.5,11', # too long with float, but float is in unused portion
+ 'twelve,13,14', # value as English word
+ '15,16.2,17', # float in used portion
+ ]
+
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(side_effect=latency_output)
+
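+        # Only cores 3 and 5 produce well-formed integer triples; the malformed
+        # replies for cores 4, 6 and 7 are skipped.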
+ expected = (
+ {
+ 3: 1,
+ 5: 7,
+ },
+ {
+ 3: 2,
+ 5: 8,
+ },
+ {
+ 3: 3,
+ 5: 9,
+ },
+ )
+ result = prox.lat_stats([3, 4, 5, 6, 7], 16)
+ self.assertEqual(mock_socket.sendall.call_count, 5)
+ self.assertEqual(result, expected)
+
+ def test_get_all_tot_stats_error(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value='3,4,5')
+ expected = [0, 0, 0, 0]
+ result = prox.get_all_tot_stats()
+ self.assertEqual(result, expected)
+
+ def test_get_all_tot_stats(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value='3,4,5,6')
+ expected = 3, 4, 5, 6
+ result = prox.get_all_tot_stats()
+ self.assertEqual(result, expected)
+
+ def test_hz(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value='3,4,5,6')
+ expected = 6
+ result = prox.hz()
+ self.assertEqual(result, expected)
+
+ def test_core_stats(self):
+ core_stats = [
+ '3,4,5,6',
+ '7,8,9,10,NaN',
+ '11,12,13,14,15',
+ ]
+
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(side_effect=core_stats)
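+        # The first three counters are summed column-wise (21, 24, 27); the fourth
+        # field keeps the value from the last core polled, and the stray NaN is ignored.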
+ expected = 21, 24, 27, 14
+ result = prox.core_stats([3, 4, 5], 16)
+ self.assertEqual(result, expected)
+
+ def test_port_stats(self):
+ port_stats = [
+ ','.join(str(n) for n in range(3, 15)),
+ ','.join(str(n) for n in range(8, 32, 2)),
+ ','.join(str(n) for n in range(5, 89, 7)),
+ ]
+
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(side_effect=port_stats)
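+        # Each expected value is the element-wise sum of the three per-port replies.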
+ expected = [16, 26, 36, 46, 56, 66, 76, 86, 96, 106, 116, 126]
+ result = prox.port_stats([3, 4, 5])
+ self.assertEqual(result, expected)
+
+ def test_measure_tot_stats(self):
+ start_tot = 3, 4, 5, 6
+ end_tot = 7, 9, 11, 13
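+        # delta_tot is end_tot minus start_tot, element-wise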
+ delta_tot = 4, 5, 6, 7
+
+ get_data_output = [
+ ','.join(str(n) for n in start_tot),
+ ','.join(str(n) for n in end_tot),
+ ]
+
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(side_effect=get_data_output)
+ expected = {
+ 'start_tot': start_tot,
+ 'end_tot': end_tot,
+ 'delta': delta_tot,
+ }
+ with prox.measure_tot_stats() as result:
+ pass
+ self.assertEqual(result, expected)
+
+ def test_tot_stats(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value='3,4,5,6')
+ expected = 3, 4, 5
+ result = prox.tot_stats()
+ self.assertEqual(result, expected)
+
+ def test_tot_ierrors(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.get_data = mock.MagicMock(return_value='3,4,5,6')
+ expected = 3, 3
+ result = prox.tot_ierrors()
+ self.assertEqual(result, expected)
+
+ def test_set_count(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.set_count(432, [3, 4, 5])
+ self.assertEqual(mock_socket.sendall.call_count, 3)
+
+ def test_dump_rx(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.dump_rx(3, 5, 8)
+ mock_socket.sendall.assert_called_once()
+
+ def test_quit(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.quit()
+ mock_socket.sendall.assert_called()
+
+ def test_force_quit(self):
+ mock_socket = mock.MagicMock()
+ prox = ProxSocketHelper(mock_socket)
+ prox.force_quit()
+ mock_socket.sendall.assert_called()
+
+
+class TestProxDpdkVnfSetupEnvHelper(unittest.TestCase):
+
+ VNFD0 = {
+ 'short-name': 'ProxVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'name': 'proxvnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'proxvnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'uplink_0',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.19',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'downlink_0',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ },
+ ],
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'proxvnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'id': 'ProxApproxVnf',
+ 'name': 'ProxVnf',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD0,
+ ],
+ },
+ }
+
+ def test_global_section(self):
+ setup_helper = ProxDpdkVnfSetupEnvHelper(mock.MagicMock(), mock.MagicMock(),
+ mock.MagicMock())
+
+ setup_helper._prox_config_data = [('a', [])]
+
+ with self.assertRaises(KeyError):
+ _ = setup_helper.global_section
+
+ global_section = (
+ 'global', [
+ ('not_name', 'other data'),
+ ('name_not', 'more data'),
+ ('name', 'prox type'),
+ ],
+ )
+
+ setup_helper._prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ global_section,
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'tagged'),
+ ]),
+ ('section3', [
+ ('key1', 'value1'),
+ ('key2', 'value2'),
+ ('key3', 'value3'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'udp'),
+ ]),
+ ]
+
+ result = setup_helper.global_section
+ self.assertEqual(result, global_section[1])
+
+ def test_find_in_section(self):
+ setup_helper = ProxDpdkVnfSetupEnvHelper(mock.MagicMock(), mock.MagicMock(),
+ mock.MagicMock())
+
+ setup_helper._prox_config_data = [
+ ('global', [
+ ('not_name', 'other data'),
+ ('name_not', 'more data'),
+ ('name', 'prox type'),
+ ]),
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'tagged'),
+ ]),
+ ('section3', [
+ ('key1', 'value1'),
+ ('key2', 'value2'),
+ ('key3', 'value3'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'udp'),
+ ]),
+ ]
+
+ expected = 'value3'
+ result = setup_helper.find_in_section('section3', 'key3')
+ self.assertEqual(result, expected)
+
+ expected = 'default value'
+ result = setup_helper.find_in_section('section3', 'key4', 'default value')
+ self.assertEqual(result, expected)
+
+ with self.assertRaises(KeyError):
+ setup_helper.find_in_section('section4', 'key1')
+
+ with self.assertRaises(KeyError):
+ setup_helper.find_in_section('section1', 'key1')
+
+ def test__replace_quoted_with_value(self):
+ # empty string
+ input_str = ''
+ expected = ''
+ result = ProxDpdkVnfSetupEnvHelper._replace_quoted_with_value(input_str, 'cat')
+ self.assertEqual(result, expected)
+
+ # no quoted substring
+ input_str = 'lion tiger bear'
+ expected = 'lion tiger bear'
+ result = ProxDpdkVnfSetupEnvHelper._replace_quoted_with_value(input_str, 'cat')
+ self.assertEqual(result, expected)
+
+ # partially quoted substring
+ input_str = 'lion "tiger bear'
+ expected = 'lion "tiger bear'
+ result = ProxDpdkVnfSetupEnvHelper._replace_quoted_with_value(input_str, 'cat')
+ self.assertEqual(result, expected)
+
+ # one quoted substring
+ input_str = 'lion "tiger" bear'
+ expected = 'lion "cat" bear'
+ result = ProxDpdkVnfSetupEnvHelper._replace_quoted_with_value(input_str, 'cat')
+ self.assertEqual(result, expected)
+
+ # two quoted substrings
+ input_str = 'lion "tiger" bear "shark" whale'
+ expected = 'lion "cat" bear "shark" whale'
+ result = ProxDpdkVnfSetupEnvHelper._replace_quoted_with_value(input_str, 'cat')
+ self.assertEqual(result, expected)
+
+ # two quoted substrings, both replaced
+ input_str = 'lion "tiger" bear "shark" whale'
+ expected = 'lion "cat" bear "cat" whale'
+ result = ProxDpdkVnfSetupEnvHelper._replace_quoted_with_value(input_str, 'cat', 2)
+ self.assertEqual(result, expected)
+
+ def test__get_tx_port(self):
+ # no data
+ input_data = {'section1': []}
+ expected = -1
+ result = ProxDpdkVnfSetupEnvHelper._get_tx_port('section1', input_data)
+ self.assertEqual(result, expected)
+
+ # data for other section
+ input_data = {
+ 'section1': [],
+ 'section2': [
+ ('rx port', '3'),
+ ('tx port', '4'),
+ ],
+ }
+ expected = -1
+ result = ProxDpdkVnfSetupEnvHelper._get_tx_port('section1', input_data)
+ self.assertEqual(result, expected)
+
+ # data for section
+ input_data['section1'] = section1 = [
+ ('rx port', '4', 'more', 432),
+ ('tx port', '3'),
+ ]
+ expected = 3
+ result = ProxDpdkVnfSetupEnvHelper._get_tx_port('section1', input_data)
+ self.assertEqual(result, expected)
+
+        # more data for the section: the most recently added 'tx port' entry takes precedence
+ section1.extend([
+ ('rx port', '2'),
+ ('tx port', '1', 'and more', 234),
+ ])
+ expected = 1
+ result = ProxDpdkVnfSetupEnvHelper._get_tx_port('section1', input_data)
+ self.assertEqual(result, expected)
+
+ # TODO(elfoley): Split this into several smaller tests
+ def test_write_prox_config(self):
+ input_data = {}
+ expected = ''
+ result = ProxDpdkVnfSetupEnvHelper.write_prox_config(input_data)
+ self.assertEqual(result, expected)
+
+ input_data = [
+ [
+ 'section1',
+ [],
+ ],
+ ]
+ expected = '[section1]'
+ result = ProxDpdkVnfSetupEnvHelper.write_prox_config(input_data)
+ self.assertEqual(result, expected)
+
+ input_data = [
+ [
+ 'section1',
+ [],
+ ],
+ [
+ 'section2',
+ [
+ ['key1', 'value1'],
+ ['__name__', 'not this one'],
+ ['key2', None],
+ ['key3', 234],
+ ['key4', 'multi-line\nvalue'],
+ ],
+ ],
+ ]
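+        # '__name__' entries are dropped, a None value renders the key alone, and
+        # newlines inside a value are continued with a tab.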
+ expected = os.linesep.join([
+ '[section1]',
+ '[section2]',
+ 'key1=value1',
+ 'key2',
+ 'key3=234',
+ 'key4=multi-line\n\tvalue',
+ ])
+ result = ProxDpdkVnfSetupEnvHelper.write_prox_config(input_data)
+ self.assertEqual(result, expected)
+
+ def test_prox_config_data(self):
+ setup_helper = ProxDpdkVnfSetupEnvHelper(mock.MagicMock(), mock.MagicMock(),
+ mock.MagicMock())
+
+ setup_helper.config_queue = config_queue = mock.MagicMock()
+ config_queue.get.return_value = expected = [('s', [('a', 3), ('b', 45)])]
+
+ result = setup_helper.prox_config_data
+ self.assertEqual(result, expected)
+
+ @mock.patch.object(utils, 'find_relative_file')
+ def test_build_config_file_no_additional_file(self, mock_find_path):
+ vnf1 = {
+ 'prox_args': {'-c': ""},
+ 'prox_path': 'd',
+ 'prox_config': 'e/f',
+ 'prox_generate_parameter': False,
+ }
+
+ mock_find_path.side_effect = ['1', '2']
+
+ vnfd_helper = mock.MagicMock()
+ ssh_helper = mock.MagicMock()
+ scenario_helper = ScenarioHelper('vnf1')
+ scenario_helper.scenario_cfg = {
+ 'task_path': 'a/b',
+ 'options': {
+ 'vnf1': vnf1,
+ },
+ }
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.copy_to_target = mock.MagicMock(return_value='3')
+ helper.generate_prox_config_file = mock.MagicMock(return_value='4')
+ helper.upload_prox_config = mock.MagicMock(return_value='5')
+
+ self.assertEqual(helper.additional_files, {})
+ self.assertNotEqual(helper._prox_config_data, '4')
+ self.assertNotEqual(helper.remote_path, '5')
+ helper.build_config_file()
+ self.assertEqual(helper.additional_files, {})
+ self.assertEqual(helper._prox_config_data, '4')
+ self.assertEqual(helper.remote_path, '5')
+
+ @mock.patch.object(utils, 'find_relative_file')
+ def test_build_config_file_additional_file_string(self, mock_find_path):
+ vnf1 = {
+ 'prox_args': {'-c': ""},
+ 'prox_path': 'd',
+ 'prox_config': 'e/f',
+ 'prox_files': 'g/h.i',
+ 'prox_generate_parameter': True,
+ }
+
+ mock_find_path.side_effect = ['1', '2']
+ vnfd_helper = mock.MagicMock()
+ ssh_helper = mock.MagicMock()
+ scenario_helper = ScenarioHelper('vnf1')
+ scenario_helper.scenario_cfg = {
+ 'task_path': 'a/b',
+ 'options': {
+ 'vnf1': vnf1,
+ },
+ }
+
+ vnfd_helper.port_pairs.all_ports = ['xe0', 'xe1', 'xe2', 'xe3']
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.copy_to_target = mock.MagicMock(side_effect=['33', '34', '35'])
+ helper.generate_prox_config_file = mock.MagicMock(return_value='44')
+ helper.upload_prox_config = mock.MagicMock(return_value='55')
+
+ self.assertEqual(helper.additional_files, {})
+ expected = {'h.i': '33'}
+ helper.build_config_file()
+ self.assertDictEqual(helper.additional_files, expected)
+
+ @mock.patch.object(utils, 'find_relative_file')
+ def test_build_config_file_additional_file(self, mock_find_path):
+ vnf1 = {
+ 'prox_args': {'-c': ""},
+ 'prox_path': 'd',
+ 'prox_config': 'e/f',
+ 'prox_files': [
+ 'g/h.i',
+ 'j/k/l',
+ 'm_n',
+ ],
+ }
+
+ mock_find_path.side_effect = ['1', '2'] + [str(i) for i in range(len(vnf1['prox_files']))]
+ vnfd_helper = mock.MagicMock()
+ ssh_helper = mock.MagicMock()
+ scenario_helper = ScenarioHelper('vnf1')
+ scenario_helper.scenario_cfg = {
+ 'task_path': 'a/b',
+ 'options': {
+ 'vnf1': vnf1,
+ },
+ }
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.copy_to_target = mock.MagicMock(side_effect=['33', '34', '35'])
+ helper.generate_prox_config_file = mock.MagicMock(return_value='44')
+ helper.upload_prox_config = mock.MagicMock(return_value='55')
+
+ self.assertEqual(helper.additional_files, {})
+ self.assertNotEqual(helper._prox_config_data, '44')
+ self.assertNotEqual(helper.remote_path, '55')
+ expected = {'h.i': '33', 'l': '34', 'm_n': '35'}
+ helper.build_config_file()
+ self.assertDictEqual(helper.additional_files, expected)
+ self.assertEqual(helper._prox_config_data, '44')
+ self.assertEqual(helper.remote_path, '55')
+
+ def test_build_config(self):
+ vnf1 = {
+ 'prox_args': {'-f': ""},
+ 'prox_path': '/opt/nsb_bin/prox',
+ 'prox_config': 'configs/gen_l2fwd-2.cfg',
+ 'prox_files': [
+ 'g/h.i',
+ 'j/k/l',
+ 'm_n',
+ ],
+ }
+
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ ssh_helper.join_bin_path.return_value = '/opt/nsb_bin/prox'
+ scenario_helper = ScenarioHelper('vnf1')
+ scenario_helper.scenario_cfg = {
+ 'task_path': 'a/b',
+ 'options': {
+ 'vnf1': vnf1,
+ },
+ }
+
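+        # prox_args contributes the leading '-f'; build_config then appends
+        # '-f <remote config path>' and wraps everything in a sudo'd cd to the PROX dir.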
+ expected = ("sudo bash -c 'cd /opt/nsb_bin; /opt/nsb_bin/prox -o cli "
+ "-f -f /tmp/prox.cfg '")
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper,
+ scenario_helper)
+ with mock.patch.object(helper, 'build_config_file') as mock_cfg_file:
+ helper.remote_path = '/tmp/prox.cfg'
+ prox_cmd = helper.build_config()
+ self.assertEqual(prox_cmd, expected)
+ mock_cfg_file.assert_called_once()
+
+ def test__insert_additional_file(self):
+ vnfd_helper = mock.MagicMock()
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.MagicMock()
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.additional_files = {"ipv4.lua": "/tmp/ipv4.lua"}
+ res = helper._insert_additional_file('dofile("ipv4.lua")')
+ self.assertEqual(res, 'dofile("/tmp/ipv4.lua")')
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.ConfigParser')
+ def test_generate_prox_config_file(self, mock_parser_type):
+ def init(*args):
+ if sections_data:
+ args[-1].extend(sections_data)
+ return mock.MagicMock()
+
+ sections_data = []
+
+ mock_parser_type.side_effect = init
+
+ vnfd_helper = VnfdHelper(self.VNFD0)
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.MagicMock()
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.additional_files = {}
+
+ expected = []
+ result = helper.generate_prox_config_file('a/b')
+ self.assertEqual(result, expected)
+
+ helper.additional_files = {"ipv4.lua": "/tmp/ipv4.lua"}
+
+ helper.remote_prox_file_name = 'remote'
+ sections_data = [
+ [
+ 'lua',
+ [
+ ['dofile("ipv4.lua")', ''],
+ ],
+ ],
+ [
+ 'port 0',
+ [
+ ['ip', ''],
+ ['mac', 'foo'],
+ ['dst mac', '@@1'],
+ ['tx port', '1'],
+ ],
+ ],
+ [
+ 'port 2',
+ [
+ ['ip', ''],
+ ['$sut_mac0', '@@dst_mac0'],
+ ['tx port', '0'],
+ ['single', '@'],
+ ['user_table', 'dofile("ipv4.lua")'],
+                    ['missing_additional_file', 'dofile("nosuch")'],
+ ],
+ ],
+ ]
+
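+        # Expected rewrites: additional-file references get their uploaded paths,
+        # 'mac' values become 'hardware', '@@1' resolves to port 1's dst MAC and
+        # '@@dst_mac0' to port 0's dst MAC in space-separated form.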
+ expected = [
+ [
+ 'lua',
+ [
+ ['dofile("/tmp/ipv4.lua")', ''],
+ ],
+ ],
+ [
+ 'port 0',
+ [
+ ['ip', ''],
+ ['mac', 'hardware'],
+ ['dst mac', '00:00:00:00:00:03'],
+ ['tx port', '1'],
+ ],
+ ],
+ [
+ 'port 2',
+ [
+ ['ip', ''],
+ ['$sut_mac0', '00 00 00 00 00 04'],
+ ['tx port', '0'],
+ ['single', '@'],
+ ['user_table', 'dofile("/tmp/ipv4.lua")'],
+                    ['missing_additional_file', 'dofile("nosuch")'],
+ ],
+ ],
+ ]
+ result = helper.generate_prox_config_file('/c/d/e')
+ self.assertEqual(result, expected, str(result))
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.ConfigParser')
+ def test_generate_prox_config_file_negative(self, mock_parser_type):
+ def init(*args):
+ args[-1].update(sections_data)
+ return mock.MagicMock()
+
+ sections_data = {}
+
+ mock_parser_type.side_effect = init
+
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.interfaces = []
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.MagicMock()
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.additional_files = {}
+ helper.remote_prox_file_name = 'remote'
+ vnfd_helper.interfaces = [
+ {
+ 'virtual-interface': {
+ 'dpdk_port_num': 3,
+ 'dst_mac': '00:00:00:de:ad:88',
+ },
+ },
+ {
+ 'virtual-interface': {
+ 'dpdk_port_num': 5,
+ 'dst_mac': '00:00:00:de:ad:ff',
+ },
+ },
+ {
+ 'virtual-interface': {
+ 'dpdk_port_num': 7,
+ 'dst_mac': '00:00:00:de:ad:ff',
+ },
+ },
+ ]
+ sections_data = {
+ 'port 3': [
+ ['ip', ''],
+ ['mac', 'foo'],
+ ['dst mac', ''],
+ ],
+ 'port 5': [
+ ['ip', ''],
+ ['dst mac', ''],
+ ['tx port', '0'],
+ ['???', 'dofile "here" 23'],
+ ],
+ }
+
+ with self.assertRaises(Exception):
+ helper.generate_prox_config_file('a/b')
+
+ def test_put_string_to_file(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.interfaces = []
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.MagicMock()
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+
+ expected = 'a/b'
+ result = helper.put_string_to_file('my long string', 'a/b')
+ self.assertEqual(result, expected)
+
+ def test_copy_to_target(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.interfaces = []
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.MagicMock()
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ expected = '/tmp/c'
+ result = helper.copy_to_target('a/b', 'c')
+ self.assertEqual(result, expected)
+
+ def test_upload_prox_config(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.interfaces = []
+ ssh_helper = mock.MagicMock()
+ scenario_helper = mock.MagicMock()
+
+ helper = ProxDpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ helper.write_prox_config = mock.MagicMock(return_value='a long string')
+ expected = '/tmp/a'
+ result = helper.upload_prox_config('a', {})
+ self.assertEqual(result, expected)
+
+
+class TestProxResourceHelper(unittest.TestCase):
+
+ VNFD0 = {
+ 'short-name': 'ProxVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'name': 'proxvnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'proxvnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'uplink_0',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.19',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'downlink_0',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ },
+ ],
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'proxvnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'id': 'ProxApproxVnf',
+ 'name': 'ProxVnf',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD0,
+ ],
+ },
+ }
+
+ def test_find_pci(self):
+ input_str_list = [
+ 'no target here',
+ 'nor here',
+ 'and still not',
+ ]
+ result = ProxResourceHelper.find_pci('target', input_str_list)
+ self.assertFalse(result)
+
+ input_str_list = [
+ 'no target here',
+ 'nor here',
+ 'this is a target',
+ 'did we miss it',
+ ]
+ result = ProxResourceHelper.find_pci('target', input_str_list)
+ self.assertTrue(result)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.RETRY_INTERVAL', 0)
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.ProxSocketHelper')
+ def test_sut(self, *args):
+ helper = ProxResourceHelper(mock.MagicMock())
+ self.assertIsNone(helper.client)
+ result = helper.sut
+ self.assertIsNotNone(result)
+ self.assertIs(result, helper.client)
+ self.assertIs(result, helper.sut)
+
+ def test_test_type(self):
+ setup_helper = mock.MagicMock()
+ setup_helper.find_in_section.return_value = expected = 'prox type'
+
+ helper = ProxResourceHelper(setup_helper)
+
+ self.assertIsNone(helper._test_type)
+ self.assertEqual(helper.test_type, expected)
+ self.assertEqual(helper._test_type, expected)
+ self.assertEqual(helper.test_type, expected)
+
+ def test_collect_collectd_kpi(self):
+ helper = ProxResourceHelper(mock.MagicMock())
+ helper.resource = resource = mock.MagicMock()
+
+ resource.check_if_system_agent_running.return_value = 0, '1234'
+ resource.amqp_collect_nfvi_kpi.return_value = 543
+ resource.check_if_system_agent_running.return_value = (0, None)
+
+ expected = {'core': 543}
+ result = helper.collect_collectd_kpi()
+ self.assertDictEqual(result, expected)
+
+ def test_collect_kpi(self):
+ helper = ProxResourceHelper(mock.MagicMock())
+ helper._queue = queue = mock.MagicMock()
+ helper._result = {'z': 123}
+ helper.resource = resource = mock.MagicMock()
+
+ resource.check_if_system_agent_running.return_value = 0, '1234'
+ resource.amqp_collect_nfvi_kpi.return_value = 543
+ resource.check_if_system_agent_running.return_value = (0, None)
+
+ queue.empty.return_value = False
+ queue.get.return_value = {'a': 789}
+
+ expected = {'z': 123, 'a': 789, 'collect_stats': {'core': 543}}
+ result = helper.collect_kpi()
+ self.assertDictEqual(result, expected)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.ProxSocketHelper')
+ def test__connect(self, mock_socket_helper_type, *args):
+ client = mock_socket_helper_type()
+ client.connect.side_effect = chain(repeat(socket.error, 5), [None])
+
+ setup_helper = mock.MagicMock()
+ setup_helper.vnfd_helper.interfaces = []
+
+ helper = ProxResourceHelper(setup_helper)
+
+ result = helper._connect()
+ self.assertIs(result, client)
+
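+        # With 65 consecutive failures the retry budget is exhausted and _connect
+        # raises instead of returning a client.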
+ client.connect.side_effect = chain(repeat(socket.error, 65), [None])
+
+ with self.assertRaises(Exception):
+ helper._connect()
+
+ def test_run_traffic(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxResourceHelper(setup_helper)
+ traffic_profile = mock.MagicMock(**{"done": True})
+ helper.run_traffic(traffic_profile)
+ self.assertEqual(helper._terminated.value, 1)
+
+ def test__run_traffic_once(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxResourceHelper(setup_helper)
+ traffic_profile = mock.MagicMock(**{"done": True})
+ helper._run_traffic_once(traffic_profile)
+ self.assertEqual(helper._terminated.value, 1)
+
+ def test_start_collect(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxResourceHelper(setup_helper)
+ helper.resource = resource = mock.MagicMock()
+ self.assertIsNone(helper.start_collect())
+ resource.start.assert_called_once()
+
+ def test_terminate(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxResourceHelper(setup_helper)
+ with self.assertRaises(NotImplementedError):
+ helper.terminate()
+
+ def test_up_post(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxResourceHelper(setup_helper)
+ helper.client = expected = mock.MagicMock()
+ result = helper.up_post()
+ self.assertEqual(result, expected)
+
+ def test_execute(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxResourceHelper(setup_helper)
+ helper.client = mock.MagicMock()
+
+ expected = helper.client.my_command()
+ result = helper.execute('my_command')
+ self.assertEqual(result, expected)
+
+ # TODO(elfoley): Make this a separate test: test_execute_no_client
+ helper.client = object()
+
+ result = helper.execute('my_command')
+ self.assertIsNone(result)
+
+
+class TestProxDataHelper(unittest.TestCase):
+
+ def test_totals_and_pps(self):
+ pkt_size = 180
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = list(range(4))
+
+ sut = mock.MagicMock()
+ sut.port_stats.return_value = list(range(10))
+
+ data_helper = ProxDataHelper(
+ vnfd_helper, sut, pkt_size, 25, None,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ self.assertEqual(data_helper.rx_total, 6)
+ self.assertEqual(data_helper.tx_total, 7)
+ self.assertEqual(data_helper.requested_pps, 6.25e6)
+
+ def test_samples(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = list(range(4))
+ vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)]
+
+ sut = mock.MagicMock()
+ sut.port_stats.return_value = list(range(10))
+
+ data_helper = ProxDataHelper(vnfd_helper, sut, None, None, None, None)
+
+ expected = {
+ 'xe1': {
+ 'in_packets': 6,
+ 'out_packets': 7,
+ },
+ 'xe2': {
+ 'in_packets': 6,
+ 'out_packets': 7,
+ },
+ }
+ result = data_helper.samples
+ self.assertDictEqual(result, expected)
+
+ def test___enter__(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = list(range(4))
+ vnfd_helper.ports_iter.return_value = [('xe1', 3), ('xe2', 7)]
+
+ sut = mock.MagicMock()
+ sut.port_stats.return_value = list(range(10))
+
+ data_helper = ProxDataHelper(vnfd_helper, sut, None, None,
+ 5.4, constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+ data_helper._totals_and_pps = 12, 32, 4.5
+ data_helper.tsc_hz = 9.8
+ data_helper.measured_stats = {'delta': TotStatsTuple(6.1, 6.2, 6.3, 6.4)}
+ data_helper.latency = 7
+
+ self.assertIsNone(data_helper.result_tuple)
+ self.assertEqual(data_helper.line_speed, 10000000000)
+
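+        # The result tuple packs the 5.4 value passed to the constructor, the captured
+        # tsc_hz, the measured delta counters, the latency and the stubbed totals/pps.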
+ expected = ProxTestDataTuple(5.4, 9.8, 6.1, 6.2, 6.3, 7, 12, 32, 4.5)
+ with data_helper:
+ pass
+
+ result = data_helper.result_tuple
+ self.assertEqual(result, expected)
+
+ data_helper.make_tuple()
+ self.assertIs(data_helper.result_tuple, result)
+
+ def test___enter___negative(self):
+ vnfd_helper = mock.MagicMock()
+
+ data_helper = ProxDataHelper(vnfd_helper, None, None, None, None, None)
+
+ vnfd_helper.port_pairs.all_ports = []
+ with self.assertRaises(AssertionError):
+ with data_helper:
+ pass
+
+ vnfd_helper.port_pairs.all_ports = [0, 1, 2]
+ with self.assertRaises(AssertionError):
+ with data_helper:
+ pass
+
+ def test_measure_tot_stats(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = list(range(4))
+
+ start = (3, 4, 1, 2)
+ end = (9, 7, 6, 8)
+
+ sut = ProxSocketHelper(mock.MagicMock())
+ sut.get_all_tot_stats = mock.MagicMock(side_effect=[start, end])
+
+ data_helper = ProxDataHelper(vnfd_helper, sut, None, None, 5.4, None)
+
+ self.assertIsNone(data_helper.measured_stats)
+
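+        # delta is the element-wise difference between the end and start snapshots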
+ expected = {
+ 'start_tot': start,
+ 'end_tot': end,
+ 'delta': TotStatsTuple(6, 3, 5, 6),
+ }
+ with data_helper.measure_tot_stats():
+ pass
+
+ self.assertEqual(data_helper.measured_stats, expected)
+
+ def test_capture_tsc_hz(self):
+ vnfd_helper = mock.MagicMock()
+ vnfd_helper.port_pairs.all_ports = list(range(4))
+
+ sut = mock.MagicMock()
+ sut.hz.return_value = '54.6'
+
+ data_helper = ProxDataHelper(vnfd_helper, sut, None, None, None, None)
+
+ self.assertIsNone(data_helper.tsc_hz)
+
+ expected = 54.6
+ data_helper.capture_tsc_hz()
+ self.assertEqual(data_helper.tsc_hz, expected)
+
+
+class TestProxProfileHelper(unittest.TestCase):
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.utils')
+ def test_get_cls(self, mock_utils):
+ mock_type1 = mock.MagicMock()
+ mock_type1.__prox_profile_type__ = 'another_type'
+ mock_type2 = mock.MagicMock()
+ mock_type2.__prox_profile_type__ = 'my_type'
+ mock_utils.itersubclasses.return_value = [mock_type1, mock_type2]
+
+ self.assertEqual(ProxProfileHelper.get_cls('my_type'), mock_type2)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.utils')
+ def test_get_cls_default(self, mock_utils):
+ mock_utils.itersubclasses.return_value = []
+ ProxProfileHelper.get_cls('my_type')
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.SocketTopology')
+ def test_cpu_topology(self, mock_socket_topology):
+ mock_socket_topology.parse_cpuinfo.return_value = 432
+
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.ssh_helper.execute.return_value = 0, 'output', ''
+
+ helper = ProxProfileHelper(resource_helper)
+ self.assertIsNone(helper._cpu_topology)
+ result = helper.cpu_topology
+ self.assertEqual(result, 432)
+ self.assertIs(result, helper._cpu_topology)
+ self.assertIs(result, helper.cpu_topology)
+
+ # TODO(elfoley): Split this test; there are two sets of inputs/outputs
+ def test_test_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = []
+
+ helper = ProxProfileHelper(resource_helper)
+ helper._cpu_topology = []
+
+ expected = []
+ result = helper.test_cores
+ self.assertEqual(result, expected)
+
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1s3', []),
+ ('core 2s5', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3s1', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ]),
+ ('core 4s9h', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ]),
+ ]
+
+ helper = ProxProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 1: {
+ 3: {
+ 'key1': (23, 32),
+ 'key2': (12, 21),
+ 'key3': (44, 33),
+ },
+ },
+ 9: {
+ 4: {
+ 'key1': (44, 32),
+ 'key2': (23, 21),
+ 'key3': (12, 33),
+ },
+ },
+ }
+
+ self.assertIsNone(helper._test_cores)
+ expected = [3, 4]
+ result = helper.test_cores
+ self.assertEqual(result, expected)
+ self.assertIs(result, helper._test_cores)
+ self.assertIs(result, helper.test_cores)
+
+ # TODO(elfoley): Split this test; there are two sets of inputs/outputs
+ def test_latency_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = []
+
+ helper = ProxProfileHelper(resource_helper)
+ helper._cpu_topology = []
+
+ expected = []
+ result = helper.latency_cores
+ self.assertEqual(result, expected)
+
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1s3', []),
+ ('core 2s5', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3s1', [
+ ('index', 5),
+ ('mode', 'lat'),
+ ]),
+ ('core 4s9h', [
+ ('index', 7),
+ ('mode', 'lat'),
+ ]),
+ ]
+
+ helper = ProxProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 1: {
+ 3: {
+ 'key1': (23, 32),
+ 'key2': (12, 21),
+ 'key3': (44, 33),
+ },
+ },
+ 9: {
+ 4: {
+ 'key1': (44, 32),
+ 'key2': (23, 21),
+ 'key3': (12, 33),
+ },
+ },
+ }
+
+ self.assertIsNone(helper._latency_cores)
+ expected = [3, 4]
+ result = helper.latency_cores
+ self.assertEqual(result, expected)
+ self.assertIs(result, helper._latency_cores)
+ self.assertIs(result, helper.latency_cores)
+
+ def test_all_rx_cores(self):
+ helper = ProxBngProfileHelper(mock.MagicMock())
+ helper._latency_cores = expected = [3, 4, 6]
+ helper._test_cores = [5, 2, 1]
+
+ result = helper.all_rx_cores
+ self.assertEqual(result, expected)
+
+ def test_get_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ]),
+ ]
+
+ helper = ProxProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 0: {
+ 1: {
+ 5: (5, 1, 0)
+ },
+ 2: {
+ 6: (6, 2, 0)
+ },
+ 3: {
+ 7: (7, 3, 0)
+ },
+ 4: {
+ 8: (8, 3, 0)
+ },
+ }
+ }
+
+ expected = [3, 4]
+ result = helper.get_cores(helper.PROX_CORE_GEN_MODE)
+ self.assertEqual(result, expected)
+
+ def test_get_latency(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.vnfd_helper.interfaces = []
+
+ helper = ProxProfileHelper(resource_helper)
+ helper._latency_cores = []
+
+ expected = []
+ result = helper.get_latency()
+ self.assertEqual(result, expected)
+
+ helper._latency_cores = [1, 2]
+ helper.client = mock.MagicMock()
+
+ expected = helper.sut.lat_stats()
+ result = helper.get_latency()
+ self.assertIs(result, expected)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+ def test_traffic_context(self, *args):
+ setup_helper = mock.MagicMock()
+ setup_helper.vnfd_helper.interfaces = []
+
+ helper = ProxProfileHelper(setup_helper)
+ helper._cpu_topology = {
+ 0: {
+ 1: {
+ 5: (5, 1, 0)
+ },
+ 2: {
+ 6: (6, 2, 0)
+ },
+ 3: {
+ 7: (7, 3, 0)
+ },
+ 4: {
+ 8: (8, 3, 0)
+ },
+ }
+ }
+
+ setup_helper.prox_config_data = [
+ ('global', [
+ ('not_name', 'other data'),
+ ('name_not', 'more data'),
+ ('name', helper.__prox_profile_type__),
+ ]),
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'tagged'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'udp'),
+ ]),
+ ]
+
+ client = mock.MagicMock()
+ client.hz.return_value = 2
+ client.port_stats.return_value = tuple(range(12))
+
+ helper.client = client
+ helper.get_latency = mock.MagicMock(return_value=[3.3, 3.6, 3.8])
+
+ helper._test_cores = [3, 4]
+
+ with helper.traffic_context(64, 1):
+ pass
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+ def test_run_test(self, _):
+ resource_helper = mock.MagicMock()
+ resource_helper.step_delta = 0.4
+ resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
+ resource_helper.sut.port_stats.return_value = list(range(10))
+
+ helper = ProxProfileHelper(resource_helper)
+
+ helper.run_test(120, 5, 6.5,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+
+class TestProxMplsProfileHelper(unittest.TestCase):
+
+ def test_mpls_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'tagged'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'udp'),
+ ]),
+ ]
+
+ helper = ProxMplsProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 0: {
+ 1: {
+ 5: (5, 1, 0)
+ },
+ 2: {
+ 6: (6, 2, 0)
+ },
+ 3: {
+ 7: (7, 3, 0)
+ },
+ 4: {
+ 8: (8, 3, 0)
+ },
+ }
+ }
+
+ expected_tagged = [3]
+ expected_plain = [4]
+ self.assertIsNone(helper._cores_tuple)
+ self.assertEqual(helper.tagged_cores, expected_tagged)
+ self.assertEqual(helper.plain_cores, expected_plain)
+ self.assertEqual(helper._cores_tuple, (expected_tagged, expected_plain))
+
+ def test_traffic_context(self):
+ setup_helper = mock.MagicMock()
+ helper = ProxMplsProfileHelper(setup_helper)
+
+ with helper.traffic_context(120, 5.4):
+ pass
+
+
+class TestProxBngProfileHelper(unittest.TestCase):
+
+ def test_bng_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'cpe'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'inet'),
+ ]),
+ ('core 6', [
+ ('index', 3),
+ ('mode', 'gen'),
+ ('name', 'arp_task'),
+ ]),
+ ('core 9', [
+ ('index', 2),
+ ('mode', 'gen'),
+ ('name', 'arp'),
+ ]),
+ ]
+
+ helper = ProxBngProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 0: {
+ 1: {
+ 5: (5, 1, 0)
+ },
+ 2: {
+ 6: (6, 2, 0)
+ },
+ 3: {
+ 7: (7, 3, 0)
+ },
+ 4: {
+ 8: (8, 3, 0)
+ },
+ 6: {
+ 1: (4, 8, 0)
+ },
+ 9: {
+ 2: (3, 7, 0)
+ },
+ }
+ }
+
+ expected_cpe = [3]
+ expected_inet = [4]
+ expected_arp = [6, 9]
+ expected_arp_task = [0, 6]
+ expected_combined = (expected_cpe, expected_inet, expected_arp, expected_arp_task)
+
+ self.assertIsNone(helper._cores_tuple)
+ self.assertEqual(helper.cpe_cores, expected_cpe)
+ self.assertEqual(helper.inet_cores, expected_inet)
+ self.assertEqual(helper.arp_cores, expected_arp)
+ self.assertEqual(helper.arp_task_cores, expected_arp_task)
+ self.assertEqual(helper._cores_tuple, expected_combined)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+ def test_run_test(self, _):
+ resource_helper = mock.MagicMock()
+ resource_helper.step_delta = 0.4
+ resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
+ resource_helper.sut.port_stats.return_value = list(range(10))
+
+ helper = ProxBngProfileHelper(resource_helper)
+
+ helper.run_test(120, 5, 6.5,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+ # negative pkt_size is the only way to make ratio > 1
+ helper.run_test(-1000, 5, 6.5,
+ constants.NIC_GBPS_DEFAULT * constants.ONE_GIGABIT_IN_BITS)
+
+
+class TestProxVpeProfileHelper(unittest.TestCase):
+
+ def test_vpe_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'cpe'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'inet'),
+ ]),
+ ]
+
+ helper = ProxVpeProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 0: {
+ 1: {
+ 5: (5, 1, 0)
+ },
+ 2: {
+ 6: (6, 2, 0)
+ },
+ 3: {
+ 7: (7, 3, 0)
+ },
+ 4: {
+ 8: (8, 3, 0)
+ },
+ }
+ }
+
+ expected_cpe = [3]
+ expected_inet = [4]
+ expected_combined = (expected_cpe, expected_inet)
+
+ self.assertIsNone(helper._cores_tuple)
+ self.assertEqual(helper.cpe_cores, expected_cpe)
+ self.assertEqual(helper.inet_cores, expected_inet)
+ self.assertEqual(helper._cores_tuple, expected_combined)
+
+ def test_vpe_ports(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('port 3', [
+ ('index', '5'),
+ ('name', 'cpe'),
+ ('mac', 'hardware'),
+ ]),
+ ('port 4', [
+ ('index', '7'),
+ ('name', 'inet'),
+ ('mac', 'hardware'),
+ ]),
+ ]
+
+ helper = ProxVpeProfileHelper(resource_helper)
+ helper._port_list = {
+ 0: {
+ 1: {
+ 5: 'cpe'
+ },
+ 2: {
+ 6: 'inet'
+ },
+ 3: {
+ 7: 'cpe'
+ },
+ 4: {
+ 8: 'inet'
+ },
+ }
+ }
+
+ expected_cpe = [3]
+ expected_inet = [4]
+ expected_combined = (expected_cpe, expected_inet)
+
+ self.assertIsNone(helper._ports_tuple)
+ self.assertEqual(helper.cpe_ports, expected_cpe)
+ self.assertEqual(helper.inet_ports, expected_inet)
+ self.assertEqual(helper._ports_tuple, expected_combined)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+ def test_run_test(self, _):
+ resource_helper = mock.MagicMock()
+ resource_helper.step_delta = 0.4
+ resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
+ resource_helper.sut.port_stats.return_value = list(range(10))
+
+ helper = ProxVpeProfileHelper(resource_helper)
+
+ helper.run_test(120, 5, 6.5)
+ helper.run_test(-1000, 5, 6.5) # negative pkt_size is the only way to make ratio > 1
+
+
+class TestProxlwAFTRProfileHelper(unittest.TestCase):
+
+ def test_lwaftr_cores(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('core 1', []),
+ ('core 2', [
+ ('index', 8),
+ ('mode', ''),
+ ]),
+ ('core 3', [
+ ('index', 5),
+ ('mode', 'gen'),
+ ('name', 'tun'),
+ ]),
+ ('core 4', [
+ ('index', 7),
+ ('mode', 'gen'),
+ ('name', 'inet'),
+ ]),
+ ]
+
+ helper = ProxlwAFTRProfileHelper(resource_helper)
+ helper._cpu_topology = {
+ 0: {
+ 1: {
+ 5: (5, 1, 0)
+ },
+ 2: {
+ 6: (6, 2, 0)
+ },
+ 3: {
+ 7: (7, 3, 0)
+ },
+ 4: {
+ 8: (8, 3, 0)
+ },
+ }
+ }
+
+ expected_tun = [3]
+ expected_inet = [4]
+ expected_combined = (expected_tun, expected_inet)
+
+ self.assertIsNone(helper._cores_tuple)
+ self.assertEqual(helper.tun_cores, expected_tun)
+ self.assertEqual(helper.inet_cores, expected_inet)
+ self.assertEqual(helper._cores_tuple, expected_combined)
+
+ def test_tun_ports(self):
+ resource_helper = mock.MagicMock()
+ resource_helper.setup_helper.prox_config_data = [
+ ('section1', []),
+ ('section2', [
+ ('a', 'b'),
+ ('c', 'd'),
+ ]),
+ ('port 3', [
+ ('index', '5'),
+ ('name', 'lwB4'),
+ ('mac', 'hardware'),
+ ]),
+ ('port 4', [
+ ('index', '7'),
+ ('name', 'inet'),
+ ('mac', 'hardware'),
+ ]),
+ ]
+
+ helper = ProxlwAFTRProfileHelper(resource_helper)
+ helper._port_list = {
+ 0: {
+ 1: {
+ 5: 'lwB4'
+ },
+ 2: {
+ 6: 'inet'
+ },
+ 3: {
+ 7: 'lwB4'
+ },
+ 4: {
+ 8: 'inet'
+ },
+ }
+ }
+
+ expected_tun = [3]
+ expected_inet = [4]
+ expected_combined = (expected_tun, expected_inet)
+
+ self.assertIsNone(helper._ports_tuple)
+ self.assertEqual(helper.tun_ports, expected_tun)
+ self.assertEqual(helper.inet_ports, expected_inet)
+ self.assertEqual(helper._ports_tuple, expected_combined)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+ def test_run_test(self, _):
+ resource_helper = mock.MagicMock()
+ resource_helper.step_delta = 0.4
+ resource_helper.vnfd_helper.port_pairs.all_ports = list(range(2))
+ resource_helper.sut.port_stats.return_value = list(range(10))
+
+ helper = ProxlwAFTRProfileHelper(resource_helper)
+
+ helper.run_test(120, 5, 6.5)
+ helper.run_test(-1000, 5, 6.5) # negative pkt_size is the only way to make ratio > 1
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
new file mode 100644
index 000000000..f5f4b3907
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_prox_vnf.py
@@ -0,0 +1,455 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import errno
+import os
+import unittest
+import mock
+from copy import deepcopy
+
+from yardstick.tests import STL_MOCKS
+
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf
+ from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+
+
+NAME = "vnf__1"
+
+
+@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+class TestProxApproxVnf(unittest.TestCase):
+
+ VNFD0 = {
+ 'short-name': 'ProxVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'name': 'proxvnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'proxvnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe1',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ },
+ ],
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'proxvnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ 'curr_packets_fwd',
+ 'curr_packets_in'
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'ProxApproxVnf',
+ 'name': 'ProxVnf',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD0,
+ ],
+ },
+ }
+
+ SCENARIO_CFG = {
+ 'task_path': "",
+ 'nodes': {
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'runner': {
+ 'duration': 600, 'type': 'Duration'},
+ 'topology': 'prox-tg-topology-2.yaml',
+ 'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
+ 'type': 'NSPerf',
+ 'options': {
+ 'tg__1': {'prox_args': {'-e': '',
+ '-t': ''},
+ 'prox_config': 'configs/l3-gen-2.cfg',
+ 'prox_path':
+ '/root/dppd-PROX-v035/build/prox'},
+ 'vnf__1': {
+ 'prox_args': {'-t': ''},
+ 'prox_config': 'configs/l3-swap-2.cfg',
+ 'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
+
+ CONTEXT_CFG = {
+ 'nodes': {
+ 'tg__2': {
+ 'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens513f0',
+ 'vld_id': ProxApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root',
+ },
+ 'tg__1': {
+ 'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens785f0',
+ 'vld_id': ProxApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root',
+ },
+ 'vnf__1': {
+ 'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens786f0',
+ 'vld_id': ProxApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens786f1',
+ 'vld_id': ProxApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'password': 'r00t',
+ 'VNF model': 'prox_vnf.yaml',
+ },
+ },
+ }
+
+ @mock.patch(SSH_HELPER)
+ def test___init__(self, ssh, *args):
+ mock_ssh(ssh)
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ self.assertIsNone(prox_approx_vnf._vnf_process)
+
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi_no_client(self, ssh, *args):
+ mock_ssh(ssh)
+
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.resource_helper = None
+ expected = {
+ 'packets_in': 0,
+ 'packets_dropped': 0,
+ 'packets_fwd': 0,
+ 'collect_stats': {'core': {}}
+ }
+ result = prox_approx_vnf.collect_kpi()
+ self.assertEqual(result, expected)
+
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ resource_helper = mock.MagicMock()
+ resource_helper.execute.return_value = list(range(12))
+ resource_helper.collect_collectd_kpi.return_value = {'core': {'result': 234}}
+
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.resource_helper = resource_helper
+
+ expected = {
+ 'packets_in': 6,
+ 'packets_dropped': 1,
+ 'packets_fwd': 7,
+ 'collect_stats': {'core': {'result': 234}},
+ }
+ result = prox_approx_vnf.collect_kpi()
+ self.assertEqual(result['packets_in'], expected['packets_in'])
+ self.assertEqual(result['packets_dropped'], expected['packets_dropped'])
+ self.assertEqual(result['packets_fwd'], expected['packets_fwd'])
+        self.assertNotEqual(result['packets_fwd'], 0)
+        self.assertNotEqual(result['packets_in'], 0)
+
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi_error(self, ssh, *args):
+ mock_ssh(ssh)
+
+ resource_helper = mock.MagicMock()
+
+ prox_approx_vnf = ProxApproxVnf(NAME, deepcopy(self.VNFD0))
+ prox_approx_vnf.resource_helper = resource_helper
+ prox_approx_vnf.vnfd_helper['vdu'][0]['external-interface'] = []
+ prox_approx_vnf.vnfd_helper.port_pairs.interfaces = []
+
+ with self.assertRaises(RuntimeError):
+ prox_approx_vnf.collect_kpi()
+
+ def _get_file_abspath(self, filename, *args):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ @mock.patch('yardstick.common.utils.open', create=True)
+ @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', create=True)
+ @mock.patch('yardstick.network_services.helpers.iniparser.open', create=True)
+ @mock.patch(SSH_HELPER)
+ def test_run_prox(self, ssh, *_):
+ mock_ssh(ssh)
+
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
+ prox_approx_vnf.ssh_helper.join_bin_path.return_value = '/tool_path12/tool_file34'
+ prox_approx_vnf.setup_helper.remote_path = 'configs/file56.cfg'
+
+ expected = "sudo bash -c 'cd /tool_path12; " \
+ "/tool_path12/tool_file34 -o cli -t -f /tmp/l3-swap-2.cfg '"
+
+ prox_approx_vnf._run()
+ result = prox_approx_vnf.ssh_helper.run.call_args[0][0]
+ self.assertEqual(result, expected)
+
+ @mock.patch(SSH_HELPER)
+ def bad_test_instantiate(self, *args):
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.scenario_helper = mock.MagicMock()
+ prox_approx_vnf.setup_helper = mock.MagicMock()
+        # super() can't be mocked here, so this test is deliberately disabled via the bad_ prefix
+ prox_approx_vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG)
+ prox_approx_vnf.setup_helper.build_config.assert_called_once()
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate_panic(self, ssh, *args):
+ mock_ssh(ssh, exec_result=(1, "", ""))
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf._vnf_process = mock.MagicMock(**{"is_alive.return_value": True})
+ prox_approx_vnf._run_prox = mock.Mock(return_value=0)
+ prox_approx_vnf.WAIT_TIME = 0
+ prox_approx_vnf.q_out.put("PANIC")
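+        # a PANIC line in the output queue should make wait_for_instantiate() raise RuntimeError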
+ with self.assertRaises(RuntimeError):
+ prox_approx_vnf.wait_for_instantiate()
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, *args):
+ mock_ssh(ssh)
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf._vnf_process = mock.MagicMock()
+ prox_approx_vnf._vnf_process.terminate = mock.Mock()
+ prox_approx_vnf.ssh_helper = mock.MagicMock()
+ prox_approx_vnf.setup_helper = mock.Mock()
+ prox_approx_vnf.resource_helper = mock.MagicMock()
+
+ self.assertIsNone(prox_approx_vnf.terminate())
+
+ @mock.patch(SSH_HELPER)
+ def test__vnf_up_post(self, ssh, *args):
+ mock_ssh(ssh)
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.resource_helper = resource_helper = mock.Mock()
+
+ prox_approx_vnf._vnf_up_post()
+ resource_helper.up_post.assert_called_once()
+
+ @mock.patch(SSH_HELPER)
+ def test_vnf_execute_oserror(self, ssh, *args):
+ mock_ssh(ssh)
+ prox_approx_vnf = ProxApproxVnf(NAME, self.VNFD0)
+ prox_approx_vnf.resource_helper = resource_helper = mock.Mock()
+
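+        # EPIPE and ESHUTDOWN are expected to be ignorable when _ignore_errors is set;
+        # any other errno should still be raised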
+ resource_helper.execute.side_effect = OSError(errno.EPIPE, "")
+ prox_approx_vnf.vnf_execute("", _ignore_errors=True)
+
+ resource_helper.execute.side_effect = OSError(errno.ESHUTDOWN, "")
+ prox_approx_vnf.vnf_execute("", _ignore_errors=True)
+
+ resource_helper.execute.side_effect = OSError(errno.EADDRINUSE, "")
+ with self.assertRaises(OSError):
+ prox_approx_vnf.vnf_execute("", _ignore_errors=True)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py
new file mode 100644
index 000000000..5574c6770
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_router_vnf.py
@@ -0,0 +1,260 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.tests import STL_MOCKS
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+
+
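+# Replace the TRex STL modules in sys.modules with mocks before importing RouterVNF,
+# so the import works without the TRex client libraries installed.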
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.router_vnf import RouterVNF
+
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+
+name = 'vnf__1'
+
+
+class TestRouterVNF(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'RouterVNF',
+ 'vdu':
+ [{'routing_table': [],
+ 'description': 'RouterVNF',
+ 'name': 'router-baremetal',
+ 'nd_route_tbl': [],
+ 'id': 'router-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'RouterVNF',
+ 'mgmt-interface':
+ {'vdu-id': 'router-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'RouterVNF', 'name': 'VPEVnfSsh'}]}}
+
+ scenario_cfg = {'nodes': {'cpt__0': 'compute_0.compute_nodes',
+ 'tg__0': 'trafficgen_1.baremetal',
+ 'vnf__0': 'vnf.yardstick'},
+ 'options': {'flow': {'count': 128000,
+ 'dst_ip': ['10.0.3.26-10.0.3.105'],
+ 'dst_port': ['2001-2004'],
+ 'src_ip': ['10.0.2.26-10.0.2.105'],
+ 'src_port': ['1234-1238']},
+ 'framesize': {'downlink': {'1024B': 100},
+ 'uplink': {'1024B': 100}},
+ 'rfc2544': {'allowed_drop_rate': '0.0001 - 0.1'},
+ 'tg__0': {'queues_per_port': 7},
+ 'traffic_type': 4,
+ 'vnf__0': {'nfvi_enable': True}},
+ 'runner': {'interval': 35,
+ 'iterations': 10,
+ 'type': 'Iteration'},
+ 'topology': 'router-tg-topology.yaml',
+ 'traffic_profile': '../../traffic_profiles/ipv4_throughput.yaml',
+ 'type': 'NSPerf'}
+
+ context_cfg = {'nodes': {'tg__1':
+ {'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens785f0',
+ 'vld_id': RouterVNF.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root'},
+ 'vnf__1':
+ {'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens786f0',
+ 'vld_id': RouterVNF.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens786f1',
+ 'vld_id': RouterVNF.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'routing_table': [],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl': [],
+ 'password': 'r00t',
+ 'VNF model': 'router_vnf.yaml'}}}
+
+ IP_SHOW_STATS_OUTPUT = """\
+2: em1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
+ link/ether d4:c9:ef:52:7c:4d brd ff:ff:ff:ff:ff:ff
+ RX: bytes packets errors dropped overrun mcast
+ 2781945429 3202213 0 0 0 30131
+ RX errors: length crc frame fifo missed
+ 0 0 0 0 0
+ TX: bytes packets errors dropped carrier collsns
+ 646221183 2145799 0 0 0 0
+ TX errors: aborted fifo window heartbeat
+ 0 0 0 0
+"""
+ STATS = {
+ 'RX:bytes': '2781945429',
+ 'RX:dropped': '0',
+ 'RX:errors': '0',
+ 'RX:mcast': '30131',
+ 'RX:overrun': '0',
+ 'RX:packets': '3202213',
+ 'RX errors:length': '0',
+ 'RX errors:crc': '0',
+ 'RX errors:frame': '0',
+ 'RX errors:fifo': '0',
+ 'RX errors:missed': '0',
+ 'TX:bytes': '646221183',
+ 'TX:carrier': '0',
+ 'TX:collsns': '0',
+ 'TX:dropped': '0',
+ 'TX:errors': '0',
+ 'TX:packets': '2145799',
+ 'TX errors:aborted': '0',
+ 'TX errors:fifo': '0',
+ 'TX errors:window': '0',
+ 'TX errors:heartbeat': '0',
+ }
+
+ def test___init__(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ router_vnf = RouterVNF(name, vnfd)
+ self.assertIsNone(router_vnf._vnf_process)
+
+ def test_get_stats(self):
+ stats = RouterVNF.get_stats(self.IP_SHOW_STATS_OUTPUT)
+ self.assertDictEqual(stats, self.STATS)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, _):
+ m = mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ router_vnf = RouterVNF(name, vnfd)
+ router_vnf.ssh_helper = m
+ result = {'packets_dropped': 0, 'packets_fwd': 0, 'packets_in': 0, 'link_stats': {}}
+ self.assertEqual(result, router_vnf.collect_kpi())
+
+ @mock.patch(SSH_HELPER)
+ def test_run_router(self, ssh):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ router_vnf = RouterVNF(name, vnfd)
+ router_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
+ router_vnf._run()
+ router_vnf.ssh_helper.drop_connection.assert_called_once()
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.router_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test_instantiate(self, ssh, _):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ router_vnf = RouterVNF(name, vnfd)
+ router_vnf.WAIT_TIME = 0
+ router_vnf.INTERFACE_WAIT = 0
+ self.scenario_cfg.update({"nodes": {"vnf__1": ""}})
+ self.assertIsNone(router_vnf.instantiate(self.scenario_cfg,
+ self.context_cfg))
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, _):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ router_vnf = RouterVNF(name, vnfd)
+ router_vnf._vnf_process = mock.MagicMock()
+ router_vnf._vnf_process.terminate = mock.Mock()
+ self.assertIsNone(router_vnf.terminate())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
new file mode 100644
index 000000000..7c22563e8
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_sample_vnf.py
@@ -0,0 +1,1949 @@
+# Copyright (c) 2017-2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import deepcopy
+
+import unittest
+import mock
+import six
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.common import exceptions as y_exceptions
+from yardstick.common import utils
+from yardstick.network_services.nfvi.resource import ResourceProfile
+from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFDeployHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import ClientResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import Rfc2544ResourceHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNFTrafficGen
+from yardstick.network_services.vnf_generic.vnf.sample_vnf import DpdkVnfSetupEnvHelper
+from yardstick.tests.unit.network_services.vnf_generic.vnf import test_base
+
+
+class MockError(Exception):
+ pass
+
+
+class TestVnfSshHelper(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ]
+ }
+ }
+
+ def assertAll(self, iterable, message=None):
+ self.assertTrue(all(iterable), message)
+
+ def test_get_class(self):
+ self.assertIs(VnfSshHelper.get_class(), VnfSshHelper)
+
+ @mock.patch('yardstick.ssh.paramiko')
+ def test_copy(self, _):
+ ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path')
+ ssh_helper._run = mock.Mock()
+
+ ssh_helper.execute('ls')
+ self.assertTrue(ssh_helper.is_connected)
+ result = ssh_helper.copy()
+ self.assertIsInstance(result, VnfSshHelper)
+ self.assertFalse(result.is_connected)
+ self.assertEqual(result.bin_path, ssh_helper.bin_path)
+ self.assertEqual(result.host, ssh_helper.host)
+ self.assertEqual(result.port, ssh_helper.port)
+ self.assertEqual(result.user, ssh_helper.user)
+ self.assertEqual(result.password, ssh_helper.password)
+ self.assertEqual(result.key_filename, ssh_helper.key_filename)
+
+ @mock.patch('yardstick.ssh.paramiko')
+ def test_upload_config_file(self, mock_paramiko):
+ ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path')
+ ssh_helper._run = mock.MagicMock()
+
+ self.assertFalse(ssh_helper.is_connected)
+ cfg_file = ssh_helper.upload_config_file('my/prefix', 'my content')
+ self.assertTrue(ssh_helper.is_connected)
+ mock_paramiko.SSHClient.assert_called_once()
+ self.assertTrue(cfg_file.startswith('/tmp'))
+
+ cfg_file = ssh_helper.upload_config_file('/my/prefix', 'my content')
+ self.assertTrue(ssh_helper.is_connected)
+ mock_paramiko.SSHClient.assert_called_once()
+ self.assertEqual(cfg_file, '/my/prefix')
+
+ def test_join_bin_path(self):
+ ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path')
+
+ expected_start = 'my'
+ expected_middle_list = ['bin']
+ expected_end = 'path'
+ result = ssh_helper.join_bin_path()
+ self.assertTrue(result.startswith(expected_start))
+ self.assertAll(middle in result for middle in expected_middle_list)
+ self.assertTrue(result.endswith(expected_end))
+
+ expected_middle_list.append(expected_end)
+ expected_end = 'some_file.sh'
+ result = ssh_helper.join_bin_path('some_file.sh')
+ self.assertTrue(result.startswith(expected_start))
+ self.assertAll(middle in result for middle in expected_middle_list)
+ self.assertTrue(result.endswith(expected_end))
+
+ expected_middle_list.append('some_dir')
+ expected_end = 'some_file.sh'
+ result = ssh_helper.join_bin_path('some_dir', 'some_file.sh')
+ self.assertTrue(result.startswith(expected_start))
+ self.assertAll(middle in result for middle in expected_middle_list)
+ self.assertTrue(result.endswith(expected_end))
+
+ @mock.patch('yardstick.ssh.paramiko')
+ @mock.patch('yardstick.ssh.provision_tool')
+ def test_provision_tool(self, mock_provision_tool, mock_paramiko):
+ ssh_helper = VnfSshHelper(self.VNFD_0['mgmt-interface'], 'my/bin/path')
+ ssh_helper._run = mock.MagicMock()
+
+ self.assertFalse(ssh_helper.is_connected)
+ ssh_helper.provision_tool()
+ self.assertTrue(ssh_helper.is_connected)
+ mock_paramiko.SSHClient.assert_called_once()
+ mock_provision_tool.assert_called_once()
+
+ ssh_helper.provision_tool(tool_file='my_tool.sh')
+ self.assertTrue(ssh_helper.is_connected)
+ mock_paramiko.SSHClient.assert_called_once()
+ self.assertEqual(mock_provision_tool.call_count, 2)
+
+ ssh_helper.provision_tool('tool_path', 'my_tool.sh')
+ self.assertTrue(ssh_helper.is_connected)
+ mock_paramiko.SSHClient.assert_called_once()
+ self.assertEqual(mock_provision_tool.call_count, 3)
+
+
+class TestSetupEnvHelper(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ def test_build_config(self):
+ setup_env_helper = SetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock())
+
+ with self.assertRaises(NotImplementedError):
+ setup_env_helper.build_config()
+
+ def test_setup_vnf_environment(self):
+ setup_env_helper = SetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock())
+ self.assertIsNone(setup_env_helper.setup_vnf_environment())
+
+ def test_tear_down(self):
+ setup_env_helper = SetupEnvHelper(mock.Mock(), mock.Mock(), mock.Mock())
+
+ with self.assertRaises(NotImplementedError):
+ setup_env_helper.tear_down()
+
+
+class TestDpdkVnfSetupEnvHelper(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ 'driver': 'i40e',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ 'driver': 'ixgbe',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ]
+ }
+ }
+
+ def test__update_packet_type(self):
+ ip_pipeline_cfg = 'pkt_type = ipv4'
+ pkt_type = {'pkt_type': '1'}
+
+ expected = "pkt_type = 1"
+ result = DpdkVnfSetupEnvHelper._update_packet_type(ip_pipeline_cfg, pkt_type)
+ self.assertEqual(result, expected)
+
+ def test__update_packet_type_no_op(self):
+ ip_pipeline_cfg = 'pkt_type = ipv6'
+ pkt_type = {'pkt_type': '1'}
+
+ expected = "pkt_type = ipv6"
+ result = DpdkVnfSetupEnvHelper._update_packet_type(ip_pipeline_cfg, pkt_type)
+ self.assertEqual(result, expected)
+
+ def test__update_packet_type_multi_op(self):
+ ip_pipeline_cfg = 'pkt_type = ipv4\npkt_type = 1\npkt_type = ipv4'
+ pkt_type = {'pkt_type': '1'}
+
+ expected = 'pkt_type = 1\npkt_type = 1\npkt_type = 1'
+ result = DpdkVnfSetupEnvHelper._update_packet_type(ip_pipeline_cfg, pkt_type)
+ self.assertEqual(result, expected)
+
+ def test__update_traffic_type(self):
+ ip_pipeline_cfg = 'pkt_type = ipv4'
+
+ traffic_options = {"vnf_type": DpdkVnfSetupEnvHelper.APP_NAME, 'traffic_type': 4}
+ expected = "pkt_type = ipv4"
+ result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options)
+ self.assertEqual(result, expected)
+
+ def test__update_traffic_type_ipv6(self):
+ ip_pipeline_cfg = 'pkt_type = ipv4'
+
+ traffic_options = {"vnf_type": DpdkVnfSetupEnvHelper.APP_NAME, 'traffic_type': 6}
+ expected = "pkt_type = ipv6"
+ result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options)
+ self.assertEqual(result, expected)
+
+ def test__update_traffic_type_not_app_name(self):
+ ip_pipeline_cfg = 'traffic_type = 4'
+
+ vnf_type = ''.join(["Not", DpdkVnfSetupEnvHelper.APP_NAME])
+ traffic_options = {"vnf_type": vnf_type, 'traffic_type': 8}
+ expected = "traffic_type = 8"
+ result = DpdkVnfSetupEnvHelper._update_traffic_type(ip_pipeline_cfg, traffic_options)
+ self.assertEqual(result, expected)
+
+ @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
+ @mock.patch.object(utils, 'read_meminfo',
+ return_value={'Hugepagesize': '2048'})
+ def test__setup_hugepages_no_hugepages_defined(self, mock_meminfo, *args):
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.all_options = {}
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ mock.ANY, ssh_helper, scenario_helper)
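+        # no 'hugepages_gb' option is given, so the asserted log call expects the
+        # default claim of 8192 pages of 2048 kB (16 GB)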
+ with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
+ dpdk_setup_helper._setup_hugepages()
+ mock_info.assert_called_once_with(
+ 'Hugepages size (kB): %s, number claimed: %s, number set: '
+ '%s', 2048, 8192, 100)
+ mock_meminfo.assert_called_once_with(ssh_helper)
+
+ @mock.patch.object(six, 'BytesIO', return_value=six.BytesIO(b'100\n'))
+ @mock.patch.object(utils, 'read_meminfo',
+ return_value={'Hugepagesize': '1048576'})
+ def test__setup_hugepages_8gb_hugepages_defined(self, mock_meminfo, *args):
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.all_options = {'hugepages_gb': 8}
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ mock.ANY, ssh_helper, scenario_helper)
+ with mock.patch.object(sample_vnf.LOG, 'info') as mock_info:
+ dpdk_setup_helper._setup_hugepages()
+ mock_info.assert_called_once_with(
+ 'Hugepages size (kB): %s, number claimed: %s, number set: '
+ '%s', 1048576, 8, 100)
+ mock_meminfo.assert_called_once_with(ssh_helper)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
+ @mock.patch.object(utils, 'open_relative_file')
+ def test_build_config(self, mock_open_rf, mock_multi_port_config_class, mock_find, *args):
+ mock_multi_port_config = mock_multi_port_config_class()
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.vnf_cfg = {}
+ scenario_helper.all_options = {}
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+
+ dpdk_setup_helper.PIPELINE_COMMAND = expected = 'pipeline command'
+ result = dpdk_setup_helper.build_config()
+ self.assertEqual(result, expected)
+ self.assertGreaterEqual(ssh_helper.upload_config_file.call_count, 2)
+ mock_find.assert_called()
+ mock_multi_port_config.generate_config.assert_called()
+ mock_multi_port_config.generate_script.assert_called()
+
+ scenario_helper.vnf_cfg = {'file': 'fake_file'}
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ mock_open_rf.side_effect = mock.mock_open(read_data='fake_data')
+ dpdk_setup_helper.PIPELINE_COMMAND = expected = 'pipeline command'
+
+ result = dpdk_setup_helper.build_config()
+
+ mock_open_rf.assert_called_once()
+ self.assertEqual(result, expected)
+ self.assertGreaterEqual(ssh_helper.upload_config_file.call_count, 2)
+ mock_find.assert_called()
+ mock_multi_port_config.generate_config.assert_called()
+ mock_multi_port_config.generate_script.assert_called()
+
+ def test__build_pipeline_kwargs(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ ssh_helper.provision_tool.return_value = 'tool_path'
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper.CFG_CONFIG = 'config'
+ dpdk_setup_helper.CFG_SCRIPT = 'script'
+ dpdk_setup_helper.pipeline_kwargs = {}
+ dpdk_setup_helper.all_ports = [0, 1, 2]
+ dpdk_setup_helper.scenario_helper.vnf_cfg = {'lb_config': 'HW',
+ 'worker_threads': 1}
+
+ expected = {
+ 'cfg_file': 'config',
+ 'script': 'script',
+ 'port_mask_hex': '0x3',
+ 'tool_path': 'tool_path',
+ 'hwlb': ' --hwlb 1',
+ }
+ dpdk_setup_helper._build_pipeline_kwargs()
+ self.assertDictEqual(dpdk_setup_helper.pipeline_kwargs, expected)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ @mock.patch('yardstick.ssh.SSH')
+ def test_setup_vnf_environment(self, *args):
+ def execute(cmd):
+ if cmd.startswith('which '):
+ return exec_failure
+ return exec_success
+
+ exec_success = (0, 'good output', '')
+ exec_failure = (1, 'bad output', 'error output')
+
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ ssh_helper.execute = execute
+
+ scenario_helper = mock.Mock()
+ scenario_helper.nodes = [None, None]
+ dpdk_vnf_setup_env_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_vnf_setup_env_helper._validate_cpu_cfg = mock.Mock(return_value=[])
+
+ with mock.patch.object(dpdk_vnf_setup_env_helper, '_setup_dpdk'):
+ self.assertIsInstance(
+ dpdk_vnf_setup_env_helper.setup_vnf_environment(),
+ ResourceProfile)
+
+ def test__setup_dpdk(self):
+ ssh_helper = mock.Mock()
+ ssh_helper.execute = mock.Mock()
+ ssh_helper.execute.return_value = (0, 0, 0)
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(mock.ANY, ssh_helper, mock.ANY)
+ with mock.patch.object(dpdk_setup_helper, '_setup_hugepages') as \
+ mock_setup_hp:
+ dpdk_setup_helper._setup_dpdk()
+ mock_setup_hp.assert_called_once()
+ ssh_helper.execute.assert_has_calls([
+ mock.call('sudo modprobe uio && sudo modprobe igb_uio'),
+ mock.call('lsmod | grep -i igb_uio')
+ ])
+
+ @mock.patch('yardstick.ssh.SSH')
+ def test__setup_resources(self, _):
+ vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0))
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper._validate_cpu_cfg = mock.Mock()
+
+ dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in
+ vnfd_helper.interfaces]
+ result = dpdk_setup_helper._setup_resources()
+ self.assertIsInstance(result, ResourceProfile)
+ self.assertEqual(dpdk_setup_helper.socket, 0)
+
+ @mock.patch('yardstick.ssh.SSH')
+ def test__setup_resources_socket_1(self, _):
+ vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0))
+ vnfd_helper.interfaces[0]['virtual-interface']['vpci'] = '0000:55:00.0'
+ vnfd_helper.interfaces[1]['virtual-interface']['vpci'] = '0000:35:00.0'
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper._validate_cpu_cfg = mock.Mock()
+
+ dpdk_setup_helper.bound_pci = [v['virtual-interface']["vpci"] for v in
+ vnfd_helper.interfaces]
+ result = dpdk_setup_helper._setup_resources()
+ self.assertIsInstance(result, ResourceProfile)
+ self.assertEqual(dpdk_setup_helper.socket, 1)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ def test__detect_and_bind_drivers(self, *args):
+ vnfd_helper = VnfdHelper(deepcopy(self.VNFD_0))
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.nodes = [None, None]
+ rv = ['0000:05:00.1', '0000:05:00.0']
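+        # bound PCI addresses are reported out of order; the helper should still
+        # map xe0 to dpdk port 0 and xe1 to port 1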
+
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper.dpdk_bind_helper._get_bound_pci_addresses = mock.Mock(return_value=rv)
+ dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock()
+ dpdk_setup_helper.dpdk_bind_helper.read_status = mock.Mock()
+
+ self.assertIsNone(dpdk_setup_helper._detect_and_bind_drivers())
+
+ intf_0 = vnfd_helper.vdu[0]['external-interface'][0]['virtual-interface']
+ intf_1 = vnfd_helper.vdu[0]['external-interface'][1]['virtual-interface']
+ self.assertEqual(0, intf_0['dpdk_port_num'])
+ self.assertEqual(1, intf_1['dpdk_port_num'])
+
+ def test_tear_down(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.nodes = [None, None]
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper.dpdk_bind_helper.bind = mock.Mock()
+ dpdk_setup_helper.dpdk_bind_helper.used_drivers = {
+ 'd1': ['0000:05:00.0'],
+ 'd3': ['0000:05:01.0'],
+ }
+
+ self.assertIsNone(dpdk_setup_helper.tear_down())
+ dpdk_setup_helper.dpdk_bind_helper.bind.assert_any_call(['0000:05:00.0'], 'd1', True)
+ dpdk_setup_helper.dpdk_bind_helper.bind.assert_any_call(['0000:05:01.0'], 'd3', True)
+
+
+class TestResourceHelper(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'driver': 'i40e',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'driver': 'ixgbe',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ def test_setup(self):
+ resource = object()
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ dpdk_setup_helper.setup_vnf_environment = mock.Mock(return_value=resource)
+ resource_helper = ResourceHelper(dpdk_setup_helper)
+
+ self.assertIsNone(resource_helper.setup())
+ self.assertIs(resource_helper.resource, resource)
+
+ def test_generate_cfg(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ resource_helper = ResourceHelper(dpdk_setup_helper)
+
+ self.assertIsNone(resource_helper.generate_cfg())
+
+ def test_stop_collect(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ resource_helper = ResourceHelper(dpdk_setup_helper)
+ resource_helper.resource = mock.Mock()
+
+ self.assertIsNone(resource_helper.stop_collect())
+
+ def test_stop_collect_none(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ resource_helper = ResourceHelper(dpdk_setup_helper)
+ resource_helper.resource = None
+
+ self.assertIsNone(resource_helper.stop_collect())
+
+
+class TestClientResourceHelper(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'driver': 'i40e',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'driver': 'ixgbe',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:13',
+ 'vpci': '0000:05:00.2',
+ 'driver': 'ixgbe',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 2,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.30',
+ 'local_mac': '00:00:00:00:00:11'
+ },
+ 'vnfd-connection-point-ref': 'xe2',
+ 'name': 'xe2'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ],
+ },
+ }
+
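+    # STLError is patched to the local MockError so a "not connected" failure can be
+    # simulated without the TRex client installed.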
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.LOG')
+ @mock.patch.object(sample_vnf, 'STLError', new_callable=lambda: MockError)
+ def test_get_stats_not_connected(self, mock_stl_error, *args):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client_resource_helper.client = mock.Mock()
+ client_resource_helper.client.get_stats.side_effect = mock_stl_error
+
+ self.assertEqual(client_resource_helper.get_stats(), {})
+ client_resource_helper.client.get_stats.assert_called_once()
+
+ def test_clear_stats(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client_resource_helper.client = mock.Mock()
+
+ self.assertIsNone(client_resource_helper.clear_stats())
+ self.assertEqual(
+ client_resource_helper.client.clear_stats.call_count, 1)
+
+ def test_clear_stats_of_ports(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client_resource_helper.client = mock.Mock()
+
+ self.assertIsNone(client_resource_helper.clear_stats([3, 4]))
+ self.assertEqual(
+ client_resource_helper.client.clear_stats.call_count, 1)
+
+ def test_start(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client_resource_helper.client = mock.Mock()
+
+ self.assertIsNone(client_resource_helper.start())
+ client_resource_helper.client.start.assert_called_once()
+
+ def test_start_ports(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(
+ vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client_resource_helper.client = mock.Mock()
+
+ self.assertIsNone(client_resource_helper.start([3, 4]))
+ client_resource_helper.client.start.assert_called_once()
+
+ def test_collect_kpi_with_queue(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client_resource_helper._result = {'existing': 43, 'replaceable': 12}
+ client_resource_helper._queue = mock.Mock()
+ client_resource_helper._queue.empty.return_value = False
+ client_resource_helper._queue.get.return_value = {'incoming': 34, 'replaceable': 99}
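+        # values drained from the queue are merged over the cached result,
+        # so 'replaceable' takes the queued value 99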
+
+ expected = {
+ 'existing': 43,
+ 'incoming': 34,
+ 'replaceable': 99,
+ }
+ result = client_resource_helper.collect_kpi()
+ self.assertDictEqual(result, expected)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ @mock.patch.object(sample_vnf, 'STLError')
+ def test__connect_with_failures(self, mock_stl_error, *args):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ dpdk_setup_helper = DpdkVnfSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+ client_resource_helper = ClientResourceHelper(dpdk_setup_helper)
+ client = mock.MagicMock()
+ client.connect.side_effect = mock_stl_error(msg='msg')
+
+ self.assertIs(client_resource_helper._connect(client), client)
+
+
+class TestRfc2544ResourceHelper(unittest.TestCase):
+
+ RFC2544_CFG_1 = {
+ 'latency': True,
+ 'correlated_traffic': True,
+ 'allowed_drop_rate': '0.1 - 0.15',
+ }
+
+ RFC2544_CFG_2 = {
+ 'allowed_drop_rate': ' 0.25 - 0.05 ',
+ }
+
+ RFC2544_CFG_3 = {
+ 'allowed_drop_rate': '0.2',
+ }
+
+ RFC2544_CFG_4 = {
+ 'latency': True,
+ }
+
+ SCENARIO_CFG_1 = {
+ 'options': {
+ 'rfc2544': RFC2544_CFG_1,
+ }
+ }
+
+ SCENARIO_CFG_2 = {
+ 'options': {
+ 'rfc2544': RFC2544_CFG_2,
+ }
+ }
+
+ SCENARIO_CFG_3 = {
+ 'options': {
+ 'rfc2544': RFC2544_CFG_3,
+ }
+ }
+
+ SCENARIO_CFG_4 = {
+ 'options': {
+ 'rfc2544': RFC2544_CFG_4,
+ }
+ }
+
+ def test_property_rfc2544(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_1
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertIsNone(rfc2544_resource_helper._rfc2544)
+ self.assertDictEqual(rfc2544_resource_helper.rfc2544, self.RFC2544_CFG_1)
+ self.assertDictEqual(rfc2544_resource_helper._rfc2544, self.RFC2544_CFG_1)
+ scenario_helper.scenario_cfg = {} # ensure that resource_helper caches
+ self.assertDictEqual(rfc2544_resource_helper.rfc2544, self.RFC2544_CFG_1)
+
+ def test_property_tolerance_high(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_1
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertIsNone(rfc2544_resource_helper._tolerance_high)
+ self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.15)
+ self.assertEqual(rfc2544_resource_helper._tolerance_high, 0.15)
+ scenario_helper.scenario_cfg = {} # ensure that resource_helper caches
+ self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.15)
+
+ def test_property_tolerance_low(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_1
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertIsNone(rfc2544_resource_helper._tolerance_low)
+ self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.1)
+ self.assertEqual(rfc2544_resource_helper._tolerance_low, 0.1)
+ scenario_helper.scenario_cfg = {} # ensure that resource_helper caches
+ self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.1)
+
+ def test_property_tolerance_high_range_swap(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_2
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.25)
+
+ def test_property_tolerance_low_range_swap(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_2
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.05)
+
+ def test_property_tolerance_high_not_range(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_3
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.2)
+
+ def test_property_tolerance_low_not_range(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_3
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.2)
+
+ def test_property_tolerance_high_default(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_4
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertEqual(rfc2544_resource_helper.tolerance_high, 0.0001)
+
+ def test_property_tolerance_low_default(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_4
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertEqual(rfc2544_resource_helper.tolerance_low, 0.0001)
+
+ def test_property_latency(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_1
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertIsNone(rfc2544_resource_helper._latency)
+ self.assertTrue(rfc2544_resource_helper.latency)
+ self.assertTrue(rfc2544_resource_helper._latency)
+ scenario_helper.scenario_cfg = {} # ensure that resource_helper caches
+ self.assertTrue(rfc2544_resource_helper.latency)
+
+ def test_property_latency_default(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_2
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertFalse(rfc2544_resource_helper.latency)
+
+ def test_property_correlated_traffic(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_1
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertIsNone(rfc2544_resource_helper._correlated_traffic)
+ self.assertTrue(rfc2544_resource_helper.correlated_traffic)
+ self.assertTrue(rfc2544_resource_helper._correlated_traffic)
+ scenario_helper.scenario_cfg = {} # ensure that resource_helper caches
+ self.assertTrue(rfc2544_resource_helper.correlated_traffic)
+
+ def test_property_correlated_traffic_default(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = self.SCENARIO_CFG_2
+ rfc2544_resource_helper = Rfc2544ResourceHelper(scenario_helper)
+
+ self.assertFalse(rfc2544_resource_helper.correlated_traffic)
+
+
+class TestSampleVNFDeployHelper(unittest.TestCase):
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ @mock.patch('subprocess.check_output')
+ def test_deploy_vnfs_disabled(self, *_):
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ ssh_helper.join_bin_path.return_value = 'joined_path'
+ ssh_helper.execute.return_value = 1, 'bad output', 'error output'
+ ssh_helper.put.return_value = None
+ sample_vnf_deploy_helper = SampleVNFDeployHelper(vnfd_helper, ssh_helper)
+
+        sample_vnf_deploy_helper.DISABLE_DEPLOY = True
+        self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1'))
+ self.assertEqual(ssh_helper.execute.call_count, 5)
+ ssh_helper.put.assert_called_once()
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.time')
+ @mock.patch('subprocess.check_output')
+ def test_deploy_vnfs(self, *args):
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ ssh_helper.join_bin_path.return_value = 'joined_path'
+ ssh_helper.execute.return_value = 1, 'bad output', 'error output'
+ ssh_helper.put.return_value = None
+ sample_vnf_deploy_helper = SampleVNFDeployHelper(vnfd_helper, ssh_helper)
+ sample_vnf_deploy_helper.DISABLE_DEPLOY = False
+
+ self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1'))
+ self.assertEqual(ssh_helper.execute.call_count, 5)
+ ssh_helper.put.assert_called_once()
+
+ @mock.patch('subprocess.check_output')
+ def test_deploy_vnfs_early_success(self, *args):
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ ssh_helper.join_bin_path.return_value = 'joined_path'
+ ssh_helper.execute.return_value = 0, 'output', ''
+ ssh_helper.put.return_value = None
+ sample_vnf_deploy_helper = SampleVNFDeployHelper(vnfd_helper, ssh_helper)
+ sample_vnf_deploy_helper.DISABLE_DEPLOY = False
+
+ self.assertIsNone(sample_vnf_deploy_helper.deploy_vnfs('name1'))
+ ssh_helper.execute.assert_called_once()
+ ssh_helper.put.assert_not_called()
+
+
+class TestScenarioHelper(unittest.TestCase):
+
+ def test_property_task_path(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'task_path': 'my_path',
+ }
+
+ self.assertEqual(scenario_helper.task_path, 'my_path')
+
+ def test_property_nodes(self):
+ nodes = ['node1', 'node2']
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'nodes': nodes,
+ }
+
+ self.assertEqual(scenario_helper.nodes, nodes)
+
+ def test_property_all_options(self):
+ data = {
+ 'name1': {
+ 'key3': 'value3',
+ },
+ 'name2': {}
+ }
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'options': data,
+ }
+
+ self.assertDictEqual(scenario_helper.all_options, data)
+
+ def test_property_options(self):
+ data = {
+ 'key1': 'value1',
+ 'key2': 'value2',
+ }
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'options': {
+ 'name1': data,
+ },
+ }
+
+ self.assertDictEqual(scenario_helper.options, data)
+
+ def test_property_vnf_cfg(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'options': {
+ 'name1': {
+ 'vnf_config': 'my_config',
+ },
+ },
+ }
+
+ self.assertEqual(scenario_helper.vnf_cfg, 'my_config')
+
+ def test_property_vnf_cfg_default(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'options': {
+ 'name1': {},
+ },
+ }
+
+ self.assertDictEqual(scenario_helper.vnf_cfg, ScenarioHelper.DEFAULT_VNF_CFG)
+
+ def test_property_topology(self):
+ scenario_helper = ScenarioHelper('name1')
+ scenario_helper.scenario_cfg = {
+ 'topology': 'my_topology',
+ }
+
+ self.assertEqual(scenario_helper.topology, 'my_topology')
+
+
+class TestSampleVnf(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ]
+ }
+ }
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ def test___init__(self):
+ sample_vnf = SampleVNF('vnf1', self.VNFD_0)
+
+ self.assertEqual(sample_vnf.name, 'vnf1')
+ self.assertDictEqual(sample_vnf.vnfd_helper, self.VNFD_0)
+
+        # the default setup helper should be exactly SetupEnvHelper, not a subclass
+        self.assertEqual(type(sample_vnf.setup_helper), SetupEnvHelper)
+
+        # the default resource helper should be exactly ResourceHelper, not a subclass
+        self.assertEqual(type(sample_vnf.resource_helper), ResourceHelper)
+
+ def test___init___alt_types(self):
+ class MySetupEnvHelper(SetupEnvHelper):
+ pass
+
+ class MyResourceHelper(ResourceHelper):
+ pass
+
+ sample_vnf = SampleVNF('vnf1', self.VNFD_0, MySetupEnvHelper, MyResourceHelper)
+
+ self.assertEqual(sample_vnf.name, 'vnf1')
+ self.assertDictEqual(sample_vnf.vnfd_helper, self.VNFD_0)
+
+        # the supplied MySetupEnvHelper class should be used exactly as given, not a subclass
+        self.assertEqual(type(sample_vnf.setup_helper), MySetupEnvHelper)
+
+        # the supplied MyResourceHelper class should be used exactly as given, not a subclass
+        self.assertEqual(type(sample_vnf.resource_helper), MyResourceHelper)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.Process')
+ def test__start_vnf(self, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf._run = mock.Mock()
+
+ self.assertIsNone(sample_vnf.queue_wrapper)
+ self.assertIsNone(sample_vnf._vnf_process)
+ self.assertIsNone(sample_vnf._start_vnf())
+ self.assertIsNotNone(sample_vnf.queue_wrapper)
+ self.assertIsNotNone(sample_vnf._vnf_process)
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test_instantiate(self, ssh):
+ test_base.mock_ssh(ssh)
+
+ nodes = {
+ 'vnf1': 'name1',
+ 'vnf2': 'name2',
+ }
+
+ context1 = mock.Mock()
+ context1._get_server.return_value = None
+ context2 = mock.Mock()
+ context2._get_server.return_value = context2
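+        # only context2 resolves the VNF node, so instantiate() should record it as nfvi_context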
+
+ try:
+ Context.list.clear()
+ except AttributeError:
+            # list.clear() is not available in Python 2.7; fall back to slice deletion
+ Context.list[:] = []
+
+ Context.list.extend([
+ context1,
+ context2,
+ ])
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf._start_server = mock.Mock(return_value=0)
+ sample_vnf._vnf_process = mock.MagicMock()
+ sample_vnf._vnf_process._is_alive.return_value = 1
+ sample_vnf.ssh_helper = mock.MagicMock()
+ sample_vnf.deploy_helper = mock.MagicMock()
+ sample_vnf.resource_helper.ssh_helper = mock.MagicMock()
+ scenario_cfg = {
+ 'nodes': nodes,
+ }
+
+ self.assertIsNone(sample_vnf.instantiate(scenario_cfg, {}))
+ self.assertEqual(sample_vnf.nfvi_context, context2)
+
+ def test__update_collectd_options(self):
+ scenario_cfg = {'options':
+ {'collectd':
+ {'interval': 3,
+ 'plugins':
+ {'plugin3': {'param': 3}}},
+ 'vnf__0':
+ {'collectd':
+ {'interval': 2,
+ 'plugins':
+ {'plugin3': {'param': 2},
+ 'plugin2': {'param': 2}}}}}}
+ context_cfg = {'nodes':
+ {'vnf__0':
+ {'collectd':
+ {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}}}}
+ expected = {'interval': 1,
+ 'plugins':
+ {'plugin3': {'param': 1},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf__0', vnfd)
+ sample_vnf._update_collectd_options(scenario_cfg, context_cfg)
+ self.assertEqual(sample_vnf.setup_helper.collectd_options, expected)
+
+ def test__update_options(self):
+ options1 = {'interval': 1,
+ 'param1': 'value1',
+ 'plugins':
+ {'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+ options2 = {'interval': 2,
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin2': {'param': 2},
+ 'plugin1': {'param': 2}}}
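+ # merging options1 into options2: on conflicting keys the values from
+ # options1 win, while keys unique to either dict are preserved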
+ expected = {'interval': 1,
+ 'param1': 'value1',
+ 'param2': 'value2',
+ 'plugins':
+ {'plugin4': {'param': 4},
+ 'plugin3': {'param': 3},
+ 'plugin2': {'param': 1},
+ 'plugin1': {'param': 1}}}
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf._update_options(options2, options1)
+ self.assertEqual(options2, expected)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch("yardstick.ssh.SSH")
+ def test_wait_for_instantiate_empty_queue(self, ssh, *args):
+ test_base.mock_ssh(ssh, exec_result=(1, "", ""))
+
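+ # simulate polling the output queue until the 'pipeline> ' prompt shows
+ # up, which signals that the VNF has finished starting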
+ queue_size_list = [
+ 0,
+ 1,
+ 0,
+ 1,
+ ]
+
+ queue_get_list = [
+ 'some output',
+ 'pipeline> ',
+ ]
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf.WAIT_TIME_FOR_SCRIPT = 0
+ sample_vnf._start_server = mock.Mock(return_value=0)
+ sample_vnf._vnf_process = mock.MagicMock()
+ sample_vnf._vnf_process.exitcode = 0
+ sample_vnf._vnf_process._is_alive.return_value = 1
+ sample_vnf.queue_wrapper = mock.Mock()
+ sample_vnf.q_out = mock.Mock()
+ sample_vnf.q_out.qsize.side_effect = iter(queue_size_list)
+ sample_vnf.q_out.get.side_effect = iter(queue_get_list)
+ sample_vnf.ssh_helper = mock.MagicMock()
+ sample_vnf.resource_helper.ssh_helper = mock.MagicMock()
+ sample_vnf.resource_helper.start_collect = mock.MagicMock()
+
+ self.assertEqual(sample_vnf.wait_for_instantiate(), 0)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ def test_vnf_execute_with_queue_data(self, *args):
+ queue_size_list = [
+ 1,
+ 1,
+ 0,
+ ]
+
+ queue_get_list = [
+ 'hello ',
+ 'world'
+ ]
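+ # qsize reports two pending chunks, so vnf_execute drains both and
+ # returns the concatenated output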
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf.q_out = mock.Mock()
+ sample_vnf.q_out.qsize.side_effect = iter(queue_size_list)
+ sample_vnf.q_out.get.side_effect = iter(queue_get_list)
+
+ self.assertEqual(sample_vnf.vnf_execute('my command'), 'hello world')
+
+ def test_terminate_without_vnf_process(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf.vnf_execute = mock.Mock()
+ sample_vnf.ssh_helper = mock.Mock()
+ sample_vnf._tear_down = mock.Mock()
+ sample_vnf.resource_helper = mock.Mock()
+
+ self.assertIsNone(sample_vnf.terminate())
+
+ def test_get_stats(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf.APP_WORD = 'sample1'
+ sample_vnf.vnf_execute = mock.Mock(return_value='the stats')
+
+ self.assertEqual(sample_vnf.get_stats(), 'the stats')
+
+ def test_collect_kpi(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)'
+ sample_vnf.COLLECT_MAP = {
+ 'k1': 3,
+ 'k2': 1,
+ 'k3': 2,
+ }
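+ # COLLECT_KPI captures (34, 91, 27) from the stats string below and
+ # COLLECT_MAP maps each KPI name to a capture-group index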
+ sample_vnf.get_stats = mock.Mock(return_value='index0: 34 -- 91, 27')
+ sample_vnf.resource_helper = mock.Mock()
+ sample_vnf.resource_helper.collect_kpi.return_value = {}
+
+ expected = {
+ 'k1': 27,
+ 'k2': 34,
+ 'k3': 91,
+ 'collect_stats': {},
+ }
+ result = sample_vnf.collect_kpi()
+ self.assertDictEqual(result, expected)
+
+ def test_collect_kpi_default(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.APP_NAME = 'sample1'
+ sample_vnf.COLLECT_KPI = r'\s(\d+)\D*(\d+)\D*(\d+)'
+ sample_vnf.get_stats = mock.Mock(return_value='')
+
+ expected = {
+ 'packets_in': 0,
+ 'packets_fwd': 0,
+ 'packets_dropped': 0,
+ }
+ result = sample_vnf.collect_kpi()
+ self.assertDictEqual(result, expected)
+
+ def test_scale(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ self.assertRaises(y_exceptions.FunctionNotImplemented,
+ sample_vnf.scale)
+
+ def test__run(self):
+ test_cmd = 'test cmd'
+ run_kwargs = {'arg1': 'val1', 'arg2': 'val2'}
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sample_vnf = SampleVNF('vnf1', vnfd)
+ sample_vnf.ssh_helper = mock.Mock()
+ sample_vnf.setup_helper = mock.Mock()
+ with mock.patch.object(sample_vnf, '_build_config',
+ return_value=test_cmd), \
+ mock.patch.object(sample_vnf, '_build_run_kwargs'):
+ sample_vnf.run_kwargs = run_kwargs
+ sample_vnf._run()
+ sample_vnf.ssh_helper.drop_connection.assert_called_once()
+ sample_vnf.ssh_helper.run.assert_called_once_with(test_cmd,
+ **run_kwargs)
+ sample_vnf.setup_helper.kill_vnf.assert_called_once()
+
+
+class TestSampleVNFTrafficGen(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'driver': 'i40e',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01'
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'driver': 'ixgbe',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf',
+ 'name': 'VPEVnfSsh',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ],
+ },
+ }
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ def test__check_status(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+
+ with self.assertRaises(NotImplementedError):
+ sample_vnf_tg._check_status()
+
+ def test_listen_traffic(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+
+ sample_vnf_tg.listen_traffic(mock.Mock())
+
+ def test_verify_traffic(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+
+ sample_vnf_tg.verify_traffic(mock.Mock())
+
+ def test_terminate(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ sample_vnf_tg._traffic_process = mock.Mock()
+ sample_vnf_tg._tg_process = mock.Mock()
+
+ sample_vnf_tg.terminate()
+
+ def test__wait_for_process(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ with mock.patch.object(sample_vnf_tg, '_check_status',
+ return_value=0) as mock_status, \
+ mock.patch.object(sample_vnf_tg, '_tg_process') as mock_proc:
+ mock_proc.is_alive.return_value = True
+ mock_proc.exitcode = 234
+ self.assertEqual(sample_vnf_tg._wait_for_process(), 234)
+ mock_proc.is_alive.assert_called_once()
+ mock_status.assert_called_once()
+
+ def test__wait_for_process_not_alive(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ with mock.patch.object(sample_vnf_tg, '_tg_process') as mock_proc:
+ mock_proc.is_alive.return_value = False
+ self.assertRaises(RuntimeError, sample_vnf_tg._wait_for_process)
+ mock_proc.is_alive.assert_called_once()
+
+ def test__wait_for_process_delayed(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
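+ # the first status poll reports not-ready (1), so _wait_for_process has
+ # to loop once more before the exit code is returned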
+ with mock.patch.object(sample_vnf_tg, '_check_status',
+ side_effect=[1, 0]) as mock_status, \
+ mock.patch.object(sample_vnf_tg,
+ '_tg_process') as mock_proc:
+ mock_proc.is_alive.return_value = True
+ mock_proc.exitcode = 234
+ self.assertEqual(sample_vnf_tg._wait_for_process(), 234)
+ mock_proc.is_alive.assert_has_calls([mock.call(), mock.call()])
+ mock_status.assert_has_calls([mock.call(), mock.call()])
+
+ def test_scale(self):
+ sample_vnf_tg = SampleVNFTrafficGen('tg1', self.VNFD_0)
+ self.assertRaises(y_exceptions.FunctionNotImplemented,
+ sample_vnf_tg.scale)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
new file mode 100644
index 000000000..59594a3c3
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ixload.py
@@ -0,0 +1,321 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import subprocess
+
+import mock
+import six
+import unittest
+
+from yardstick import ssh
+from yardstick.common import utils
+from yardstick.tests import STL_MOCKS
+
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.tg_ixload import IxLoadTrafficGen
+ from yardstick.network_services.vnf_generic.vnf.tg_ixload import IxLoadResourceHelper
+ from yardstick.network_services.traffic_profile.base import TrafficProfile
+
+
+NAME = "tg__1"
+
+
+class TestIxLoadTrafficGen(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64}}
+
+ def setUp(self):
+ self._mock_call = mock.patch.object(subprocess, "call")
+ self.mock_call = self._mock_call.start()
+ self._mock_open = mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open")
+ self.mock_open = self._mock_open.start()
+
+ self.addCleanup(self._stop_mock)
+
+ def _stop_mock(self):
+ self._mock_call.stop()
+ self._mock_open.stop()
+
+ def test___init__(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ self.assertIsNone(ixload_traffic_gen.resource_helper.data)
+
+ def test_collect_kpi(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ ixload_traffic_gen.data = {}
+ result = ixload_traffic_gen.collect_kpi()
+ self.assertEqual({}, result)
+
+ def test_listen_traffic(self):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ self.assertIsNone(ixload_traffic_gen.listen_traffic({}))
+
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch.object(utils, 'makedirs')
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
+ def test_instantiate(self, *args):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh_mock.run = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ scenario_cfg = {'tc': "nsb_test_case",
+ 'ixia_profile': "ixload.cfg",
+ 'task_path': "/path/to/task"}
+ ixload_traffic_gen.RESULTS_MOUNT = "/tmp/result"
+ scenario_cfg.update({'options': {'packetsize': 64, 'traffic_type': 4,
+ 'rfc2544': {'allowed_drop_rate': '0.8 - 1'},
+ 'vnf__1': {'rules': 'acl_1rule.yaml',
+ 'vnf_config': {'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config':
+ '1C/1T',
+ 'worker_threads': 1}}
+ }})
+ with mock.patch.object(six.moves.builtins, 'open',
+ create=True) as mock_open:
+ mock_open.return_value = mock.MagicMock()
+ ixload_traffic_gen.instantiate(scenario_cfg, {})
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
+ def test_run_traffic(self, *args):
+ mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh_mock.run = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vnfd["mgmt-interface"].update({"tg-config": {}})
+ vnfd["mgmt-interface"]["tg-config"].update({"ixchassis":
+ "1.1.1.1"})
+ vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path":
+ "/root"})
+ sut = IxLoadTrafficGen(NAME, vnfd)
+ sut.connection = mock.Mock()
+ sut.connection.run = mock.Mock()
+ sut._traffic_runner = mock.Mock(return_value=0)
+ result = sut.run_traffic(mock_traffic_profile)
+ self.assertIsNone(result)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.open")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.min")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.max")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.len")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.shutil")
+ def test_run_traffic_csv(self, *args):
+ mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh_mock.run = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vnfd["mgmt-interface"].update({"tg-config": {}})
+ vnfd["mgmt-interface"]["tg-config"].update({"ixchassis":
+ "1.1.1.1"})
+ vnfd["mgmt-interface"]["tg-config"].update({"py_bin_path":
+ "/root"})
+ sut = IxLoadTrafficGen(NAME, vnfd)
+ sut.connection = mock.Mock()
+ sut.connection.run = mock.Mock()
+ sut._traffic_runner = mock.Mock(return_value=0)
+ subprocess.call(["touch", "/tmp/1.csv"])
+ sut.rel_bin_path = mock.Mock(return_value="/tmp/*.csv")
+ result = sut.run_traffic(mock_traffic_profile)
+ self.assertIsNone(result)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.tg_ixload.call")
+ @mock.patch.object(ssh, 'SSH')
+ def test_terminate(self, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ self.assertIsNone(ixload_traffic_gen.terminate())
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test_parse_csv_read(self, mock_ssh):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ kpi_data = {
+ 'HTTP Total Throughput (Kbps)': 1,
+ 'HTTP Simulated Users': 2,
+ 'HTTP Concurrent Connections': '3',
+ 'HTTP Connection Rate': 4.3,
+ 'HTTP Transaction Rate': True,
+ }
+ http_reader = [kpi_data]
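+ # the KPI values deliberately mix int, str, float and bool to check that
+ # parse_csv_read coerces every value to int before storing it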
+
+ mock_ssh_type = mock.Mock(autospec=mock_ssh.SSH)
+ mock_ssh_type.execute.return_value = 0, "", ""
+ mock_ssh.from_node.return_value = mock_ssh_type
+
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ result = ixload_traffic_gen.resource_helper.result
+
+ ixload_traffic_gen.resource_helper.parse_csv_read(http_reader)
+ for key_left, key_right in IxLoadResourceHelper.KPI_LIST.items():
+ self.assertEqual(result[key_left][-1], int(kpi_data[key_right]))
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test_parse_csv_read_value_error(self, mock_ssh):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ http_reader = [{
+ 'HTTP Total Throughput (Kbps)': 1,
+ 'HTTP Simulated Users': 2,
+ 'HTTP Concurrent Connections': "not a number",
+ 'HTTP Connection Rate': 4,
+ 'HTTP Transaction Rate': 5,
+ }]
+
+ mock_ssh_type = mock.Mock(autospec=mock_ssh.SSH)
+ mock_ssh_type.execute.return_value = 0, "", ""
+ mock_ssh.from_node.return_value = mock_ssh_type
+
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+ init_value = ixload_traffic_gen.resource_helper.result
+
+ ixload_traffic_gen.resource_helper.parse_csv_read(http_reader)
+ self.assertDictEqual(ixload_traffic_gen.resource_helper.result, init_value)
+
+ @mock.patch.object(ssh, 'SSH')
+ def test_parse_csv_read_error(self, mock_ssh):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ http_reader = [{
+ 'HTTP Total Throughput (Kbps)': 1,
+ 'HTTP Simulated Users': 2,
+ 'HTTP Concurrent Connections': 3,
+ 'HTTP Transaction Rate': 5,
+ }]
+
+ mock_ssh_type = mock.Mock(autospec=mock_ssh.SSH)
+ mock_ssh_type.execute.return_value = 0, "", ""
+ mock_ssh.from_node.return_value = mock_ssh_type
+
+ ixload_traffic_gen = IxLoadTrafficGen(NAME, vnfd)
+
+ with self.assertRaises(KeyError):
+ ixload_traffic_gen.resource_helper.parse_csv_read(http_reader)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
new file mode 100644
index 000000000..14e0db788
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_ping.py
@@ -0,0 +1,292 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from multiprocessing import Queue
+import multiprocessing
+
+import mock
+import unittest
+
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from yardstick.tests import STL_MOCKS
+
+SSH_HELPER = "yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper"
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.tg_ping import PingParser
+ from yardstick.network_services.vnf_generic.vnf.tg_ping import PingTrafficGen
+ from yardstick.network_services.vnf_generic.vnf.tg_ping import PingResourceHelper
+ from yardstick.network_services.vnf_generic.vnf.tg_ping import PingSetupEnvHelper
+ from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper
+
+
+class TestPingResourceHelper(unittest.TestCase):
+ def test___init__(self):
+ setup_helper = mock.Mock()
+ helper = PingResourceHelper(setup_helper)
+
+ self.assertIsInstance(helper._queue, multiprocessing.queues.Queue)
+ self.assertIsInstance(helper._parser, PingParser)
+
+ def test_run_traffic(self):
+ setup_helper = mock.Mock()
+ traffic_profile = mock.Mock()
+ traffic_profile.params = {
+ 'traffic_profile': {
+ 'frame_size': 64,
+ },
+ }
+
+ helper = PingResourceHelper(setup_helper)
+ helper.cmd_kwargs = {'target_ip': '10.0.0.2',
+ 'local_ip': '10.0.0.1',
+ 'local_if_name': 'eth0',
+ }
+ helper.ssh_helper = mock.Mock()
+ helper.run_traffic(traffic_profile)
+ # ``called_with`` is not an assertion and silently passes; verify that
+ # the ping command was actually issued over SSH
+ helper.ssh_helper.run.assert_called_once()
+
+
+class TestPingParser(unittest.TestCase):
+ def test___init__(self):
+ q_out = Queue()
+ ping_parser = PingParser(q_out)
+ self.assertIsNotNone(ping_parser.queue)
+
+ def test_clear(self):
+ sample_out = """
+64 bytes from 10.102.22.93: icmp_seq=3 ttl=64 time=0.296 ms
+ """
+ q_out = Queue()
+ ping_parser = PingParser(q_out)
+ ping_parser.write(sample_out)
+ ping_parser.clear()
+ self.assertTrue(q_out.empty())
+
+ def test_close(self):
+ q_out = Queue()
+ ping_parser = PingParser(q_out)
+ self.assertIsNone(ping_parser.close())
+
+ def test_write(self):
+ sample_out = """
+64 bytes from 10.102.22.93: icmp_seq=3 ttl=64 time=0.296 ms
+ """
+ q_out = Queue()
+ ping_parser = PingParser(q_out)
+ ping_parser.write(sample_out)
+
+ self.assertEqual({"packets_received": 3.0, "rtt": 0.296}, q_out.get())
+
+
+class TestPingTrafficGen(unittest.TestCase):
+ VNFD_0_EXT_IF_0 = {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': u'152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': u'152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ }
+
+ VNFD_0_EXT_IF_1 = {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': u'152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': u'152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ }
+
+ VNFD_0_EXT_IF_LIST = [
+ VNFD_0_EXT_IF_0,
+ VNFD_0_EXT_IF_1,
+ ]
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': u'152.16.100.20',
+ 'netmask': u'255.255.255.0',
+ 'gateway': u'152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': u'152.16.40.20',
+ 'netmask': u'255.255.255.0',
+ 'gateway': u'152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': VNFD_0_EXT_IF_LIST,
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf',
+ 'name': 'VPEVnfSsh',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ],
+ },
+ }
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ CMD_KWARGS = {
+ 'target_ip': u'152.16.100.20',
+ 'local_ip': u'152.16.100.19',
+ 'local_if_name': u'xe0_fake',
+ }
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test___init__(self, ssh):
+ ssh.from_node.return_value.execute.return_value = 0, "success", ""
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+
+ self.assertIsInstance(ping_traffic_gen.setup_helper, PingSetupEnvHelper)
+ self.assertIsInstance(ping_traffic_gen.resource_helper, PingResourceHelper)
+ self.assertEqual(ping_traffic_gen._result, {})
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test__bind_device_kernel_with_failure(self, ssh):
+ mock_ssh(ssh)
+
+ execute_result_data = [
+ (1, 'bad stdout messages', 'error messages'),
+ (0, '', ''),
+ (0, 'if_name_1', ''),
+ (0, 'if_name_2', ''),
+ ]
+ ssh.from_node.return_value.execute.side_effect = iter(execute_result_data)
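+ # the first execute() fails (rc 1), so the interface names must not be
+ # rewritten to 'if_name_1'/'if_name_2'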
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ext_ifs = ping_traffic_gen.vnfd_helper.interfaces
+ self.assertNotEqual(ext_ifs[0]['virtual-interface']['local_iface_name'], 'if_name_1')
+ self.assertNotEqual(ext_ifs[1]['virtual-interface']['local_iface_name'], 'if_name_2')
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test_collect_kpi(self, ssh):
+ mock_ssh(ssh, exec_result=(0, "success", ""))
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen._queue = Queue()
+ ping_traffic_gen._queue.put({})
+ ping_traffic_gen.collect_kpi()
+ self.assertEqual(ping_traffic_gen._result, {})
+
+ @mock.patch(SSH_HELPER)
+ def test_instantiate(self, ssh):
+ mock_ssh(ssh, spec=VnfSshHelper, exec_result=(0, "success", ""))
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ ping_traffic_gen.setup_helper.ssh_helper = mock.MagicMock(
+ **{"execute.return_value": (0, "xe0_fake", "")})
+ self.assertIsInstance(ping_traffic_gen.ssh_helper, mock.Mock)
+ self.assertEqual(ping_traffic_gen._result, {})
+
+ self.assertIsNone(ping_traffic_gen.instantiate({}, {}))
+
+ self.assertEqual(
+ ping_traffic_gen.vnfd_helper.interfaces[0]['virtual-interface']['local_iface_name'],
+ 'xe0_fake')
+ self.assertEqual(self.CMD_KWARGS, ping_traffic_gen.resource_helper.cmd_kwargs)
+ self.assertIsNotNone(ping_traffic_gen._result)
+
+ def test_listen_traffic(self):
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ self.assertIsNone(ping_traffic_gen.listen_traffic({}))
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test_terminate(self, ssh):
+ ssh.from_node.return_value.execute.return_value = 0, "success", ""
+ ssh.from_node.return_value.run.return_value = 0, "success", ""
+
+ ping_traffic_gen = PingTrafficGen('vnf1', self.VNFD_0)
+ self.assertIsNone(ping_traffic_gen.terminate())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
new file mode 100644
index 000000000..f581ec8d9
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
@@ -0,0 +1,426 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from yardstick.tests import STL_MOCKS
+
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+NAME = 'vnf__1'
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.tg_prox import ProxTrafficGen
+ from yardstick.network_services.traffic_profile.base import TrafficProfile
+
+
+@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.time')
+class TestProxTrafficGen(unittest.TestCase):
+ VNFD0 = {
+ 'short-name': 'ProxVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'name': 'proxvnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'proxvnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': '',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': '',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ },
+ ],
+ },
+ ],
+ 'description': 'PROX approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'proxvnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'ProxApproxVnf',
+ 'name': 'ProxVnf',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD0,
+ ],
+ },
+ }
+
+ SCENARIO_CFG = {
+ 'task_path': "",
+ 'nodes': {
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'runner': {
+ 'duration': 600, 'type': 'Duration'},
+ 'topology': 'prox-tg-topology-2.yaml',
+ 'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
+ 'type': 'NSPerf',
+ 'options': {
+ 'tg__1': {'prox_args': {'-e': '',
+ '-t': ''},
+ 'prox_config': 'configs/l3-gen-2.cfg',
+ 'prox_path':
+ '/root/dppd-PROX-v035/build/prox'},
+ 'vnf__1': {
+ 'prox_args': {'-t': ''},
+ 'prox_config': 'configs/l3-swap-2.cfg',
+ 'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
+
+ CONTEXT_CFG = {
+ 'nodes': {
+ 'tg__2': {
+ 'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens513f0',
+ 'vld_id': ProxTrafficGen.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root',
+ },
+ 'tg__1': {
+ 'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens785f0',
+ 'vld_id': ProxTrafficGen.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root',
+ },
+ 'vnf__1': {
+ 'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens786f0',
+ 'vld_id': ProxTrafficGen.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens786f1',
+ 'vld_id': ProxTrafficGen.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'password': 'r00t',
+ 'VNF model': 'prox_vnf.yaml',
+ },
+ },
+ }
+
+ TRAFFIC_PROFILE = {
+ 'description': 'Binary search for max no-drop throughput over given packet sizes',
+ 'name': 'prox_binsearch',
+ 'schema': 'nsb:traffic_profile:0.1',
+ 'traffic_profile': {
+ 'duration': 5,
+ 'lower_bound': 0.0,
+ 'packet_sizes': [64, 65],
+ 'test_precision': 1.0,
+ 'tolerated_loss': 0.0,
+ 'traffic_type': 'ProxBinSearchProfile',
+ 'upper_bound': 100.0}}
+
+ @mock.patch(SSH_HELPER)
+ def test___init__(self, ssh, *args):
+ mock_ssh(ssh)
+ prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0)
+ self.assertIsNone(prox_traffic_gen._tg_process)
+ self.assertIsNone(prox_traffic_gen._traffic_process)
+
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0)
+ prox_traffic_gen._vnf_wrapper.resource_helper.resource = mock.MagicMock(
+ **{"self.check_if_system_agent_running.return_value": [False]})
+ prox_traffic_gen._vnf_wrapper.vnf_execute = mock.Mock(return_value="")
+ self.assertEqual({}, prox_traffic_gen.collect_kpi())
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.find_relative_file')
+ @mock.patch(
+ 'yardstick.network_services.vnf_generic.vnf.sample_vnf.CpuSysCores')
+ @mock.patch(SSH_HELPER)
+ def bad_test_instantiate(self, ssh, mock_cpu_sys_cores, *args):
+ mock_ssh(ssh)
+
+ mock_cpu_sys_cores.get_core_socket.return_value = {'0': '01234'}
+
+ mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ prox_traffic_gen = ProxTrafficGen(NAME, vnfd)
+ ssh_helper = mock.MagicMock(
+ **{"execute.return_value": (0, "", ""), "bin_path": ""})
+ prox_traffic_gen.ssh_helper = ssh_helper
+ prox_traffic_gen.setup_helper.dpdk_bind_helper.ssh_helper = ssh_helper
+ prox_traffic_gen.setup_helper._setup_resources = mock.MagicMock()
+ prox_traffic_gen.setup_hugepages = mock.MagicMock()
+ prox_traffic_gen.generate_prox_config_file = mock.MagicMock()
+ prox_traffic_gen.upload_prox_config = mock.MagicMock()
+ prox_traffic_gen.setup_helper._find_used_drivers = mock.MagicMock()
+ prox_traffic_gen.setup_helper.used_drivers = {}
+ prox_traffic_gen.setup_helper.bound_pci = []
+ prox_traffic_gen._start_server = mock.Mock(return_value=0)
+ prox_traffic_gen._tg_process = mock.MagicMock()
+ prox_traffic_gen._tg_process.start = mock.Mock()
+ prox_traffic_gen._tg_process.exitcode = 0
+ prox_traffic_gen._tg_process._is_alive = mock.Mock(return_value=1)
+ prox_traffic_gen.ssh_helper = mock.MagicMock()
+ prox_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ scenario_cfg = {
+ 'task_path': '',
+ 'options': {'tg__1': {'prox_args': {'-e': '',
+ '-t': ''},
+ 'prox_config': 'configs/l3-gen-2.cfg',
+ 'prox_path': '/root/dppd-PROX-v035/build/prox'},
+ 'vnf__1': {'prox_args': {'-t': ''},
+ 'prox_config': 'configs/l3-swap-2.cfg',
+ 'prox_path': '/root/dppd-PROX-v035/build/prox'}
+ }
+ }
+ prox_traffic_gen.instantiate(scenario_cfg, {})
+
+ @mock.patch(SSH_HELPER)
+ def test__traffic_runner(self, ssh, *args):
+ mock_ssh(ssh)
+
+ mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.execute_traffic.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sut = ProxTrafficGen(NAME, vnfd)
+ sut._get_socket = mock.MagicMock()
+ sut.ssh_helper = mock.Mock()
+ sut.ssh_helper.run = mock.Mock()
+ sut.setup_helper.prox_config_dict = {}
+ sut._connect_client = mock.Mock(autospec=STLClient)
+ sut._connect_client.get_stats = mock.Mock(return_value="0")
+ sut._traffic_runner(mock_traffic_profile)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
+ @mock.patch(SSH_HELPER)
+ def test_listen_traffic(self, ssh, *args):
+ mock_ssh(ssh)
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ prox_traffic_gen = ProxTrafficGen(NAME, vnfd)
+ self.assertIsNone(prox_traffic_gen.listen_traffic(mock.Mock()))
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh, *args):
+ mock_ssh(ssh)
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ prox_traffic_gen = ProxTrafficGen(NAME, vnfd)
+ prox_traffic_gen._terminated = mock.MagicMock()
+ prox_traffic_gen._traffic_process = mock.MagicMock()
+ prox_traffic_gen._traffic_process.terminate = mock.Mock()
+ prox_traffic_gen.ssh_helper = mock.MagicMock()
+ prox_traffic_gen.setup_helper = mock.MagicMock()
+ prox_traffic_gen.resource_helper = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper.setup_helper = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper._vnf_process = mock.MagicMock()
+ prox_traffic_gen._vnf_wrapper.resource_helper = mock.MagicMock()
+ self.assertIsNone(prox_traffic_gen.terminate())
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
new file mode 100644
index 000000000..8cc118a31
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -0,0 +1,374 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import mock
+import six
+import unittest
+
+from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_ixia
+from yardstick.network_services.traffic_profile import base as tp_base
+
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+
+NAME = "tg__1"
+
+
+class TestIxiaResourceHelper(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_IxNextgen = mock.patch.object(tg_rfc2544_ixia,
+ 'IxNextgen')
+ self.mock_IxNextgen = self._mock_IxNextgen.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_IxNextgen.stop()
+
+ def test___init___with_custom_rfc_helper(self):
+ class MyRfcHelper(tg_rfc2544_ixia.IxiaRfc2544Helper):
+ pass
+
+ ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(
+ mock.Mock(), MyRfcHelper)
+ self.assertIsInstance(ixia_resource_helper.rfc_helper, MyRfcHelper)
+
+ def test_stop_collect_with_client(self):
+ mock_client = mock.Mock()
+
+ ixia_resource_helper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
+
+ ixia_resource_helper.client = mock_client
+ ixia_resource_helper.stop_collect()
+ mock_client.ix_stop_traffic.assert_called_once()
+
+ def test_run_traffic(self):
+ mock_tprofile = mock.Mock()
+ mock_tprofile.get_drop_percentage.return_value = True, 'fake_samples'
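+ # (completed, samples) tuple: run_traffic is expected to push the samples
+ # onto the internal queue, which is checked below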
+ ixia_rhelper = tg_rfc2544_ixia.IxiaResourceHelper(mock.Mock())
+ ixia_rhelper.rfc_helper = mock.Mock()
+ ixia_rhelper.vnfd_helper = mock.Mock()
+ ixia_rhelper.vnfd_helper.port_pairs.all_ports = []
+ with mock.patch.object(ixia_rhelper, 'generate_samples'), \
+ mock.patch.object(ixia_rhelper, '_build_ports'), \
+ mock.patch.object(ixia_rhelper, '_initialize_client'):
+ ixia_rhelper.run_traffic(mock_tprofile)
+
+ self.assertEqual('fake_samples', ixia_rhelper._queue.get())
+
+
+@mock.patch(
+ "yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.IxNextgen")
+class TestIXIATrafficGen(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd',
+ 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64}}
+
+ TC_YAML = {'scenarios': [{'tc_options':
+ {'rfc2544': {'allowed_drop_rate': '0.8 - 1'}},
+ 'runner': {'duration': 400,
+ 'interval': 35, 'type': 'Duration'},
+ 'traffic_options':
+ {'flow': 'ipv4_1flow_Packets_vpe.yaml',
+ 'imix': 'imix_voice.yaml'},
+ 'vnf_options': {'vpe': {'cfg': 'vpe_config'}},
+ 'traffic_profile': 'ipv4_throughput_vpe.yaml',
+ 'type': 'NSPerf',
+ 'nodes': {'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'topology': 'vpe_vnf_topology.yaml'}],
+ 'context': {'nfvi_type': 'baremetal', 'type': 'Node',
+ 'name': 'yardstick',
+ 'file': '/etc/yardstick/nodes/pod.yaml'},
+ 'schema': 'yardstick:task:0.1'}
+
+ def test___init__(self, *args):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ # NOTE(ralonsoh): check the object returned.
+ tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+
+ def test_listen_traffic(self, *args):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ self.assertIsNone(ixnet_traffic_gen.listen_traffic({}))
+
+ def test_instantiate(self, *args):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh_mock.run = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ scenario_cfg = {'tc': "nsb_test_case", "topology": "",
+ 'ixia_profile': "ixload.cfg"}
+ scenario_cfg.update(
+ {
+ 'options': {
+ 'packetsize': 64,
+ 'traffic_type': 4,
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1'},
+ 'vnf__1': {
+ 'rules': 'acl_1rule.yaml',
+ 'vnf_config': {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1}}}})
+ ixnet_traffic_gen.topology = ""
+ ixnet_traffic_gen.get_ixobj = mock.MagicMock()
+ ixnet_traffic_gen._ixia_traffic_gen = mock.MagicMock()
+ ixnet_traffic_gen._ixia_traffic_gen._connect = mock.Mock()
+ # pass the bound method and its arguments so assertRaises actually
+ # invokes it and can catch the expected IOError
+ self.assertRaises(
+ IOError,
+ ixnet_traffic_gen.instantiate, scenario_cfg, {})
+
+ def test_collect_kpi(self, *args):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ ixnet_traffic_gen.data = {}
+ result = ixnet_traffic_gen.collect_kpi()
+ self.assertEqual({}, result)
+
+ def test_terminate(self, *args):
+ with mock.patch("yardstick.ssh.SSH") as ssh:
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ ssh_mock = mock.Mock(autospec=ssh.SSH)
+ ssh_mock.execute = \
+ mock.Mock(return_value=(0, "", ""))
+ ssh.from_node.return_value = ssh_mock
+ ixnet_traffic_gen = tg_rfc2544_ixia.IxiaTrafficGen(NAME, vnfd)
+ ixnet_traffic_gen._terminated = mock.MagicMock()
+ ixnet_traffic_gen._terminated.value = 0
+ ixnet_traffic_gen._ixia_traffic_gen = mock.MagicMock()
+ ixnet_traffic_gen._ixia_traffic_gen.ix_stop_traffic = mock.Mock()
+ ixnet_traffic_gen._traffic_process = mock.MagicMock()
+ ixnet_traffic_gen._traffic_process.terminate = mock.Mock()
+ self.assertIsNone(ixnet_traffic_gen.terminate())
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ def test__check_status(self, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ sut = tg_rfc2544_ixia.IxiaTrafficGen('vnf1', vnfd)
+ sut._check_status()
+
+ @mock.patch("yardstick.ssh.SSH")
+ def test_traffic_runner(self, mock_ssh, *args):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+ # traffic_profile.ports is standardized on port_num
+ mock_traffic_profile.ports = [0, 1]
+
+ mock_ssh_instance = mock.Mock(autospec=mock_ssh.SSH)
+ mock_ssh_instance.execute.return_value = 0, "", ""
+ mock_ssh_instance.run.return_value = 0, "", ""
+
+ mock_ssh.from_node.return_value = mock_ssh_instance
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vnfd["mgmt-interface"].update({
+ 'tg-config': {
+ "ixchassis": "1.1.1.1",
+ "py_bin_path": "/root",
+ }
+ })
+
+ samples = {}
+ name = ''
+ for ifname in range(1):
+ name = "xe{}".format(ifname)
+ samples[name] = {
+ "Rx_Rate_Kbps": 20,
+ "Tx_Rate_Kbps": 20,
+ "Rx_Rate_Mbps": 10,
+ "Tx_Rate_Mbps": 10,
+ "RxThroughput": 10,
+ "TxThroughput": 10,
+ "Valid_Frames_Rx": 1000,
+ "Frames_Tx": 1000,
+ "in_packets": 1000,
+ "out_packets": 1000,
+ }
+
+ samples.update({"CurrentDropPercentage": 0.0})
+
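+ # statistics handed back by the mocked ix_get_statistics: a status code
+ # followed by per-port sample lists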
+ last_res = [
+ 0,
+ {
+ "Rx_Rate_Kbps": [20, 20],
+ "Tx_Rate_Kbps": [20, 20],
+ "Rx_Rate_Mbps": [10, 10],
+ "Tx_Rate_Mbps": [10, 10],
+ "CurrentDropPercentage": [0, 0],
+ "RxThroughput": [10, 10],
+ "TxThroughput": [10, 10],
+ "Frames_Tx": [1000, 1000],
+ "in_packets": [1000, 1000],
+ "Valid_Frames_Rx": [1000, 1000],
+ "out_packets": [1000, 1000],
+ },
+ ]
+
+ mock_traffic_profile.execute_traffic.return_value = [
+ 'Completed', samples]
+ mock_traffic_profile.get_drop_percentage.return_value = [
+ 'Completed', samples]
+
+ sut = tg_rfc2544_ixia.IxiaTrafficGen(name, vnfd)
+ sut.vnf_port_pairs = [[[0], [1]]]
+ sut.tc_file_name = self._get_file_abspath(TEST_FILE_YAML)
+ sut.topology = ""
+
+ sut.ssh_helper = mock.Mock()
+ sut._traffic_process = mock.MagicMock()
+ sut.generate_port_pairs = mock.Mock()
+
+ sut._ixia_traffic_gen = mock.MagicMock()
+ sut._ixia_traffic_gen.ix_get_statistics.return_value = last_res
+
+ sut.resource_helper.client = mock.MagicMock()
+ sut.resource_helper.client_started = mock.MagicMock()
+ sut.resource_helper.client_started.value = 1
+ sut.resource_helper.rfc_helper.iteration.value = 11
+
+ sut.scenario_helper.scenario_cfg = {
+ 'options': {
+ 'packetsize': 64,
+ 'traffic_type': 4,
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ 'latency': True
+ },
+ 'vnf__1': {
+ 'rules': 'acl_1rule.yaml',
+ 'vnf_config': {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1,
+ },
+ },
+ },
+ 'ixia_profile': '/path/to/profile',
+ 'task_path': '/path/to/task'
+ }
+
+ @mock.patch.object(six.moves.builtins, 'open', create=True)
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.open',
+ mock.mock_open(), create=True)
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.LOG.exception')
+ def _traffic_runner(*args):
+ result = sut._traffic_runner(mock_traffic_profile)
+ self.assertIsNone(result)
+
+ _traffic_runner()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
new file mode 100644
index 000000000..9531b90c4
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_trex.py
@@ -0,0 +1,293 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+import unittest
+
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_rfc2544_trex
+
+
+class TestTrexRfcResouceHelper(unittest.TestCase):
+
+ def test__run_traffic_once(self):
+ mock_setup_helper = mock.Mock()
+ mock_traffic_profile = mock.Mock()
+ mock_traffic_profile.config.duration = 3
+ mock_traffic_profile.execute_traffic.return_value = ('fake_ports',
+ 'port_pg_id_map')
+ mock_traffic_profile.get_drop_percentage.return_value = 'percentage'
+ rfc_rh = tg_rfc2544_trex.TrexRfcResourceHelper(mock_setup_helper)
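+ # set the transient period to zero so the test does not have to wait
+ # between sample collections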
+ rfc_rh.TRANSIENT_PERIOD = 0
+ rfc_rh.rfc2544_helper = mock.Mock()
+
+ with mock.patch.object(rfc_rh, '_get_samples') as mock_get_samples:
+ rfc_rh._run_traffic_once(mock_traffic_profile)
+
+ mock_traffic_profile.execute_traffic.assert_called_once_with(rfc_rh)
+ mock_traffic_profile.stop_traffic.assert_called_once_with(rfc_rh)
+ mock_traffic_profile.stop_traffic.assert_called_once()
+ mock_get_samples.assert_has_calls([
+ mock.call('fake_ports', port_pg_id='port_pg_id_map'),
+ mock.call('fake_ports', port_pg_id='port_pg_id_map')])
+
+
+class TestTrexTrafficGenRFC(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'ifname': 'xe0',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'vld_id': 'uplink_0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:01',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'ifname': 'xe1',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'vld_id': 'downlink_0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:02'
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf',
+ 'name': 'VPEVnfSsh',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ],
+ },
+ }
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64,
+ },
+ }
+
+ TC_YAML = {
+ 'scenarios': [
+ {
+ 'tc_options': {
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ },
+ },
+ 'runner': {
+ 'duration': 400,
+ 'interval': 35,
+ 'type': 'Duration',
+ },
+ 'traffic_options': {
+ 'flow': 'ipv4_1flow_Packets_vpe.yaml',
+ 'imix': 'imix_voice.yaml',
+ },
+ 'vnf_options': {
+ 'vpe': {
+ 'cfg': 'vpe_config',
+ },
+ },
+ 'traffic_profile': 'ipv4_throughput_vpe.yaml',
+ 'type': 'NSPerf',
+ 'nodes': {
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick',
+ },
+ 'topology': 'vpe_vnf_topology.yaml',
+ },
+ ],
+ 'context': {
+ 'nfvi_type': 'baremetal',
+ 'type': 'Node',
+ 'name': 'yardstick',
+ 'file': '/etc/yardstick/nodes/pod.yaml',
+ },
+ 'schema': 'yardstick:task:0.1',
+ }
+
+ def setUp(self):
+ self._mock_ssh_helper = mock.patch.object(sample_vnf, 'VnfSshHelper')
+ self.mock_ssh_helper = self._mock_ssh_helper.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_ssh_helper.stop()
+
+ def test___init__(self):
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ self.assertIsNotNone(trex_traffic_gen.resource_helper._terminated.value)
+
+ def test_instantiate(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen._start_server = mock.Mock(return_value=0)
+ trex_traffic_gen.resource_helper = mock.MagicMock()
+ trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
+
+ scenario_cfg = {
+ "tc": "tc_baremetal_rfc2544_ipv4_1flow_64B",
+ "topology": 'nsb_test_case.yaml',
+ 'options': {
+ 'packetsize': 64,
+ 'traffic_type': 4,
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ },
+ 'vnf__1': {
+ 'rules': 'acl_1rule.yaml',
+ 'vnf_config': {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1
+ },
+ },
+ },
+ }
+ tg_rfc2544_trex.WAIT_TIME = 3
+ scenario_cfg.update({"nodes": ["tg_1", "vnf_1"]})
+ self.assertIsNone(trex_traffic_gen.instantiate(scenario_cfg, {}))
+
+ def test_instantiate_error(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+
+ trex_traffic_gen = tg_rfc2544_trex.TrexTrafficGenRFC('vnf1', self.VNFD_0)
+ trex_traffic_gen.resource_helper = mock.MagicMock()
+ trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
+ scenario_cfg = {
+ "tc": "tc_baremetal_rfc2544_ipv4_1flow_64B",
+ "nodes": [
+ "tg_1",
+ "vnf_1",
+ ],
+ "topology": 'nsb_test_case.yaml',
+ 'options': {
+ 'packetsize': 64,
+ 'traffic_type': 4,
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ },
+ 'vnf__1': {
+ 'rules': 'acl_1rule.yaml',
+ 'vnf_config': {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1,
+ },
+ },
+ },
+ }
+ trex_traffic_gen.instantiate(scenario_cfg, {})
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
new file mode 100644
index 000000000..4f8742477
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_trex.py
@@ -0,0 +1,505 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import mock
+import unittest
+
+from yardstick.network_services.traffic_profile import base as tp_base
+from yardstick.network_services.traffic_profile import rfc2544
+from yardstick.network_services.vnf_generic.vnf import sample_vnf
+from yardstick.network_services.vnf_generic.vnf import tg_trex
+
+
+NAME = 'vnf_1'
+
+
+class TestTrexTrafficGen(unittest.TestCase):
+
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd',
+ 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ TRAFFIC_PROFILE = {
+ "schema": "isb:traffic_profile:0.1",
+ "name": "fixed",
+ "description": "Fixed traffic profile to run UDP traffic",
+ "traffic_profile": {
+ "traffic_type": "FixedTraffic",
+ "frame_rate": 100, # pps
+ "flow_number": 10,
+ "frame_size": 64
+ },
+ }
+
+ SCENARIO_CFG = {
+ "options": {
+ "packetsize": 64,
+ "traffic_type": 4,
+ "rfc2544": {
+ "allowed_drop_rate": "0.8 - 1",
+ },
+ "vnf__1": {
+ "rules": "acl_1rule.yaml",
+ "vnf_config": {
+ "lb_config": "SW",
+ "lb_count": 1,
+ "worker_config": "1C/1T",
+ "worker_threads": 1,
+ }
+ }
+ },
+ "task_id": "a70bdf4a-8e67-47a3-9dc1-273c14506eb7",
+ "tc": "tc_ipv4_1Mflow_64B_packetsize",
+ "runner": {
+ "object": "NetworkServiceTestCase",
+ "interval": 35,
+ "output_filename": "/tmp/yardstick.out",
+ "runner_id": 74476, "duration": 400,
+ "type": "Duration"
+ },
+ "traffic_profile": "ipv4_throughput_acl.yaml",
+ "traffic_options": {
+ "flow": "ipv4_Packets_acl.yaml",
+ "imix": "imix_voice.yaml"
+ },
+ "type": "ISB",
+ "nodes": {
+ "tg__2": "trafficgen_2.yardstick",
+ "tg__1": "trafficgen_1.yardstick",
+ "vnf__1": "vnf.yardstick"
+ },
+ "topology": "udpreplay-tg-topology-baremetal.yaml"
+ }
+
+ CONTEXT_CFG = {
+ "nodes": {
+ "vnf__1": {
+ "vnfd-id-ref": "vnf__1",
+ "ip": "1.2.1.1",
+ "interfaces": {
+ "xe0": {
+ "local_iface_name": "ens786f0",
+ "vld_id": tp_base.TrafficProfile.UPLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:05:00.0",
+ "local_ip": "152.16.100.19",
+ "driver": "i40e",
+ "dst_ip": "152.16.100.20",
+ "local_mac": "00:00:00:00:00:02",
+ "dst_mac": "00:00:00:00:00:04",
+ "dpdk_port_num": 0
+ },
+ "xe1": {
+ "local_iface_name": "ens786f1",
+ "vld_id": tp_base.TrafficProfile.DOWNLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:05:00.1",
+ "local_ip": "152.16.40.19",
+ "driver": "i40e",
+ "dst_ip": "152.16.40.20",
+ "local_mac": "00:00:00:00:00:01",
+ "dst_mac": "00:00:00:00:00:03",
+ "dpdk_port_num": 1
+ }
+ },
+ "host": "1.2.1.1",
+ "user": "root",
+ "nd_route_tbl": [
+ {
+ "netmask": "112",
+ "if": "xe0",
+ "gateway": "0064:ff9b:0:0:0:0:9810:6414",
+ "network": "0064:ff9b:0:0:0:0:9810:6414"
+ },
+ {
+ "netmask": "112",
+ "if": "xe1",
+ "gateway": "0064:ff9b:0:0:0:0:9810:2814",
+ "network": "0064:ff9b:0:0:0:0:9810:2814"
+ }
+ ],
+ "password": "r00t",
+ "VNF model": "udp_replay.yaml",
+ "name": "vnf.yardstick",
+ "member-vnf-index": "2",
+ "routing_table": [
+ {
+ "netmask": "255.255.255.0",
+ "if": "xe0",
+ "gateway": "152.16.100.20",
+ "network": "152.16.100.20"
+ },
+ {
+ "netmask": "255.255.255.0",
+ "if": "xe1",
+ "gateway": "152.16.40.20",
+ "network": "152.16.40.20"
+ }
+ ],
+ "role": "vnf"
+ },
+ "trafficgen_2.yardstick": {
+ "member-vnf-index": "3",
+ "role": "TrafficGen",
+ "name": "trafficgen_2.yardstick",
+ "vnfd-id-ref": "tg__2",
+ "ip": "1.2.1.1",
+ "interfaces": {
+ "xe0": {
+ "local_iface_name": "ens513f0",
+ "vld_id": tp_base.TrafficProfile.DOWNLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:02:00.0",
+ "local_ip": "152.16.40.20",
+ "driver": "ixgbe",
+ "dst_ip": "152.16.40.19",
+ "local_mac": "00:00:00:00:00:03",
+ "dst_mac": "00:00:00:00:00:01",
+ "dpdk_port_num": 0
+ },
+ "xe1": {
+ "local_iface_name": "ens513f1",
+ "netmask": "255.255.255.0",
+ "network": "202.16.100.0",
+ "local_ip": "202.16.100.20",
+ "driver": "ixgbe",
+ "local_mac": "00:1e:67:d0:60:5d",
+ "vpci": "0000:02:00.1",
+ "dpdk_port_num": 1
+ }
+ },
+ "password": "r00t",
+ "VNF model": "l3fwd_vnf.yaml",
+ "user": "root"
+ },
+ "trafficgen_1.yardstick": {
+ "member-vnf-index": "1",
+ "role": "TrafficGen",
+ "name": "trafficgen_1.yardstick",
+ "vnfd-id-ref": "tg__1",
+ "ip": "1.2.1.1",
+ "interfaces": {
+ "xe0": {
+ "local_iface_name": "ens785f0",
+ "vld_id": tp_base.TrafficProfile.UPLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:05:00.0",
+ "local_ip": "152.16.100.20",
+ "driver": "i40e",
+ "dst_ip": "152.16.100.19",
+ "local_mac": "00:00:00:00:00:04",
+ "dst_mac": "00:00:00:00:00:02",
+ "dpdk_port_num": 0
+ },
+ "xe1": {
+ "local_ip": "152.16.100.21",
+ "driver": "i40e",
+ "vpci": "0000:05:00.1",
+ "dpdk_port_num": 1,
+ "local_iface_name": "ens785f1",
+ "netmask": "255.255.255.0",
+ "local_mac": "00:00:00:00:00:01"
+ }
+ },
+ "password": "r00t",
+ "VNF model": "tg_rfc2544_tpl.yaml",
+ "user": "root"
+ }
+ }
+ }
+
+ def setUp(self):
+ self._mock_ssh_helper = mock.patch.object(sample_vnf, 'VnfSshHelper')
+ self.mock_ssh_helper = self._mock_ssh_helper.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_ssh_helper.stop()
+
+ def test___init__(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.assertIsInstance(trex_traffic_gen.resource_helper,
+ tg_trex.TrexResourceHelper)
+
+ def test_collect_kpi(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen.resource_helper._queue.put({})
+ result = trex_traffic_gen.collect_kpi()
+ self.assertEqual({}, result)
+
+ def test_listen_traffic(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.assertIsNone(trex_traffic_gen.listen_traffic({}))
+
+ def test_instantiate(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen._start_server = mock.Mock(return_value=0)
+ trex_traffic_gen._tg_process = mock.MagicMock()
+ trex_traffic_gen._tg_process.start = mock.Mock()
+ trex_traffic_gen._tg_process.exitcode = 0
+ trex_traffic_gen._tg_process._is_alive = mock.Mock(return_value=1)
+ trex_traffic_gen.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
+ self.assertIsNone(trex_traffic_gen.instantiate(self.SCENARIO_CFG,
+ self.CONTEXT_CFG))
+
+ def test_instantiate_error(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen._start_server = mock.Mock(return_value=0)
+ trex_traffic_gen._tg_process = mock.MagicMock()
+ trex_traffic_gen._tg_process.start = mock.Mock()
+ trex_traffic_gen._tg_process._is_alive = mock.Mock(return_value=0)
+ trex_traffic_gen.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.setup_helper.setup_vnf_environment = mock.MagicMock()
+ self.assertIsNone(trex_traffic_gen.instantiate(self.SCENARIO_CFG,
+ self.CONTEXT_CFG))
+
+ def test__start_server(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.scenario_helper.scenario_cfg = {}
+ self.assertIsNone(trex_traffic_gen._start_server())
+
+ def test__start_server_multiple_queues(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.scenario_helper.scenario_cfg = {
+ "options": {NAME: {"queues_per_port": 2}}}
+ self.assertIsNone(trex_traffic_gen._start_server())
+
+ def test__traffic_runner(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.execute_traffic.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.sut = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.sut.ssh_helper = mock.Mock()
+ self.sut.ssh_helper.run = mock.Mock()
+ self.sut._connect_client = mock.Mock()
+ self.sut._connect_client.get_stats = mock.Mock(return_value="0")
+ self.sut.resource_helper.RUN_DURATION = 0
+ self.sut.resource_helper.QUEUE_WAIT_TIME = 0
+ # must generate cfg before we can run traffic so Trex port mapping is
+ # created
+ self.sut.resource_helper.generate_cfg()
+ with mock.patch.object(self.sut.resource_helper, 'run_traffic'):
+ self.sut._traffic_runner(mock_traffic_profile)
+
+ def test__generate_trex_cfg(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ self.assertIsNone(trex_traffic_gen.resource_helper.generate_cfg())
+
+ def test_build_ports_reversed_pci_ordering(self):
+ vnfd = copy.deepcopy(self.VNFD['vnfd:vnfd-catalog']['vnfd'][0])
+ vnfd['vdu'][0]['external-interface'] = [
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 2,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:04:00.0',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.resource_helper.generate_cfg()
+ trex_traffic_gen.resource_helper._build_ports()
+ self.assertEqual(sorted(trex_traffic_gen.resource_helper.all_ports),
+ [0, 1])
+ # there is a gap in ordering
+ self.assertEqual(
+ {0: 0, 2: 1},
+ dict(trex_traffic_gen.resource_helper.dpdk_to_trex_port_map))
+
+ def test_run_traffic(self):
+ mock_traffic_profile = mock.Mock(autospec=tp_base.TrafficProfile)
+ mock_traffic_profile.get_traffic_definition.return_value = "64"
+ mock_traffic_profile.params = self.TRAFFIC_PROFILE
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ self.sut = tg_trex.TrexTrafficGen(NAME, vnfd)
+ self.sut.ssh_helper = mock.Mock()
+ self.sut.ssh_helper.run = mock.Mock()
+ self.sut._traffic_runner = mock.Mock(return_value=0)
+ self.sut.resource_helper.client_started.value = 1
+ result = self.sut.run_traffic(mock_traffic_profile)
+ self.sut._traffic_process.terminate()
+ self.assertIsNotNone(result)
+
+ def test_terminate(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ trex_traffic_gen.ssh_helper = mock.MagicMock()
+ trex_traffic_gen.resource_helper.ssh_helper = mock.MagicMock()
+ self.assertIsNone(trex_traffic_gen.terminate())
+
+ def test__connect_client(self):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ trex_traffic_gen = tg_trex.TrexTrafficGen(NAME, vnfd)
+ client = mock.Mock()
+ client.connect = mock.Mock(return_value=0)
+ self.assertIsNotNone(trex_traffic_gen.resource_helper._connect(client))
+
+
+class TrexResourceHelperTestCase(unittest.TestCase):
+
+ def test__get_samples(self):
+ mock_setup_helper = mock.Mock()
+ trex_rh = tg_trex.TrexResourceHelper(mock_setup_helper)
+ trex_rh.vnfd_helper.interfaces = [
+ {'name': 'interface1'},
+ {'name': 'interface2'}]
+ stats = {
+ 10: {'rx_pps': 5, 'ipackets': 200},
+ 20: {'rx_pps': 10, 'ipackets': 300},
+ 'latency': {1: {'latency': 'latency_port_10_pg_id_1'},
+ 2: {'latency': 'latency_port_10_pg_id_2'},
+ 3: {'latency': 'latency_port_20_pg_id_3'},
+ 4: {'latency': 'latency_port_20_pg_id_4'}}
+ }
+ port_pg_id = rfc2544.PortPgIDMap()
+ port_pg_id.add_port(10)
+ port_pg_id.increase_pg_id()
+ port_pg_id.increase_pg_id()
+ port_pg_id.add_port(20)
+ port_pg_id.increase_pg_id()
+ port_pg_id.increase_pg_id()
+
+ with mock.patch.object(trex_rh, 'get_stats') as mock_get_stats, \
+ mock.patch.object(trex_rh.vnfd_helper, 'port_num') as \
+ mock_port_num:
+ mock_get_stats.return_value = stats
+ mock_port_num.side_effect = [10, 20]
+ output = trex_rh._get_samples([10, 20], port_pg_id=port_pg_id)
+
+ interface = output['interface1']
+ self.assertEqual(5.0, interface['rx_throughput_fps'])
+ self.assertEqual(200, interface['in_packets'])
+ self.assertEqual('latency_port_10_pg_id_1', interface['latency'][1])
+ self.assertEqual('latency_port_10_pg_id_2', interface['latency'][2])
+
+ interface = output['interface2']
+ self.assertEqual(10.0, interface['rx_throughput_fps'])
+ self.assertEqual(300, interface['in_packets'])
+ self.assertEqual('latency_port_20_pg_id_3', interface['latency'][3])
+ self.assertEqual('latency_port_20_pg_id_4', interface['latency'][4])
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
new file mode 100644
index 000000000..05a0ead71
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_udp_replay.py
@@ -0,0 +1,464 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+import os
+
+from yardstick.tests import STL_MOCKS
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.udp_replay import UdpReplayApproxVnf
+ from yardstick.network_services.vnf_generic.vnf.sample_vnf import ScenarioHelper
+
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+
+
+NAME = "vnf__1"
+
+
+@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process")
+class TestUdpReplayApproxVnf(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'UdpReplayVnf',
+ 'vdu': [
+ {
+ 'description': 'UDPReplay approximation using DPDK',
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'if': 'xe0',
+ 'network': '152.16.100.20',
+ 'gateway': '152.16.100.20',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'if': 'xe1',
+ 'network': '152.16.40.20',
+ 'gateway': '152.16.40.20',
+ }
+ ],
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'driver': 'i40e',
+ 'local_iface_name': 'xe0',
+ 'bandwidth': '10 Gbps',
+ 'local_ip': '152.16.100.19',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ 'netmask': '255.255.255.0',
+ 'dst_ip': '152.16.100.20',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'driver': 'i40e',
+ 'local_iface_name': 'xe1',
+ 'bandwidth': '10 Gbps',
+ 'local_ip': '152.16.40.19',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ 'netmask': '255.255.255.0',
+ 'dst_ip': '152.16.40.20',
+ 'type': 'PCI-PASSTHROUGH',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ }
+ ],
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'if': 'xe0',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ },
+ {
+ 'netmask': '112',
+ 'if': 'xe1',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ }
+ ],
+ 'id': 'udpreplayvnf-baremetal',
+ 'name': 'udpreplayvnf-baremetal',
+ }
+ ],
+ 'description': 'UDPReplay approximation using DPDK',
+ 'name': 'VPEVnfSsh',
+ 'mgmt-interface': {
+ 'vdu-id': 'udpreplay-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ]
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ }
+ ],
+ 'id': 'UdpReplayApproxVnf',
+ }
+
+ SCENARIO_CFG = {
+ "options": {
+ "packetsize": 64,
+ "traffic_type": 4,
+ "rfc2544": {
+ "allowed_drop_rate": "0.8 - 1",
+ },
+ "vnf__1": {
+ "rules": "acl_1rule.yaml",
+ "vnf_config": {
+ "lb_config": "SW",
+ "lb_count": 1,
+ "worker_config": "1C/1T",
+ "worker_threads": 1,
+ },
+ "hw_csum": "false",
+ }
+ },
+ "task_id": "a70bdf4a-8e67-47a3-9dc1-273c14506eb7",
+ "tc": "tc_ipv4_1Mflow_64B_packetsize",
+ "runner": {
+ "object": "NetworkServiceTestCase",
+ "interval": 35,
+ "output_filename": "/tmp/yardstick.out",
+ "runner_id": 74476, "duration": 400,
+ "type": "Duration"
+ },
+ "traffic_profile": "ipv4_throughput_acl.yaml",
+ "traffic_options": {
+ "flow": "ipv4_Packets_acl.yaml",
+ "imix": "imix_voice.yaml"
+ },
+ "type": "ISB",
+ "nodes": {
+ "tg__2": "trafficgen_2.yardstick",
+ "tg__1": "trafficgen_1.yardstick",
+ "vnf__1": "vnf.yardstick"
+ },
+ "topology": "udpreplay-tg-topology-baremetal.yaml"
+ }
+
+ CONTEXT_CFG = {
+ "nodes": {
+ "vnf__1": {
+ "vnfd-id-ref": "vnf__1",
+ "ip": "1.2.1.1",
+ "interfaces": {
+ "xe0": {
+ "local_iface_name": "ens786f0",
+ "vld_id": UdpReplayApproxVnf.UPLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:05:00.0",
+ "local_ip": "152.16.100.19",
+ "driver": "i40e",
+ "dst_ip": "152.16.100.20",
+ "local_mac": "00:00:00:00:00:02",
+ "dst_mac": "00:00:00:00:00:04",
+ "dpdk_port_num": 0
+ },
+ "xe1": {
+ "local_iface_name": "ens786f1",
+ "vld_id": UdpReplayApproxVnf.DOWNLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:05:00.1",
+ "local_ip": "152.16.40.19",
+ "driver": "i40e",
+ "dst_ip": "152.16.40.20",
+ "local_mac": "00:00:00:00:00:01",
+ "dst_mac": "00:00:00:00:00:03",
+ "dpdk_port_num": 1
+ }
+ },
+ "host": "1.2.1.1",
+ "user": "root",
+ "nd_route_tbl": [
+ {
+ "netmask": "112",
+ "if": "xe0",
+ "gateway": "0064:ff9b:0:0:0:0:9810:6414",
+ "network": "0064:ff9b:0:0:0:0:9810:6414"
+ },
+ {
+ "netmask": "112",
+ "if": "xe1",
+ "gateway": "0064:ff9b:0:0:0:0:9810:2814",
+ "network": "0064:ff9b:0:0:0:0:9810:2814"
+ }
+ ],
+ "password": "r00t",
+ "VNF model": "udp_replay.yaml",
+ "name": "vnf.yardstick",
+ "member-vnf-index": "2",
+ "routing_table": [
+ {
+ "netmask": "255.255.255.0",
+ "if": "xe0",
+ "gateway": "152.16.100.20",
+ "network": "152.16.100.20"
+ },
+ {
+ "netmask": "255.255.255.0",
+ "if": "xe1",
+ "gateway": "152.16.40.20",
+ "network": "152.16.40.20"
+ }
+ ],
+ "role": "vnf"
+ },
+ "trafficgen_2.yardstick": {
+ "member-vnf-index": "3",
+ "role": "TrafficGen",
+ "name": "trafficgen_2.yardstick",
+ "vnfd-id-ref": "tg__2",
+ "ip": "1.2.1.1",
+ "interfaces": {
+ "xe0": {
+ "local_iface_name": "ens513f0",
+ "vld_id": UdpReplayApproxVnf.DOWNLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:02:00.0",
+ "local_ip": "152.16.40.20",
+ "driver": "ixgbe",
+ "dst_ip": "152.16.40.19",
+ "local_mac": "00:00:00:00:00:03",
+ "dst_mac": "00:00:00:00:00:01",
+ "dpdk_port_num": 0
+ },
+ "xe1": {
+ "local_iface_name": "ens513f1",
+ "netmask": "255.255.255.0",
+ "network": "202.16.100.0",
+ "local_ip": "202.16.100.20",
+ "driver": "ixgbe",
+ "local_mac": "00:1e:67:d0:60:5d",
+ "vpci": "0000:02:00.1",
+ "dpdk_port_num": 1
+ }
+ },
+ "password": "r00t",
+ "VNF model": "l3fwd_vnf.yaml",
+ "user": "root"
+ },
+ "trafficgen_1.yardstick": {
+ "member-vnf-index": "1",
+ "role": "TrafficGen",
+ "name": "trafficgen_1.yardstick",
+ "vnfd-id-ref": "tg__1",
+ "ip": "1.2.1.1",
+ "interfaces": {
+ "xe0": {
+ "local_iface_name": "ens785f0",
+ "vld_id": UdpReplayApproxVnf.UPLINK,
+ "netmask": "255.255.255.0",
+ "vpci": "0000:05:00.0",
+ "local_ip": "152.16.100.20",
+ "driver": "i40e",
+ "dst_ip": "152.16.100.19",
+ "local_mac": "00:00:00:00:00:04",
+ "dst_mac": "00:00:00:00:00:02",
+ "dpdk_port_num": 0
+ },
+ "xe1": {
+ "local_ip": "152.16.100.21",
+ "driver": "i40e",
+ "vpci": "0000:05:00.1",
+ "dpdk_port_num": 1,
+ "local_iface_name": "ens785f1",
+ "netmask": "255.255.255.0",
+ "local_mac": "00:00:00:00:00:01"
+ }
+ },
+ "password": "r00t",
+ "VNF model": "tg_rfc2544_tpl.yaml",
+ "user": "root"
+ }
+ }
+ }
+
+ def test___init__(self, *args):
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ self.assertIsNone(udp_replay_approx_vnf._vnf_process)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD_0
+ get_stats_ret_val = \
+ "stats\r\r\n\r\nUDP_Replay stats:\r\n--------------\r\n" \
+ "Port\t\tRx Packet\t\tTx Packet\t\tRx Pkt Drop\t\tTx Pkt Drop \r\n"\
+ "0\t\t7374156\t\t7374136\t\t\t0\t\t\t0\r\n" \
+ "1\t\t7374316\t\t7374315\t\t\t0\t\t\t0\r\n\r\nReplay>\r\r\nReplay>"
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, vnfd)
+ udp_replay_approx_vnf.q_in = mock.MagicMock()
+ udp_replay_approx_vnf.q_out = mock.MagicMock()
+ udp_replay_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ udp_replay_approx_vnf.all_ports = ["xe0", "xe1"]
+ udp_replay_approx_vnf.get_stats = mock.Mock(return_value=get_stats_ret_val)
+
+ result = {'collect_stats': {}, 'packets_dropped': 0,
+ 'packets_fwd': 14748451, 'packets_in': 14748472}
+ self.assertEqual(result, udp_replay_approx_vnf.collect_kpi())
+
+ @mock.patch(SSH_HELPER)
+ def test_get_stats(self, ssh, *args):
+ mock_ssh(ssh)
+
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf.q_in = mock.MagicMock()
+ udp_replay_approx_vnf.q_out = mock.MagicMock()
+ udp_replay_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ mock_result = \
+ "CG-NAPT(.*\n)*Received 100, Missed 0, Dropped 0,Translated 100,ingress"
+
+ udp_replay_approx_vnf.vnf_execute = mock.Mock(return_value=mock_result)
+
+ self.assertEqual(mock_result,
+ udp_replay_approx_vnf.get_stats())
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test__build_config(self, ssh, mock_context, *args):
+ mock_ssh(ssh)
+
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf.queue_wrapper = mock.MagicMock()
+ udp_replay_approx_vnf.nfvi_context = mock_context
+ udp_replay_approx_vnf.nfvi_context.attrs = {'nfvi_type': 'baremetal'}
+ udp_replay_approx_vnf.setup_helper.bound_pci = []
+ udp_replay_approx_vnf.ssh_helper.provision_tool = mock.MagicMock(return_value="tool_path")
+ udp_replay_approx_vnf.scenario_helper = ScenarioHelper(name='vnf__1')
+ udp_replay_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
+
+ cmd_line = udp_replay_approx_vnf._build_config()
+
+ expected = \
+ "sudo tool_path --log-level=5 -c 0x7 -n 4 -w -- -p 0x3 --config='(0,0,1),(1,0,2)'"
+ self.assertEqual(cmd_line, expected)
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.udp_replay.open')
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test__build_pipeline_kwargs(self, ssh, mock_context, *args):
+ mock_ssh(ssh)
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf.nfvi_context = mock_context
+ udp_replay_approx_vnf.nfvi_context.attrs = {'nfvi_type': 'baremetal'}
+ udp_replay_approx_vnf.setup_helper.bound_pci = ['0000:00:0.1', '0000:00:0.3']
+ udp_replay_approx_vnf.all_ports = ["xe0", "xe1"]
+ udp_replay_approx_vnf.ssh_helper.provision_tool = mock.MagicMock(return_value="tool_path")
+ udp_replay_approx_vnf.scenario_helper = ScenarioHelper(name='vnf__1')
+ udp_replay_approx_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
+
+ udp_replay_approx_vnf._build_pipeline_kwargs()
+
+ self.assertEqual(udp_replay_approx_vnf.pipeline_kwargs, {
+ 'config': '(0,0,1),(1,0,2)',
+ 'cpu_mask_hex': '0x7',
+ 'hw_csum': '',
+ 'port_mask_hex': '0x3',
+ 'tool_path': 'tool_path',
+ 'whitelist': '0000:00:0.1 -w 0000:00:0.3'
+ })
+
+ @mock.patch(SSH_HELPER)
+ def test_run_udp_replay(self, ssh, *args):
+ mock_ssh(ssh)
+
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf._build_config = mock.MagicMock()
+ udp_replay_approx_vnf.queue_wrapper = mock.MagicMock()
+ udp_replay_approx_vnf.scenario_helper = mock.MagicMock()
+
+ udp_replay_approx_vnf._run()
+
+ udp_replay_approx_vnf.ssh_helper.run.assert_called_once()
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test_instantiate(self, ssh, *args):
+ mock_ssh(ssh)
+
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf.q_out.put("Replay>")
+ udp_replay_approx_vnf.WAIT_TIME = 0
+ udp_replay_approx_vnf.setup_helper.setup_vnf_environment = mock.Mock()
+
+ udp_replay_approx_vnf.deploy_helper = mock.MagicMock()
+ udp_replay_approx_vnf.deploy_vnfs = mock.MagicMock()
+ self.assertIsNone(udp_replay_approx_vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG))
+
+ udp_replay_approx_vnf._vnf_process.is_alive = mock.Mock(return_value=1)
+ udp_replay_approx_vnf._vnf_process.exitcode = 0
+
+ self.assertEqual(udp_replay_approx_vnf.wait_for_instantiate(), 0)
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch('yardstick.ssh.SSH')
+ @mock.patch(SSH_HELPER)
+ def test_instantiate_panic(self, *args):
+ udp_replay_approx_vnf = UdpReplayApproxVnf(NAME, self.VNFD_0)
+ udp_replay_approx_vnf.WAIT_TIME = 0
+ udp_replay_approx_vnf.q_out.put("some text PANIC some text")
+ udp_replay_approx_vnf.setup_helper.setup_vnf_environment = mock.Mock()
+
+ udp_replay_approx_vnf.deploy_helper = mock.MagicMock()
+ self.assertIsNone(udp_replay_approx_vnf.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG))
+ with self.assertRaises(RuntimeError):
+ udp_replay_approx_vnf.wait_for_instantiate()
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
new file mode 100644
index 000000000..ffb5cd6f0
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vfw_vnf.py
@@ -0,0 +1,374 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import unittest
+import mock
+import os
+
+from yardstick.tests import STL_MOCKS
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+
+from yardstick.common import utils
+
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxVnf
+ from yardstick.network_services.nfvi.resource import ResourceProfile
+ from yardstick.network_services.vnf_generic.vnf.vfw_vnf import FWApproxSetupEnvHelper
+
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+name = 'vnf__1'
+
+
+@mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Process")
+class TestFWApproxVnf(unittest.TestCase):
+ VNFD = {'vnfd:vnfd-catalog':
+ {'vnfd':
+ [{'short-name': 'VpeVnf',
+ 'vdu':
+ [{'routing_table':
+ [{'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'},
+ {'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl':
+ [{'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface':
+ [{'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02'},
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'},
+ {'virtual-interface':
+ {'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01'},
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'}]}],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface':
+ {'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1'},
+ 'benchmark':
+ {'kpi': ['packets_in', 'packets_fwd', 'packets_dropped']},
+ 'connection-point': [{'type': 'VPORT', 'name': 'xe0'},
+ {'type': 'VPORT', 'name': 'xe1'}],
+ 'id': 'FWApproxVnf', 'name': 'VPEVnfSsh'}]}}
+
+ scenario_cfg = {'options': {'packetsize': 64, 'traffic_type': 4,
+ 'rfc2544': {'allowed_drop_rate': '0.8 - 1'},
+ 'vnf__1': {'rules': 'acl_1rule.yaml',
+ 'vnf_config': {'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config':
+ '1C/1T',
+ 'worker_threads': 1}}
+ },
+ 'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
+ 'task_path': '/tmp',
+ 'tc': 'tc_ipv4_1Mflow_64B_packetsize',
+ 'runner': {'object': 'NetworkServiceTestCase',
+ 'interval': 35,
+ 'output_filename': '/tmp/yardstick.out',
+ 'runner_id': 74476, 'duration': 400,
+ 'type': 'Duration'},
+ 'traffic_profile': 'ipv4_throughput_vfw.yaml',
+ 'traffic_options': {'flow': 'ipv4_Packets_vfw.yaml',
+ 'imix': 'imix_voice.yaml'},
+ 'type': 'ISB',
+ 'nodes': {'tg__2': 'trafficgen_2.yardstick',
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick'},
+ 'topology': 'vpe-tg-topology-baremetal.yaml'}
+
+ context_cfg = {'nodes': {'tg__2':
+ {'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens513f0',
+ 'vld_id': FWApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root'},
+ 'tg__1':
+ {'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens785f0',
+ 'vld_id': FWApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root'},
+ 'vnf__1':
+ {'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces':
+ {'xe0': {'local_iface_name': 'ens786f0',
+ 'vld_id': FWApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0},
+ 'xe1': {'local_iface_name': 'ens786f1',
+ 'vld_id': FWApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1}},
+ 'routing_table':
+ [{'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0'},
+ {'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1'}],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl':
+ [{'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'},
+ {'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'}],
+ 'password': 'r00t',
+ 'VNF model': 'vfw_vnf.yaml'}}}
+
+ def test___init__(self, *args):
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ self.assertIsNone(vfw_approx_vnf._vnf_process)
+
+ STATS = """\
+p vfw stats
+
+VFW Stats
+{"VFW_counters" : {"id" : "PIPELINE4", " pkts_received": 6007180, " pkts_fw_forwarded": 6007180, " pkts_drop_fw": 0, " pkts_acl_forwarded": 6007180, "pkts_drop_without_rule" : 0, "average_pkts_in_batch" : 31, "average_internal_time_in_clocks" : 17427, "average_external_time_in_clocks" : 261120, "total_time_measures" : 189829, "ct_packets_forwarded" : 6007148, "ct_packets_dropped" : 0, "bytes_processed ": 360430800, "ct_sessions" : {"active" : 130050, "open_attempt" : 130050, "re-open_attempt" : 0, "established" : 0, "closed" : 0, "timeout" : 0}, "ct_drops" : {"out_of_window" : 0, "invalid_conn" : 0, "invalid_state_transition" : 0 "RST" : 0}}
+VFW TOTAL: pkts_received: 6007180, "pkts_fw_forwarded": 6007180, "pkts_drop_fw": 0, "fw_drops" : {"TTL_zero" : 0, "bad_size" : 0, "fragmented_packet" : 0, "unsupported_packet_types" : 0, "no_arp_entry" : 6007180}, "pkts_acl_forwarded": 6007180, "pkts_drop_without_rule": 0, "packets_last_sec" : 0, "average_packets_per_sec" : 0, "bytes_last_sec" : 0, "average_bytes_per_sec" : 0, "bytes_processed ": 360430800
+"CT TOTAL: ct_packets_forwarded" : 6007180, " ct_packets_dropped" : 0, "ct_sessions" : {"active" : 130050, "open_attempt" : 130050, "re-open_attempt" : 0, "established" : 0, "closed" : 0, "timeout" : 0}, "ct_drops" : {"out_of_window" : 0, "invalid_conn" : 0, "invalid_state_transition" : 0 "RST" : 0}
+Action ID: 00, packetCount: 2954633, byteCount: 177277980
+Action ID: 01, packetCount: 3052547, byteCount: 183152820
+pipeline>
+
+pipeline>
+""" # noqa
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf.q_in = mock.MagicMock()
+ vfw_approx_vnf.q_out = mock.MagicMock()
+ vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ vfw_approx_vnf.resource = mock.Mock(autospec=ResourceProfile)
+ vfw_approx_vnf.resource_helper = mock.MagicMock(
+ **{'collect_kpi.return_value': {"core": {}}})
+ vfw_approx_vnf.vnf_execute = mock.Mock(return_value=self.STATS)
+ result = {
+ 'packets_dropped': 0,
+ 'packets_fwd': 6007180,
+ 'packets_in': 6007180,
+ 'collect_stats': {'core': {}},
+ }
+ self.assertEqual(result, vfw_approx_vnf.collect_kpi())
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.time")
+ @mock.patch(SSH_HELPER)
+ def test_vnf_execute_command(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf.q_in = mock.MagicMock()
+ vfw_approx_vnf.q_out = mock.MagicMock()
+ vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ cmd = "quit"
+ self.assertEqual(vfw_approx_vnf.vnf_execute(cmd), "")
+
+ @mock.patch(SSH_HELPER)
+ def test_get_stats(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf.q_in = mock.MagicMock()
+ vfw_approx_vnf.q_out = mock.MagicMock()
+ vfw_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ vfw_approx_vnf.vnf_execute = mock.Mock(return_value=self.STATS)
+ self.assertEqual(self.STATS, vfw_approx_vnf.get_stats())
+
+ def _get_file_abspath(self, filename):
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ file_path = os.path.join(curr_path, filename)
+ return file_path
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.vfw_vnf.hex")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.vfw_vnf.eval")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.vfw_vnf.open")
+ @mock.patch(SSH_HELPER)
+ def test_run_vfw(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf._build_config = mock.MagicMock()
+ vfw_approx_vnf.queue_wrapper = mock.MagicMock()
+ vfw_approx_vnf.ssh_helper = mock.MagicMock()
+ vfw_approx_vnf.ssh_helper.run = mock.MagicMock()
+ vfw_approx_vnf.scenario_helper.scenario_cfg = self.scenario_cfg
+ vfw_approx_vnf.vnf_cfg = {'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1}
+ vfw_approx_vnf.all_options = {'traffic_type': '4',
+ 'topology': 'nsb_test_case.yaml'}
+ vfw_approx_vnf._run()
+ vfw_approx_vnf.ssh_helper.run.assert_called_once()
+
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.vfw_vnf.YangModel")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch(SSH_HELPER)
+ def test_instantiate(self, ssh, *args):
+ mock_ssh(ssh)
+
+ vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
+ vfw_approx_vnf = FWApproxVnf(name, vnfd)
+ vfw_approx_vnf.ssh_helper = ssh
+ vfw_approx_vnf.deploy_helper = mock.MagicMock()
+ vfw_approx_vnf.resource_helper = mock.MagicMock()
+ vfw_approx_vnf._build_config = mock.MagicMock()
+ self.scenario_cfg['vnf_options'] = {'acl': {'cfg': "",
+ 'rules': ""}}
+ self.scenario_cfg.update({"nodes": {"vnf__1": ""}})
+ self.assertIsNone(vfw_approx_vnf.instantiate(self.scenario_cfg, self.context_cfg))
+
+
+class TestFWApproxSetupEnvHelper(unittest.TestCase):
+
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.open')
+ @mock.patch.object(utils, 'find_relative_file')
+ @mock.patch('yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig')
+ @mock.patch.object(utils, 'open_relative_file')
+ def test_build_config(self, *args):
+ vnfd_helper = mock.Mock()
+ ssh_helper = mock.Mock()
+ scenario_helper = mock.Mock()
+ scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+ scenario_helper.all_options = {}
+
+ vfw_approx_setup_helper = FWApproxSetupEnvHelper(vnfd_helper, ssh_helper, scenario_helper)
+
+ vfw_approx_setup_helper.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
+ vfw_approx_setup_helper.ssh_helper.all_ports = mock.Mock()
+ vfw_approx_setup_helper.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1])
+ expected = 'sudo tool_path -p 0x3 -f /tmp/vfw_config -s /tmp/vfw_script --hwlb 3'
+ self.assertEqual(vfw_approx_setup_helper.build_config(), expected)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
new file mode 100644
index 000000000..73f91d1b1
--- /dev/null
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_vpe_vnf.py
@@ -0,0 +1,799 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from multiprocessing import Process, Queue
+import os
+import time
+
+import mock
+from six.moves import configparser
+import unittest
+
+from yardstick.tests import STL_MOCKS
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import FileAbsPath
+from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh
+from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
+from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
+
+
+SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'
+
+STLClient = mock.MagicMock()
+stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
+stl_patch.start()
+
+if stl_patch:
+ from yardstick.network_services.vnf_generic.vnf.vpe_vnf import ConfigCreate
+ from yardstick.network_services.nfvi.resource import ResourceProfile
+ from yardstick.network_services.vnf_generic.vnf.vpe_vnf import \
+ VpeApproxVnf, VpeApproxSetupEnvHelper
+
+
+TEST_FILE_YAML = 'nsb_test_case.yaml'
+
+NAME = 'vnf_1'
+
+PING_OUTPUT_1 = "Pkts in: 101\r\n\tPkts dropped by AH: 100\r\n\tPkts dropped by other: 100"
+
+MODULE_PATH = FileAbsPath(__file__)
+get_file_abspath = MODULE_PATH.get_path
+
+
+class TestConfigCreate(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1'
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0'
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1'
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.100.20',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0'
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1'
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.1.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.1.1.1'
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
+ }
+
+ def test___init__(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ self.assertEqual(config_create.uplink_ports, ['xe0'])
+ self.assertEqual(config_create.downlink_ports, ['xe1'])
+ self.assertEqual(config_create.socket, 2)
+
+ def test_dpdk_port_to_link_id(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ self.assertEqual(config_create.dpdk_port_to_link_id_map, {'xe0': 0, 'xe1': 1})
+
+ def test_vpe_initialize(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ config = configparser.ConfigParser()
+ config_create.vpe_initialize(config)
+ self.assertEqual(config.get('EAL', 'log_level'), '0')
+ self.assertEqual(config.get('PIPELINE0', 'type'), 'MASTER')
+ self.assertEqual(config.get('PIPELINE0', 'core'), 's2C0')
+ self.assertEqual(config.get('MEMPOOL0', 'pool_size'), '256K')
+ self.assertEqual(config.get('MEMPOOL1', 'pool_size'), '2M')
+
+ def test_vpe_rxq(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ config = configparser.ConfigParser()
+ config_create.downlink_ports = ['xe0']
+ config_create.vpe_rxq(config)
+ self.assertEqual(config.get('RXQ0.0', 'mempool'), 'MEMPOOL1')
+
+ def test_get_sink_swq(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 2)
+ config = configparser.ConfigParser()
+ config.add_section('PIPELINE0')
+ config.set('PIPELINE0', 'key1', 'value1')
+ config.set('PIPELINE0', 'key2', 'value2 SINK')
+ config.set('PIPELINE0', 'key3', 'TM value3')
+ config.set('PIPELINE0', 'key4', 'value4')
+ config.set('PIPELINE0', 'key5', 'the SINK value5')
+
+ self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key1', 5), 'SWQ-1')
+ self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key2', 5), 'SWQ-1 SINK0')
+ self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key3', 5), 'SWQ-1 TM5')
+ config_create.sw_q += 1
+ self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key4', 5), 'SWQ0')
+ self.assertEqual(config_create.get_sink_swq(config, 'PIPELINE0', 'key5', 5), 'SWQ0 SINK1')
+
+ def test_generate_vpe_script(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ vpe_config_vnf = ConfigCreate(vnfd_helper, 2)
+ intf = [
+ {
+ "name": 'xe1',
+ "virtual-interface": {
+ "dst_ip": "1.1.1.1",
+                    "dst_mac": "00:00:00:00:00:02",
+ },
+ },
+ {
+ "name": 'xe2',
+ "virtual-interface": {
+ "dst_ip": "1.1.1.1",
+                    "dst_mac": "00:00:00:00:00:02",
+ },
+ },
+ ]
+ vpe_config_vnf.downlink_ports = ['xe1']
+ vpe_config_vnf.uplink_ports = ['xe2']
+ result = vpe_config_vnf.generate_vpe_script(intf)
+ self.assertIsInstance(result, str)
+ self.assertNotEqual(result, '')
+
+ def test_create_vpe_config(self):
+ vnfd_helper = VnfdHelper(self.VNFD_0)
+ config_create = ConfigCreate(vnfd_helper, 23)
+ config_create.downlink_ports = ['xe1']
+ config_create.uplink_ports = ['xe1']
+ curr_path = os.path.dirname(os.path.abspath(__file__))
+ vpe_cfg = "samples/vnf_samples/nsut/vpe/vpe_config"
+ vnf_cfg = os.path.join(curr_path, "../../../../..", vpe_cfg)
+ config_create.create_vpe_config(vnf_cfg)
+ os.system("git checkout -- %s" % vnf_cfg)
+
+
+class TestVpeApproxVnf(unittest.TestCase):
+
+ VNFD_0 = {
+ 'short-name': 'VpeVnf',
+ 'vdu': [
+ {
+ 'routing_table': [
+ {
+ 'network': '152.16.100.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '152.16.40.20',
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'description': 'VPE approximation using DPDK',
+ 'name': 'vpevnf-baremetal',
+ 'nd_route_tbl': [
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'id': 'vpevnf-baremetal',
+ 'external-interface': [
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:04',
+ 'vpci': '0000:05:00.0',
+ 'local_ip': '152.16.100.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 0,
+ 'bandwidth': '10 Gbps',
+ 'driver': "i40e",
+ 'dst_ip': '152.16.100.20',
+ 'local_iface_name': 'xe0',
+ 'local_mac': '00:00:00:00:00:02',
+ 'vld_id': 'uplink_0',
+ 'ifname': 'xe0',
+ },
+ 'vnfd-connection-point-ref': 'xe0',
+ 'name': 'xe0',
+ },
+ {
+ 'virtual-interface': {
+ 'dst_mac': '00:00:00:00:00:03',
+ 'vpci': '0000:05:00.1',
+ 'local_ip': '152.16.40.19',
+ 'type': 'PCI-PASSTHROUGH',
+ 'driver': "i40e",
+ 'netmask': '255.255.255.0',
+ 'dpdk_port_num': 1,
+ 'bandwidth': '10 Gbps',
+ 'dst_ip': '152.16.40.20',
+ 'local_iface_name': 'xe1',
+ 'local_mac': '00:00:00:00:00:01',
+ 'vld_id': 'downlink_0',
+ 'ifname': 'xe1',
+ },
+ 'vnfd-connection-point-ref': 'xe1',
+ 'name': 'xe1',
+ },
+ ],
+ },
+ ],
+ 'description': 'Vpe approximation using DPDK',
+ 'mgmt-interface': {
+ 'vdu-id': 'vpevnf-baremetal',
+ 'host': '1.2.1.1',
+ 'password': 'r00t',
+ 'user': 'root',
+ 'ip': '1.2.1.1',
+ },
+ 'benchmark': {
+ 'kpi': [
+ 'packets_in',
+ 'packets_fwd',
+ 'packets_dropped',
+ ],
+ },
+ 'connection-point': [
+ {
+ 'type': 'VPORT',
+ 'name': 'xe0',
+ },
+ {
+ 'type': 'VPORT',
+ 'name': 'xe1',
+ },
+ ],
+ 'id': 'VpeApproxVnf',
+ 'name': 'VPEVnfSsh',
+ }
+
+ VNFD = {
+ 'vnfd:vnfd-catalog': {
+ 'vnfd': [
+ VNFD_0,
+ ],
+ },
+ }
+
+ SCENARIO_CFG = {
+ 'options': {
+ 'packetsize': 64,
+ 'traffic_type': 4,
+ 'rfc2544': {
+ 'allowed_drop_rate': '0.8 - 1',
+ },
+ 'vnf__1': {
+ 'cfg': 'acl_1rule.yaml',
+ 'vnf_config': {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+                'worker_config': '1C/1T',
+ 'worker_threads': 1,
+ },
+ }
+ },
+ 'task_id': 'a70bdf4a-8e67-47a3-9dc1-273c14506eb7',
+ 'tc': 'tc_ipv4_1Mflow_64B_packetsize',
+ 'runner': {
+ 'object': 'NetworkServiceTestCase',
+ 'interval': 35,
+ 'output_filename': '/tmp/yardstick.out',
+ 'runner_id': 74476,
+ 'duration': 400,
+ 'type': 'Duration',
+ },
+ 'traffic_profile': 'ipv4_throughput_vpe.yaml',
+ 'traffic_options': {
+ 'flow': 'ipv4_Packets_vpe.yaml',
+ 'imix': 'imix_voice.yaml',
+ },
+ 'type': 'ISB',
+ 'nodes': {
+ 'tg__2': 'trafficgen_2.yardstick',
+ 'tg__1': 'trafficgen_1.yardstick',
+ 'vnf__1': 'vnf.yardstick',
+ },
+ 'topology': 'vpe-tg-topology-baremetal.yaml',
+ }
+
+ CONTEXT_CFG = {
+ 'nodes': {
+ 'tg__2': {
+ 'member-vnf-index': '3',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_2.yardstick',
+ 'vnfd-id-ref': 'tg__2',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens513f0',
+ 'vld_id': VpeApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.20',
+ 'dst_mac': '00:00:00:00:00:01',
+ 'local_mac': '00:00:00:00:00:03',
+ 'dst_ip': '152.16.40.19',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens513f1',
+ 'netmask': '255.255.255.0',
+ 'network': '202.16.100.0',
+ 'local_ip': '202.16.100.20',
+ 'local_mac': '00:1e:67:d0:60:5d',
+ 'driver': 'ixgbe',
+ 'vpci': '0000:02:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'l3fwd_vnf.yaml',
+ 'user': 'root',
+ },
+ 'tg__1': {
+ 'member-vnf-index': '1',
+ 'role': 'TrafficGen',
+ 'name': 'trafficgen_1.yardstick',
+ 'vnfd-id-ref': 'tg__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens785f0',
+ 'vld_id': VpeApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.20',
+ 'dst_mac': '00:00:00:00:00:02',
+ 'local_mac': '00:00:00:00:00:04',
+ 'dst_ip': '152.16.100.19',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens785f1',
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.21',
+ 'local_mac': '00:00:00:00:00:01',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'password': 'r00t',
+ 'VNF model': 'tg_rfc2544_tpl.yaml',
+ 'user': 'root',
+ },
+ 'vnf__1': {
+ 'name': 'vnf.yardstick',
+ 'vnfd-id-ref': 'vnf__1',
+ 'ip': '1.2.1.1',
+ 'interfaces': {
+ 'xe0': {
+ 'local_iface_name': 'ens786f0',
+ 'vld_id': VpeApproxVnf.UPLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.100.19',
+ 'dst_mac': '00:00:00:00:00:04',
+ 'local_mac': '00:00:00:00:00:02',
+ 'dst_ip': '152.16.100.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.0',
+ 'dpdk_port_num': 0,
+ },
+ 'xe1': {
+ 'local_iface_name': 'ens786f1',
+ 'vld_id': VpeApproxVnf.DOWNLINK,
+ 'netmask': '255.255.255.0',
+ 'local_ip': '152.16.40.19',
+ 'dst_mac': '00:00:00:00:00:03',
+ 'local_mac': '00:00:00:00:00:01',
+ 'dst_ip': '152.16.40.20',
+ 'driver': 'i40e',
+ 'vpci': '0000:05:00.1',
+ 'dpdk_port_num': 1,
+ },
+ },
+ 'routing_table': [
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.100.20',
+ 'network': '152.16.100.20',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '255.255.255.0',
+ 'gateway': '152.16.40.20',
+ 'network': '152.16.40.20',
+ 'if': 'xe1',
+ },
+ ],
+ 'member-vnf-index': '2',
+ 'host': '1.2.1.1',
+ 'role': 'vnf',
+ 'user': 'root',
+ 'nd_route_tbl': [
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:6414',
+ 'network': '0064:ff9b:0:0:0:0:9810:6414',
+ 'if': 'xe0',
+ },
+ {
+ 'netmask': '112',
+ 'gateway': '0064:ff9b:0:0:0:0:9810:2814',
+ 'network': '0064:ff9b:0:0:0:0:9810:2814',
+ 'if': 'xe1',
+ },
+ ],
+ 'password': 'r00t',
+ 'VNF model': 'vpe_vnf.yaml',
+ },
+ },
+ }
+
+ def setUp(self):
+ self._mock_time_sleep = mock.patch.object(time, 'sleep')
+ self.mock_time_sleep = self._mock_time_sleep.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_time_sleep.stop()
+
+ def test___init__(self):
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ self.assertIsNone(vpe_approx_vnf._vnf_process)
+
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi_sa_not_running(self, ssh):
+ mock_ssh(ssh)
+
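+        # A non-zero status from check_if_system_agent_running means collectd
+        # is not running, so collect_kpi() is expected to report no per-core stats.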
+ resource = mock.Mock(autospec=ResourceProfile)
+        resource.check_if_system_agent_running.return_value = (1, None)
+        resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234}
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf.q_in = mock.MagicMock()
+ vpe_approx_vnf.q_out = mock.MagicMock()
+ vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ vpe_approx_vnf.resource_helper.resource = resource
+
+ expected = {
+ 'pkt_in_down_stream': 0,
+ 'pkt_in_up_stream': 0,
+ 'pkt_drop_down_stream': 0,
+ 'pkt_drop_up_stream': 0,
+ 'collect_stats': {'core': {}},
+ }
+ self.assertEqual(vpe_approx_vnf.collect_kpi(), expected)
+
+ @mock.patch(SSH_HELPER)
+ def test_collect_kpi_sa_running(self, ssh):
+ mock_ssh(ssh)
+
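+        # With the system agent reporting a running PID, the AMQP KPIs are
+        # expected to appear under collect_stats['core'].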
+ resource = mock.Mock(autospec=ResourceProfile)
+ resource.check_if_system_agent_running.return_value = 0, '1234'
+ resource.amqp_collect_nfvi_kpi.return_value = {'foo': 234}
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf.q_in = mock.MagicMock()
+ vpe_approx_vnf.q_out = mock.MagicMock()
+ vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
+ vpe_approx_vnf.resource_helper.resource = resource
+
+ expected = {
+ 'pkt_in_down_stream': 0,
+ 'pkt_in_up_stream': 0,
+ 'pkt_drop_down_stream': 0,
+ 'pkt_drop_up_stream': 0,
+ 'collect_stats': {'core': {'foo': 234}},
+ }
+ self.assertEqual(vpe_approx_vnf.collect_kpi(), expected)
+
+ @mock.patch(SSH_HELPER)
+ def test_vnf_execute(self, ssh):
+ mock_ssh(ssh)
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf.q_in = mock.MagicMock()
+ vpe_approx_vnf.q_out = mock.MagicMock()
+ vpe_approx_vnf.q_out.qsize = mock.Mock(return_value=0)
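+        # With nothing queued on q_out, vnf_execute() is expected to return an
+        # empty string for the issued command.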
+ self.assertEqual(vpe_approx_vnf.vnf_execute("quit", 0), '')
+
+ @mock.patch(SSH_HELPER)
+ def test_run_vpe(self, ssh):
+ mock_ssh(ssh)
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML)
+ vpe_approx_vnf.vnf_cfg = {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1,
+ }
+ vpe_approx_vnf.scenario_helper.scenario_cfg = {
+ 'options': {
+ NAME: {
+ 'traffic_type': '4',
+ 'topology': 'nsb_test_case.yaml',
+ 'vnf_config': 'vpe_config',
+ }
+ }
+ }
+ vpe_approx_vnf.topology = "nsb_test_case.yaml"
+ vpe_approx_vnf.nfvi_type = "baremetal"
+ vpe_approx_vnf._provide_config_file = mock.Mock()
+ vpe_approx_vnf._build_config = mock.MagicMock()
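+        # With config generation and the pipeline launch mocked out, _run() is
+        # expected to complete without returning a value.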
+
+        self.assertIsInstance(vpe_approx_vnf.ssh_helper, mock.Mock)
+ self.assertIsNone(vpe_approx_vnf._run())
+
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.MultiPortConfig")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.sample_vnf.Context")
+ @mock.patch("yardstick.network_services.vnf_generic.vnf.vpe_vnf.ConfigCreate")
+ @mock.patch("six.moves.builtins.open")
+ @mock.patch(SSH_HELPER)
+ def test_build_config(self, ssh, *args):
+ mock_ssh(ssh)
+ vpe_approx_vnf = VpeApproxSetupEnvHelper(mock.MagicMock(),
+ mock.MagicMock(), mock.MagicMock())
+ vpe_approx_vnf.tc_file_name = get_file_abspath(TEST_FILE_YAML)
+ vpe_approx_vnf.generate_port_pairs = mock.Mock()
+ vpe_approx_vnf.vnf_cfg = {
+ 'lb_config': 'SW',
+ 'lb_count': 1,
+ 'worker_config': '1C/1T',
+ 'worker_threads': 1,
+ }
+ vpe_approx_vnf.scenario_helper.scenario_cfg = {
+ 'options': {
+ NAME: {
+ 'traffic_type': '4',
+ 'topology': 'nsb_test_case.yaml',
+ 'vnf_config': 'vpe_config',
+ }
+ }
+ }
+ vpe_approx_vnf.topology = "nsb_test_case.yaml"
+ vpe_approx_vnf.nfvi_type = "baremetal"
+ vpe_approx_vnf._provide_config_file = mock.Mock()
+
+ vpe_approx_vnf.ssh_helper = mock.MagicMock()
+ vpe_approx_vnf.scenario_helper = mock.MagicMock()
+ vpe_approx_vnf.ssh_helper.bin_path = mock.Mock()
+ vpe_approx_vnf.ssh_helper.upload_config_file = mock.MagicMock()
+ self.assertIsNone(vpe_approx_vnf._build_vnf_ports())
+
+ vpe_approx_vnf.ssh_helper.provision_tool = mock.Mock(return_value='tool_path')
+ vpe_approx_vnf.ssh_helper.all_ports = mock.Mock()
+ vpe_approx_vnf.vnfd_helper.port_nums = mock.Mock(return_value=[0, 1])
+ vpe_approx_vnf.scenario_helper.vnf_cfg = {'lb_config': 'HW'}
+
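+        # With lb_config set to 'HW', the generated command line is expected to
+        # carry the --hwlb option next to the provisioned tool and the uploaded
+        # config/script.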
+ expected = 'sudo tool_path -p 0x3 -f /tmp/vpe_config -s /tmp/vpe_script --hwlb 3'
+ self.assertEqual(vpe_approx_vnf.build_config(), expected)
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate(self, ssh):
+ mock_ssh(ssh)
+
+ mock_process = mock.Mock(autospec=Process)
+ mock_process.is_alive.return_value = True
+ mock_process.exitcode = 432
+
+ mock_q_out = mock.Mock(autospec=Queue)
+ mock_q_out.get.side_effect = iter(["pipeline>"])
+ mock_q_out.qsize.side_effect = range(1, -1, -1)
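+        # The "pipeline>" prompt on the output queue signals that the VNF CLI
+        # came up, so wait_for_instantiate() is expected to return the process
+        # exit code.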
+
+ mock_resource = mock.MagicMock()
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf._vnf_process = mock_process
+ vpe_approx_vnf.q_out = mock_q_out
+ vpe_approx_vnf.queue_wrapper = mock.Mock(autospec=QueueFileWrapper)
+ vpe_approx_vnf.resource_helper.resource = mock_resource
+
+ vpe_approx_vnf.q_out.put("pipeline>")
+ self.assertEqual(vpe_approx_vnf.wait_for_instantiate(), 432)
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate_fragmented(self, ssh):
+ mock_ssh(ssh)
+
+ mock_process = mock.Mock(autospec=Process)
+ mock_process.is_alive.return_value = True
+ mock_process.exitcode = 432
+
+ # test that fragmented pipeline prompt is recognized
+ mock_q_out = mock.Mock(autospec=Queue)
+ mock_q_out.get.side_effect = iter(["wow pipel", "ine>"])
+ mock_q_out.qsize.side_effect = range(2, -1, -1)
+
+ mock_resource = mock.MagicMock()
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf._vnf_process = mock_process
+ vpe_approx_vnf.q_out = mock_q_out
+ vpe_approx_vnf.queue_wrapper = mock.Mock(autospec=QueueFileWrapper)
+ vpe_approx_vnf.resource_helper.resource = mock_resource
+
+ self.assertEqual(vpe_approx_vnf.wait_for_instantiate(), 432)
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate_crash(self, ssh):
+ mock_ssh(ssh, exec_result=(1, "", ""))
+
+ mock_process = mock.Mock(autospec=Process)
+ mock_process.is_alive.return_value = False
+ mock_process.exitcode = 432
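+        # is_alive() returning False simulates the VNF process dying before the
+        # prompt appears, which is expected to raise a RuntimeError.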
+
+ mock_resource = mock.MagicMock()
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf._vnf_process = mock_process
+ vpe_approx_vnf.resource_helper.resource = mock_resource
+
+ with self.assertRaises(RuntimeError) as raised:
+ vpe_approx_vnf.wait_for_instantiate()
+
+ self.assertIn('VNF process died', str(raised.exception))
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate_panic(self, ssh):
+ mock_ssh(ssh, exec_result=(1, "", ""))
+
+ mock_process = mock.Mock(autospec=Process)
+ mock_process.is_alive.return_value = True
+ mock_process.exitcode = 432
+
+ mock_resource = mock.MagicMock()
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf._vnf_process = mock_process
+ vpe_approx_vnf.resource_helper.resource = mock_resource
+
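+        # A PANIC marker on the output queue is expected to abort instantiation
+        # with a RuntimeError instead of returning the exit code.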
+ vpe_approx_vnf.q_out.put("PANIC")
+ with self.assertRaises(RuntimeError) as raised:
+ vpe_approx_vnf.wait_for_instantiate()
+
+ self.assertIn('Error starting', str(raised.exception))
+
+ @mock.patch(SSH_HELPER)
+ def test_wait_for_instantiate_panic_fragmented(self, ssh):
+ mock_ssh(ssh, exec_result=(1, "", ""))
+
+ mock_process = mock.Mock(autospec=Process)
+ mock_process.is_alive.return_value = True
+ mock_process.exitcode = 432
+
+ # test that fragmented PANIC is recognized
+ mock_q_out = mock.Mock(autospec=Queue)
+ mock_q_out.get.side_effect = iter(["omg PA", "NIC this is bad"])
+ mock_q_out.qsize.side_effect = range(2, -1, -1)
+
+ mock_resource = mock.MagicMock()
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf._vnf_process = mock_process
+ vpe_approx_vnf.q_out = mock_q_out
+ vpe_approx_vnf.resource_helper.resource = mock_resource
+
+ with self.assertRaises(RuntimeError) as raised:
+ vpe_approx_vnf.wait_for_instantiate()
+
+ self.assertIn('Error starting', str(raised.exception))
+
+ @mock.patch(SSH_HELPER)
+ def test_terminate(self, ssh):
+ mock_ssh(ssh)
+
+ vpe_approx_vnf = VpeApproxVnf(NAME, self.VNFD_0)
+ vpe_approx_vnf._vnf_process = mock.MagicMock()
+ vpe_approx_vnf._resource_collect_stop = mock.Mock()
+ vpe_approx_vnf.resource_helper = mock.MagicMock()
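+        # terminate() drives the mocked process and resource helpers, so it is
+        # expected to return None.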
+
+ self.assertIsNone(vpe_approx_vnf.terminate())
diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py
index 9598eeb04..3ec59a3c2 100644
--- a/yardstick/tests/unit/orchestrator/test_heat.py
+++ b/yardstick/tests/unit/orchestrator/test_heat.py
@@ -17,6 +17,7 @@ import shade
import unittest
from yardstick.benchmark.contexts import node
+from yardstick.common import constants
from yardstick.common import exceptions
from yardstick.orchestrator import heat
@@ -53,6 +54,14 @@ class HeatStackTestCase(unittest.TestCase):
self._mock_stack_get.stop()
heat._DEPLOYED_STACKS = {}
+ @mock.patch.object(shade, 'openstack_cloud')
+ def test__init(self, mock_openstack_cloud):
+ os_cloud_config = {'key': 'value'}
+ heatstack = heat.HeatStack('name', os_cloud_config=os_cloud_config)
+ self.assertEqual('name', heatstack.name)
+ os_cloud_config.update(constants.OS_CLOUD_DEFAULT_CONFIG)
+ mock_openstack_cloud.assert_called_once_with(**os_cloud_config)
+
def test_create(self):
template = {'tkey': 'tval'}
heat_parameters = {'pkey': 'pval'}
@@ -192,7 +201,9 @@ class HeatStackTestCase(unittest.TestCase):
class HeatTemplateTestCase(unittest.TestCase):
def setUp(self):
- self.template = heat.HeatTemplate('test')
+ self._os_cloud_config = {'key1': 'value1'}
+ self.template = heat.HeatTemplate(
+ 'test', os_cloud_config=self._os_cloud_config)
def test_add_tenant_network(self):
self.template.add_network('some-network')
@@ -337,8 +348,12 @@ class HeatTemplateTestCase(unittest.TestCase):
def test_create_not_block(self):
heat_stack = mock.Mock()
- with mock.patch.object(heat, 'HeatStack', return_value=heat_stack):
+ with mock.patch.object(heat, 'HeatStack', return_value=heat_stack) \
+ as mock_heatstack:
ret = self.template.create(block=False)
+
+ mock_heatstack.assert_called_once_with(
+ self.template.name, os_cloud_config=self.template._os_cloud_config)
heat_stack.create.assert_called_once_with(
self.template._template, self.template.heat_parameters, False,
3600)
diff --git a/yardstick/tests/unit/orchestrator/test_kubernetes.py b/yardstick/tests/unit/orchestrator/test_kubernetes.py
index f2bc5b0f4..58971f515 100644
--- a/yardstick/tests/unit/orchestrator/test_kubernetes.py
+++ b/yardstick/tests/unit/orchestrator/test_kubernetes.py
@@ -47,7 +47,7 @@ service ssh restart;while true ; do sleep 10000; done"
"name": "host-k8s-86096c30-container",
"volumeMounts": [
{
- "mountPath": "/root/.ssh/",
+ "mountPath": "/tmp/.ssh/",
"name": "k8s-86096c30-key"
}
]
diff --git a/yardstick/tests/unit/service/test_environment.py b/yardstick/tests/unit/service/test_environment.py
index 4af9a3958..be4882e30 100644
--- a/yardstick/tests/unit/service/test_environment.py
+++ b/yardstick/tests/unit/service/test_environment.py
@@ -6,16 +6,16 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-import unittest
import mock
+from yardstick.common.exceptions import UnsupportedPodFormatError
from yardstick.service.environment import Environment
from yardstick.service.environment import AnsibleCommon
-from yardstick.common.exceptions import UnsupportedPodFormatError
+from yardstick.tests.unit import base as ut_base
-class EnvironmentTestCase(unittest.TestCase):
+class EnvironmentTestCase(ut_base.BaseUnitTestCase):
def test_get_sut_info(self):
pod_info = {
@@ -31,11 +31,11 @@ class EnvironmentTestCase(unittest.TestCase):
]
}
- AnsibleCommon.gen_inventory_ini_dict = mock.MagicMock()
- AnsibleCommon.get_sut_info = mock.MagicMock(return_value={'node1': {}})
-
- env = Environment(pod=pod_info)
- env.get_sut_info()
+ with mock.patch.object(AnsibleCommon, 'gen_inventory_ini_dict'), \
+ mock.patch.object(AnsibleCommon, 'get_sut_info',
+ return_value={'node1': {}}):
+ env = Environment(pod=pod_info)
+ env.get_sut_info()
def test_get_sut_info_pod_str(self):
pod_info = 'nodes'
@@ -43,7 +43,3 @@ class EnvironmentTestCase(unittest.TestCase):
env = Environment(pod=pod_info)
with self.assertRaises(UnsupportedPodFormatError):
env.get_sut_info()
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/yardstick/tests/unit/test_cmd/test_NSBperf.py b/yardstick/tests/unit/test_cmd/test_NSBperf.py
index d64b0c551..5de892212 100644
--- a/yardstick/tests/unit/test_cmd/test_NSBperf.py
+++ b/yardstick/tests/unit/test_cmd/test_NSBperf.py
@@ -11,15 +11,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-#
-from __future__ import absolute_import
-import unittest
-import mock
-import subprocess
+import argparse
import os
+import subprocess
+import unittest
+
+import mock
+from six.moves import builtins
-from yardstick.cmd.NSBperf import YardstickNSCli
from yardstick.cmd import NSBperf
@@ -32,30 +32,39 @@ class TestHandler(unittest.TestCase):
class TestYardstickNSCli(unittest.TestCase):
+
+ def setUp(self):
+ self._mock_print = mock.patch.object(builtins, 'print')
+ self.mock_print = self._mock_print.start()
+ self.addCleanup(self._stop_mocks)
+
+ def _stop_mocks(self):
+ self._mock_print.stop()
+
def test___init__(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
self.assertIsNotNone(yardstick_ns_cli)
def test_generate_final_report(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
test_case = "tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml"
if os.path.isfile("/tmp/yardstick.out"):
os.remove('/tmp/yardstick.out')
self.assertIsNone(yardstick_ns_cli.generate_final_report(test_case))
def test_generate_kpi_results(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
tkey = "cpu"
tgen = {"cpu": {"ipc": 0}}
self.assertIsNone(yardstick_ns_cli.generate_kpi_results(tkey, tgen))
def test_generate_nfvi_results(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
nfvi = {"collect_stats": {"cpu": {"ipc": 0, "Hz": 2.6}}}
self.assertIsNone(yardstick_ns_cli.generate_nfvi_results(nfvi))
def test_handle_list_options(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
CLI_PATH = os.path.dirname(os.path.realpath(__file__))
repo_dir = CLI_PATH + "/../../../"
test_path = os.path.join(repo_dir, "../samples/vnf_samples/nsut/")
@@ -68,16 +77,21 @@ class TestYardstickNSCli(unittest.TestCase):
args, test_path)
def test_main(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
yardstick_ns_cli.parse_arguments = mock.Mock(return_value=0)
yardstick_ns_cli.handle_list_options = mock.Mock(return_value=0)
yardstick_ns_cli.terminate_if_less_options = mock.Mock(return_value=0)
yardstick_ns_cli.run_test = mock.Mock(return_value=0)
self.assertIsNone(yardstick_ns_cli.main())
- def test_parse_arguments(self):
- yardstick_ns_cli = YardstickNSCli()
- self.assertRaises(SystemExit, yardstick_ns_cli.parse_arguments)
+ @mock.patch.object(argparse.ArgumentParser, 'parse_args')
+ def test_parse_arguments(self, mock_parse):
+ class DummyArgs(object):
+ var1 = 'value1'
+
+ mock_parse.return_value = DummyArgs
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
+ self.assertIn('var1', yardstick_ns_cli.parse_arguments())
def test_run_test(self):
cur_dir = os.getcwd()
@@ -85,7 +99,7 @@ class TestYardstickNSCli(unittest.TestCase):
YARDSTICK_REPOS_DIR = os.path.join(CLI_PATH + "/../../")
test_path = os.path.join(YARDSTICK_REPOS_DIR,
"../samples/vnf_samples/nsut/")
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
subprocess.check_output = mock.Mock(return_value=0)
args = {"vnf": "vpe",
"test": "tc_baremetal_rfc2544_ipv4_1flow_1518B.yaml"}
@@ -103,13 +117,13 @@ class TestYardstickNSCli(unittest.TestCase):
os.chdir(cur_dir)
def test_terminate_if_less_options(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
args = {"vnf": False}
self.assertRaises(SystemExit,
yardstick_ns_cli.terminate_if_less_options, args)
def test_validate_input(self):
- yardstick_ns_cli = YardstickNSCli()
+ yardstick_ns_cli = NSBperf.YardstickNSCli()
self.assertEqual(1, yardstick_ns_cli.validate_input("", 4))
NSBperf.input = lambda _: 'yes'
self.assertEqual(1, yardstick_ns_cli.validate_input(5, 4))
diff --git a/yardstick/tests/unit/test_ssh.py b/yardstick/tests/unit/test_ssh.py
index f92290070..5cf1e50a0 100644
--- a/yardstick/tests/unit/test_ssh.py
+++ b/yardstick/tests/unit/test_ssh.py
@@ -13,10 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# yardstick comment: this file is a modified copy of
-# rally/tests/unit/common/test_sshutils.py
-
-from __future__ import absolute_import
import os
import socket
import unittest
@@ -26,8 +22,8 @@ from itertools import count
import mock
from oslo_utils import encodeutils
+from yardstick.common import exceptions
from yardstick import ssh
-from yardstick.ssh import SSHError, SSHTimeout
from yardstick.ssh import SSH
from yardstick.ssh import AutoConnectSSH
@@ -127,7 +123,7 @@ class SSHTestCase(unittest.TestCase):
dss = mock_paramiko.dsskey.DSSKey
rsa.from_private_key.side_effect = mock_paramiko.SSHException
dss.from_private_key.side_effect = mock_paramiko.SSHException
- self.assertRaises(ssh.SSHError, self.test_client._get_pkey, "key")
+ self.assertRaises(exceptions.SSHError, self.test_client._get_pkey, "key")
@mock.patch("yardstick.ssh.six.moves.StringIO")
@mock.patch("yardstick.ssh.paramiko")
@@ -194,13 +190,13 @@ class SSHTestCase(unittest.TestCase):
test_ssh = ssh.SSH("admin", "example.net", pkey="key")
- with self.assertRaises(SSHError) as raised:
+ with self.assertRaises(exceptions.SSHError) as raised:
test_ssh._get_client()
- self.assertEqual(mock_paramiko.SSHClient.call_count, 1)
- self.assertEqual(mock_paramiko.AutoAddPolicy.call_count, 1)
- self.assertEqual(fake_client.set_missing_host_key_policy.call_count, 1)
- self.assertEqual(fake_client.connect.call_count, 1)
+ mock_paramiko.SSHClient.assert_called_once()
+ mock_paramiko.AutoAddPolicy.assert_called_once()
+ fake_client.set_missing_host_key_policy.assert_called_once()
+ fake_client.connect.assert_called_once()
exc_str = str(raised.exception)
self.assertIn('raised during connect', exc_str)
self.assertIn('MyError', exc_str)
@@ -245,18 +241,18 @@ class SSHTestCase(unittest.TestCase):
@mock.patch("yardstick.ssh.time")
def test_wait_timeout(self, mock_time):
mock_time.time.side_effect = [1, 50, 150]
- self.test_client.execute = mock.Mock(side_effect=[ssh.SSHError,
- ssh.SSHError,
+ self.test_client.execute = mock.Mock(side_effect=[exceptions.SSHError,
+ exceptions.SSHError,
0])
- self.assertRaises(ssh.SSHTimeout, self.test_client.wait)
+ self.assertRaises(exceptions.SSHTimeout, self.test_client.wait)
self.assertEqual([mock.call("uname")] * 2,
self.test_client.execute.mock_calls)
@mock.patch("yardstick.ssh.time")
def test_wait(self, mock_time):
mock_time.time.side_effect = [1, 50, 100]
- self.test_client.execute = mock.Mock(side_effect=[ssh.SSHError,
- ssh.SSHError,
+ self.test_client.execute = mock.Mock(side_effect=[exceptions.SSHError,
+ exceptions.SSHError,
0])
self.test_client.wait()
self.assertEqual([mock.call("uname")] * 3,
@@ -333,7 +329,7 @@ class SSHRunTestCase(unittest.TestCase):
def test_run_nonzero_status(self, mock_select):
mock_select.select.return_value = ([], [], [])
self.fake_session.recv_exit_status.return_value = 1
- self.assertRaises(ssh.SSHError, self.test_client.run, "cmd")
+ self.assertRaises(exceptions.SSHError, self.test_client.run, "cmd")
self.assertEqual(1, self.test_client.run("cmd", raise_on_error=False))
@mock.patch("yardstick.ssh.select")
@@ -401,7 +397,7 @@ class SSHRunTestCase(unittest.TestCase):
def test_run_select_error(self, mock_select):
self.fake_session.exit_status_ready.return_value = False
mock_select.select.return_value = ([], [], [True])
- self.assertRaises(ssh.SSHError, self.test_client.run, "cmd")
+ self.assertRaises(exceptions.SSHError, self.test_client.run, "cmd")
@mock.patch("yardstick.ssh.time")
@mock.patch("yardstick.ssh.select")
@@ -409,7 +405,7 @@ class SSHRunTestCase(unittest.TestCase):
mock_time.time.side_effect = [1, 3700]
mock_select.select.return_value = ([], [], [])
self.fake_session.exit_status_ready.return_value = False
- self.assertRaises(ssh.SSHTimeout, self.test_client.run, "cmd")
+ self.assertRaises(exceptions.SSHTimeout, self.test_client.run, "cmd")
@mock.patch("yardstick.ssh.open", create=True)
def test__put_file_shell(self, mock_open):
@@ -514,7 +510,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh._get_client = mock__get_client = mock.Mock()
auto_connect_ssh._connect()
- self.assertEqual(mock__get_client.call_count, 1)
+ mock__get_client.assert_called_once()
def test___init___negative(self):
with self.assertRaises(TypeError):
@@ -529,9 +525,9 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh = AutoConnectSSH('user1', 'host1', wait=10)
auto_connect_ssh._get_client = mock__get_client = mock.Mock()
- mock__get_client.side_effect = SSHError
+ mock__get_client.side_effect = exceptions.SSHError
- with self.assertRaises(SSHTimeout):
+ with self.assertRaises(exceptions.SSHTimeout):
auto_connect_ssh._connect()
self.assertEqual(mock_time.time.call_count, 12)
@@ -547,7 +543,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.get_file_obj('remote/path', mock.Mock())
- self.assertEqual(mock_sftp.getfo.call_count, 1)
+ mock_sftp.getfo.assert_called_once()
def test__make_dict(self):
auto_connect_ssh = AutoConnectSSH('user1', 'host1')
@@ -584,7 +580,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.put('a', 'z')
with mock_scp_client_type() as mock_scp_client:
- self.assertEqual(mock_scp_client.put.call_count, 1)
+ mock_scp_client.put.assert_called_once()
@mock.patch('yardstick.ssh.SCPClient')
def test_get(self, mock_scp_client_type):
@@ -593,7 +589,7 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh.get('a', 'z')
with mock_scp_client_type() as mock_scp_client:
- self.assertEqual(mock_scp_client.get.call_count, 1)
+ mock_scp_client.get.assert_called_once()
def test_put_file(self):
auto_connect_ssh = AutoConnectSSH('user1', 'host1')
@@ -601,4 +597,4 @@ class TestAutoConnectSSH(unittest.TestCase):
auto_connect_ssh._put_file_sftp = mock_put_sftp = mock.Mock()
auto_connect_ssh.put_file('a', 'b')
- self.assertEqual(mock_put_sftp.call_count, 1)
+ mock_put_sftp.assert_called_once()