11 files changed, 322 insertions, 129 deletions
diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml
index a37f83b83..3e630caf2 100644
--- a/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml
+++ b/tests/opnfv/test_cases/opnfv_yardstick_tc025.yaml
@@ -19,7 +19,7 @@ description: >
 {% set file = file or '/etc/yardstick/pod.yaml' %}
 {% set jump_host = jump_host or 'node0' %}
 {% set attack_host = attack_host or 'node1' %}
-{% set monitor_time = monitor_time or 180 %}
+{% set monitor_time = monitor_time or 30 %}
 
 scenarios:
 -
diff --git a/tests/unit/benchmark/contexts/test_kubernetes.py b/tests/unit/benchmark/contexts/test_kubernetes.py
index 4976a9fe0..3a926f85c 100644
--- a/tests/unit/benchmark/contexts/test_kubernetes.py
+++ b/tests/unit/benchmark/contexts/test_kubernetes.py
@@ -47,13 +47,15 @@ class KubernetesTestCase(unittest.TestCase):
         # clear kubernetes contexts from global list so we don't break other tests
         Context.list = []
 
+    @mock.patch('{}.KubernetesContext._delete_services'.format(prefix))
     @mock.patch('{}.KubernetesContext._delete_ssh_key'.format(prefix))
     @mock.patch('{}.KubernetesContext._delete_rcs'.format(prefix))
     @mock.patch('{}.KubernetesContext._delete_pods'.format(prefix))
     def test_undeploy(self,
                       mock_delete_pods,
                       mock_delete_rcs,
-                      mock_delete_ssh):
+                      mock_delete_ssh,
+                      mock_delete_services):
 
         k8s_context = KubernetesContext()
         k8s_context.init(context_cfg)
@@ -61,7 +63,9 @@ class KubernetesTestCase(unittest.TestCase):
         self.assertTrue(mock_delete_ssh.called)
         self.assertTrue(mock_delete_rcs.called)
         self.assertTrue(mock_delete_pods.called)
+        self.assertTrue(mock_delete_services.called)
 
+    @mock.patch('{}.KubernetesContext._create_services'.format(prefix))
     @mock.patch('{}.KubernetesContext._wait_until_running'.format(prefix))
     @mock.patch('{}.KubernetesTemplate.get_rc_pods'.format(prefix))
     @mock.patch('{}.KubernetesContext._create_rcs'.format(prefix))
@@ -70,7 +74,8 @@
                     mock_set_ssh_key,
                     mock_create_rcs,
                     mock_get_rc_pods,
-                    mock_wait_until_running):
+                    mock_wait_until_running,
+                    mock_create_services):
 
         k8s_context = KubernetesContext()
         k8s_context.init(context_cfg)
@@ -78,6 +83,7 @@ class KubernetesTestCase(unittest.TestCase):
         k8s_context.deploy()
         self.assertTrue(mock_set_ssh_key.called)
         self.assertTrue(mock_create_rcs.called)
+        self.assertTrue(mock_create_services.called)
         self.assertTrue(mock_get_rc_pods.called)
         self.assertTrue(mock_wait_until_running.called)
@@ -106,14 +112,39 @@ class KubernetesTestCase(unittest.TestCase):
         mock_read_pod_status.return_value = 'Running'
         k8s_context._wait_until_running()
 
-    @mock.patch('{}.k8s_utils.get_pod_list'.format(prefix))
-    def test_get_server(self, mock_get_pod_list):
+    @mock.patch('{}.k8s_utils.get_pod_by_name'.format(prefix))
+    @mock.patch('{}.KubernetesContext._get_node_ip'.format(prefix))
+    @mock.patch('{}.k8s_utils.get_service_by_name'.format(prefix))
+    def test_get_server(self,
+                        mock_get_service_by_name,
+                        mock_get_node_ip,
+                        mock_get_pod_by_name):
+        class Service(object):
+            def __init__(self):
+                self.name = 'yardstick'
+                self.node_port = 30000
+
+        class Services(object):
+            def __init__(self):
+                self.ports = [Service()]
+
+        class Status(object):
+            def __init__(self):
+                self.pod_ip = '172.16.10.131'
+
+        class Pod(object):
+            def __init__(self):
+                self.status = Status()
+
         k8s_context = KubernetesContext()
         k8s_context.init(context_cfg)
 
-        mock_get_pod_list.return_value.items = []
+        mock_get_service_by_name.return_value = Services()
+        mock_get_pod_by_name.return_value = Pod()
+        mock_get_node_ip.return_value = '172.16.10.131'
+
         server = k8s_context._get_server('server')
-        self.assertIsNone(server)
+        self.assertIsNotNone(server)
 
     @mock.patch('{}.KubernetesContext._create_rc'.format(prefix))
     def test_create_rcs(self, mock_create_rc):
@@ -143,6 +174,28 @@ class KubernetesTestCase(unittest.TestCase):
         k8s_context._delete_rc({})
         self.assertTrue(mock_delete_replication_controller.called)
 
+    @mock.patch('{}.k8s_utils.get_node_list'.format(prefix))
+    def test_get_node_ip(self, mock_get_node_list):
+
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._get_node_ip()
+        self.assertTrue(mock_get_node_list.called)
+
+    @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.create')
+    def test_create_services(self, mock_create):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._create_services()
+        self.assertTrue(mock_create.called)
+
+    @mock.patch('yardstick.orchestrator.kubernetes.ServiceObject.delete')
+    def test_delete_services(self, mock_delete):
+        k8s_context = KubernetesContext()
+        k8s_context.init(context_cfg)
+        k8s_context._delete_services()
+        self.assertTrue(mock_delete.called)
+
 
 def main():
     unittest.main()
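Reviewer note (illustrative, not part of the patch): the four nested fake classes in test_get_server exist only to satisfy attribute access (service.ports[0].node_port and pod.status.pod_ip). On Python 3 the same fixtures could be built more compactly with types.SimpleNamespace; the names below mirror the test above:

    from types import SimpleNamespace

    # one service port entry, matching exactly what _get_server() reads
    services = SimpleNamespace(
        ports=[SimpleNamespace(name='yardstick', node_port=30000)])
    pod = SimpleNamespace(status=SimpleNamespace(pod_ip='172.16.10.131'))

    mock_get_service_by_name.return_value = services
    mock_get_pod_by_name.return_value = pod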
diff --git a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
index 0e303dc3b..f62a0fb3b 100644
--- a/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
+++ b/tests/unit/network_services/vnf_generic/vnf/test_tg_rfc2544_ixia.py
@@ -252,6 +252,7 @@ class TestIXIATrafficGen(unittest.TestCase):
         mock_traffic_profile = mock.Mock(autospec=TrafficProfile)
         mock_traffic_profile.get_traffic_definition.return_value = "64"
         mock_traffic_profile.params = self.TRAFFIC_PROFILE
+        # traffic_profile.ports is standardized on port_num
         mock_traffic_profile.ports = [0, 1]
 
         mock_ssh_instance = mock.Mock(autospec=mock_ssh.SSH)
@@ -346,8 +347,12 @@ class TestIXIATrafficGen(unittest.TestCase):
             'task_path': '/path/to/task'
         }
 
-        with mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open',
-                        create=True) as mock_open:
-            mock_open.return_value = mock.MagicMock()
+        @mock.patch('yardstick.benchmark.scenarios.networking.vnf_generic.open', create=True)
+        @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.open',
+                    mock.mock_open(), create=True)
+        @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_rfc2544_ixia.LOG.exception')
+        def _traffic_runner(*args):
             result = sut._traffic_runner(mock_traffic_profile)
             self.assertIsNone(result)
+
+        _traffic_runner()
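Reviewer note (illustrative, not part of the patch): the rewritten test uses the decorator form of mock.patch on a local function that is invoked immediately, which behaves like nested `with mock.patch(...)` blocks without the indentation. A minimal sketch of the pattern, with hypothetical targets (pkg.mod, do_work):

    import mock

    def test_example(self):
        @mock.patch('pkg.mod.open', mock.mock_open(), create=True)
        @mock.patch('pkg.mod.LOG.exception')
        def _run(*patched):
            # patches that pass `new` (mock_open above) inject no argument;
            # the bare LOG.exception patch arrives in *patched
            self.assertIsNone(do_work())

        _run()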
diff --git a/yardstick/benchmark/contexts/kubernetes.py b/yardstick/benchmark/contexts/kubernetes.py
index a39f63137..2334e5076 100644
--- a/yardstick/benchmark/contexts/kubernetes.py
+++ b/yardstick/benchmark/contexts/kubernetes.py
@@ -54,6 +54,7 @@ class KubernetesContext(Context):
 
         LOG.info('Launch containers')
         self._create_rcs()
+        self._create_services()
         time.sleep(1)
         self.template.get_rc_pods()
 
@@ -63,6 +64,7 @@ class KubernetesContext(Context):
         self._delete_ssh_key()
         self._delete_rcs()
         self._delete_pods()
+        self._delete_services()
 
         super(KubernetesContext, self).undeploy()
 
@@ -80,6 +82,14 @@ class KubernetesContext(Context):
                 return False
         return True
 
+    def _create_services(self):
+        for obj in self.template.service_objs:
+            obj.create()
+
+    def _delete_services(self):
+        for obj in self.template.service_objs:
+            obj.delete()
+
     def _create_rcs(self):
         for obj in self.template.k8s_objs:
             self._create_rc(obj.get_template())
@@ -126,15 +136,22 @@ class KubernetesContext(Context):
         utils.remove_file(self.public_key_path)
 
     def _get_server(self, name):
-        resp = k8s_utils.get_pod_list()
-        hosts = ({'name': n.metadata.name,
-                  'ip': n.status.pod_ip,
-                  'user': 'root',
-                  'key_filename': self.key_path,
-                  'private_ip': n.status.pod_ip}
-                 for n in resp.items if n.metadata.name.startswith(name))
-
-        return next(hosts, None)
+        service_name = '{}-service'.format(name)
+        service = k8s_utils.get_service_by_name(service_name).ports[0]
+
+        host = {
+            'name': service.name,
+            'ip': self._get_node_ip(),
+            'private_ip': k8s_utils.get_pod_by_name(name).status.pod_ip,
+            'ssh_port': service.node_port,
+            'user': 'root',
+            'key_filename': self.key_path,
+        }
+
+        return host
+
+    def _get_node_ip(self):
+        return k8s_utils.get_node_list().items[0].status.addresses[0].address
 
     def _get_network(self, attr_name):
         return None
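Reviewer note (illustrative, not part of the patch): with this change the host dict no longer points SSH at the pod IP; 'ip' is the first address of the first cluster node and 'ssh_port' is the service's NodePort, while 'private_ip' keeps the pod-network address. A hedged sketch of how a consumer could use the dict (paramiko is shown only as an example client; the server name is hypothetical):

    import paramiko

    host = k8s_context._get_server('host-k8s')   # dict shaped as above

    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=host['ip'],       # node address
                   port=host['ssh_port'],     # NodePort mapped to container port 22
                   username=host['user'],
                   key_filename=host['key_filename'])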
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 75703cf50..c175a950b 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -125,9 +125,10 @@ class Task(object):  # pragma: no cover
             except KeyboardInterrupt:
                 raise
             except Exception:
-                LOG.exception("Running test case %s failed!", case_name)
+                LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
                 testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
             else:
+                LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
                 testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
 
         if args.keep_deploy:
@@ -272,7 +273,9 @@ class Task(object):  # pragma: no cover
             runner = self.run_one_scenario(scenario, output_file)
             status = runner_join(runner)
             if status != 0:
-                LOG.error('Scenario: %s ERROR', scenario.get('type'))
+                LOG.error('Scenario NO.%s: "%s" ERROR!',
+                          scenarios.index(scenario) + 1,
+                          scenario.get('type'))
                 raise RuntimeError
             self.outputs.update(runner.get_output())
             result.extend(runner.get_result())
@@ -325,23 +328,30 @@ class Task(object):  # pragma: no cover
         # TODO support get multi hosts/vms info
         context_cfg = {}
-        if "host" in scenario_cfg:
-            context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+        server_name = scenario_cfg.get('options', {}).get('server_name', {})
 
-        if "target" in scenario_cfg:
-            if is_ip_addr(scenario_cfg["target"]):
-                context_cfg['target'] = {}
-                context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+        def config_context_target(cfg):
+            target = cfg['target']
+            if is_ip_addr(target):
+                context_cfg['target'] = {"ipaddr": target}
             else:
-                context_cfg['target'] = Context.get_server(
-                    scenario_cfg["target"])
-                if self._is_same_heat_context(scenario_cfg["host"],
-                                              scenario_cfg["target"]):
-                    context_cfg["target"]["ipaddr"] = \
-                        context_cfg["target"]["private_ip"]
+                context_cfg['target'] = Context.get_server(target)
+                if self._is_same_context(cfg["host"], target):
+                    context_cfg['target']["ipaddr"] = context_cfg['target']["private_ip"]
                 else:
-                    context_cfg["target"]["ipaddr"] = \
-                        context_cfg["target"]["ip"]
+                    context_cfg['target']["ipaddr"] = context_cfg['target']["ip"]
+
+        host_name = server_name.get('host', scenario_cfg.get('host'))
+        if host_name:
+            context_cfg['host'] = Context.get_server(host_name)
+
+        for item in [server_name, scenario_cfg]:
+            try:
+                config_context_target(item)
+            except KeyError:
+                pass
+            else:
+                break
 
         if "targets" in scenario_cfg:
             ip_list = []
@@ -351,8 +361,8 @@ class Task(object):  # pragma: no cover
                     context_cfg['target'] = {}
                 else:
                     context_cfg['target'] = Context.get_server(target)
-                    if self._is_same_heat_context(scenario_cfg["host"],
-                                                  target):
+                    if self._is_same_context(scenario_cfg["host"],
+                                             target):
                         ip_list.append(context_cfg["target"]["private_ip"])
                     else:
                         ip_list.append(context_cfg["target"]["ip"])
@@ -370,7 +380,7 @@ class Task(object):  # pragma: no cover
 
         return runner
 
-    def _is_same_heat_context(self, host_attr, target_attr):
+    def _is_same_context(self, host_attr, target_attr):
         """check if two servers are in the same heat context
         host_attr: either a name for a server created by yardstick or a dict
             with attribute name mapping when using external heat templates
@@ -378,7 +388,7 @@ class Task(object):  # pragma: no cover
             with attribute name mapping when using external heat templates
         """
         for context in self.contexts:
-            if context.__context_type__ != "Heat":
+            if context.__context_type__ not in {"Heat", "Kubernetes"}:
                 continue
 
             host = context._get_server(host_attr)
@@ -669,25 +679,24 @@ def parse_task_args(src_name, args):
 
 def change_server_name(scenario, suffix):
-    try:
-        host = scenario['host']
-    except KeyError:
-        pass
-    else:
-        try:
-            host['name'] += suffix
-        except TypeError:
-            scenario['host'] += suffix
 
-    try:
-        target = scenario['target']
-    except KeyError:
-        pass
-    else:
+    def add_suffix(cfg, key):
         try:
-            target['name'] += suffix
-        except TypeError:
-            scenario['target'] += suffix
+            value = cfg[key]
+        except KeyError:
+            pass
+        else:
+            try:
+                value['name'] += suffix
+            except TypeError:
+                cfg[key] += suffix
+
+    server_name = scenario.get('options', {}).get('server_name', {})
+
+    add_suffix(scenario, 'host')
+    add_suffix(scenario, 'target')
+    add_suffix(server_name, 'host')
+    add_suffix(server_name, 'target')
 
     try:
         key = 'targets'
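Reviewer note (illustrative, not part of the patch): the new resolution order lets options.server_name override the top-level host/target keys, and the for-loop falls back to scenario_cfg when server_name has no 'target' entry (the KeyError raised inside config_context_target skips to the next candidate). A worked example with hypothetical values:

    scenario_cfg = {
        'host': 'athena.demo',
        'target': 'ares.demo',
        'options': {'server_name': {'host': 'node1'}},
    }
    server_name = scenario_cfg.get('options', {}).get('server_name', {})

    host_name = server_name.get('host', scenario_cfg.get('host'))
    assert host_name == 'node1'   # server_name wins for the host

    # server_name has no 'target' key, so config_context_target(server_name)
    # raises KeyError and the loop retries with scenario_cfg: 'ares.demo' is used.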
+ defaults={"user": "root", "password": jump_host.get("pwd")} + ) + self.jump_connection.wait(timeout=600) + LOG.debug("ssh jump host success!") + self.host_ip = host['ip'] self.ipmi_ip = host.get("ipmi_ip", None) @@ -49,6 +64,7 @@ class BaremetalAttacker(BaseAttacker): self.fault_cfg = BaseAttacker.attacker_cfgs.get('bare-metal-down') self.check_script = self.get_script_fullpath( self.fault_cfg['check_script']) + self.inject_script = self.get_script_fullpath(self.fault_cfg['inject_script']) self.recovery_script = self.get_script_fullpath( self.fault_cfg['recovery_script']) @@ -70,39 +86,27 @@ class BaremetalAttacker(BaseAttacker): return True def inject_fault(self): - exit_status, stdout, stderr = self.connection.execute( - "sudo shutdown -h now") - LOG.debug("inject fault ret: %s out:%s err:%s", - exit_status, stdout, stderr) - if not exit_status: - LOG.info("inject fault success") + LOG.info("Inject fault START") + cmd = "sudo /bin/bash -s {0} {1} {2} {3}".format( + self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "off") + with open(self.inject_script, "r") as stdin_file: + if self.jump_connection is not None: + LOG.info("Power off node via IPMI") + self.jump_connection.execute(cmd, stdin=stdin_file) + else: + _execute_shell_command(cmd, stdin=stdin_file) + LOG.info("Inject fault END") def recover(self): - jump_host_name = self._config.get("jump_host", None) - self.jump_connection = None - if jump_host_name is not None: - host = self._context.get(jump_host_name, None) - - LOG.debug("jump_host ip:%s user:%s", host['ip'], host['user']) - self.jump_connection = ssh.SSH.from_node( - host, - # why do we allow pwd for password? - defaults={"user": "root", "password": host.get("pwd")} - ) - self.jump_connection.wait(timeout=600) - LOG.debug("ssh jump host success!") - - if self.jump_connection is not None: - with open(self.recovery_script, "r") as stdin_file: - self.jump_connection.execute( - "sudo /bin/bash -s {0} {1} {2} {3}".format( - self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"), - stdin=stdin_file) - else: - _execute_shell_command( - "sudo /bin/bash -s {0} {1} {2} {3}".format( - self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on"), - stdin=open(self.recovery_script, "r")) + LOG.info("Recover fault START") + cmd = "sudo /bin/bash -s {0} {1} {2} {3}".format( + self.ipmi_ip, self.ipmi_user, self.ipmi_pwd, "on") + with open(self.recovery_script, "r") as stdin_file: + if self.jump_connection is not None: + self.jump_connection.execute(cmd, stdin=stdin_file) + else: + _execute_shell_command(cmd, stdin=stdin_file) + LOG.info("Recover fault END") def _test(): # pragma: no cover diff --git a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml index ee7ea7d83..5f43a701a 100644 --- a/yardstick/benchmark/scenarios/availability/attacker_conf.yaml +++ b/yardstick/benchmark/scenarios/availability/attacker_conf.yaml @@ -23,6 +23,7 @@ kill-lxc-process: bare-metal-down: check_script: ha_tools/check_host_ping.bash + inject_script: ha_tools/ipmi_power.bash recovery_script: ha_tools/ipmi_power.bash stop-service: diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py index e4c232830..0cf7b9eab 100644 --- a/yardstick/common/kubernetes_utils.py +++ b/yardstick/common/kubernetes_utils.py @@ -28,6 +28,60 @@ def get_core_api(): # pragma: no cover return client.CoreV1Api() +def get_node_list(**kwargs): # pragma: no cover + core_v1_api = get_core_api() + try: + return core_v1_api.list_node(**kwargs) + except 
diff --git a/yardstick/common/kubernetes_utils.py b/yardstick/common/kubernetes_utils.py
index e4c232830..0cf7b9eab 100644
--- a/yardstick/common/kubernetes_utils.py
+++ b/yardstick/common/kubernetes_utils.py
@@ -28,6 +28,60 @@ def get_core_api():     # pragma: no cover
     return client.CoreV1Api()
 
 
+def get_node_list(**kwargs):        # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        return core_v1_api.list_node(**kwargs)
+    except ApiException:
+        LOG.exception('Get node list failed')
+        raise
+
+
+def create_service(template,
+                   namespace='default',
+                   wait=False,
+                   **kwargs):       # pragma: no cover
+    core_v1_api = get_core_api()
+    metadata = client.V1ObjectMeta(**template.get('metadata', {}))
+
+    ports = [client.V1ServicePort(**port) for port in
+             template.get('spec', {}).get('ports', [])]
+    template['spec']['ports'] = ports
+    spec = client.V1ServiceSpec(**template.get('spec', {}))
+
+    service = client.V1Service(metadata=metadata, spec=spec)
+
+    try:
+        core_v1_api.create_namespaced_service(namespace, service)
+    except ApiException:
+        LOG.exception('Create Service failed')
+        raise
+
+
+def delete_service(name,
+                   namespace='default',
+                   **kwargs):       # pragma: no cover
+    core_v1_api = get_core_api()
+    try:
+        core_v1_api.delete_namespaced_service(name, namespace, **kwargs)
+    except ApiException:
+        LOG.exception('Delete Service failed')
+
+
+def get_service_list(namespace='default', **kwargs):
+    core_v1_api = get_core_api()
+    try:
+        return core_v1_api.list_namespaced_service(namespace, **kwargs)
+    except ApiException:
+        LOG.exception('Get Service list failed')
+        raise
+
+
+def get_service_by_name(name):      # pragma: no cover
+    service_list = get_service_list()
+    return next((s.spec for s in service_list.items if s.metadata.name == name), None)
+
+
 def create_replication_controller(template,
                                   namespace='default',
                                   wait=False,
@@ -135,3 +189,8 @@ def get_pod_list(namespace='default'):      # pragma: no cover
     except ApiException:
         LOG.exception('Get pod list failed')
         raise
+
+
+def get_pod_by_name(name):  # pragma: no cover
+    pod_list = get_pod_list()
+    return next((n for n in pod_list.items if n.metadata.name.startswith(name)), None)
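Reviewer note (illustrative, not part of the patch): the lookup chain the Kubernetes context now depends on, with a hypothetical service/pod name:

    spec = get_service_by_name('host-k8s-service')   # V1ServiceSpec, or None
    node_port = spec.ports[0].node_port              # NodePort assigned by the cluster
    pod = get_pod_by_name('host-k8s')                # first pod whose name matches
    private_ip = pod.status.pod_ip

Note that both helpers return None on a miss, so a missing service or pod surfaces in _get_server() as an AttributeError rather than a descriptive error.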
diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
index cb8a34796..ee58172d8 100644
--- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py
+++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py
@@ -103,7 +103,9 @@ class IXIARFC2544Profile(TrexProfile):
 
         self.ports = [port for port in port_generator()]
 
-    def execute_traffic(self, traffic_generator, ixia_obj, mac={}, xfile=None):
+    def execute_traffic(self, traffic_generator, ixia_obj, mac=None, xfile=None):
+        if mac is None:
+            mac = {}
         if self.first_run:
             self.full_profile = {}
             self.pg_id = 0
@@ -121,15 +123,18 @@ class IXIARFC2544Profile(TrexProfile):
         return str(multiplier)
 
     def start_ixia_latency(self, traffic_generator, ixia_obj,
-                           mac={}, xfile=None):
+                           mac=None, xfile=None):
+        if mac is None:
+            mac = {}
         self.update_traffic_profile(traffic_generator)
         traffic = \
             self._get_ixia_traffic_profile(self.full_profile, mac, xfile)
-        self._ixia_traffic_generate(traffic_generator, traffic,
-                                    ixia_obj, xfile)
+        self._ixia_traffic_generate(traffic_generator, traffic, ixia_obj)
 
     def get_drop_percentage(self, traffic_generator, samples, tol_min,
-                            tolerance, ixia_obj, mac={}, xfile=None):
+                            tolerance, ixia_obj, mac=None, xfile=None):
+        if mac is None:
+            mac = {}
         status = 'Running'
         drop_percent = 100
         in_packets = sum([samples[iface]['in_packets'] for iface in samples])
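Reviewer note: the mac={} defaults removed above are the classic mutable-default pitfall; Python evaluates a default value once, at definition time, so every call shares the same dict. A minimal demonstration:

    def remember(item, seen=[]):
        seen.append(item)
        return seen

    remember('a')   # ['a']
    remember('b')   # ['a', 'b'] -- state leaked from the first call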
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
index 449f22296..12266d6ad 100644
--- a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_ixia.py
@@ -13,6 +13,8 @@
 # limitations under the License.
 
 from __future__ import absolute_import
+
+import json
 import time
 import os
 import logging
@@ -79,31 +81,31 @@ class IxiaResourceHelper(ClientResourceHelper):
             latency = stats[0]
 
         samples = {}
-        for interface in self.vnfd_helper.interfaces:
+        # this is not DPDK port num, but this is whatever number we gave
+        # when we selected ports and programmed the profile
+        for port_num in ports:
             try:
-                name = interface["name"]
-                # this is not DPDK port num, but this is whatever number we gave
-                # when we selected ports and programmed the profile
-                port = self.vnfd_helper.port_num(name)
-                if port in ports:
-                    samples[name] = {
-                        "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][port]),
-                        "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][port]),
-                        "rx_throughput_mbps": float(last_result["Rx_Rate_Mbps"][port]),
-                        "tx_throughput_mbps": float(last_result["Tx_Rate_Mbps"][port]),
-                        "in_packets": int(last_result["Valid_Frames_Rx"][port]),
-                        "out_packets": int(last_result["Frames_Tx"][port]),
-                        "RxThroughput": int(last_result["Valid_Frames_Rx"][port]) / 30,
-                        "TxThroughput": int(last_result["Frames_Tx"][port]) / 30,
-                    }
-                    if key:
-                        avg_latency = latency["Store-Forward_Avg_latency_ns"][port]
-                        min_latency = latency["Store-Forward_Min_latency_ns"][port]
-                        max_latency = latency["Store-Forward_Max_latency_ns"][port]
-                        samples[name][key] = \
-                            {"Store-Forward_Avg_latency_ns": avg_latency,
-                             "Store-Forward_Min_latency_ns": min_latency,
-                             "Store-Forward_Max_latency_ns": max_latency}
+                # reverse lookup port name from port_num so the stats dict is descriptive
+                intf = self.vnfd_helper.find_interface_by_port(port_num)
+                port_name = intf["name"]
+                samples[port_name] = {
+                    "rx_throughput_kps": float(last_result["Rx_Rate_Kbps"][port_num]),
+                    "tx_throughput_kps": float(last_result["Tx_Rate_Kbps"][port_num]),
+                    "rx_throughput_mbps": float(last_result["Rx_Rate_Mbps"][port_num]),
+                    "tx_throughput_mbps": float(last_result["Tx_Rate_Mbps"][port_num]),
+                    "in_packets": int(last_result["Valid_Frames_Rx"][port_num]),
+                    "out_packets": int(last_result["Frames_Tx"][port_num]),
+                    "RxThroughput": int(last_result["Valid_Frames_Rx"][port_num]) / 30,
+                    "TxThroughput": int(last_result["Frames_Tx"][port_num]) / 30,
+                }
+                if key:
+                    avg_latency = latency["Store-Forward_Avg_latency_ns"][port_num]
+                    min_latency = latency["Store-Forward_Min_latency_ns"][port_num]
+                    max_latency = latency["Store-Forward_Max_latency_ns"][port_num]
+                    samples[port_name][key] = \
+                        {"Store-Forward_Avg_latency_ns": avg_latency,
+                         "Store-Forward_Min_latency_ns": min_latency,
+                         "Store-Forward_Max_latency_ns": max_latency}
             except IndexError:
                 pass
 
@@ -128,19 +130,27 @@ class IxiaResourceHelper(ClientResourceHelper):
 
         self.client.ix_assign_ports()
 
+        ixia_file = find_relative_file("ixia_traffic.cfg",
+                                       self.scenario_helper.scenario_cfg["task_path"])
+
+        static_traffic = {}
+        with open(ixia_file) as stream:
+            try:
+                static_traffic = json.load(stream)
+            except Exception:
+                LOG.exception("")
         mac = {}
-        # TODO: shouldn't this index map to port number we used to generate the profile
-        for index, interface in enumerate(self.vnfd_helper.interfaces, 1):
-            virt_intf = interface["virtual-interface"]
-            mac.update({
-                "src_mac_{}".format(index): virt_intf.get("local_mac", default),
-                "dst_mac_{}".format(index): virt_intf.get("dst_mac", default),
-            })
+        for vld_id, traffic in static_traffic.items():
+            intfs = self.vnfd_helper.port_pairs.networks.get(vld_id, [])
+            interface = next(iter(intfs), None)
+            if interface:
+                virt_intf = self.vnfd_helper.find_interface(name=interface)["virtual-interface"]
+                # we only know static traffic id by reading the json
+                # this is used by _get_ixia_traffic_profile
+                mac["src_mac_{}".format(traffic["id"])] = virt_intf.get("local_mac", default)
+                mac["dst_mac_{}".format(traffic["id"])] = virt_intf.get("dst_mac", default)
 
         samples = {}
-
-        ixia_file = find_relative_file("ixia_traffic.cfg",
-                                       self.scenario_helper.scenario_cfg["task_path"])
         # Generate ixia traffic config...
         try:
             while not self._terminated.value:
@@ -161,8 +171,7 @@ class IxiaResourceHelper(ClientResourceHelper):
             self.client.ix_stop_traffic()
             self._queue.put(samples)
         except Exception:
-            LOG.info("Run Traffic terminated")
-            pass
+            LOG.exception("Run Traffic terminated")
 
         if not self.rfc_helper.is_done():
             self._terminated.value = 1
diff --git a/yardstick/orchestrator/kubernetes.py b/yardstick/orchestrator/kubernetes.py
index 3d4548527..198eeac6d 100644
--- a/yardstick/orchestrator/kubernetes.py
+++ b/yardstick/orchestrator/kubernetes.py
@@ -38,7 +38,7 @@ class KubernetesObject(object):
                 "template": {
                     "metadata": {
                         "labels": {
-                            "app": ""
+                            "app": name
                         }
                     },
                     "spec": {
@@ -114,6 +114,35 @@ class KubernetesObject(object):
         self._add_volume(key_volume)
 
 
+class ServiceObject(object):
+
+    def __init__(self, name):
+        self.name = '{}-service'.format(name)
+        self.template = {
+            'metadata': {
+                'name': '{}-service'.format(name)
+            },
+            'spec': {
+                'type': 'NodePort',
+                'ports': [
+                    {
+                        'port': 22,
+                        'protocol': 'TCP'
+                    }
+                ],
+                'selector': {
+                    'app': name
+                }
+            }
+        }
+
+    def create(self):
+        k8s_utils.create_service(self.template)
+
+    def delete(self):
+        k8s_utils.delete_service(self.name)
+
+
 class KubernetesTemplate(object):
 
     def __init__(self, name, template_cfg):
@@ -125,6 +154,8 @@ class KubernetesTemplate(object):
                          ssh_key=self.ssh_key,
                          **cfg)
                          for rc, cfg in template_cfg.items()]
+        self.service_objs = [ServiceObject(s) for s in self.rcs]
+
         self.pods = []
 
     def _get_rc_name(self, rc_name):
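Reviewer note (illustrative, not part of the patch): the "app": name label fix is what makes the new ServiceObject work at all; a NodePort service only forwards to pods whose labels satisfy its selector, and the old empty label could never match {'app': name}. In miniature:

    pod_labels = {'app': 'host-k8s'}   # now set by KubernetesObject
    selector = {'app': 'host-k8s'}     # set by ServiceObject
    assert all(pod_labels.get(k) == v for k, v in selector.items())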