aboutsummaryrefslogtreecommitdiffstats
path: root/yardstick/network_services
diff options
context:
space:
mode:
Diffstat (limited to 'yardstick/network_services')
-rw-r--r--yardstick/network_services/traffic_profile/fixed.py60
-rw-r--r--yardstick/network_services/traffic_profile/http.py33
-rw-r--r--yardstick/network_services/traffic_profile/rfc2544.py106
-rw-r--r--yardstick/network_services/traffic_profile/traffic_profile.py499
-rw-r--r--yardstick/network_services/vnf_generic/vnf/tg_ping.py167
-rw-r--r--yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py285
-rw-r--r--yardstick/network_services/vnf_generic/vnf/tg_trex.py278
-rw-r--r--yardstick/network_services/vnf_generic/vnf/vpe_vnf.py331
8 files changed, 1759 insertions, 0 deletions
diff --git a/yardstick/network_services/traffic_profile/fixed.py b/yardstick/network_services/traffic_profile/fixed.py
new file mode 100644
index 000000000..a456c2bd7
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/fixed.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Fixed traffic profile definitions """
+
+from __future__ import absolute_import
+
+from yardstick.network_services.traffic_profile.base import TrafficProfile
+from stl.trex_stl_lib.trex_stl_streams import STLTXCont
+from stl.trex_stl_lib.trex_stl_client import STLStream
+from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
+from stl.trex_stl_lib import api as Pkt
+
+
+class FixedProfile(TrafficProfile):
+ """
+ This profile adds a single stream at the beginning of the traffic session
+ """
+ def __init__(self, tp_config):
+ super(FixedProfile, self).__init__(tp_config)
+ self.first_run = True
+
+ def execute(self, traffic_generator):
+ if self.first_run:
+ for index, ports in enumerate(traffic_generator.my_ports):
+ ext_intf = \
+ traffic_generator.vnfd["vdu"][0]["external-interface"]
+ virtual_interface = ext_intf[index]["virtual-interface"]
+ src_ip = virtual_interface["local_ip"]
+ dst_ip = virtual_interface["dst_ip"]
+
+ traffic_generator.client.add_streams(
+ self._create_stream(src_ip, dst_ip),
+ ports=[ports])
+
+ traffic_generator.client.start(ports=traffic_generator.my_ports)
+ self.first_run = False
+
+ def _create_stream(self, src_ip, dst_ip):
+ base_frame = \
+ Pkt.Ether() / Pkt.IP(src=src_ip, dst=dst_ip) / Pkt.UDP(dport=12,
+ sport=1025)
+
+ frame_size = self.params["traffic_profile"]["frame_size"]
+ pad_size = max(0, frame_size - len(base_frame))
+ frame = base_frame / ("x" * max(0, pad_size))
+
+ frame_rate = self.params["traffic_profile"]["frame_rate"]
+ return STLStream(packet=STLPktBuilder(pkt=frame),
+ mode=STLTXCont(pps=frame_rate))
diff --git a/yardstick/network_services/traffic_profile/http.py b/yardstick/network_services/traffic_profile/http.py
new file mode 100644
index 000000000..2d00fb849
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/http.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Generic HTTP profile used by different Traffic generators """
+
+from __future__ import absolute_import
+
+from yardstick.network_services.traffic_profile.base import TrafficProfile
+
+
+class TrafficProfileGenericHTTP(TrafficProfile):
+ """ This Class handles setup of generic http traffic profile """
+
+ def __init__(self, TrafficProfile):
+ super(TrafficProfileGenericHTTP, self).__init__(TrafficProfile)
+
+ def execute(self, traffic_generator):
+        ''' send traffic for a selected traffic generator '''
+ pass
+
+ def _send_http_request(self, server, port, locator, **kwargs):
+ ''' send http request for a given server, port '''
+ pass
diff --git a/yardstick/network_services/traffic_profile/rfc2544.py b/yardstick/network_services/traffic_profile/rfc2544.py
new file mode 100644
index 000000000..99964d329
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/rfc2544.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" RFC2544 Throughput implementation """
+
+from __future__ import absolute_import
+from __future__ import division
+import logging
+
+from yardstick.network_services.traffic_profile.traffic_profile \
+ import TrexProfile
+
+LOGGING = logging.getLogger(__name__)
+
+
+class RFC2544Profile(TrexProfile):
+    """ This class handles rfc2544 implementation. """
+
+ def __init__(self, traffic_generator):
+ super(RFC2544Profile, self).__init__(traffic_generator)
+ self.max_rate = None
+ self.min_rate = None
+ self.rate = 100
+ self.tmp_drop = None
+ self.tmp_throughput = None
+ self.profile_data = None
+
+ def execute(self, traffic_generator):
+ ''' Generate the stream and run traffic on the given ports '''
+ if self.first_run:
+ self.profile_data = self.params.get('private', '')
+ ports = [traffic_generator.my_ports[0]]
+ traffic_generator.client.add_streams(self.get_streams(),
+ ports=ports[0])
+ profile_data = self.params.get('public', '')
+ if profile_data:
+ self.profile_data = profile_data
+ ports.append(traffic_generator.my_ports[1])
+ traffic_generator.client.add_streams(self.get_streams(),
+ ports=ports[1])
+
+ self.max_rate = self.rate
+ self.min_rate = 0
+ traffic_generator.client.start(ports=ports,
+ mult=self.get_multiplier(),
+ duration=30, force=True)
+ self.tmp_drop = 0
+ self.tmp_throughput = 0
+
+ def get_multiplier(self):
+        ''' Get the rate at which the next iteration is to run '''
+ self.rate = round((self.max_rate + self.min_rate) / 2.0, 2)
+ multiplier = round(self.rate / self.pps, 2)
+ return str(multiplier)
+
+ def get_drop_percentage(self, traffic_generator,
+ samples, tol_min, tolerance):
+ ''' Calculate the drop percentage and run the traffic '''
+ in_packets = sum([samples[iface]['in_packets'] for iface in samples])
+ out_packets = sum([samples[iface]['out_packets'] for iface in samples])
+ packet_drop = abs(out_packets - in_packets)
+ drop_percent = 100.0
+ try:
+ drop_percent = round((packet_drop / float(out_packets)) * 100, 2)
+ except ZeroDivisionError:
+ LOGGING.info('No traffic is flowing')
+ samples['TxThroughput'] = out_packets / 30
+ samples['RxThroughput'] = in_packets / 30
+ samples['CurrentDropPercentage'] = drop_percent
+ samples['Throughput'] = self.tmp_throughput
+ samples['DropPercentage'] = self.tmp_drop
+ if drop_percent > tolerance and self.tmp_throughput == 0:
+ samples['Throughput'] = (in_packets / 30)
+ samples['DropPercentage'] = drop_percent
+ if self.first_run:
+ max_supported_rate = out_packets / 30
+ self.rate = max_supported_rate
+ self.first_run = False
+ if drop_percent > tolerance:
+ self.max_rate = self.rate
+ elif drop_percent < tol_min:
+ self.min_rate = self.rate
+ if drop_percent >= self.tmp_drop:
+ self.tmp_drop = drop_percent
+ self.tmp_throughput = (in_packets / 30)
+ samples['Throughput'] = (in_packets / 30)
+ samples['DropPercentage'] = drop_percent
+ else:
+ samples['Throughput'] = (in_packets / 30)
+ samples['DropPercentage'] = drop_percent
+
+ traffic_generator.client.clear_stats(ports=traffic_generator.my_ports)
+ traffic_generator.client.start(ports=traffic_generator.my_ports,
+ mult=self.get_multiplier(),
+ duration=30, force=True)
+ return samples
diff --git a/yardstick/network_services/traffic_profile/traffic_profile.py b/yardstick/network_services/traffic_profile/traffic_profile.py
new file mode 100644
index 000000000..156cc6644
--- /dev/null
+++ b/yardstick/network_services/traffic_profile/traffic_profile.py
@@ -0,0 +1,499 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Trex Traffic Profile definitions """
+
+from __future__ import absolute_import
+import struct
+import socket
+import logging
+from random import SystemRandom
+import six
+
+from yardstick.network_services.traffic_profile.base import TrafficProfile
+from stl.trex_stl_lib.trex_stl_client import STLStream
+from stl.trex_stl_lib.trex_stl_streams import STLFlowLatencyStats
+from stl.trex_stl_lib.trex_stl_streams import STLTXCont
+from stl.trex_stl_lib.trex_stl_streams import STLProfile
+from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmWrFlowVar
+from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFlowVar
+from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLPktBuilder
+from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLScVmRaw
+from stl.trex_stl_lib.trex_stl_packet_builder_scapy import STLVmFixIpv4
+from stl.trex_stl_lib import api as Pkt
+
+
+class TrexProfile(TrafficProfile):
+ """ This class handles Trex Traffic profile generation and execution """
+
+ def __init__(self, yaml_data):
+ super(TrexProfile, self).__init__(yaml_data)
+ self.flows = 100
+ self.pps = 100
+ self.pg_id = 0
+ self.first_run = True
+ self.streams = 1
+ self.profile_data = []
+ self.profile = None
+ self.base_pkt = None
+ self.fsize = None
+ self.trex_vm = None
+ self.vms = []
+ self.rate = None
+ self.ip_packet = None
+ self.ip6_packet = None
+ self.udp_packet = None
+ self.udp_dport = ''
+ self.udp_sport = ''
+ self.qinq_packet = None
+ self.qinq = False
+ self.vm_flow_vars = []
+ self.packets = []
+ self.ether_packet = []
+
+ def execute(self, traffic_generator):
+ """ Generate the stream and run traffic on the given ports """
+ pass
+
+ def _set_ether_fields(self, **kwargs):
+ """ set ethernet protocol fields """
+ if not self.ether_packet:
+ self.ether_packet = Pkt.Ether()
+ for key, value in six.iteritems(kwargs):
+ setattr(self.ether_packet, key, value)
+
+ def _set_ip_fields(self, **kwargs):
+ """ set l3 ipv4 protocol fields """
+
+ if not self.ip_packet:
+ self.ip_packet = Pkt.IP()
+ for key in kwargs:
+ setattr(self.ip_packet, key, kwargs[key])
+
+ def _set_ip6_fields(self, **kwargs):
+ """ set l3 ipv6 protocol fields """
+ if not self.ip6_packet:
+ self.ip6_packet = Pkt.IPv6()
+ for key in kwargs:
+ setattr(self.ip6_packet, key, kwargs[key])
+
+ def _set_udp_fields(self, **kwargs):
+ """ set l4 udp ports fields """
+ if not self.udp_packet:
+ self.udp_packet = Pkt.UDP()
+ for key in kwargs:
+ setattr(self.udp_packet, key, kwargs[key])
+
+ def set_src_mac(self, src_mac):
+ """ set source mac address fields """
+ src_macs = src_mac.split('-')
+ min_value = src_macs[0]
+ if len(src_macs) == 1:
+ src_mac = min_value
+ self._set_ether_fields(src=src_mac)
+ else:
+ stl_vm_flow_var = STLVmFlowVar(name="mac_src",
+ min_value=1,
+ max_value=30,
+ size=4,
+ op='inc',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_src',
+ pkt_offset='Ether.src')
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_dst_mac(self, dst_mac):
+ """ set destination mac address fields """
+ dst_macs = dst_mac.split('-')
+ min_value = dst_macs[0]
+ if len(dst_macs) == 1:
+ dst_mac = min_value
+ self._set_ether_fields(dst=dst_mac)
+ else:
+ stl_vm_flow_var = STLVmFlowVar(name="mac_dst",
+ min_value=1,
+ max_value=30,
+ size=4,
+ op='inc',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='mac_dst',
+ pkt_offset='Ether.dst')
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_src_ip4(self, src_ip4):
+ """ set source ipv4 address fields """
+ src_ips = src_ip4.split('-')
+ min_value = src_ips[0]
+ max_value = src_ips[1] if len(src_ips) == 2 else src_ips[0]
+ if len(src_ips) == 1:
+ src_ip4 = min_value
+ self._set_ip_fields(src=src_ip4)
+ else:
+ stl_vm_flow_var = STLVmFlowVar(name="ip4_src",
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ op='random',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip4_src',
+ pkt_offset='IP.src')
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+ stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
+ self.vm_flow_vars.append(stl_vm_fix_ipv4)
+
+ def set_dst_ip4(self, dst_ip4):
+ """ set destination ipv4 address fields """
+ dst_ips = dst_ip4.split('-')
+ min_value = dst_ips[0]
+ max_value = dst_ips[1] if len(dst_ips) == 2 else dst_ips[0]
+ if len(dst_ips) == 1:
+ dst_ip4 = min_value
+ self._set_ip_fields(dst=dst_ip4)
+ else:
+ stl_vm_flow_var = STLVmFlowVar(name="dst_ip4",
+ min_value=min_value,
+ max_value=max_value,
+ size=4,
+ op='random',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dst_ip4',
+ pkt_offset='IP.dst')
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+ stl_vm_fix_ipv4 = STLVmFixIpv4(offset="IP")
+ self.vm_flow_vars.append(stl_vm_fix_ipv4)
+
+ def set_src_ip6(self, src_ip6):
+ """ set source ipv6 address fields """
+ src_ips = src_ip6.split('-')
+ min_value = src_ips[0]
+ max_value = src_ips[1] if len(src_ips) == 2 else src_ips[0]
+ src_ip6 = min_value
+ self._set_ip6_fields(src=src_ip6)
+ if len(src_ips) == 2:
+ min_value, max_value = \
+ self._get_start_end_ipv6(min_value, max_value)
+ stl_vm_flow_var = STLVmFlowVar(name="ip6_src",
+ min_value=min_value,
+ max_value=max_value,
+ size=8,
+ op='random',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='ip6_src',
+ pkt_offset='IPv6.src',
+ offset_fixup=8)
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_dst_ip6(self, dst_ip6):
+ """ set destination ipv6 address fields """
+ dst_ips = dst_ip6.split('-')
+ min_value = dst_ips[0]
+ max_value = dst_ips[1] if len(dst_ips) == 2 else dst_ips[0]
+ dst_ip6 = min_value
+ self._set_ip6_fields(dst=dst_ip6)
+ if len(dst_ips) == 2:
+ min_value, max_value = \
+ self._get_start_end_ipv6(min_value, max_value)
+ stl_vm_flow_var = STLVmFlowVar(name="dst_ip6",
+ min_value=min_value,
+ max_value=max_value,
+ size=8,
+ op='random',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dst_ip6',
+ pkt_offset='IPv6.dst',
+ offset_fixup=8)
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_dscp(self, dscp):
+ """ set dscp for trex """
+ dscps = str(dscp).split('-')
+ min_value = int(dscps[0])
+ max_value = int(dscps[1]) if len(dscps) == 2 else int(dscps[0])
+ if len(dscps) == 1:
+ dscp = min_value
+ self._set_ip_fields(tos=dscp)
+ else:
+ stl_vm_flow_var = STLVmFlowVar(name="dscp",
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ op='inc',
+ step=8)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='dscp',
+ pkt_offset='IP.tos')
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_src_port(self, src_port):
+ """ set packet source port """
+ src_ports = str(src_port).split('-')
+ min_value = int(src_ports[0])
+ if len(src_ports) == 1:
+ max_value = int(src_ports[0])
+ src_port = min_value
+ self._set_udp_fields(sport=src_port)
+ else:
+ max_value = int(src_ports[1])
+ stl_vm_flow_var = STLVmFlowVar(name="port_src",
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ op='random',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_src',
+ pkt_offset=self.udp_sport)
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_dst_port(self, dst_port):
+        """ set packet destination port """
+ dst_ports = str(dst_port).split('-')
+ min_value = int(dst_ports[0])
+ if len(dst_ports) == 1:
+ max_value = int(dst_ports[0])
+ dst_port = min_value
+ self._set_udp_fields(dport=dst_port)
+ else:
+ max_value = int(dst_ports[1])
+ stl_vm_flow_var = STLVmFlowVar(name="port_dst",
+ min_value=min_value,
+ max_value=max_value,
+ size=2,
+ op='random',
+ step=1)
+ self.vm_flow_vars.append(stl_vm_flow_var)
+ stl_vm_wr_flow_var = STLVmWrFlowVar(fv_name='port_dst',
+ pkt_offset=self.udp_dport)
+ self.vm_flow_vars.append(stl_vm_wr_flow_var)
+
+ def set_svlan_cvlan(self, svlan, cvlan):
+ """ set svlan & cvlan """
+ self.qinq = True
+ ether_params = {'type': 0x8100}
+ self._set_ether_fields(**ether_params)
+ svlans = str(svlan['id']).split('-')
+ svlan_min = int(svlans[0])
+ svlan_max = int(svlans[1]) if len(svlans) == 2 else int(svlans[0])
+ if len(svlans) == 2:
+ svlan = self._get_random_value(svlan_min, svlan_max)
+ else:
+ svlan = svlan_min
+ cvlans = str(cvlan['id']).split('-')
+ cvlan_min = int(cvlans[0])
+ cvlan_max = int(cvlans[1]) if len(cvlans) == 2 else int(cvlans[0])
+ if len(cvlans) == 2:
+ cvlan = self._get_random_value(cvlan_min, cvlan_max)
+ else:
+ cvlan = cvlan_min
+ self.qinq_packet = Pkt.Dot1Q(vlan=svlan) / Pkt.Dot1Q(vlan=cvlan)
+
+ def set_qinq(self, qinq):
+ """ set qinq in packet """
+ self.set_svlan_cvlan(qinq['S-VLAN'], qinq['C-VLAN'])
+
+ def set_outer_l2_fields(self, outer_l2):
+ """ setup outer l2 fields from traffic profile """
+ ether_params = {'type': 0x800}
+ self._set_ether_fields(**ether_params)
+ if 'srcmac' in outer_l2:
+ self.set_src_mac(outer_l2['srcmac'])
+ if 'dstmac' in outer_l2:
+ self.set_dst_mac(outer_l2['dstmac'])
+ if 'QinQ' in outer_l2:
+ self.set_qinq(outer_l2['QinQ'])
+
+ def set_outer_l3v4_fields(self, outer_l3v4):
+ """ setup outer l3v4 fields from traffic profile """
+ ip_params = {}
+ if 'proto' in outer_l3v4:
+ ip_params['proto'] = outer_l3v4['proto']
+ if outer_l3v4['proto'] == 'tcp':
+ self.udp_packet = Pkt.TCP()
+ self.udp_dport = 'TCP.dport'
+ self.udp_sport = 'TCP.sport'
+ tcp_params = {'flags': '', 'window': 0}
+ self._set_udp_fields(**tcp_params)
+ if 'ttl' in outer_l3v4:
+ ip_params['ttl'] = outer_l3v4['ttl']
+ self._set_ip_fields(**ip_params)
+ if 'dscp' in outer_l3v4:
+ self.set_dscp(outer_l3v4['dscp'])
+ if 'srcip4' in outer_l3v4:
+ self.set_src_ip4(outer_l3v4['srcip4'])
+ if 'dstip4' in outer_l3v4:
+ self.set_dst_ip4(outer_l3v4['dstip4'])
+
+ def set_outer_l3v6_fields(self, outer_l3v6):
+ """ setup outer l3v6 fields from traffic profile """
+ ether_params = {'type': 0x86dd}
+ self._set_ether_fields(**ether_params)
+ ip6_params = {}
+ if 'proto' in outer_l3v6:
+ ip6_params['proto'] = outer_l3v6['proto']
+ if outer_l3v6['proto'] == 'tcp':
+ self.udp_packet = Pkt.TCP()
+ self.udp_dport = 'TCP.dport'
+ self.udp_sport = 'TCP.sport'
+ tcp_params = {'flags': '', 'window': 0}
+ self._set_udp_fields(**tcp_params)
+ if 'ttl' in outer_l3v6:
+ ip6_params['ttl'] = outer_l3v6['ttl']
+ if 'tc' in outer_l3v6:
+ ip6_params['tc'] = outer_l3v6['tc']
+ if 'hlim' in outer_l3v6:
+ ip6_params['hlim'] = outer_l3v6['hlim']
+ self._set_ip6_fields(**ip6_params)
+ if 'srcip6' in outer_l3v6:
+ self.set_src_ip6(outer_l3v6['srcip6'])
+ if 'dstip6' in outer_l3v6:
+ self.set_dst_ip6(outer_l3v6['dstip6'])
+
+ def set_outer_l4_fields(self, outer_l4):
+ """ setup outer l4 fields from traffic profile """
+ if 'srcport' in outer_l4:
+ self.set_src_port(outer_l4['srcport'])
+ if 'dstport' in outer_l4:
+ self.set_dst_port(outer_l4['dstport'])
+
+ def generate_imix_data(self, packet_definition):
+ """ generate packet size for a given traffic profile """
+ imix_count = {}
+ imix_data = {}
+ if not packet_definition:
+ return imix_count
+ imix = packet_definition.get('framesize')
+ if imix:
+ for size in imix:
+ data = imix[size]
+ imix_data[int(size[:-1])] = int(data)
+ imix_sum = sum(imix_data.values())
+ if imix_sum > 100:
+ raise SystemExit("Error in IMIX data")
+ elif imix_sum < 100:
+ imix_data[64] = imix_data.get(64, 0) + (100 - imix_sum)
+
+ avg_size = 0.0
+ for size in imix_data:
+ count = int(imix_data[size])
+ if count:
+ avg_size += round(size * count / 100, 2)
+ pps = round(self.pps * count / 100, 0)
+ imix_count[size] = pps
+ self.rate = round(1342177280 / avg_size, 0) * 2
+ logging.debug("Imax: %s rate: %s", imix_count, self.rate)
+ return imix_count
+
+ def get_streams(self):
+ """ generate trex stream """
+ self.streams = []
+ self.pps = self.params['traffic_profile'].get('frame_rate', 100)
+ for packet_name in self.profile_data:
+ outer_l2 = self.profile_data[packet_name].get('outer_l2')
+ imix_data = self.generate_imix_data(outer_l2)
+ if not imix_data:
+ imix_data = {64: self.pps}
+ self.generate_vm(self.profile_data[packet_name])
+ for size in imix_data:
+ self._generate_streams(size, imix_data[size])
+ self._generate_profile()
+ return self.profile
+
+ def generate_vm(self, packet_definition):
+ """ generate trex vm with flows setup """
+ self.ether_packet = Pkt.Ether()
+ self.ip_packet = Pkt.IP()
+ self.ip6_packet = None
+ self.udp_packet = Pkt.UDP()
+ self.udp_dport = 'UDP.dport'
+ self.udp_sport = 'UDP.sport'
+ self.qinq = False
+ self.vm_flow_vars = []
+ outer_l2 = packet_definition.get('outer_l2', None)
+ outer_l3v4 = packet_definition.get('outer_l3v4', None)
+ outer_l3v6 = packet_definition.get('outer_l3v6', None)
+ outer_l4 = packet_definition.get('outer_l4', None)
+ if outer_l2:
+ self.set_outer_l2_fields(outer_l2)
+ if outer_l3v4:
+ self.set_outer_l3v4_fields(outer_l3v4)
+ if outer_l3v6:
+ self.set_outer_l3v6_fields(outer_l3v6)
+ if outer_l4:
+ self.set_outer_l4_fields(outer_l4)
+ self.trex_vm = STLScVmRaw(self.vm_flow_vars)
+
+ def generate_packets(self):
+ """ generate packets from trex TG """
+ base_pkt = self.base_pkt
+ size = self.fsize - 4
+ pad = max(0, size - len(base_pkt)) * 'x'
+ self.packets = [STLPktBuilder(pkt=base_pkt / pad,
+ vm=vm) for vm in self.vms]
+
+ def _create_single_packet(self, size=64):
+ size = size - 4
+ ether_packet = self.ether_packet
+ ip_packet = self.ip6_packet if self.ip6_packet else self.ip_packet
+ udp_packet = self.udp_packet
+ if self.qinq:
+ qinq_packet = self.qinq_packet
+ base_pkt = ether_packet / qinq_packet / ip_packet / udp_packet
+ else:
+ base_pkt = ether_packet / ip_packet / udp_packet
+ pad = max(0, size - len(base_pkt)) * 'x'
+ packet = STLPktBuilder(pkt=base_pkt / pad, vm=self.trex_vm)
+ return packet
+
+ def _create_single_stream(self, packet_size, pps, isg=0):
+ packet = self._create_single_packet(packet_size)
+ if self.pg_id:
+ self.pg_id += 1
+ stl_flow = STLFlowLatencyStats(pg_id=self.pg_id)
+ stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps),
+ flow_stats=stl_flow)
+ else:
+ stream = STLStream(isg=isg, packet=packet, mode=STLTXCont(pps=pps))
+ return stream
+
+ def _generate_streams(self, packet_size, pps):
+ self.streams.append(self._create_single_stream(packet_size, pps))
+
+ def _generate_profile(self):
+ self.profile = STLProfile(self.streams)
+
+ @classmethod
+ def _get_start_end_ipv6(cls, start_ip, end_ip):
+ try:
+ ip1 = socket.inet_pton(socket.AF_INET6, start_ip)
+ ip2 = socket.inet_pton(socket.AF_INET6, end_ip)
+ hi1, lo1 = struct.unpack('!QQ', ip1)
+ hi2, lo2 = struct.unpack('!QQ', ip2)
+ if ((hi1 << 64) | lo1) > ((hi2 << 64) | lo2):
+ raise SystemExit("IPv6: start_ip is greater then end_ip")
+ max_p1 = abs(int(lo1) - int(lo2))
+ base_p1 = lo1
+ except Exception as ex_error:
+ raise SystemExit(ex_error)
+ else:
+ return base_p1, max_p1 + base_p1
+
+ @classmethod
+ def _get_random_value(cls, min_port, max_port):
+ cryptogen = SystemRandom()
+ return cryptogen.randrange(min_port, max_port)
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_ping.py b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
new file mode 100644
index 000000000..2844a5c01
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_ping.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PING acts as traffic generation and vnf definitions based on IETF Spec """
+
+from __future__ import absolute_import
+from __future__ import print_function
+import logging
+import multiprocessing
+import re
+import time
+import os
+
+from yardstick import ssh
+from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
+from yardstick.network_services.utils import provision_tool
+
+LOG = logging.getLogger(__name__)
+
+
+class PingParser(object):
+ """ Class providing file-like API for talking with SSH connection """
+
+ def __init__(self, q_out):
+ self.queue = q_out
+ self.closed = False
+
+ def write(self, chunk):
+ """ 64 bytes from 10.102.22.93: icmp_seq=1 ttl=64 time=0.296 ms """
+ match = re.search(r"icmp_seq=(\d+).*time=([0-9.]+)", chunk)
+ LOG.debug("Parser called on %s", chunk)
+ if match:
+ # IMPORTANT: in order for the data to be properly taken
+ # in by InfluxDB, it needs to be converted to numeric types
+ self.queue.put({"packets_received": float(match.group(1)),
+ "rtt": float(match.group(2))})
+
+ def close(self):
+ ''' close the ssh connection '''
+ pass
+
+ def clear(self):
+ ''' clear queue till Empty '''
+ while self.queue.qsize() > 0:
+ self.queue.get()
+
+
+class PingTrafficGen(GenericTrafficGen):
+ """
+ This traffic generator can ping a single IP with pingsize
+ and target given in traffic profile
+ """
+
+ def __init__(self, vnfd):
+ super(PingTrafficGen, self).__init__(vnfd)
+ self._result = {}
+ self._parser = None
+ self._queue = None
+ self._traffic_process = None
+
+ mgmt_interface = vnfd["mgmt-interface"]
+ ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
+ LOG.debug("Connecting to %s", mgmt_interface["ip"])
+
+ self.connection = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
+ password=mgmt_interface["password"],
+ port=ssh_port)
+ self.connection.wait()
+
+ def _bind_device_kernel(self, connection):
+ dpdk_nic_bind = \
+ provision_tool(self.connection,
+ os.path.join(self.bin_path, "dpdk_nic_bind.py"))
+
+ drivers = {intf["virtual-interface"]["vpci"]:
+ intf["virtual-interface"]["driver"]
+ for intf in self.vnfd["vdu"][0]["external-interface"]}
+
+ commands = \
+ ['"{0}" --force -b "{1}" "{2}"'.format(dpdk_nic_bind, value, key)
+ for key, value in drivers.items()]
+ for command in commands:
+ connection.execute(command)
+
+ for index, out in enumerate(self.vnfd["vdu"][0]["external-interface"]):
+ vpci = out["virtual-interface"]["vpci"]
+ net = "find /sys/class/net -lname '*{}*' -printf '%f'".format(vpci)
+ out = connection.execute(net)[1]
+ ifname = out.split('/')[-1].strip('\n')
+ self.vnfd["vdu"][0]["external-interface"][index][
+ "virtual-interface"]["local_iface_name"] = ifname
+
+ def scale(self, flavor=""):
+        ''' scale vnf based on flavor input '''
+ super(PingTrafficGen, self).scale(flavor)
+
+ def instantiate(self, scenario_cfg, context_cfg):
+ self._result = {"packets_received": 0, "rtt": 0}
+ self._bind_device_kernel(self.connection)
+
+ def run_traffic(self, traffic_profile):
+ self._queue = multiprocessing.Queue()
+ self._parser = PingParser(self._queue)
+ self._traffic_process = \
+ multiprocessing.Process(target=self._traffic_runner,
+ args=(traffic_profile, self._parser))
+ self._traffic_process.start()
+ # Wait for traffic process to start
+ time.sleep(4)
+ return self._traffic_process.is_alive()
+
+ def listen_traffic(self, traffic_profile):
+ """ Not needed for ping
+
+ :param traffic_profile:
+ :return:
+ """
+ pass
+
+ def _traffic_runner(self, traffic_profile, filewrapper):
+
+ mgmt_interface = self.vnfd["mgmt-interface"]
+ ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
+ self.connection = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
+ password=mgmt_interface["password"],
+ port=ssh_port)
+ self.connection.wait()
+ external_interface = self.vnfd["vdu"][0]["external-interface"]
+ virtual_interface = external_interface[0]["virtual-interface"]
+ target_ip = virtual_interface["dst_ip"].split('/')[0]
+ local_ip = virtual_interface["local_ip"].split('/')[0]
+ local_if_name = \
+ virtual_interface["local_iface_name"].split('/')[0]
+ packet_size = traffic_profile.params["traffic_profile"]["frame_size"]
+
+ run_cmd = []
+
+ run_cmd.append("ip addr flush %s" % local_if_name)
+ run_cmd.append("ip addr add %s/24 dev %s" % (local_ip, local_if_name))
+ run_cmd.append("ip link set %s up" % local_if_name)
+
+ for cmd in run_cmd:
+ self.connection.execute(cmd)
+
+ ping_cmd = ("ping -s %s %s" % (packet_size, target_ip))
+ self.connection.run(ping_cmd, stdout=filewrapper,
+ keep_stdin_open=True, pty=True)
+
+ def collect_kpi(self):
+ if not self._queue.empty():
+ kpi = self._queue.get()
+ self._result.update(kpi)
+ return self._result
+
+ def terminate(self):
+ if self._traffic_process is not None:
+ self._traffic_process.terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
new file mode 100644
index 000000000..37c1a7345
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_rfc2544_trex.py
@@ -0,0 +1,285 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Trex traffic generation definitions which implements rfc2544 """
+
+from __future__ import absolute_import
+from __future__ import print_function
+import multiprocessing
+import time
+import logging
+import os
+import yaml
+
+from yardstick import ssh
+from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
+from yardstick.network_services.utils import get_nsb_option
+from stl.trex_stl_lib.trex_stl_client import STLClient
+from stl.trex_stl_lib.trex_stl_client import LoggerApi
+from stl.trex_stl_lib.trex_stl_exceptions import STLError
+
+LOGGING = logging.getLogger(__name__)
+
+DURATION = 30
+WAIT_TIME = 3
+TREX_SYNC_PORT = 4500
+TREX_ASYNC_PORT = 4501
+
+
class TrexTrafficGenRFC(GenericTrafficGen):
    """
    This class handles mapping traffic profile and generating
    traffic for rfc2544 testcase.
    """

    def __init__(self, vnfd):
        """Create the generator and open a management SSH session.

        :param vnfd: VNF descriptor dictionary (mgmt-interface, vdu, ...)
        """
        super(TrexTrafficGenRFC, self).__init__(vnfd)
        self._result = {}
        self._queue = multiprocessing.Queue()
        # shared flag used to ask the traffic child process to stop
        # (the original assigned this twice; the duplicate is removed)
        self._terminated = multiprocessing.Value('i', 0)
        self._traffic_process = None
        self._vpci_ascending = None
        self.tc_file_name = None
        self.client = None
        self.my_ports = None

        mgmt_interface = self.vnfd["mgmt-interface"]
        ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
        self.connection = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
                                  password=mgmt_interface["password"],
                                  port=ssh_port)
        self.connection.wait()
+
+ @classmethod
+ def _split_mac_address_into_list(cls, mac):
+ octets = mac.split(':')
+ for i, elem in enumerate(octets):
+ octets[i] = "0x" + str(elem)
+ return octets
+
+ def _generate_trex_cfg(self, vnfd):
+ """
+
+ :param vnfd: vnfd.yaml
+ :return: trex_cfg.yaml file
+ """
+ trex_cfg = dict(
+ port_limit=0,
+ version='2',
+ interfaces=[],
+ port_info=list(dict(
+ ))
+ )
+ trex_cfg["port_limit"] = len(vnfd["vdu"][0]["external-interface"])
+ trex_cfg["version"] = '2'
+
+ cfg_file = []
+ vpci = []
+ port = {}
+
+ ext_intf = vnfd["vdu"][0]["external-interface"]
+ for interface in ext_intf:
+ virt_intf = interface["virtual-interface"]
+ vpci.append(virt_intf["vpci"])
+
+ port["src_mac"] = \
+ self._split_mac_address_into_list(virt_intf["local_mac"])
+
+ time.sleep(WAIT_TIME)
+ port["dest_mac"] = \
+ self._split_mac_address_into_list(virt_intf["dst_mac"])
+ if virt_intf["dst_mac"]:
+ trex_cfg["port_info"].append(port.copy())
+
+ trex_cfg["interfaces"] = vpci
+ cfg_file.append(trex_cfg)
+
+ with open('/tmp/trex_cfg.yaml', 'w') as outfile:
+ outfile.write(yaml.safe_dump(cfg_file, default_flow_style=False))
+ self.connection.put('/tmp/trex_cfg.yaml', '/etc')
+
+ self._vpci_ascending = sorted(vpci)
+
+ def scale(self, flavor=""):
+ ''' scale vnfbased on flavor input '''
+ super(TrexTrafficGenRFC, self).scale(flavor)
+
    def instantiate(self, scenario_cfg, context_cfg):
        """Deploy TRex on the TG host and wait for its server to come up.

        Generates the TRex config, copies the trex tree over if missing,
        launches the server in a child process and polls the sync port
        until something is listening on it.

        :param scenario_cfg: scenario configuration; supplies the 'tc' name
            used later to read the testcase YAML
        :param context_cfg: context configuration (unused here)
        :return: exit code of the server process once the port is open
        :raises RuntimeError: if the server process dies before the port opens
        """
        self._generate_trex_cfg(self.vnfd)
        self.tc_file_name = '{0}.yaml'.format(scenario_cfg['tc'])
        trex = os.path.join(self.bin_path, "trex")
        err, _, _ = \
            self.connection.execute("ls {} >/dev/null 2>&1".format(trex))
        if err != 0:
            # trex tree not present on the target yet; copy it recursively
            self.connection.put(trex, trex, True)

        LOGGING.debug("Starting TRex server...")
        _tg_server = \
            multiprocessing.Process(target=self._start_server)
        _tg_server.start()
        while True:
            LOGGING.info("Waiting for TG Server to start.. ")
            time.sleep(WAIT_TIME)

            # server counts as up once something listens on the sync port
            status = \
                self.connection.execute("lsof -i:%s" % TREX_SYNC_PORT)[0]
            if status == 0:
                LOGGING.info("TG server is up and running.")
                return _tg_server.exitcode
            if not _tg_server.is_alive():
                raise RuntimeError("Traffic Generator process died.")
+
+ def listen_traffic(self, traffic_profile):
+ pass
+
+ def _get_logical_if_name(self, vpci):
+ ext_intf = self.vnfd["vdu"][0]["external-interface"]
+ for interface in range(len(self.vnfd["vdu"][0]["external-interface"])):
+ virtual_intf = ext_intf[interface]["virtual-interface"]
+ if virtual_intf["vpci"] == vpci:
+ return ext_intf[interface]["name"]
+
    def run_traffic(self, traffic_profile,
                    client_started=multiprocessing.Value('i', 0)):
        """Start traffic generation in a child process and block until the
        TRex client inside it has connected and begun transmitting.

        :param traffic_profile: profile executed by the runner process
        :param client_started: shared flag set to 1 by the runner once
            traffic is flowing.  NOTE(review): the default Value is created
            once at definition time and shared across calls that omit the
            argument - callers should pass their own.
        :return: True if the runner process is alive after start-up
        """
        self._traffic_process = \
            multiprocessing.Process(target=self._traffic_runner,
                                    args=(traffic_profile, self._queue,
                                          client_started, self._terminated))
        self._traffic_process.start()
        # Wait for traffic process to start
        while client_started.value == 0:
            time.sleep(1)

        return self._traffic_process.is_alive()
+
    def _start_server(self):
        """Launch the TRex server on the TG host (blocking; run in a child
        process).

        Opens its own SSH session because this executes in a separate
        process, frees the TRex sync/async control ports, kills any stale
        server and starts t-rex-64 in interactive mode.
        """
        mgmt_interface = self.vnfd["mgmt-interface"]
        ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
        _server = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
                          password=mgmt_interface["password"],
                          port=ssh_port)
        _server.wait()

        # free the TRex control ports and kill any leftover server process
        _server.execute("fuser -n tcp %s %s -k > /dev/null 2>&1" %
                        (TREX_SYNC_PORT, TREX_ASYNC_PORT))
        _server.execute("pkill -9 rex > /dev/null 2>&1")

        trex_path = os.path.join(self.bin_path, "trex/scripts")
        path = get_nsb_option("trex_path", trex_path)
        trex_cmd = "cd " + path + "; sudo ./t-rex-64 -i > /dev/null 2>&1"

        # blocks for as long as the server runs
        _server.execute(trex_cmd)
+
    def _connect_client(self, client=None):
        """Return an STLClient connected to the TRex server.

        Retries the connection up to 6 times at WAIT_TIME intervals.
        NOTE(review): if every attempt fails the client is still returned,
        unconnected - callers will fail on first use; confirm intended.

        :param client: pre-built client (used by tests); created when None
        """
        if client is None:
            client = STLClient(username=self.vnfd["mgmt-interface"]["user"],
                               server=self.vnfd["mgmt-interface"]["ip"],
                               verbose_level=LoggerApi.VERBOSE_QUIET)
        for idx in range(6):
            try:
                client.connect()
                break
            except STLError:
                LOGGING.info("Unable to connect to Trex. Attempt %s", idx)
                time.sleep(WAIT_TIME)
        return client
+
+ @classmethod
+ def _get_rfc_tolerance(cls, tc_yaml):
+ tolerance = '0.8 - 1.0'
+ if 'tc_options' in tc_yaml['scenarios'][0]:
+ tc_options = tc_yaml['scenarios'][0]['tc_options']
+ if 'rfc2544' in tc_options:
+ tolerance = \
+ tc_options['rfc2544'].get('allowed_drop_rate', '0.8 - 1.0')
+
+ tolerance = tolerance.split('-')
+ min_tol = float(tolerance[0])
+ if len(tolerance) == 2:
+ max_tol = float(tolerance[1])
+ else:
+ max_tol = float(tolerance[0])
+
+ return [min_tol, max_tol]
+
    def _traffic_runner(self, traffic_profile, queue,
                        client_started, terminated):
        """Child-process loop: drive TRex and publish RFC2544 samples.

        Connects a TRex client, then repeatedly executes the traffic
        profile for DURATION seconds, collects per-port stats, maps them
        to logical interface names and pushes the drop-percentage result
        onto *queue* until *terminated* is set.

        :param traffic_profile: profile providing execute() and
            get_drop_percentage()
        :param queue: multiprocessing queue carrying KPI samples
        :param client_started: shared flag set to 1 once traffic started
        :param terminated: shared flag; the loop exits when non-zero
        """
        LOGGING.info("Starting TRex client...")
        tc_yaml = {}

        # NOTE(review): yaml.load without an explicit SafeLoader; the
        # testcase file is local/trusted but safe_load would be preferable.
        with open(self.tc_file_name) as tc_file:
            tc_yaml = yaml.load(tc_file.read())

        tolerance = self._get_rfc_tolerance(tc_yaml)

        # fixme: fix passing correct trex config file,
        # instead of searching the default path
        self.my_ports = [0, 1]
        self.client = self._connect_client()
        self.client.reset(ports=self.my_ports)
        self.client.remove_all_streams(self.my_ports)  # remove all streams
        while not terminated.value:
            traffic_profile.execute(self)
            client_started.value = 1
            time.sleep(DURATION)
            self.client.stop(self.my_ports)
            time.sleep(WAIT_TIME)
            last_res = self.client.get_stats(self.my_ports)
            samples = {}
            for vpci_idx in range(len(self._vpci_ascending)):
                name = \
                    self._get_logical_if_name(self._vpci_ascending[vpci_idx])
                # fixme: VNFDs KPIs values needs to be mapped to TRex structure
                if not isinstance(last_res, dict):
                    # non-dict stats (e.g. mocked runs) end the loop
                    terminated.value = 1
                    last_res = {}

                samples[name] = \
                    {"rx_throughput_fps":
                     float(last_res.get(vpci_idx, {}).get("rx_pps", 0.0)),
                     "tx_throughput_fps":
                     float(last_res.get(vpci_idx, {}).get("tx_pps", 0.0)),
                     "rx_throughput_mbps":
                     float(last_res.get(vpci_idx, {}).get("rx_bps", 0.0)),
                     "tx_throughput_mbps":
                     float(last_res.get(vpci_idx, {}).get("tx_bps", 0.0)),
                     "in_packets":
                     last_res.get(vpci_idx, {}).get("ipackets", 0),
                     "out_packets":
                     last_res.get(vpci_idx, {}).get("opackets", 0)}

            samples = \
                traffic_profile.get_drop_percentage(self, samples,
                                                    tolerance[0], tolerance[1])
            queue.put(samples)
        self.client.stop(self.my_ports)
        self.client.disconnect()
        # publish the final sample set before exiting
        queue.put(samples)
+
    def collect_kpi(self):
        """Return the latest queued sample merged into the running result."""
        if not self._queue.empty():
            result = self._queue.get()
            self._result.update(result)
        LOGGING.debug("trex collect Kpis %s", self._result)
        return self._result
+
    def terminate(self):
        """Stop traffic generation and clean up.

        Signals the runner loop to exit, frees the TRex control ports on
        the TG host and kills the runner process if still present.
        """
        self._terminated.value = 1  # stop the TRex client loop

        self.connection.execute("fuser -n tcp %s %s -k > /dev/null 2>&1" %
                                (TREX_SYNC_PORT, TREX_ASYNC_PORT))

        if self._traffic_process:
            self._traffic_process.terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/tg_trex.py b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
new file mode 100644
index 000000000..2731476e0
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/tg_trex.py
@@ -0,0 +1,278 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Trex acts as traffic generation and vnf definitions based on IETS Spec """
+
+from __future__ import absolute_import
+from __future__ import print_function
+import multiprocessing
+import time
+import logging
+import os
+import yaml
+
+from yardstick import ssh
+from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen
+from yardstick.network_services.utils import get_nsb_option
+from yardstick.network_services.utils import provision_tool
+from stl.trex_stl_lib.trex_stl_client import STLClient
+from stl.trex_stl_lib.trex_stl_client import LoggerApi
+from stl.trex_stl_lib.trex_stl_exceptions import STLError
+
+LOG = logging.getLogger(__name__)
+DURATION = 30
+TREX_SYNC_PORT = 4500
+TREX_ASYNC_PORT = 4501
+
+
class TrexTrafficGen(GenericTrafficGen):
    """
    This class handles mapping traffic profile and generating
    traffic for given testcase
    """

    def __init__(self, vnfd):
        """Create the generator and open a management SSH session.

        :param vnfd: VNF descriptor dictionary (mgmt-interface, vdu, ...)
        """
        super(TrexTrafficGen, self).__init__(vnfd)
        self._result = {}
        self._queue = multiprocessing.Queue()
        # shared flag used to ask the runner child process to stop
        self._terminated = multiprocessing.Value('i', 0)
        self._traffic_process = None
        self._vpci_ascending = None
        self.client = None
        self.my_ports = None
        # set to 1 by the runner process once traffic is flowing
        self.client_started = multiprocessing.Value('i', 0)

        mgmt_interface = self.vnfd["mgmt-interface"]
        ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
        self.connection = ssh.SSH(mgmt_interface["user"],
                                  mgmt_interface["ip"],
                                  password=mgmt_interface["password"],
                                  port=ssh_port)
        self.connection.wait()
+
+ @classmethod
+ def _split_mac_address_into_list(cls, mac):
+ octets = mac.split(':')
+ for i, elem in enumerate(octets):
+ octets[i] = "0x" + str(elem)
+ return octets
+
+ def _generate_trex_cfg(self, vnfd):
+ """
+
+ :param vnfd: vnfd.yaml
+ :return: trex_cfg.yaml file
+ """
+ trex_cfg = dict(
+ port_limit=0,
+ version='2',
+ interfaces=[],
+ port_info=list(dict(
+ ))
+ )
+ trex_cfg["port_limit"] = len(vnfd["vdu"][0]["external-interface"])
+ trex_cfg["version"] = '2'
+
+ cfg_file = []
+ vpci = []
+ port = {}
+
+ for interface in range(len(vnfd["vdu"][0]["external-interface"])):
+ ext_intrf = vnfd["vdu"][0]["external-interface"]
+ virtual_interface = ext_intrf[interface]["virtual-interface"]
+ vpci.append(virtual_interface["vpci"])
+
+ port["src_mac"] = self._split_mac_address_into_list(
+ virtual_interface["local_mac"])
+ port["dest_mac"] = self._split_mac_address_into_list(
+ virtual_interface["dst_mac"])
+
+ trex_cfg["port_info"].append(port.copy())
+
+ trex_cfg["interfaces"] = vpci
+ cfg_file.append(trex_cfg)
+
+ with open('/tmp/trex_cfg.yaml', 'w') as outfile:
+ outfile.write(yaml.safe_dump(cfg_file, default_flow_style=False))
+ self.connection.put('/tmp/trex_cfg.yaml', '/etc')
+
+ self._vpci_ascending = sorted(vpci)
+
    @classmethod
    def __setup_hugepages(cls, connection):
        """Reserve hugepages on the target host.

        Reads the kernel hugepage size from /proc/meminfo and writes a
        page count into the matching sysfs nr_hugepages knob: 16384 pages
        for 2 MB hugepages (32 GB total), 16 pages otherwise.

        :param connection: established SSH connection to the target
        """
        hugepages = \
            connection.execute(
                "awk '/Hugepagesize/ { print $2$3 }' < /proc/meminfo")[1]
        hugepages = hugepages.rstrip()

        memory_path = \
            '/sys/kernel/mm/hugepages/hugepages-%s/nr_hugepages' % hugepages
        connection.execute("awk -F: '{ print $1 }' < %s" % memory_path)

        pages = 16384 if hugepages.rstrip() == "2048kB" else 16
        connection.execute("echo %s > %s" % (pages, memory_path))
+
+ def setup_vnf_environment(self, connection):
+ ''' setup dpdk environment needed for vnf to run '''
+
+ self.__setup_hugepages(connection)
+ connection.execute("modprobe uio && modprobe igb_uio")
+
+ exit_status = connection.execute("lsmod | grep -i igb_uio")[0]
+ if exit_status == 0:
+ return
+
+ dpdk = os.path.join(self.bin_path, "dpdk-16.07")
+ dpdk_setup = \
+ provision_tool(self.connection,
+ os.path.join(self.bin_path, "nsb_setup.sh"))
+ status = connection.execute("ls {} >/dev/null 2>&1".format(dpdk))[0]
+ if status:
+ connection.execute("bash %s dpdk >/dev/null 2>&1" % dpdk_setup)
+
+ def scale(self, flavor=""):
+ ''' scale vnfbased on flavor input '''
+ super(TrexTrafficGen, self).scale(flavor)
+
    def instantiate(self, scenario_cfg, context_cfg):
        """Deploy TRex on the TG host and wait for its server to come up.

        Generates the TRex config, prepares the DPDK environment, copies
        the trex tree and builds its kernel modules if missing, then
        launches the server in a child process and polls the sync port.

        :param scenario_cfg: scenario configuration (unused here)
        :param context_cfg: context configuration (unused here)
        :return: exit code of the server process once the port is open
        :raises RuntimeError: if the server process dies before the port opens
        """
        self._generate_trex_cfg(self.vnfd)
        self.setup_vnf_environment(self.connection)

        trex = os.path.join(self.bin_path, "trex")
        err = \
            self.connection.execute("ls {} >/dev/null 2>&1".format(trex))[0]
        if err != 0:
            # trex tree missing on the target: copy it and build its
            # kernel modules
            LOG.info("Copying trex to destination...")
            self.connection.put("/root/.bash_profile", "/root/.bash_profile")
            self.connection.put(trex, trex, True)
            ko_src = os.path.join(trex, "scripts/ko/src/")
            self.connection.execute("cd %s && make && make install" % ko_src)

        LOG.info("Starting TRex server...")
        _tg_process = \
            multiprocessing.Process(target=self._start_server)
        _tg_process.start()
        while True:
            if not _tg_process.is_alive():
                raise RuntimeError("Traffic Generator process died.")
            LOG.info("Waiting for TG Server to start.. ")
            time.sleep(1)
            # server counts as up once something listens on the sync port
            status = \
                self.connection.execute("lsof -i:%s" % TREX_SYNC_PORT)[0]
            if status == 0:
                LOG.info("TG server is up and running.")
                return _tg_process.exitcode
+
+ def listen_traffic(self, traffic_profile):
+ pass
+
+ def _get_logical_if_name(self, vpci):
+ ext_intf = self.vnfd["vdu"][0]["external-interface"]
+ for interface in range(len(self.vnfd["vdu"][0]["external-interface"])):
+ virtual_intf = ext_intf[interface]["virtual-interface"]
+ if virtual_intf["vpci"] == vpci:
+ return ext_intf[interface]["name"]
+
    def run_traffic(self, traffic_profile):
        """Start traffic generation in a child process and block until the
        TRex client inside it has connected and begun transmitting.

        :param traffic_profile: profile executed by the runner process
        :return: True if the runner process is alive after start-up
        """
        self._traffic_process = \
            multiprocessing.Process(target=self._traffic_runner,
                                    args=(traffic_profile, self._queue,
                                          self.client_started,
                                          self._terminated))
        self._traffic_process.start()
        # Wait for traffic process to start
        while self.client_started.value == 0:
            time.sleep(1)

        return self._traffic_process.is_alive()
+
    def _start_server(self):
        """Launch the TRex server on the TG host (blocking; run in a child
        process).

        Opens its own SSH session because this executes in a separate
        process, frees the TRex sync/async control ports and starts
        t-rex-64 in interactive mode.
        """
        mgmt_interface = self.vnfd["mgmt-interface"]
        ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
        _server = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
                          password=mgmt_interface["password"],
                          port=ssh_port)
        _server.wait()

        # free the TRex control ports before starting a fresh server
        _server.execute("fuser -n tcp %s %s -k > /dev/null 2>&1" %
                        (TREX_SYNC_PORT, TREX_ASYNC_PORT))

        trex_path = os.path.join(self.bin_path, "trex/scripts")
        path = get_nsb_option("trex_path", trex_path)
        trex_cmd = "cd " + path + "; sudo ./t-rex-64 -i > /dev/null 2>&1"

        # blocks for as long as the server runs
        _server.execute(trex_cmd)
+
    def _connect_client(self, client=None):
        """Return an STLClient connected to the TRex server.

        NOTE(review): if all six attempts fail the client is still
        returned, unconnected - callers will fail on first use.

        :param client: pre-built client (used by tests); created when None
        """
        if client is None:
            client = STLClient(username=self.vnfd["mgmt-interface"]["user"],
                               server=self.vnfd["mgmt-interface"]["ip"],
                               verbose_level=LoggerApi.VERBOSE_QUIET)
        # try to connect with 5s intervals, 30s max
        for idx in range(6):
            try:
                client.connect()
                break
            except STLError:
                LOG.info("Unable to connect to Trex Server.. Attempt %s", idx)
                time.sleep(5)
        return client
+
    def _traffic_runner(self, traffic_profile, queue,
                        client_started, terminated):
        """Child-process loop: drive TRex and publish per-port samples.

        Connects a TRex client, then repeatedly executes the traffic
        profile, reads per-port stats, maps them to logical interface
        names and pushes the samples onto *queue* until *terminated* is
        set.

        :param traffic_profile: profile providing execute()
        :param queue: multiprocessing queue carrying KPI samples
        :param client_started: shared flag set to 1 once traffic started
        :param terminated: shared flag; the loop exits when non-zero
        """
        LOG.info("Starting TRex client...")

        self.my_ports = [0, 1]
        self.client = self._connect_client()
        self.client.reset(ports=self.my_ports)

        self.client.remove_all_streams(self.my_ports)  # remove all streams

        while not terminated.value:
            traffic_profile.execute(self)
            client_started.value = 1
            last_res = self.client.get_stats(self.my_ports)
            if not isinstance(last_res, dict):  # added for mock unit test
                terminated.value = 1
                last_res = {}

            samples = {}
            for vpci_idx in range(len(self._vpci_ascending)):
                name = \
                    self._get_logical_if_name(self._vpci_ascending[vpci_idx])
                # fixme: VNFDs KPIs values needs to be mapped to TRex structure
                xe_value = last_res.get(vpci_idx, {})
                samples[name] = \
                    {"rx_throughput_fps": float(xe_value.get("rx_pps", 0.0)),
                     "tx_throughput_fps": float(xe_value.get("tx_pps", 0.0)),
                     "rx_throughput_mbps": float(xe_value.get("rx_bps", 0.0)),
                     "tx_throughput_mbps": float(xe_value.get("tx_bps", 0.0)),
                     "in_packets": xe_value.get("ipackets", 0),
                     "out_packets": xe_value.get("opackets", 0)}
            queue.put(samples)

        self.client.disconnect()
        # reset the flag so a subsequent run_traffic() starts a fresh loop
        terminated.value = 0
+
    def collect_kpi(self):
        """Return the latest queued sample merged into the running result."""
        if not self._queue.empty():
            self._result.update(self._queue.get())
        LOG.debug("trex collect Kpis %s", self._result)
        return self._result
+
    def terminate(self):
        """Free the TRex control ports on the TG host and kill the runner.

        NOTE(review): self.traffic_finished is not initialised in this
        class's __init__ - presumably provided by GenericTrafficGen;
        confirm against the base class.
        """
        self.connection.execute("fuser -n tcp %s %s -k > /dev/null 2>&1" %
                                (TREX_SYNC_PORT, TREX_ASYNC_PORT))
        self.traffic_finished = True
        if self._traffic_process:
            self._traffic_process.terminate()
diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
new file mode 100644
index 000000000..8c766f01e
--- /dev/null
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -0,0 +1,331 @@
+# Copyright (c) 2016-2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" vPE (Power Edge router) VNF model definitions based on IETS Spec """
+
+from __future__ import absolute_import
+from __future__ import print_function
+import tempfile
+import time
+import os
+import logging
+import re
+from multiprocessing import Queue
+import multiprocessing
+import ipaddress
+import six
+
+from yardstick import ssh
+from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
+from yardstick.network_services.utils import provision_tool
+from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
+from yardstick.network_services.nfvi.resource import ResourceProfile
+
+LOG = logging.getLogger(__name__)
+VPE_PIPELINE_COMMAND = '{tool_path} -p 0x3 -f {cfg_file} -s {script}'
+CORES = ['0', '1', '2']
+WAIT_TIME = 20
+
+
class VpeApproxVnf(GenericVNF):
    """ This class handles vPE VNF model-driver definitions """

    def __init__(self, vnfd):
        """Initialise bookkeeping for the vPE process and its CLI queues.

        :param vnfd: VNF descriptor dictionary
        """
        super(VpeApproxVnf, self).__init__(vnfd)
        # NUMA socket of the bound NICs; resolved in instantiate()
        self.socket = None
        # queues bridging CLI commands to/from the vPE pipeline process
        self.q_in = Queue()
        self.q_out = Queue()
        self.vnf_cfg = None
        self._vnf_process = None
        self.connection = None
        self.resource = None
+
    def _resource_collect_start(self):
        """Install the system agent on the target and begin NFVI resource
        collection."""
        self.resource.initiate_systemagent(self.bin_path)
        self.resource.start()
+
    def _resource_collect_stop(self):
        """Stop NFVI resource collection on the target."""
        self.resource.stop()
+
+ def _collect_resource_kpi(self):
+ result = {}
+
+ status = self.resource.check_if_sa_running("collectd")[0]
+ if status:
+ result = self.resource.amqp_collect_nfvi_kpi()
+
+ result = {"core": result}
+
+ return result
+
    @classmethod
    def __setup_hugepages(cls, connection):
        """Reserve hugepages on the target host.

        Reads the kernel hugepage size from /proc/meminfo and writes a
        page count into the matching sysfs nr_hugepages knob: 16384 pages
        for 2 MB hugepages (32 GB total), 16 pages otherwise.

        :param connection: established SSH connection to the target
        """
        hugepages = \
            connection.execute(
                "awk '/Hugepagesize/ { print $2$3 }' < /proc/meminfo")[1]
        hugepages = hugepages.rstrip()

        memory_path = \
            '/sys/kernel/mm/hugepages/hugepages-%s/nr_hugepages' % hugepages
        connection.execute("awk -F: '{ print $1 }' < %s" % memory_path)

        pages = 16384 if hugepages.rstrip() == "2048kB" else 16
        connection.execute("echo %s > %s" % (pages, memory_path))
+
+ def setup_vnf_environment(self, connection):
+ ''' setup dpdk environment needed for vnf to run '''
+
+ self.__setup_hugepages(connection)
+ connection.execute("modprobe uio && modprobe igb_uio")
+
+ exit_status = connection.execute("lsmod | grep -i igb_uio")[0]
+ if exit_status == 0:
+ return
+
+ dpdk = os.path.join(self.bin_path, "dpdk-16.07")
+ dpdk_setup = \
+ provision_tool(self.connection,
+ os.path.join(self.bin_path, "nsb_setup.sh"))
+ status = connection.execute("ls {} >/dev/null 2>&1".format(dpdk))[0]
+ if status:
+ connection.execute("bash %s dpdk >/dev/null 2>&1" % dpdk_setup)
+
+ def _get_cpu_sibling_list(self):
+ cpu_topo = []
+ for core in CORES:
+ sys_cmd = \
+ "/sys/devices/system/cpu/cpu%s/topology/thread_siblings_list" \
+ % core
+ cpuid = \
+ self.connection.execute("awk -F: '{ print $1 }' < %s" %
+ sys_cmd)[1]
+ cpu_topo += \
+ [(idx) if idx.isdigit() else idx for idx in cpuid.split(',')]
+
+ return [cpu.strip() for cpu in cpu_topo]
+
+ def scale(self, flavor=""):
+ ''' scale vnfbased on flavor input '''
+ super(VpeApproxVnf, self).scale(flavor)
+
    def instantiate(self, scenario_cfg, context_cfg):
        """Provision and boot the vPE VNF, then wait for its CLI prompt.

        Sets up DPDK, binds the NICs to igb_uio, launches the vPE pipeline
        in a child process and polls its output queue until the
        "pipeline>" prompt appears, then starts resource collection.

        :param scenario_cfg: scenario configuration; supplies
            vnf_options.vpe.cfg (directory of config templates)
        :param context_cfg: context configuration (unused here)
        :return: exit code of the vPE child process once the prompt is seen
        :raises RuntimeError: on vPE PANIC output or process death
        """
        vnf_cfg = scenario_cfg['vnf_options']['vpe']['cfg']
        mgmt_interface = self.vnfd["mgmt-interface"]
        ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)

        self.connection = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
                                  password=mgmt_interface["password"],
                                  port=ssh_port)

        self.connection.wait()

        self.setup_vnf_environment(self.connection)

        cores = self._get_cpu_sibling_list()
        self.resource = ResourceProfile(self.vnfd, cores)

        # kill any stale vPE instance before binding NICs
        self.connection.execute("pkill vPE_vnf")
        dpdk_nic_bind = \
            provision_tool(self.connection,
                           os.path.join(self.bin_path, "dpdk_nic_bind.py"))

        interfaces = self.vnfd["vdu"][0]['external-interface']
        # socket 0 if any NIC's PCI address has "0" at index 5, else 1
        self.socket = \
            next((0 for v in interfaces
                  if v['virtual-interface']["vpci"][5] == "0"), 1)

        bound_pci = [v['virtual-interface']["vpci"] for v in interfaces]
        for vpci in bound_pci:
            self.connection.execute(
                "%s --force -b igb_uio %s" % (dpdk_nic_bind, vpci))
        queue_wrapper = \
            QueueFileWrapper(self.q_in, self.q_out, "pipeline>")
        self._vnf_process = multiprocessing.Process(target=self._run_vpe,
                                                    args=(queue_wrapper,
                                                          vnf_cfg,))
        self._vnf_process.start()
        buf = []
        time.sleep(WAIT_TIME)  # Give some time for config to load
        while True:
            message = ''
            while self.q_out.qsize() > 0:
                buf.append(self.q_out.get())
                message = ''.join(buf)
            if "pipeline>" in message:
                LOG.info("VPE VNF is up and running.")
                queue_wrapper.clear()
                self._resource_collect_start()
                return self._vnf_process.exitcode
            if "PANIC" in message:
                raise RuntimeError("Error starting vPE VNF.")

            LOG.info("Waiting for VNF to start.. ")
            time.sleep(3)
            if not self._vnf_process.is_alive():
                raise RuntimeError("vPE VNF process died.")
+
+ def _get_ports_gateway(self, name):
+ if 'routing_table' in self.vnfd['vdu'][0]:
+ routing_table = self.vnfd['vdu'][0]['routing_table']
+
+ for route in routing_table:
+ if name == route['if']:
+ return route['gateway']
+
+ def terminate(self):
+ self.execute_command("quit")
+ if self._vnf_process:
+ self._vnf_process.terminate()
+
    def _run_vpe(self, filewrapper, vnf_cfg):
        """Child-process entry point: render configs and launch the vPE.

        Opens its own SSH session (this runs in a separate process),
        derives per-port addressing from the VNFD, renders every template
        in *vnf_cfg* to /tmp on the target, then starts the vPE pipeline
        binary with stdin/stdout bridged through *filewrapper* so the
        parent can talk to its CLI.

        :param filewrapper: QueueFileWrapper bridging the vPE CLI
        :param vnf_cfg: local directory containing vPE config templates
        """
        mgmt_interface = self.vnfd["mgmt-interface"]
        ssh_port = mgmt_interface.get("ssh_port", ssh.DEFAULT_PORT)
        self.connection = ssh.SSH(mgmt_interface["user"], mgmt_interface["ip"],
                                  password=mgmt_interface["password"],
                                  port=ssh_port)
        self.connection.wait()
        interfaces = self.vnfd["vdu"][0]['external-interface']
        port0_ip = ipaddress.ip_interface(six.text_type(
            "%s/%s" % (interfaces[0]["virtual-interface"]["local_ip"],
                       interfaces[0]["virtual-interface"]["netmask"])))
        port1_ip = ipaddress.ip_interface(six.text_type(
            "%s/%s" % (interfaces[1]["virtual-interface"]["local_ip"],
                       interfaces[1]["virtual-interface"]["netmask"])))
        dst_port0_ip = ipaddress.ip_interface(
            u"%s/%s" % (interfaces[0]["virtual-interface"]["dst_ip"],
                        interfaces[0]["virtual-interface"]["netmask"]))
        dst_port1_ip = ipaddress.ip_interface(
            u"%s/%s" % (interfaces[1]["virtual-interface"]["dst_ip"],
                        interfaces[1]["virtual-interface"]["netmask"]))

        # substitutions consumed by the vpe_config / vpe_script templates
        vpe_vars = {"port0_local_ip": port0_ip.ip.exploded,
                    "port0_dst_ip": dst_port0_ip.ip.exploded,
                    "port0_local_ip_hex":
                    self._ip_to_hex(port0_ip.ip.exploded),
                    "port0_prefixlen": port0_ip.network.prefixlen,
                    "port0_netmask": port0_ip.network.netmask.exploded,
                    "port0_netmask_hex":
                    self._ip_to_hex(port0_ip.network.netmask.exploded),
                    "port0_local_mac":
                    interfaces[0]["virtual-interface"]["local_mac"],
                    "port0_dst_mac":
                    interfaces[0]["virtual-interface"]["dst_mac"],
                    "port0_gateway":
                    self._get_ports_gateway(interfaces[0]["name"]),
                    "port0_local_network":
                    port0_ip.network.network_address.exploded,
                    "port0_prefix": port0_ip.network.prefixlen,
                    "port1_local_ip": port1_ip.ip.exploded,
                    "port1_dst_ip": dst_port1_ip.ip.exploded,
                    "port1_local_ip_hex":
                    self._ip_to_hex(port1_ip.ip.exploded),
                    "port1_prefixlen": port1_ip.network.prefixlen,
                    "port1_netmask": port1_ip.network.netmask.exploded,
                    "port1_netmask_hex":
                    self._ip_to_hex(port1_ip.network.netmask.exploded),
                    "port1_local_mac":
                    interfaces[1]["virtual-interface"]["local_mac"],
                    "port1_dst_mac":
                    interfaces[1]["virtual-interface"]["dst_mac"],
                    "port1_gateway":
                    self._get_ports_gateway(interfaces[1]["name"]),
                    "port1_local_network":
                    port1_ip.network.network_address.exploded,
                    "port1_prefix": port1_ip.network.prefixlen,
                    "port0_local_ip6": self._get_port0localip6(),
                    "port1_local_ip6": self._get_port1localip6(),
                    "port0_prefixlen6": self._get_port0prefixlen6(),
                    "port1_prefixlen6": self._get_port1prefixlen6(),
                    "port0_gateway6": self._get_port0gateway6(),
                    "port1_gateway6": self._get_port1gateway6(),
                    "port0_dst_ip_hex6": self._get_port0localip6(),
                    "port1_dst_ip_hex6": self._get_port1localip6(),
                    "port0_dst_netmask_hex6": self._get_port0prefixlen6(),
                    "port1_dst_netmask_hex6": self._get_port1prefixlen6(),
                    "bin_path": self.bin_path,
                    "socket": self.socket}

        # render every template in the config directory onto the target
        for cfg in os.listdir(vnf_cfg):
            vpe_config = ""
            with open(os.path.join(vnf_cfg, cfg), 'r') as vpe_cfg:
                vpe_config = vpe_cfg.read()

            self._provide_config_file(cfg, vpe_config, vpe_vars)

        LOG.info("Provision and start the vPE")
        tool_path = provision_tool(self.connection,
                                   os.path.join(self.bin_path, "vPE_vnf"))
        cmd = VPE_PIPELINE_COMMAND.format(cfg_file="/tmp/vpe_config",
                                          script="/tmp/vpe_script",
                                          tool_path=tool_path)
        # blocks while the vPE runs; CLI I/O flows through filewrapper
        self.connection.run(cmd, stdin=filewrapper, stdout=filewrapper,
                            keep_stdin_open=True, pty=True)
+
+ def _provide_config_file(self, prefix, template, args):
+ cfg, cfg_content = tempfile.mkstemp()
+ cfg = os.fdopen(cfg, "w+")
+ cfg.write(template.format(**args))
+ cfg.close()
+ cfg_file = "/tmp/%s" % prefix
+ self.connection.put(cfg_content, cfg_file)
+ return cfg_file
+
    def execute_command(self, cmd):
        ''' send cmd to vnf process

        Writes the command to the CLI input queue, waits three seconds for
        the pipeline to respond, then drains and returns the output queue.
        NOTE(review): "if self.q_in" tests the queue object's truthiness,
        which is always true once created - presumably meant as a
        None-guard; confirm.

        :param cmd: CLI command string (without trailing newline)
        :return: concatenated CLI output
        '''
        LOG.info("VPE command: %s", cmd)
        output = []
        if self.q_in:
            self.q_in.put(cmd + "\r\n")
            time.sleep(3)
            while self.q_out.qsize() > 0:
                output.append(self.q_out.get())
        return "".join(output)
+
+ def collect_kpi(self):
+ result = self.get_stats_vpe()
+ collect_stats = self._collect_resource_kpi()
+ result["collect_stats"] = collect_stats
+ LOG.debug("vPE collet Kpis: %s", result)
+ return result
+
    def get_stats_vpe(self):
        ''' get vpe statistics

        Issues "p <pipeline> stats port ..." CLI commands for the upstream
        (pipeline 5) and downstream (pipeline 9) directions and parses the
        "Pkts in / dropped by AH / dropped by other" triple from each
        response; both drop categories are summed into one counter.

        :return: dict with pkt_in / pkt_drop counters per direction
        '''
        result = {'pkt_in_up_stream': 0, 'pkt_drop_up_stream': 0,
                  'pkt_in_down_stream': 0, 'pkt_drop_down_stream': 0}
        up_stat_commands = ['p 5 stats port in 0', 'p 5 stats port out 0',
                            'p 5 stats port out 1']
        down_stat_commands = ['p 9 stats port in 0', 'p 9 stats port out 0']
        pattern = \
            "Pkts in:\\s(\\d+)\\r\\n\\tPkts dropped by " \
            "AH:\\s(\\d+)\\r\\n\\tPkts dropped by other:\\s(\\d+)"

        for cmd in up_stat_commands:
            stats = self.execute_command(cmd)
            match = re.search(pattern, stats, re.MULTILINE)
            if match:
                result["pkt_in_up_stream"] = \
                    result.get("pkt_in_up_stream", 0) + int(match.group(1))
                result["pkt_drop_up_stream"] = \
                    result.get("pkt_drop_up_stream", 0) + \
                    int(match.group(2)) + int(match.group(3))

        for cmd in down_stat_commands:
            stats = self.execute_command(cmd)
            match = re.search(pattern, stats, re.MULTILINE)
            if match:
                result["pkt_in_down_stream"] = \
                    result.get("pkt_in_down_stream", 0) + int(match.group(1))
                result["pkt_drop_down_stream"] = \
                    result.get("pkt_drop_down_stream", 0) + \
                    int(match.group(2)) + int(match.group(3))
        return result