diff options
83 files changed, 7543 insertions, 788 deletions
@@ -66,3 +66,4 @@ target/ /src/qemu/qemu/ /src_cuse/ /src_vanilla/ +.vagrant @@ -11,12 +11,13 @@ Repository: vswitchperf Commiters: maryam.tahhan@intel.com -chenjinzhou@huawei.com -randy.wang@huawei.com -challa@noironetworks.com -tgraf@noironetworks.com Eugene.Snider@huawei.com acmorton@att.com +ctrautma@redhat.com +martinx.klozik@intel.com +sridhar.rao@spirent.com +bmichalo@redhat.com Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/ Link(s) to approval of additional submitters: +Link(s) to removal of submitters: http://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2016/opnfv-meeting.2016-05-17-13.59.html diff --git a/conf/01_testcases.conf b/conf/01_testcases.conf index 46d00e5a..23a3ae57 100755 --- a/conf/01_testcases.conf +++ b/conf/01_testcases.conf @@ -28,10 +28,12 @@ # "Frame Modification": "vlan" # One of the supported frame modifications: # # vlan, mpls, mac, dscp, ttl, ip_addr, # # ip_port. -# "biDirectional": [true|false], # Specifies if generated traffic will be -# # full-duplex (true) or half-duplex (false) +# "biDirectional": ["True"|"False"] +# # Specifies if generated traffic will be +# # full-duplex (True) or half-duplex (False) # # It can be overridden by cli option bidirectional. -# # Default value is "false". +# # Default value is "False". Must be of type +# # string. # "MultiStream": 0-65535 # Optional. Defines number of flows simulated # # by traffic generator. Value 0 disables # # MultiStream feature @@ -100,6 +102,24 @@ # # value will be used # "options": "" # Optional. Additional command line options # # to be passed to the load generator. +# "vSwitch" : "OvsVanilla" # Defines vSwitch to be used for test execution. +# # It will override any VSWITCH option stated +# # in configuration files or value specified +# # on command line through --vswitch parameter. +# "VNF" : "QemuVirtioNet" # Defines VNF to be used for test execution. 
+# # It will override any VNF option stated +# # in configuration files or value specified +# # on command line through --vnf parameter. +# "Trafficgen" : "Dummy" # Defines traffic generator to be used for test +# # execution. It will override any VNF option +# # stated in configuration files or value +# # specified on command line through --trafficgen +# # parameter. +# "Parameters" : "pkt_sizes=512" # Defines list of test parameters used for test +# # execution. It will override any values defined +# # by TEST_PARAMS option stated in configuration +# # files or values specified on command line through +# # --test-params parameter. # "Test Modifier": [FrameMod|Other], # "Dependency": [Test_Case_Name |None], @@ -155,7 +175,7 @@ PERFORMANCE_TESTS = [ "Traffic Type": "rfc2544", "Deployment": "p2p", "biDirectional": "True", - "Description": "LTD.Scalability.RFC2544.0PacketLoss", + "Description": "LTD.Scalability.Flows.RFC2544.0PacketLoss", "MultiStream": "8000", }, { diff --git a/conf/02_vswitch.conf b/conf/02_vswitch.conf index f0475313..79f0afbd 100644 --- a/conf/02_vswitch.conf +++ b/conf/02_vswitch.conf @@ -31,14 +31,12 @@ VSWITCH_DIR = os.path.join(ROOT_DIR, 'vswitches') # DPDK target used when builing DPDK RTE_TARGET = 'x86_64-native-linuxapp-gcc' -# list of NIC HWIDs which will be bound to the 'igb_uio' driver on -# system init -WHITELIST_NICS = ['05:00.0', '05:00.1'] - -# list of NIC HWIDs which will be ignored by the 'igb_uio' driver on -# system init -BLACKLIST_NICS = ['0000:09:00.0', '0000:09:00.1', '0000:09:00.2', - '0000:09:00.3'] +# list of NIC HWIDs to which traffic generator is connected +# In case of NIC with SRIOV suport, it is possible to define, +# which virtual function should be used +# e.g. 
value '0000:05:00.0|vf1' will configure two VFs and second VF +# will be used for testing +WHITELIST_NICS = ['0000:05:00.0', '0000:05:00.1'] # for DPDK_MODULES the path is in reference to the build directory # To use vfio set @@ -71,10 +69,27 @@ VHOST_USER_SOCKS = ['/tmp/dpdkvhostuser0', '/tmp/dpdkvhostuser1', # ############################ # These are DPDK EAL parameters and they may need to be changed depending on # hardware configuration, like cpu numbering and NUMA. +# +# parameters used for legacy DPDK configuration through '--dpdk' option of ovs-vswitchd +# e.g. ovs-vswitchd --dpdk --socket-mem 1024,0 VSWITCHD_DPDK_ARGS = ['-c', '0x4', '-n', '4', '--socket-mem 1024,0'] -VSWITCHD_VANILLA_ARGS = ['--pidfile'] -VSWITCH_VANILLA_PHY_PORT_NAMES = ['', ''] +# options used for new type of OVS configuration via calls to ovs-vsctl +# e.g. ovs-vsctl --no-wait set Open_vSwitch . other_config:dpdk-socket-mem="1024,0" +VSWITCHD_DPDK_CONFIG = { + 'dpdk-init' : 'true', + 'dpdk-lcore-mask' : '0x4', + 'dpdk-socket-mem' : '1024,0', +} +# Note: VSPERF will automatically detect, which type of DPDK configuration should +# be used. + +# To enable multi queue modify the below param to the number of queues. +# 0 = disabled +VSWITCH_MULTI_QUEUES = 0 + +# parameters passed to ovs-vswitchd in case that OvsVanilla is selected +VSWITCHD_VANILLA_ARGS = [] # use full module path to load module matching OVS version built from the source VSWITCH_VANILLA_KERNEL_MODULES = ['libcrc32c', 'ip_tunnel', 'vxlan', 'gre', 'nf_conntrack', 'nf_defrag_ipv4', 'nf_defrag_ipv6', os.path.join(OVS_DIR_VANILLA, 'datapath/linux/openvswitch.ko')] diff --git a/conf/03_traffic.conf b/conf/03_traffic.conf index eb09bf09..01e3e5cf 100644 --- a/conf/03_traffic.conf +++ b/conf/03_traffic.conf @@ -27,6 +27,8 @@ TRAFFICGEN_DIR = os.path.join(ROOT_DIR, 'tools/pkt_gen') TRAFFICGEN = 'Dummy' #TRAFFICGEN = 'IxNet' #TRAFFICGEN = 'Ixia' +#TRAFFICGEN = 'Xena' +#TRAFFICGEN = 'MoonGen' # List of packet sizes to send. 
# Expand like this: (64, 128, 256, 512, 1024) @@ -160,3 +162,30 @@ TRAFFICGEN_STC_VERBOSE = "True" # Spirent TestCenter Configuration -- END ######################################### +# Xena traffic generator connection info +TRAFFICGEN_XENA_IP = '' +TRAFFICGEN_XENA_PORT1 = '' +TRAFFICGEN_XENA_PORT2 = '' +TRAFFICGEN_XENA_USER = '' +TRAFFICGEN_XENA_PASSWORD = '' +TRAFFICGEN_XENA_MODULE1 = '' +TRAFFICGEN_XENA_MODULE2 = '' + +# Xena Port IP info +TRAFFICGEN_XENA_PORT0_IP = '192.168.199.10' +TRAFFICGEN_XENA_PORT0_CIDR = 24 +TRAFFICGEN_XENA_PORT0_GATEWAY = '192.168.199.1' +TRAFFICGEN_XENA_PORT1_IP = '192.168.199.11' +TRAFFICGEN_XENA_PORT1_CIDR = 24 +TRAFFICGEN_XENA_PORT1_GATEWAY = '192.168.199.1' + +################################################### +# MoonGen Configuration and Connection Info-- BEGIN + +TRAFFICGEN_MOONGEN_HOST_IP_ADDR = '' +TRAFFICGEN_MOONGEN_USER = '' +TRAFFICGEN_MOONGEN_BASE_DIR = '' +TRAFFICGEN_MOONGEN_PORTS = '' + +# MoonGen Configuration and Connection Info-- END +################################################### diff --git a/conf/04_vnf.conf b/conf/04_vnf.conf index dc15fd15..0a80c1af 100644 --- a/conf/04_vnf.conf +++ b/conf/04_vnf.conf @@ -35,6 +35,9 @@ GUEST_IMAGE = ['', ''] # For 2 VNFs you may use [180, 180] GUEST_TIMEOUT = [180, 180] +# packet forwarding mode: io|mac|mac_retry|macswap|flowgen|rxonly|txonly|csum|icmpecho +GUEST_TESTPMD_FWD_MODE = 'csum' + # guest loopback application method; supported options are: # 'testpmd' - testpmd from dpdk will be built and used # 'l2fwd' - l2fwd module provided by Huawei will be built and used @@ -42,7 +45,7 @@ GUEST_TIMEOUT = [180, 180] # 'buildin' - nothing will be configured by vsperf; VM image must # ensure traffic forwarding between its interfaces # This configuration option can be overridden by CLI SCALAR option -# guest_loopback, e.g. --test-param "guest_loopback=l2fwd" +# guest_loopback, e.g. 
--test-params "guest_loopback=l2fwd" # For 2 VNFs you may use ['testpmd', 'l2fwd'] GUEST_LOOPBACK = ['testpmd', 'testpmd'] @@ -95,10 +98,25 @@ GUEST_SMP = ['2', '2'] # For 2 VNFs you may use [(4,5), (6, 7)] GUEST_CORE_BINDING = [(6, 7), (9, 10)] +# Queues per NIC inside guest for multi-queue configuration, requires switch +# multi-queue to be enabled. Set to 0 for disabled. +GUEST_NIC_QUEUES = 0 + GUEST_START_TIMEOUT = 120 GUEST_OVS_DPDK_DIR = '/root/ovs_dpdk' OVS_DPDK_SHARE = '/mnt/ovs_dpdk_share' +# Set the CPU mask for testpmd loopback. To bind to specific guest CPUs use -l +# GUEST_TESTPMD_CPU_MASK = '-l 0,1' +GUEST_TESTPMD_CPU_MASK = '-c 0x3' + +# Testpmd multi-core config. Leave at 0's for disabled. Will not enable unless +# GUEST_NIC_QUEUES are > 0. For bi directional traffic NB_CORES must be equal +# to (RXQ + TXQ). +GUEST_TESTPMD_NB_CORES = 0 +GUEST_TESTPMD_TXQ = 0 +GUEST_TESTPMD_RXQ = 0 + # IP addresses to use for Vanilla OVS PVP testing # Consider using RFC 2544/3330 recommended IP addresses for benchmark testing. 
# Network: 198.18.0.0/15 diff --git a/conf/10_custom.conf b/conf/10_custom.conf index be8244ec..4ffe470e 100644 --- a/conf/10_custom.conf +++ b/conf/10_custom.conf @@ -19,6 +19,8 @@ RTE_TARGET = '' # the relevant DPDK build target TRAFFICGEN = 'Dummy' #TRAFFICGEN = 'IxNet' #TRAFFICGEN = 'Ixia' +#TRAFFICGEN = 'Xena' +#TRAFFICGEN = 'MoonGen' ########################################### # Spirent TestCenter Configuration -- BEGIN @@ -60,8 +62,36 @@ TRAFFICGEN_IXNET_USER = '' TRAFFICGEN_IXNET_TESTER_RESULT_DIR = '' TRAFFICGEN_IXNET_DUT_RESULT_DIR = '' -TEST_PARAMS = {'packet_sizes':'64'} +# Xena traffic generator connection info +TRAFFICGEN_XENA_IP = '' +TRAFFICGEN_XENA_PORT1 = '' +TRAFFICGEN_XENA_PORT2 = '' +TRAFFICGEN_XENA_USER = '' +TRAFFICGEN_XENA_PASSWORD = '' +TRAFFICGEN_XENA_MODULE1 = '' +TRAFFICGEN_XENA_MODULE2 = '' +# Xena Port IP info +TRAFFICGEN_XENA_PORT0_IP = '192.168.199.10' +TRAFFICGEN_XENA_PORT0_CIDR = 24 +TRAFFICGEN_XENA_PORT0_GATEWAY = '192.168.199.1' +TRAFFICGEN_XENA_PORT1_IP = '192.168.199.11' +TRAFFICGEN_XENA_PORT1_CIDR = 24 +TRAFFICGEN_XENA_PORT1_GATEWAY = '192.168.199.1' + +################################################### +# MoonGen Configuration and Connection Info-- BEGIN + +# Ex: TRAFFICGEN_MOONGEN_HOST_IP_ADDR = "192.10.1.1" +TRAFFICGEN_MOONGEN_HOST_IP_ADDR = "" +TRAFFICGEN_MOONGEN_USER = "root" +TRAFFICGEN_MOONGEN_BASE_DIR = "/root/MoonGen" +TRAFFICGEN_MOONGEN_PORTS = "{0,1}" + +# MoonGen Configuration and Connection Info-- END +################################################### + +#TEST_PARAMS = {'pkt_sizes':'64'} OPNFV_INSTALLER = "Fuel" OPNFV_URL = "http://testresults.opnfv.org/testapi" PACKAGE_LIST = "src/package-list.mk" diff --git a/conf/integration/01_testcases.conf b/conf/integration/01_testcases.conf index fff148d4..2edbe08b 100644 --- a/conf/integration/01_testcases.conf +++ b/conf/integration/01_testcases.conf @@ -16,6 +16,13 @@ # tunneling protocol for OP2P tests. 
SUPPORTED_TUNNELING_PROTO = ['vxlan', 'gre', 'geneve'] +# +# Generic test configuration options are described at conf/01_testcases.conf +# + +# +# Options specific to integration testcases are described below: +# # Required for OP2P tests # "Tunnel Type": ["vxlan"|"gre"|"geneve"] # Tunnel Type defines tunneling protocol to use. # # It can be overridden by cli option tunnel_type. @@ -38,12 +45,98 @@ SUPPORTED_TUNNELING_PROTO = ['vxlan', 'gre', 'geneve'] # # Where i is a number of step (starts from 0) # # and j is index of result returned by step i. +# +# Common TestSteps parts ("macros") +# + +# P2P macros +STEP_VSWITCH_P2P_FLOWS_INIT = [ + ['vswitch', 'add_switch', 'int_br0'], # STEP 0 + ['vswitch', 'add_phy_port', 'int_br0'], # STEP 1 + ['vswitch', 'add_phy_port', 'int_br0'], # STEP 2 + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}], +] + +STEP_VSWITCH_P2P_FLOWS_FINIT = [ + ['vswitch', 'dump_flows', 'int_br0'], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], + ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], + ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'], + ['vswitch', 'del_switch', 'int_br0'], +] + +# PVP and PVVP macros +STEP_VSWITCH_PVP_INIT = [ + ['vswitch', 'add_switch', 'int_br0'], # STEP 0 + ['vswitch', 'add_phy_port', 'int_br0'], # STEP 1 + ['vswitch', 'add_phy_port', 'int_br0'], # STEP 2 + ['vswitch', 'add_vport', 'int_br0'], # STEP 3 + ['vswitch', 'add_vport', 'int_br0'], # STEP 4 +] + +STEP_VSWITCH_PVP_FINIT = [ + ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], + ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'], + ['vswitch', 'del_port', 'int_br0', '#STEP[3][0]'], + ['vswitch', 'del_port', 'int_br0', '#STEP[4][0]'], + ['vswitch', 'del_switch', 'int_br0'], +] + 
+STEP_VSWITCH_PVP_FLOWS_INIT = STEP_VSWITCH_PVP_INIT + [ + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[4][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[3][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}], +] + +STEP_VSWITCH_PVP_FLOWS_FINIT = [ + ['vswitch', 'dump_flows', 'int_br0'], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[4][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[3][1]'}], +] + STEP_VSWITCH_PVP_FINIT + +STEP_VSWITCH_PVVP_INIT = STEP_VSWITCH_PVP_INIT + [ + ['vswitch', 'add_vport', 'int_br0'], # STEP 5 + ['vswitch', 'add_vport', 'int_br0'], # STEP 6 +] + +STEP_VSWITCH_PVVP_FINIT = [ + ['vswitch', 'del_port', 'int_br0', '#STEP[5][0]'], + ['vswitch', 'del_port', 'int_br0', '#STEP[6][0]'], +] + STEP_VSWITCH_PVP_FINIT + +STEP_VSWITCH_PVVP_FLOWS_INIT = STEP_VSWITCH_PVVP_INIT + [ + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[3][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[4][1]', 'actions': ['output:#STEP[5][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[6][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[6][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[5][1]', 'actions': ['output:#STEP[4][1]'], 'idle_timeout': '0'}], + ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[3][1]', 'actions': 
['output:#STEP[1][1]'], 'idle_timeout': '0'}], +] + +STEP_VSWITCH_PVVP_FLOWS_FINIT = [ + ['vswitch', 'dump_flows', 'int_br0'], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[4][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[6][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[5][1]'}], + ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[3][1]'}], +] + STEP_VSWITCH_PVVP_FINIT + +# +# Definition of integration tests +# INTEGRATION_TESTS = [ { "Name": "overlay_p2p_tput", "Traffic Type": "rfc2544", "Deployment": "op2p", - "biDirectional": False, + "biDirectional": 'False', "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0], "Tunnel Operation": "encapsulation", "Description": "Overlay Encapsulation Throughput RFC2544 Test", @@ -52,7 +145,7 @@ INTEGRATION_TESTS = [ "Name": "overlay_p2p_cont", "Traffic Type": "continuous", "Deployment": "op2p", - "biDirectional": False, + "biDirectional": 'False', "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0], "Tunnel Operation": "encapsulation", "Description": "Overlay Encapsulation Continuous Stream", @@ -61,7 +154,7 @@ INTEGRATION_TESTS = [ "Name": "overlay_p2p_decap_tput", "Traffic Type": "rfc2544", "Deployment": "op2p", - "biDirectional": False, + "biDirectional": 'False', "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0], "Tunnel Operation": "decapsulation", "Description": "Overlay Decapsulation Throughput RFC2544 Test", @@ -70,7 +163,7 @@ INTEGRATION_TESTS = [ "Name": "overlay_p2p_decap_cont", "Traffic Type": "continuous", "Deployment": "op2p", - "biDirectional": False, + "biDirectional": 'False', "Tunnel Type": SUPPORTED_TUNNELING_PROTO[0], "Tunnel Operation": "decapsulation", "Description": "Overlay Decapsulation Continuous Stream", @@ -162,76 +255,187 @@ INTEGRATION_TESTS = [ "Name": "vswitch_add_del_flows", "Deployment": "clean", "Description": "vSwitch - add and delete 
flows", - "TestSteps": [ - ['vswitch', 'add_switch', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}], - ['vswitch', 'dump_flows', 'int_br0'], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], - ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], - ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'], - ['vswitch', 'del_switch', 'int_br0'], - ] + "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT + + STEP_VSWITCH_P2P_FLOWS_FINIT }, { - "Name": "vswitch_throughput", + "Name": "vswitch_p2p_tput", "Deployment": "clean", "Description": "vSwitch - configure switch and execute RFC2544 throughput test", - "TestSteps": [ - ['vswitch', 'add_switch', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}], - ['trafficgen', 'send_traffic', {'traffic_type' : 'throughput', 'bidir' : True, 'frame_rate' : 100, 'multistream' : 0, 'stream_type' : 'L4'}], - ['vswitch', 'dump_flows', 'int_br0'], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], - ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], - ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'], - ['vswitch', 'del_switch', 'int_br0'], - ] + "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT + + [ + ['trafficgen', 'send_traffic', {'traffic_type' : 'throughput', 'bidir' : 'True'}], + ] + + STEP_VSWITCH_P2P_FLOWS_FINIT }, { - "Name": 
"vswitch_back2back", + "Name": "vswitch_p2p_back2back", "Deployment": "clean", "Description": "vSwitch - configure switch and execute RFC2544 back2back test", - "TestSteps": [ - ['vswitch', 'add_switch', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}], - ['trafficgen', 'send_traffic', {'traffic_type' : 'back2back', 'bidir' : True, 'frame_rate' : 100, 'multistream' : 0, 'stream_type' : 'L4'}], - ['vswitch', 'dump_flows', 'int_br0'], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], - ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], - ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'], - ['vswitch', 'del_switch', 'int_br0'], - ] + "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT + + [ + ['trafficgen', 'send_traffic', {'traffic_type' : 'back2back', 'bidir' : 'True'}], + ] + + STEP_VSWITCH_P2P_FLOWS_FINIT }, { - "Name": "vswitch_continuous", + "Name": "vswitch_p2p_cont", "Deployment": "clean", "Description": "vSwitch - configure switch and execute continuous stream test", - "TestSteps": [ - ['vswitch', 'add_switch', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_phy_port', 'int_br0'], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[1][1]', 'actions': ['output:#STEP[2][1]'], 'idle_timeout': '0'}], - ['vswitch', 'add_flow', 'int_br0', {'in_port': '#STEP[2][1]', 'actions': ['output:#STEP[1][1]'], 'idle_timeout': '0'}], - ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : True, 'frame_rate' : 100, 'multistream' : 0, 'stream_type' : 'L4'}], - ['vswitch', 'dump_flows', 'int_br0'], - ['vswitch', 'del_flow', 'int_br0', {'in_port': '#STEP[1][1]'}], - ['vswitch', 
'del_flow', 'int_br0', {'in_port': '#STEP[2][1]'}], - ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], - ['vswitch', 'del_port', 'int_br0', '#STEP[2][0]'], - ['vswitch', 'del_switch', 'int_br0'], - ] + "TestSteps": STEP_VSWITCH_P2P_FLOWS_INIT + + [ + ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : 'True'}], + ] + + STEP_VSWITCH_P2P_FLOWS_FINIT + }, + { + "Name": "vswitch_pvp", + "Deployment": "clean", + "Description": "vSwitch - configure switch and one vnf", + "TestSteps": STEP_VSWITCH_PVP_INIT + + [ + ['vnf', 'start'], + ['vnf', 'stop'], + ] + + STEP_VSWITCH_PVP_FINIT + }, + { + "Name": "vswitch_pvp_tput", + "Deployment": "clean", + "Description": "vSwitch - configure switch, vnf and execute RFC2544 throughput test", + "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT + + [ + ['vnf', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'throughput', 'bidir' : 'True'}], + ['vnf', 'stop'], + ] + + STEP_VSWITCH_PVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvp_back2back", + "Deployment": "clean", + "Description": "vSwitch - configure switch, vnf and execute RFC2544 back2back test", + "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT + + [ + ['vnf', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'back2back', 'bidir' : 'True'}], + ['vnf', 'stop'], + ] + + STEP_VSWITCH_PVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvp_cont", + "Deployment": "clean", + "Description": "vSwitch - configure switch, vnf and execute continuous stream test", + "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT + + [ + ['vnf', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : 'True'}], + ['vnf', 'stop'], + ] + + STEP_VSWITCH_PVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvp_all", + "Deployment": "clean", + "Description": "vSwitch - configure switch, vnf and execute all test types", + "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT + + [ + ['vnf', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'throughput', 'bidir' : 'True'}], + 
['trafficgen', 'send_traffic', {'traffic_type' : 'back2back', 'bidir' : 'True'}], + ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : 'True'}], + ['vnf', 'stop'], + ] + + STEP_VSWITCH_PVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvvp", + "Deployment": "clean", + "Description": "vSwitch - configure switch and two vnfs", + "TestSteps": STEP_VSWITCH_PVVP_INIT + + [ + ['vnf1', 'start'], + ['vnf2', 'start'], + ['vnf1', 'stop'], + ['vnf2', 'stop'], + ] + + STEP_VSWITCH_PVVP_FINIT + }, + { + "Name": "vswitch_pvvp_tput", + "Deployment": "clean", + "Description": "vSwitch - configure switch, two chained vnfs and execute RFC2544 throughput test", + "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT + + [ + ['vnf1', 'start'], + ['vnf2', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'throughput', 'bidir' : 'True'}], + ['vnf1', 'stop'], + ['vnf2', 'stop'], + ] + + STEP_VSWITCH_PVVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvvp_back2back", + "Deployment": "clean", + "Description": "vSwitch - configure switch, two chained vnfs and execute RFC2544 back2back test", + "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT + + [ + ['vnf1', 'start'], + ['vnf2', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'back2back', 'bidir' : 'True'}], + ['vnf1', 'stop'], + ['vnf2', 'stop'], + ] + + STEP_VSWITCH_PVVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvvp_cont", + "Deployment": "clean", + "Description": "vSwitch - configure switch, two chained vnfs and execute continuous stream test", + "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT + + [ + ['vnf1', 'start'], + ['vnf2', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : 'True'}], + ['vnf1', 'stop'], + ['vnf2', 'stop'], + ] + + STEP_VSWITCH_PVVP_FLOWS_FINIT + }, + { + "Name": "vswitch_pvvp_all", + "Deployment": "clean", + "Description": "vSwitch - configure switch, two chained vnfs and execute all test types", + "TestSteps": STEP_VSWITCH_PVVP_FLOWS_INIT + + [ + ['vnf1', 'start'], + 
['vnf2', 'start'], + ['trafficgen', 'send_traffic', {'traffic_type' : 'throughput', 'bidir' : 'True'}], + ['trafficgen', 'send_traffic', {'traffic_type' : 'back2back', 'bidir' : 'True'}], + ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : 'True'}], + ['vnf1', 'stop'], + ['vnf2', 'stop'], + ] + + STEP_VSWITCH_PVVP_FLOWS_FINIT }, ] +# Example of TC definition with exact vSwitch, VNF and TRAFFICGEN values. +# { +# "Name": "ovs_vanilla_linux_bridge_pvp_cont", +# "Deployment": "clean", +# "Description": "vSwitch - configure OVS Vanilla, QemuVirtioNet with linux bridge and execute continuous stream test", +# "vSwitch" : "OvsVanilla", +# "VNF" : "QemuVirtioNet", +# "Trafficgen": "IxNet", +# "Test Parameters": {"guest_loopback" : "linux_bridge"}, +# "TestSteps": STEP_VSWITCH_PVP_FLOWS_INIT + +# [ +# ['vnf', 'start'], +# ['trafficgen', 'send_traffic', {'traffic_type' : 'continuous', 'bidir' : 'True'}], +# ['vnf', 'stop'], +# ] + +# STEP_VSWITCH_PVP_FLOWS_FINIT +# }, diff --git a/core/component_factory.py b/core/component_factory.py index 9c58fc5c..a91872e2 100644 --- a/core/component_factory.py +++ b/core/component_factory.py @@ -118,13 +118,14 @@ def create_loadgen(loadgen_type, loadgen_cfg): elif loadgen_type.find("stress") >= 0: return Stress(loadgen_cfg) -def create_pktfwd(pktfwd_class): +def create_pktfwd(deployment, pktfwd_class): """Return a new packet forwarder controller The returned controller is configured with the given packet forwarder class. :param pktfwd_class: Reference to packet forwarder class to be used. 
+ :param deployment: The deployment scenario name :return: packet forwarder controller """ - return PktFwdController(pktfwd_class) + return PktFwdController(deployment, pktfwd_class) diff --git a/core/loader/loader_servant.py b/core/loader/loader_servant.py index dc6353ff..226b0931 100644 --- a/core/loader/loader_servant.py +++ b/core/loader/loader_servant.py @@ -90,8 +90,8 @@ class LoaderServant(object): desc = (mod.__doc__ or 'No description').strip().split('\n')[0] results.append((name, desc)) - output = [ - 'Classes derived from: ' + self._interface.__name__ + '\n======\n'] + header = 'Classes derived from: ' + self._interface.__name__ + output = [header + '\n' + '=' * len(header) + '\n'] for (name, desc) in results: output.append('* %-18s%s' % ('%s:' % name, desc)) diff --git a/core/pktfwd_controller.py b/core/pktfwd_controller.py index 40565504..b1e37f2e 100644 --- a/core/pktfwd_controller.py +++ b/core/pktfwd_controller.py @@ -24,11 +24,12 @@ class PktFwdController(object): _pktfwd_class: The packet forwarder class to be used. _pktfwd: The packet forwarder object controlled by this controller """ - def __init__(self, pktfwd_class): + def __init__(self, deployment, pktfwd_class): """Initializes up the prerequisites for the P2P deployment scenario. :vswitch_class: the vSwitch class to be used. 
""" + self._deployment = deployment self._logger = logging.getLogger(__name__) self._pktfwd_class = pktfwd_class self._pktfwd = pktfwd_class() @@ -52,10 +53,12 @@ class PktFwdController(object): self._pktfwd.stop() def __enter__(self): - self.setup() + if self._deployment.find("p2p") == 0: + self.setup() def __exit__(self, type_, value, traceback): - self.stop() + if self._deployment.find("p2p") == 0: + self.stop() def get_pktfwd(self): """Get the controlled packet forwarder diff --git a/core/results/results_constants.py b/core/results/results_constants.py index 1049e89b..b7ab7052 100644 --- a/core/results/results_constants.py +++ b/core/results/results_constants.py @@ -59,6 +59,8 @@ class ResultsConstants(object): SCAL_STREAM_TYPE = 'match_type' SCAL_PRE_INSTALLED_FLOWS = 'pre-installed_flows' + TEST_RUN_TIME = "test_execution_time" + @staticmethod def get_traffic_constants(): """Method returns all Constants used to store results. diff --git a/core/traffic_controller_rfc2544.py b/core/traffic_controller_rfc2544.py index 2630101f..81e499cd 100644 --- a/core/traffic_controller_rfc2544.py +++ b/core/traffic_controller_rfc2544.py @@ -41,6 +41,7 @@ class TrafficControllerRFC2544(ITrafficController, IResults): self._traffic_started_call_count = 0 self._trials = int(get_test_param('rfc2544_trials', 1)) self._duration = int(get_test_param('duration', 30)) + self._lossrate = float(get_test_param('lossrate', 0.0)) self._results = [] # If set, comma separated packet_sizes value from --test_params @@ -100,13 +101,13 @@ class TrafficControllerRFC2544(ITrafficController, IResults): if traffic['traffic_type'] == 'back2back': result = self._traffic_gen_class.send_rfc2544_back2back( - traffic, trials=self._trials, duration=self._duration) + traffic, trials=self._trials, duration=self._duration, lossrate=self._lossrate) elif traffic['traffic_type'] == 'continuous': result = self._traffic_gen_class.send_cont_traffic( traffic, duration=self._duration) else: result = 
self._traffic_gen_class.send_rfc2544_throughput( - traffic, trials=self._trials, duration=self._duration) + traffic, trials=self._trials, duration=self._duration, lossrate=self._lossrate) result = TrafficControllerRFC2544._append_results(result, packet_size) diff --git a/core/vnf_controller.py b/core/vnf_controller.py index 39a63044..8800ccaf 100644 --- a/core/vnf_controller.py +++ b/core/vnf_controller.py @@ -15,6 +15,7 @@ """ import logging +import pexpect from vnfs.vnf.vnf import IVnf class VnfController(object): @@ -68,8 +69,12 @@ class VnfController(object): """ self._logger.debug('start ' + str(len(self._vnfs)) + ' VNF[s] with ' + ' '.join(map(str, self._vnfs))) - for vnf in self._vnfs: - vnf.start() + try: + for vnf in self._vnfs: + vnf.start() + except pexpect.TIMEOUT: + self.stop() + raise def stop(self): """Stops all VNFs set-up by __init__. diff --git a/core/vswitch_controller_op2p.py b/core/vswitch_controller_op2p.py index 77797b8f..ee8ada8b 100644 --- a/core/vswitch_controller_op2p.py +++ b/core/vswitch_controller_op2p.py @@ -77,11 +77,13 @@ class VswitchControllerOP2P(IVswitchController): vtep_ip2 = settings.getValue('VTEP_IP2') self._vswitch.add_switch(bridge) - tasks.run_task(['sudo', 'ifconfig', bridge, - settings.getValue('VTEP_IP1')], + tasks.run_task(['sudo', 'ip', 'addr', 'add', + settings.getValue('VTEP_IP1'), 'dev', bridge], self._logger, 'Assign ' + settings.getValue('VTEP_IP1') + ' to ' + bridge, False) + tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', bridge, 'up'], + self._logger, 'Bring up ' + bridge, False) tunnel_type = self._traffic['tunnel_type'] @@ -137,10 +139,12 @@ class VswitchControllerOP2P(IVswitchController): tgen_ip1 = settings.getValue('TRAFFICGEN_PORT1_IP') self._vswitch.add_switch(bridge) - tasks.run_task(['sudo', 'ifconfig', bridge, - settings.getValue('VTEP_IP1')], + tasks.run_task(['sudo', 'ip', 'addr', 'add', + settings.getValue('VTEP_IP1'), 'dev', bridge], self._logger, 'Assign ' + settings.getValue('VTEP_IP1') + 
' to ' + bridge, False) + tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', bridge, 'up'], + self._logger, 'Bring up ' + bridge, False) tunnel_type = self._traffic['tunnel_type'] @@ -195,10 +199,12 @@ class VswitchControllerOP2P(IVswitchController): tgen_ip1 = settings.getValue('TRAFFICGEN_PORT1_IP') self._vswitch.add_switch(bridge) - tasks.run_task(['sudo', 'ifconfig', bridge, - settings.getValue('TUNNEL_INT_BRIDGE_IP')], + tasks.run_task(['sudo', 'ip', 'addr', 'add', + settings.getValue('TUNNEL_INT_BRIDGE_IP'), 'dev', bridge], self._logger, 'Assign ' + settings.getValue('TUNNEL_INT_BRIDGE_IP') + ' to ' + bridge, False) + tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', bridge, 'up'], + self._logger, 'Bring up ' + bridge, False) tunnel_type = self._traffic['tunnel_type'] diff --git a/core/vswitch_controller_pvp.py b/core/vswitch_controller_pvp.py index 0c98cc7f..a4f61961 100644 --- a/core/vswitch_controller_pvp.py +++ b/core/vswitch_controller_pvp.py @@ -77,7 +77,7 @@ class VswitchControllerPVP(IVswitchController): self._vswitch.add_flow(bridge, flow1) self._vswitch.add_flow(bridge, flow2) - if self._traffic['bidir']: + if self._traffic['bidir'] == 'True': flow3 = add_ports_to_flow(flow_template, phy2_number, vport2_number) flow4 = add_ports_to_flow(flow_template, vport1_number, diff --git a/core/vswitch_controller_pvvp.py b/core/vswitch_controller_pvvp.py index c79ad9a3..729aca3f 100644 --- a/core/vswitch_controller_pvvp.py +++ b/core/vswitch_controller_pvvp.py @@ -82,7 +82,7 @@ class VswitchControllerPVVP(IVswitchController): self._vswitch.add_flow(bridge, flow2) self._vswitch.add_flow(bridge, flow3) - if self._traffic['bidir']: + if self._traffic['bidir'] == 'True': flow4 = add_ports_to_flow(flow_template, phy2_number, vport4_number) flow5 = add_ports_to_flow(flow_template, vport3_number, diff --git a/docs/configguide/installation.rst b/docs/configguide/installation.rst index 354979b0..5072dee0 100755 --- a/docs/configguide/installation.rst +++ 
b/docs/configguide/installation.rst @@ -104,6 +104,29 @@ Fedora, RedHat and Ubuntu $ cd $HOME/vsperfenv $ source bin/activate +Gotcha +^^^^^^ +.. code:: bash + $ source bin/activate + Badly placed ()'s. + +Check what type of shell you are using + +.. code:: bash + echo $shell + /bin/tcsh + +See what scripts are available in $HOME/vsperfenv/bin + +.. code:: bash + $ ls bin/ + activate activate.csh activate.fish activate_this.py + +source the appropriate script + +.. code:: bash + $ source bin/activate.csh + Working Behind a Proxy ====================== @@ -119,3 +142,51 @@ running any of the above. For example: .. _virtualenv: https://virtualenv.readthedocs.org/en/latest/ .. _vloop-vnf-ubuntu-14.04_20160303: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20160303.qcow2 .. _vloop-vnf-ubuntu-14.04_20151216: http://artifacts.opnfv.org/vswitchperf/vnf/vloop-vnf-ubuntu-14.04_20151216.qcow2 + +Hugepage Configuration +---------------------- + +Systems running vsperf with either dpdk and/or tests with guests must configure +hugepage amounts to support running these configurations. It is recommended +to configure 1GB hugepages as the pagesize. + +The amount of hugepages needed depends on your configuration files in vsperf. +Each guest image requires 4096 by default according to the default settings in +the ``04_vnf.conf`` file. + +.. code:: bash + + GUEST_MEMORY = ['4096', '4096'] + +The dpdk startup parameters also require an amount of hugepages depending on +your configuration in the ``02_vswitch.conf`` file. + +.. code:: bash + + VSWITCHD_DPDK_ARGS = ['-c', '0x4', '-n', '4', '--socket-mem 1024,1024'] + VSWITCHD_DPDK_CONFIG = { + 'dpdk-init' : 'true', + 'dpdk-lcore-mask' : '0x4', + 'dpdk-socket-mem' : '1024,1024', + } + +Note: Option VSWITCHD_DPDK_ARGS is used for vswitchd, which supports --dpdk +parameter. In recent vswitchd versions, option VSWITCHD_DPDK_CONFIG will be +used to configure vswitchd via ovs-vsctl calls. 
+ +With the --socket-mem argument set to use 1 hugepage on the specified sockets as +seen above, the configuration will need 9 hugepages total to run all tests +within vsperf if the pagesize is set correctly to 1GB. + +Depending on your OS selection configuration of hugepages may vary. Please refer +to your OS documentation to set hugepages correctly. It is recommended to set +the required amount of hugepages to be allocated by default on reboots. + +Information on hugepage requirements for dpdk can be found at +http://dpdk.org/doc/guides/linux_gsg/sys_reqs.html + +You can review your hugepage amounts by executing the following command + +.. code:: bash + + cat /proc/meminfo | grep Huge diff --git a/docs/configguide/trafficgen.rst b/docs/configguide/trafficgen.rst index 6e7626d8..63560b9c 100644 --- a/docs/configguide/trafficgen.rst +++ b/docs/configguide/trafficgen.rst @@ -14,6 +14,8 @@ VSPERF supports the following traffic generators: traffic generator. * IXIA (IxNet and IxOS) * Spirent TestCenter + * Xena Networks + * MoonGen To see the list of traffic gens from the cli: @@ -67,7 +69,7 @@ OR from the commandline: .. code-block:: console - $ ./vsperf --test-param "pkt_sizes=x,y" $TESTNAME + $ ./vsperf --test-params "pkt_sizes=x,y" $TESTNAME You can also modify the traffic transmission duration and the number of trials run by the traffic generator by extending the example @@ -75,7 +77,7 @@ commandline above to: .. code-block:: console - $ ./vsperf --test-param "pkt_sizes=x,y;duration=10;rfc2455_trials=3" $TESTNAME + $ ./vsperf --test-params "pkt_sizes=x,y;duration=10;rfc2455_trials=3" $TESTNAME Dummy Setup ------------ @@ -222,3 +224,111 @@ best practice results in deploying STCv, the following is suggested: To get the highest performance and accuracy, Spirent TestCenter hardware is recommended. vsperf can run with either stype test ports. + +Using STC REST Client +~~~~~~~~~~~~~~~~~~~~~ +The stcrestclient package provides the stchttp.py ReST API wrapper module. 
+This allows simple function calls, nearly identical to those provided by +StcPython.py, to be used to access TestCenter server sessions via the +STC ReST API. Basic ReST functionality is provided by the resthttp module, +and may be used for writing ReST clients independent of STC. + +- Project page: <https://github.com/Spirent/py-stcrestclient> +- Package download: <http://pypi.python.org/pypi/stcrestclient> + +To use REST interface, follow the instructions in the Project page to +install the package. Once installed, the scripts named with 'rest' keyword +can be used. For example: testcenter-rfc2544-rest.py can be used to run +RFC 2544 tests using the REST interface. + +Xena Networks +------------- + +Installation +~~~~~~~~~~~~ + +Xena Networks traffic generator requires certain files and packages to be +installed. It is assumed the user has access to the Xena2544.exe file which +must be placed in VSPerf installation location under the tools/pkt_gen/xena +folder. Contact Xena Networks for the latest version of this file. The user +can also visit www.xenanetworks/downloads to obtain the file with a valid +support contract. + +**Note** VSPerf has been fully tested with version v2.43 of Xena2544.exe + +To execute the Xena2544.exe file under Linux distributions the mono-complete +package must be installed. To install this package follow the instructions +below. Further information can be obtained from +http://www.mono-project.com/docs/getting-started/install/linux/ + +.. code-block:: console + + rpm --import "http://keyserver.ubuntu.com/pks/lookup?op=get&search=0x3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF" + yum-config-manager --add-repo http://download.mono-project.com/repo/centos/ + yum -y install mono-complete + +To prevent gpg errors on future yum installation of packages the mono-project +repo should be disabled once installed. + +.. 
code-block:: console + + yum-config-manager --disable download.mono-project.com_repo_centos_ + +Configuration +~~~~~~~~~~~~~ + +Connection information for your Xena Chassis must be supplied inside the +``10_custom.conf`` or ``03_custom.conf`` file. The following parameters must be +set to allow for proper connections to the chassis. + +.. code-block:: console + + TRAFFICGEN_XENA_IP = '' + TRAFFICGEN_XENA_PORT1 = '' + TRAFFICGEN_XENA_PORT2 = '' + TRAFFICGEN_XENA_USER = '' + TRAFFICGEN_XENA_PASSWORD = '' + TRAFFICGEN_XENA_MODULE1 = '' + TRAFFICGEN_XENA_MODULE2 = '' + + +MoonGen +------- + +Installation +~~~~~~~~~~~~ + +MoonGen architecture overview and general installation instructions +can be found here: + +https://github.com/emmericp/MoonGen + +For VSPerf use, MoonGen should be cloned from here (as opposed to the afore +mentioned GitHub): + +git clone https://github.com/atheurer/MoonGen + +and use the opnfv-stable branch: + +git checkout opnfv-stable + +VSPerf uses a particular example script under the examples directory within +the MoonGen project: + +MoonGen/examples/opnfv-vsperf.lua + +Follow MoonGen set up instructions here: + +https://github.com/atheurer/MoonGen/blob/opnfv-stable/MoonGenSetUp.html + +Note one will need to set up ssh login to not use passwords between the server +running MoonGen and the device under test (running the VSPERF test +infrastructure). This is because VSPERF on one server uses 'ssh' to +configure and run MoonGen upon the other server. + +One can set up this ssh access by doing the following on both servers: + +.. code-block:: console + + ssh-keygen -b 2048 -t rsa + ssh-copy-id <other server> diff --git a/docs/release/NEWS.rst b/docs/release/NEWS.rst index efeafbbe..19ad3240 100644 --- a/docs/release/NEWS.rst +++ b/docs/release/NEWS.rst @@ -2,6 +2,22 @@ .. http://creativecommons.org/licenses/by/4.0 .. (c) OPNFV, Intel Corporation, AT&T and others. 
+OPNFV Colorado Release +====================== +* Support for OVS version 2.5 + DPDK 2.2. +* Support for DPDK v16.04 +* Support for Xena traffic generator. +* Support for Integration tests for OVS with DPDK including: + * Physical ports. + * Virtual ports (vhost user and vhost cuse). + * Flow addition and removal tests. + * Overlay (VXLAN, GRE and NVGRE) encapsulation and decapsulation tests. +* Supporting configuration of OVS with DPDK through the OVS DB as well as the + legacy commandline arguments. +* Support for VM loopback (SR-IOV) benchmarking. +* Support for platform baseline benchmarking without a vswitch using testpmd. +* Support for Spirent Test Center REST APIs. + OPNFV Brahmaputra Release ========================= Supports both OVS and OVS with DPDK. @@ -126,6 +142,7 @@ follow once the community has digested the initial release. - Support for biDirectional functionality for ixnet interface Missing -------- +======= - xmlunit output is currently disabled + diff --git a/docs/requirements/ietf_draft/draft-vsperf-bmwg-vswitch-opnfv-02.txt b/docs/requirements/ietf_draft/draft-vsperf-bmwg-vswitch-opnfv-02.txt index 317a68ab..0f5be592 100755 --- a/docs/requirements/ietf_draft/draft-vsperf-bmwg-vswitch-opnfv-02.txt +++ b/docs/requirements/ietf_draft/draft-vsperf-bmwg-vswitch-opnfv-02.txt @@ -54,7 +54,7 @@ Status of This Memo Tahhan, et al. Expires September 22, 2016 [Page 1] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -110,7 +110,7 @@ Table of Contents Tahhan, et al. Expires September 22, 2016 [Page 2] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -166,7 +166,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 3] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -222,7 +222,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. 
Expires September 22, 2016 [Page 4] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -278,7 +278,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 5] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -334,7 +334,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 6] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -390,7 +390,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 7] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -446,7 +446,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 8] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -502,7 +502,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 9] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -558,7 +558,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 10] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -614,7 +614,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 11] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -670,7 +670,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 12] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -726,7 +726,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 13] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -782,7 +782,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 14] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -838,7 +838,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. 
Expires September 22, 2016 [Page 15] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -894,7 +894,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 16] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -950,7 +950,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 17] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -1006,7 +1006,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 18] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -1062,7 +1062,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 19] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -1118,7 +1118,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 20] - + Internet-Draft Benchmarking vSwitches March 2016 @@ -1174,7 +1174,7 @@ Internet-Draft Benchmarking vSwitches March 2016 Tahhan, et al. Expires September 22, 2016 [Page 21] - + Internet-Draft Benchmarking vSwitches March 2016 diff --git a/docs/requirements/vswitchperf_ltd.rst b/docs/requirements/vswitchperf_ltd.rst index 1827fe6d..6b882290 100644 --- a/docs/requirements/vswitchperf_ltd.rst +++ b/docs/requirements/vswitchperf_ltd.rst @@ -1285,11 +1285,11 @@ Test ID: LTD.Throughput.RFC2544.Profile to the DUT's RFC 2544 Throughput as determined by LTD.Throughput.RFC2544.PacketLoss Ratio (0% Packet Loss case). A delta of 0% is equivalent to an offered traffic rate equal to the RFC 2544 - Throughput; A delta of +50% indicates an offered rate half-way - between the Throughput and line-rate, whereas a delta of - -50% indicates an offered rate of half the maximum rate. Therefore the - range of the delta figure is natuarlly bounded at -100% (zero offered - traffic) and +100% (traffic offered at line rate). 
+ Maximum Throughput; A delta of +50% indicates an offered rate half-way + between the Maximum RFC2544 Throughput and line-rate, whereas a delta of + -50% indicates an offered rate of half the RFC 2544 Maximum Throughput. + Therefore the range of the delta figure is natuarlly bounded at -100% + (zero offered traffic) and +100% (traffic offered at line rate). The following deltas to the maximum forwarding rate should be applied: @@ -1861,12 +1861,93 @@ Test ID: LTD.Throughput.RFC2544.WorstN-BestN `RFC2544 <https://www.rfc-editor.org/rfc/rfc2544.txt>`__). - Following may also be collected as part of this test, to determine the vSwitch's performance footprint on the system: + - CPU core utilization. - CPU cache utilization. - Memory footprint. - System bus (QPI, PCI, ...) utilization. - CPU cycles consumed per packet. +.. 3.2.3.1.14 + +Test ID: LTD.Throughput.Overlay.Network.<tech>.RFC2544.PacketLossRatio +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + **Title**: <tech> Overlay Network RFC 2544 X% packet loss ratio Throughput and Latency Test + + + NOTE: Throughout this test, four interchangeable overlay technologies are covered by the + same test description. They are: VXLAN, GRE, NVGRE and GENEVE. + + **Prerequisite Test**: N/A + + **Priority**: + + **Description**: + This test evaluates standard switch performance benchmarks for the scenario where an + Overlay Network is deployed for all paths through the vSwitch. 
Overlay Technologies covered + (replacing <tech> in the test name) include: + + - VXLAN + - GRE + - NVGRE + - GENEVE + + Performance will be assessed for each of the following overlay network functions: + + - Encapsulation only + - De-encapsulation only + - Both Encapsulation and De-encapsulation + + For each native packet, the DUT must perform the following operations: + + - Examine the packet and classify its correct overlay net (tunnel) assignment + - Encapsulate the packet + - Switch the packet to the correct port + + For each encapsulated packet, the DUT must perform the following operations: + + - Examine the packet and classify its correct native network assignment + - De-encapsulate the packet, if required + - Switch the packet to the correct port + + The selected frame sizes are those previously defined under `Default + Test Parameters <#DefaultParams>`__. + + Thus, each test comprises an overlay technology, a network function, + and a packet size *with* overlay network overhead included + (but see also the discussion at + https://etherpad.opnfv.org/p/vSwitchTestsDrafts ). + + The test can also be used to determine the average latency of the traffic. + + Under the `RFC2544 <https://www.rfc-editor.org/rfc/rfc2544.txt>`__ + test methodology, the test duration will + include a number of trials; each trial should run for a minimum period + of 60 seconds. A binary search methodology must be applied for each + trial to obtain the final result for Throughput. + + **Expected Result**: At the end of each trial, the presence or absence + of loss determines the modification of offered load for the next trial, + converging on a maximum rate, or + `RFC2544 <https://www.rfc-editor.org/rfc/rfc2544.txt>`__ Throughput with X% + loss (where the value of X is typically equal to zero). + The Throughput load is re-used in related + `RFC2544 <https://www.rfc-editor.org/rfc/rfc2544.txt>`__ tests and other + tests. 
+ + **Metrics Collected**: + The following are the metrics collected for this test: + + - The maximum Throughput in Frames Per Second (FPS) and Mbps of + the DUT for each frame size with X% packet loss. + - The average latency of the traffic flow when passing through the DUT + and VNFs (if testing for latency, note that this average is different from the + test specified in Section 26.3 of + `RFC2544 <https://www.rfc-editor.org/rfc/rfc2544.txt>`__). + - CPU and memory utilization may also be collected as part of this + test, to determine the vSwitch's performance footprint on the system. + + .. 3.2.3.2 Packet Latency tests @@ -1969,9 +2050,9 @@ It is expected that more will be added. .. 3.2.3.3.1 -Test ID: LTD.Scalability.RFC2544.0PacketLoss +Test ID: LTD.Scalability.Flows.RFC2544.0PacketLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - **Title**: RFC 2544 0% loss Scalability throughput test + **Title**: RFC 2544 0% loss Flow Scalability throughput test **Prerequisite Test**: LTD.Throughput.RFC2544.PacketLossRatio, IF the delta Throughput between the single-flow RFC2544 test and this test with @@ -2219,16 +2300,16 @@ Test ID: LTD.Scalability.VNF.RFC2544.PacketLossProfile The following are the metrics collected for this test: - The forwarding rate in Frames Per Second (FPS) and Mbps of the DUT - for each delta to the maximum forwarding rate and for each frame - size. + for each delta to the maximum forwarding rate and for each frame + size. - The average latency for each delta to the maximum forwarding rate and - for each frame size. + for each frame size. - CPU and memory utilization may also be collected as part of this - test, to determine the vSwitch's performance footprint on the system. + test, to determine the vSwitch's performance footprint on the system. 
- Any failures experienced (for example if the vSwitch crashes, stops processing packets, restarts or becomes unresponsive to commands) - when the offered load is above Maximum Throughput MUST be recorded - and reported with the results. + when the offered load is above Maximum Throughput MUST be recorded + and reported with the results. .. 3.2.3.4 @@ -2395,7 +2476,7 @@ should be required. It is expected that more will be added. .. 3.2.3.6.1 Test ID: LTD.Stress.RFC2544.0PacketLoss -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Title**: RFC 2544 0% Loss CPU OR Memory Stress Test **Prerequisite Test**: @@ -2429,6 +2510,7 @@ Test ID: LTD.Stress.RFC2544.0PacketLoss **Note:** Stress in the test ID can be replaced with the name of the component being stressed, when reporting the results: LTD.CPU.RFC2544.0PacketLoss or LTD.Memory.RFC2544.0PacketLoss + .. 3.2.3.7 Summary List of Tests @@ -2447,6 +2529,8 @@ Summary List of Tests - Test ID: LTD.Throughput.RFC2889.ForwardPressure - Test ID: LTD.Throughput.RFC2889.ErrorFramesFiltering - Test ID: LTD.Throughput.RFC2889.BroadcastFrameForwarding + - Test ID: LTD.Throughput.RFC2544.WorstN-BestN + - Test ID: LTD.Throughput.Overlay.Network.<tech>.RFC2544.PacketLossRatio 2. Packet Latency tests @@ -2455,7 +2539,7 @@ Summary List of Tests 3. 
Scalability tests - - Test ID: LTD.Scalability.RFC2544.0PacketLoss + - Test ID: LTD.Scalability.Flows.RFC2544.0PacketLoss - Test ID: LTD.MemoryBandwidth.RFC2544.0PacketLoss.Scalability - LTD.Scalability.VNF.RFC2544.PacketLossProfile - LTD.Scalability.VNF.RFC2544.PacketLossRatio diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst index 591a1211..1a796dbf 100644 --- a/docs/userguide/index.rst +++ b/docs/userguide/index.rst @@ -12,3 +12,4 @@ VSPERF User Guide testusage.rst integration.rst + yardstick.rst diff --git a/docs/userguide/integration.rst b/docs/userguide/integration.rst index 27bf2cd0..eccd0c76 100755 --- a/docs/userguide/integration.rst +++ b/docs/userguide/integration.rst @@ -7,13 +7,101 @@ Integration tests VSPERF includes a set of integration tests defined in conf/integration. These tests can be run by specifying --integration as a parameter to vsperf. -Current tests in conf/integration are Overlay tests. +Current tests in conf/integration include switch functionality and Overlay +tests. -VSPERF supports VXLAN, GRE and GENEVE tunneling protocols. +Tests in the conf/integration can be used to test scaling of different switch +configurations by adding steps into the test case. + +For the overlay tests VSPERF supports VXLAN, GRE and GENEVE tunneling protocols. Testing of these protocols is limited to unidirectional traffic and P2P (Physical to Physical scenarios). -NOTE: The configuration for overlay tests provided in this guide is for unidirectional traffic only. +NOTE: The configuration for overlay tests provided in this guide is for +unidirectional traffic only. + +Executing Integration Tests +--------------------------- + +To execute integration tests VSPERF is run with the integration parameter. To +view the current test list simply execute the following command: + +.. code-block:: console + + ./vsperf --integration --list + +The standard tests included are defined inside the +``conf/integration/01_testcases.conf`` file. 
+ +Test Steps +---------- + +Execution of integration tests are done on a step by step work flow starting +with step 0 as defined inside the test case. Each step of the test increments +the step number by one which is indicated in the log. + +.. code-block:: console + + (testcases.integration) - Step 1 - 'vswitch add_switch ['int_br1']' ... OK + +Each step in the test case is validated. If a step does not pass validation the +test will fail and terminate. The test will continue until a failure is detected +or all steps pass. A csv report file is generated after a test completes with an +OK or FAIL result. + +Test Macros +----------- + +Test profiles can include macros as part of the test step. Each step in the +profile may return a value such as a port name. Recall macros use #STEP to +indicate the recalled value inside the return structure. If the method the +test step calls returns a value it can be later recalled, for example: + +.. code-block:: python + + { + "Name": "vswitch_add_del_vport", + "Deployment": "clean", + "Description": "vSwitch - add and delete virtual port", + "TestSteps": [ + ['vswitch', 'add_switch', 'int_br0'], # STEP 0 + ['vswitch', 'add_vport', 'int_br0'], # STEP 1 + ['vswitch', 'del_port', 'int_br0', '#STEP[1][0]'], # STEP 2 + ['vswitch', 'del_switch', 'int_br0'], # STEP 3 + ] + } + +This test profile uses the the vswitch add_vport method which returns a string +value of the port added. This is later called by the del_port method using the +name from step 1. + +Also commonly used steps can be created as a separate profile. + +.. code-block:: python + + STEP_VSWITCH_PVP_INIT = [ + ['vswitch', 'add_switch', 'int_br0'], # STEP 0 + ['vswitch', 'add_phy_port', 'int_br0'], # STEP 1 + ['vswitch', 'add_phy_port', 'int_br0'], # STEP 2 + ['vswitch', 'add_vport', 'int_br0'], # STEP 3 + ['vswitch', 'add_vport', 'int_br0'], # STEP 4 + ] + +This profile can then be used inside other testcases + +.. 
code-block:: python + + { + "Name": "vswitch_pvp", + "Deployment": "clean", + "Description": "vSwitch - configure switch and one vnf", + "TestSteps": STEP_VSWITCH_PVP_INIT + + [ + ['vnf', 'start'], + ['vnf', 'stop'], + ] + + STEP_VSWITCH_PVP_FINIT + } Executing Tunnel encapsulation tests ------------------------------------ @@ -90,7 +178,6 @@ To run OVS NATIVE tunnel tests (VXLAN/GRE/GENEVE): .. code-block:: python VSWITCH = 'OvsVanilla' - VSWITCH_VANILLA_PHY_PORT_NAMES = ['nic1name', 'nic2name'] # Specify vport_* kernel module to test. VSWITCH_VANILLA_KERNEL_MODULES = ['vport_vxlan', 'vport_gre', diff --git a/docs/userguide/testusage.rst b/docs/userguide/testusage.rst index c20651b5..d807590d 100755 --- a/docs/userguide/testusage.rst +++ b/docs/userguide/testusage.rst @@ -174,7 +174,7 @@ Some tests allow for configurable parameters, including test duration $ ./vsperf --conf-file user_settings.py --tests RFC2544Tput - --test-param "duration=10;pkt_sizes=128" + --test-params "duration=10;pkt_sizes=128" For all available options, check out the help dialog: @@ -199,7 +199,6 @@ for Vanilla OVS: .. code-block:: console VSWITCH = 'OvsVanilla' - VSWITCH_VANILLA_PHY_PORT_NAMES = ['$PORT1', '$PORT2'] Where $PORT1 and $PORT2 are the Linux interfaces you'd like to bind to the vswitch. @@ -291,7 +290,7 @@ To run tests using Vanilla OVS: or use --test-param $ ./vsperf --conf-file=<path_to_custom_conf>/10_custom.conf - --test-param "vanilla_tgen_tx_ip=n.n.n.n; + --test-params "vanilla_tgen_tx_ip=n.n.n.n; vanilla_tgen_tx_mac=nn:nn:nn:nn:nn:nn" @@ -309,6 +308,8 @@ To run tests using Vanilla OVS: $ ./vsperf --conf-file<path_to_custom_conf>/10_custom.conf +.. _vfio-pci: + Using vfio_pci with DPDK ^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -346,6 +347,65 @@ To check that IOMMU is enabled on your platform: [ 3.335746] IOMMU: dmar1 using Queued invalidation .... +.. 
_SRIOV-support: + +Using SRIOV support +^^^^^^^^^^^^^^^^^^^ + +To use virtual functions of NIC with SRIOV support, use extended form +of NIC PCI slot definition: + +.. code-block:: python + + WHITELIST_NICS = ['0000:05:00.0|vf0', '0000:05:00.1|vf3'] + +Where 'vf' is an indication of virtual function usage and following +number defines a VF to be used. In case that VF usage is detected, +then vswitchperf will enable SRIOV support for given card and it will +detect PCI slot numbers of selected VFs. + +So in example above, one VF will be configured for NIC '0000:05:00.0' +and four VFs will be configured for NIC '0000:05:00.1'. Vswitchperf +will detect PCI addresses of selected VFs and it will use them during +test execution. + +At the end of vswitchperf execution, SRIOV support will be disabled. + +SRIOV support is generic and it can be used in different testing scenarios. +For example: + +* vSwitch tests with DPDK or without DPDK support to verify impact + of VF usage on vSwitch performance +* tests without vSwitch, where traffic is forwared directly + between VF interfaces by packet forwarder (e.g. testpmd application) +* tests without vSwitch, where VM accesses VF interfaces directly + by PCI-passthrough_ to measure raw VM throughput performance. + +.. _PCI-passthrough: + +Using QEMU with PCI passthrough support +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Raw virtual machine throughput performance can be measured by execution of PVP +test with direct access to NICs by PCI passthrough. To execute VM with direct +access to PCI devices, enable vfio-pci_. In order to use virtual functions, +SRIOV-support_ must be enabled. + +Execution of test with PCI passthrough with vswitch disabled: + +.. code-block:: console + + $ ./vsperf --conf-file=<path_to_custom_conf>/10_custom.conf + --vswtich none --vnf QemuPciPassthrough pvp_tput + +Any of supported guest-loopback-application_ can be used inside VM with +PCI passthrough support. 
+ +Note: Qemu with PCI passthrough support can be used only with PVP test +deployment. + +.. _guest-loopback-application: + Selection of loopback application for PVP and PVVP tests ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -361,7 +421,7 @@ or use --test-param .. code-block:: console $ ./vsperf --conf-file=<path_to_custom_conf>/10_custom.conf - --test-param "guest_loopback=testpmd" + --test-params "guest_loopback=testpmd" Supported loopback applications are: @@ -377,6 +437,64 @@ Guest loopback application must be configured, otherwise traffic will not be forwarded by VM and testcases with PVP and PVVP deployments will fail. Guest loopback application is set to 'testpmd' by default. +Multi-Queue Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^ + +VSPerf currently supports multi-queue with the following limitations: + + 1. Execution of pvp/pvvp tests require testpmd as the loopback if multi-queue + is enabled at the guest. + + 2. Requires QemuDpdkVhostUser as the vnf. + + 3. Requires switch to be set to OvsDpdkVhost. + + 4. Requires QEMU 2.5 or greater and any OVS version higher than 2.5. The + default upstream package versions installed by VSPerf satisfy this + requirement. + +To enable multi-queue modify the ''02_vswitch.conf'' file to enable multi-queue +on the switch. + + .. code-block:: console + + VSWITCH_MULTI_QUEUES = 2 + +**NOTE:** you should consider using the switch affinity to set a pmd cpu mask +that can optimize your performance. Consider the numa of the NIC in use if this +applies by checking /sys/class/net/<eth_name>/device/numa_node and setting an +appropriate mask to create PMD threads on the same numa node. + +When multi-queue is enabled, each dpdk or dpdkvhostuser port that is created +on the switch will set the option for multiple queues. + +To enable multi-queue on the guest modify the ''04_vnf.conf'' file. + + .. 
code-block:: console + + GUEST_NIC_QUEUES = 2 + +Enabling multi-queue at the guest will add multiple queues to each NIC port when +qemu launches the guest. + +Testpmd should be configured to take advantage of multi-queue on the guest. This +can be done by modifying the ''04_vnf.conf'' file. + + .. code-block:: console + + GUEST_TESTPMD_CPU_MASK = '-l 0,1,2,3,4' + + GUEST_TESTPMD_NB_CORES = 4 + GUEST_TESTPMD_TXQ = 2 + GUEST_TESTPMD_RXQ = 2 + +**NOTE:** The guest SMP cores must be configured to allow for testpmd to use the +optimal number of cores to take advantage of the multiple guest queues. + +**NOTE:** For optimal performance guest SMPs should be on the same numa as the +NIC in use if possible/applicable. Testpmd should be assigned at least +(nb_cores +1) total cores with the cpu mask. + Executing Packet Forwarding tests ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -443,7 +561,7 @@ Mode of operation is driven by configuration parameter -m or --mode "trafficgen-pause" - execute vSwitch and VNF but wait before traffic transmission In case, that VSPERF is executed in "trafficgen" mode, then configuration -of traffic generator should be configured through --test-param option. +of traffic generator should be configured through --test-params option. Supported CLI options useful for traffic generator configuration are: .. code-block:: console @@ -509,6 +627,16 @@ an appropriate amount of memory: .. code-block:: console VSWITCHD_DPDK_ARGS = ['-c', '0x4', '-n', '4', '--socket-mem 1024,0'] + VSWITCHD_DPDK_CONFIG = { + 'dpdk-init' : 'true', + 'dpdk-lcore-mask' : '0x4', + 'dpdk-socket-mem' : '1024,0', + } + +Note: Option VSWITCHD_DPDK_ARGS is used for vswitchd, which supports --dpdk +parameter. In recent vswitchd versions, option VSWITCHD_DPDK_CONFIG will be +used to configure vswitchd via ovs-vsctl calls. 
+ More information ^^^^^^^^^^^^^^^^ diff --git a/docs/userguide/yardstick.rst b/docs/userguide/yardstick.rst new file mode 100755 index 00000000..7f09668d --- /dev/null +++ b/docs/userguide/yardstick.rst @@ -0,0 +1,223 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Intel Corporation, AT&T and others. + +Execution of vswitchperf testcases by Yardstick +----------------------------------------------- + +General +^^^^^^^ + +Yardstick is a generic framework for a test execution, which is used for +validation of installation of OPNFV platform. In the future, Yardstick will +support two options of vswitchperf testcase execution: + +- plugin mode, which will execute native vswitchperf testcases; Tests will + be executed the same way as today, but test results will be processed and + reported by yardstick. +- traffic generator mode, which will run vswitchperf in **trafficgen** + mode only; Yardstick framework will be used to launch VNFs and to configure + flows to ensure, that traffic is properly routed. This mode will allow to + test OVS performance in real world scenarios. + +In Colorado release only the traffic generator mode is supported. + +Yardstick Installation +^^^^^^^^^^^^^^^^^^^^^^ + +In order to run Yardstick testcases, you will need to prepare your test +environment. Please follow the `installation instructions +<http://artifacts.opnfv.org/yardstick/brahmaputra/docs/user_guides_framework/index.html>`__ +to install the yardstick. + +Please note, that yardstick uses OpenStack for execution of testcases. +OpenStack must be installed with Heat and Neutron services. Otherwise +vswitchperf testcases cannot be executed. + +Vswitchperf VM image preparation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In general, any Linux distribution supported by vswitchperf can be used as +a base image for vswitchperf. 
One of the possibilities is to modify vloop-vnf +image, which can be downloaded from `<http://artifacts.opnfv.org/>`__. + +.. code-block:: console + + $ wget http://artifacts.opnfv.org/vswitchperf/vloop-vnf-ubuntu-14.04_20151216.qcow2 + +Please follow the `installation instructions +<http://artifacts.opnfv.org/vswitchperf/docs/configguide/installation.html>`__ to +install vswitchperf inside vloop-vnf image. As vswitchperf will be run in +trafficgen mode, it is possible to skip installation and compilation of OVS, +QEMU and DPDK to keep image size smaller. + +In case, that selected traffic generator requires installation of additional +client software, please follow appropriate documentation. For example in case +of IXIA, you would need to install IxOS and IxNetwork TCL API. + +Final image with vswitchperf must be uploaded into the glance service and +vswitchperf specific flavor configured, e.g.: + +.. code-block:: console + + $ glance --os-username admin --os-image-api-version 1 image-create --name + vsperf --is-public true --disk-format qcow2 --container-format bare --file + image.qcow2 + + $ nova --os-username admin flavor-create vsperf-flavor 100 2048 25 1 + +Testcase customization +^^^^^^^^^^^^^^^^^^^^^^ + +Yardstick testcases are described by YAML files. vswitchperf specific testcases +are part of the vswitchperf repository and their yaml files can be found at +``yardstick/tests`` directory. For detailed description of yaml file structure, +please see yardstick documentation and testcase samples. Only vswitchperf specific +parts will be discussed here. + +Example of yaml file: + +.. code-block:: yaml + + ... 
+ scenarios: + - + type: Vsperf + options: + testname: 'rfc2544_p2p_tput' + traffic_type: 'rfc2544' + pkt_sizes: '64' + bidirectional: 'True' + iload: 100 + duration: 30 + trafficgen_port1: 'eth1' + trafficgen_port2: 'eth3' + external_bridge: 'br-ex' + conf-file: '~/vsperf-yardstick.conf' + + host: vsperf.demo + + runner: + type: Sequence + scenario_option_name: pkt_sizes + sequence: + - 64 + - 128 + - 512 + - 1024 + - 1518 + sla: + metrics: 'throughput_rx_fps' + throughput_rx_fps: 500000 + action: monitor + + context: + ... + +Section option +~~~~~~~~~~~~~~ + +Section **option** defines details of vswitchperf test scenario. Many options +are identical to the vswitchperf parameters passed through ``--test-params`` +argument. Following options are supported: + +- **traffic_type** - specifies the type of traffic executed by traffic generator; + valid values are "rfc2544", "continuous" and "back2back"; Default: 'rfc2544' +- **pkt_sizes** - a packet size for which test should be executed; + Multiple packet sizes can be tested by modification of Sequence runner + section inside YAML definition. Default: '64' +- **duration** - sets duration for which traffic will be generated; Default: 30 +- **bidirectional** - specifies if traffic will be uni (False) or bi-directional + (True); Default: False +- **iload** - specifies frame rate; Default: 100 +- **rfc2544_trials** - specifies the number of trials performed for each packet + size +- **multistream** - specifies the number of simulated streams; Default: 0 (i.e. 
+ multistream feature is disabled) +- **stream_type** - specifies network layer used for multistream simulation + the valid values are "L4", "L3" and "L2"; Default: 'L4' +- **conf-file** - sets path to the vswitchperf configuration file, which will be + uploaded to VM; Default: '~/vsperf-yardstick.conf' +- **setup-script** - sets path to the setup script, which will be executed + during setup and teardown phases +- **trafficgen_port1** - specifies device name of 1st interface connected to + the trafficgen +- **trafficgen_port2** - specifies device name of 2nd interface connected to + the trafficgen +- **external_bridge** - specifies name of external bridge configured in OVS; + Default: 'br-ex' + +In case that **trafficgen_port1** and/or **trafficgen_port2** are defined, then +these interfaces will be inserted into the **external_bridge** of OVS. It is +expected, that OVS runs at the same node, where the testcase is executed. In case +of more complex OpenStack installation or a need of additional OVS configuration, +**setup-script** can be used. + +Note: It is essential to prepare customized configuration file for the vsperf +and to specify its name by **conf-file** option. Config file must specify, which +traffic generator will be used and configure traffic generator specific options. + +Section runner +~~~~~~~~~~~~~~ + +Yardstick supports several `runner types +<http://artifacts.opnfv.org/yardstick/brahmaputra/docs/userguide/architecture.html#runner-types>`__. +In case of vswitchperf specific TCs, **Sequence** runner type can be used to +execute the testcase for given list of packet sizes. + + +Section sla +~~~~~~~~~~~ + +In case that sla section is not defined, then testcase will be always +considered as successful. On the other hand, it is possible to define a set of +test metrics and their minimal values to evaluate test success. Any numeric +value, reported by vswitchperf inside CSV result file, can be used. 
+Multiple metrics can be defined as a comma separated list of items. Minimal +value must be set separately for each metric. + +e.g.: + +.. code-block:: yaml + + sla: + metrics: 'throughput_rx_fps,throughput_rx_mbps' + throughput_rx_fps: 500000 + throughput_rx_mbps: 1000 + +In case that any of the defined metrics is lower than the defined value, the +testcase will be marked as failed. Based on ``action`` policy, yardstick +will either stop test execution (value ``assert``) or it will run next test +(value ``monitor``). + +Testcase execution +^^^^^^^^^^^^^^^^^^ + +After installation, yardstick is available as python package within yardstick +specific virtual environment. It means, that before test execution yardstick +environment must be enabled, e.g.: + +.. code-block:: console + + source ~/yardstick_venv/bin/activate + + +Next step is configuration of OpenStack environment, e.g. in case of devstack: + +.. code-block:: console + + source /opt/openstack/devstack/openrc + export EXTERNAL_NETWORK=public + +Vswitchperf testcases executable by yardstick are located at vswitchperf +repository inside ``yardstick/tests`` directory. Example of their download +and execution follows: + +.. code-block:: console + + git clone https://gerrit.opnfv.org/gerrit/vswitchperf + cd vswitchperf + + yardstick -d task start yardstick/tests/p2p_cont.yaml + +Note: Optional argument ``-d`` shows debug output. 
diff --git a/fuel-plugin-vsperf/deployment_scripts/puppet/manifests/vsperf-install.pp b/fuel-plugin-vsperf/deployment_scripts/puppet/manifests/vsperf-install.pp index bd38dbb7..7d075add 100644 --- a/fuel-plugin-vsperf/deployment_scripts/puppet/manifests/vsperf-install.pp +++ b/fuel-plugin-vsperf/deployment_scripts/puppet/manifests/vsperf-install.pp @@ -1,11 +1,7 @@ $fuel_settings = parseyaml(file('/etc/astute.yaml')) $master_ip = $::fuel_settings['master_ip'] -exec { "wget vsperf": - command => "wget http://$master_ip:8080/plugins/fuel-plugin-vsperf-1.0/repositories/ubuntu/vswitchperf.tgz -O /opt/vswitchperf.tgz", - path => "/usr/bin:/usr/sbin:/bin:/sbin", -} -exec { "untar vsperf": - command => "tar xf /opt/vswitchperf.tgz -C /opt", - path => "/usr/bin:/usr/sbin:/bin:/sbin", +exec { "install vsperf": + command => "mkdir -p /opt/vswitchperf; curl http://$master_ip:8080/plugins/fuel-plugin-vsperf-1.0/repositories/ubuntu/vswitchperf.tgz | tar xzv -C /opt/vswitchperf", + path => "/usr/bin:/usr/sbin:/bin:/sbin"; } diff --git a/fuel-plugin-vsperf/deployment_tasks.yaml b/fuel-plugin-vsperf/deployment_tasks.yaml index fe51488c..02a7c56b 100644 --- a/fuel-plugin-vsperf/deployment_tasks.yaml +++ b/fuel-plugin-vsperf/deployment_tasks.yaml @@ -1 +1,19 @@ -[] +- id: vsperf + type: group + role: [vsperf] + requires: [deploy_start] + required_for: [deploy_end] + tasks: [hiera, setup_repositories, fuel_pkgs, globals, tools, logging, vsperf_install] + parameters: + strategy: + type: parallel +- id: vsperf_install + type: puppet + version: 2.0.0 + groups: [vsperf] + required_for: [post_deployment_end] + requires: [post_deployment_start] + parameters: + puppet_manifest: puppet/manifests/vsperf-install.pp + puppet_modules: puppet/modules:/etc/puppet/modules + timeout: 720 diff --git a/fuel-plugin-vsperf/environment_config.yaml b/fuel-plugin-vsperf/environment_config.yaml index 1dd28b54..e69de29b 100644 --- a/fuel-plugin-vsperf/environment_config.yaml +++ 
b/fuel-plugin-vsperf/environment_config.yaml @@ -1,7 +0,0 @@ -attributes: - fuel-plugin-vsperf_text: - value: 'Set default value' - label: 'Text field' - description: 'Description for text field' - weight: 25 - type: "text" diff --git a/fuel-plugin-vsperf/metadata.yaml b/fuel-plugin-vsperf/metadata.yaml index 998a2593..2b28fc58 100644 --- a/fuel-plugin-vsperf/metadata.yaml +++ b/fuel-plugin-vsperf/metadata.yaml @@ -3,11 +3,11 @@ name: fuel-plugin-vsperf # Human-readable name for your plugin title: Enable VSPERF plugin # Plugin version -version: '1.0.0' +version: '1.0.9' # Description description: Deploy VSPERF code # Required fuel version -fuel_version: ['8.0'] +fuel_version: ['9.0'] # Specify license of your plugin licenses: ['Apache License Version 2.0'] # Specify author or company name @@ -21,10 +21,12 @@ groups: ['network'] # The plugin is compatible with releases in the list releases: - os: ubuntu - version: liberty-8.0 + version: mitaka-9.0 mode: ['ha'] deployment_scripts_path: deployment_scripts/ repository_path: repositories/ubuntu # Version of plugin package -package_version: '3.0.0' +package_version: '4.0.0' + +is_hotpluggable: true diff --git a/fuel-plugin-vsperf/node_roles.yaml b/fuel-plugin-vsperf/node_roles.yaml index 88b258a2..0f4f8350 100644 --- a/fuel-plugin-vsperf/node_roles.yaml +++ b/fuel-plugin-vsperf/node_roles.yaml @@ -1,5 +1,5 @@ vsperf: - name: "VSPERF node" + name: "VSPERF" description: "Install VSPERF on nodes with this role" has_primary: false # whether has primary role or not public_ip_required: false # whether requires public net or not diff --git a/fuel-plugin-vsperf/pre_build_hook b/fuel-plugin-vsperf/pre_build_hook index 12517bcc..b4da204f 100755 --- a/fuel-plugin-vsperf/pre_build_hook +++ b/fuel-plugin-vsperf/pre_build_hook @@ -7,22 +7,14 @@ set -eux -VSPERF_REV=${VSPERF_REV:-stable/brahmaputra} BUILD_FOR=${BUILD_FOR:-ubuntu} DIR="$(dirname `readlink -f $0`)" -INCLUDE_DEPENDENCIES=${INCLUDE_DEPENDENCIES:-true} - function build_pkg 
{ case $1 in ubuntu) - cd ${DIR} - rm -rf vswitchperf - git clone https://gerrit.opnfv.org/gerrit/vswitchperf - cd vswitchperf && git checkout ${VSPERF_REV} && cd .. - rm -rf vswitchperf/.git - tar cfvz ${DIR}/repositories/ubuntu/vswitchperf.tgz vswitchperf - rm -rf vswitchperf + cd ${DIR}/.. + tar cfvz ${DIR}/repositories/ubuntu/vswitchperf.tgz . --exclude=vswitchperf.tgz ;; *) echo "Not supported system"; exit 1;; esac @@ -32,5 +24,3 @@ for system in $BUILD_FOR do build_pkg $system done - - diff --git a/fuel-plugin-vsperf/tasks.yaml b/fuel-plugin-vsperf/tasks.yaml index 0cc003ef..fe51488c 100644 --- a/fuel-plugin-vsperf/tasks.yaml +++ b/fuel-plugin-vsperf/tasks.yaml @@ -1,7 +1 @@ -- role: ['compute'] - stage: post_deployment/8998 - type: puppet - parameters: - puppet_manifest: puppet/manifests/vsperf-install.pp - puppet_modules: puppet/modules:/etc/puppet/modules - timeout: 720 +[] diff --git a/fuel-plugin-vsperf/vagrant/Vagrantfile b/fuel-plugin-vsperf/vagrant/Vagrantfile new file mode 100644 index 00000000..d83ac4cc --- /dev/null +++ b/fuel-plugin-vsperf/vagrant/Vagrantfile @@ -0,0 +1,21 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
+VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + + config.vm.box = "trusty-server-cloudimg-amd64" + config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box" + + config.vm.define "fuel" do | h | + h.vm.host_name = "fuel" + h.vm.provision :shell, :inline => "/vagrant/build_fuel_plugin.sh" + h.vm.synced_folder "../..", "/vswitchperf" + h.vm.provider :virtualbox do |v| + v.customize ["modifyvm", :id, "--memory", 4096] + v.customize ["modifyvm", :id, "--cpus", 4] + end + end +end diff --git a/fuel-plugin-vsperf/vagrant/build_fuel_plugin.sh b/fuel-plugin-vsperf/vagrant/build_fuel_plugin.sh new file mode 100755 index 00000000..77b6a33d --- /dev/null +++ b/fuel-plugin-vsperf/vagrant/build_fuel_plugin.sh @@ -0,0 +1,11 @@ +#!/bin/bash +sudo apt-get update -y +sudo apt-get install createrepo rpm dpkg-dev -y +sudo apt-get install python-setuptools -y +sudo apt-get install python-pip -y +sudo easy_install pip +sudo pip install fuel-plugin-builder +sudo apt-get install ruby -y +sudo gem install rubygems-update +sudo gem install fpm +fpb --debug --build /vswitchperf/fuel-plugin-vsperf diff --git a/requirements.txt b/requirements.txt index 16b7ba1f..5d44bbd6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,4 @@ jinja2==2.7.3 xmlrunner==1.7.7 requests==2.8.1 netaddr==0.7.18 +scapy-python3==0.18
\ No newline at end of file diff --git a/src/dpdk/Makefile b/src/dpdk/Makefile index 25ec3f12..69255f75 100755 --- a/src/dpdk/Makefile +++ b/src/dpdk/Makefile @@ -1,7 +1,7 @@ # makefile to manage dpdk package # -# Copyright 2015 OPNFV +# Copyright 2015-2016 OPNFV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ # # Contributors: # Aihua Li, Huawei Technologies. +# Martin Klozik, Intel Corporation. include ../mk/master.mk include ../package-list.mk @@ -29,16 +30,31 @@ ifndef VHOST_USER endif WORK_DIR = dpdk TAG_DONE_FLAG = $(WORK_DIR)/.$(DPDK_TAG).tag.done -DPDK_VANILLA = ../../src_vanilla/dpdk DPDK_CUSE = ../../src_cuse/dpdk -# the name has been changed from version to version -ifeq ($(DPDK_TAG),v1.6.0r0) +# VHOST configuration options are stored in different files based on DPDK version +# v1.2.3r0-v1.6.0r2 - configuration inside config/defconfig_x86_64-default-linuxapp-gcc +# v1.7.0-rc1-v2.2.0 - configuration inside config/common_linuxapp +# v16 and newer - configuration split between config/common_linuxapp and config/common_base +DPDK_TAG_MAJOR = $(shell echo $(DPDK_TAG) | cut -d. -f1) +DPDK_TAG_MINOR = $(shell echo $(DPDK_TAG) | cut -d. 
-f2) +ifeq ($(DPDK_TAG_MAJOR),v1) +ifeq ($(DPDK_TAG_MINOR), $(filter $(DPDK_TAG_MINOR), 7 8)) + DPDK_TARGET = x86_64-native-linuxapp-gcc + CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linuxapp +else DPDK_TARGET = x86_64-default-linuxapp-gcc - CONFIG_FILE = $(WORK_DIR)/config/defconfig_x86_64-default-linuxapp-gcc + CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/defconfig_x86_64-default-linuxapp-gcc +endif +else +ifeq ($(DPDK_TAG_MAJOR),v2) + DPDK_TARGET = x86_64-native-linuxapp-gcc + CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linuxapp else DPDK_TARGET = x86_64-native-linuxapp-gcc - CONFIG_FILE = $(WORK_DIR)/config/common_linuxapp + CONFIG_FILE_BASE = $(WORK_DIR)/config/common_base + CONFIG_FILE_LINUXAPP = $(WORK_DIR)/config/common_linuxapp +endif endif all: force_make @@ -47,14 +63,27 @@ all: force_make INSTALL_TARGET = force_make # modify CONFIG_FILE to enable VHOST_USER build and restore original CONFIG_FILE after the build +# DPDK v16 comments: +## CONFIG_RTE_BUILD_COMBINE_LIBS has been obsoleted +## CONFIG_RTE_LIBRTE_VHOST and CONFIG_RTE_LIBRTE_KNI are listed in both config_base and config_linuxapp, +## values from config_linuxapp will be used, but options are modified at both places to avoid confusion. 
force_make: $(TAG_DONE_FLAG) $(AT)cd $(WORK_DIR) && git pull $(DPDK_URL) $(DPDK_TAG) - $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST_USER=.\+/CONFIG_RTE_LIBRTE_VHOST_USER=$(VHOST_USER)/g' $(CONFIG_FILE) - $(AT)sed -i -e 's/CONFIG_RTE_BUILD_COMBINE_LIBS=./CONFIG_RTE_BUILD_COMBINE_LIBS=y/g' $(CONFIG_FILE) - $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST=./CONFIG_RTE_LIBRTE_VHOST=y/g' $(CONFIG_FILE) - $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_KNI=./CONFIG_RTE_LIBRTE_KNI=n/g' $(CONFIG_FILE) +ifdef CONFIG_FILE_BASE + $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST_USER=.\+/CONFIG_RTE_LIBRTE_VHOST_USER=$(VHOST_USER)/g' $(CONFIG_FILE_BASE) + $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST=./CONFIG_RTE_LIBRTE_VHOST=y/g' $(CONFIG_FILE_BASE) + $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_KNI=./CONFIG_RTE_LIBRTE_KNI=n/g' $(CONFIG_FILE_BASE) +else + $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST_USER=.\+/CONFIG_RTE_LIBRTE_VHOST_USER=$(VHOST_USER)/g' $(CONFIG_FILE_LINUXAPP) + $(AT)sed -i -e 's/CONFIG_RTE_BUILD_COMBINE_LIBS=./CONFIG_RTE_BUILD_COMBINE_LIBS=y/g' $(CONFIG_FILE_LINUXAPP) +endif + $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST=./CONFIG_RTE_LIBRTE_VHOST=y/g' $(CONFIG_FILE_LINUXAPP) + $(AT)sed -i -e 's/CONFIG_RTE_LIBRTE_KNI=./CONFIG_RTE_LIBRTE_KNI=n/g' $(CONFIG_FILE_LINUXAPP) $(AT)cd $(WORK_DIR); make install T=$(DPDK_TARGET) -j DESTDIR=$(WORK_DIR) - $(AT)cd `dirname $(CONFIG_FILE)` && git checkout `basename $(CONFIG_FILE)` && cd - + $(AT)cd `dirname $(CONFIG_FILE_LINUXAPP)` && git checkout `basename $(CONFIG_FILE_LINUXAPP)` && cd - +ifdef CONFIG_FILE_BASE + $(AT)cd `dirname $(CONFIG_FILE_BASE)` && git checkout `basename $(CONFIG_FILE_BASE)` && cd - +endif $(AT)echo "VHOST_USER = $(VHOST_USER)" ifeq ($(VHOST_USER),n) $(AT)cd $(WORK_DIR)/lib/librte_vhost/eventfd_link; make @@ -70,7 +99,6 @@ clean: $(AT)cd $(WORK_DIR) && git clean -xfd *.o clobber: $(AT)rm -rf $(WORK_DIR) - $(AT)rm -rf $(DPDK_VANILLA) $(AT)rm -rf $(DPDK_CUSE) # distclean is for developer who would like to keep the @@ -86,8 +114,6 @@ sanity: 
$(WORK_DIR): $(AT)git clone $(DPDK_URL) - $(AT)mkdir -p $(DPDK_VANILLA) - $(AT)cp -rf ./* $(DPDK_VANILLA) $(AT)mkdir -p $(DPDK_CUSE) $(AT)cp -rf ./* $(DPDK_CUSE) diff --git a/src/dpdk/dpdk.py b/src/dpdk/dpdk.py index f8cbbd81..30f228f7 100644 --- a/src/dpdk/dpdk.py +++ b/src/dpdk/dpdk.py @@ -23,7 +23,6 @@ from sys import platform as _platform import os import subprocess import logging -import locale from tools import tasks from conf import settings @@ -31,17 +30,25 @@ from tools.module_manager import ModuleManager _LOGGER = logging.getLogger(__name__) RTE_PCI_TOOL = os.path.join( - settings.getValue('RTE_SDK'), 'tools', 'dpdk_nic_bind.py') + settings.getValue('RTE_SDK_USER'), 'tools', 'dpdk_nic_bind.py') _DPDK_MODULE_MANAGER = ModuleManager() + +# declare global NIC variables only as their content might not be known yet +_NICS = [] +_NICS_PCI = [] + # # system management # - def init(): """Setup system for DPDK. """ + global _NICS + global _NICS_PCI + _NICS = settings.getValue('NICS') + _NICS_PCI = list(nic['pci'] for nic in _NICS) if not _is_linux(): _LOGGER.error('Not running on a compatible Linux version. Exiting...') return @@ -175,54 +182,35 @@ def _bind_nics(): True) tasks.run_task(['sudo', RTE_PCI_TOOL, '--bind=' + _driver] + - settings.getValue('WHITELIST_NICS'), _LOGGER, - 'Binding NICs %s...' % - settings.getValue('WHITELIST_NICS'), + _NICS_PCI, _LOGGER, + 'Binding NICs %s...' % _NICS_PCI, True) except subprocess.CalledProcessError: - _LOGGER.error('Unable to bind NICs %s', - str(settings.getValue('WHITELIST_NICS'))) - -def _unbind_nics_get_driver(): - """Check what driver the NICs should be bound to - after unbinding them from DPDK. 
- """ - _driver_list = [] - _output = subprocess.check_output([os.path.expanduser(RTE_PCI_TOOL), '--status']) - _my_encoding = locale.getdefaultlocale()[1] - for line in _output.decode(_my_encoding).split('\n'): - for nic in settings.getValue('WHITELIST_NICS'): - if nic in line: - _driver_list.append((line.split("unused=", 1)[1])) - return _driver_list + _LOGGER.error('Unable to bind NICs %s', str(_NICS_PCI)) def _unbind_nics(): """Unbind NICs using the Intel DPDK ``dpdk_nic_bind.py`` tool. """ - nic_drivers = _unbind_nics_get_driver() try: tasks.run_task(['sudo', RTE_PCI_TOOL, '--unbind'] + - settings.getValue('WHITELIST_NICS'), _LOGGER, - 'Unbinding NICs %s...' % - str(settings.getValue('WHITELIST_NICS')), + _NICS_PCI, _LOGGER, + 'Unbinding NICs %s...' % str(_NICS_PCI), True) except subprocess.CalledProcessError: - _LOGGER.error('Unable to unbind NICs %s', - str(settings.getValue('WHITELIST_NICS'))) + _LOGGER.error('Unable to unbind NICs %s', str(_NICS_PCI)) # Rebind NICs to their original drivers # using the Intel DPDK ``dpdk_nic_bind.py`` tool. - for i, nic in enumerate(settings.getValue('WHITELIST_NICS')): + for nic in _NICS: try: - if nic_drivers[i] != '': + if nic['driver']: tasks.run_task(['sudo', RTE_PCI_TOOL, '--bind', - nic_drivers[i], nic], - _LOGGER, 'Binding NIC %s...' % - nic, + nic['driver'], nic['pci']], + _LOGGER, 'Binding NIC %s to %s...' % + (nic['pci'], nic['driver']), True) except subprocess.CalledProcessError: - _LOGGER.error('Unable to bind NICs %s to drivers %s', - str(settings.getValue('WHITELIST_NICS')), - nic_drivers) + _LOGGER.error('Unable to bind NIC %s to driver %s', + nic['pci'], nic['driver']) class Dpdk(object): """A context manager for the system init/cleanup. diff --git a/src/ovs/__init__.py b/src/ovs/__init__.py index 8c157006..77592ea3 100644 --- a/src/ovs/__init__.py +++ b/src/ovs/__init__.py @@ -21,6 +21,5 @@ and external setup of vswitchd-external process, kernel modules etc. 
""" -from src.ovs.daemon import * from src.ovs.ofctl import * from src.ovs.dpctl import * diff --git a/src/ovs/daemon.py b/src/ovs/daemon.py deleted file mode 100644 index 09735600..00000000 --- a/src/ovs/daemon.py +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2015 Intel Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Class wrapper for controlling an OVS instance. - -Wraps a pair of ``ovs-vswitchd`` and ``ovsdb-server`` processes. -""" - -import os -import logging -import pexpect - -from conf import settings -from tools import tasks - -_OVS_VSWITCHD_BIN = os.path.join( - settings.getValue('OVS_DIR'), 'vswitchd', 'ovs-vswitchd') -_OVSDB_TOOL_BIN = os.path.join( - settings.getValue('OVS_DIR'), 'ovsdb', 'ovsdb-tool') -_OVSDB_SERVER_BIN = os.path.join( - settings.getValue('OVS_DIR'), 'ovsdb', 'ovsdb-server') - -_OVS_VAR_DIR = settings.getValue('OVS_VAR_DIR') -_OVS_ETC_DIR = settings.getValue('OVS_ETC_DIR') - -_LOG_FILE_VSWITCHD = os.path.join( - settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_VSWITCHD')) - -class VSwitchd(tasks.Process): - """Class wrapper for controlling an OVS instance. - - Wraps a pair of ``ovs-vswitchd`` and ``ovsdb-server`` processes. 
- """ - _ovsdb_pid = None - _logfile = _LOG_FILE_VSWITCHD - _ovsdb_pidfile_path = os.path.join(settings.getValue('LOG_DIR'), "ovsdb_pidfile.pid") - _proc_name = 'ovs-vswitchd' - - def __init__(self, timeout=30, vswitchd_args=None, expected_cmd=None): - """Initialise the wrapper with a specific start timeout and extra - parameters. - - :param timeout: Timeout to wait for application to start. - :param vswitchd_args: Command line parameters for vswitchd. - - :returns: None - """ - self._logger = logging.getLogger(__name__) - self._timeout = timeout - self._expect = expected_cmd - vswitchd_args = vswitchd_args or [] - self._cmd = ['sudo', '-E', _OVS_VSWITCHD_BIN] + vswitchd_args - - # startup/shutdown - - def start(self): - """ Start ``ovsdb-server`` and ``ovs-vswitchd`` instance. - - :returns: None - :raises: pexpect.EOF, pexpect.TIMEOUT - """ - - self._reset_ovsdb() - self._start_ovsdb() # this has to be started first - - try: - super(VSwitchd, self).start() - self.relinquish() - except (pexpect.EOF, pexpect.TIMEOUT) as exc: - logging.error("Exception during VSwitch start.") - self._kill_ovsdb() - raise exc - - def kill(self, signal='-15', sleep=2): - """Kill ``ovs-vswitchd`` instance if it is alive. - - :returns: None - """ - self._logger.info('Killing ovs-vswitchd...') - - self._kill_ovsdb() - - super(VSwitchd, self).kill(signal, sleep) - - # helper functions - - def _reset_ovsdb(self): - """Reset system for 'ovsdb'. 
- - :returns: None - """ - self._logger.info('Resetting system after last run...') - - tasks.run_task(['sudo', 'rm', '-rf', _OVS_VAR_DIR], self._logger) - tasks.run_task(['sudo', 'mkdir', '-p', _OVS_VAR_DIR], self._logger) - tasks.run_task(['sudo', 'rm', '-rf', _OVS_ETC_DIR], self._logger) - tasks.run_task(['sudo', 'mkdir', '-p', _OVS_ETC_DIR], self._logger) - - tasks.run_task(['sudo', 'rm', '-f', - os.path.join(_OVS_ETC_DIR, 'conf.db')], - self._logger) - - self._logger.info('System reset after last run.') - - def _start_ovsdb(self): - """Start ``ovsdb-server`` instance. - - :returns: None - """ - tasks.run_task(['sudo', _OVSDB_TOOL_BIN, 'create', - os.path.join(_OVS_ETC_DIR, 'conf.db'), - os.path.join(settings.getValue('OVS_DIR'), 'vswitchd', - 'vswitch.ovsschema')], - self._logger, - 'Creating ovsdb configuration database...') - - tasks.run_background_task( - ['sudo', _OVSDB_SERVER_BIN, - '--remote=punix:%s' % os.path.join(_OVS_VAR_DIR, 'db.sock'), - '--remote=db:Open_vSwitch,Open_vSwitch,manager_options', - '--pidfile=' + self._ovsdb_pidfile_path, '--overwrite-pidfile'], - self._logger, - 'Starting ovsdb-server...') - - def _kill_ovsdb(self): - """Kill ``ovsdb-server`` instance. - - :returns: None - """ - with open(self._ovsdb_pidfile_path, "r") as pidfile: - ovsdb_pid = pidfile.read().strip() - - self._logger.info("Killing ovsdb with pid: " + ovsdb_pid) - - if ovsdb_pid: - tasks.run_task(['sudo', 'kill', '-15', str(ovsdb_pid)], - self._logger, 'Killing ovsdb-server...') - - @staticmethod - def get_db_sock_path(): - """Method returns location of db.sock file - - :returns: path to db.sock file. - """ - return os.path.join(_OVS_VAR_DIR, 'db.sock') diff --git a/src/ovs/ofctl.py b/src/ovs/ofctl.py index 93894889..d7a2b320 100644 --- a/src/ovs/ofctl.py +++ b/src/ovs/ofctl.py @@ -57,12 +57,18 @@ class OFBase(object): def run_vsctl(self, args, check_error=False): """Run ``ovs-vsctl`` with supplied arguments. 
+ In case that timeout is set to -1, then ovs-vsctl + will be called with --no-wait option. + :param args: Arguments to pass to ``ovs-vsctl`` :param check_error: Throw exception on error :return: None """ - cmd = ['sudo', _OVS_VSCTL_BIN, '--timeout', str(self.timeout)] + args + if self.timeout == -1: + cmd = ['sudo', _OVS_VSCTL_BIN, '--no-wait'] + args + else: + cmd = ['sudo', _OVS_VSCTL_BIN, '--timeout', str(self.timeout)] + args return tasks.run_task( cmd, self.logger, 'Running ovs-vsctl...', check_error) @@ -343,6 +349,37 @@ class OFBridge(OFBase): self.logger.debug('dump flows') self.run_ofctl(['dump-flows', self.br_name], timeout=120) + def set_stp(self, enable=True): + """ + Set stp status + :param enable: Boolean to enable or disable stp + :return: None + """ + self.logger.debug( + 'Setting stp on bridge to %s', 'on' if enable else 'off') + self.run_vsctl( + ['set', 'Bridge', self.br_name, 'stp_enable={}'.format( + 'true' if enable else 'false')]) + + def set_rstp(self, enable=True): + """ + Set rstp status + :param enable: Boolean to enable or disable rstp + :return: None + """ + self.logger.debug( + 'Setting rstp on bridge to %s', 'on' if enable else 'off') + self.run_vsctl( + ['set', 'Bridge', self.br_name, 'rstp_enable={}'.format( + 'true' if enable else 'false')]) + + def bridge_info(self): + """ + Get bridge info + :return: Returns bridge info from list bridge command + """ + return self.run_vsctl(['list', 'bridge', self.br_name]) + # # helper functions # diff --git a/src/package-list.mk b/src/package-list.mk index 4bd1159b..5aa40bd2 100644 --- a/src/package-list.mk +++ b/src/package-list.mk @@ -6,12 +6,14 @@ # dpdk section # DPDK_URL ?= git://dpdk.org/dpdk DPDK_URL ?= http://dpdk.org/git/dpdk -DPDK_TAG ?= v2.2.0 +DPDK_TAG ?= v16.04 # OVS section OVS_URL ?= https://github.com/openvswitch/ovs -OVS_TAG ?= 02ab4b1a6a173979a51cabd7000a34546d517e60 +#The Tag below is for OVS v2.5.0 with backwards compatibility support for Qemu +#versions < 2.5. 
+OVS_TAG ?= 31871ee3839c35e6878debfc7926afa471dbdec6 # QEMU section QEMU_URL ?= https://github.com/qemu/qemu.git -QEMU_TAG ?= v2.3.0 +QEMU_TAG ?= v2.5.0 diff --git a/src/qemu/Makefile b/src/qemu/Makefile index 4603b273..5f5e941f 100755 --- a/src/qemu/Makefile +++ b/src/qemu/Makefile @@ -27,9 +27,7 @@ TAG_DONE_FLAG = $(WORK_DIR)/.$(QEMU_TAG).done INSTALL_TARGET = force_make force_install CONFIG_CMD = CONFIG_CMD += ./configure -CONFIG_CMD += --target-list="x86_64-softmmu" -QEMU_VANILLA = ../../src_vanilla/qemu -QEMU_CUSE = ../../src_cuse/qemu +CONFIG_CMD += --target-list="x86_64-softmmu" --python="/usr/bin/python" all: force_make @@ -55,8 +53,6 @@ clean: $(AT)cd $(WORK_DIR) && git clean -xfd *.o clobber: $(AT)rm -rf $(WORK_DIR) - $(AT)rm -rf $(QEMU_VANILLA) - $(AT)rm -rf $(QEMU_CUSE) # distclean is for developer who would like to keep the # clone git repo, saving time to fetch again from url @@ -73,10 +69,6 @@ $(WORK_DIR)/configure: $(TAG_DONE_FLAG) $(WORK_DIR): $(AT)git clone $(QEMU_URL) - $(AT)mkdir -p $(QEMU_VANILLA) - $(AT)cp -rf ./* $(QEMU_VANILLA) - $(AT)mkdir -p $(QEMU_CUSE) - $(AT)cp -rf ./* $(QEMU_CUSE) $(TAG_DONE_FLAG): $(WORK_DIR) $(AT)cd $(WORK_DIR); git checkout $(QEMU_TAG) diff --git a/systems/centos/build_base_machine.sh b/systems/centos/build_base_machine.sh index 8339dd2d..d3ac3e2b 100755 --- a/systems/centos/build_base_machine.sh +++ b/systems/centos/build_base_machine.sh @@ -31,7 +31,7 @@ make gcc gcc-c++ libxml2 -glibc.i686 +glibc kernel-devel # tools diff --git a/systems/fedora/20/build_base_machine.sh b/systems/fedora/20/build_base_machine.sh index 77d95779..96bb17df 100755 --- a/systems/fedora/20/build_base_machine.sh +++ b/systems/fedora/20/build_base_machine.sh @@ -28,7 +28,8 @@ automake gcc gcc-c++ libxml2 -glibc.i686 +glibc +glib2-devel kernel-devel fuse-libs fuse diff --git a/systems/fedora/21/build_base_machine.sh b/systems/fedora/21/build_base_machine.sh index 0aaa73b6..69b067f9 100755 --- a/systems/fedora/21/build_base_machine.sh +++ 
b/systems/fedora/21/build_base_machine.sh @@ -28,7 +28,8 @@ automake gcc gcc-c++ libxml2 -glibc.i686 +glibc +glib2-devel kernel-devel fuse-libs fuse diff --git a/systems/fedora/22/build_base_machine.sh b/systems/fedora/22/build_base_machine.sh index a909e284..0ca565f3 100755 --- a/systems/fedora/22/build_base_machine.sh +++ b/systems/fedora/22/build_base_machine.sh @@ -28,7 +28,7 @@ automake gcc gcc-c++ libxml2 -glibc.i686 +glibc glib2-devel kernel-devel fuse-libs diff --git a/systems/rhel/7.2/build_base_machine.sh b/systems/rhel/7.2/build_base_machine.sh index a038f7b3..86608066 100755 --- a/systems/rhel/7.2/build_base_machine.sh +++ b/systems/rhel/7.2/build_base_machine.sh @@ -29,7 +29,7 @@ pkglist=(\ gcc\ gcc-c++\ glib2-devel\ - glibc.i686\ + glibc\ kernel-devel\ openssl-devel\ pixman-devel\ diff --git a/systems/ubuntu/14.04/build_base_machine.sh b/systems/ubuntu/14.04/build_base_machine.sh index 3b4185fc..9fa8511c 100755 --- a/systems/ubuntu/14.04/build_base_machine.sh +++ b/systems/ubuntu/14.04/build_base_machine.sh @@ -61,6 +61,7 @@ cifs-utils socat libpixman-1-0 libpixman-1-dev +sysstat # Java runtime environment: Required for Ixia TclClient default-jre @@ -87,4 +88,4 @@ ln -sf $(locate libc.so.6) /lib/libc.so.6 pip3 install virtualenv # Create hugepage dirs -mkdir -p /dev/hugepages
\ No newline at end of file +mkdir -p /dev/hugepages diff --git a/systems/ubuntu/14.04/prepare_python_env.sh b/systems/ubuntu/14.04/prepare_python_env.sh index f9c2def8..6ef8680d 100755 --- a/systems/ubuntu/14.04/prepare_python_env.sh +++ b/systems/ubuntu/14.04/prepare_python_env.sh @@ -23,7 +23,7 @@ fi # enable virtual environment in a subshell, so QEMU build can use python 2.7 -(virtualenv-3.4 "$VSPERFENV_DIR" +(virtualenv "$VSPERFENV_DIR" source "$VSPERFENV_DIR"/bin/activate pip install -r ../requirements.txt -pip install pylint)
\ No newline at end of file +pip install pylint) diff --git a/testcases/integration.py b/testcases/integration.py index ecaed14f..ffde5822 100644 --- a/testcases/integration.py +++ b/testcases/integration.py @@ -17,29 +17,34 @@ import os import time import logging +import copy from testcases import TestCase from conf import settings as S from collections import OrderedDict +from tools import namespace +from tools import veth +from core.loader import Loader CHECK_PREFIX = 'validate_' + class IntegrationTestCase(TestCase): """IntegrationTestCase class """ - def __init__(self, cfg, results_dir): + def __init__(self, cfg): """ Testcase initialization """ self._type = 'integration' - super(IntegrationTestCase, self).__init__(cfg, results_dir) + super(IntegrationTestCase, self).__init__(cfg) self._logger = logging.getLogger(__name__) self._inttest = None def report_status(self, label, status): """ Log status of test step """ - self._logger.debug("%s ... %s", label, 'OK' if status else 'FAILED') + self._logger.info("%s ... 
%s", label, 'OK' if status else 'FAILED') def run_initialize(self): """ Prepare test execution environment @@ -104,6 +109,8 @@ class IntegrationTestCase(TestCase): if not self.test: self._traffic_ctl.send_traffic(self._traffic) else: + vnf_list = {} + loader = Loader() # execute test based on TestSteps definition if self.test: step_result = [None] * len(self.test) @@ -111,8 +118,24 @@ class IntegrationTestCase(TestCase): step_ok = False if step[0] == 'vswitch': test_object = self._vswitch_ctl.get_vswitch() + elif step[0] == 'namespace': + test_object = namespace + elif step[0] == 'veth': + test_object = veth elif step[0] == 'trafficgen': test_object = self._traffic_ctl + # in case of send_traffic method, ensure that specified + # traffic values are merged with existing self._traffic + if step[1] == 'send_traffic': + tmp_traffic = copy.deepcopy(self._traffic) + tmp_traffic.update(step[2]) + step[2] = tmp_traffic + elif step[0].startswith('vnf'): + if not step[0] in vnf_list: + # initialize new VM and copy data to its shared dir + vnf_list[step[0]] = loader.get_vnf_class()() + self._copy_fwd_tools_for_guest(len(vnf_list)) + test_object = vnf_list[step[0]] else: self._logger.error("Unsupported test object %s", step[0]) self._inttest = {'status' : False, 'details' : ' '.join(step)} @@ -130,23 +153,32 @@ class IntegrationTestCase(TestCase): step_params = eval_step_params(step[2:], step_result) step_log = '{} {}'.format(' '.join(step[:2]), step_params) step_result[i] = test_method(*step_params) - self._logger.debug("Step {} '{}' results '{}'".format( - i, step_log, step_result[i])) - time.sleep(2) + self._logger.debug("Step %s '%s' results '%s'", i, + step_log, step_result[i]) + time.sleep(5) step_ok = test_method_check(step_result[i], *step_params) except AssertionError: self._inttest = {'status' : False, 'details' : step_log} - self._logger.error("Step {} raised assertion error".format(i)) + self._logger.error("Step %s raised assertion error", i) + # stop vnfs in case 
of error + for vnf in vnf_list: + vnf_list[vnf].stop() break except IndexError: self._inttest = {'status' : False, 'details' : step_log} - self._logger.error("Step {} result index error {}".format( - i, ' '.join(step[2:]))) + self._logger.error("Step %s result index error %s", i, + ' '.join(step[2:])) + # stop vnfs in case of error + for vnf in vnf_list: + vnf_list[vnf].stop() break self.report_status("Step {} - '{}'".format(i, step_log), step_ok) if not step_ok: self._inttest = {'status' : False, 'details' : step_log} + # stop vnfs in case of error + for vnf in vnf_list: + vnf_list[vnf].stop() break # dump vswitch flows before they are affected by VNF termination @@ -166,7 +198,7 @@ class IntegrationTestCase(TestCase): results = OrderedDict() results['status'] = 'OK' if self._inttest['status'] else 'FAILED' results['details'] = self._inttest['details'] - TestCase._write_result_to_file([results], self._output_file) + TestCase.write_result_to_file([results], self._output_file) self.report_status("Test '{}'".format(self.name), self._inttest['status']) # inform vsperf about testcase failure if not self._inttest['status']: diff --git a/testcases/performance.py b/testcases/performance.py index 0ae3ea77..a4769a28 100644 --- a/testcases/performance.py +++ b/testcases/performance.py @@ -25,11 +25,11 @@ class PerformanceTestCase(TestCase): In this basic form runs RFC2544 throughput test """ - def __init__(self, cfg, results_dir): + def __init__(self, cfg): """ Testcase initialization """ self._type = 'performance' - super(PerformanceTestCase, self).__init__(cfg, results_dir) + super(PerformanceTestCase, self).__init__(cfg) self._logger = logging.getLogger(__name__) def run_report(self): diff --git a/testcases/testcase.py b/testcases/testcase.py index 0effce75..5f5c9358 100644 --- a/testcases/testcase.py +++ b/testcases/testcase.py @@ -27,6 +27,7 @@ from core.loader import Loader from core.results.results_constants import ResultsConstants from tools import tasks from tools 
import hugepages +from tools import functions from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS from conf import settings as S from conf import get_test_param @@ -36,7 +37,7 @@ class TestCase(object): In this basic form runs RFC2544 throughput test """ - def __init__(self, cfg, results_dir): + def __init__(self, cfg): """Pull out fields from test config :param cfg: A dictionary of string-value pairs describing the test @@ -44,6 +45,7 @@ class TestCase(object): values. :param results_dir: Where the csv formatted results are written. """ + self._testcase_start_time = time.time() self._hugepages_mounted = False self._traffic_ctl = None self._vnf_ctl = None @@ -52,6 +54,27 @@ class TestCase(object): self._loadgen = None self._output_file = None self._tc_results = None + self.guest_loopback = [] + self._settings_original = {} + self._settings_paths_modified = False + self._testcast_run_time = None + + self._update_settings('VSWITCH', cfg.get('vSwitch', S.getValue('VSWITCH'))) + self._update_settings('VNF', cfg.get('VNF', S.getValue('VNF'))) + self._update_settings('TRAFFICGEN', cfg.get('Trafficgen', S.getValue('TRAFFICGEN'))) + self._update_settings('TEST_PARAMS', cfg.get('Parameters', S.getValue('TEST_PARAMS'))) + + # update global settings + guest_loopback = get_test_param('guest_loopback', None) + if guest_loopback: + self._update_settings('GUEST_LOOPBACK', [guest_loopback for dummy in S.getValue('GUEST_LOOPBACK')]) + + if 'VSWITCH' in self._settings_original or 'VNF' in self._settings_original: + self._settings_original.update({ + 'RTE_SDK' : S.getValue('RTE_SDK'), + 'OVS_DIR' : S.getValue('OVS_DIR'), + }) + functions.settings_update_paths() # set test parameters; CLI options take precedence to testcase settings self._logger = logging.getLogger(__name__) @@ -61,6 +84,10 @@ class TestCase(object): bidirectional = cfg.get('biDirectional', TRAFFIC_DEFAULTS['bidir']) bidirectional = get_test_param('bidirectional', bidirectional) + if not 
isinstance(bidirectional, str): + raise TypeError( + 'Bi-dir value must be of type string in testcase configuration') + bidirectional = bidirectional.title() # Keep things consistent traffic_type = cfg.get('Traffic Type', TRAFFIC_DEFAULTS['traffic_type']) traffic_type = get_test_param('traffic_type', traffic_type) @@ -82,18 +109,11 @@ class TestCase(object): self._tunnel_type = get_test_param('tunnel_type', self._tunnel_type) - # identify guest loopback method, so it can be added into reports - self.guest_loopback = [] - if self.deployment in ['pvp', 'pvvp']: - guest_loopback = get_test_param('guest_loopback', None) - if guest_loopback: - self.guest_loopback.append(guest_loopback) - else: - if self.deployment == 'pvp': - self.guest_loopback.append(S.getValue('GUEST_LOOPBACK')[0]) - else: - self.guest_loopback = S.getValue('GUEST_LOOPBACK').copy() + if self.deployment == 'pvp': + self.guest_loopback.append(S.getValue('GUEST_LOOPBACK')[0]) + else: + self.guest_loopback = S.getValue('GUEST_LOOPBACK').copy() # read configuration of streams; CLI parameter takes precedence to # testcase definition @@ -114,7 +134,7 @@ class TestCase(object): if self._frame_mod: self._frame_mod = self._frame_mod.lower() - self._results_dir = results_dir + self._results_dir = S.getValue('RESULTS_PATH') # set traffic details, so they can be passed to vswitch and traffic ctls self._traffic = copy.deepcopy(TRAFFIC_DEFAULTS) @@ -127,27 +147,17 @@ class TestCase(object): 'pre_installed_flows' : pre_installed_flows, 'frame_rate': int(framerate)}) + # Packet Forwarding mode + self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower() + # OVS Vanilla requires guest VM MAC address and IPs to work if 'linux_bridge' in self.guest_loopback: - self._traffic['l2'].update({'srcmac': S.getValue('GUEST_NET2_MAC')[0], - 'dstmac': S.getValue('GUEST_NET1_MAC')[0]}) + self._traffic['l2'].update({'srcmac': S.getValue('VANILLA_TGEN_PORT1_MAC'), + 'dstmac': S.getValue('VANILLA_TGEN_PORT2_MAC')}) 
self._traffic['l3'].update({'srcip': S.getValue('VANILLA_TGEN_PORT1_IP'), 'dstip': S.getValue('VANILLA_TGEN_PORT2_IP')}) - # Packet Forwarding mode - self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower() - - def run_initialize(self): - """ Prepare test execution environment - """ - self._logger.debug(self.name) - - # mount hugepages if needed - self._mount_hugepages() - - # copy sources of l2 forwarding tools into VM shared dir if needed - self._copy_fwd_tools_for_guest() - + # trafficgen configuration required for tests of tunneling protocols if self.deployment == "op2p": self._traffic['l2'].update({'srcmac': S.getValue('TRAFFICGEN_PORT1_MAC'), @@ -163,7 +173,24 @@ class TestCase(object): self._traffic['l2'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L2') self._traffic['l3'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L3') self._traffic['l4'] = S.getValue(self._tunnel_type.upper() + '_FRAME_L4') + elif S.getValue('NICS')[0]['type'] == 'vf' or S.getValue('NICS')[1]['type'] == 'vf': + mac1 = S.getValue('NICS')[0]['mac'] + mac2 = S.getValue('NICS')[1]['mac'] + if mac1 and mac2: + self._traffic['l2'].update({'srcmac': mac2, 'dstmac': mac1}) + else: + self._logger.debug("MAC addresses can not be read") + def run_initialize(self): + """ Prepare test execution environment + """ + self._logger.debug(self.name) + + # mount hugepages if needed + self._mount_hugepages() + + # copy sources of l2 forwarding tools into VM shared dir if needed + self._copy_fwd_tools_for_all_guests() self._logger.debug("Controllers:") loader = Loader() @@ -177,6 +204,7 @@ class TestCase(object): if self._vswitch_none: self._vswitch_ctl = component_factory.create_pktfwd( + self.deployment, loader.get_pktfwd_class()) else: self._vswitch_ctl = component_factory.create_vswitch( @@ -203,6 +231,29 @@ class TestCase(object): # umount hugepages if mounted self._umount_hugepages() + # restore original settings + S.load_from_dict(self._settings_original) + + # cleanup any 
namespaces created + if os.path.isdir('/tmp/namespaces'): + import tools.namespace + namespace_list = os.listdir('/tmp/namespaces') + if len(namespace_list): + self._logger.info('Cleaning up namespaces') + for name in namespace_list: + tools.namespace.delete_namespace(name) + os.rmdir('/tmp/namespaces') + # cleanup any veth ports created + if os.path.isdir('/tmp/veth'): + import tools.veth + veth_list = os.listdir('/tmp/veth') + if len(veth_list): + self._logger.info('Cleaning up veth ports') + for eth in veth_list: + port1, port2 = eth.split('-') + tools.veth.del_veth_port(port1, port2) + os.rmdir('/tmp/veth') + def run_report(self): """ Report test results """ @@ -214,7 +265,7 @@ class TestCase(object): self._traffic_ctl.print_results() self._tc_results = self._append_results(self._traffic_ctl.get_results()) - TestCase._write_result_to_file(self._tc_results, self._output_file) + TestCase.write_result_to_file(self._tc_results, self._output_file) def run(self): """Run the test @@ -255,9 +306,25 @@ class TestCase(object): # tear down test execution environment and log results self.run_finalize() + self._testcase_run_time = time.strftime("%H:%M:%S", + time.gmtime(time.time() - self._testcase_start_time)) + logging.info("Testcase execution time: " + self._testcase_run_time) # report test results self.run_report() + def _update_settings(self, param, value): + """ Check value of given configuration parameter + In case that new value is different, then testcase + specific settings is updated and original value stored + + :param param: Name of parameter inside settings + :param value: Disired parameter value + """ + orig_value = S.getValue(param) + if orig_value != value: + self._settings_original[param] = orig_value + S.setValue(param, value) + def _append_results(self, results): """ Method appends mandatory Test Case results to list of dictionaries. 
@@ -271,52 +338,60 @@ class TestCase(object): item[ResultsConstants.ID] = self.name item[ResultsConstants.DEPLOYMENT] = self.deployment item[ResultsConstants.TRAFFIC_TYPE] = self._traffic['l3']['proto'] + item[ResultsConstants.TEST_RUN_TIME] = self._testcase_run_time if self._traffic['multistream']: item[ResultsConstants.SCAL_STREAM_COUNT] = self._traffic['multistream'] item[ResultsConstants.SCAL_STREAM_TYPE] = self._traffic['stream_type'] item[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = self._traffic['pre_installed_flows'] - if len(self.guest_loopback): + if self.deployment in ['pvp', 'pvvp'] and len(self.guest_loopback): item[ResultsConstants.GUEST_LOOPBACK] = ' '.join(self.guest_loopback) if self._tunnel_type: item[ResultsConstants.TUNNEL_TYPE] = self._tunnel_type return results - def _copy_fwd_tools_for_guest(self): - """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] for use by guests. + def _copy_fwd_tools_for_all_guests(self): + """Copy dpdk and l2fwd code to GUEST_SHARE_DIR[s] based on selected deployment. 
""" - counter = 0 - # method is executed only for pvp and pvvp, so let's count number of 'v' - while counter < self.deployment.count('v'): - guest_dir = S.getValue('GUEST_SHARE_DIR')[counter] - - # remove shared dir if it exists to avoid issues with file consistency - if os.path.exists(guest_dir): - tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger, - 'Removing content of shared directory...', True) - - # directory to share files between host and guest - os.makedirs(guest_dir) - - # copy sources into shared dir only if neccessary - if 'testpmd' in self.guest_loopback or 'l2fwd' in self.guest_loopback: - try: - tasks.run_task(['rsync', '-a', '-r', '-l', r'--exclude="\.git"', - os.path.join(S.getValue('RTE_SDK'), ''), - os.path.join(guest_dir, 'DPDK')], - self._logger, - 'Copying DPDK to shared directory...', - True) - tasks.run_task(['rsync', '-a', '-r', '-l', - os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/'), - os.path.join(guest_dir, 'l2fwd')], - self._logger, - 'Copying l2fwd to shared directory...', - True) - except subprocess.CalledProcessError: - self._logger.error('Unable to copy DPDK and l2fwd to shared directory') - + # data are copied only for pvp and pvvp, so let's count number of 'v' + counter = 1 + while counter <= self.deployment.count('v'): + self._copy_fwd_tools_for_guest(counter) counter += 1 + def _copy_fwd_tools_for_guest(self, index): + """Copy dpdk and l2fwd code to GUEST_SHARE_DIR of VM + + :param index: Index of VM starting from 1 (i.e. 
1st VM has index 1) + """ + guest_dir = S.getValue('GUEST_SHARE_DIR')[index-1] + + # remove shared dir if it exists to avoid issues with file consistency + if os.path.exists(guest_dir): + tasks.run_task(['rm', '-f', '-r', guest_dir], self._logger, + 'Removing content of shared directory...', True) + + # directory to share files between host and guest + os.makedirs(guest_dir) + + # copy sources into shared dir only if neccessary + if 'testpmd' in self.guest_loopback or 'l2fwd' in self.guest_loopback: + try: + tasks.run_task(['rsync', '-a', '-r', '-l', r'--exclude="\.git"', + os.path.join(S.getValue('RTE_SDK_USER'), ''), + os.path.join(guest_dir, 'DPDK')], + self._logger, + 'Copying DPDK to shared directory...', + True) + tasks.run_task(['rsync', '-a', '-r', '-l', + os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/'), + os.path.join(guest_dir, 'l2fwd')], + self._logger, + 'Copying l2fwd to shared directory...', + True) + except subprocess.CalledProcessError: + self._logger.error('Unable to copy DPDK and l2fwd to shared directory') + + def _mount_hugepages(self): """Mount hugepages if usage of DPDK or Qemu is detected """ @@ -324,7 +399,8 @@ class TestCase(object): if not self._hugepages_mounted and \ (self.deployment.count('v') or \ S.getValue('VSWITCH').lower().count('dpdk') or \ - self._vswitch_none): + self._vswitch_none or \ + self.test and 'vnf' in [step[0][0:3] for step in self.test]): hugepages.mount_hugepages() self._hugepages_mounted = True @@ -336,7 +412,7 @@ class TestCase(object): self._hugepages_mounted = False @staticmethod - def _write_result_to_file(results, output): + def write_result_to_file(results, output): """Write list of dictionaries to a CSV file. Each element on list will create separate row in output file. diff --git a/tools/functions.py b/tools/functions.py new file mode 100644 index 00000000..5079a9f0 --- /dev/null +++ b/tools/functions.py @@ -0,0 +1,43 @@ +# Copyright 2016 Intel Corporation. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Various helper functions +""" + +from conf import settings + +# +# Support functions +# + +def settings_update_paths(): + """ Configure paths to OVS and DPDK based on VSWITCH and VNF values + """ + # set dpdk and ovs paths accorfing to VNF and VSWITCH + if settings.getValue('VSWITCH').endswith('Vanilla'): + # settings paths for Vanilla + settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_VANILLA'))) + elif settings.getValue('VSWITCH').endswith('Vhost'): + if settings.getValue('VNF').endswith('Cuse'): + # settings paths for Cuse + settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_CUSE'))) + settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_CUSE'))) + else: + # settings paths for VhostUser + settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER'))) + settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER'))) + else: + # default - set to VHOST USER but can be changed during enhancement + settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER'))) + settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER'))) diff --git a/tools/hugepages.py b/tools/hugepages.py index 71535922..3a434d6e 100644 --- a/tools/hugepages.py +++ b/tools/hugepages.py @@ -78,7 +78,8 @@ def mount_hugepages(): return if not os.path.exists(settings.getValue('HUGEPAGE_DIR')): - os.makedirs(settings.getValue('HUGEPAGE_DIR')) + tasks.run_task(['sudo', 'mkdir', settings.getValue('HUGEPAGE_DIR')], _LOGGER, + 
'Creating directory ' + settings.getValue('HUGEPAGE_DIR'), True) try: tasks.run_task(['sudo', 'mount', '-t', 'hugetlbfs', 'nodev', settings.getValue('HUGEPAGE_DIR')], diff --git a/tools/namespace.py b/tools/namespace.py new file mode 100644 index 00000000..e6bcd819 --- /dev/null +++ b/tools/namespace.py @@ -0,0 +1,178 @@ +# Copyright 2016 Red Hat Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +Network namespace emulation +""" + +import logging +import os + +from tools import tasks + +_LOGGER = logging.getLogger(__name__) + + +def add_ip_to_namespace_eth(port, name, ip_addr, cidr): + """ + Assign port ip address in namespace + :param port: port to assign ip to + :param name: namespace where port resides + :param ip_addr: ip address in dot notation format + :param cidr: cidr as string + :return: + """ + ip_string = '{}/{}'.format(ip_addr, cidr) + tasks.run_task(['sudo', 'ip', 'netns', 'exec', name, + 'ip', 'addr', 'add', ip_string, 'dev', port], + _LOGGER, 'Assigning ip to port {}...'.format(port), False) + + +def assign_port_to_namespace(port, name, port_up=False): + """ + Assign NIC port to namespace + :param port: port name as string + :param name: namespace name as string + :param port_up: Boolean if the port should be brought up on assignment + :return: None + """ + tasks.run_task(['sudo', 'ip', 'link', 'set', + 'netns', name, 'dev', port], + _LOGGER, 'Assigning port {} to namespace {}...'.format( + port, name), False) + if port_up: + 
tasks.run_task(['sudo', 'ip', 'netns', 'exec', name, + 'ip', 'link', 'set', port, 'up'], + _LOGGER, 'Bringing up port {}...'.format(port), False) + + +def create_namespace(name): + """ + Create a linux namespace. Raises RuntimeError if namespace already exists + in the system. + :param name: name of the namespace to be created as string + :return: None + """ + if name in get_system_namespace_list(): + raise RuntimeError('Namespace already exists in system') + + # touch some files in a tmp area so we can track them separately from + # the OS's internal namespace tracking. This allows us to track VSPerf + # created namespaces so they can be cleaned up if needed. + if not os.path.isdir('/tmp/namespaces'): + try: + os.mkdir('/tmp/namespaces') + except os.error: + # OK don't crash, but cleanup may be an issue... + _LOGGER.error('Unable to create namespace temp folder.') + _LOGGER.error( + 'Namespaces will not be removed on test case completion') + if os.path.isdir('/tmp/namespaces'): + with open('/tmp/namespaces/{}'.format(name), 'a'): + os.utime('/tmp/namespaces/{}'.format(name), None) + + tasks.run_task(['sudo', 'ip', 'netns', 'add', name], _LOGGER, + 'Creating namespace {}...'.format(name), False) + tasks.run_task(['sudo', 'ip', 'netns', 'exec', name, + 'ip', 'link', 'set', 'lo', 'up'], _LOGGER, + 'Enabling loopback interface...', False) + + +def delete_namespace(name): + """ + Delete linux network namespace + :param name: namespace to delete + :return: None + """ + # delete the file if it exists in the temp area + if os.path.exists('/tmp/namespaces/{}'.format(name)): + os.remove('/tmp/namespaces/{}'.format(name)) + tasks.run_task(['sudo', 'ip', 'netns', 'delete', name], _LOGGER, + 'Deleting namespace {}...'.format(name), False) + + +def get_system_namespace_list(): + """ + Return tuple of strings for namespaces on the system + :return: tuple of namespaces as string + """ + return tuple(os.listdir('/var/run/netns')) + + +def get_vsperf_namespace_list(): + """ + 
Return a tuple of strings for namespaces created by vsperf testcase + :return: tuple of namespaces as string + """ + if os.path.isdir('/tmp/namespaces'): + return tuple(os.listdir('/tmp/namespaces')) + else: + return [] + + +def reset_port_to_root(port, name): + """ + Return the assigned port to the root namespace + :param port: port to return as string + :param name: namespace the port currently resides + :return: None + """ + tasks.run_task(['sudo', 'ip', 'netns', 'exec', name, + 'ip', 'link', 'set', port, 'netns', '1'], + _LOGGER, 'Assigning port {} to namespace {}...'.format( + port, name), False) + + +# pylint: disable=unused-argument +# pylint: disable=invalid-name +def validate_add_ip_to_namespace_eth(result, port, name, ip_addr, cidr): + """ + Validation function for integration testcases + """ + ip_string = '{}/{}'.format(ip_addr, cidr) + return ip_string in ''.join(tasks.run_task( + ['sudo', 'ip', 'netns', 'exec', name, 'ip', 'addr', 'show', port], + _LOGGER, 'Validating ip address in namespace...', False)) + + +def validate_assign_port_to_namespace(result, port, name, port_up=False): + """ + Validation function for integration testcases + """ + # this could be improved...its not 100% accurate + return port in ''.join(tasks.run_task( + ['sudo', 'ip', 'netns', 'exec', name, 'ip', 'addr'], + _LOGGER, 'Validating port in namespace...')) + + +def validate_create_namespace(result, name): + """ + Validation function for integration testcases + """ + return name in get_system_namespace_list() + + +def validate_delete_namespace(result, name): + """ + Validation function for integration testcases + """ + return name not in get_system_namespace_list() + + +def validate_reset_port_to_root(result, port, name): + """ + Validation function for integration testcases + """ + return not validate_assign_port_to_namespace(result, port, name) diff --git a/tools/networkcard.py b/tools/networkcard.py new file mode 100644 index 00000000..c31be691 --- /dev/null +++ 
b/tools/networkcard.py @@ -0,0 +1,266 @@ +# Copyright 2016 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for network card manipulation +""" + +import os +import subprocess +import logging +import glob +from conf import settings + +_LOGGER = logging.getLogger('tools.networkcard') + +_PCI_DIR = '/sys/bus/pci/devices/{}/' +_SRIOV_NUMVFS = os.path.join(_PCI_DIR, 'sriov_numvfs') +_SRIOV_TOTALVFS = os.path.join(_PCI_DIR, 'sriov_totalvfs') +_SRIOV_VF_PREFIX = 'virtfn' +_SRIOV_PF = 'physfn' +_PCI_NET = 'net' +_PCI_DRIVER = 'driver' + + +def check_pci(pci_handle): + """ Checks if given extended PCI handle has correct length and fixes + it if possible. + + :param pci_handle: PCI slot identifier. It can contain vsperf specific + suffix after '|' with VF indication. e.g. 
'0000:05:00.0|vf1' + + :returns: PCI handle + """ + pci = pci_handle.split('|') + pci_len = len(pci[0]) + if pci_len == 12: + return pci_handle + elif pci_len == 7: + pci[0] = '0000:' + pci[0][-7:] + _LOGGER.debug('Adding domain part to PCI slot %s', pci[0]) + return '|'.join(pci) + elif pci_len > 12: + pci[0] = pci[0][-12:] + _LOGGER.warning('PCI slot is too long, it will be shortened to %s', pci[0]) + return '|'.join(pci) + else: + # pci_handle has a strange length, but let us try to use it + _LOGGER.error('Unknown format of PCI slot %s', pci_handle) + return pci_handle + +def is_sriov_supported(pci_handle): + """ Checks if sriov is supported by given NIC + + :param pci_handle: PCI slot identifier with domain part. + + :returns: True on success, False otherwise + """ + return os.path.isfile(_SRIOV_TOTALVFS.format(pci_handle)) + +def is_sriov_nic(pci_handle): + """ Checks if given extended PCI ID refers to the VF + + :param pci_handle: PCI slot identifier with domain part. It can contain + vsperf specific suffix after '|' with VF indication. + e.g. '0000:05:00.0|vf1' + + :returns: True on success, False otherwise + """ + for item in pci_handle.split('|'): + if item.lower().startswith('vf'): + return True + return False + +def set_sriov_numvfs(pci_handle, numvfs): + """ Checks if sriov is supported and configures given number of VFs + + :param pci_handle: PCI slot identifier with domain part. + :param numvfs: Number of VFs to be configured at given NIC. 
+ + :returns: True on success, False otherwise + """ + if not is_sriov_supported(pci_handle): + return False + + if get_sriov_numvfs(pci_handle) == numvfs: + return True + + if numvfs and get_sriov_numvfs(pci_handle) != 0: + if not set_sriov_numvfs(pci_handle, 0): + return False + + try: + subprocess.call('sudo bash -c "echo {} > {}"'.format(numvfs, _SRIOV_NUMVFS.format(pci_handle)), shell=True) + return get_sriov_numvfs(pci_handle) == numvfs + except OSError: + _LOGGER.debug('Number of VFs cant be changed to %s for PF %s', numvfs, pci_handle) + return False + +def get_sriov_numvfs(pci_handle): + """ Returns the number of configured VFs + + :param pci_handle: PCI slot identifier with domain part + :returns: the number of configured VFs + """ + if is_sriov_supported(pci_handle): + with open(_SRIOV_NUMVFS.format(pci_handle), 'r') as numvfs: + return int(numvfs.readline().rstrip('\n')) + + return None + +def get_sriov_totalvfs(pci_handle): + """ Checks if sriov is supported and returns max number of supported VFs + + :param pci_handle: PCI slot identifier with domain part + :returns: the max number of supported VFs by given NIC + """ + if is_sriov_supported(pci_handle): + with open(_SRIOV_TOTALVFS.format(pci_handle), 'r') as total: + return int(total.readline().rstrip('\n')) + + return None + +def get_sriov_vfs_list(pf_pci_handle): + """ Returns list of PCI handles of VFs configured at given NIC/PF + + :param pf_pci_handle: PCI slot identifier of PF with domain part. + :returns: list + """ + vfs = [] + if is_sriov_supported(pf_pci_handle): + for vf_name in glob.glob(os.path.join(_PCI_DIR, _SRIOV_VF_PREFIX + '*').format(pf_pci_handle)): + vfs.append(os.path.basename(os.path.realpath(vf_name))) + + return vfs + +def get_sriov_pf(vf_pci_handle): + """ Get PCI handle of PF which belongs to given VF + + :param vf_pci_handle: PCI slot identifier of VF with domain part. 
+ :returns: PCI handle of parent PF + """ + pf_path = os.path.join(_PCI_DIR, _SRIOV_PF).format(vf_pci_handle) + if os.path.isdir(pf_path): + return os.path.basename(os.path.realpath(pf_path)) + + return None + +def get_driver(pci_handle): + """ Returns name of kernel driver assigned to given NIC + + :param pci_handle: PCI slot identifier with domain part. + :returns: string with assigned kernel driver, None otherwise + """ + driver_path = os.path.join(_PCI_DIR, _PCI_DRIVER).format(pci_handle) + if os.path.isdir(driver_path): + return os.path.basename(os.path.realpath(driver_path)) + + return None + +def get_device_name(pci_handle): + """ Returns name of network card device name + + :param pci_handle: PCI slot identifier with domain part. + :returns: string with assigned NIC device name, None otherwise + """ + net_path = os.path.join(_PCI_DIR, _PCI_NET).format(pci_handle) + try: + return os.listdir(net_path)[0] + except FileNotFoundError: + return None + except IndexError: + return None + + return None + +def get_mac(pci_handle): + """ Returns MAC address of given NIC + + :param pci_handle: PCI slot identifier with domain part. + :returns: string with assigned MAC address, None otherwise + """ + mac_path = glob.glob(os.path.join(_PCI_DIR, _PCI_NET, '*', 'address').format(pci_handle)) + # kernel driver is loaded and MAC can be read + if len(mac_path) and os.path.isfile(mac_path[0]): + with open(mac_path[0], 'r') as _file: + return _file.readline().rstrip('\n') + + # MAC address is unknown, e.g. NIC is assigned to DPDK + return None + +def get_nic_info(full_pci_handle): + """ Parse given pci handle with additional info and returns + requested NIC info. + + :param full_pci_handle: A string with extended network card PCI ID. 
+ extended PCI ID syntax: PCI_ID[|vfx][|(mac|dev)] + examples: + 0000:06:00.0 - returns the same value + 0000:06:00.0|vf0 - returns PCI ID of 1st virtual function of given NIC + 0000:06:00.0|mac - returns MAC address of given NIC + 0000:06:00.0|vf0|mac - returns MAC address of 1st virtual function of given NIC + + :returns: A string with requested NIC data or None if data cannot be read. + """ + parsed_handle = full_pci_handle.split('|') + if len(parsed_handle) not in (1, 2, 3): + _LOGGER.error("Invalid PCI device name: '%s'", full_pci_handle) + return None + + pci_handle = parsed_handle[0] + + for action in parsed_handle[1:]: + # in case of SRIOV get PCI handle of given virtual function + if action.lower().startswith('vf'): + try: + vf_num = int(action[2:]) + pci_handle = get_sriov_vfs_list(pci_handle)[vf_num] + except ValueError: + _LOGGER.error("Pci device '%s', does not have VF with index '%s'", pci_handle, action[2:]) + return None + except IndexError: + _LOGGER.error("Pci device '%s', does not have VF with index '%s'", pci_handle, vf_num) + return None + continue + + # return requested info for given PCI handle + if action.lower() == 'mac': + return get_mac(pci_handle) + elif action.lower() == 'dev': + return get_device_name(pci_handle) + else: + _LOGGER.error("Invalid item '%s' in PCI handle '%s'", action, full_pci_handle) + return None + + return pci_handle + +def reinit_vfs(pf_pci_handle): + """ Reinitializates all VFs, which belong to given PF + + :param pf_pci_handle: PCI slot identifier of PF with domain part. 
+ """ + rte_pci_tool = os.path.join(settings.getValue('RTE_SDK'), 'tools', 'dpdk_nic_bind.py') + + for vf_nic in get_sriov_vfs_list(pf_pci_handle): + nic_driver = get_driver(vf_nic) + if nic_driver: + try: + subprocess.call(['sudo', rte_pci_tool, '--unbind', vf_nic], + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + subprocess.call(['sudo', rte_pci_tool, '--bind=' + nic_driver, vf_nic], + stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + except subprocess.CalledProcessError: + _LOGGER.warning('Error during reinitialization of VF %s', vf_nic) + else: + _LOGGER.warning("Can't detect driver for VF %s", vf_nic) + diff --git a/tools/pkt_fwd/testpmd.py b/tools/pkt_fwd/testpmd.py index d8ed8905..e1b987bc 100644 --- a/tools/pkt_fwd/testpmd.py +++ b/tools/pkt_fwd/testpmd.py @@ -42,7 +42,7 @@ class TestPMD(IPktFwd): vswitchd_args += _VSWITCHD_CONST_ARGS vswitchd_args += settings.getValue('TESTPMD_ARGS') - self._nports = len(settings.getValue('WHITELIST_NICS')) + self._nports = len(settings.getValue('NICS')) self._fwdmode = settings.getValue('TESTPMD_FWD_MODE') self._csum_layer = settings.getValue('TESTPMD_CSUM_LAYER') self._csum_calc = settings.getValue('TESTPMD_CSUM_CALC') diff --git a/tools/pkt_gen/moongen/__init__.py b/tools/pkt_gen/moongen/__init__.py new file mode 100644 index 00000000..562eb088 --- /dev/null +++ b/tools/pkt_gen/moongen/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/tools/pkt_gen/moongen/moongen.py b/tools/pkt_gen/moongen/moongen.py new file mode 100644 index 00000000..d6c09e5d --- /dev/null +++ b/tools/pkt_gen/moongen/moongen.py @@ -0,0 +1,753 @@ +# Copyright 2016 Red Hat Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Contributors: +# Bill Michalowski, Red Hat Inc. +# Andrew Theurer, Red Hat Inc. +""" +Moongen Traffic Generator Model +""" + +# python imports +import logging +from collections import OrderedDict +import subprocess +import re + +# VSPerf imports +from conf import settings +from core.results.results_constants import ResultsConstants +from tools.pkt_gen.trafficgen.trafficgenhelper import ( + TRAFFIC_DEFAULTS, + merge_spec) +from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator + +class Moongen(ITrafficGenerator): + """Moongen Traffic generator wrapper.""" + _traffic_defaults = TRAFFIC_DEFAULTS.copy() + _logger = logging.getLogger(__name__) + + def __init__(self): + """Moongen class constructor.""" + self._logger.info("In moongen __init__ method") + self._params = {} + self._moongen_host_ip_addr = ( + settings.getValue('TRAFFICGEN_MOONGEN_HOST_IP_ADDR')) + self._moongen_base_dir = ( + settings.getValue('TRAFFICGEN_MOONGEN_BASE_DIR')) + self._moongen_user = settings.getValue('TRAFFICGEN_MOONGEN_USER') + self._moongen_ports = settings.getValue('TRAFFICGEN_MOONGEN_PORTS') + + @property + def traffic_defaults(self): + """Default traffic values. 
+ + These can be expected to be constant across traffic generators, + so no setter is provided. Changes to the structure or contents + will likely break traffic generator implementations or tests + respectively. + """ + self._logger.info("In moongen traffic_defaults method") + return self._traffic_defaults + + def create_moongen_cfg_file(self, traffic, duration=60, + acceptable_loss_pct=1, one_shot=0): + """Create the MoonGen configuration file from VSPERF's traffic profile + :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags + :param duration: The length of time to generate packet throughput + :param acceptable_loss: Maximum packet loss acceptable + :param one_shot: No RFC 2544 binary search, + just packet flow at traffic specifics + """ + logging.debug("traffic['frame_rate'] = " + \ + str(traffic['frame_rate'])) + + logging.debug("traffic['multistream'] = " + \ + str(traffic['multistream'])) + + logging.debug("traffic['stream_type'] = " + \ + str(traffic['stream_type'])) + + logging.debug("traffic['l2']['srcmac'] = " + \ + str(traffic['l2']['srcmac'])) + + logging.debug("traffic['l2']['dstmac'] = " + \ + str(traffic['l2']['dstmac'])) + + logging.debug("traffic['l3']['proto'] = " + \ + str(traffic['l3']['proto'])) + + logging.debug("traffic['l3']['srcip'] = " + \ + str(traffic['l3']['srcip'])) + + logging.debug("traffic['l3']['dstip'] = " + \ + str(traffic['l3']['dstip'])) + + logging.debug("traffic['l4']['srcport'] = " + \ + str(traffic['l4']['srcport'])) + + logging.debug("traffic['l4']['dstport'] = " + \ + str(traffic['l4']['dstport'])) + + logging.debug("traffic['vlan']['enabled'] = " + \ + str(traffic['vlan']['enabled'])) + + logging.debug("traffic['vlan']['id'] = " + \ + str(traffic['vlan']['id'])) + + logging.debug("traffic['vlan']['priority'] = " + \ + str(traffic['vlan']['priority'])) + + logging.debug("traffic['vlan']['cfi'] = " + \ + str(traffic['vlan']['cfi'])) + + logging.debug(traffic['l2']['framesize']) + + out_file = 
open("opnfv-vsperf-cfg.lua", "wt") + + out_file.write("VSPERF {\n") + + out_file.write("testType = \"throughput\",\n") + + out_file.write("runBidirec = " + \ + traffic['bidir'].lower() + ",\n") + + out_file.write("frameSize = " + \ + str(traffic['l2']['framesize']) + ",\n") + + out_file.write("srcMac = \"" + \ + str(traffic['l2']['srcmac']) + "\",\n") + + out_file.write("dstMac = \"" + \ + str(traffic['l2']['dstmac']) + "\",\n") + + out_file.write("srcIp = \"" + \ + str(traffic['l3']['srcip']) + "\",\n") + + out_file.write("dstIp = \"" + \ + str(traffic['l3']['dstip']) + "\",\n") + + out_file.write("vlanId = " + \ + str(traffic['vlan']['id']) + ",\n") + + out_file.write("searchRunTime = " + \ + str(duration) + ",\n") + + out_file.write("validationRunTime = " + \ + str(duration) + ",\n") + + out_file.write("acceptableLossPct = " + \ + str(acceptable_loss_pct) + ",\n") + + out_file.write("ports = " +\ + str(self._moongen_ports) + ",\n") + + if one_shot: + out_file.write("oneShot = true,\n") + + # Assume 10G line rates at the moment. Need to convert VSPERF + # frame_rate (percentage of line rate) to Mpps for MoonGen + + out_file.write("startRate = " + str((traffic['frame_rate'] / 100) * 14.88) + "\n") + out_file.write("}" + "\n") + out_file.close() + + copy_moongen_cfg = "scp opnfv-vsperf-cfg.lua " + \ + self._moongen_user + "@" + \ + self._moongen_host_ip_addr + ":" + \ + self._moongen_base_dir + \ + "/. 
&& rm opnfv-vsperf-cfg.lua" + + find_moongen = subprocess.Popen(copy_moongen_cfg, + shell=True, + stderr=subprocess.PIPE) + + output, error = find_moongen.communicate() + + if error: + logging.error(output) + logging.error(error) + raise RuntimeError('MOONGEN: Error copying configuration file') + + def connect(self): + """Connect to MoonGen traffic generator + + Verify that MoonGen is on the system indicated by + the configuration file + """ + self._logger.info("MOONGEN: In MoonGen connect method...") + + if self._moongen_host_ip_addr: + cmd_ping = "ping -c1 " + self._moongen_host_ip_addr + else: + raise RuntimeError('MOONGEN: MoonGen host not defined') + + ping = subprocess.Popen(cmd_ping, shell=True, stderr=subprocess.PIPE) + output, error = ping.communicate() + + if ping.returncode: + self._logger.error(error) + self._logger.error(output) + raise RuntimeError('MOONGEN: Cannot ping MoonGen host at ' + \ + self._moongen_host_ip_addr) + + connect_moongen = "ssh " + self._moongen_user + \ + "@" + self._moongen_host_ip_addr + + cmd_find_moongen = connect_moongen + " ls " + \ + self._moongen_base_dir + "/examples/opnfv-vsperf.lua" + + find_moongen = subprocess.Popen(cmd_find_moongen, + shell=True, + stderr=subprocess.PIPE) + + output, error = find_moongen.communicate() + + if find_moongen.returncode: + self._logger.error(error) + self._logger.error(output) + raise RuntimeError( + 'MOONGEN: Cannot locate MoonGen program at %s within %s' \ + % (self._moongen_host_ip_addr, self._moongen_base_dir)) + + self._logger.info("MOONGEN: MoonGen host successfully found...") + + def disconnect(self): + """Disconnect from the traffic generator. + + As with :func:`connect`, this function is optional. + + Where implemented, this function should raise an exception on + failure. + + :returns: None + """ + self._logger.info("MOONGEN: In moongen disconnect method") + + def send_burst_traffic(self, traffic=None, numpkts=100, duration=20): + """Send a burst of traffic. 
+ + Send a ``numpkts`` packets of traffic, using ``traffic`` + configuration, with a timeout of ``time``. + + :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags + :param numpkts: Number of packets to send + :param duration: Time to wait to receive packets + + :returns: dictionary of strings with following data: + - List of Tx Frames, + - List of Rx Frames, + - List of Tx Bytes, + - List of List of Rx Bytes, + - Payload Errors and Sequence Errors. + """ + self._logger.info("In moongen send_burst_traffic method") + return NotImplementedError('Moongen Burst traffic not implemented') + + def send_cont_traffic(self, traffic=None, duration=20): + """Send a continuous flow of traffic + + Send packets at ``frame rate``, using ``traffic`` configuration, + until timeout ``time`` occurs. + + :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags + :param duration: Time to wait to receive packets (secs) + :returns: dictionary of strings with following data: + - Tx Throughput (fps), + - Rx Throughput (fps), + - Tx Throughput (mbps), + - Rx Throughput (mbps), + - Tx Throughput (% linerate), + - Rx Throughput (% linerate), + - Min Latency (ns), + - Max Latency (ns), + - Avg Latency (ns) + """ + self._logger.info("In moongen send_cont_traffic method") + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + Moongen.create_moongen_cfg_file(self, + traffic, + duration=duration, + acceptable_loss_pct=100.0, + one_shot=1) + + collected_results = Moongen.run_moongen_and_collect_results(self, + test_run=1) + + total_throughput_rx_fps = ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS])) + + total_throughput_rx_mbps = ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS])) + + total_throughput_rx_pct = ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT])) + + total_throughput_tx_fps = ( + 
float(collected_results[ResultsConstants.TX_RATE_FPS]))
+
+        total_throughput_tx_mbps = (
+            float(collected_results[ResultsConstants.TX_RATE_MBPS]))
+
+        total_throughput_tx_pct = (
+            float(collected_results[ResultsConstants.TX_RATE_PERCENT]))
+
+        total_min_latency_ns = 0
+        total_max_latency_ns = 0
+        total_avg_latency_ns = 0
+
+        results = OrderedDict()
+        results[ResultsConstants.THROUGHPUT_RX_FPS] = (
+            '{:,.6f}'.format(total_throughput_rx_fps))
+
+        results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
+            '{:,.3f}'.format(total_throughput_rx_mbps))
+
+        results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
+            '{:,.3f}'.format(total_throughput_rx_pct))
+
+        results[ResultsConstants.TX_RATE_FPS] = (
+            '{:,.6f}'.format(total_throughput_tx_fps))
+
+        results[ResultsConstants.TX_RATE_MBPS] = (
+            '{:,.3f}'.format(total_throughput_tx_mbps))
+
+        results[ResultsConstants.TX_RATE_PERCENT] = (
+            '{:,.3f}'.format(total_throughput_tx_pct))
+
+        results[ResultsConstants.MIN_LATENCY_NS] = (
+            '{:,.3f}'.format(total_min_latency_ns))
+
+        results[ResultsConstants.MAX_LATENCY_NS] = (
+            '{:,.3f}'.format(total_max_latency_ns))
+
+        results[ResultsConstants.AVG_LATENCY_NS] = (
+            '{:,.3f}'.format(total_avg_latency_ns))
+
+        return results
+
+    def start_cont_traffic(self, traffic=None, duration=20):
+        """ Non-blocking version of 'send_cont_traffic'.
+
+        Start transmission and immediately return. Do not wait for
+        results.
+        :param traffic: Detailed "traffic" spec, i.e. IP address, VLAN tags
+        :param duration: Time to wait to receive packets (secs)
+        """
+        self._logger.info("In moongen start_cont_traffic method")
+        raise NotImplementedError('Moongen continuous traffic not implemented')
+
+    def stop_cont_traffic(self):
+        # Stop continuous transmission and return results.
+ self._logger.info("In moongen stop_cont_traffic method") + + def run_moongen_and_collect_results(self, test_run=1): + """Execute MoonGen and transform results into VSPERF format + :param test_run: The number of tests to run + """ + # Start MoonGen and create logfile of the run + connect_moongen = "ssh " + self._moongen_user + "@" + \ + self._moongen_host_ip_addr + + cmd_moongen = " 'cd " + self._moongen_base_dir + \ + "; ./build/MoonGen examples/opnfv-vsperf.lua | tee moongen_log.txt'" + + cmd_start_moongen = connect_moongen + cmd_moongen + + start_moongen = subprocess.Popen(cmd_start_moongen, + shell=True, stderr=subprocess.PIPE) + + output, error = start_moongen.communicate() + + if start_moongen.returncode: + logging.debug(error) + logging.debug(output) + raise RuntimeError( + 'MOONGEN: Error starting MoonGen program at %s within %s' \ + % (self._moongen_host_ip_addr, self._moongen_base_dir)) + + cmd_moongen = "mkdir -p /tmp/moongen/" + str(test_run) + + moongen_create_log_dir = subprocess.Popen(cmd_moongen, + shell=True, + stderr=subprocess.PIPE) + + output, error = moongen_create_log_dir.communicate() + + if moongen_create_log_dir.returncode: + logging.debug(error) + logging.debug(output) + raise RuntimeError( + 'MOONGEN: Error obtaining MoonGen log from %s within %s' \ + % (self._moongen_host_ip_addr, self._moongen_base_dir)) + + cmd_moongen = " scp " + self._moongen_user + "@" + \ + self._moongen_host_ip_addr + ":" + \ + self._moongen_base_dir + "/moongen_log.txt /tmp/moongen/" + \ + str(test_run) + "/moongen-run.log" + + copy_moongen_log = subprocess.Popen(cmd_moongen, + shell=True, + stderr=subprocess.PIPE) + + output, error = copy_moongen_log.communicate() + + if copy_moongen_log.returncode: + logging.debug(error) + logging.debug(output) + raise RuntimeError( + 'MOONGEN: Error obtaining MoonGen log from %s within %s' \ + % (self._moongen_host_ip_addr, self._moongen_base_dir)) + + log_file = "/tmp/moongen/" + str(test_run) + "/moongen-run.log" + + with 
open(log_file, 'r') as logfile_handle: + mytext = logfile_handle.read() + + # REPORT results line + # match.group(1) = Tx frames + # match.group(2) = Rx frames + # match.group(3) = Frame loss (count) + # match.group(4) = Frame loss (percentage) + # match.group(5) = Tx Mpps + # match.group(6) = Rx Mpps + search_pattern = re.compile( + r'\[REPORT\]\s+total\:\s+' + r'Tx\s+frames\:\s+(\d+)\s+' + r'Rx\s+Frames\:\s+(\d+)\s+' + r'frame\s+loss\:\s+(\d+)\,' + r'\s+(\d+\.\d+|\d+)%\s+' + r'Tx\s+Mpps\:\s+(\d+.\d+|\d+)\s+' + r'Rx\s+Mpps\:\s+(\d+\.\d+|\d+)', + re.IGNORECASE) + + results_match = search_pattern.search(mytext) + + if not results_match: + logging.error('There was a problem parsing ' +\ + 'MoonGen REPORT section of MoonGen log file') + + moongen_results = OrderedDict() + moongen_results[ResultsConstants.THROUGHPUT_RX_FPS] = 0 + moongen_results[ResultsConstants.THROUGHPUT_RX_MBPS] = 0 + moongen_results[ResultsConstants.THROUGHPUT_RX_PERCENT] = 0 + moongen_results[ResultsConstants.TX_RATE_FPS] = 0 + moongen_results[ResultsConstants.TX_RATE_MBPS] = 0 + moongen_results[ResultsConstants.TX_RATE_PERCENT] = 0 + moongen_results[ResultsConstants.B2B_TX_COUNT] = 0 + moongen_results[ResultsConstants.B2B_FRAMES] = 0 + moongen_results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = 0 + moongen_results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = 0 + + # find PARAMETERS line + # parameters_match.group(1) = Frame size + + search_pattern = re.compile( + r'\[PARAMETERS\]\s+.*frameSize\:\s+(\d+)', + flags=re.IGNORECASE) + parameters_match = search_pattern.search(mytext) + + if parameters_match: + frame_size = int(parameters_match.group(1)) + else: + logging.error('There was a problem parsing MoonGen ' +\ + 'PARAMETERS section of MoonGen log file') + frame_size = 0 + + if results_match and parameters_match: + # Assume for now 10G link speed + max_theoretical_mfps = ( + (10000000000 / 8) / (frame_size + 20)) + + moongen_results[ResultsConstants.THROUGHPUT_RX_FPS] = ( + 
float(results_match.group(6)) * 1000000)
+
+            moongen_results[ResultsConstants.THROUGHPUT_RX_MBPS] = (
+                float(results_match.group(6)) * (frame_size + 20) * 8)
+
+            moongen_results[ResultsConstants.THROUGHPUT_RX_PERCENT] = (
+                float(results_match.group(6)) * \
+                1000000 / max_theoretical_mfps * 100)
+
+            moongen_results[ResultsConstants.TX_RATE_FPS] = (
+                float(results_match.group(5)) * 1000000)
+
+            moongen_results[ResultsConstants.TX_RATE_MBPS] = (
+                float(results_match.group(5)) * (frame_size + 20) * 8)
+
+            moongen_results[ResultsConstants.TX_RATE_PERCENT] = (
+                float(results_match.group(5)) *
+                1000000 / max_theoretical_mfps * 100)
+
+            moongen_results[ResultsConstants.B2B_TX_COUNT] = (
+                float(results_match.group(1)))
+
+            moongen_results[ResultsConstants.B2B_FRAMES] = (
+                float(results_match.group(2)))
+
+            moongen_results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = (
+                float(results_match.group(3)))
+
+            moongen_results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = (
+                float(results_match.group(4)))
+
+        return moongen_results
+
+    def send_rfc2544_throughput(self, traffic=None, duration=20,
+                                lossrate=0.0, trials=1):
+        #
+        # Send traffic per RFC2544 throughput test specifications.
+        #
+        # Send packets at a variable rate, using ``traffic``
+        # configuration, until minimum rate at which no packet loss is
+        # detected is found.
+ # + # :param traffic: Detailed "traffic" spec, see design docs for details + # :param trials: Number of trials to execute + # :param duration: Per iteration duration + # :param lossrate: Acceptable lossrate percentage + # :returns: dictionary of strings with following data: + # - Tx Throughput (fps), + # - Rx Throughput (fps), + # - Tx Throughput (mbps), + # - Rx Throughput (mbps), + # - Tx Throughput (% linerate), + # - Rx Throughput (% linerate), + # - Min Latency (ns), + # - Max Latency (ns), + # - Avg Latency (ns) + # + self._logger.info("In moongen send_rfc2544_throughput method") + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + Moongen.create_moongen_cfg_file(self, + traffic, + duration=duration, + acceptable_loss_pct=lossrate) + + total_throughput_rx_fps = 0 + total_throughput_rx_mbps = 0 + total_throughput_rx_pct = 0 + total_throughput_tx_fps = 0 + total_throughput_tx_mbps = 0 + total_throughput_tx_pct = 0 + total_min_latency_ns = 0 + total_max_latency_ns = 0 + total_avg_latency_ns = 0 + + for test_run in range(1, trials+1): + collected_results = ( + Moongen.run_moongen_and_collect_results(self, test_run=test_run)) + + total_throughput_rx_fps += ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS])) + + total_throughput_rx_mbps += ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_MBPS])) + + total_throughput_rx_pct += ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT])) + + total_throughput_tx_fps += ( + float(collected_results[ResultsConstants.TX_RATE_FPS])) + + total_throughput_tx_mbps += ( + float(collected_results[ResultsConstants.TX_RATE_MBPS])) + + total_throughput_tx_pct += ( + float(collected_results[ResultsConstants.TX_RATE_PERCENT])) + + # Latency not supported now, leaving as placeholder + total_min_latency_ns = 0 + total_max_latency_ns = 0 + total_avg_latency_ns = 0 + + results = 
OrderedDict() + results[ResultsConstants.THROUGHPUT_RX_FPS] = ( + '{:,.6f}'.format(total_throughput_rx_fps / trials)) + + results[ResultsConstants.THROUGHPUT_RX_MBPS] = ( + '{:,.3f}'.format(total_throughput_rx_mbps / trials)) + + results[ResultsConstants.THROUGHPUT_RX_PERCENT] = ( + '{:,.3f}'.format(total_throughput_rx_pct / trials)) + + results[ResultsConstants.TX_RATE_FPS] = ( + '{:,.6f}'.format(total_throughput_tx_fps / trials)) + + results[ResultsConstants.TX_RATE_MBPS] = ( + '{:,.3f}'.format(total_throughput_tx_mbps / trials)) + + results[ResultsConstants.TX_RATE_PERCENT] = ( + '{:,.3f}'.format(total_throughput_tx_pct / trials)) + + results[ResultsConstants.MIN_LATENCY_NS] = ( + '{:,.3f}'.format(total_min_latency_ns / trials)) + + results[ResultsConstants.MAX_LATENCY_NS] = ( + '{:,.3f}'.format(total_max_latency_ns / trials)) + + results[ResultsConstants.AVG_LATENCY_NS] = ( + '{:,.3f}'.format(total_avg_latency_ns / trials)) + + return results + + def start_rfc2544_throughput(self, traffic=None, trials=3, duration=20, + lossrate=0.0): + """Non-blocking version of 'send_rfc2544_throughput'. + + Start transmission and immediately return. Do not wait for + results. + """ + self._logger.info( + "MOONGEN: In moongen start_rfc2544_throughput method") + + def wait_rfc2544_throughput(self): + """Wait for and return results of RFC2544 test. + """ + self._logger.info('In moongen wait_rfc2544_throughput') + + def send_rfc2544_back2back(self, traffic=None, duration=60, + lossrate=0.0, trials=1): + """Send traffic per RFC2544 back2back test specifications. + + Send packets at a fixed rate, using ``traffic`` + configuration, for duration seconds. 
+ + :param traffic: Detailed "traffic" spec, see design docs for details + :param trials: Number of trials to execute + :param duration: Per iteration duration + :param lossrate: Acceptable loss percentage + + :returns: Named tuple of Rx Throughput (fps), Rx Throughput (mbps), + Tx Rate (% linerate), Rx Rate (% linerate), Tx Count (frames), + Back to Back Count (frames), Frame Loss (frames), Frame Loss (%) + :rtype: :class:`Back2BackResult` + """ + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + Moongen.create_moongen_cfg_file(self, + traffic, + duration=duration, + acceptable_loss_pct=lossrate) + + results = OrderedDict() + results[ResultsConstants.B2B_RX_FPS] = 0 + results[ResultsConstants.B2B_TX_FPS] = 0 + results[ResultsConstants.B2B_RX_PERCENT] = 0 + results[ResultsConstants.B2B_TX_PERCENT] = 0 + results[ResultsConstants.B2B_TX_COUNT] = 0 + results[ResultsConstants.B2B_FRAMES] = 0 + results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = 0 + results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = 0 + results[ResultsConstants.SCAL_STREAM_COUNT] = 0 + results[ResultsConstants.SCAL_STREAM_TYPE] = 0 + results[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = 0 + + for test_run in range(1, trials+1): + collected_results = ( + Moongen.run_moongen_and_collect_results(self, test_run=test_run)) + + results[ResultsConstants.B2B_RX_FPS] += ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_FPS])) + + results[ResultsConstants.B2B_RX_PERCENT] += ( + float(collected_results[ResultsConstants.THROUGHPUT_RX_PERCENT])) + + results[ResultsConstants.B2B_TX_FPS] += ( + float(collected_results[ResultsConstants.TX_RATE_FPS])) + + results[ResultsConstants.B2B_TX_PERCENT] += ( + float(collected_results[ResultsConstants.TX_RATE_PERCENT])) + + results[ResultsConstants.B2B_TX_COUNT] += ( + int(collected_results[ResultsConstants.B2B_TX_COUNT])) + + 
results[ResultsConstants.B2B_FRAMES] += ( + int(collected_results[ResultsConstants.B2B_FRAMES])) + + results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] += ( + int(collected_results[ResultsConstants.B2B_FRAME_LOSS_FRAMES])) + + results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] += ( + int(collected_results[ResultsConstants.B2B_FRAME_LOSS_PERCENT])) + + # Calculate average results + results[ResultsConstants.B2B_RX_FPS] = ( + results[ResultsConstants.B2B_RX_FPS] / trials) + + results[ResultsConstants.B2B_RX_PERCENT] = ( + results[ResultsConstants.B2B_RX_PERCENT] / trials) + + results[ResultsConstants.B2B_TX_FPS] = ( + results[ResultsConstants.B2B_TX_FPS] / trials) + + results[ResultsConstants.B2B_TX_PERCENT] = ( + results[ResultsConstants.B2B_TX_PERCENT] / trials) + + results[ResultsConstants.B2B_TX_COUNT] = ( + results[ResultsConstants.B2B_TX_COUNT] / trials) + + results[ResultsConstants.B2B_FRAMES] = ( + results[ResultsConstants.B2B_FRAMES] / trials) + + results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] = ( + results[ResultsConstants.B2B_FRAME_LOSS_FRAMES] / trials) + + results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = ( + results[ResultsConstants.B2B_FRAME_LOSS_PERCENT] / trials) + + results[ResultsConstants.SCAL_STREAM_COUNT] = 0 + results[ResultsConstants.SCAL_STREAM_TYPE] = 0 + results[ResultsConstants.SCAL_PRE_INSTALLED_FLOWS] = 0 + + return results + + def start_rfc2544_back2back(self, traffic=None, trials=1, duration=20, + lossrate=0.0): + # + # Non-blocking version of 'send_rfc2544_back2back'. + # + # Start transmission and immediately return. Do not wait for results. + # + self._logger.info("In moongen start_rfc2544_back2back method") + return NotImplementedError( + 'Moongen start back2back traffic not implemented') + + def wait_rfc2544_back2back(self): + self._logger.info("In moongen wait_rfc2544_back2back method") + # + # Wait and set results of RFC2544 test. 
+        #
+        raise NotImplementedError(
+            'Moongen wait back2back traffic not implemented')
+
+if __name__ == "__main__":
+    pass
diff --git a/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
new file mode 100644
index 00000000..91f7e27f
--- /dev/null
+++ b/tools/pkt_gen/testcenter/testcenter-rfc2544-rest.py
@@ -0,0 +1,570 @@
+# Copyright 2016 Spirent Communications.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+@author Spirent Communications
+
+This test automates the RFC2544 tests using the Spirent
+TestCenter REST APIs. This test supports Python 3.4
+
+'''
+import argparse
+import logging
+import os
+
+
+logger = logging.getLogger(__name__)
+
+
+def create_dir(path):
+    """Create the directory as specified in path """
+    if not os.path.exists(path):
+        try:
+            os.makedirs(path)
+        except OSError as e:
+            logger.error("Failed to create directory %s: %s", path, str(e))
+            raise
+
+
+def write_query_results_to_csv(results_path, csv_results_file_prefix,
+                               query_results):
+    """ Write the results of the query to the CSV """
+    create_dir(results_path)
+    filec = os.path.join(results_path, csv_results_file_prefix + ".csv")
+    with open(filec, "w") as f:
+        f.write(query_results["Columns"].replace(" ", ",") + "\n")
+        for row in (query_results["Output"].replace("} {", ",").
+ replace("{", "").replace("}", "").split(",")): + f.write(row.replace(" ", ",") + "\n") + + +def positive_int(value): + """ Positive Integer type for Arguments """ + ivalue = int(value) + if ivalue <= 0: + raise argparse.ArgumentTypeError( + "%s is an invalid positive int value" % value) + return ivalue + + +def percent_float(value): + """ Floating type for Arguments """ + pvalue = float(value) + if pvalue < 0.0 or pvalue > 100.0: + raise argparse.ArgumentTypeError( + "%s not in range [0.0, 100.0]" % pvalue) + return pvalue + + +def main(): + """ Read the arguments, Invoke Test and Return the results""" + parser = argparse.ArgumentParser() + # Required parameters + required_named = parser.add_argument_group("required named arguments") + required_named.add_argument("--lab_server_addr", + required=True, + help=("The IP address of the" + "Spirent Lab Server"), + dest="lab_server_addr") + required_named.add_argument("--license_server_addr", + required=True, + help=("The IP address of the Spirent" + "License Server"), + dest="license_server_addr") + required_named.add_argument("--east_chassis_addr", + required=True, + help=("The TestCenter chassis IP address to" + "use for the east test port"), + dest="east_chassis_addr") + required_named.add_argument("--east_slot_num", + type=positive_int, + required=True, + help=("The TestCenter slot number to" + "use for the east test port"), + dest="east_slot_num") + required_named.add_argument("--east_port_num", + type=positive_int, + required=True, + help=("The TestCenter port number to use" + "for the east test port"), + dest="east_port_num") + required_named.add_argument("--west_chassis_addr", + required=True, + help=("The TestCenter chassis IP address" + "to use for the west test port"), + dest="west_chassis_addr") + required_named.add_argument("--west_slot_num", + type=positive_int, + required=True, + help=("The TestCenter slot number to use" + "for the west test port"), + dest="west_slot_num") + 
required_named.add_argument("--west_port_num", + type=positive_int, + required=True, + help=("The TestCenter port number to" + "use for the west test port"), + dest="west_port_num") + # Optional parameters + optional_named = parser.add_argument_group("optional named arguments") + optional_named.add_argument("--metric", + required=False, + help=("One among - throughput, latency,\ + backtoback and frameloss"), + choices=["throughput", "latency", + "backtoback", "frameloss"], + default="throughput", + dest="metric") + optional_named.add_argument("--test_session_name", + required=False, + default="RFC2544 East-West Throughput", + help=("The friendly name to identify" + "the Spirent Lab Server test session"), + dest="test_session_name") + + optional_named.add_argument("--test_user_name", + required=False, + default="RFC2544 East-West User", + help=("The friendly name to identify the" + "Spirent Lab Server test user"), + dest="test_user_name") + optional_named.add_argument("--results_dir", + required=False, + default="./Results", + help="The directory to copy results to", + dest="results_dir") + optional_named.add_argument("--csv_results_file_prefix", + required=False, + default="Rfc2544Tput", + help="The prefix for the CSV results files", + dest="csv_results_file_prefix") + optional_named.add_argument("--num_trials", + type=positive_int, + required=False, + default=1, + help=("The number of trials to execute during" + "the test"), + dest="num_trials") + optional_named.add_argument("--trial_duration_sec", + type=positive_int, + required=False, + default=60, + help=("The duration of each trial executed" + "during the test"), + dest="trial_duration_sec") + optional_named.add_argument("--traffic_pattern", + required=False, + choices=["BACKBONE", "MESH", "PAIR"], + default="PAIR", + help="The traffic pattern between endpoints", + dest="traffic_pattern") + optional_named.add_argument("--traffic_custom", + required=False, + default=None, + help="The traffic pattern between 
endpoints", + dest="traffic_custom") + optional_named.add_argument("--search_mode", + required=False, + choices=["COMBO", "STEP", "BINARY"], + default="BINARY", + help=("The search mode used to find the" + "throughput rate"), + dest="search_mode") + optional_named.add_argument("--learning_mode", + required=False, + choices=["AUTO", "L2_LEARNING", + "L3_LEARNING", "NONE"], + default="AUTO", + help=("The learning mode used during the test," + "default is 'NONE'"), + dest="learning_mode") + optional_named.add_argument("--rate_lower_limit_pct", + type=percent_float, + required=False, + default=1.0, + help=("The minimum percent line rate that" + "will be used during the test"), + dest="rate_lower_limit_pct") + optional_named.add_argument("--rate_upper_limit_pct", + type=percent_float, + required=False, + default=99.0, + help=("The maximum percent line rate that" + "will be used during the test"), + dest="rate_upper_limit_pct") + optional_named.add_argument("--rate_initial_pct", + type=percent_float, + required=False, + default=99.0, + help=("If Search Mode is BINARY, the percent" + "line rate that will be used at the" + "start of the test"), + dest="rate_initial_pct") + optional_named.add_argument("--rate_step_pct", + type=percent_float, + required=False, + default=10.0, + help=("If SearchMode is STEP, the percent" + "load increase per step"), + dest="rate_step_pct") + optional_named.add_argument("--resolution_pct", + type=percent_float, + required=False, + default=1.0, + help=("The minimum percentage of load" + "adjustment between iterations"), + dest="resolution_pct") + optional_named.add_argument("--frame_size_list", + type=lambda s: [int(item) + for item in s.split(',')], + required=False, + default=[256], + help="A comma-delimited list of frame sizes", + dest="frame_size_list") + optional_named.add_argument("--acceptable_frame_loss_pct", + type=percent_float, + required=False, + default=0.0, + help=("The maximum acceptable frame loss" + "percent in any iteration"), 
+ dest="acceptable_frame_loss_pct") + optional_named.add_argument("--east_intf_addr", + required=False, + default="192.85.1.3", + help=("The address to assign to the first" + "emulated device interface on the first" + "east port"), + dest="east_intf_addr") + optional_named.add_argument("--east_intf_gateway_addr", + required=False, + default="192.85.1.53", + help=("The gateway address to assign to the" + "first emulated device interface on the" + "first east port"), + dest="east_intf_gateway_addr") + optional_named.add_argument("--west_intf_addr", + required=False, + default="192.85.1.53", + help=("The address to assign to the first" + "emulated device interface on the" + "first west port"), + dest="west_intf_addr") + optional_named.add_argument("--west_intf_gateway_addr", + required=False, + default="192.85.1.53", + help=("The gateway address to assign to" + "the first emulated device interface" + "on the first west port"), + dest="west_intf_gateway_addr") + parser.add_argument("-v", + "--verbose", + required=False, + default=True, + help="More output during operation when present", + action="store_true", + dest="verbose") + args = parser.parse_args() + + if args.verbose: + logger.debug("Creating results directory") + create_dir(args.results_dir) + + session_name = args.test_session_name + user_name = args.test_user_name + + try: + # Load Spirent REST Library + from stcrestclient import stchttp + + stc = stchttp.StcHttp(args.lab_server_addr) + session_id = stc.new_session(user_name, session_name) + stc.join_session(session_id) + except RuntimeError as e: + logger.error(e) + raise + + # Get STC system info. 
+ tx_port_loc = "//%s/%s/%s" % (args.east_chassis_addr, + args.east_slot_num, + args.east_port_num) + rx_port_loc = "//%s/%s/%s" % (args.west_chassis_addr, + args.west_slot_num, + args.west_port_num) + + # Retrieve and display the server information + if args.verbose: + logger.debug("SpirentTestCenter system version: %s", + stc.get("system1", "version")) + + try: + device_list = [] + port_list = [] + if args.verbose: + logger.debug("Bring up license server") + license_mgr = stc.get("system1", "children-licenseservermanager") + if args.verbose: + logger.debug("license_mgr = %s", license_mgr) + stc.create("LicenseServer", under=license_mgr, attributes={ + "server": args.license_server_addr}) + + # Create the root project object + if args.verbose: + logger.debug("Creating project ...") + project = stc.get("System1", "children-Project") + + # Configure any custom traffic parameters + if args.traffic_custom == "cont": + if args.verbose: + logger.debug("Configure Continuous Traffic") + stc.create("ContinuousTestConfig", under=project) + + # Create ports + if args.verbose: + logger.debug("Creating ports ...") + east_chassis_port = stc.create('port', project) + if args.verbose: + logger.debug("Configuring TX port ...") + stc.config(east_chassis_port, {'location': tx_port_loc}) + port_list.append(east_chassis_port) + + west_chassis_port = stc.create('port', project) + if args.verbose: + logger.debug("Configuring RX port ...") + stc.config(west_chassis_port, {'location': rx_port_loc}) + port_list.append(west_chassis_port) + + # Create emulated genparam for east port + east_device_gen_params = stc.create("EmulatedDeviceGenParams", + under=project, + attributes={"Port": + east_chassis_port}) + # Create the DeviceGenEthIIIfParams object + stc.create("DeviceGenEthIIIfParams", + under=east_device_gen_params) + # Configuring Ipv4 interfaces + stc.create("DeviceGenIpv4IfParams", + under=east_device_gen_params, + attributes={"Addr": args.east_intf_addr, + "Gateway": 
args.east_intf_gateway_addr}) + # Create Devices using the Device Wizard + device_gen_config = stc.perform("DeviceGenConfigExpand", + params={"DeleteExisting": "No", + "GenParams": + east_device_gen_params}) + # Append to the device list + device_list.append(device_gen_config['ReturnList']) + + # Create emulated genparam for west port + west_device_gen_params = stc.create("EmulatedDeviceGenParams", + under=project, + attributes={"Port": + west_chassis_port}) + # Create the DeviceGenEthIIIfParams object + stc.create("DeviceGenEthIIIfParams", + under=west_device_gen_params) + # Configuring Ipv4 interfaces + stc.create("DeviceGenIpv4IfParams", + under=west_device_gen_params, + attributes={"Addr": args.west_intf_addr, + "Gateway": args.west_intf_gateway_addr}) + # Create Devices using the Device Wizard + device_gen_config = stc.perform("DeviceGenConfigExpand", + params={"DeleteExisting": "No", + "GenParams": + west_device_gen_params}) + # Append to the device list + device_list.append(device_gen_config['ReturnList']) + if args.verbose: + logger.debug(device_list) + + # Create the RFC 2544 'metric test + if args.metric == "throughput": + if args.verbose: + logger.debug("Set up the RFC2544 throughput test...") + stc.perform("Rfc2544SetupThroughputTestCommand", + params={"AcceptableFrameLoss": + args.acceptable_frame_loss_pct, + "Duration": args.trial_duration_sec, + "FrameSizeList": args.frame_size_list, + "LearningMode": args.learning_mode, + "NumOfTrials": args.num_trials, + "RateInitial": args.rate_initial_pct, + "RateLowerLimit": args.rate_lower_limit_pct, + "RateStep": args.rate_step_pct, + "RateUpperLimit": args.rate_upper_limit_pct, + "Resolution": args.resolution_pct, + "SearchMode": args.search_mode, + "TrafficPattern": args.traffic_pattern}) + elif args.metric == "backtoback": + stc.perform("Rfc2544SetupBackToBackTestCommand", + params={"AcceptableFrameLoss": + args.acceptable_frame_loss_pct, + "Duration": args.trial_duration_sec, + "FrameSizeList": 
args.frame_size_list, + "LearningMode": args.learning_mode, + "LatencyType": args.latency_type, + "NumOfTrials": args.num_trials, + "RateInitial": args.rate_initial_pct, + "RateLowerLimit": args.rate_lower_limit_pct, + "RateStep": args.rate_step_pct, + "RateUpperLimit": args.rate_upper_limit_pct, + "Resolution": args.resolution_pct, + "SearchMode": args.search_mode, + "TrafficPattern": args.traffic_pattern}) + elif args.metric == "frameloss": + stc.perform("Rfc2544SetupFrameLossTestCommand", + params={"AcceptableFrameLoss": + args.acceptable_frame_loss_pct, + "Duration": args.trial_duration_sec, + "FrameSizeList": args.frame_size_list, + "LearningMode": args.learning_mode, + "LatencyType": args.latency_type, + "NumOfTrials": args.num_trials, + "RateInitial": args.rate_initial_pct, + "RateLowerLimit": args.rate_lower_limit_pct, + "RateStep": args.rate_step_pct, + "RateUpperLimit": args.rate_upper_limit_pct, + "Resolution": args.resolution_pct, + "SearchMode": args.search_mode, + "TrafficPattern": args.traffic_pattern}) + elif args.metric == "latency": + stc.perform("Rfc2544SetupLatencyTestCommand", + params={"AcceptableFrameLoss": + args.acceptable_frame_loss_pct, + "Duration": args.trial_duration_sec, + "FrameSizeList": args.frame_size_list, + "LearningMode": args.learning_mode, + "LatencyType": args.latency_type, + "NumOfTrials": args.num_trials, + "RateInitial": args.rate_initial_pct, + "RateLowerLimit": args.rate_lower_limit_pct, + "RateStep": args.rate_step_pct, + "RateUpperLimit": args.rate_upper_limit_pct, + "Resolution": args.resolution_pct, + "SearchMode": args.search_mode, + "TrafficPattern": args.traffic_pattern}) + + # Save the configuration + stc.perform("SaveToTcc", params={"Filename": "2544.tcc"}) + # Connect to the hardware... + stc.perform("AttachPorts", params={"portList": stc.get( + "system1.project", "children-port"), "autoConnect": "TRUE"}) + # Apply configuration. 
+ if args.verbose: + logger.debug("Apply configuration...") + stc.apply() + + if args.verbose: + logger.debug("Starting the sequencer...") + stc.perform("SequencerStart") + + # Wait for sequencer to finish + logger.info( + "Starting test... Please wait for the test to complete...") + stc.wait_until_complete() + logger.info("The test has completed... Saving results...") + + # Determine what the results database filename is... + lab_server_resultsdb = stc.get( + "system1.project.TestResultSetting", "CurrentResultFileName") + + if args.verbose: + logger.debug("The lab server results database is %s", + lab_server_resultsdb) + + stc.perform("CSSynchronizeFiles", + params={"DefaultDownloadDir": args.results_dir}) + + resultsdb = args.results_dir + \ + lab_server_resultsdb.split("/Results")[1] + + logger.info( + "The local summary DB file has been saved to %s", resultsdb) + + # The returns the "RFC2544ThroughputTestResultDetailedSummaryView" + # table view from the results database. + # There are other views available. + + if args.metric == "throughput": + resultsdict = ( + stc.perform("QueryResult", + params={ + "DatabaseConnectionString": + resultsdb, + "ResultPath": + ("RFC2544ThroughputTestResultDetailed" + "SummaryView")})) + + # The returns the "RFC2544BacktoBackTestResultDetailedSummaryView" + # table view from the results database. + # There are other views available. + elif args.metric == "backtoback": + resultsdict = ( + stc.perform("QueryResult", + params={ + "DatabaseConnectionString": + resultsdb, + "ResultPath": + ("RFC2544Back2BackTestResultDetailed" + "SummaryView")})) + + # The returns the "RFC2544LatencyTestResultDetailedSummaryView" + # table view from the results database. + # There are other views available. 
+ elif args.metric == "latency": + resultsdict = ( + stc.perform("QueryResult", + params={ + "DatabaseConnectionString": + resultsdb, + "ResultPath": + ("RFC2544LatencyTestResultDetailed" + "SummaryView")})) + + # The returns the "RFC2544FrameLossTestResultDetailedSummaryView" + # table view from the results database. + # There are other views available. + elif args.metric == "frameloss": + resultsdict = ( + stc.perform("QueryResult", + params={ + "DatabaseConnectionString": + resultsdb, + "ResultPath": + ("RFC2544FrameLossTestResultDetailed" + "SummaryView")})) + if args.verbose: + logger.debug("resultsdict[\"Columns\"]: %s", + resultsdict["Columns"]) + logger.debug("resultsdict[\"Output\"]: %s", resultsdict["Output"]) + logger.debug("Result paths: %s", + stc.perform("GetTestResultSettingPaths")) + + # Write results to csv + logger.debug("Writing CSV file to results directory %s", + args.results_dir) + write_query_results_to_csv( + args.results_dir, args.csv_results_file_prefix, resultsdict) + + except RuntimeError as e: + logger.error(e) + + if args.verbose: + logger.debug("Destroy session on lab server") + stc.end_session() + + logger.info("Test complete!") + +if __name__ == "__main__": + main() diff --git a/tools/pkt_gen/testcenter/testcenter.py b/tools/pkt_gen/testcenter/testcenter.py index f670612c..a1f38d8b 100644 --- a/tools/pkt_gen/testcenter/testcenter.py +++ b/tools/pkt_gen/testcenter/testcenter.py @@ -1,4 +1,4 @@ -# Copyright 2015 Spirent Communications. +# Copyright 2016 Spirent Communications. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,20 +19,108 @@ Provides a model for Spirent TestCenter as a test tool for implementing various performance tests of a virtual switch. 
""" -from __future__ import print_function - -from tools.pkt_gen import trafficgen -from core.results.results_constants import ResultsConstants -import subprocess -import os import csv +import logging +import os +import subprocess + from conf import settings +from core.results.results_constants import ResultsConstants +from tools.pkt_gen import trafficgen + + +def get_stc_common_settings(): + """ + Return the common Settings + These settings would apply to almost all the tests. + """ + args = ["--lab_server_addr", + settings.getValue("TRAFFICGEN_STC_LAB_SERVER_ADDR"), + "--license_server_addr", + settings.getValue("TRAFFICGEN_STC_LICENSE_SERVER_ADDR"), + "--east_chassis_addr", + settings.getValue("TRAFFICGEN_STC_EAST_CHASSIS_ADDR"), + "--east_slot_num", + settings.getValue("TRAFFICGEN_STC_EAST_SLOT_NUM"), + "--east_port_num", + settings.getValue("TRAFFICGEN_STC_EAST_PORT_NUM"), + "--west_chassis_addr", + settings.getValue("TRAFFICGEN_STC_WEST_CHASSIS_ADDR"), + "--west_slot_num", + settings.getValue("TRAFFICGEN_STC_WEST_SLOT_NUM"), + "--west_port_num", + settings.getValue("TRAFFICGEN_STC_WEST_PORT_NUM"), + "--test_session_name", + settings.getValue("TRAFFICGEN_STC_TEST_SESSION_NAME"), + "--results_dir", + settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"), + "--csv_results_file_prefix", + settings.getValue("TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX")] + return args + + +def get_rfc2544_common_settings(): + """ + Retrun Generic RFC 2544 settings. 
+ These settings apply to all the 2544 tests + """ + args = [settings.getValue("TRAFFICGEN_STC_PYTHON2_PATH"), + os.path.join( + settings.getValue("TRAFFICGEN_STC_TESTCENTER_PATH"), + settings.getValue( + "TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME")), + "--metric", + settings.getValue("TRAFFICGEN_STC_RFC2544_METRIC"), + "--search_mode", + settings.getValue("TRAFFICGEN_STC_SEARCH_MODE"), + "--learning_mode", + settings.getValue("TRAFFICGEN_STC_LEARNING_MODE"), + "--rate_lower_limit_pct", + settings.getValue("TRAFFICGEN_STC_RATE_LOWER_LIMIT_PCT"), + "--rate_upper_limit_pct", + settings.getValue("TRAFFICGEN_STC_RATE_UPPER_LIMIT_PCT"), + "--rate_initial_pct", + settings.getValue("TRAFFICGEN_STC_RATE_INITIAL_PCT"), + "--rate_step_pct", + settings.getValue("TRAFFICGEN_STC_RATE_STEP_PCT"), + "--resolution_pct", + settings.getValue("TRAFFICGEN_STC_RESOLUTION_PCT"), + "--acceptable_frame_loss_pct", + settings.getValue("TRAFFICGEN_STC_ACCEPTABLE_FRAME_LOSS_PCT"), + "--east_intf_addr", + settings.getValue("TRAFFICGEN_STC_EAST_INTF_ADDR"), + "--east_intf_gateway_addr", + settings.getValue("TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR"), + "--west_intf_addr", + settings.getValue("TRAFFICGEN_STC_WEST_INTF_ADDR"), + "--west_intf_gateway_addr", + settings.getValue("TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR"), + "--num_trials", + settings.getValue("TRAFFICGEN_STC_NUMBER_OF_TRIALS"), + "--trial_duration_sec", + settings.getValue("TRAFFICGEN_STC_TRIAL_DURATION_SEC"), + "--traffic_pattern", + settings.getValue("TRAFFICGEN_STC_TRAFFIC_PATTERN")] + return args + + +def get_rfc2544_custom_settings(framesize, custom_tr): + """ + Return RFC2544 Custom Settings + """ + args = ["--frame_size_list", + str(framesize), + "--traffic_custom", + str(custom_tr)] + return args class TestCenter(trafficgen.ITrafficGenerator): """ Spirent TestCenter """ + _logger = logging.getLogger(__name__) + def connect(self): """ Do nothing. 
@@ -45,111 +133,146 @@ class TestCenter(trafficgen.ITrafficGenerator): """ pass - def send_burst_traffic(self, traffic=None, numpkts=100, duration=20, framerate=100): + def send_burst_traffic(self, traffic=None, numpkts=100, duration=20): """ Do nothing. """ return None - def send_cont_traffic(self, traffic=None, duration=30, framerate=0, - multistream=False): + def get_rfc2544_results(self, filename): """ - Do nothing. + Reads the CSV file and return the results """ - return None + result = {} + with open(filename, "r") as csvfile: + csvreader = csv.DictReader(csvfile) + for row in csvreader: + self._logger.info("Row: %s", row) + tx_fps = ((float(row["TxFrameCount"])) / + (float(row["Duration(sec)"]))) + rx_fps = ((float(row["RxFrameCount"])) / + (float(row["Duration(sec)"]))) + tx_mbps = ((float(row["TxFrameCount"]) * + float(row["ConfiguredFrameSize"])) / + (float(row["Duration(sec)"]) * 1000000.0)) + rx_mbps = ((float(row["RxFrameCount"]) * + float(row["ConfiguredFrameSize"])) / + (float(row["Duration(sec)"]) * 1000000.0)) + result[ResultsConstants.TX_RATE_FPS] = tx_fps + result[ResultsConstants.THROUGHPUT_RX_FPS] = rx_fps + result[ResultsConstants.TX_RATE_MBPS] = tx_mbps + result[ResultsConstants.THROUGHPUT_RX_MBPS] = rx_mbps + result[ResultsConstants.TX_RATE_PERCENT] = float( + row["OfferedLoad(%)"]) + result[ResultsConstants.THROUGHPUT_RX_PERCENT] = float( + row["Throughput(%)"]) + result[ResultsConstants.MIN_LATENCY_NS] = float( + row["MinimumLatency(us)"]) * 1000 + result[ResultsConstants.MAX_LATENCY_NS] = float( + row["MaximumLatency(us)"]) * 1000 + result[ResultsConstants.AVG_LATENCY_NS] = float( + row["AverageLatency(us)"]) * 1000 + result[ResultsConstants.FRAME_LOSS_PERCENT] = float( + row["PercentLoss"]) + return result + + def send_cont_traffic(self, traffic=None, duration=30): + """ + Send Custom - Continuous Test traffic + Reuse RFC2544 throughput test specifications along with + 'custom' configuration + """ + verbose = False + custom = "cont" + 
framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE") + if traffic and 'l2' in traffic: + if 'framesize' in traffic['l2']: + framesize = traffic['l2']['framesize'] + + stc_common_args = get_stc_common_settings() + rfc2544_common_args = get_rfc2544_common_settings() + rfc2544_custom_args = get_rfc2544_custom_settings(framesize, + custom) + args = stc_common_args + rfc2544_common_args + rfc2544_custom_args + + if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True": + args.append("--verbose") + verbose = True + self._logger.debug("Arguments used to call test: %s", args) + subprocess.check_call(args) + + filec = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"), + settings.getValue( + "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") + + ".csv") + + if verbose: + self._logger.info("file: %s", filec) + + return self.get_rfc2544_results(filec) def send_rfc2544_throughput(self, traffic=None, trials=3, duration=20, - lossrate=0.0, multistream=False): + lossrate=0.0): """ Send traffic per RFC2544 throughput test specifications. 
""" verbose = False + framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE") + if traffic and 'l2' in traffic: + if 'framesize' in traffic['l2']: + framesize = traffic['l2']['framesize'] + + stc_common_args = get_stc_common_settings() + rfc2544_common_args = get_rfc2544_common_settings() + rfc2544_custom_args = get_rfc2544_custom_settings(framesize, '') + args = stc_common_args + rfc2544_common_args + rfc2544_custom_args - args = [settings.getValue("TRAFFICGEN_STC_PYTHON2_PATH"), - os.path.join(settings.getValue("TRAFFICGEN_STC_TESTCENTER_PATH"), - settings.getValue("TRAFFICGEN_STC_RFC2544_TPUT_TEST_FILE_NAME")), - "--lab_server_addr", - settings.getValue("TRAFFICGEN_STC_LAB_SERVER_ADDR"), - "--license_server_addr", - settings.getValue("TRAFFICGEN_STC_LICENSE_SERVER_ADDR"), - "--east_chassis_addr", - settings.getValue("TRAFFICGEN_STC_EAST_CHASSIS_ADDR"), - "--east_slot_num", - settings.getValue("TRAFFICGEN_STC_EAST_SLOT_NUM"), - "--east_port_num", - settings.getValue("TRAFFICGEN_STC_EAST_PORT_NUM"), - "--west_chassis_addr", - settings.getValue("TRAFFICGEN_STC_WEST_CHASSIS_ADDR"), - "--west_slot_num", - settings.getValue("TRAFFICGEN_STC_WEST_SLOT_NUM"), - "--west_port_num", - settings.getValue("TRAFFICGEN_STC_WEST_PORT_NUM"), - "--test_session_name", - settings.getValue("TRAFFICGEN_STC_TEST_SESSION_NAME"), - "--results_dir", - settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"), - "--csv_results_file_prefix", - settings.getValue("TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX"), - "--num_trials", - settings.getValue("TRAFFICGEN_STC_NUMBER_OF_TRIALS"), - "--trial_duration_sec", - settings.getValue("TRAFFICGEN_STC_TRIAL_DURATION_SEC"), - "--traffic_pattern", - settings.getValue("TRAFFICGEN_STC_TRAFFIC_PATTERN"), - "--search_mode", - settings.getValue("TRAFFICGEN_STC_SEARCH_MODE"), - "--learning_mode", - settings.getValue("TRAFFICGEN_STC_LEARNING_MODE"), - "--rate_lower_limit_pct", - settings.getValue("TRAFFICGEN_STC_RATE_LOWER_LIMIT_PCT"), - "--rate_upper_limit_pct", - 
settings.getValue("TRAFFICGEN_STC_RATE_UPPER_LIMIT_PCT"), - "--rate_initial_pct", - settings.getValue("TRAFFICGEN_STC_RATE_INITIAL_PCT"), - "--rate_step_pct", - settings.getValue("TRAFFICGEN_STC_RATE_STEP_PCT"), - "--resolution_pct", - settings.getValue("TRAFFICGEN_STC_RESOLUTION_PCT"), - "--frame_size_list", - settings.getValue("TRAFFICGEN_STC_FRAME_SIZE"), - "--acceptable_frame_loss_pct", - settings.getValue("TRAFFICGEN_STC_ACCEPTABLE_FRAME_LOSS_PCT"), - "--east_intf_addr", - settings.getValue("TRAFFICGEN_STC_EAST_INTF_ADDR"), - "--east_intf_gateway_addr", - settings.getValue("TRAFFICGEN_STC_EAST_INTF_GATEWAY_ADDR"), - "--west_intf_addr", - settings.getValue("TRAFFICGEN_STC_WEST_INTF_ADDR"), - "--west_intf_gateway_addr", - settings.getValue("TRAFFICGEN_STC_WEST_INTF_GATEWAY_ADDR")] if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True": args.append("--verbose") verbose = True - print("Arguments used to call test: %s" % args) + self._logger.debug("Arguments used to call test: %s", args) + subprocess.check_call(args) - subprocess.check_call(map(os.path.expanduser, args)) + filec = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"), + settings.getValue( + "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") + + ".csv") - file = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"), - settings.getValue("TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") + ".csv") if verbose: - print("file: %s" % file) + self._logger.info("file: %s", filec) - result = {} + return self.get_rfc2544_results(filec) - with open(file, "r") as csvfile: - csvreader = csv.DictReader(csvfile) - for row in csvreader: - print("Row: %s" % row) - result[ResultsConstants.TX_RATE_FPS] = 0.0 - result[ResultsConstants.THROUGHPUT_RX_FPS] = 0.0 - result[ResultsConstants.TX_RATE_MBPS] = 0.0 - result[ResultsConstants.THROUGHPUT_RX_MBPS] = 0.0 - result[ResultsConstants.TX_RATE_PERCENT] = float(row["OfferedLoad(%)"]) - result[ResultsConstants.THROUGHPUT_RX_PERCENT] = float(row["Throughput(%)"]) - 
result[ResultsConstants.MIN_LATENCY_NS] = float(row["MinimumLatency(us)"]) * 1000 - result[ResultsConstants.MAX_LATENCY_NS] = float(row["MaximumLatency(us)"]) * 1000 - result[ResultsConstants.AVG_LATENCY_NS] = float(row["AverageLatency(us)"]) * 1000 - return result + def send_rfc2544_back2back(self, traffic=None, trials=1, duration=20, + lossrate=0.0): + """ + Send traffic per RFC2544 BacktoBack test specifications. + """ + verbose = False + framesize = settings.getValue("TRAFFICGEN_STC_FRAME_SIZE") + if traffic and 'l2' in traffic: + if 'framesize' in traffic['l2']: + framesize = traffic['l2']['framesize'] + + stc_common_args = get_stc_common_settings() + rfc2544_common_args = get_rfc2544_common_settings() + rfc2544_custom_args = get_rfc2544_custom_settings(framesize, '') + args = stc_common_args + rfc2544_common_args + rfc2544_custom_args + + if settings.getValue("TRAFFICGEN_STC_VERBOSE") is "True": + args.append("--verbose") + verbose = True + self._logger.info("Arguments used to call test: %s", args) + subprocess.check_call(args) + + filecs = os.path.join(settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"), + settings.getValue( + "TRAFFICGEN_STC_CSV_RESULTS_FILE_PREFIX") + + ".csv") + if verbose: + self._logger.debug("file: %s", filecs) + + return self.get_rfc2544_results(filecs) if __name__ == '__main__': TRAFFIC = { @@ -159,6 +282,6 @@ if __name__ == '__main__': 'dstip': '90.90.90.90', }, } - with TestCenter() as dev: print(dev.send_rfc2544_throughput(traffic=TRAFFIC)) + print(dev.send_rfc2544_backtoback(traffic=TRAFFIC)) diff --git a/tools/pkt_gen/trafficgen/trafficgenhelper.py b/tools/pkt_gen/trafficgen/trafficgenhelper.py index 0a240579..90c77b09 100644 --- a/tools/pkt_gen/trafficgen/trafficgenhelper.py +++ b/tools/pkt_gen/trafficgen/trafficgenhelper.py @@ -23,7 +23,7 @@ CMD_PREFIX = 'gencmd : ' TRAFFIC_DEFAULTS = { 'traffic_type' : 'rfc2544', 'frame_rate' : 100, - 'bidir' : False, + 'bidir' : 'False', # will be passed as string in title format to tgen 
'multistream' : 0, 'stream_type' : 'L4', 'pre_installed_flows' : 'No', # used by vswitch implementation diff --git a/tools/pkt_gen/xena/XenaDriver.py b/tools/pkt_gen/xena/XenaDriver.py new file mode 100644 index 00000000..aa8443c9 --- /dev/null +++ b/tools/pkt_gen/xena/XenaDriver.py @@ -0,0 +1,1129 @@ +# Copyright 2016 Red Hat Inc & Xena Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This is a port of code by Xena and Flavios ported to python 3 compatibility. +# Credit given to Xena and Flavio for providing most of the logic of this code. +# The code has changes for PEP 8 and python 3 conversion. Added Stat classes +# for better scaling of future requirements. Also added calculation functions +# for line rate to align within VSPerf project. +# Flavios xena libraries available at https://github.com/fleitner/XenaPythonLib + +# Contributors: +# Flavio Leitner, Red Hat Inc. +# Dan Amzulescu, Xena Networks +# Christian Trautman, Red Hat Inc. + +""" +Xena Socket API Driver module for communicating directly with Xena system +through socket commands and returning different statistics. +""" +import locale +import logging +import socket +import struct +import sys +import threading +import time + +# Xena Socket Commands +CMD_CLEAR_RX_STATS = 'pr_clear' +CMD_CLEAR_TX_STATS = 'pt_clear' +CMD_COMMENT = ';' +CMD_CREATE_STREAM = 'ps_create' +CMD_DELETE_STREAM = 'ps_delete' +CMD_GET_PORT_SPEED = 'p_speed ?' +CMD_GET_PORT_SPEED_REDUCTION = 'p_speedreduction ?' 
+CMD_GET_RX_STATS_PER_TID = 'pr_tpldtraffic' +CMD_GET_STREAM_DATA = 'pt_stream' +CMD_GET_STREAMS_PER_PORT = 'ps_indices' +CMD_GET_TID_PER_STREAM = 'ps_tpldid' +CMD_GET_TX_STATS_PER_STREAM = 'pt_stream' +CMD_GET_RX_STATS = 'pr_all ?' +CMD_GET_TX_STATS = 'pt_all ?' +CMD_INTERFRAME_GAP = 'p_interframegap' +CMD_LOGIN = 'c_logon' +CMD_LOGOFF = 'c_logoff' +CMD_OWNER = 'c_owner' +CMD_PORT = ';Port:' +CMD_PORT_IP = 'p_ipaddress' +CMD_RESERVE = 'p_reservation reserve' +CMD_RELEASE = 'p_reservation release' +CMD_RELINQUISH = 'p_reservation relinquish' +CMD_RESET = 'p_reset' +CMD_SET_PORT_TIME_LIMIT = 'p_txtimelimit' +CMD_SET_STREAM_HEADER_PROTOCOL = 'ps_headerprotocol' +CMD_SET_STREAM_ON_OFF = 'ps_enable' +CMD_SET_STREAM_PACKET_HEADER = 'ps_packetheader' +CMD_SET_STREAM_PACKET_LENGTH = 'ps_packetlength' +CMD_SET_STREAM_PACKET_LIMIT = 'ps_packetlimit' +CMD_SET_STREAM_PACKET_PAYLOAD = 'ps_payload' +CMD_SET_STREAM_RATE_FRACTION = 'ps_ratefraction' +CMD_SET_STREAM_TEST_PAYLOAD_ID = 'ps_tpldid' +CMD_SET_TPLD_MODE = 'p_tpldmode' +CMD_START_TRAFFIC = 'p_traffic on' +CMD_STOP_TRAFFIC = 'p_traffic off' +CMD_STREAM_MODIFIER = 'ps_modifier' +CMD_STREAM_MODIFIER_COUNT = 'ps_modifiercount' +CMD_STREAM_MODIFIER_RANGE = 'ps_modifierrange' +CMD_VERSION = 'c_versionno ?' 
+ +_LOCALE = locale.getlocale()[1] +_LOGGER = logging.getLogger(__name__) + + +class SimpleSocket(object): + """ + Socket class + """ + def __init__(self, hostname, port=5025, timeout=1): + """Constructor + :param hostname: hostname or ip as string + :param port: port number to use for socket as int + :param timeout: socket timeout as int + :return: SimpleSocket object + """ + self.hostname = hostname + try: + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.settimeout(timeout) + self.sock.connect((hostname, port)) + except socket.error as msg: + _LOGGER.error( + "Cannot connect to Xena Socket at %s", hostname) + _LOGGER.error("Exception : %s", msg) + sys.exit(1) + + def __del__(self): + """Deconstructor + :return: + """ + self.sock.close() + + def ask(self, cmd): + """ Send the command over the socket + :param cmd: cmd as string + :return: byte utf encoded return value from socket + """ + cmd += '\n' + try: + self.sock.send(cmd.encode('utf-8')) + return self.sock.recv(1024) + except OSError: + return '' + + def read_reply(self): + """ Get the response from the socket + :return: Return the reply + """ + reply = self.sock.recv(1024) + if reply.find("---^".encode('utf-8')) != -1: + # read again the syntax error msg + reply = self.sock.recv(1024) + return reply + + def send_command(self, cmd): + """ Send the command specified over the socket + :param cmd: Command to send as string + :return: None + """ + cmd += '\n' + self.sock.send(cmd.encode('utf-8')) + + def set_keep_alive(self): + """ Set the keep alive for the socket + :return: None + """ + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + +class KeepAliveThread(threading.Thread): + """ + Keep alive socket class + """ + message = '' + + def __init__(self, connection, interval=10): + """ Constructor + :param connection: Socket for keep alive + :param interval: interval in seconds to send keep alive + :return: KeepAliveThread object + """ + threading.Thread.__init__(self) 
+ self.connection = connection + self.interval = interval + self.finished = threading.Event() + self.setDaemon(True) + _LOGGER.debug( + 'Xena Socket keep alive thread initiated, interval ' + + '{} seconds'.format(self.interval)) + + def stop(self): + """ Thread stop. See python thread docs for more info + :return: None + """ + self.finished.set() + self.join() + + def run(self): + """ Thread start. See python thread docs for more info + :return: None + """ + while not self.finished.isSet(): + self.finished.wait(self.interval) + self.connection.ask(self.message) + + +class XenaSocketDriver(SimpleSocket): + """ + Xena socket class + """ + reply_ok = '<OK>' + + def __init__(self, hostname, port=22611): + """ Constructor + :param hostname: Hostname or ip as string + :param port: port to use as int + :return: XenaSocketDriver object + """ + SimpleSocket.__init__(self, hostname=hostname, port=port) + SimpleSocket.set_keep_alive(self) + self.access_semaphor = threading.Semaphore(1) + + def ask(self, cmd): + """ Send the command over the socket in a thread safe manner + :param cmd: Command to send + :return: reply from socket + """ + self.access_semaphor.acquire() + reply = SimpleSocket.ask(self, cmd) + self.access_semaphor.release() + return reply + + def ask_verify(self, cmd): + """ Send the command over the socket in a thread safe manner and + verify the response is good. 
class XenaManager(object):
    """
    Manager class for port and socket functions
    """
    def __init__(self, socketDriver, user='', password='xena'):
        """Constructor

        Establish a connection to Xena using a ``driver`` with the ``password``
        supplied.

        Attributes:
        :param socketDriver: XenaSocketDriver connection object
        :param user: Username to register as the port owner
        :param password: Password to the Xena traffic generator
        :returns: XenaManager object
        """
        self.driver = socketDriver
        self.ports = list()
        # keep-alive thread prevents the chassis from timing out the session
        self.keep_alive_thread = KeepAliveThread(self.driver)

        if self.logon(password):
            _LOGGER.info('Connected to Xena at %s', self.driver.hostname)
        else:
            _LOGGER.error('Failed to logon to Xena at %s', self.driver.hostname)
            return

        self.set_owner(user)

    def disconnect(self):
        """ Release ports and disconnect from chassis.
        :return: None
        """
        for module_port in self.ports:
            module_port.release_port()
        self.ports = []
        self.logoff()
        self.keep_alive_thread.stop()

    def add_module_port(self, module, port):
        """Factory for Xena Ports

        :param module: String or int of module
        :param port: String or int of port
        :return: XenaPort object if success, None if port already added
        """
        # BUG FIX: the original membership test ``xenaport in self.ports``
        # compared object identity on a freshly constructed XenaPort (the class
        # defines no __eq__), so it never detected an already-added port and
        # duplicates were silently appended. Check by module/port number.
        if self.get_module_port(module, port) is not None:
            return None
        xenaport = XenaPort(self, module, port)
        self.ports.append(xenaport)
        return xenaport

    def get_module_port(self, module, port):
        """Return the Xena Port object if available
        :param module: module number as int or str
        :param port: port number as int or str
        :return: XenaPort object or None if not found
        """
        for por in self.ports:
            if por.port == str(port) and por.module == str(module):
                return por
        return None

    def get_version(self):
        """
        Get the version from the chassis
        :return: versions of server and driver as string
        """
        res = self.driver.ask(make_manager_command(
            CMD_VERSION, '')).decode(_LOCALE)
        res = res.rstrip('\n').split()
        return "Server: {} Driver: {}".format(res[1], res[2])

    def logoff(self):
        """
        Logoff from the Xena chassis
        :return: Boolean True if response OK, False if error.
        """
        return self.driver.ask_verify(make_manager_command(CMD_LOGOFF))

    def logon(self, password):
        """Login to the Xena traffic generator using the ``password`` supplied.

        :param password: string of password
        :return: Boolean True if response OK, False if error.
        """
        self.keep_alive_thread.start()
        return self.driver.ask_verify(make_manager_command(CMD_LOGIN, password))

    def set_owner(self, username):
        """Set the ports owner.
        :return: Boolean True if response OK, False if error.
        """
        return self.driver.ask_verify(make_manager_command(CMD_OWNER, username))
class XenaPort(object):
    """
    Xena Port emulator class
    """
    def __init__(self, manager, module, port):
        """Constructor

        :param manager: XenaManager object
        :param module: Module as string or int of module to use
        :param port: Port as string or int of port to use
        :return: XenaPort object
        """
        self._manager = manager
        self._module = str(module)
        self._port = str(port)
        self._streams = list()

    @property
    def manager(self):
        """Property for manager attribute
        :return: manager object
        """
        return self._manager

    @property
    def module(self):
        """Property for module attribute
        :return: module value as string
        """
        return self._module

    @property
    def port(self):
        """Property for port attribute
        :return: port value as string
        """
        return self._port

    def port_string(self):
        """String builder with attributes
        :return: String of module port ("module/port") for command sequence
        """
        stringify = "{}/{}".format(self._module, self._port)
        return stringify

    def add_stream(self):
        """Add a stream to the port.
        :return: XenaStream object, None if failure
        """
        # stream ids are allocated sequentially per port
        identifier = len(self._streams)
        stream = XenaStream(self, identifier)
        if self._manager.driver.ask_verify(make_stream_command(
                CMD_CREATE_STREAM, '', stream)):
            self._streams.append(stream)
            return stream
        else:
            _LOGGER.error("Error during stream creation")
            return None

    def clear_stats(self, rx_clear=True, tx_clear=True):
        """Clear the port stats

        :param rx_clear: Boolean if rx stats are to be cleared
        :param tx_clear: Boolean if tx stats are to be cleared
        :return: Boolean True if response OK, False if error.
        """
        command = make_port_command(CMD_CLEAR_RX_STATS, self)
        res1 = self._manager.driver.ask_verify(command) if rx_clear else True
        command = make_port_command(CMD_CLEAR_TX_STATS, self)
        res2 = self._manager.driver.ask_verify(command) if tx_clear else True
        return all([res1, res2])

    def get_effective_speed(self):
        """
        Get the effective speed on the port
        :return: effective speed as float
        """
        port_speed = self.get_port_speed()
        reduction = self.get_port_speed_reduction()
        # reduction is reported in ppm, hence the 1e6 divisor
        effective_speed = port_speed * (1.0 - reduction / 1000000.0)
        return effective_speed

    def get_inter_frame_gap(self):
        """
        Get the interframe gap and return it as string
        :return: integer of interframe gap
        """
        command = make_port_command(CMD_INTERFRAME_GAP + '?', self)
        res = self._manager.driver.ask(command).decode(_LOCALE)
        res = int(res.rstrip('\n').split(' ')[-1])
        return res

    def get_port_speed(self):
        """
        Get the port speed as bits from port and return it as a int.
        :return: Int of port speed
        """
        command = make_port_command(CMD_GET_PORT_SPEED, self)
        res = self._manager.driver.ask(command).decode(_LOCALE)
        # chassis reports Mbps; convert to bps
        port_speed = res.split(' ')[-1].rstrip('\n')
        return int(port_speed) * 1000000

    def get_port_speed_reduction(self):
        """
        Get the port speed reduction value as int
        :return: Integer of port speed reduction value (ppm)
        """
        command = make_port_command(CMD_GET_PORT_SPEED_REDUCTION, self)
        res = self._manager.driver.ask(command).decode(_LOCALE)
        res = int(res.rstrip('\n').split(' ')[-1])
        return res

    def get_rx_stats(self):
        """Get the rx stats and return the data as a dict.
        :return: Receive stats as XenaRXStats object
        """
        command = make_port_command(CMD_GET_RX_STATS, self)
        rx_data = self._manager.driver.send_query_replies(command)
        data = XenaRXStats(rx_data, time.time())
        return data

    def get_tx_stats(self):
        """Get the tx stats and return the data as a dict.
        :return: Transmit stats as XenaTXStats object
        """
        # BUG FIX (docs): original docstring said "Receive stats".
        command = make_port_command(CMD_GET_TX_STATS, self)
        tx_data = self._manager.driver.send_query_replies(command)
        data = XenaTXStats(tx_data, time.time())
        return data

    def micro_tpld_disable(self):
        """Disable micro TPLD and return to standard payload size
        :return: Boolean if response OK, False if error
        """
        command = make_port_command(CMD_SET_TPLD_MODE + ' normal', self)
        return self._manager.driver.ask_verify(command)

    def micro_tpld_enable(self):
        """Enable micro TPLD 6 byte payloads.
        :return: Boolean if response OK, False if error
        """
        command = make_port_command(CMD_SET_TPLD_MODE + ' micro', self)
        return self._manager.driver.ask_verify(command)

    def release_port(self):
        """Release the port
        :return: Boolean True if response OK, False if error.
        """
        command = make_port_command(CMD_RELEASE, self)
        return self._manager.driver.ask_verify(command)

    def reserve_port(self):
        """Reserve the port
        :return: Boolean True if response OK, False if error.
        """
        command = make_port_command(CMD_RESERVE, self)
        return self._manager.driver.ask_verify(command)

    def reset_port(self):
        """Reset the port
        :return: Boolean True if response OK, False if error.
        """
        command = make_port_command(CMD_RESET, self)
        return self._manager.driver.ask_verify(command)

    def set_port_ip(self, ip_addr, cidr, gateway, wild='255'):
        """
        Set the port ip address of the specific port
        :param ip_addr: IP address to set to port
        :param cidr: cidr number for the subnet
        :param gateway: Gateway ip for port
        :param wild: wildcard used for ARP and PING replies
        :return: Boolean True if response OK, False if error
        """
        # convert the cidr to a dot notation subnet address
        subnet = socket.inet_ntoa(
            struct.pack(">I", (0xffffffff << (32 - cidr)) & 0xffffffff))

        command = make_port_command('{} {} {} {} 0.0.0.{}'.format(
            CMD_PORT_IP, ip_addr, subnet, gateway, wild), self)
        return self._manager.driver.ask_verify(command)

    def set_port_time_limit(self, micro_seconds):
        """Set the port transmit time limit
        :param micro_seconds: time limit value; parameter name suggests
            microseconds while the original docstring said ms —
            NOTE(review): confirm the unit against the Xena P_TXTIMELIMIT spec
        :return: Boolean True if response OK, False if error.
        """
        command = make_port_command('{} {}'.format(
            CMD_SET_PORT_TIME_LIMIT, micro_seconds), self)
        return self._manager.driver.ask_verify(command)

    def traffic_off(self):
        """Stop traffic
        :return: Boolean True if response OK, False if error.
        """
        # BUG FIX (docs): docstrings of traffic_off/traffic_on were swapped.
        command = make_port_command(CMD_STOP_TRAFFIC, self)
        return self._manager.driver.ask_verify(command)

    def traffic_on(self):
        """Start traffic
        :return: Boolean True if response OK, False if error.
        """
        command = make_port_command(CMD_START_TRAFFIC, self)
        return self._manager.driver.ask_verify(command)
+ """ + command = make_port_command(CMD_START_TRAFFIC, self) + return self._manager.driver.ask_verify(command) + + +class XenaStream(object): + """ + Xena stream emulator class + """ + def __init__(self, xenaPort, streamID): + """Constructor + + :param xenaPort: XenaPort object + :param streamID: Stream ID as int or string + :return: XenaStream object + """ + self._xena_port = xenaPort + self._stream_id = str(streamID) + self._manager = self._xena_port.manager + self._header_protocol = None + + @property + def xena_port(self): + """Property for port attribute + :return: XenaPort object + """ + return self._xena_port + + @property + def stream_id(self): + """Property for streamID attribute + :return: streamID value as string + """ + return self._stream_id + + def enable_multistream(self, flows, layer): + """ + Basic implementation of multi stream. Enable multi stream by setting + modifiers on the stream + :param flows: Numbers of flows or end range + :param layer: layer to enable multi stream as str. 
Acceptable values + are L2, L3, or L4 + :return: True if success False otherwise + """ + if not self._header_protocol: + raise RuntimeError( + "Please set a protocol header before calling this method.") + + # byte offsets for setting the modifier + offsets = { + 'L2': [0, 6], + 'L3': [32, 36] if 'VLAN' in self._header_protocol else [28, 32], + 'L4': [38, 40] if 'VLAN' in self._header_protocol else [34, 36] + } + + responses = list() + if layer in offsets.keys() and flows > 0: + command = make_port_command( + CMD_STREAM_MODIFIER_COUNT + ' [{}]'.format(self._stream_id) + + ' 2', self._xena_port) + responses.append(self._manager.driver.ask_verify(command)) + command = make_port_command( + CMD_STREAM_MODIFIER + ' [{},0] {} 0xFFFF0000 INC 1'.format( + self._stream_id, offsets[layer][0]), self._xena_port) + responses.append(self._manager.driver.ask_verify(command)) + command = make_port_command( + CMD_STREAM_MODIFIER_RANGE + ' [{},0] 0 1 {}'.format( + self._stream_id, flows), self._xena_port) + responses.append(self._manager.driver.ask_verify(command)) + command = make_port_command( + CMD_STREAM_MODIFIER + ' [{},1] {} 0xFFFF0000 INC 1'.format( + self._stream_id, offsets[layer][1]), self._xena_port) + responses.append(self._manager.driver.ask_verify(command)) + command = make_port_command( + CMD_STREAM_MODIFIER_RANGE + ' [{},1] 0 1 {}'.format( + self._stream_id, flows), self._xena_port) + responses.append(self._manager.driver.ask_verify(command)) + return all(responses) # return True if they all worked + elif flows < 1: + _LOGGER.warning( + 'No flows specified in enable multistream. 
Bypassing...') + return False + else: + raise NotImplementedError( + "Non-implemented stream layer in method enable multistream ", + "layer=", layer) + + def get_stream_data(self): + """ + Get the response for stream data + :return: String of response for stream data info + """ + command = make_stream_command(CMD_GET_STREAM_DATA, '?', self) + res = self._manager.driver.ask(command).decode(_LOCALE) + return res + + def set_header_protocol(self, protocol_header): + """Set the header info for the packet header hex. + If the packet header contains just Ethernet and IP info then call this + method with ETHERNET IP as the protocol header. + + :param protocol_header: protocol header argument + :return: Boolean True if success, False if error + """ + command = make_stream_command( + CMD_SET_STREAM_HEADER_PROTOCOL, + protocol_header, self) + if self._manager.driver.ask_verify(command): + self._header_protocol = protocol_header + return True + else: + return False + + def set_off(self): + """Set the stream to off + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_ON_OFF, 'off', self)) + + def set_on(self): + """Set the stream to on + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_ON_OFF, 'on', self)) + + def set_packet_header(self, header): + """Set the stream packet header + + :param header: packet header as hex bytes + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_PACKET_HEADER, header, self)) + + def set_packet_length(self, pattern_type, minimum, maximum): + """Set the pattern length with min and max values based on the pattern + type supplied + + :param pattern_type: String of pattern type, valid entries [ fixed, + butterfly, random, mix, incrementing ] + :param minimum: integer of minimum byte value + :param 
maximum: integer of maximum byte value + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_PACKET_LENGTH, '{} {} {}'.format( + pattern_type, minimum, maximum), self)) + + def set_packet_limit(self, limit): + """Set the packet limit + + :param limit: number of packets that will be sent, use -1 to disable + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_PACKET_LIMIT, limit, self)) + + def set_packet_payload(self, payload_type, hex_value): + """Set the payload to the hex value based on the payload type + + :param payload_type: string of the payload type, valid entries [ pattern, + incrementing, prbs ] + :param hex_value: hex string of valid hex + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_PACKET_PAYLOAD, '{} {}'.format( + payload_type, hex_value), self)) + + def set_rate_fraction(self, fraction): + """Set the rate fraction + + :param fraction: fraction for the stream + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_RATE_FRACTION, fraction, self)) + + def set_payload_id(self, identifier): + """ Set the test payload ID + :param identifier: ID as int or string + :return: Boolean True if success, False if error + """ + return self._manager.driver.ask_verify(make_stream_command( + CMD_SET_STREAM_TEST_PAYLOAD_ID, identifier, self)) + + +class XenaRXStats(object): + """ + Receive stat class + """ + def __init__(self, stats, epoc): + """ Constructor + :param stats: Stats from pr all command as list + :param epoc: Current time in epoc + :return: XenaRXStats object + """ + self._stats = stats + self._time = epoc + self.data = self.parse_stats() + self.preamble = 8 + + @staticmethod + def _pack_stats(param, start, fields=None): + """ 
class XenaRXStats(object):
    """
    Receive stat class
    """
    def __init__(self, stats, epoc):
        """ Constructor
        :param stats: Stats from pr all command as list
        :param epoc: Current time in epoc
        :return: XenaRXStats object
        """
        self._stats = stats
        self._time = epoc
        self.data = self.parse_stats()
        # preamble size in bytes, used by the bit rate helpers
        self.preamble = 8

    @staticmethod
    def _pack_stats(param, start, fields=None):
        """ Pack up the list of stats in a dictionary
        :param param: The list of params to process
        :param start: What element to start at
        :param fields: The field names to pack as keys
        :return: Dictionary of data where fields match up to the params
        """
        if not fields:
            fields = ['bps', 'pps', 'bytes', 'packets']
        data = {}
        for i, column in enumerate(fields):
            data[column] = int(param[start + i])
        return data

    @staticmethod
    def _pack_tplds_stats(param, start):
        """ Pack up the tplds stats
        :param param: List of params to pack
        :param start: What element to start at
        :return: Dictionary of stats
        """
        # BUG FIX: original iterated range(start, len(param) - start), which
        # dropped the trailing TPLD ids (and produced an empty dict for short
        # replies). Iterate every element from ``start`` to the end.
        data = {}
        for i, val in enumerate(param[start:]):
            data[i] = int(val)
        return data

    def _pack_rxextra_stats(self, param, start):
        """ Pack up the extra stats
        :param param: List of params to pack
        :param start: What element to start at
        :return: Dictionary of stats
        """
        fields = ['fcserrors', 'pauseframes', 'arprequests', 'arpreplies',
                  'pingrequests', 'pingreplies', 'gapcount', 'gapduration']
        return self._pack_stats(param, start, fields)

    def _pack_tplderrors_stats(self, param, start):
        """ Pack up tlpd errors
        :param param: List of params to pack
        :param start: What element to start at
        :return: Dictionary of stats
        """
        fields = ['dummy', 'seq', 'mis', 'pld']
        return self._pack_stats(param, start, fields)

    def _pack_tpldlatency_stats(self, param, start):
        """ Pack up the tpld latency stats
        :param param: List of params to pack
        :param start: What element to start at
        :return: Dictionary of stats
        """
        fields = ['min', 'avg', 'max', '1sec']
        return self._pack_stats(param, start, fields)

    def _pack_tpldjitter_stats(self, param, start):
        """ Pack up the tpld jitter stats
        :param param: List of params to pack
        :param start: What element to start at
        :return: Dictionary of stats
        """
        fields = ['min', 'avg', 'max', '1sec']
        return self._pack_stats(param, start, fields)

    @property
    def time(self):
        """
        :return: Time as String of epoc of when stats were collected
        """
        return self._time

    def parse_stats(self):
        """ Parse the stats from pr all command
        :return: Dictionary of all stats
        """
        statdict = {}
        for line in self._stats:
            param = line.split()
            if param[1] == 'PR_TOTAL':
                statdict['pr_total'] = self._pack_stats(param, 2)
            elif param[1] == 'PR_NOTPLD':
                statdict['pr_notpld'] = self._pack_stats(param, 2)
            elif param[1] == 'PR_EXTRA':
                statdict['pr_extra'] = self._pack_rxextra_stats(param, 2)
            elif param[1] == 'PT_STREAM':
                entry_id = "pt_stream_%s" % param[2].strip('[]')
                statdict[entry_id] = self._pack_stats(param, 3)
            elif param[1] == 'PR_TPLDS':
                tid_list = self._pack_tplds_stats(param, 2)
                if tid_list:
                    statdict['pr_tplds'] = tid_list
            elif param[1] == 'PR_TPLDTRAFFIC':
                data = statdict.setdefault('pr_tpldstraffic', {})
                data[param[2].strip('[]')] = self._pack_stats(param, 3)
            elif param[1] == 'PR_TPLDERRORS':
                data = statdict.setdefault('pr_tplderrors', {})
                data[param[2].strip('[]')] = \
                    self._pack_tplderrors_stats(param, 3)
            elif param[1] == 'PR_TPLDLATENCY':
                data = statdict.setdefault('pr_tpldlatency', {})
                data[param[2].strip('[]')] = \
                    self._pack_tpldlatency_stats(param, 3)
            elif param[1] == 'PR_TPLDJITTER':
                # BUG FIX: original checked for key 'pr_tpldjitter' but stored
                # under 'pr_pldjitter', so jitter stats from multiple reply
                # lines never accumulated under one key.
                data = statdict.setdefault('pr_tpldjitter', {})
                data[param[2].strip('[]')] = \
                    self._pack_tpldjitter_stats(param, 3)
            elif param[1] == 'PR_FILTER':
                data = statdict.setdefault('pr_filter', {})
                data[param[2].strip('[]')] = self._pack_stats(param, 3)
            elif param[1] == 'P_RECEIVESYNC':
                if param[2] == 'IN_SYNC':
                    statdict['p_receivesync'] = {'IN SYNC': 'True'}
                else:
                    statdict['p_receivesync'] = {'IN SYNC': 'False'}
            else:
                logging.warning("XenaPort: unknown stats: %s", param[1])
        return statdict


class XenaTXStats(object):
    """
    Xena transmit stat class
    """
    def __init__(self, stats, epoc):
        """ Constructor
        :param stats: Stats from pt all command as list
        :param epoc: Current time in epoc
        :return: XenaTXStats object
        """
        self._stats = stats
        self._time = epoc
        self._ptstreamkeys = list()
        self.data = self.parse_stats()
        # preamble size in bytes, used by the bit rate helpers
        self.preamble = 8

    @staticmethod
    def _pack_stats(params, start, fields=None):
        """ Pack up the list of stats in a dictionary
        :param params: The list of params to process
        :param start: What element to start at
        :param fields: The field names to pack as keys
        :return: Dictionary of data where fields match up to the params
        """
        if not fields:
            fields = ['bps', 'pps', 'bytes', 'packets']
        data = {}
        for i, column in enumerate(fields):
            data[column] = int(params[start + i])
        return data

    def _pack_txextra_stats(self, params, start):
        """ Pack up the tx extra stats
        :param params: List of params to pack
        :param start: What element to start at
        :return: Dictionary of stats
        """
        fields = ['arprequests', 'arpreplies', 'pingrequests', 'pingreplies',
                  'injectedfcs', 'injectedseq', 'injectedmis', 'injectedint',
                  'injectedtid', 'training']
        return self._pack_stats(params, start, fields)

    @property
    def pt_stream_keys(self):
        """
        :return: Return a list of pt_stream_x stream key ids
        """
        return self._ptstreamkeys

    @property
    def time(self):
        """
        :return: Time as String of epoc of when stats were collected
        """
        return self._time

    def parse_stats(self):
        """ Parse the stats from pt all command
        :return: Dictionary of all stats
        """
        statdict = {}
        for line in self._stats:
            param = line.split()
            if param[1] == 'PT_TOTAL':
                statdict['pt_total'] = self._pack_stats(param, 2)
            elif param[1] == 'PT_NOTPLD':
                statdict['pt_notpld'] = self._pack_stats(param, 2)
            elif param[1] == 'PT_EXTRA':
                statdict['pt_extra'] = self._pack_txextra_stats(param, 2)
            elif param[1] == 'PT_STREAM':
                entry_id = "pt_stream_%s" % param[2].strip('[]')
                self._ptstreamkeys.append(entry_id)
                statdict[entry_id] = self._pack_stats(param, 3)
            else:
                logging.warning("XenaPort: unknown stats: %s", param[1])
        return statdict


def aggregate_stats(stat1, stat2):
    """
    Recursive function to aggregate two sets of statistics. This is used when
    bi directional traffic is done and statistics need to be calculated based
    on two sets of statistics.
    :param stat1: One set of dictionary stats from RX or TX stats
    :param stat2: Second set of dictionary stats from RX or TX stats
    :return: stats for data entry in RX or TX Stats instance
    """
    newstat = dict()
    for (keys1, keys2) in zip(stat1.keys(), stat2.keys()):
        if isinstance(stat1[keys1], dict):
            newstat[keys1] = aggregate_stats(stat1[keys1], stat2[keys2])
        else:
            # BUG FIX: original tested ``isinstance([keys1], float)`` — a list
            # literal is never a float, so every float stat fell into the
            # "don't aggregate" branch and aborted the aggregation.
            if not isinstance(stat1[keys1], (int, float)):
                # its some value we don't need to aggregate
                return stat1[keys1]
            # for latency stats do the appropriate calculation
            if keys1 == 'max':
                newstat[keys1] = max(stat1[keys1], stat2[keys2])
            elif keys1 == 'min':
                newstat[keys1] = min(stat1[keys1], stat2[keys2])
            elif keys1 == 'avg':
                newstat[keys1] = (stat1[keys1] + stat2[keys2]) / 2
            else:
                newstat[keys1] = (stat1[keys1] + stat2[keys2])
    return newstat
def line_percentage(port, stats, time_active, packet_size):
    """
    Calculate the line percentage rate from the duration, port object and stat
    object.
    :param port: XenaPort object
    :param stats: Xena RXStat or TXStat object
    :param time_active: time the stream was active in secs as int
    :param packet_size: packet size as int
    :return: line percentage as float
    """
    # look for the totals entry under either the RX or the TX key
    packets = None
    for total_key in ('pr_total', 'pt_total'):
        entry = stats.data.get(total_key)
        if entry is not None and 'packets' in entry:
            packets = entry['packets']
            break
    if packets is None:
        _LOGGER.error(
            'Could not calculate line rate because packet stat not found.')
        return 0
    gap = port.get_inter_frame_gap()
    rate = packets_per_second(packets, time_active)
    layer2 = l2_bit_rate(packet_size, stats.preamble, rate)
    layer1 = l1_bit_rate(layer2, rate, gap, stats.preamble)
    return 100.0 * layer1 / port.get_effective_speed()


def l2_bit_rate(packet_size, preamble, pps):
    """
    Return the l2 bit rate
    :param packet_size: packet size on the line in bytes
    :param preamble: preamble size of the packet header in bytes
    :param pps: packets per second
    :return: l2 bit rate as float
    """
    bits_per_packet = packet_size * preamble
    return bits_per_packet * pps


def l1_bit_rate(l2br, pps, ifg, preamble):
    """
    Return the l1 bit rate
    :param l2br: l2 bit rate int bits per second
    :param pps: packets per second
    :param ifg: the inter frame gap
    :param preamble: preamble size of the packet header in bytes
    :return: l1 bit rate as float
    """
    overhead_bits = pps * ifg * preamble
    return l2br + overhead_bits


def make_manager_command(cmd, argument=None):
    """ String builder for Xena socket commands

    :param cmd: Command to send
    :param argument: Arguments for command to send
    :return: String of command
    """
    if argument:
        command = '{} "{}"'.format(cmd, argument)
    else:
        command = cmd
    _LOGGER.info("[Command Sent] : %s", command)
    return command


def make_port_command(cmd, xena_port):
    """ String builder for Xena port commands

    :param cmd: Command to send
    :param xena_port: XenaPort object
    :return: String of command
    """
    command = "{} {}".format(xena_port.port_string(), cmd)
    _LOGGER.info("[Command Sent] : %s", command)
    return command


def make_stream_command(cmd, args, xena_stream):
    """ String builder for Xena stream commands

    :param cmd: Command to send
    :param args: Arguments for the command
    :param xena_stream: XenaStream object
    :return: String of command
    """
    port_prefix = xena_stream.xena_port.port_string()
    command = "{} {} [{}] {}".format(port_prefix, cmd,
                                     xena_stream.stream_id, args)
    _LOGGER.info("[Command Sent] : %s", command)
    return command


def packets_per_second(packets, time_in_sec):
    """
    Return the pps as float
    :param packets: total packets
    :param time_in_sec: time in seconds
    :return: float of pps
    """
    rate = packets / time_in_sec
    return rate
diff --git a/tools/pkt_gen/xena/profiles/baseconfig.x2544 b/tools/pkt_gen/xena/profiles/baseconfig.x2544 new file mode 100644 index 00000000..0612b329 --- /dev/null +++ b/tools/pkt_gen/xena/profiles/baseconfig.x2544 @@ -0,0 +1,373 @@ +{ + "copyright": [ + "# Copyright 2015-2016 Xena Networks.", + "#", + "# Licensed under the Apache License, Version 2.0 (the 'License');", + "# you may not use this file except in compliance with the License.", + "# You may obtain a copy of the License at\n", + "#", + "# http://www.apache.org/licenses/LICENSE-2.0", + "#", + "# Unless required by applicable law or agreed to in writing, software", + "# distributed under the License is distributed on an 'AS IS' BASIS,", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "PortHandler": { + "EntityList": [ + { + "PortRef": { + "ChassisId": "4605b3c9-70cc-42d9-9d8c-16c34989a4c1", + "ModuleIndex": 3, + "PortIndex": 0 + }, + "PortGroup": "UNDEFINED", + "PairPeerRef": null, + "PairPeerId": "", + "MulticastRole": "Undefined", + "PortSpeed": "AUTO", + "InterFrameGap": 20, + "PauseModeOn": false, + "AutoNegEnabled": true, + "AdjustPpm": 0, + "LatencyOffset": 0, + "MdiMdixMode": "AUTO", + "EnableFec": true, + "ReplyArpRequests": true, + "ReplyPingRequests": true, + "IpV4Address": "192.168.199.10", + "IpV4RoutingPrefix": 24, + "IpV4Gateway": "192.168.199.1", + "IpV6Address": "::", + "IpV6RoutingPrefix": 64, + "IpV6Gateway": "::", + "IpGatewayMacAddress": "AAAAAAAA", + "PublicIpAddress": "", + "PublicIpRoutingPrefix": 24, + "PublicIpAddressV6": "", + "PublicIpRoutingPrefixV6": 64, + "RemoteLoopIpAddress": "", + "RemoteLoopIpAddressV6": "", + "RemoteLoopMacAddress": "AAAAAAAA", + "EnablePortRateCap": false, + "PortRateCapValue": 1000.0, + "PortRateCapProfile": "Physical Port Rate", + "PortRateCapUnit": "Mbps", + "MultiStreamMap": null, + "ItemID": 
"4faf0f0c-2fc6-44a7-87ea-5f47b02d4c1a", + "ParentID": "", + "Label": "" + }, + { + "PortRef": { + "ChassisId": "4605b3c9-70cc-42d9-9d8c-16c34989a4c1", + "ModuleIndex": 3, + "PortIndex": 1 + }, + "PortGroup": "UNDEFINED", + "PairPeerRef": null, + "PairPeerId": "", + "MulticastRole": "Undefined", + "PortSpeed": "AUTO", + "InterFrameGap": 20, + "PauseModeOn": false, + "AutoNegEnabled": true, + "AdjustPpm": 0, + "LatencyOffset": 0, + "MdiMdixMode": "AUTO", + "EnableFec": true, + "ReplyArpRequests": true, + "ReplyPingRequests": true, + "IpV4Address": "192.168.199.11", + "IpV4RoutingPrefix": 24, + "IpV4Gateway": "192.168.199.1", + "IpV6Address": "::", + "IpV6RoutingPrefix": 64, + "IpV6Gateway": "::", + "IpGatewayMacAddress": "AAAAAAAA", + "PublicIpAddress": "", + "PublicIpRoutingPrefix": 24, + "PublicIpAddressV6": "", + "PublicIpRoutingPrefixV6": 64, + "RemoteLoopIpAddress": "", + "RemoteLoopIpAddressV6": "", + "RemoteLoopMacAddress": "AAAAAAAA", + "EnablePortRateCap": false, + "PortRateCapValue": 1000.0, + "PortRateCapProfile": "Physical Port Rate", + "PortRateCapUnit": "Mbps", + "MultiStreamMap": null, + "ItemID": "1b88dc59-1b1a-43f5-a314-673219f47545", + "ParentID": "", + "Label": "" + } + ] + }, + "StreamHandler": { + "StreamConnectionList": [ + { + "ConnectionId": 0, + "Port1Id": "4faf0f0c-2fc6-44a7-87ea-5f47b02d4c1a", + "Port2Id": "1b88dc59-1b1a-43f5-a314-673219f47545", + "AddressOffset1": 2, + "AddressOffset2": 3, + "ItemID": "244b9295-9a5a-4405-8404-a62074152783", + "ParentID": "", + "Label": "" + } + ] + }, + "StreamProfileHandler": { + "ProfileAssignmentMap": { + "guid_1b88dc59-1b1a-43f5-a314-673219f47545": "033f23c9-3986-40c9-b7e4-9ac1176f3c0b", + "guid_4faf0f0c-2fc6-44a7-87ea-5f47b02d4c1a": "106a3aa6-ea43-4dd7-84b5-51424a52ac87" + }, + "EntityList": [ + { + "StreamConfig": { + "SwModifier": null, + "HwModifiers": [], + "FieldValueRanges": [], + "StreamDescrPrefix": "Stream", + "ResourceIndex": -1, + "TpldId": -1, + "EnableState": "OFF", + "RateType": 
"Fraction", + "PacketLimit": 0, + "RateFraction": 100.0, + "RatePps": 0.0, + "RateL2Mbps": 0.0, + "UseBurstValues": false, + "BurstSize": 0, + "BurstDensity": 100, + "HeaderSegments": [], + "PacketLengthType": "FIXED", + "PacketMinSize": 64, + "PacketMaxSize": 64, + "PayloadDefinition": { + "PayloadType": "Incrementing", + "PayloadPattern": "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0" + }, + "ResourceUsed": false, + "ChildResourceUsed": false + }, + "ItemID": "106a3aa6-ea43-4dd7-84b5-51424a52ac87", + "ParentID": "", + "Label": "" + }, + { + "StreamConfig": { + "SwModifier": null, + "HwModifiers": [], + "FieldValueRanges": [], + "StreamDescrPrefix": "Stream", + "ResourceIndex": -1, + "TpldId": -1, + "EnableState": "OFF", + "RateType": "Fraction", + "PacketLimit": 0, + "RateFraction": 100.0, + "RatePps": 0.0, + "RateL2Mbps": 0.0, + "UseBurstValues": false, + "BurstSize": 0, + "BurstDensity": 100, + "HeaderSegments": [], + "PacketLengthType": "FIXED", + "PacketMinSize": 64, + "PacketMaxSize": 64, + "PayloadDefinition": { + "PayloadType": "Incrementing", + "PayloadPattern": "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0" + }, + "ResourceUsed": false, + "ChildResourceUsed": false + }, + "ItemID": "033f23c9-3986-40c9-b7e4-9ac1176f3c0b", + "ParentID": "", + "Label": "" + } + ] + }, + "TestOptions": { + "TestTypeOptionMap": { + "Throughput": { + "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.ThroughputTestOptions, Xena2544", + "RateIterationOptions": { + "SearchType": "BinarySearch", + "AcceptableLoss": 0.0, + "ResultScope": "CommonResult", + "FastBinarySearch": false, + "InitialValue": 10.0, + "MinimumValue": 0.1, + "MaximumValue": 100.0, + "ValueResolution": 0.5, + "UsePassThreshold": false, + "PassThreshold": 0.0 + }, + "ReportPropertyOptions": [ + "LatencyCounters" + ], + "TestType": "Throughput", + "Enabled": false, + "DurationType": "Seconds", + "Duration": 1.0, + "DurationFrames": 1, + "DurationFrameUnit": "Mframes", + "Iterations": 3, + "ItemID": 
"5ba8b4d4-9a52-4697-860a-4af1b97d2a5c", + "ParentID": "", + "Label": "" + }, + "Latency": { + "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.LatencyTestOptions, Xena2544", + "RateSweepOptions": { + "StartValue": 50.0, + "EndValue": 100.0, + "StepValue": 50.0 + }, + "LatencyMode": "Last_To_Last", + "RateRelativeTputMaxRate": true, + "TestType": "Latency", + "Enabled": false, + "DurationType": "Seconds", + "Duration": 1.0, + "DurationFrames": 1, + "DurationFrameUnit": "Mframes", + "Iterations": 1, + "ItemID": "c63c0362-96a6-434b-9c67-6be518492a49", + "ParentID": "", + "Label": "" + }, + "Loss": { + "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.LossTestOptions, Xena2544", + "RateSweepOptions": { + "StartValue": 50.0, + "EndValue": 100.0, + "StepValue": 50.0 + }, + "UsePassFailCriteria": false, + "AcceptableLoss": 0.0, + "AcceptableLossType": "Percent", + "TestType": "Loss", + "Enabled": false, + "DurationType": "Seconds", + "Duration": 1.0, + "DurationFrames": 1, + "DurationFrameUnit": "Mframes", + "Iterations": 1, + "ItemID": "f5cf336e-c983-4c48-a8cb-88447b3e2adb", + "ParentID": "", + "Label": "" + }, + "Back2Back": { + "$type": "XenaCommon.TestConfig.Xena2544.TestTypeOptions.Back2BackTestOptions, Xena2544", + "RateSweepOptions": { + "StartValue": 100.0, + "EndValue": 100.0, + "StepValue": 50.0 + }, + "ResultScope": "CommonResult", + "BurstResolution": 100.0, + "TestType": "Back2Back", + "Enabled": false, + "DurationType": "Seconds", + "Duration": 1.0, + "DurationFrames": 1, + "DurationFrameUnit": "Mframes", + "Iterations": 1, + "ItemID": "2c494ee2-16f1-4a40-b28b-aff6ad7464e3", + "ParentID": "", + "Label": "" + } + }, + "PacketSizes": { + "PacketSizeType": "CustomSizes", + "CustomPacketSizes": [ + 512.0 + ], + "SwPacketStartSize": 100, + "SwPacketEndSize": 1500, + "SwPacketStepSize": 100, + "HwPacketMinSize": 64, + "HwPacketMaxSize": 1500, + "MixedSizesWeights": [] + }, + "TopologyConfig": { + "Topology": "MESH", + "Direction": "BIDIR" + }, + 
"FlowCreationOptions": { + "FlowCreationType": "StreamBased", + "MacBaseAddress": "4,244,188", + "UseGatewayMacAsDmac": true, + "EnableMultiStream": false, + "PerPortStreamCount": 1, + "MultiStreamAddressOffset": 2, + "MultiStreamAddressIncrement": 1, + "MultiStreamMacBaseAddress": "4,244,188", + "UseMicroTpldOnDemand": false + }, + "LearningOptions": { + "MacLearningMode": "EveryTrial", + "MacLearningRetries": 1, + "ArpRefreshEnabled": true, + "ArpRefreshPeriod": 4000.0, + "UseFlowBasedLearningPreamble": false, + "FlowBasedLearningFrameCount": 1, + "FlowBasedLearningDelay": 500, + "LearningRatePercent": 1.0, + "LearningDuration": 5000.0 + }, + "ToggleSyncState": true, + "SyncOffDuration": 1, + "SyncOnDuration": 1, + "PayloadDefinition": { + "PayloadType": "Incrementing", + "PayloadPattern": "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0" + }, + "EnableSpeedReductSweep": false, + "UsePortSyncStart": false, + "PortStaggerSteps": 0, + "ShouldStopOnLos": true, + "PortResetDelay": 5 + }, + "CreationDate": "2016-02-24 13:33:50Z", + "ChassisManager": { + "ChassisList": [ + { + "ChassisID": "4605b3c9-70cc-42d9-9d8c-16c34989a4c1", + "HostName": "10.19.15.19", + "PortNumber": 22606, + "Password": "xena", + "ConnectionType": "Native", + "UsedModuleList": [], + "ResourceIndex": 0, + "ResourceUsed": false, + "ChildResourceUsed": false + } + ] + }, + "ReportConfig": { + "CustomerName": "Xena Networks", + "CustomerServiceID": "", + "CustomerAccessID": "", + "Comments": "", + "RateUnitTerminology": "FPS", + "IncludeTestPairInfo": true, + "IncludePerStreamInfo": false, + "IncludeGraphs": true, + "PlotThroughputUnit": "Pps", + "GeneratePdf": false, + "GenerateHtml": false, + "GenerateXml": true, + "GenerateCsv": false, + "SaveIntermediateResults": false, + "ReportFilename": "xena2544-report", + "AppendTimestamp": false + }, + "TidAllocationScope": "ConfigScope", + "FormatVersion": 10, + "ApplicationVersion": "2.39.5876.25884" +}
\ No newline at end of file diff --git a/tools/pkt_gen/xena/xena.py b/tools/pkt_gen/xena/xena.py new file mode 100755 index 00000000..7dd4b90b --- /dev/null +++ b/tools/pkt_gen/xena/xena.py @@ -0,0 +1,660 @@ +# Copyright 2016 Red Hat Inc & Xena Networks. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Contributors: +# Rick Alongi, Red Hat Inc. +# Amit Supugade, Red Hat Inc. +# Dan Amzulescu, Xena Networks +# Christian Trautman, Red Hat Inc. + +""" +Xena Traffic Generator Model +""" + +# python imports +import binascii +import logging +import subprocess +import sys +from time import sleep +import xml.etree.ElementTree as ET +from collections import OrderedDict +# scapy imports +import scapy.layers.inet as inet + +# VSPerf imports +from conf import settings +from core.results.results_constants import ResultsConstants +from tools.pkt_gen.trafficgen.trafficgenhelper import ( + TRAFFIC_DEFAULTS, + merge_spec) +from tools.pkt_gen.trafficgen.trafficgen import ITrafficGenerator + +# Xena module imports +from tools.pkt_gen.xena.xena_json import XenaJSON +from tools.pkt_gen.xena.XenaDriver import ( + aggregate_stats, + line_percentage, + XenaSocketDriver, + XenaManager, + ) + +class Xena(ITrafficGenerator): + """ + Xena Traffic generator wrapper class + """ + _traffic_defaults = TRAFFIC_DEFAULTS.copy() + _logger = logging.getLogger(__name__) + + def __init__(self): + self.mono_pipe = None + self.xmanager = None + self._params = {} + self._xsocket = None + self._duration = 
None + self.tx_stats = None + self.rx_stats = None + + @property + def traffic_defaults(self): + """Default traffic values. + + These can be expected to be constant across traffic generators, + so no setter is provided. Changes to the structure or contents + will likely break traffic generator implementations or tests + respectively. + """ + return self._traffic_defaults + + @staticmethod + def _create_throughput_result(root): + """ + Create the results based off the output xml file from the Xena2544.exe + execution + :param root: root dictionary from xml import + :return: Results Ordered dictionary based off ResultsConstants + """ + # get the test type from the report file + test_type = root[0][1].get('TestType') + # set the version from the report file + settings.setValue('XENA_VERSION', root[0][0][1].get('GeneratedBy')) + + if test_type == 'Throughput': + results = OrderedDict() + results[ResultsConstants.THROUGHPUT_RX_FPS] = float( + root[0][1][0][0].get('PortRxPps')) + float( + root[0][1][0][1].get('PortRxPps')) + results[ResultsConstants.THROUGHPUT_RX_MBPS] = (float( + root[0][1][0][0].get('PortRxBpsL1')) + float( + root[0][1][0][1].get('PortRxBpsL1')))/ 1000000 + results[ResultsConstants.THROUGHPUT_RX_PERCENT] = ( + 100 - int(root[0][1][0].get('TotalLossRatioPcnt'))) * float( + root[0][1][0].get('TotalTxRatePcnt'))/100 + results[ResultsConstants.TX_RATE_FPS] = root[0][1][0].get( + 'TotalTxRateFps') + results[ResultsConstants.TX_RATE_MBPS] = float( + root[0][1][0].get('TotalTxRateBpsL1')) / 1000000 + results[ResultsConstants.TX_RATE_PERCENT] = root[0][1][0].get( + 'TotalTxRatePcnt') + try: + results[ResultsConstants.MIN_LATENCY_NS] = float( + root[0][1][0][0].get('MinLatency')) * 1000 + except ValueError: + # Stats for latency returned as N/A so just post them + results[ResultsConstants.MIN_LATENCY_NS] = root[0][1][0][0].get( + 'MinLatency') + try: + results[ResultsConstants.MAX_LATENCY_NS] = float( + root[0][1][0][0].get('MaxLatency')) * 1000 + except 
ValueError: + # Stats for latency returned as N/A so just post them + results[ResultsConstants.MAX_LATENCY_NS] = root[0][1][0][0].get( + 'MaxLatency') + try: + results[ResultsConstants.AVG_LATENCY_NS] = float( + root[0][1][0][0].get('AvgLatency')) * 1000 + except ValueError: + # Stats for latency returned as N/A so just post them + results[ResultsConstants.AVG_LATENCY_NS] = root[0][1][0][0].get( + 'AvgLatency') + elif test_type == 'Back2Back': + results = OrderedDict() + + # Just mimic what Ixia does and only return the b2b frame count. + # This may change later once its decided the common results stats + # to be returned should be. + results[ResultsConstants.B2B_FRAMES] = root[0][1][0][0].get( + 'TotalTxBurstFrames') + else: + raise NotImplementedError('Unknown test type in report file.') + + return results + + def _build_packet_header(self, reverse=False): + """ + Build a packet header based on traffic profile using scapy external + libraries. + :param reverse: Swap source and destination info when building header + :return: packet header in hex + """ + srcmac = self._params['traffic']['l2'][ + 'srcmac'] if not reverse else self._params['traffic']['l2'][ + 'dstmac'] + dstmac = self._params['traffic']['l2'][ + 'dstmac'] if not reverse else self._params['traffic']['l2'][ + 'srcmac'] + srcip = self._params['traffic']['l3'][ + 'srcip'] if not reverse else self._params['traffic']['l3']['dstip'] + dstip = self._params['traffic']['l3'][ + 'dstip'] if not reverse else self._params['traffic']['l3']['srcip'] + layer2 = inet.Ether(src=srcmac, dst=dstmac) + layer3 = inet.IP(src=srcip, dst=dstip, + proto=self._params['traffic']['l3']['proto']) + layer4 = inet.UDP(sport=self._params['traffic']['l4']['srcport'], + dport=self._params['traffic']['l4']['dstport']) + if self._params['traffic']['vlan']['enabled']: + vlan = inet.Dot1Q(vlan=self._params['traffic']['vlan']['id'], + prio=self._params['traffic']['vlan']['priority'], + id=self._params['traffic']['vlan']['cfi']) + else: + 
vlan = None + packet = layer2/vlan/layer3/layer4 if vlan else layer2/layer3/layer4 + packet_bytes = bytes(packet) + packet_hex = '0x' + binascii.hexlify(packet_bytes).decode('utf-8') + return packet_hex + + def _create_api_result(self): + """ + Create result dictionary per trafficgen specifications from socket API + stats. If stats are not available return values of 0. + :return: ResultsConstants as dictionary + """ + # Handle each case of statistics based on if the data is available. + # This prevents uncaught exceptions when the stats aren't available. + result_dict = OrderedDict() + if self.tx_stats.data.get(self.tx_stats.pt_stream_keys[0]): + result_dict[ResultsConstants.TX_FRAMES] = self.tx_stats.data[ + self.tx_stats.pt_stream_keys[0]]['packets'] + result_dict[ResultsConstants.TX_RATE_FPS] = self.tx_stats.data[ + self.tx_stats.pt_stream_keys[0]]['pps'] + result_dict[ResultsConstants.TX_RATE_MBPS] = self.tx_stats.data[ + self.tx_stats.pt_stream_keys[0]]['bps'] / 1000000 + result_dict[ResultsConstants.TX_BYTES] = self.tx_stats.data[ + self.tx_stats.pt_stream_keys[0]]['bytes'] + # tx rate percent may need to be halved if bi directional + result_dict[ResultsConstants.TX_RATE_PERCENT] = line_percentage( + self.xmanager.ports[0], self.tx_stats, self._duration, + self._params['traffic']['l2']['framesize']) if \ + self._params['traffic']['bidir'] == 'False' else\ + line_percentage( + self.xmanager.ports[0], self.tx_stats, self._duration, + self._params['traffic']['l2']['framesize']) / 2 + else: + self._logger.error('Transmit stats not available.') + result_dict[ResultsConstants.TX_FRAMES] = 0 + result_dict[ResultsConstants.TX_RATE_FPS] = 0 + result_dict[ResultsConstants.TX_RATE_MBPS] = 0 + result_dict[ResultsConstants.TX_BYTES] = 0 + result_dict[ResultsConstants.TX_RATE_PERCENT] = 0 + + if self.rx_stats.data.get('pr_tpldstraffic'): + result_dict[ResultsConstants.RX_FRAMES] = self.rx_stats.data[ + 'pr_tpldstraffic']['0']['packets'] + result_dict[ + 
ResultsConstants.THROUGHPUT_RX_FPS] = self.rx_stats.data[ + 'pr_tpldstraffic']['0']['pps'] + result_dict[ + ResultsConstants.THROUGHPUT_RX_MBPS] = self.rx_stats.data[ + 'pr_tpldstraffic']['0']['bps'] / 1000000 + result_dict[ResultsConstants.RX_BYTES] = self.rx_stats.data[ + 'pr_tpldstraffic']['0']['bytes'] + # throughput percent may need to be halved if bi directional + result_dict[ + ResultsConstants.THROUGHPUT_RX_PERCENT] = line_percentage( + self.xmanager.ports[1], self.rx_stats, self._duration, + self._params['traffic']['l2']['framesize']) if \ + self._params['traffic']['bidir'] == 'False' else \ + line_percentage( + self.xmanager.ports[1], self.rx_stats, self._duration, + self._params['traffic']['l2']['framesize']) / 2 + + else: + self._logger.error('Receive stats not available.') + result_dict[ResultsConstants.RX_FRAMES] = 0 + result_dict[ResultsConstants.THROUGHPUT_RX_FPS] = 0 + result_dict[ResultsConstants.THROUGHPUT_RX_MBPS] = 0 + result_dict[ResultsConstants.RX_BYTES] = 0 + result_dict[ResultsConstants.THROUGHPUT_RX_PERCENT] = 0 + + if self.rx_stats.data.get('pr_tplderrors'): + result_dict[ResultsConstants.PAYLOAD_ERR] = self.rx_stats.data[ + 'pr_tplderrors']['0']['pld'] + result_dict[ResultsConstants.SEQ_ERR] = self.rx_stats.data[ + 'pr_tplderrors']['0']['seq'] + else: + result_dict[ResultsConstants.PAYLOAD_ERR] = 0 + result_dict[ResultsConstants.SEQ_ERR] = 0 + + if self.rx_stats.data.get('pr_tpldlatency'): + result_dict[ResultsConstants.MIN_LATENCY_NS] = self.rx_stats.data[ + 'pr_tpldlatency']['0']['min'] + result_dict[ResultsConstants.MAX_LATENCY_NS] = self.rx_stats.data[ + 'pr_tpldlatency']['0']['max'] + result_dict[ResultsConstants.AVG_LATENCY_NS] = self.rx_stats.data[ + 'pr_tpldlatency']['0']['avg'] + else: + result_dict[ResultsConstants.MIN_LATENCY_NS] = 0 + result_dict[ResultsConstants.MAX_LATENCY_NS] = 0 + result_dict[ResultsConstants.AVG_LATENCY_NS] = 0 + + return result_dict + + def _setup_json_config(self, trials, loss_rate, testtype=None): + 
""" + Create a 2bUsed json file that will be used for xena2544.exe execution. + :param trials: Number of trials + :param loss_rate: The acceptable loss rate as float + :param testtype: Either '2544_b2b' or '2544_throughput' as string + :return: None + """ + try: + j_file = XenaJSON('./tools/pkt_gen/xena/profiles/baseconfig.x2544') + j_file.set_chassis_info( + settings.getValue('TRAFFICGEN_XENA_IP'), + settings.getValue('TRAFFICGEN_XENA_PASSWORD') + ) + j_file.set_port(0, settings.getValue('TRAFFICGEN_XENA_MODULE1'), + settings.getValue('TRAFFICGEN_XENA_PORT1')) + j_file.set_port(1, settings.getValue('TRAFFICGEN_XENA_MODULE2'), + settings.getValue('TRAFFICGEN_XENA_PORT2')) + j_file.set_port_ip_v4( + 0, settings.getValue("TRAFFICGEN_XENA_PORT0_IP"), + settings.getValue("TRAFFICGEN_XENA_PORT0_CIDR"), + settings.getValue("TRAFFICGEN_XENA_PORT0_GATEWAY")) + j_file.set_port_ip_v4( + 1, settings.getValue("TRAFFICGEN_XENA_PORT1_IP"), + settings.getValue("TRAFFICGEN_XENA_PORT1_CIDR"), + settings.getValue("TRAFFICGEN_XENA_PORT1_GATEWAY")) + + if testtype == '2544_throughput': + j_file.set_test_options_tput( + packet_sizes=self._params['traffic']['l2']['framesize'], + iterations=trials, loss_rate=loss_rate, + duration=self._duration, micro_tpld=True if self._params[ + 'traffic']['l2']['framesize'] == 64 else False) + j_file.enable_throughput_test() + + elif testtype == '2544_b2b': + j_file.set_test_options_back2back( + packet_sizes=self._params['traffic']['l2']['framesize'], + iterations=trials, duration=self._duration, + startvalue=self._params['traffic']['frame_rate'], + endvalue=self._params['traffic']['frame_rate'], + micro_tpld=True if self._params[ + 'traffic']['l2']['framesize'] == 64 else False) + j_file.enable_back2back_test() + + j_file.set_header_layer2( + dst_mac=self._params['traffic']['l2']['dstmac'], + src_mac=self._params['traffic']['l2']['srcmac']) + j_file.set_header_layer3( + src_ip=self._params['traffic']['l3']['srcip'], + 
dst_ip=self._params['traffic']['l3']['dstip'], + protocol=self._params['traffic']['l3']['proto']) + j_file.set_header_layer4_udp( + source_port=self._params['traffic']['l4']['srcport'], + destination_port=self._params['traffic']['l4']['dstport']) + if self._params['traffic']['vlan']['enabled']: + j_file.set_header_vlan( + vlan_id=self._params['traffic']['vlan']['id'], + id=self._params['traffic']['vlan']['cfi'], + prio=self._params['traffic']['vlan']['priority']) + j_file.add_header_segments( + flows=self._params['traffic']['multistream'], + multistream_layer=self._params['traffic']['stream_type']) + # set duplex mode + if self._params['traffic']['bidir'] == "True": + j_file.set_topology_mesh() + else: + j_file.set_topology_blocks() + + j_file.write_config('./tools/pkt_gen/xena/profiles/2bUsed.x2544') + except Exception as exc: + self._logger.exception("Error during Xena JSON setup: %s", exc) + raise + + def _start_traffic_api(self, packet_limit): + """ + Start the Xena traffic using the socket API driver + :param packet_limit: packet limit for stream, set to -1 for no limit + :return: None + """ + if not self.xmanager: + self._xsocket = XenaSocketDriver( + settings.getValue('TRAFFICGEN_XENA_IP')) + self.xmanager = XenaManager( + self._xsocket, settings.getValue('TRAFFICGEN_XENA_USER'), + settings.getValue('TRAFFICGEN_XENA_PASSWORD')) + + # for the report file version info ask the chassis directly for its + # software versions + settings.setValue('XENA_VERSION', 'XENA Socket API - {}'.format( + self.xmanager.get_version())) + + if not len(self.xmanager.ports): + self.xmanager.ports[0] = self.xmanager.add_module_port( + settings.getValue('TRAFFICGEN_XENA_MODULE1'), + settings.getValue('TRAFFICGEN_XENA_PORT1')) + if not self.xmanager.ports[0].reserve_port(): + self._logger.error( + 'Unable to reserve port 0. 
Please release Xena Port') + + if len(self.xmanager.ports) < 2: + self.xmanager.ports[1] = self.xmanager.add_module_port( + settings.getValue('TRAFFICGEN_XENA_MODULE2'), + settings.getValue('TRAFFICGEN_XENA_PORT2')) + if not self.xmanager.ports[1].reserve_port(): + self._logger.error( + 'Unable to reserve port 1. Please release Xena Port') + + # Clear port configuration for a clean start + self.xmanager.ports[0].reset_port() + self.xmanager.ports[1].reset_port() + self.xmanager.ports[0].clear_stats() + self.xmanager.ports[1].clear_stats() + + # set the port IP from the conf file + self.xmanager.ports[0].set_port_ip( + settings.getValue('TRAFFICGEN_XENA_PORT0_IP'), + settings.getValue('TRAFFICGEN_XENA_PORT0_CIDR'), + settings.getValue('TRAFFICGEN_XENA_PORT0_GATEWAY')) + self.xmanager.ports[1].set_port_ip( + settings.getValue('TRAFFICGEN_XENA_PORT1_IP'), + settings.getValue('TRAFFICGEN_XENA_PORT1_CIDR'), + settings.getValue('TRAFFICGEN_XENA_PORT1_GATEWAY')) + + def setup_stream(stream, port, payload_id, flip_addr=False): + """ + Helper function to configure streams. + :param stream: Stream object from XenaDriver module + :param port: Port object from XenaDriver module + :param payload_id: payload ID as int + :param flip_addr: Boolean if the source and destination addresses + should be flipped. 
+ :return: None + """ + stream.set_on() + stream.set_packet_limit(packet_limit) + + stream.set_rate_fraction( + 10000 * self._params['traffic']['frame_rate']) + stream.set_packet_header(self._build_packet_header( + reverse=flip_addr)) + stream.set_header_protocol( + 'ETHERNET VLAN IP UDP' if self._params['traffic']['vlan'][ + 'enabled'] else 'ETHERNET IP UDP') + stream.set_packet_length( + 'fixed', self._params['traffic']['l2']['framesize'], 16383) + stream.set_packet_payload('incrementing', '0x00') + stream.set_payload_id(payload_id) + port.set_port_time_limit(self._duration * 1000000) + + if self._params['traffic']['l2']['framesize'] == 64: + # set micro tpld + port.micro_tpld_enable() + + if self._params['traffic']['multistream']: + stream.enable_multistream( + flows=self._params['traffic']['multistream'], + layer=self._params['traffic']['stream_type']) + + s1_p0 = self.xmanager.ports[0].add_stream() + setup_stream(s1_p0, self.xmanager.ports[0], 0) + + if self._params['traffic']['bidir'] == 'True': + s1_p1 = self.xmanager.ports[1].add_stream() + setup_stream(s1_p1, self.xmanager.ports[1], 1, flip_addr=True) + + if not self.xmanager.ports[0].traffic_on(): + self._logger.error( + "Failure to start port 0. Check settings and retry.") + if self._params['traffic']['bidir'] == 'True': + if not self.xmanager.ports[1].traffic_on(): + self._logger.error( + "Failure to start port 1. 
Check settings and retry.") + sleep(self._duration) + # getting results + if self._params['traffic']['bidir'] == 'True': + # need to aggregate out both ports stats and assign that data + self.rx_stats = self.xmanager.ports[1].get_rx_stats() + self.tx_stats = self.xmanager.ports[0].get_tx_stats() + self.tx_stats.data = aggregate_stats( + self.tx_stats.data, + self.xmanager.ports[1].get_tx_stats().data) + self.rx_stats.data = aggregate_stats( + self.rx_stats.data, + self.xmanager.ports[0].get_rx_stats().data) + else: + # no need to aggregate, just grab the appropriate port stats + self.tx_stats = self.xmanager.ports[0].get_tx_stats() + self.rx_stats = self.xmanager.ports[1].get_rx_stats() + sleep(1) + + def _stop_api_traffic(self): + """ + Stop traffic through the socket API + :return: Return results from _create_api_result method + """ + self.xmanager.ports[0].traffic_off() + if self._params['traffic']['bidir'] == 'True': + self.xmanager.ports[1].traffic_off() + sleep(5) + + stat = self._create_api_result() + self.disconnect() + return stat + + def connect(self): + self._logger.debug('Connect') + return self + + def disconnect(self): + """Disconnect from the traffic generator. + + As with :func:`connect`, this function is optional. + + + Where implemented, this function should raise an exception on + failure. + + :returns: None + """ + self._logger.debug('disconnect') + if self.xmanager: + self.xmanager.disconnect() + self.xmanager = None + + if self._xsocket: + self._xsocket.disconnect() + self._xsocket = None + + def send_burst_traffic(self, traffic=None, numpkts=100, duration=20): + """Send a burst of traffic. 
+ + See ITrafficGenerator for description + """ + self._duration = duration + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._start_traffic_api(numpkts) + return self._stop_api_traffic() + + def send_cont_traffic(self, traffic=None, duration=20): + """Send a continuous flow of traffic. + + See ITrafficGenerator for description + """ + self._duration = duration + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._start_traffic_api(-1) + return self._stop_api_traffic() + + def start_cont_traffic(self, traffic=None, duration=20): + """Non-blocking version of 'send_cont_traffic'. + + See ITrafficGenerator for description + """ + self._duration = duration + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._start_traffic_api(-1) + + def stop_cont_traffic(self): + """Stop continuous transmission and return results. + """ + return self._stop_api_traffic() + + def send_rfc2544_throughput(self, traffic=None, trials=3, duration=20, + lossrate=0.0): + """Send traffic per RFC2544 throughput test specifications. 
+ + See ITrafficGenerator for description + """ + self._duration = duration + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._setup_json_config(trials, lossrate, '2544_throughput') + + args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c", + "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r", + "./tools/pkt_gen/xena", "-u", + settings.getValue('TRAFFICGEN_XENA_USER')] + self.mono_pipe = subprocess.Popen(args, stdout=sys.stdout) + self.mono_pipe.communicate() + root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot() + return Xena._create_throughput_result(root) + + def start_rfc2544_throughput(self, traffic=None, trials=3, duration=20, + lossrate=0.0): + """Non-blocking version of 'send_rfc2544_throughput'. + + See ITrafficGenerator for description + """ + self._duration = duration + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._setup_json_config(trials, lossrate, '2544_throughput') + + args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c", + "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r", + "./tools/pkt_gen/xena", "-u", + settings.getValue('TRAFFICGEN_XENA_USER')] + self.mono_pipe = subprocess.Popen(args, stdout=sys.stdout) + + def wait_rfc2544_throughput(self): + """Wait for and return results of RFC2544 test. + + See ITrafficGenerator for description + """ + self.mono_pipe.communicate() + sleep(2) + root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot() + return Xena._create_throughput_result(root) + + def send_rfc2544_back2back(self, traffic=None, trials=1, duration=20, + lossrate=0.0): + """Send traffic per RFC2544 back2back test specifications. 
+ + See ITrafficGenerator for description + """ + self._duration = duration + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._setup_json_config(trials, lossrate, '2544_b2b') + + args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c", + "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r", + "./tools/pkt_gen/xena", "-u", + settings.getValue('TRAFFICGEN_XENA_USER')] + self.mono_pipe = subprocess.Popen( + args, stdout=sys.stdout) + self.mono_pipe.communicate() + root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot() + return Xena._create_throughput_result(root) + + def start_rfc2544_back2back(self, traffic=None, trials=1, duration=20, + lossrate=0.0): + """Non-blocking version of 'send_rfc2544_back2back'. + + See ITrafficGenerator for description + """ + self._duration = duration + + self._params.clear() + self._params['traffic'] = self.traffic_defaults.copy() + if traffic: + self._params['traffic'] = merge_spec(self._params['traffic'], + traffic) + + self._setup_json_config(trials, lossrate, '2544_b2b') + + args = ["mono", "./tools/pkt_gen/xena/Xena2544.exe", "-c", + "./tools/pkt_gen/xena/profiles/2bUsed.x2544", "-e", "-r", + "./tools/pkt_gen/xena", "-u", + settings.getValue('TRAFFICGEN_XENA_USER')] + self.mono_pipe = subprocess.Popen( + args, stdout=sys.stdout) + + def wait_rfc2544_back2back(self): + """Wait and set results of RFC2544 test. + """ + self.mono_pipe.communicate() + sleep(2) + root = ET.parse(r'./tools/pkt_gen/xena/xena2544-report.xml').getroot() + return Xena._create_throughput_result(root) + + +if __name__ == "__main__": + pass + diff --git a/tools/pkt_gen/xena/xena_json.py b/tools/pkt_gen/xena/xena_json.py new file mode 100644 index 00000000..2a15a932 --- /dev/null +++ b/tools/pkt_gen/xena/xena_json.py @@ -0,0 +1,625 @@ +# Copyright 2016 Red Hat Inc & Xena Networks. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Contributors: +# Dan Amzulescu, Xena Networks +# Christian Trautman, Red Hat Inc. +# +# Usage can be seen below in unit test. This implementation is designed for one +# module two port Xena chassis runs only. + +""" +Xena JSON module +""" + +import base64 +from collections import OrderedDict +import json +import locale +import logging +import uuid + +import scapy.layers.inet as inet + +_LOGGER = logging.getLogger(__name__) +_LOCALE = locale.getlocale()[1] + + +class XenaJSON(object): + """ + Class to modify and read Xena JSON configuration files. + """ + def __init__(self, json_path='./profiles/baseconfig.x2544'): + """ + Constructor + :param json_path: path to JSON file to read. Expected files must have + two module ports with each port having its own stream config profile. 
+ :return: XenaJSON object + """ + self.json_data = read_json_file(json_path) + + self.packet_data = OrderedDict() + self.packet_data['layer2'] = None + self.packet_data['vlan'] = None + self.packet_data['layer3'] = None + self.packet_data['layer4'] = None + + def _add_multistream_layer(self, entity, seg_uuid, stop_value, layer): + """ + Add the multi stream layers to the json file based on the layer provided + :param entity: Entity to append the segment to in entity list + :param seg_uuid: The UUID to attach the multistream layer to + :param stop_value: The number of flows to configure + :param layer: the layer that the multistream will be attached to + :return: None + """ + field_name = { + 2: ('Dst MAC addr', 'Src MAC addr'), + 3: ('Dest IP Addr', 'Src IP Addr'), + 4: ('Dest Port', 'Src Port') + } + segments = [ + { + "Offset": 0, + "Mask": "//8=", # mask of 255/255 + "Action": "INC", + "StartValue": 0, + "StopValue": stop_value, + "StepValue": 1, + "RepeatCount": 1, + "SegmentId": seg_uuid, + "FieldName": field_name[int(layer)][0] + }, + { + "Offset": 0, + "Mask": "//8=", # mask of 255/255 + "Action": "INC", + "StartValue": 0, + "StopValue": stop_value, + "StepValue": 1, + "RepeatCount": 1, + "SegmentId": seg_uuid, + "FieldName": field_name[int(layer)][1] + } + ] + + self.json_data['StreamProfileHandler']['EntityList'][entity][ + 'StreamConfig']['HwModifiers'] = (segments) + + def _create_packet_header(self): + """ + Create the scapy packet header based on what has been built in this + instance using the set header methods. Return tuple of the two byte + arrays, one for each port. 
+ :return: Scapy packet headers as bytearrays + """ + if not self.packet_data['layer2']: + _LOGGER.warning('Using dummy info for layer 2 in Xena JSON file') + self.set_header_layer2() + packet1, packet2 = (self.packet_data['layer2'][0], + self.packet_data['layer2'][1]) + for packet_header in list(self.packet_data.copy().values())[1:]: + if packet_header: + packet1 /= packet_header[0] + packet2 /= packet_header[1] + ret = (bytes(packet1), bytes(packet2)) + return ret + + def add_header_segments(self, flows=0, multistream_layer=None): + """ + Build the header segments to write to the JSON file. + :param flows: Number of flows to configure for multistream if enabled + :param multistream_layer: layer to set multistream flows as string. + Acceptable values are L2, L3 or L4 + :return: None + """ + packet = self._create_packet_header() + segment1 = list() + segment2 = list() + header_pos = 0 + if self.packet_data['layer2']: + # slice out the layer 2 bytes from the packet header byte array + layer2 = packet[0][header_pos: len(self.packet_data['layer2'][0])] + seg = create_segment( + "ETHERNET", encode_byte_array(layer2).decode(_LOCALE)) + if multistream_layer == 'L2' and flows > 0: + self._add_multistream_layer(entity=0, seg_uuid=seg['ItemID'], + stop_value=flows, layer=2) + segment1.append(seg) + # now do the other port data with reversed src, dst info + layer2 = packet[1][header_pos: len(self.packet_data['layer2'][1])] + seg = create_segment( + "ETHERNET", encode_byte_array(layer2).decode(_LOCALE)) + segment2.append(seg) + if multistream_layer == 'L2' and flows > 0: + self._add_multistream_layer(entity=1, seg_uuid=seg['ItemID'], + stop_value=flows, layer=2) + header_pos = len(layer2) + if self.packet_data['vlan']: + # slice out the vlan bytes from the packet header byte array + vlan = packet[0][header_pos: len( + self.packet_data['vlan'][0]) + header_pos] + segment1.append(create_segment( + "VLAN", encode_byte_array(vlan).decode(_LOCALE))) + 
segment2.append(create_segment( + "VLAN", encode_byte_array(vlan).decode(_LOCALE))) + header_pos += len(vlan) + if self.packet_data['layer3']: + # slice out the layer 3 bytes from the packet header byte array + layer3 = packet[0][header_pos: len( + self.packet_data['layer3'][0]) + header_pos] + seg = create_segment( + "IP", encode_byte_array(layer3).decode(_LOCALE)) + segment1.append(seg) + if multistream_layer == 'L3' and flows > 0: + self._add_multistream_layer(entity=0, seg_uuid=seg['ItemID'], + stop_value=flows, layer=3) + # now do the other port data with reversed src, dst info + layer3 = packet[1][header_pos: len( + self.packet_data['layer3'][1]) + header_pos] + seg = create_segment( + "IP", encode_byte_array(layer3).decode(_LOCALE)) + segment2.append(seg) + if multistream_layer == 'L3' and flows > 0: + self._add_multistream_layer(entity=1, seg_uuid=seg['ItemID'], + stop_value=flows, layer=3) + header_pos += len(layer3) + if self.packet_data['layer4']: + # slice out the layer 4 bytes from the packet header byte array + layer4 = packet[0][header_pos: len( + self.packet_data['layer4'][0]) + header_pos] + seg = create_segment( + "UDP", encode_byte_array(layer4).decode(_LOCALE)) + segment1.append(seg) + if multistream_layer == 'L4' and flows > 0: + self._add_multistream_layer(entity=0, seg_uuid=seg['ItemID'], + stop_value=flows, layer=4) + # now do the other port data with reversed src, dst info + layer4 = packet[1][header_pos: len( + self.packet_data['layer4'][1]) + header_pos] + seg = create_segment( + "UDP", encode_byte_array(layer4).decode(_LOCALE)) + segment2.append(seg) + if multistream_layer == 'L4' and flows > 0: + self._add_multistream_layer(entity=1, seg_uuid=seg['ItemID'], + stop_value=flows, layer=4) + header_pos += len(layer4) + + self.json_data['StreamProfileHandler']['EntityList'][0][ + 'StreamConfig']['HeaderSegments'] = segment1 + self.json_data['StreamProfileHandler']['EntityList'][1][ + 'StreamConfig']['HeaderSegments'] = segment2 + + def 
disable_back2back_test(self): + """ + Disable the rfc2544 back to back test + :return: None + """ + self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][ + 'Enabled'] = 'false' + + def disable_throughput_test(self): + """ + Disable the rfc2544 throughput test + :return: None + """ + self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][ + 'Enabled'] = 'false' + + def enable_back2back_test(self): + """ + Enable the rfc2544 back to back test + :return: None + """ + self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][ + 'Enabled'] = 'true' + + def enable_throughput_test(self): + """ + Enable the rfc2544 throughput test + :return: None + """ + self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][ + 'Enabled'] = 'true' + + def set_chassis_info(self, hostname, pwd): + """ + Set the chassis info + :param hostname: hostname as string of ip + :param pwd: password to chassis as string + :return: None + """ + self.json_data['ChassisManager']['ChassisList'][0][ + 'HostName'] = hostname + self.json_data['ChassisManager']['ChassisList'][0][ + 'Password'] = pwd + + def set_header_layer2(self, dst_mac='cc:cc:cc:cc:cc:cc', + src_mac='bb:bb:bb:bb:bb:bb', **kwargs): + """ + Build a scapy Ethernet L2 objects inside instance packet_data structure + :param dst_mac: destination mac as string. Example "aa:aa:aa:aa:aa:aa" + :param src_mac: source mac as string. Example "bb:bb:bb:bb:bb:bb" + :param kwargs: Extra params per scapy usage. 
+ :return: None + """ + self.packet_data['layer2'] = [ + inet.Ether(dst=dst_mac, src=src_mac, **kwargs), + inet.Ether(dst=src_mac, src=dst_mac, **kwargs)] + + def set_header_layer3(self, src_ip='192.168.0.2', dst_ip='192.168.0.3', + protocol='UDP', **kwargs): + """ + Build scapy IPV4 L3 objects inside instance packet_data structure + :param src_ip: source IP as string in dot notation format + :param dst_ip: destination IP as string in dot notation format + :param protocol: protocol for l4 + :param kwargs: Extra params per scapy usage + :return: None + """ + self.packet_data['layer3'] = [ + inet.IP(src=src_ip, dst=dst_ip, proto=protocol.lower(), **kwargs), + inet.IP(src=dst_ip, dst=src_ip, proto=protocol.lower(), **kwargs)] + + def set_header_layer4_udp(self, source_port, destination_port, **kwargs): + """ + Build scapy UDP L4 objects inside instance packet_data structure + :param source_port: Source port as int + :param destination_port: Destination port as int + :param kwargs: Extra params per scapy usage + :return: None + """ + self.packet_data['layer4'] = [ + inet.UDP(sport=source_port, dport=destination_port, **kwargs), + inet.UDP(sport=source_port, dport=destination_port, **kwargs)] + + def set_header_vlan(self, vlan_id=1, **kwargs): + """ + Build a Dot1Q scapy object inside instance packet_data structure + :param vlan_id: The VLAN ID + :param kwargs: Extra params per scapy usage + :return: None + """ + self.packet_data['vlan'] = [ + inet.Dot1Q(vlan=vlan_id, **kwargs), + inet.Dot1Q(vlan=vlan_id, **kwargs)] + + def set_port(self, index, module, port): + """ + Set the module and port for the 0 index port to use with the test + :param index: Index of port to set, 0 = port1, 1=port2, etc.. 
+ :param module: module location as int + :param port: port location in module as int + :return: None + """ + self.json_data['PortHandler']['EntityList'][index]['PortRef'][ + 'ModuleIndex'] = module + self.json_data['PortHandler']['EntityList'][index]['PortRef'][ + 'PortIndex'] = port + + def set_port_ip_v4(self, port, ip_addr, netmask, gateway): + """ + Set the port IP info + :param port: port number as int of port to set ip info + :param ip_addr: ip address in dot notation format as string + :param netmask: cidr number for netmask (ie 24/16/8) as int + :param gateway: gateway address in dot notation format + :return: None + """ + available_ports = range(len( + self.json_data['PortHandler']['EntityList'])) + if port not in available_ports: + raise ValueError("{}{}{}".format( + 'Port assignment must be an available port ', + 'number in baseconfig file. Port=', port)) + self.json_data['PortHandler']['EntityList'][ + port]["IpV4Address"] = ip_addr + self.json_data['PortHandler']['EntityList'][ + port]["IpV4Gateway"] = gateway + self.json_data['PortHandler']['EntityList'][ + port]["IpV4RoutingPrefix"] = int(netmask) + + def set_port_ip_v6(self, port, ip_addr, netmask, gateway): + """ + Set the port IP info + :param port: port number as int of port to set ip info + :param ip_addr: ip address as 8 groups of 4 hexadecimal groups separated + by a colon. + :param netmask: cidr number for netmask (ie 24/16/8) as int + :param gateway: gateway address as string in 8 group of 4 hexadecimal + groups separated by a colon. + :return: None + """ + available_ports = range(len( + self.json_data['PortHandler']['EntityList'])) + if port not in available_ports: + raise ValueError("{}{}{}".format( + 'Port assignment must be an available port ', + 'number in baseconfig file. 
Port=', port)) + self.json_data['PortHandler']['EntityList'][ + port]["IpV6Address"] = ip_addr + self.json_data['PortHandler']['EntityList'][ + port]["IpV6Gateway"] = gateway + self.json_data['PortHandler']['EntityList'][ + port]["IpV6RoutingPrefix"] = int(netmask) + + def set_test_options_tput(self, packet_sizes, duration, iterations, + loss_rate, micro_tpld=False): + """ + Set the tput test options + :param packet_sizes: List of packet sizes to test, single int entry is + acceptable for one packet size testing + :param duration: time for each test in seconds as int + :param iterations: number of iterations of testing as int + :param loss_rate: acceptable loss rate as float + :param micro_tpld: boolean if micro_tpld should be enabled or disabled + :return: None + """ + if isinstance(packet_sizes, int): + packet_sizes = [packet_sizes] + self.json_data['TestOptions']['PacketSizes'][ + 'CustomPacketSizes'] = packet_sizes + self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][ + 'Duration'] = duration + self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][ + 'RateIterationOptions']['AcceptableLoss'] = loss_rate + self.json_data['TestOptions']['FlowCreationOptions'][ + 'UseMicroTpldOnDemand'] = 'true' if micro_tpld else 'false' + self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][ + 'Iterations'] = iterations + + def set_test_options_back2back(self, packet_sizes, duration, + iterations, startvalue, endvalue, + micro_tpld=False): + """ + Set the back2back test options + :param packet_sizes: List of packet sizes to test, single int entry is + acceptable for one packet size testing + :param duration: time for each test in seconds as int + :param iterations: number of iterations of testing as int + :param micro_tpld: boolean if micro_tpld should be enabled or disabled + :param StartValue: start value + :param EndValue: end value + :return: None + """ + if isinstance(packet_sizes, int): + packet_sizes = [packet_sizes] + 
self.json_data['TestOptions']['PacketSizes'][ + 'CustomPacketSizes'] = packet_sizes + self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][ + 'Duration'] = duration + self.json_data['TestOptions']['FlowCreationOptions'][ + 'UseMicroTpldOnDemand'] = 'true' if micro_tpld else 'false' + self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][ + 'Iterations'] = iterations + self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][ + 'RateSweepOptions']['StartValue'] = startvalue + self.json_data['TestOptions']['TestTypeOptionMap']['Back2Back'][ + 'RateSweepOptions']['EndValue'] = endvalue + + def set_topology_blocks(self): + """ + Set the test topology to a West to East config for half duplex flow with + port 0 as the sender and port 1 as the receiver. + :return: None + """ + self.json_data['TestOptions']['TopologyConfig']['Topology'] = 'BLOCKS' + self.json_data['TestOptions']['TopologyConfig'][ + 'Direction'] = 'WEST_EAST' + self.json_data['PortHandler']['EntityList'][0][ + 'PortGroup'] = "WEST" + self.json_data['PortHandler']['EntityList'][1][ + 'PortGroup'] = "EAST" + + def set_topology_mesh(self): + """ + Set the test topology to Mesh for bi directional full duplex flow + :return: None + """ + self.json_data['TestOptions']['TopologyConfig']['Topology'] = 'MESH' + self.json_data['TestOptions']['TopologyConfig']['Direction'] = 'BIDIR' + self.json_data['PortHandler']['EntityList'][0][ + 'PortGroup'] = "UNDEFINED" + self.json_data['PortHandler']['EntityList'][1][ + 'PortGroup'] = "UNDEFINED" + + def write_config(self, path='./2bUsed.x2544'): + """ + Write the config to out as file + :param path: Output file to export the json data to + :return: None + """ + if not write_json_file(self.json_data, path): + raise RuntimeError("Could not write out file, please check config") + + +def create_segment(header_type, encode_64_string): + """ + Create segment for JSON file + :param header_type: Type of header as string + :param encode_64_string: 64 byte 
encoded string value of the hex bytes + :return: segment as dictionary + """ + return { + "SegmentType": header_type.upper(), + "SegmentValue": encode_64_string, + "ItemID": str(uuid.uuid4()), + "ParentID": "", + "Label": ""} + + +def decode_byte_array(enc_str): + """ Decodes the base64-encoded string to a byte array + :param enc_str: The base64-encoded string representing a byte array + :return: The decoded byte array + """ + dec_string = base64.b64decode(enc_str) + barray = bytearray() + barray.extend(dec_string) + return barray + + +def encode_byte_array(byte_arr): + """ Encodes the byte array as a base64-encoded string + :param byte_arr: A bytearray containing the bytes to convert + :return: A base64 encoded string + """ + enc_string = base64.b64encode(bytes(byte_arr)) + return enc_string + + +def print_json_report(json_data): + """ + Print out info from the json data for testing purposes only. + :param json_data: json loaded data from json.loads + :return: None + """ + print("<<Xena JSON Config Report>>\n") + try: + print("### Chassis Info ###") + print("Chassis IP: {}".format(json_data['ChassisManager'][ + 'ChassisList'][0]['HostName'])) + print("Chassis Password: {}".format(json_data['ChassisManager'][ + 'ChassisList'][0]['Password'])) + print("### Port Configuration ###") + print("Port 1 IPv4:{}/{} gateway:{}".format( + json_data['PortHandler']['EntityList'][0]["IpV4Address"], + json_data['PortHandler']['EntityList'][0]["IpV4RoutingPrefix"], + json_data['PortHandler']['EntityList'][0]["IpV4Gateway"])) + print("Port 1 IPv6:{}/{} gateway:{}".format( + json_data['PortHandler']['EntityList'][0]["IpV6Address"], + json_data['PortHandler']['EntityList'][0]["IpV6RoutingPrefix"], + json_data['PortHandler']['EntityList'][0]["IpV6Gateway"])) + print("Port 2 IPv4:{}/{} gateway:{}".format( + json_data['PortHandler']['EntityList'][1]["IpV4Address"], + json_data['PortHandler']['EntityList'][1]["IpV4RoutingPrefix"], + 
json_data['PortHandler']['EntityList'][1]["IpV4Gateway"])) + print("Port 2 IPv6:{}/{} gateway:{}".format( + json_data['PortHandler']['EntityList'][1]["IpV6Address"], + json_data['PortHandler']['EntityList'][1]["IpV6RoutingPrefix"], + json_data['PortHandler']['EntityList'][1]["IpV6Gateway"])) + print("Port 1: {}/{} group: {}".format( + json_data['PortHandler']['EntityList'][0]['PortRef']['ModuleIndex'], + json_data['PortHandler']['EntityList'][0]['PortRef']['PortIndex'], + json_data['PortHandler']['EntityList'][0]['PortGroup'])) + print("Port 2: {}/{} group: {}".format( + json_data['PortHandler']['EntityList'][1]['PortRef']['ModuleIndex'], + json_data['PortHandler']['EntityList'][1]['PortRef']['PortIndex'], + json_data['PortHandler']['EntityList'][1]['PortGroup'])) + print("### Tests Enabled ###") + print("Back2Back Enabled: {}".format(json_data['TestOptions'][ + 'TestTypeOptionMap']['Back2Back']['Enabled'])) + print("Throughput Enabled: {}".format(json_data['TestOptions'][ + 'TestTypeOptionMap']['Throughput']['Enabled'])) + print("### Test Options ###") + print("Test topology: {}/{}".format( + json_data['TestOptions']['TopologyConfig']['Topology'], + json_data['TestOptions']['TopologyConfig']['Direction'])) + print("Packet Sizes: {}".format(json_data['TestOptions'][ + 'PacketSizes']['CustomPacketSizes'])) + print("Test duration: {}".format(json_data['TestOptions'][ + 'TestTypeOptionMap']['Throughput']['Duration'])) + print("Acceptable loss rate: {}".format(json_data['TestOptions'][ + 'TestTypeOptionMap']['Throughput']['RateIterationOptions'][ + 'AcceptableLoss'])) + print("Micro TPLD enabled: {}".format(json_data['TestOptions'][ + 'FlowCreationOptions']['UseMicroTpldOnDemand'])) + print("Test iterations: {}".format(json_data['TestOptions'][ + 'TestTypeOptionMap']['Throughput']['Iterations'])) + if 'StreamConfig' in json_data['StreamProfileHandler']['EntityList'][0]: + print("### Header segments ###") + for seg in json_data['StreamProfileHandler']['EntityList']: + 
for header in seg['StreamConfig']['HeaderSegments']: + print("Type: {}".format( + header['SegmentType'])) + print("Value: {}".format(decode_byte_array( + header['SegmentValue']))) + print("### Multi Stream config ###") + for seg in json_data['StreamProfileHandler']['EntityList']: + for header in seg['StreamConfig']['HwModifiers']: + print(header) + except KeyError as exc: + print("Error setting not found in JSON data: {}".format(exc)) + + +def read_json_file(json_file): + """ + Read the json file path and return a dictionary of the data + :param json_file: path to json file + :return: dictionary of json data + """ + try: + with open(json_file, 'r', encoding=_LOCALE) as data_file: + file_data = json.loads(data_file.read()) + except ValueError as exc: + # general json exception, Python 3.5 adds new exception type + _LOGGER.exception("Exception with json read: %s", exc) + raise + except IOError as exc: + _LOGGER.exception( + 'Exception during file open: %s file=%s', exc, json_file) + raise + return file_data + + +def write_json_file(json_data, output_path): + """ + Write out the dictionary of data to a json file + :param json_data: dictionary of json data + :param output_path: file path to write output + :return: Boolean if success + """ + try: + with open(output_path, 'w', encoding=_LOCALE) as fileh: + json.dump(json_data, fileh, indent=2, sort_keys=True, + ensure_ascii=True) + return True + except ValueError as exc: + # general json exception, Python 3.5 adds new exception type + _LOGGER.exception( + "Exception with json write: %s", exc) + return False + except IOError as exc: + _LOGGER.exception( + 'Exception during file write: %s file=%s', exc, output_path) + return False + + +if __name__ == "__main__": + print("Running UnitTest for XenaJSON") + JSON = XenaJSON() + print_json_report(JSON.json_data) + JSON.set_chassis_info('192.168.0.5', 'vsperf') + JSON.set_port(0, 1, 0) + JSON.set_port(1, 1, 1) + JSON.set_port_ip_v4(0, '192.168.240.10', 32, '192.168.240.1') + 
JSON.set_port_ip_v4(1, '192.168.240.11', 32, '192.168.240.1') + JSON.set_port_ip_v6(0, 'a1a1:a2a2:a3a3:a4a4:a5a5:a6a6:a7a7:a8a8', 128, + 'a1a1:a2a2:a3a3:a4a4:a5a5:a6a6:a7a7:1111') + JSON.set_port_ip_v6(1, 'b1b1:b2b2:b3b3:b4b4:b5b5:b6b6:b7b7:b8b8', 128, + 'b1b1:b2b2:b3b3:b4b4:b5b5:b6b6:b7b7:1111') + JSON.set_header_layer2(dst_mac='dd:dd:dd:dd:dd:dd', + src_mac='ee:ee:ee:ee:ee:ee') + JSON.set_header_vlan(vlan_id=5) + JSON.set_header_layer3(src_ip='192.168.100.2', dst_ip='192.168.100.3', + protocol='udp') + JSON.set_header_layer4_udp(source_port=3000, destination_port=3001) + JSON.set_test_options_tput(packet_sizes=[64], duration=10, iterations=1, + loss_rate=0.0, micro_tpld=True) + JSON.add_header_segments(flows=4000, multistream_layer='L4') + JSON.set_topology_blocks() + write_json_file(JSON.json_data, './testthis.x2544') + JSON = XenaJSON('./testthis.x2544') + print_json_report(JSON.json_data) + diff --git a/tools/systeminfo.py b/tools/systeminfo.py index 62db852b..9d8eb5cb 100644 --- a/tools/systeminfo.py +++ b/tools/systeminfo.py @@ -71,8 +71,9 @@ def get_nic(): output = subprocess.check_output('lspci', shell=True) output = output.decode(locale.getdefaultlocale()[1]) for line in output.split('\n'): - for nic_pciid in S.getValue('WHITELIST_NICS'): - if line.startswith(nic_pciid): + for nic in S.getValue('NICS'): + # lspci shows PCI addresses without domain part, i.e. 
last 7 chars + if line.startswith(nic['pci'][-7:]): nics.append(''.join(line.split(':')[2:]).strip()) return nics @@ -167,6 +168,14 @@ def get_pid(proc_name_str): """ return get_pids([proc_name_str]) +def pid_isalive(pid): + """ Checks if given PID is alive + + :param pid: PID of the process + :returns: True if given process is running, False otherwise + """ + return os.path.isdir('/proc/' + str(pid)) + # This function uses long switch per purpose, so let us suppress pylint warning too-many-branches # pylint: disable=R0912 def get_version(app_name): @@ -181,7 +190,7 @@ def get_version(app_name): 'dpdk' : os.path.join(S.getValue('RTE_SDK'), 'lib/librte_eal/common/include/rte_version.h'), 'qemu' : os.path.join(S.getValue('QEMU_DIR'), 'VERSION'), 'l2fwd' : os.path.join(S.getValue('ROOT_DIR'), 'src/l2fwd/l2fwd.c'), - 'ixnet' : os.path.join(S.getValue('TRAFFICGEN_IXNET_LIB_PATH'), 'pkgIndex.tcl') + 'ixnet' : os.path.join(S.getValue('TRAFFICGEN_IXNET_LIB_PATH'), 'pkgIndex.tcl'), } @@ -239,6 +248,12 @@ def get_version(app_name): app_version = match_line(app_version_file['ixnet'], 'package provide IxTclNetwork') if app_version: app_version = app_version.split(' ')[3] + elif app_name.lower() == 'xena': + try: + app_version = S.getValue('XENA_VERSION') + except AttributeError: + # setting was not available after execution + app_version = 'N/A' elif app_name.lower() == 'dummy': # get git tag of file with Dummy implementation app_git_tag = get_git_tag(os.path.join(S.getValue('ROOT_DIR'), 'tools/pkt_gen/dummy/dummy.py')) diff --git a/tools/tasks.py b/tools/tasks.py index 90b7e553..9816a336 100644 --- a/tools/tasks.py +++ b/tools/tasks.py @@ -26,6 +26,7 @@ import locale import time from conf import settings +from tools import systeminfo CMD_PREFIX = 'cmd : ' @@ -85,17 +86,24 @@ def run_task(cmd, logger, msg=None, check_error=False): for file_d in ret[0]: if file_d == proc.stdout.fileno(): - line = proc.stdout.readline() - if settings.getValue('VERBOSITY') == 'debug': - 
sys.stdout.write(line.decode(my_encoding)) - stdout.append(line) + while True: + line = proc.stdout.readline() + if not line: + break + if settings.getValue('VERBOSITY') == 'debug': + sys.stdout.write(line.decode(my_encoding)) + stdout.append(line) if file_d == proc.stderr.fileno(): - line = proc.stderr.readline() - sys.stderr.write(line.decode(my_encoding)) - stderr.append(line) + while True: + line = proc.stderr.readline() + if not line: + break + sys.stderr.write(line.decode(my_encoding)) + stderr.append(line) if proc.poll() is not None: break + except OSError as ex: handle_error(ex) else: @@ -150,6 +158,55 @@ def run_interactive_task(cmd, logger, msg): return child +def terminate_task_subtree(pid, signal='-15', sleep=10, logger=None): + """Terminate given process and all its children + + Function will sent given signal to the process. In case + that process will not terminate within given sleep interval + and signal was not SIGKILL, then process will be killed by SIGKILL. + After that function will check if all children of the process + are terminated and if not the same terminating procedure is applied + on any living child (only one level of children is considered). + + :param pid: Process ID to terminate + :param signal: Signal to be sent to the process + :param sleep: Maximum delay in seconds after signal is sent + :param logger: Logger to write details to + """ + try: + output = subprocess.check_output("pgrep -P " + str(pid), shell=True).decode().rstrip('\n') + except subprocess.CalledProcessError: + output = "" + + terminate_task(pid, signal, sleep, logger) + + # just for case children were kept alive + children = output.split('\n') + for child in children: + terminate_task(child, signal, sleep, logger) + +def terminate_task(pid, signal='-15', sleep=10, logger=None): + """Terminate process with given pid + + Function will sent given signal to the process. 
In case + that process will not terminate within given sleep interval + and signal was not SIGKILL, then process will be killed by SIGKILL. + + :param pid: Process ID to terminate + :param signal: Signal to be sent to the process + :param sleep: Maximum delay in seconds after signal is sent + :param logger: Logger to write details to + """ + if systeminfo.pid_isalive(pid): + run_task(['sudo', 'kill', signal, str(pid)], logger) + logger.debug('Wait for process %s to terminate after signal %s', pid, signal) + for dummy in range(sleep): + time.sleep(1) + if not systeminfo.pid_isalive(pid): + break + + if signal.lstrip('-').upper() not in ('9', 'KILL', 'SIGKILL') and systeminfo.pid_isalive(pid): + terminate_task(pid, '-9', sleep, logger) class Process(object): """Control an instance of a long-running process. @@ -242,17 +299,14 @@ class Process(object): self.kill() raise exc - def kill(self, signal='-15', sleep=2): + def kill(self, signal='-15', sleep=10): """Kill process instance if it is alive. :param signal: signal to be sent to the process :param sleep: delay in seconds after signal is sent """ - if self._child and self._child.isalive(): - run_task(['sudo', 'kill', signal, str(self._child.pid)], - self._logger) - self._logger.debug('Wait for process to terminate') - time.sleep(sleep) + if self.is_running(): + terminate_task_subtree(self._child.pid, signal, sleep, self._logger) if self.is_relinquished(): self._relinquish_thread.join() @@ -275,7 +329,7 @@ class Process(object): :returns: True if process is running, else False """ - return self._child is not None + return self._child and self._child.isalive() def _affinitize_pid(self, core, pid): """Affinitize a process with ``pid`` to ``core``. 
@@ -298,7 +352,7 @@ class Process(object): """ self._logger.info('Affinitizing process') - if self._child and self._child.isalive(): + if self.is_running(): self._affinitize_pid(core, self._child.pid) class ContinueReadPrintLoop(threading.Thread): diff --git a/tools/veth.py b/tools/veth.py new file mode 100644 index 00000000..6418d11a --- /dev/null +++ b/tools/veth.py @@ -0,0 +1,118 @@ +# Copyright 2016 Red Hat Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +veth port emulation +""" + +import logging +import os + +from tools import tasks + +_LOGGER = logging.getLogger(__name__) + + +def add_veth_port(port, peer_port): + """ + Add a veth port + :param port:port name for the first port + :param peer_port: port name for the peer port + :return: None + """ + # touch some files in a tmp area so we can track them. This allows us to + # track VSPerf created veth ports so they can be cleaned up if needed. 
+ if not os.path.isdir('/tmp/veth'): + try: + os.mkdir('/tmp/veth') + except os.error: + # OK don't crash but cleanup may be an issue + _LOGGER.error('Unable to create veth temp folder.') + _LOGGER.error( + 'Veth ports may not be removed on testcase completion') + if os.path.isdir('/tmp/veth'): + with open('/tmp/veth/{}-{}'.format(port, peer_port), 'a'): + os.utime('/tmp/veth/{}-{}'.format(port, peer_port), None) + tasks.run_task(['sudo', 'ip', 'link', 'add', + port, 'type', 'veth', 'peer', 'name', peer_port], + _LOGGER, 'Adding veth port {} with peer port {}...'.format( + port, peer_port), False) + + +def bring_up_eth_port(eth_port, namespace=None): + """ + Bring up an eth port + :param eth_port: string of eth port to bring up + :param namespace: Namespace eth port it located if needed + :return: None + """ + if namespace: + tasks.run_task(['sudo', 'ip', 'netns', 'exec', namespace, + 'ip', 'link', 'set', eth_port, 'up'], + _LOGGER, + 'Bringing up port {} in namespace {}...'.format( + eth_port, namespace), False) + else: + tasks.run_task(['sudo', 'ip', 'link', 'set', eth_port, 'up'], + _LOGGER, 'Bringing up port...', False) + + +def del_veth_port(port, peer_port): + """ + Delete the veth ports, the peer will automatically be deleted on deletion + of the first port param + :param port: port name to delete + :param port: peer port name + :return: None + """ + # delete the file if it exists in the temp area + if os.path.exists('/tmp/veth/{}-{}'.format(port, peer_port)): + os.remove('/tmp/veth/{}-{}'.format(port, peer_port)) + tasks.run_task(['sudo', 'ip', 'link', 'del', port], + _LOGGER, 'Deleting veth port {} with peer {}...'.format( + port, peer_port), False) + + +# pylint: disable=unused-argument +def validate_add_veth_port(result, port, peer_port): + """ + Validation function for integration testcases + """ + devs = os.listdir('/sys/class/net') + return all([port in devs, peer_port in devs]) + + +def validate_bring_up_eth_port(result, eth_port, namespace=None): + 
""" + Validation function for integration testcases + """ + command = list() + if namespace: + command += ['ip', 'netns', 'exec', namespace] + command += ['cat', '/sys/class/net/{}/operstate'.format(eth_port)] + out = tasks.run_task(command, _LOGGER, 'Validating port up...', False) + + # since different types of ports may report different status the best way + # we can do this for now is to just make sure it doesn't say down + if 'down' in out: + return False + return True + + +def validate_del_veth_port(result, port, peer_port): + """ + Validation function for integration testcases + """ + devs = os.listdir('/sys/class/net') + return not any([port in devs, peer_port in devs]) diff --git a/vnfs/qemu/qemu.py b/vnfs/qemu/qemu.py index cb6d9ecc..9382edef 100644 --- a/vnfs/qemu/qemu.py +++ b/vnfs/qemu/qemu.py @@ -20,6 +20,8 @@ import logging import locale import re import subprocess +import time +import pexpect from conf import settings as S from conf import get_test_param @@ -62,6 +64,18 @@ class IVnfQemu(IVnf): else: self._net2 = self._net2.split(',')[self._number] + # set guest loopback application based on VNF configuration + # cli option take precedence to config file values + self._guest_loopback = S.getValue('GUEST_LOOPBACK')[self._number] + + self._testpmd_fwd_mode = S.getValue('GUEST_TESTPMD_FWD_MODE') + # in case of SRIOV we must ensure, that MAC addresses are not swapped + if S.getValue('SRIOV_ENABLED') and self._testpmd_fwd_mode.startswith('mac') and \ + not S.getValue('VNF').endswith('PciPassthrough'): + + self._logger.info("SRIOV detected, forwarding mode of testpmd was changed from '%s' to '%s'", + self._testpmd_fwd_mode, 'io') + self._testpmd_fwd_mode = 'io' name = 'Client%d' % self._number vnc = ':%d' % self._number @@ -116,6 +130,32 @@ class IVnfQemu(IVnf): if self._timeout: self._config_guest_loopback() + def stop(self): + """ + Stops VNF instance gracefully first. 
+ """ + try: + # exit testpmd if needed + if self._guest_loopback == 'testpmd': + self.execute_and_wait('stop', 120, "Done") + self.execute_and_wait('quit', 120, "[bB]ye") + + # turn off VM + self.execute_and_wait('poweroff', 120, "Power down") + + except pexpect.TIMEOUT: + self.kill() + + # wait until qemu shutdowns + self._logger.debug('Wait for QEMU to terminate') + for dummy in range(30): + time.sleep(1) + if not self.is_running(): + break + + # just for case that graceful shutdown failed + super(IVnfQemu, self).stop() + # helper functions def _login(self, timeout=120): @@ -196,24 +236,20 @@ class IVnfQemu(IVnf): def _config_guest_loopback(self): """ - Configure VM to run VNF (e.g. port forwarding application) + Configure VM to run VNF, e.g. port forwarding application based on the configuration """ - # set guest loopback application based on VNF configuration - # cli option take precedence to config file values - guest_loopback = S.getValue('GUEST_LOOPBACK')[self._number] - - if guest_loopback == 'testpmd': + if self._guest_loopback == 'testpmd': self._login() self._configure_testpmd() - elif guest_loopback == 'l2fwd': + elif self._guest_loopback == 'l2fwd': self._login() self._configure_l2fwd() - elif guest_loopback == 'linux_bridge': + elif self._guest_loopback == 'linux_bridge': self._login() self._configure_linux_bridge() - elif guest_loopback != 'buildin': + elif self._guest_loopback != 'buildin': self._logger.error('Unsupported guest loopback method "%s" was specified. 
Option' - ' "buildin" will be used as a fallback.', guest_loopback) + ' "buildin" will be used as a fallback.', self._guest_loopback) def wait(self, prompt=S.getValue('GUEST_PROMPT'), timeout=30): super(IVnfQemu, self).wait(prompt=prompt, timeout=timeout) @@ -225,7 +261,7 @@ class IVnfQemu(IVnf): def _modify_dpdk_makefile(self): """ - Modifies DPDK makefile in Guest before compilation + Modifies DPDK makefile in Guest before compilation if needed """ pass @@ -234,14 +270,15 @@ class IVnfQemu(IVnf): Mount shared directory and copy DPDK and l2fwd sources """ # mount shared directory - self.execute_and_wait('umount ' + S.getValue('OVS_DPDK_SHARE')) + self.execute_and_wait('umount /dev/sdb1') self.execute_and_wait('rm -rf ' + S.getValue('GUEST_OVS_DPDK_DIR')) self.execute_and_wait('mkdir -p ' + S.getValue('OVS_DPDK_SHARE')) - self.execute_and_wait('mount -o iocharset=utf8 /dev/sdb1 ' + + self.execute_and_wait('mount -o ro,iocharset=utf8 /dev/sdb1 ' + S.getValue('OVS_DPDK_SHARE')) self.execute_and_wait('mkdir -p ' + S.getValue('GUEST_OVS_DPDK_DIR')) - self.execute_and_wait('cp -ra ' + os.path.join(S.getValue('OVS_DPDK_SHARE'), dirname) + + self.execute_and_wait('cp -r ' + os.path.join(S.getValue('OVS_DPDK_SHARE'), dirname) + ' ' + S.getValue('GUEST_OVS_DPDK_DIR')) + self.execute_and_wait('umount /dev/sdb1') def _configure_disable_firewall(self): """ @@ -291,6 +328,11 @@ class IVnfQemu(IVnf): # modify makefile if needed self._modify_dpdk_makefile() + # disable network interfaces, so DPDK can take care of them + self.execute_and_wait('ifdown ' + self._net1) + self.execute_and_wait('ifdown ' + self._net2) + + # build and insert igb_uio and rebind interfaces to it self.execute_and_wait('make RTE_OUTPUT=$RTE_SDK/$RTE_TARGET -C ' '$RTE_SDK/lib/librte_eal/linuxapp/igb_uio') self.execute_and_wait('modprobe uio') @@ -298,21 +340,39 @@ class IVnfQemu(IVnf): S.getValue('RTE_TARGET')) self.execute_and_wait('./tools/dpdk_nic_bind.py --status') self.execute_and_wait( + 
'./tools/dpdk_nic_bind.py -u' ' ' + + S.getValue('GUEST_NET1_PCI_ADDRESS')[self._number] + ' ' + + S.getValue('GUEST_NET2_PCI_ADDRESS')[self._number]) + self.execute_and_wait( './tools/dpdk_nic_bind.py -b igb_uio' ' ' + S.getValue('GUEST_NET1_PCI_ADDRESS')[self._number] + ' ' + S.getValue('GUEST_NET2_PCI_ADDRESS')[self._number]) + self.execute_and_wait('./tools/dpdk_nic_bind.py --status') # build and run 'test-pmd' self.execute_and_wait('cd ' + S.getValue('GUEST_OVS_DPDK_DIR') + '/DPDK/app/test-pmd') self.execute_and_wait('make clean') self.execute_and_wait('make') - self.execute_and_wait('./testpmd -c 0x3 -n 4 --socket-mem 512 --' - ' --burst=64 -i --txqflags=0xf00 ' + - '--disable-hw-vlan', 60, "Done") - self.execute('set fwd mac_retry', 1) + if int(S.getValue('GUEST_NIC_QUEUES')): + self.execute_and_wait( + './testpmd {} -n4 --socket-mem 512 --'.format( + S.getValue('GUEST_TESTPMD_CPU_MASK')) + + ' --burst=64 -i --txqflags=0xf00 ' + + '--nb-cores={} --rxq={} --txq={} '.format( + S.getValue('GUEST_TESTPMD_NB_CORES'), + S.getValue('GUEST_TESTPMD_TXQ'), + S.getValue('GUEST_TESTPMD_RXQ')) + + '--disable-hw-vlan', 60, "Done") + else: + self.execute_and_wait( + './testpmd {} -n 4 --socket-mem 512 --'.format( + S.getValue('GUEST_TESTPMD_CPU_MASK')) + + ' --burst=64 -i --txqflags=0xf00 ' + + '--disable-hw-vlan', 60, "Done") + self.execute('set fwd ' + self._testpmd_fwd_mode, 1) self.execute_and_wait('start', 20, - 'TX RS bit threshold=0 - TXQ flags=0xf00') + 'TX RS bit threshold=.+ - TXQ flags=0xf00') def _configure_l2fwd(self): """ @@ -337,17 +397,23 @@ class IVnfQemu(IVnf): """ self._configure_disable_firewall() - self.execute('ifconfig ' + self._net1 + ' ' + - S.getValue('VANILLA_NIC1_IP_CIDR')[self._number]) + self.execute('ip addr add ' + + S.getValue('VANILLA_NIC1_IP_CIDR')[self._number] + + ' dev ' + self._net1) + self.execute('ip link set dev ' + self._net1 + ' up') - self.execute('ifconfig ' + self._net2 + ' ' + - 
S.getValue('VANILLA_NIC2_IP_CIDR')[self._number]) + self.execute('ip addr add ' + + S.getValue('VANILLA_NIC2_IP_CIDR')[self._number] + + ' dev ' + self._net2) + self.execute('ip link set dev ' + self._net2 + ' up') # configure linux bridge self.execute('brctl addbr br0') self.execute('brctl addif br0 ' + self._net1 + ' ' + self._net2) - self.execute('ifconfig br0 ' + - S.getValue('VANILLA_BRIDGE_IP')[self._number]) + self.execute('ip addr add ' + + S.getValue('VANILLA_BRIDGE_IP')[self._number] + + ' dev br0') + self.execute('ip link set dev br0 up') # Add the arp entries for the IXIA ports and the bridge you are using. # Use command line values if provided. diff --git a/vnfs/qemu/qemu_dpdk_vhost_cuse.py b/vnfs/qemu/qemu_dpdk_vhost_cuse.py index e5a5e823..ab4fec84 100644 --- a/vnfs/qemu/qemu_dpdk_vhost_cuse.py +++ b/vnfs/qemu/qemu_dpdk_vhost_cuse.py @@ -56,13 +56,3 @@ class QemuDpdkVhostCuse(IVnfQemu): ',netdev=' + net2 + ',csum=off,gso=off,' + 'guest_tso4=off,guest_tso6=off,guest_ecn=off', ] - - # helper functions - - def _modify_dpdk_makefile(self): - """ - Modifies DPDK makefile in Guest before compilation - """ - self.execute_and_wait("sed -i -e 's/CONFIG_RTE_LIBRTE_VHOST_USER=n/" + - "CONFIG_RTE_LIBRTE_VHOST_USER=y/g'" + - "config/common_linuxapp") diff --git a/vnfs/qemu/qemu_dpdk_vhost_user.py b/vnfs/qemu/qemu_dpdk_vhost_user.py index f0f97d8a..49131423 100644 --- a/vnfs/qemu/qemu_dpdk_vhost_user.py +++ b/vnfs/qemu/qemu_dpdk_vhost_user.py @@ -38,6 +38,14 @@ class QemuDpdkVhostUser(IVnfQemu): net1 = 'net' + str(i + 1) net2 = 'net' + str(i + 2) + # multi-queue values + if int(S.getValue('GUEST_NIC_QUEUES')): + queue_str = ',queues={}'.format(S.getValue('GUEST_NIC_QUEUES')) + mq_vector_str = ',mq=on,vectors={}'.format( + int(S.getValue('GUEST_NIC_QUEUES')) * 2 + 2) + else: + queue_str, mq_vector_str = '', '' + self._cmd += ['-chardev', 'socket,id=char' + if1 + ',path=' + S.getValue('OVS_VAR_DIR') + @@ -48,19 +56,20 @@ class QemuDpdkVhostUser(IVnfQemu): 
'dpdkvhostuser' + if2, '-netdev', 'type=vhost-user,id=' + net1 + - ',chardev=char' + if1 + ',vhostforce', + ',chardev=char' + if1 + ',vhostforce' + queue_str, '-device', 'virtio-net-pci,mac=' + S.getValue('GUEST_NET1_MAC')[self._number] + ',netdev=' + net1 + ',csum=off,gso=off,' + - 'guest_tso4=off,guest_tso6=off,guest_ecn=off', + 'guest_tso4=off,guest_tso6=off,guest_ecn=off' + + mq_vector_str, '-netdev', 'type=vhost-user,id=' + net2 + - ',chardev=char' + if2 + ',vhostforce', + ',chardev=char' + if2 + ',vhostforce' + queue_str, '-device', 'virtio-net-pci,mac=' + S.getValue('GUEST_NET2_MAC')[self._number] + ',netdev=' + net2 + ',csum=off,gso=off,' + - 'guest_tso4=off,guest_tso6=off,guest_ecn=off', + 'guest_tso4=off,guest_tso6=off,guest_ecn=off' + + mq_vector_str, ] - diff --git a/vnfs/qemu/qemu_pci_passthrough.py b/vnfs/qemu/qemu_pci_passthrough.py new file mode 100644 index 00000000..1b55fdf2 --- /dev/null +++ b/vnfs/qemu/qemu_pci_passthrough.py @@ -0,0 +1,87 @@ +# Copyright 2015 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Automation of QEMU hypervisor with direct access to host NICs via + PCI passthrough. 
+""" + +import logging +import subprocess +import os + +from conf import settings as S +from vnfs.qemu.qemu import IVnfQemu +from tools import tasks +from tools.module_manager import ModuleManager + +_MODULE_MANAGER = ModuleManager() +_RTE_PCI_TOOL = os.path.join(S.getValue('RTE_SDK'), 'tools', 'dpdk_nic_bind.py') + +class QemuPciPassthrough(IVnfQemu): + """ + Control an instance of QEMU with direct access to the host network devices + """ + def __init__(self): + """ + Initialization function. + """ + super(QemuPciPassthrough, self).__init__() + self._logger = logging.getLogger(__name__) + self._nics = S.getValue('NICS') + + # in case of SRIOV and PCI passthrough we must ensure, that MAC addresses are swapped + if S.getValue('SRIOV_ENABLED') and not self._testpmd_fwd_mode.startswith('mac'): + self._logger.info("SRIOV detected, forwarding mode of testpmd was changed from '%s' to '%s'", + self._testpmd_fwd_mode, 'mac_retry') + self._testpmd_fwd_mode = 'mac_retry' + + for nic in self._nics: + self._cmd += ['-device', 'vfio-pci,host=' + nic['pci']] + + def start(self): + """ + Start QEMU instance, bind host NICs to vfio-pci driver + """ + # load vfio-pci + _MODULE_MANAGER.insert_modules(['vfio-pci']) + + # bind every interface to vfio-pci driver + try: + nics_list = list(tmp_nic['pci'] for tmp_nic in self._nics) + tasks.run_task(['sudo', _RTE_PCI_TOOL, '--bind=vfio-pci'] + nics_list, + self._logger, 'Binding NICs %s...' % nics_list, True) + + except subprocess.CalledProcessError: + self._logger.error('Unable to bind NICs %s', self._nics) + + super(QemuPciPassthrough, self).start() + + def stop(self): + """ + Stop QEMU instance, bind host NICs to the original driver + """ + super(QemuPciPassthrough, self).stop() + + # bind original driver to every interface + for nic in self._nics: + if nic['driver']: + try: + tasks.run_task(['sudo', _RTE_PCI_TOOL, '--bind=' + nic['driver'], nic['pci']], + self._logger, 'Binding NIC %s...' 
% nic['pci'], True) + + except subprocess.CalledProcessError: + self._logger.error('Unable to bind NIC %s to driver %s', nic['pci'], nic['driver']) + + # unload vfio-pci driver + _MODULE_MANAGER.remove_modules() diff --git a/vnfs/vnf/vnf.py b/vnfs/vnf/vnf.py index 483faf38..1410a0c4 100644 --- a/vnfs/vnf/vnf.py +++ b/vnfs/vnf/vnf.py @@ -51,11 +51,12 @@ class IVnf(tasks.Process): """ Stops VNF instance. """ - self._logger.info('Killing VNF...') + if self.is_running(): + self._logger.info('Killing VNF...') - # force termination of VNF and wait for it to terminate; It will avoid - # sporadic reboot of host. (caused by hugepages or DPDK ports) - super(IVnf, self).kill(signal='-9', sleep=10) + # force termination of VNF and wait for it to terminate; It will avoid + # sporadic reboot of host. (caused by hugepages or DPDK ports) + super(IVnf, self).kill(signal='-9', sleep=10) def execute(self, cmd, delay=0): """ @@ -122,6 +123,19 @@ class IVnf(tasks.Process): self.execute(cmd) self.wait(prompt=prompt, timeout=timeout) + def validate_start(self, dummy_result): + """ Validate call of VNF start() + """ + if self._child and self._child.isalive(): + return True + else: + return False + + def validate_stop(self, result): + """ Validate call of fVNF stop() + """ + return not self.validate_start(result) + @staticmethod def reset_vnf_counter(): """ @@ -39,6 +39,8 @@ from core.loader import Loader from testcases import PerformanceTestCase from testcases import IntegrationTestCase from tools import tasks +from tools import networkcard +from tools import functions from tools.pkt_gen import trafficgen from tools.opnfvdashboard import opnfvdashboard from tools.pkt_gen.trafficgen.trafficgenhelper import TRAFFIC_DEFAULTS @@ -58,6 +60,8 @@ _TEMPLATE_RST = {'head' : 'tools/report/report_head.rst', 'tmp' : 'tools/report/report_tmp_caption.rst' } +_LOGGER = logging.getLogger() + def parse_arguments(): """ Parse command line arguments. 
@@ -154,7 +158,8 @@ def parse_arguments(): group.add_argument('-d', '--test-dir', help='directory containing tests') group.add_argument('-t', '--tests', help='Comma-separated list of terms \ indicating tests to run. e.g. "RFC2544,!p2p" - run all tests whose\ - name contains RFC2544 less those containing "p2p"') + name contains RFC2544 less those containing "p2p"; "!back2back" - \ + run all tests except those containing back2back') group.add_argument('--verbosity', choices=list_logging_levels(), help='debug level') group.add_argument('--integration', action='store_true', help='execute integration tests') @@ -194,18 +199,17 @@ def configure_logging(level): settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_TRAFFIC_GEN')) - logger = logging.getLogger() - logger.setLevel(logging.DEBUG) + _LOGGER.setLevel(logging.DEBUG) stream_logger = logging.StreamHandler(sys.stdout) stream_logger.setLevel(VERBOSITY_LEVELS[level]) stream_logger.setFormatter(logging.Formatter( - '[%(levelname)s] %(asctime)s : (%(name)s) - %(message)s')) - logger.addHandler(stream_logger) + '[%(levelname)-5s] %(asctime)s : (%(name)s) - %(message)s')) + _LOGGER.addHandler(stream_logger) file_logger = logging.FileHandler(filename=log_file_default) file_logger.setLevel(logging.DEBUG) - logger.addHandler(file_logger) + _LOGGER.addHandler(file_logger) class CommandFilter(logging.Filter): """Filter out strings beginning with 'cmd :'""" @@ -220,12 +224,12 @@ def configure_logging(level): cmd_logger = logging.FileHandler(filename=log_file_host_cmds) cmd_logger.setLevel(logging.DEBUG) cmd_logger.addFilter(CommandFilter()) - logger.addHandler(cmd_logger) + _LOGGER.addHandler(cmd_logger) gen_logger = logging.FileHandler(filename=log_file_traffic_gen) gen_logger.setLevel(logging.DEBUG) gen_logger.addFilter(TrafficGenCommandFilter()) - logger.addHandler(gen_logger) + _LOGGER.addHandler(gen_logger) def apply_filter(tests, tc_filter): @@ -242,7 +246,11 @@ def apply_filter(tests, tc_filter): e.g. 
'' - empty string selects all tests. :return: A list of the selected Tests. """ - result = [] + # if negative filter is first we have to start with full list of tests + if tc_filter.strip()[0] == '!': + result = tests + else: + result = [] if tc_filter is None: tc_filter = "" @@ -250,11 +258,11 @@ def apply_filter(tests, tc_filter): if not term or term[0] != '!': # Add matching tests from 'tests' into results result.extend([test for test in tests \ - if test.name.lower().find(term) >= 0]) + if test['Name'].lower().find(term) >= 0]) else: # Term begins with '!' so we remove matching tests result = [test for test in result \ - if test.name.lower().find(term[1:]) < 0] + if test['Name'].lower().find(term[1:]) < 0] return result @@ -267,26 +275,30 @@ def check_and_set_locale(): system_locale = locale.getdefaultlocale() if None in system_locale: os.environ['LC_ALL'] = settings.getValue('DEFAULT_LOCALE') - logging.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s", + _LOGGER.warning("Locale was not properly configured. Default values were set. Old locale: %s, New locale: %s", system_locale, locale.getdefaultlocale()) -def generate_final_report(path): +def generate_final_report(): """ Function will check if partial test results are available and generates final report in rst format. 
""" + path = settings.getValue('RESULTS_PATH') # check if there are any results in rst format rst_results = glob.glob(os.path.join(path, 'result*rst')) if len(rst_results): try: test_report = os.path.join(path, '{}_{}'.format(settings.getValue('VSWITCH'), _TEMPLATE_RST['final'])) # create report caption directly - it is not worth to execute jinja machinery + if settings.getValue('VSWITCH').lower() != 'none': + pkt_processor = Loader().get_vswitches()[settings.getValue('VSWITCH')].__doc__.strip().split('\n')[0] + else: + pkt_processor = Loader().get_pktfwds()[settings.getValue('PKTFWD')].__doc__.strip().split('\n')[0] report_caption = '{}\n{} {}\n{}\n\n'.format( '============================================================', 'Performance report for', - Loader().get_vswitches()[settings.getValue('VSWITCH')].__doc__.strip().split('\n')[0], - + pkt_processor, '============================================================') with open(_TEMPLATE_RST['tmp'], 'w') as file_: @@ -296,15 +308,143 @@ def generate_final_report(path): ' '.join(rst_results), _TEMPLATE_RST['foot'], test_report), shell=True) if retval == 0 and os.path.isfile(test_report): - logging.info('Overall test report written to "%s"', test_report) + _LOGGER.info('Overall test report written to "%s"', test_report) else: - logging.error('Generatrion of overall test report has failed.') + _LOGGER.error('Generatrion of overall test report has failed.') # remove temporary file os.remove(_TEMPLATE_RST['tmp']) except subprocess.CalledProcessError: - logging.error('Generatrion of overall test report has failed.') + _LOGGER.error('Generatrion of overall test report has failed.') + + +def enable_sriov(nic_list): + """ Enable SRIOV for given enhanced PCI IDs + + :param nic_list: A list of enhanced PCI IDs + """ + # detect if sriov is required + sriov_nic = {} + for nic in nic_list: + if networkcard.is_sriov_nic(nic): + tmp_nic = nic.split('|') + if tmp_nic[0] in sriov_nic: + if int(tmp_nic[1][2:]) > 
sriov_nic[tmp_nic[0]]: + sriov_nic[tmp_nic[0]] = int(tmp_nic[1][2:]) + else: + sriov_nic.update({tmp_nic[0] : int(tmp_nic[1][2:])}) + + # sriov is required for some NICs + if len(sriov_nic): + for nic in sriov_nic: + # check if SRIOV is supported and enough virt interfaces are available + if not networkcard.is_sriov_supported(nic) \ + or networkcard.get_sriov_numvfs(nic) <= sriov_nic[nic]: + # if not, enable and set appropriate number of VFs + if not networkcard.set_sriov_numvfs(nic, sriov_nic[nic] + 1): + _LOGGER.error("SRIOV cannot be enabled for NIC %s", nic) + raise + else: + _LOGGER.debug("SRIOV enabled for NIC %s", nic) + + # WORKAROUND: it has been observed with IXGBE(VF) driver, + # that NIC doesn't correclty dispatch traffic to VFs based + # on their MAC address. Unbind and bind to the same driver + # solves this issue. + networkcard.reinit_vfs(nic) + + # After SRIOV is enabled it takes some time until network drivers + # properly initialize all cards. + # Wait also in case, that SRIOV was already configured as it can be + # configured automatically just before vsperf execution. 
+ time.sleep(2) + + return True + + return False + + +def disable_sriov(nic_list): + """ Disable SRIOV for given PCI IDs + + :param nic_list: A list of enhanced PCI IDs + """ + for nic in nic_list: + if networkcard.is_sriov_nic(nic): + if not networkcard.set_sriov_numvfs(nic.split('|')[0], 0): + _LOGGER.error("SRIOV cannot be disabled for NIC %s", nic) + raise + else: + _LOGGER.debug("SRIOV disabled for NIC %s", nic.split('|')[0]) + + +def handle_list_options(args): + """ Process --list cli arguments if needed + + :param args: A dictionary with all CLI arguments + """ + if args['list_trafficgens']: + print(Loader().get_trafficgens_printable()) + sys.exit(0) + + if args['list_collectors']: + print(Loader().get_collectors_printable()) + sys.exit(0) + + if args['list_vswitches']: + print(Loader().get_vswitches_printable()) + sys.exit(0) + + if args['list_vnfs']: + print(Loader().get_vnfs_printable()) + sys.exit(0) + + if args['list_fwdapps']: + print(Loader().get_pktfwds_printable()) + sys.exit(0) + + if args['list_settings']: + print(str(settings)) + sys.exit(0) + + if args['list']: + # configure tests + if args['integration']: + testcases = settings.getValue('INTEGRATION_TESTS') + else: + testcases = settings.getValue('PERFORMANCE_TESTS') + + print("Available Tests:") + print("================") + + for test in testcases: + print('* %-30s %s' % ('%s:' % test['Name'], test['Description'])) + sys.exit(0) + + +def vsperf_finalize(): + """ Clean up before exit + """ + # remove directory if no result files were created + try: + results_path = settings.getValue('RESULTS_PATH') + if os.path.exists(results_path): + files_list = os.listdir(results_path) + if files_list == []: + _LOGGER.info("Removing empty result directory: " + results_path) + shutil.rmtree(results_path) + except AttributeError: + # skip it if parameter doesn't exist + pass + + # disable SRIOV if needed + try: + if settings.getValue('SRIOV_ENABLED'): + disable_sriov(settings.getValue('WHITELIST_NICS_ORIG')) 
+ except AttributeError: + # skip it if parameter doesn't exist + pass class MockTestCase(unittest.TestCase): @@ -362,29 +502,13 @@ def main(): # than both a settings file and environment variables settings.load_from_dict(args) - vswitch_none = False # set dpdk and ovs paths accorfing to VNF and VSWITCH - if settings.getValue('VSWITCH').endswith('Vanilla'): - # settings paths for Vanilla - settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_VANILLA'))) - elif settings.getValue('VSWITCH').endswith('Vhost'): - if settings.getValue('VNF').endswith('Cuse'): - # settings paths for Cuse - settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_CUSE'))) - settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_CUSE'))) - else: - # settings paths for VhostUser - settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER'))) - settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER'))) - else: - # default - set to VHOST USER but can be changed during enhancement - settings.setValue('RTE_SDK', (settings.getValue('RTE_SDK_USER'))) - settings.setValue('OVS_DIR', (settings.getValue('OVS_DIR_USER'))) - if 'none' == settings.getValue('VSWITCH').strip().lower(): - vswitch_none = True + functions.settings_update_paths() + + # if required, handle list-* operations + handle_list_options(args) configure_logging(settings.getValue('VERBOSITY')) - logger = logging.getLogger() # check and fix locale check_and_set_locale() @@ -393,12 +517,12 @@ def main(): if args['trafficgen']: trafficgens = Loader().get_trafficgens() if args['trafficgen'] not in trafficgens: - logging.error('There are no trafficgens matching \'%s\' found in' + _LOGGER.error('There are no trafficgens matching \'%s\' found in' ' \'%s\'. 
Exiting...', args['trafficgen'], settings.getValue('TRAFFICGEN_DIR')) sys.exit(1) - # configure vswitch + # configuration validity checks if args['vswitch']: vswitch_none = 'none' == args['vswitch'].strip().lower() if vswitch_none: @@ -406,7 +530,7 @@ def main(): else: vswitches = Loader().get_vswitches() if args['vswitch'] not in vswitches: - logging.error('There are no vswitches matching \'%s\' found in' + _LOGGER.error('There are no vswitches matching \'%s\' found in' ' \'%s\'. Exiting...', args['vswitch'], settings.getValue('VSWITCH_DIR')) sys.exit(1) @@ -415,7 +539,7 @@ def main(): settings.setValue('PKTFWD', args['fwdapp']) fwdapps = Loader().get_pktfwds() if args['fwdapp'] not in fwdapps: - logging.error('There are no forwarding application' + _LOGGER.error('There are no forwarding application' ' matching \'%s\' found in' ' \'%s\'. Exiting...', args['fwdapp'], settings.getValue('PKTFWD_DIR')) @@ -424,16 +548,45 @@ def main(): if args['vnf']: vnfs = Loader().get_vnfs() if args['vnf'] not in vnfs: - logging.error('there are no vnfs matching \'%s\' found in' + _LOGGER.error('there are no vnfs matching \'%s\' found in' ' \'%s\'. 
exiting...', args['vnf'], settings.getValue('vnf_dir')) sys.exit(1) + if args['exact_test_name'] and args['tests']: + _LOGGER.error("Cannot specify tests with both positional args and --test.") + sys.exit(1) + + # sriov handling + settings.setValue('SRIOV_ENABLED', enable_sriov(settings.getValue('WHITELIST_NICS'))) + + # modify NIC configuration to decode enhanced PCI IDs + wl_nics_orig = list(networkcard.check_pci(pci) for pci in settings.getValue('WHITELIST_NICS')) + settings.setValue('WHITELIST_NICS_ORIG', wl_nics_orig) + + nic_list = [] + for nic in wl_nics_orig: + tmp_nic = networkcard.get_nic_info(nic) + if tmp_nic: + nic_list.append({'pci' : tmp_nic, + 'type' : 'vf' if networkcard.get_sriov_pf(tmp_nic) else 'pf', + 'mac' : networkcard.get_mac(tmp_nic), + 'driver' : networkcard.get_driver(tmp_nic), + 'device' : networkcard.get_device_name(tmp_nic)}) + else: + _LOGGER.error("Invalid network card PCI ID: '%s'", nic) + vsperf_finalize() + raise + + settings.setValue('NICS', nic_list) + # for backward compatibility + settings.setValue('WHITELIST_NICS', list(nic['pci'] for nic in nic_list)) + # update global settings guest_loopback = get_test_param('guest_loopback', None) if guest_loopback: tmp_gl = [] - for i in range(len(settings.getValue('GUEST_LOOPBACK'))): + for dummy_i in range(len(settings.getValue('GUEST_LOOPBACK'))): tmp_gl.append(guest_loopback) settings.setValue('GUEST_LOOPBACK', tmp_gl) @@ -443,20 +596,21 @@ def main(): date = datetime.datetime.fromtimestamp(time.time()) results_dir = "results_" + date.strftime('%Y-%m-%d_%H-%M-%S') results_path = os.path.join(settings.getValue('LOG_DIR'), results_dir) + settings.setValue('RESULTS_PATH', results_path) # create results directory if not os.path.exists(results_path): - logger.info("Creating result directory: " + results_path) + _LOGGER.info("Creating result directory: " + results_path) os.makedirs(results_path) if settings.getValue('mode') == 'trafficgen': # execute only traffic generator - 
logging.debug("Executing traffic generator:") + _LOGGER.debug("Executing traffic generator:") loader = Loader() # set traffic details, so they can be passed to traffic ctl traffic = copy.deepcopy(TRAFFIC_DEFAULTS) traffic.update({'traffic_type': get_test_param('traffic_type', 'rfc2544'), - 'bidir': get_test_param('bidirectional', False), + 'bidir': get_test_param('bidirectional', 'False'), 'multistream': int(get_test_param('multistream', 0)), 'stream_type': get_test_param('stream_type', 'L4'), 'frame_rate': int(get_test_param('iload', 100))}) @@ -466,8 +620,12 @@ def main(): loader.get_trafficgen_class()) with traffic_ctl: traffic_ctl.send_traffic(traffic) - logging.debug("Traffic Results:") + _LOGGER.debug("Traffic Results:") traffic_ctl.print_results() + + # write results into CSV file + result_file = os.path.join(results_path, "result.csv") + PerformanceTestCase.write_result_to_file(traffic_ctl.get_results(), result_file) else: # configure tests if args['integration']: @@ -475,81 +633,41 @@ def main(): else: testcases = settings.getValue('PERFORMANCE_TESTS') - all_tests = [] - for cfg in testcases: - try: - if args['integration']: - all_tests.append(IntegrationTestCase(cfg, results_path)) - else: - all_tests.append(PerformanceTestCase(cfg, results_path)) - except (Exception) as _: - logger.exception("Failed to create test: %s", - cfg.get('Name', '<Name not set>')) - raise - - # if required, handle list-* operations - - if args['list']: - print("Available Tests:") - print("================") - for test in all_tests: - print('* %-30s %s' % ('%s:' % test.name, test.desc)) - exit() - - if args['list_trafficgens']: - print(Loader().get_trafficgens_printable()) - exit() - - if args['list_collectors']: - print(Loader().get_collectors_printable()) - exit() - - if args['list_vswitches']: - print(Loader().get_vswitches_printable()) - exit() - - if args['list_vnfs']: - print(Loader().get_vnfs_printable()) - exit() - - if args['list_settings']: - print(str(settings)) - 
exit() - - # select requested tests - if args['exact_test_name'] and args['tests']: - logger.error("Cannot specify tests with both positional args and --test.") - sys.exit(1) - if args['exact_test_name']: exact_names = args['exact_test_name'] # positional args => exact matches only - selected_tests = [test for test in all_tests if test.name in exact_names] + selected_tests = [test for test in testcases if test['Name'] in exact_names] elif args['tests']: # --tests => apply filter to select requested tests - selected_tests = apply_filter(all_tests, args['tests']) + selected_tests = apply_filter(testcases, args['tests']) else: # Default - run all tests - selected_tests = all_tests + selected_tests = testcases - if not selected_tests: - logger.error("No tests matched --test option or positional args. Done.") + if not len(selected_tests): + _LOGGER.error("No tests matched --tests option or positional args. Done.") + vsperf_finalize() sys.exit(1) # run tests suite = unittest.TestSuite() - for test in selected_tests: + for cfg in selected_tests: + test_name = cfg.get('Name', '<Name not set>') try: + if args['integration']: + test = IntegrationTestCase(cfg) + else: + test = PerformanceTestCase(cfg) test.run() suite.addTest(MockTestCase('', True, test.name)) #pylint: disable=broad-except except (Exception) as ex: - logger.exception("Failed to run test: %s", test.name) - suite.addTest(MockTestCase(str(ex), False, test.name)) - logger.info("Continuing with next test...") + _LOGGER.exception("Failed to run test: %s", test_name) + suite.addTest(MockTestCase(str(ex), False, test_name)) + _LOGGER.info("Continuing with next test...") # generate final rst report with results of all executed TCs - generate_final_report(results_path) + generate_final_report() if settings.getValue('XUNIT'): xmlrunner.XMLTestRunner( @@ -574,13 +692,9 @@ def main(): int_data['cuse'] = True opnfvdashboard.results2opnfv_dashboard(results_path, int_data) - #remove directory if no result files were created. 
- if os.path.exists(results_path): - files_list = os.listdir(results_path) - if files_list == []: - shutil.rmtree(results_path) + # cleanup before exit + vsperf_finalize() if __name__ == "__main__": main() - diff --git a/vswitches/ovs.py b/vswitches/ovs.py index 06dc7a1a..115ab19b 100644 --- a/vswitches/ovs.py +++ b/vswitches/ovs.py @@ -16,41 +16,83 @@ """ import logging +import os +import pexpect import re +import time + from conf import settings -from vswitches.vswitch import IVSwitch from src.ovs import OFBridge, flow_key, flow_match +from tools import tasks +from vswitches.vswitch import IVSwitch + +_OVS_VAR_DIR = settings.getValue('OVS_VAR_DIR') +_OVS_ETC_DIR = settings.getValue('OVS_ETC_DIR') -_VSWITCHD_CONST_ARGS = ['--', '--pidfile', '--log-file'] -class IVSwitchOvs(IVSwitch): +class IVSwitchOvs(IVSwitch, tasks.Process): """Open vSwitch base class implementation The method docstrings document only considerations specific to this implementation. For generic information of the nature of the methods, see the interface. """ + _logfile = os.path.join(settings.getValue('LOG_DIR'), settings.getValue('LOG_FILE_VSWITCHD')) + _ovsdb_pidfile_path = os.path.join(settings.getValue('LOG_DIR'), "ovsdb_pidfile.pid") + _vswitchd_pidfile_path = os.path.join(settings.getValue('LOG_DIR'), "vswitchd_pidfile.pid") + _proc_name = 'ovs-vswitchd' def __init__(self): """See IVswitch for general description """ - self._vswitchd = None self._logger = logging.getLogger(__name__) + self._expect = None + self._timeout = 30 self._bridges = {} - self._vswitchd_args = _VSWITCHD_CONST_ARGS + self._vswitchd_args = ['--pidfile=' + self._vswitchd_pidfile_path, + '--overwrite-pidfile', '--log-file=' + self._logfile] + self._cmd = [] + self._cmd_template = ['sudo', '-E', os.path.join(settings.getValue('OVS_DIR'), + 'vswitchd', 'ovs-vswitchd')] def start(self): - """See IVswitch for general description + """ Start ``ovsdb-server`` and ``ovs-vswitchd`` instance. 
+ + :raises: pexpect.EOF, pexpect.TIMEOUT """ self._logger.info("Starting vswitchd...") - self._vswitchd.start() + + self._cmd = self._cmd_template + self._vswitchd_args + + # DB must be started before vswitchd + self._reset_ovsdb() + self._start_ovsdb() + + # DB must be up before vswitchd config is altered or vswitchd started + time.sleep(3) + + self.configure() + + try: + tasks.Process.start(self) + self.relinquish() + except (pexpect.EOF, pexpect.TIMEOUT) as exc: + logging.error("Exception during VSwitch start.") + self._kill_ovsdb() + raise exc + self._logger.info("Vswitchd...Started.") + def configure(self): + """ Configure vswitchd through ovsdb if needed + """ + pass + def stop(self): """See IVswitch for general description """ self._logger.info("Terminating vswitchd...") - self._vswitchd.kill() + self.kill() self._logger.info("Vswitchd...Terminated.") def add_switch(self, switch_name, params=None): @@ -153,6 +195,137 @@ class IVSwitchOvs(IVSwitch): cnt = 0 return cnt + def disable_stp(self, switch_name): + """ + Disable stp protocol on the bridge + :param switch_name: bridge to disable stp + :return: None + """ + bridge = self._bridges[switch_name] + bridge.set_stp(False) + self._logger.info('Sleeping for 50 secs to allow stp to stop.') + time.sleep(50) # needs time to disable + + def enable_stp(self, switch_name): + """ + Enable stp protocol on the bridge + :param switch_name: bridge to enable stp + :return: None + """ + bridge = self._bridges[switch_name] + bridge.set_stp(True) + self._logger.info('Sleeping for 50 secs to allow stp to start.') + time.sleep(50) # needs time to enable + + def disable_rstp(self, switch_name): + """ + Disable rstp on the bridge + :param switch_name: bridge to disable rstp + :return: None + """ + bridge = self._bridges[switch_name] + bridge.set_rstp(False) + self._logger.info('Sleeping for 15 secs to allow rstp to stop.') + time.sleep(15) # needs time to disable + + def enable_rstp(self, switch_name): + """ + Enable rstp on 
the bridge + :param switch_name: bridge to enable rstp + :return: None + """ + bridge = self._bridges[switch_name] + bridge.set_rstp(True) + self._logger.info('Sleeping for 15 secs to allow rstp to start.') + time.sleep(15) # needs time to enable + + def kill(self, signal='-15', sleep=10): + """Kill ``ovs-vswitchd`` and ``ovs-ovsdb`` instances if they are alive. + + :returns: None + """ + if os.path.isfile(self._vswitchd_pidfile_path): + self._logger.info('Killing ovs-vswitchd...') + with open(self._vswitchd_pidfile_path, "r") as pidfile: + vswitchd_pid = pidfile.read().strip() + tasks.terminate_task(vswitchd_pid, logger=self._logger) + + self._kill_ovsdb() # ovsdb must be killed after vswitchd + + # just for case, that sudo envelope has not been terminated yet + tasks.Process.kill(self, signal, sleep) + + # helper functions + + def _reset_ovsdb(self): + """Reset system for 'ovsdb'. + + :returns: None + """ + self._logger.info('Resetting system after last run...') + + tasks.run_task(['sudo', 'rm', '-rf', _OVS_VAR_DIR], self._logger) + tasks.run_task(['sudo', 'mkdir', '-p', _OVS_VAR_DIR], self._logger) + tasks.run_task(['sudo', 'rm', '-rf', _OVS_ETC_DIR], self._logger) + tasks.run_task(['sudo', 'mkdir', '-p', _OVS_ETC_DIR], self._logger) + + tasks.run_task(['sudo', 'rm', '-f', + os.path.join(_OVS_ETC_DIR, 'conf.db')], + self._logger) + + self._logger.info('System reset after last run.') + + def _start_ovsdb(self): + """Start ``ovsdb-server`` instance. 
+ + :returns: None + """ + ovsdb_tool_bin = os.path.join( + settings.getValue('OVS_DIR'), 'ovsdb', 'ovsdb-tool') + tasks.run_task(['sudo', ovsdb_tool_bin, 'create', + os.path.join(_OVS_ETC_DIR, 'conf.db'), + os.path.join(settings.getValue('OVS_DIR'), 'vswitchd', + 'vswitch.ovsschema')], + self._logger, + 'Creating ovsdb configuration database...') + + ovsdb_server_bin = os.path.join( + settings.getValue('OVS_DIR'), 'ovsdb', 'ovsdb-server') + + tasks.run_background_task( + ['sudo', ovsdb_server_bin, + '--remote=punix:%s' % os.path.join(_OVS_VAR_DIR, 'db.sock'), + '--remote=db:Open_vSwitch,Open_vSwitch,manager_options', + '--pidfile=' + self._ovsdb_pidfile_path, '--overwrite-pidfile'], + self._logger, + 'Starting ovsdb-server...') + + def _kill_ovsdb(self): + """Kill ``ovsdb-server`` instance. + + :returns: None + """ + if os.path.isfile(self._ovsdb_pidfile_path): + with open(self._ovsdb_pidfile_path, "r") as pidfile: + ovsdb_pid = pidfile.read().strip() + + self._logger.info("Killing ovsdb with pid: " + ovsdb_pid) + + if ovsdb_pid: + tasks.terminate_task(ovsdb_pid, logger=self._logger) + + @staticmethod + def get_db_sock_path(): + """Method returns location of db.sock file + + :returns: path to db.sock file. 
+ """ + return os.path.join(_OVS_VAR_DIR, 'db.sock') + + # + # validate methods required for integration testcases + # + def validate_add_switch(self, result, switch_name, params=None): """Validate - Create a new logical switch with no ports """ @@ -227,3 +400,27 @@ class IVSwitchOvs(IVSwitch): """ Validate call of flow dump """ return True + + def validate_disable_rstp(self, result, switch_name): + """ Validate rstp disable + """ + bridge = self._bridges[switch_name] + return 'rstp_enable : false' in ''.join(bridge.bridge_info()) + + def validate_enable_rstp(self, result, switch_name): + """ Validate rstp enable + """ + bridge = self._bridges[switch_name] + return 'rstp_enable : true' in ''.join(bridge.bridge_info()) + + def validate_disable_stp(self, result, switch_name): + """ Validate stp disable + """ + bridge = self._bridges[switch_name] + return 'stp_enable : false' in ''.join(bridge.bridge_info()) + + def validate_enable_stp(self, result, switch_name): + """ Validate stp enable + """ + bridge = self._bridges[switch_name] + return 'stp_enable : true' in ''.join(bridge.bridge_info()) diff --git a/vswitches/ovs_dpdk_vhost.py b/vswitches/ovs_dpdk_vhost.py index 9d29c9d1..2d424bc5 100644 --- a/vswitches/ovs_dpdk_vhost.py +++ b/vswitches/ovs_dpdk_vhost.py @@ -16,10 +16,13 @@ """ import logging +import subprocess +import os + +from src.ovs import OFBridge +from src.dpdk import dpdk from conf import settings from vswitches.ovs import IVSwitchOvs -from src.ovs import VSwitchd -from src.dpdk import dpdk class OvsDpdkVhost(IVSwitchOvs): """ Open vSwitch with DPDK support @@ -36,16 +39,32 @@ class OvsDpdkVhost(IVSwitchOvs): def __init__(self): super(OvsDpdkVhost, self).__init__() self._logger = logging.getLogger(__name__) + self._expect = r'EAL: Master l*core \d+ is ready' + + vswitchd_args = [] + + # legacy DPDK configuration through --dpdk option of vswitchd + if self.old_dpdk_config(): + vswitchd_args = ['--dpdk'] + settings.getValue('VSWITCHD_DPDK_ARGS') + if 
self._vswitchd_args: + self._vswitchd_args = vswitchd_args + ['--'] + self._vswitchd_args + else: + self._vswitchd_args = vswitchd_args - self._vswitchd_args = ['--dpdk'] - self._vswitchd_args += settings.getValue('VSWITCHD_DPDK_ARGS') if settings.getValue('VNF').endswith('Cuse'): self._logger.info("Inserting VHOST Cuse modules into kernel...") dpdk.insert_vhost_modules() - self._vswitchd = VSwitchd(vswitchd_args=self._vswitchd_args, - expected_cmd= - r'EAL: Master l*core \d+ is ready') + def configure(self): + """ Configure vswitchd DPDK options through ovsdb if needed + """ + dpdk_config = settings.getValue('VSWITCHD_DPDK_CONFIG') + if dpdk_config and not self.old_dpdk_config(): + # enforce calls to ovs-vsctl with --no-wait + tmp_br = OFBridge(timeout=-1) + for option in dpdk_config: + tmp_br.set_db_attribute('Open_vSwitch', '.', + 'other_config:' + option, dpdk_config[option]) def start(self): """See IVswitch for general description @@ -90,8 +109,11 @@ class OvsDpdkVhost(IVSwitchOvs): dpdk_count = self._get_port_count('type=dpdk') port_name = 'dpdk' + str(dpdk_count) params = ['--', 'set', 'Interface', port_name, 'type=dpdk'] + # multi-queue enable + if int(settings.getValue('VSWITCH_MULTI_QUEUES')): + params += ['options:n_rxq={}'.format( + settings.getValue('VSWITCH_MULTI_QUEUES'))] of_port = bridge.add_port(port_name, params) - return (port_name, of_port) def add_vport(self, switch_name): @@ -111,7 +133,24 @@ class OvsDpdkVhost(IVSwitchOvs): vhost_count = self._get_port_count('type=dpdkvhostuser') port_name = 'dpdkvhostuser' + str(vhost_count) params = ['--', 'set', 'Interface', port_name, 'type=dpdkvhostuser'] - + # multi queue enable + if int(settings.getValue('VSWITCH_MULTI_QUEUES')): + params += ['options:n_rxq={}'.format( + settings.getValue('VSWITCH_MULTI_QUEUES'))] of_port = bridge.add_port(port_name, params) return (port_name, of_port) + + @staticmethod + def old_dpdk_config(): + """Checks if ovs-vswitchd uses legacy dpdk configuration via --dpdk 
option + + :returns: True if legacy --dpdk option is supported, otherwise it returns False + """ + + ovs_vswitchd_bin = os.path.join(settings.getValue('OVS_DIR'), 'vswitchd', 'ovs-vswitchd') + try: + subprocess.check_output(ovs_vswitchd_bin + r' --help | grep "\-\-dpdk"', shell=True) + return True + except subprocess.CalledProcessError: + return False diff --git a/vswitches/ovs_vanilla.py b/vswitches/ovs_vanilla.py index 6a380b1b..89023a79 100644 --- a/vswitches/ovs_vanilla.py +++ b/vswitches/ovs_vanilla.py @@ -18,7 +18,7 @@ import logging from conf import settings from vswitches.ovs import IVSwitchOvs -from src.ovs import VSwitchd, DPCtl +from src.ovs import DPCtl from tools.module_manager import ModuleManager from tools import tasks @@ -32,18 +32,16 @@ class OvsVanilla(IVSwitchOvs): see the interface definition. """ - _ports = settings.getValue('VSWITCH_VANILLA_PHY_PORT_NAMES') _current_id = 0 _vport_id = 0 def __init__(self): super(OvsVanilla, self).__init__() + self._ports = list(nic['device'] for nic in settings.getValue('NICS')) self._logger = logging.getLogger(__name__) - self._vswitchd_args = ["unix:%s" % VSwitchd.get_db_sock_path()] + self._vswitchd_args += ["unix:%s" % self.get_db_sock_path()] self._vswitchd_args += settings.getValue('VSWITCHD_VANILLA_ARGS') - self._vswitchd = VSwitchd(vswitchd_args=self._vswitchd_args, - expected_cmd="db.sock: connected") - self._bridges = {} + self._expect = "db.sock: connected" self._module_manager = ModuleManager() def start(self): @@ -77,8 +75,7 @@ class OvsVanilla(IVSwitchOvs): def add_phy_port(self, switch_name): """ - Method adds port based on configured VSWITCH_VANILLA_PHY_PORT_NAMES - stored in config file. + Method adds port based on detected device names. 
See IVswitch for general description """ @@ -89,16 +86,18 @@ class OvsVanilla(IVSwitchOvs): raise if not self._ports[self._current_id]: - self._logger.error("VSWITCH_VANILLA_PHY_PORT_NAMES not set") - raise ValueError("Invalid VSWITCH_VANILLA_PHY_PORT_NAMES") + self._logger.error("Can't detect device name for NIC %s", self._current_id) + raise ValueError("Invalid device name for %s" % self._current_id) bridge = self._bridges[switch_name] port_name = self._ports[self._current_id] params = [] # For PVP only - tasks.run_task(['sudo', 'ifconfig', port_name, '0'], + tasks.run_task(['sudo', 'ip', 'addr', 'flush', 'dev', port_name], self._logger, 'Remove IP', False) + tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', port_name, 'up'], + self._logger, 'Bring up ' + port_name, False) of_port = bridge.add_port(port_name, params) self._current_id += 1 @@ -122,7 +121,9 @@ class OvsVanilla(IVSwitchOvs): tap_name, 'mode', 'tap'], self._logger, 'Creating tap device...', False) - tasks.run_task(['sudo', 'ifconfig', tap_name, '0'], + tasks.run_task(['sudo', 'ip', 'addr', 'flush', 'dev', tap_name], + self._logger, 'Remove IP', False) + tasks.run_task(['sudo', 'ip', 'link', 'set', 'dev', tap_name, 'up'], self._logger, 'Bring up ' + tap_name, False) bridge = self._bridges[switch_name] diff --git a/yardstick/tests/p2p_back2back.yaml b/yardstick/tests/p2p_back2back.yaml new file mode 100644 index 00000000..39ab03e1 --- /dev/null +++ b/yardstick/tests/p2p_back2back.yaml @@ -0,0 +1,74 @@ +# Copyright 2016 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# VSPERF specific configuration file for execution of back2back traffic. +# Traffic executed by traffic generator is forwarded directly between +# interfaces connected to the traffic generator. So test will only +# benchmark the performance of OVS external bridge at controller node. +# For details about supported test options see following file from +# the yardstick repository: +# +# yardstick/benchmark/scenarios/networking/vsperf.py + +schema: "yardstick:task:0.1" + +scenarios: +- + type: Vsperf + options: + testname: 'rfc2544_p2p_back2back' + traffic_type: 'back2back' + pkt_sizes: '64' + bidirectional: 'True' + iload: 100 + duration: 30 + trafficgen_port1: 'eth1' + trafficgen_port2: 'eth3' + external_bridge: 'br-ex' + conf-file: '~/vsperf-yardstick.conf' + + host: vsperf.demo + + runner: + type: Sequence + scenario_option_name: pkt_sizes + sequence: + - 64 + - 128 + - 512 + - 1024 + - 1518 + sla: + metrics: 'b2b_frames' + b2b_frames: 1 + action: monitor + +context: + name: demo + image: vsperf + flavor: vsperf-flavor + user: ubuntu + + placement_groups: + pgrp1: + policy: "availability" + + servers: + vsperf: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.0.0/24' diff --git a/yardstick/tests/p2p_cont.yaml b/yardstick/tests/p2p_cont.yaml new file mode 100644 index 00000000..5cd63793 --- /dev/null +++ b/yardstick/tests/p2p_cont.yaml @@ -0,0 +1,74 @@ +# Copyright 2016 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# VSPERF specific configuration file for execution of continuous traffic. +# Traffic executed by traffic generator is forwarded directly between +# interfaces connected to the traffic generator. So test will only +# benchmark the performance of OVS external bridge at controller node. +# For details about supported test options see following file from +# the yardstick repository: +# +# yardstick/benchmark/scenarios/networking/vsperf.py + +schema: "yardstick:task:0.1" + +scenarios: +- + type: Vsperf + options: + testname: 'rfc2544_p2p_continuous' + traffic_type: 'continuous' + pkt_sizes: '64' + bidirectional: 'True' + iload: 100 + duration: 30 + trafficgen_port1: 'eth1' + trafficgen_port2: 'eth3' + external_bridge: 'br-ex' + conf-file: '~/vsperf-yardstick.conf' + + host: vsperf.demo + + runner: + type: Sequence + scenario_option_name: pkt_sizes + sequence: + - 64 + - 128 + - 512 + - 1024 + - 1518 + sla: + metrics: 'throughput_rx_fps' + throughput_rx_fps: 500000 + action: monitor + +context: + name: demo + image: vsperf + flavor: vsperf-flavor + user: ubuntu + + placement_groups: + pgrp1: + policy: "availability" + + servers: + vsperf: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.0.0/24' diff --git a/yardstick/tests/p2p_tput.yaml b/yardstick/tests/p2p_tput.yaml new file mode 100644 index 00000000..7a172450 --- /dev/null +++ b/yardstick/tests/p2p_tput.yaml @@ -0,0 +1,74 @@ +# Copyright 2016 Intel Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# VSPERF specific configuration file for execution of RFC2544 throughput +# traffic. Traffic executed by traffic generator is forwarded directly +# between interfaces connected to the traffic generator. So test will only +# benchmark the performance of OVS external bridge at controller node. +# For details about supported test options see following file from +# the yardstick repository: +# +# yardstick/benchmark/scenarios/networking/vsperf.py + +schema: "yardstick:task:0.1" + +scenarios: +- + type: Vsperf + options: + testname: 'rfc2544_p2p_tput' + traffic_type: 'rfc2544' + pkt_sizes: '64' + bidirectional: 'True' + iload: 100 + duration: 30 + trafficgen_port1: 'eth1' + trafficgen_port2: 'eth3' + external_bridge: 'br-ex' + conf-file: '~/vsperf-yardstick.conf' + + host: vsperf.demo + + runner: + type: Sequence + scenario_option_name: pkt_sizes + sequence: + - 64 + - 128 + - 512 + - 1024 + - 1518 + sla: + metrics: 'throughput_rx_fps' + throughput_rx_fps: 500000 + action: monitor + +context: + name: demo + image: vsperf + flavor: vsperf-flavor + user: ubuntu + + placement_groups: + pgrp1: + policy: "availability" + + servers: + vsperf: + floating_ip: true + placement: "pgrp1" + + networks: + test: + cidr: '10.0.0.0/24' |