diff options
387 files changed, 64854 insertions, 350 deletions
@@ -25,8 +25,9 @@ ACL := $(VNF_DIR)/vACL FW := $(VNF_DIR)/vFW CGNAPT := $(VNF_DIR)/vCGNAPT UDP_Replay := $(VNF_DIR)/UDP_Replay +PROX := $(VNF_DIR)/DPPD-PROX -subdirs := $(ACL) $(CGNAPT) $(FW) $(UDP_Replay) +subdirs := $(ACL) $(CGNAPT) $(FW) $(UDP_Replay) ${PROX} .PHONY: $(TARGETS) $(subdirs) diff --git a/VNFs/DPPD-PROX/LICENSE.ALv2 b/VNFs/DPPD-PROX/LICENSE.ALv2 new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/VNFs/DPPD-PROX/LICENSE.ALv2 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/VNFs/DPPD-PROX/Makefile b/VNFs/DPPD-PROX/Makefile new file mode 100644 index 00000000..0288181c --- /dev/null +++ b/VNFs/DPPD-PROX/Makefile @@ -0,0 +1,204 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-native-linuxapp-gcc + +rte_version_h := $(RTE_SDK)/$(RTE_TARGET)/include/rte_version.h +rte_ver_part = $(shell sed -n -e 's/^\#define\s*$1\s*\(.*\)$$/\1/p' $(rte_version_h)) +rte_ver_eval = $(shell printf '%u' $$(printf '0x%02x%02x%02x%02x' $1 $2 $3 $4)) +rte_ver_MMLR = $(call rte_ver_eval,$(call \ + rte_ver_part,RTE_VER_MAJOR),$(call \ + rte_ver_part,RTE_VER_MINOR),$(call \ + rte_ver_part,RTE_VER_PATCH_LEVEL),$(call \ + rte_ver_part,RTE_VER_PATCH_RELEASE)) +rte_ver_YMMR = $(call rte_ver_eval,$(call \ + rte_ver_part,RTE_VER_YEAR),$(call \ + rte_ver_part,RTE_VER_MONTH),$(call \ + rte_ver_part,RTE_VER_MINOR),$(call \ + rte_ver_part,RTE_VER_RELEASE)) +rte_ver_dpdk := $(if $(call rte_ver_part,RTE_VER_MAJOR),$(rte_ver_MMLR),$(rte_ver_YMMR)) +rte_ver_comp = $(shell test $(rte_ver_dpdk) $5 $(call rte_ver_eval,$1,$2,$3,$4) && echo 'y') +rte_ver_EQ = $(call rte_ver_comp,$1,$2,$3,$4,-eq) +rte_ver_NE = $(call rte_ver_comp,$1,$2,$3,$4,-ne) +rte_ver_GT = $(call rte_ver_comp,$1,$2,$3,$4,-gt) +rte_ver_LT = $(call rte_ver_comp,$1,$2,$3,$4,-lt) +rte_ver_GE = $(call rte_ver_comp,$1,$2,$3,$4,-ge) +rte_ver_LE = $(call rte_ver_comp,$1,$2,$3,$4,-le) + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = prox +CFLAGS += -DPROGRAM_NAME=\"$(APP)\" + +CFLAGS += -O2 -g +CFLAGS += -fno-stack-protector -Wno-deprecated-declarations + +ifeq ($(BNG_QINQ),) +CFLAGS += -DUSE_QINQ +else ifeq ($(BNG_QINQ),y) +CFLAGS += -DUSE_QINQ +endif + +ifeq ($(MPLS_ROUTING),) +CFLAGS += -DMPLS_ROUTING +else ifeq ($(MPLS_ROUTING),y) +CFLAGS += -DMPLS_ROUTING +endif + +LD_LUA = $(shell pkg-config --silence-errors --libs-only-l lua) +CFLAGS += $(shell pkg-config --silence-errors --cflags lua) +ifeq ($(LD_LUA),) +LD_LUA = $(shell pkg-config --silence-errors --libs-only-l lua5.2) +CFLAGS += $(shell pkg-config --silence-errors --cflags 
lua5.2) +ifeq ($(LD_LUA),) +LD_LUA = $(shell pkg-config --silence-errors --libs-only-l lua5.3) +CFLAGS += $(shell pkg-config --silence-errors --cflags lua5.3) +ifeq ($(LD_LUA),) +LD_LUA =-llua +endif +endif +endif + +LD_TINFO = $(shell pkg-config --silence-errors --libs-only-l tinfo) +LDFLAGS += -lpcap $(LD_TINFO) $(LD_LUA) +LDFLAGS += -lncurses -lncursesw -ledit + +PROX_STATS ?= y +ifeq ($(PROX_STATS),y) +CFLAGS += -DPROX_STATS +endif + +ifeq ($(DPI_STATS),y) +CFLAGS += -DDPI_STATS +endif + +ifeq ($(HW_DIRECT_STATS),y) +CFLAGS += -DPROX_HW_DIRECT_STATS +endif + +ifeq ($(dbg),y) +EXTRA_CFLAGS += -ggdb +endif + +ifeq ($(log),) +CFLAGS += -DPROX_MAX_LOG_LVL=2 +else +CFLAGS += -DPROX_MAX_LOG_LVL=$(log) +endif + +# override any use-case/enviroment specific choices regarding crc and +# always use the sw implementation +ifeq ($(crc),soft) +CFLAGS += -DSOFT_CRC +endif + +CFLAGS += -DPROX_PREFETCH_OFFSET=2 +#CFLAGS += -DBRAS_RX_BULK +#CFLAGS += -DASSERT +#CFLAGS += -DENABLE_EXTRA_USER_STATISTICS +CFLAGS += -DLATENCY_PER_PACKET +CFLAGS += -DLATENCY_DETAILS +CFLAGS += -DGRE_TP +CFLAGS += -std=gnu99 +CFLAGS += -D_GNU_SOURCE # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -Wno-unused +CFLAGS += -Wno-unused-parameter +CFLAGS += -Wno-unused-result + +# all source are stored in SRCS-y + +SRCS-y := task_init.c + +SRCS-y += handle_aggregator.c +SRCS-y += handle_nop.c +SRCS-y += handle_irq.c +SRCS-y += handle_arp.c +SRCS-y += handle_impair.c +SRCS-y += handle_lat.c +SRCS-y += handle_qos.c +SRCS-y += handle_qinq_decap4.c +SRCS-y += handle_routing.c +SRCS-y += handle_untag.c +SRCS-y += handle_mplstag.c +SRCS-y += handle_qinq_decap6.c + +# support for GRE encap/decap dropped in latest DPDK versions +SRCS-$(call rte_ver_LT,2,1,0,0) += handle_gre_decap_encap.c + +SRCS-y += rw_reg.c +SRCS-y += handle_lb_qinq.c +SRCS-y += handle_lb_pos.c +SRCS-y += handle_lb_net.c +SRCS-y += handle_qinq_encap4.c +SRCS-y += handle_qinq_encap6.c +SRCS-y += handle_classify.c 
+SRCS-y += handle_l2fwd.c +SRCS-y += handle_swap.c +SRCS-y += handle_police.c +SRCS-y += handle_acl.c +SRCS-y += handle_gen.c +SRCS-y += handle_mirror.c +SRCS-y += handle_genl4.c +SRCS-y += handle_ipv6_tunnel.c +SRCS-y += handle_read.c +SRCS-y += handle_cgnat.c +SRCS-y += handle_nat.c +SRCS-y += handle_dump.c +SRCS-y += handle_tsc.c +SRCS-y += handle_fm.c +SRCS-$(call rte_ver_GE,1,8,0,16) += handle_nsh.c +SRCS-y += handle_lb_5tuple.c +SRCS-y += handle_blockudp.c +SRCS-y += toeplitz.c +SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += handle_pf_acl.c + +SRCS-y += thread_nop.c +SRCS-y += thread_generic.c +SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread_pipeline.c + +SRCS-y += prox_args.c prox_cfg.c prox_cksum.c prox_port_cfg.c + +SRCS-y += cfgfile.c clock.c commands.c cqm.c msr.c defaults.c +SRCS-y += display.c display_latency.c display_mempools.c +SRCS-y += display_ports.c display_rings.c display_priority.c display_pkt_len.c display_l4gen.c display_tasks.c +SRCS-y += log.c hash_utils.c main.c parse_utils.c file_utils.c +SRCS-y += run.c input_conn.c input_curses.c +SRCS-y += rx_pkt.c lconf.c tx_pkt.c expire_cpe.c ip_subnet.c +SRCS-y += stats_port.c stats_mempool.c stats_ring.c stats_l4gen.c +SRCS-y += stats_latency.c stats_global.c stats_core.c stats_task.c stats_prio.c +SRCS-y += cmd_parser.c input.c prox_shared.c prox_lua_types.c +SRCS-y += genl4_bundle.c heap.c genl4_stream_tcp.c genl4_stream_udp.c cdf.c +SRCS-y += stats.c stats_cons_log.c stats_cons_cli.c stats_parser.c hash_set.c prox_lua.c prox_malloc.c + +ifeq ($(FIRST_PROX_MAKE),) +MAKEFLAGS += --no-print-directory +FIRST_PROX_MAKE = 1 +export FIRST_PROX_MAKE +all: + @./helper-scripts/trailing.sh + @$(MAKE) $@ +%:: + @$(MAKE) $@ +else +include $(RTE_SDK)/mk/rte.extapp.mk +endif diff --git a/VNFs/DPPD-PROX/README b/VNFs/DPPD-PROX/README new file mode 100644 index 00000000..a09873cd --- /dev/null +++ b/VNFs/DPPD-PROX/README @@ -0,0 +1,117 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache 
License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +Description +----------- +This is PROX, the Packet pROcessing eXecution engine, part of Intel(R) +Data Plane Performance Demonstrators, and formerly known as DPPD-BNG. +PROX is a DPDK-based application implementing Telco use-cases such as +a simplified BRAS/BNG, light-weight AFTR... It also allows configuring +finer grained network functions like QoS, Routing, load-balancing... + +Compiling and running this application +-------------------------------------- +This application supports DPDK 16.04, 16.11, 16.11.1, 17.02 and 17.05. +The following commands assume that the following variables have been set: + +export RTE_SDK=/path/to/dpdk +export RTE_TARGET=x86_64-native-linuxapp-gcc + +Example: DPDK 17.05 installation +-------------------------------- +git clone http://dpdk.org/git/dpdk +cd dpdk +git checkout v17.05 +make install T=$RTE_TARGET + +PROX compilation +---------------- +The Makefile with this application expects RTE_SDK to point to the +root directory of DPDK (e.g. export RTE_SDK=/root/dpdk). If RTE_TARGET +has not been set, x86_64-native-linuxapp-gcc will be assumed. + +Running PROX +------------ +After DPDK has been set up, run make from the directory where you have +extracted this application. A build directory will be created +containing the PROX executable. The usage of the application is shown +below. Note that this application assumes that all required ports have +been bound to the DPDK provided igb_uio driver. 
Refer to the "Getting +Started Guide - DPDK" document for more details. + +Usage: ./build/prox [-f CONFIG_FILE] [-l LOG_FILE] [-p] [-o DISPLAY] [-v] [-a|-e] \ + [-m|-s|-i] [-n] [-w DEF] [-q] [-k] [-d] [-z] [-r VAL] [-u] [-t] + -f CONFIG_FILE : configuration file to load, ./prox.cfg by default + -l LOG_FILE : log file name, ./prox.log by default + -p : include PID in log file name if default log file is used + -o DISPLAY: Set display to use, can be 'curses' (default), 'cli' or 'none' + -v verbosity : initial logging verbosity + -a : autostart all cores (by default) + -e : don't autostart + -n : Create NULL devices instead of using PCI devices, useful together with -i + -m : list supported task modes and exit + -s : check configuration file syntax and exit + -i : check initialization sequence and exit + -u : Listen on UDS /tmp/prox.sock + -t : Listen on TCP port 8474 + -q : Pass argument to Lua interpreter, useful to define variables + -w : define variable using syntax varname=value + takes precedence over variables defined in CONFIG_FILE + -k : Log statistics to file "stats_dump" in current directory + -d : Run as daemon, the parent process will block until PROX is not initialized + -z : Ignore CPU topology, implies -i + -r : Change initial screen refresh rate. If set to a lower than 0.001 seconds, + screen refreshing will be disabled + +While applications using DPDK typically rely on the core mask and the +number of channels to be specified on the command line, this +application is configured using a .cfg file. The core mask and number +of channels is derived from this config. For example, to run the +application from the source directory execute: + + user@target:~$ ./build/prox -f ./config/nop.cfg + +Provided example configurations +------------------------------- +PROX can be configured either as the SUT (System Under Test) or as the +Traffic Generator. 
Some example configuration files are provided, both +in the config directory to run PROX as a SUT, and in the gen directory +to run it as a Traffic Generator. +A quick description of these example configurations is provided below. +Additional details are provided in the example configuration files. + +Basic configurations, mostly used as sanity check: +- config/nop.cfg +- config/nop-rings.cfg +- gen/nop-gen.cfg + +Simplified BNG (Border Network Gateway) configurations, using different +number of ports, with and without QoS, running on the host or in a VM: +- config/bng-4ports.cfg +- config/bng-8ports.cfg +- config/bng-qos-4ports.cfg +- config/bng-qos-8ports.cfg +- config/bng-1q-4ports.cfg +- config/bng-ovs-usv-4ports.cfg +- config/bng-no-cpu-topology-4ports.cfg +- gen/bng-4ports-gen.cfg +- gen/bng-8ports-gen.cfg +- gen/bng-ovs-usv-4ports-gen.cfg + +Light-weight AFTR configurations: +- config/lw_aftr.cfg +- gen/lw_aftr-gen.cfg + diff --git a/VNFs/DPPD-PROX/acl_field_def.h b/VNFs/DPPD-PROX/acl_field_def.h new file mode 100644 index 00000000..ede5bea7 --- /dev/null +++ b/VNFs/DPPD-PROX/acl_field_def.h @@ -0,0 +1,152 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _ACL_FIELD_DEF_H_ +#define _ACL_FIELD_DEF_H_ + +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_udp.h> + +#include "qinq.h" + +struct pkt_eth_ipv4_udp { + struct ether_hdr ether_hdr; + struct ipv4_hdr ipv4_hdr; + struct udp_hdr udp_hdr; +} __attribute__((packed)); + +static struct rte_acl_field_def pkt_eth_ipv4_udp_defs[] = { + /* first input field - always one byte long. */ + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof (uint8_t), + .field_index = 0, + .input_index = 0, + .offset = offsetof (struct pkt_eth_ipv4_udp, ipv4_hdr.next_proto_id), + }, + /* IPv4 source address. */ + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof (uint32_t), + .field_index = 1, + .input_index = 1, + .offset = offsetof (struct pkt_eth_ipv4_udp, ipv4_hdr.src_addr), + }, + /* IPv4 destination address */ + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof (uint32_t), + .field_index = 2, + .input_index = 2, + .offset = offsetof (struct pkt_eth_ipv4_udp, ipv4_hdr.dst_addr), + }, + /* (L4 src/dst port) - 4 consecutive bytes. */ + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof (uint16_t), + .field_index = 3, + .input_index = 3, + .offset = offsetof (struct pkt_eth_ipv4_udp, udp_hdr.src_port), + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof (uint16_t), + .field_index = 4, + .input_index = 3, + .offset = offsetof (struct pkt_eth_ipv4_udp, udp_hdr.dst_port), + }, +}; + +struct pkt_qinq_ipv4_udp { + struct qinq_hdr qinq_hdr; + struct ipv4_hdr ipv4_hdr; + struct udp_hdr udp_hdr; +}; + +static struct rte_acl_field_def pkt_qinq_ipv4_udp_defs[] = { + /* first input field - always one byte long. */ + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof (uint8_t), + .field_index = 0, + .input_index = 0, + .offset = offsetof (struct pkt_qinq_ipv4_udp, ipv4_hdr.next_proto_id), + }, + /* IPv4 source address. 
*/ + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof (uint32_t), + .field_index = 1, + .input_index = 1, + .offset = offsetof (struct pkt_qinq_ipv4_udp, ipv4_hdr.src_addr), + }, + /* IPv4 destination address */ + { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof (uint32_t), + .field_index = 2, + .input_index = 2, + .offset = offsetof (struct pkt_qinq_ipv4_udp, ipv4_hdr.dst_addr), + }, + /* (L4 src/dst port) - 4 consecutive bytes. */ + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof (uint16_t), + .field_index = 3, + .input_index = 3, + .offset = offsetof (struct pkt_qinq_ipv4_udp, udp_hdr.src_port), + }, + { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof (uint16_t), + .field_index = 4, + .input_index = 3, + .offset = offsetof (struct pkt_qinq_ipv4_udp, udp_hdr.dst_port), + }, + /* (SVLAN id + eth type) - 4 consecutive bytes. */ + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint16_t), + .field_index = 5, + .input_index = 4, + .offset = offsetof (struct pkt_qinq_ipv4_udp, qinq_hdr.svlan.eth_proto), + }, + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint16_t), + .field_index = 6, + .input_index = 4, + .offset = offsetof (struct pkt_qinq_ipv4_udp, qinq_hdr.svlan.vlan_tci), + }, + /* (CVLAN id + eth type) - 4 consecutive byates. 
*/ + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint16_t), + .field_index = 7, + .input_index = 5, + .offset = offsetof (struct pkt_qinq_ipv4_udp, qinq_hdr.cvlan.eth_proto), + }, + { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint16_t), + .field_index = 8, + .input_index = 5, + .offset = offsetof (struct pkt_qinq_ipv4_udp, qinq_hdr.cvlan.vlan_tci), + }, +}; + +#endif /* _ACL_FIELD_DEF_H_ */ diff --git a/VNFs/DPPD-PROX/arp.h b/VNFs/DPPD-PROX/arp.h new file mode 100644 index 00000000..279bdada --- /dev/null +++ b/VNFs/DPPD-PROX/arp.h @@ -0,0 +1,72 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _ARP_H_ +#define _ARP_H_ + +#include <rte_ether.h> + +#define ARP_REQUEST 0x100 +#define ARP_REPLY 0x200 + +struct _arp_ipv4 { + struct ether_addr sha; /* Sender hardware address */ + uint32_t spa; /* Sender protocol address */ + struct ether_addr tha; /* Target hardware address */ + uint32_t tpa; /* Target protocol address */ +} __attribute__((__packed__)); +typedef struct _arp_ipv4 arp_ipv4_t; + +struct my_arp_t { + uint16_t htype; + uint16_t ptype; + uint8_t hlen; + uint8_t plen; + uint16_t oper; + arp_ipv4_t data; +} __attribute__((__packed__)); + +struct ether_hdr_arp { + struct ether_hdr ether_hdr; + struct my_arp_t arp; +}; + +static int arp_is_gratuitous(struct ether_hdr_arp *hdr) +{ + return hdr->arp.data.spa == hdr->arp.data.tpa; +} + +static inline void prepare_arp_reply(struct ether_hdr_arp *hdr_arp, struct ether_addr *s_addr) +{ + uint32_t ip_source = hdr_arp->arp.data.spa; + + hdr_arp->arp.data.spa = hdr_arp->arp.data.tpa; + hdr_arp->arp.data.tpa = ip_source; + hdr_arp->arp.oper = 0x200; + memcpy(&hdr_arp->arp.data.tha, &hdr_arp->arp.data.sha, sizeof(struct ether_addr)); + memcpy(&hdr_arp->arp.data.sha, s_addr, sizeof(struct ether_addr)); +} + +static void create_mac(struct ether_hdr_arp *hdr, struct ether_addr *addr) +{ + addr->addr_bytes[0] = 0x2; + addr->addr_bytes[1] = 0; + // Instead of sending a completely random MAC address, create the following MAC: + // 02:00:x1:x2:x3:x4 where x1:x2:x3:x4 is the IP address + memcpy(addr->addr_bytes + 2, (uint32_t *)&hdr->arp.data.tpa, 4); +} + +#endif /* _ARP_H_ */ diff --git a/VNFs/DPPD-PROX/bng_pkts.h b/VNFs/DPPD-PROX/bng_pkts.h new file mode 100644 index 00000000..82e6199c --- /dev/null +++ b/VNFs/DPPD-PROX/bng_pkts.h @@ -0,0 +1,114 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _BNG_PKTS_H_ +#define _BNG_PKTS_H_ + +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_byteorder.h> + +#include "gre.h" +#include "mpls.h" +#include "qinq.h" +#include "arp.h" +#include "hash_entry_types.h" + +struct cpe_pkt { +#ifdef USE_QINQ + struct qinq_hdr qinq_hdr; +#else + struct ether_hdr ether_hdr; +#endif + struct ipv4_hdr ipv4_hdr; + struct udp_hdr udp_hdr; +} __attribute__((packed)); + +struct cpe_packet_arp { + struct qinq_hdr qinq_hdr; + struct my_arp_t arp; +} __attribute__((packed)); + +/* Struct used for setting all the values a packet + going to the core netwerk. Payload may follow + after the headers, but no need to touch that. 
*/ +struct core_net_pkt_m { + struct ether_hdr ether_hdr; +#ifdef MPLS_ROUTING + union { + struct mpls_hdr mpls; + uint32_t mpls_bytes; + }; +#endif + struct ipv4_hdr tunnel_ip_hdr; + struct gre_hdr gre_hdr; + struct ipv4_hdr ip_hdr; + struct udp_hdr udp_hdr; +} __attribute__((packed)); + +struct core_net_pkt { + struct ether_hdr ether_hdr; + struct ipv4_hdr tunnel_ip_hdr; + struct gre_hdr gre_hdr; + struct ipv4_hdr ip_hdr; + struct udp_hdr udp_hdr; +} __attribute__((packed)); + +#define UPSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt) - sizeof(struct cpe_pkt))) +#define DOWNSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt_m) - sizeof(struct cpe_pkt))) + +struct cpe_pkt_delta { + uint8_t encap[DOWNSTREAM_DELTA]; + struct cpe_pkt pkt; +} __attribute__((packed)); + +static inline void extract_key_cpe(struct rte_mbuf *mbuf, uint64_t* key) +{ + uint8_t* packet = rte_pktmbuf_mtod(mbuf, uint8_t*); +#ifdef USE_QINQ + *key = (*(uint64_t *)(packet + 12)) & 0xFF0FFFFFFF0FFFFF; +#else + *key = rte_bswap32(*(uint32_t *)(packet + 26)) & 0x00FFFFFF; +#endif +} + +static inline void key_core(struct gre_hdr* gre, __attribute__((unused)) struct ipv4_hdr* ip, uint64_t* key) +{ + struct cpe_key *cpe_key = (struct cpe_key*)key; + + cpe_key->gre_id = rte_be_to_cpu_32(gre->gre_id) & 0xFFFFFFF; + +#ifdef USE_QINQ + cpe_key->ip = ip->dst_addr; +#else + cpe_key->ip = 0; +#endif +} + +static inline void extract_key_core(struct rte_mbuf *mbuf, uint64_t* key) +{ + struct core_net_pkt *packet = rte_pktmbuf_mtod(mbuf, struct core_net_pkt *); + key_core(&packet->gre_hdr, &packet->ip_hdr, key); +} + +static inline void extract_key_core_m(struct rte_mbuf *mbuf, uint64_t* key) +{ + struct core_net_pkt_m *packet = rte_pktmbuf_mtod(mbuf, struct core_net_pkt_m *); + key_core(&packet->gre_hdr, &packet->ip_hdr, key); +} + +#endif /* _BNG_PKTS_H_ */ diff --git a/VNFs/DPPD-PROX/cdf.c b/VNFs/DPPD-PROX/cdf.c new file mode 100644 index 00000000..1de40314 --- /dev/null +++ b/VNFs/DPPD-PROX/cdf.c @@ 
-0,0 +1,148 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <stdlib.h> +#include <inttypes.h> + +#include <rte_cycles.h> + +#include "prox_malloc.h" +#include "cdf.h" + +static uint32_t round_pow2(uint32_t val) +{ + uint32_t ret; + uint32_t s = 1 << 31; + + while ((s & val) == 0) + s = s >> 1; + if (s == 1U << 31 && s != val) + return 0; + + ret = val; + if (s != ret) + ret = (s << 1); + + return ret; +} + +static uint32_t get_r_max(struct cdf *cdf, uint32_t cur) +{ + uint32_t right_child = cur; + + do { + cur = right_child; + right_child = cur * 2 + 1; + } while (right_child < cdf->elems[0]); + + return cdf->elems[cur]; +} + +struct cdf *cdf_create(uint32_t n_vals, int socket_id) +{ + struct cdf *ret; + size_t mem_size = 0; + uint32_t n_vals_round = round_pow2(n_vals); + + if (0 == n_vals_round) + return NULL; + + mem_size += sizeof(struct cdf); + mem_size += sizeof(((struct cdf *)(0))->elems[0]) * n_vals_round * 2; + ret = prox_zmalloc(mem_size, socket_id); + ret->elems[0] = n_vals; + + /* leafs are [n_vals, 2 * n_vals[. During cdf_add() and + cdf_setup(), rand_max refers to the index of the next leaf + to be added. 
*/ + ret->rand_max = n_vals_round; + ret->first_child = n_vals_round; + ret->seed = rte_rdtsc(); + + return ret; +} + +void cdf_add(struct cdf *cdf, uint32_t len) +{ + cdf->elems[cdf->rand_max++] = len; +} + +int cdf_setup(struct cdf *cdf) +{ + uint32_t last_leaf, first_leaf; + uint32_t first_parent, last_parent; + uint32_t total, multiplier, cur, end; + + if (cdf->elems[0] == 1) { + cdf->rand_max = RAND_MAX; + cdf->elems[1] = RAND_MAX; + cdf->elems[0] = 2; + return 0; + } + + last_leaf = cdf->rand_max; + first_leaf = round_pow2(cdf->elems[0]); + /* Failed to add all elements through cdf_add() */ + if (last_leaf - first_leaf != cdf->elems[0]) + return -1; + + total = 0; + for (uint32_t i = first_leaf; i < last_leaf; ++i) { + total += cdf->elems[i]; + } + + multiplier = RAND_MAX / total; + if (multiplier * total == RAND_MAX) + multiplier--; + cdf->rand_max = multiplier * total; + total = 0; + for (uint32_t i = first_leaf; i < last_leaf; ++i) { + uint32_t cur = cdf->elems[i]; + + /* Each element represents the range between previous + total (non-inclusive) and new total (inclusive). */ + total += cur * multiplier - 1; + cdf->elems[i] = total; + total += 1; + } + end = round_pow2(first_leaf) << 1; + for (uint32_t i = last_leaf; i < end; ++i) { + cdf->elems[i] = RAND_MAX; + } + cdf->first_child = first_leaf; + cdf->elems[0] = end; + + /* Build the binary tree used at run-time. */ + last_leaf = end - 1; + do { + first_parent = first_leaf/2; + last_parent = last_leaf/2; + + for (uint32_t i = first_parent; i <= last_parent; ++i) { + /* The current nodes value should be the + biggest value accessible through its left + child. This value is stored in the right + most child of the left child. The left most + child of the right child is the first value + that can not be accessed through the left + child. 
*/ + cdf->elems[i] = get_r_max(cdf, i * 2); + } + first_leaf = first_parent; + last_leaf = last_parent; + } while (first_parent != last_parent); + return 0; +} diff --git a/VNFs/DPPD-PROX/cdf.h b/VNFs/DPPD-PROX/cdf.h new file mode 100644 index 00000000..821c71bf --- /dev/null +++ b/VNFs/DPPD-PROX/cdf.h @@ -0,0 +1,49 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +struct cdf { + uint32_t rand_max; + uint32_t seed; + uint32_t first_child; + uint32_t elems[0]; +}; + +struct cdf *cdf_create(uint32_t n_vals, int socket_id); +void cdf_add(struct cdf *cdf, uint32_t len); +int cdf_setup(struct cdf *cdf); + +static uint32_t cdf_sample(struct cdf *cdf) +{ + uint32_t left_child, right_child; + uint32_t rand; + + do { + rand = rand_r(&cdf->seed); + } while (rand > cdf->rand_max); + + uint32_t cur = 1; + + while (1) { + left_child = cur * 2; + right_child = cur * 2 + 1; + if (right_child < cdf->elems[0]) + cur = rand > cdf->elems[cur]? 
right_child : left_child; + else if (left_child < cdf->elems[0]) + cur = left_child; + else + return cur - cdf->first_child; + } +} diff --git a/VNFs/DPPD-PROX/cfgfile.c b/VNFs/DPPD-PROX/cfgfile.c new file mode 100644 index 00000000..80a90937 --- /dev/null +++ b/VNFs/DPPD-PROX/cfgfile.c @@ -0,0 +1,339 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "cfgfile.h" + +#include <rte_string_fns.h> +#include <stdlib.h> +#include <string.h> +#include <ctype.h> +#include <errno.h> +#include <unistd.h> + +#include "parse_utils.h" +#include "log.h" +#include "quit.h" + +#define UINT32_MAX_STR "4294967295" + +/* + * Allocate cfg_file structure. + * Returns pointer to the allocated structure, NULL otherwise. 
+ */ +struct cfg_file *cfg_open(const char *cfg_name) +{ + if (cfg_name == NULL) { + plog_err("\tNo config file name provided\n"); + return NULL; + } + if (access(cfg_name, F_OK)) { + plog_err("\tError opening config file '%s': %s\n", cfg_name, strerror(errno)); + return NULL; + } + + FILE *pf = fopen(cfg_name, "rb"); + if (pf == NULL) { + plog_err("\tError opening config file '%s'\n", cfg_name); + return NULL; + } + + struct cfg_file *pcfg = calloc(1, sizeof(struct cfg_file)); + + if (pcfg == NULL) { + fclose(pf); + plog_err("\tCouldn't allocate memory for config file struct\n"); + return NULL; + } + + pcfg->pfile = pf; + pcfg->name = strdup(cfg_name); + + return pcfg; +} + +/* Free memory allocated for cfg_file structure. + * Returns 0 on success, -1 if the pointer to the pcfg is invalid */ +int cfg_close(struct cfg_file *pcfg) +{ + if (pcfg == NULL) { + return -1; + } + + if (pcfg->name != NULL) { + free(pcfg->name); + } + if (pcfg->err_section != NULL) { + free(pcfg->err_section); + } + if (pcfg->pfile != NULL) { + fclose(pcfg->pfile); + } + + free(pcfg); + return 0; +} + +static int cfg_get_pos(struct cfg_file *pcfg, fpos_t *pos) +{ + pcfg->index_line = pcfg->line; + return fgetpos(pcfg->pfile, pos); +} + +static int cfg_set_pos(struct cfg_file *pcfg, fpos_t *pos) +{ + pcfg->line = pcfg->index_line; + return fsetpos(pcfg->pfile, pos); +} + +/* + * Read a line from the configuration file. 
+ * Returns: on success length of the line read from the file is returned, + * 0 to indicate End of File, + * -1 in case of wrong function parameters + */ +static int cfg_get_line(struct cfg_file *pcfg, char *buffer, unsigned len, int raw_lines) +{ + char *ptr; + + if (pcfg == NULL || pcfg->pfile == NULL || buffer == NULL || len == 0) { + return -1; + } + + do { + ptr = fgets(buffer, len, pcfg->pfile); + if (ptr == NULL) { + return 0; /* end of file */ + } + ++pcfg->line; + + if (raw_lines) { + break; + } + + /* remove comments */ + ptr = strchr(buffer, ';'); + if (ptr != NULL) { + *ptr = '\0'; + } + else { + ptr = strchr(buffer, '\0'); + } + + /* remove trailing spaces */ + if (ptr != buffer) { + ptr--; + while (isspace(*ptr)) { + *ptr = '\0'; + ptr--; + } + } + + ptr = buffer; + /* remove leading spaces */ + while (*ptr && isspace(*ptr)) { + ++ptr; + } + if (ptr != buffer) { + strcpy(buffer, ptr); + ptr = buffer; + } + } + while (*ptr == '\0'); /* skip empty strings */ + + return strlen(buffer); +} + +/* + * Checks if buffer contains section name specified by the cfg_section pointer. + * Returns NULL if section name does not match, cfg_section pointer otherwise + */ +static struct cfg_section *cfg_check_section(char *buffer, struct cfg_section *psec) +{ + char *pend; + unsigned len; + static const char *valid = "0123456789,hs- \t"; + + pend = strchr(buffer, ']'); + if (pend == NULL) { + return NULL; /* ']' not found: invalid section name */ + } + + *pend = '\0'; + + /* check if section is indexed */ + pend = strchr(psec->name, '#'); + if (pend == NULL) { + return (strcmp(buffer, psec->name) == 0) ? psec : NULL; + } + + /* get section index */ + len = pend - psec->name; + if (strncmp(buffer, psec->name, len) != 0) { + return NULL; + } + pend = buffer + len; + if (*pend == '\0') { + return NULL; + } + /* only numeric characters are valid for section index + (currently, variables not checked!) 
*/ + if (pend[0] != '$') { + for (len = 0; pend[len] != '\0'; ++len) { + if (strchr(valid, pend[len]) == NULL) { + return NULL; + } + } + } + + psec->nbindex = parse_list_set(psec->indexp, pend, MAX_INDEX); + PROX_PANIC(psec->nbindex == -1, "\t\tError in cfg_check_section('%s'): %s\n", buffer, get_parse_err()); + + for (int i = 0; i < psec->nbindex; ++i) { + psec->indexp[i] |= CFG_INDEXED; + } + + return psec; +} + +static char *cfg_get_section_name(struct cfg_section *psec) +{ + char *name; + + if (!(psec->indexp[0] & CFG_INDEXED)) { + return strdup(psec->name); + } + + name = malloc(strlen(psec->name) + strlen(UINT32_MAX_STR)); + if (name != NULL) { + strcpy(name, psec->name); + char *pidx = strchr(name, '#'); + if (pidx != NULL) { + sprintf(pidx, "%u", psec->indexp[0] & ~CFG_INDEXED); + } + } + return name; +} + +/* + * Reads configuration file and parses section specified by psec pointer. + * Returns 0 on success, -1 otherwise + */ +int cfg_parse(struct cfg_file *pcfg, struct cfg_section *psec) +{ + int error; + unsigned entry = 0; + fpos_t pos; + int index_count = 0; + struct cfg_section *section = NULL; + char buffer[sizeof(pcfg->cur_line)] = {0}; + + if (pcfg == NULL || psec == NULL) { + return -1; + } + + pcfg->line = 0; + fseek(pcfg->pfile, 0, SEEK_SET); + + /* read configuration file and parse section specified by psec pointer */ + while (1) { + if (psec->raw_lines) { + /* skip until section starts */ + char *lines = pcfg->cur_line; + size_t max_len = sizeof(pcfg->cur_line); + char *ret; + + do { + ret = fgets(lines, max_len, pcfg->pfile); + if (ret && *ret == '[') { + section = cfg_check_section(lines + 1, psec); + } + } while (!section && ret); + + if (!ret) + return 0; + + do { + + ret = fgets(buffer, sizeof(buffer), pcfg->pfile); + if (ret && *ret != '[') { + size_t l = strlen(buffer); + strncpy(lines, buffer, max_len); + max_len -= l; + lines += l; + } + } while ((ret && *ret != '[')); + + if (section != NULL) { + error = 
section->parser(section->indexp[index_count], pcfg->cur_line, section->data); + if (error != 0) { + section->error = error; + /* log only the very first error */ + if (!pcfg->err_section) { + pcfg->err_line = pcfg->line; + pcfg->err_entry = entry; + pcfg->err_section = cfg_get_section_name(section); + } + return 0; + } + ++entry; + } + return 0; + } + + while (cfg_get_line(pcfg, buffer, MAX_CFG_STRING_LEN, psec->raw_lines) > 0) { + strncpy(pcfg->cur_line, buffer, sizeof(pcfg->cur_line)); + if (*buffer == '[') { + if (index_count + 1 < psec->nbindex) { + // Need to loop - go back to recorded postion in file + cfg_set_pos(pcfg, &pos); + ++index_count; + continue; + } + else { + section = cfg_check_section(buffer + 1, psec); + entry = 0; + index_count = 0; + cfg_get_pos(pcfg, &pos); + continue; + } + } + /* call parser procedure for each line in the section */ + if (section != NULL) { + error = section->parser(section->indexp[index_count], buffer, section->data); + if (error != 0) { + section->error = error; + /* log only the very first error */ + if (!pcfg->err_section) { + pcfg->err_line = pcfg->line; + pcfg->err_entry = entry; + pcfg->err_section = cfg_get_section_name(section); + } + return 0; + } + ++entry; + } + } + if (index_count + 1 < psec->nbindex) { + // Last core config contained multiple cores - loop back + cfg_set_pos(pcfg, &pos); + ++index_count; + } + else { + break; + } + } + return 0; +} diff --git a/VNFs/DPPD-PROX/cfgfile.h b/VNFs/DPPD-PROX/cfgfile.h new file mode 100644 index 00000000..41b474ee --- /dev/null +++ b/VNFs/DPPD-PROX/cfgfile.h @@ -0,0 +1,60 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _CFG_FILE_H_ +#define _CFG_FILE_H_ + +#include <stdio.h> + +#define DEFAULT_CONFIG_FILE "./prox.cfg" + +/* configuration file line parser procedure */ +typedef int (*cfg_parser)(unsigned sindex, char *str, void *data); + +#define CFG_INDEXED 0x80000000 /* section contains index [name #] */ +#define MAX_INDEX 64 + +struct cfg_section { + const char *name; /* section name without [] */ + cfg_parser parser; /* section parser function */ + void *data; /* data to be passed to the parser */ + /* set by parsing procedure */ + unsigned indexp[MAX_INDEX]; + int raw_lines; /* if set, do not remove text after ';' */ + int nbindex; + int error; +}; + +#define MAX_CFG_STRING_LEN 8192 +#define STRING_TERMINATOR_LEN 4 + +struct cfg_file { + char *name; + FILE *pfile; + unsigned line; + unsigned index_line; + /* set in case of any error */ + unsigned err_line; + char *err_section; + unsigned err_entry; + char cur_line[MAX_CFG_STRING_LEN + STRING_TERMINATOR_LEN]; +}; + +struct cfg_file *cfg_open(const char *cfg_name); +int cfg_parse(struct cfg_file *pcfg, struct cfg_section *psec); +int cfg_close(struct cfg_file *pcfg); + +#endif /* _CFGFILE_H_ */ diff --git a/VNFs/DPPD-PROX/clock.c b/VNFs/DPPD-PROX/clock.c new file mode 100644 index 00000000..6e057101 --- /dev/null +++ b/VNFs/DPPD-PROX/clock.c @@ -0,0 +1,261 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "clock.h" + +#include <stdio.h> +#include <string.h> + +#include <rte_cycles.h> + +/* Calibrate TSC overhead by reading NB_READ times and take the smallest value. + Bigger values are caused by external influence and can be discarded. The best + estimate is the smallest read value. */ +#define NB_READ 10000 + +uint32_t rdtsc_overhead; +uint32_t rdtsc_overhead_stats; + +uint64_t thresh; +uint64_t tsc_hz; + +/* calculate how much overhead is involved with calling rdtsc. This value has + to be taken into account where the time spent running a small piece of code + is measured */ +static void init_tsc_overhead(void) +{ + volatile uint32_t min_without_overhead = UINT32_MAX; + volatile uint32_t min_with_overhead = UINT32_MAX; + volatile uint32_t min_stats_overhead = UINT32_MAX; + volatile uint64_t start1, end1; + volatile uint64_t start2, end2; + + for (uint32_t i = 0; i < NB_READ; ++i) { + start1 = rte_rdtsc(); + end1 = rte_rdtsc(); + + start2 = rte_rdtsc(); + end2 = rte_rdtsc(); + end2 = rte_rdtsc(); + + if (min_without_overhead > end1 - start1) { + min_without_overhead = end1 - start1; + } + + if (min_with_overhead > end2 - start2) { + min_with_overhead = end2 - start2; + } + } + + rdtsc_overhead = min_with_overhead - min_without_overhead; + + start1 = rte_rdtsc(); + end1 = rte_rdtsc(); + /* forbid the compiler to optimize this dummy variable */ + volatile int dummy = 0; + for (uint32_t i = 0; i < NB_READ; ++i) { + start1 = rte_rdtsc(); + dummy += 32; + end1 = rte_rdtsc(); + + if (min_stats_overhead > end2 - start2) { + 
min_stats_overhead = end1 - start1; + } + } + + rdtsc_overhead_stats = rdtsc_overhead + min_stats_overhead - min_without_overhead; +} + +void clock_init(void) +{ + init_tsc_overhead(); + tsc_hz = rte_get_tsc_hz(); + thresh = UINT64_MAX/tsc_hz; +} + +uint64_t str_to_tsc(const char *from) +{ + const uint64_t hz = rte_get_tsc_hz(); + uint64_t ret; + char str[16]; + + strncpy(str, from, sizeof(str)); + + char *frac = strchr(str, '.'); + + if (frac) { + *frac = 0; + frac++; + } + + ret = hz * atoi(str); + + if (!frac) + return ret; + + uint64_t nsec = 0; + uint64_t multiplier = 100000000; + + for (size_t i = 0; i < strlen(frac); ++i) { + nsec += (frac[i] - '0') * multiplier; + multiplier /= 10; + } + + /* Wont overflow until CPU freq is ~18.44 GHz */ + ret += hz * nsec/1000000000; + + return ret; +} + +uint64_t sec_to_tsc(uint64_t sec) +{ + if (sec < UINT64_MAX/rte_get_tsc_hz()) + return sec * rte_get_tsc_hz(); + else + return UINT64_MAX; +} + +uint64_t msec_to_tsc(uint64_t msec) +{ + if (msec < UINT64_MAX/rte_get_tsc_hz()) + return msec * rte_get_tsc_hz() / 1000; + else + return msec / 1000 * rte_get_tsc_hz(); +} + +uint64_t usec_to_tsc(uint64_t usec) +{ + if (usec < UINT64_MAX/rte_get_tsc_hz()) + return usec * rte_get_tsc_hz() / 1000000; + else + return usec / 1000000 * rte_get_tsc_hz(); +} + +uint64_t nsec_to_tsc(uint64_t nsec) +{ + if (nsec < UINT64_MAX/rte_get_tsc_hz()) + return nsec * rte_get_tsc_hz() / 1000000000; + else + return nsec / 1000000000 * rte_get_tsc_hz(); +} + +uint64_t tsc_to_msec(uint64_t tsc) +{ + if (tsc < UINT64_MAX / 1000) { + return tsc * 1000 / rte_get_tsc_hz(); + } else { + return tsc / (rte_get_tsc_hz() / 1000); + } +} + +uint64_t tsc_to_usec(uint64_t tsc) +{ + if (tsc < UINT64_MAX / 1000000) { + return tsc * 1000000 / rte_get_tsc_hz(); + } else { + return tsc / (rte_get_tsc_hz() / 1000000); + } +} + +uint64_t tsc_to_nsec(uint64_t tsc) +{ + if (tsc < UINT64_MAX / 1000000000) { + return tsc * 1000000000 / rte_get_tsc_hz(); + } else { + return 
tsc / (rte_get_tsc_hz() / 1000000000); + } +} + +uint64_t tsc_to_sec(uint64_t tsc) +{ + return tsc / rte_get_tsc_hz(); +} + +struct time_unit tsc_to_time_unit(uint64_t tsc) +{ + struct time_unit ret; + uint64_t hz = rte_get_tsc_hz(); + + ret.sec = tsc/hz; + ret.nsec = (tsc - ret.sec*hz)*1000000000/hz; + + return ret; +} + +uint64_t time_unit_to_usec(struct time_unit *time_unit) +{ + return time_unit->sec * 1000000 + time_unit->nsec/1000; +} + +uint64_t time_unit_to_nsec(struct time_unit *time_unit) +{ + return time_unit->sec * 1000000000 + time_unit->nsec; +} + +int time_unit_cmp(struct time_unit *left, struct time_unit *right) +{ + if (left->sec < right->sec) + return -1; + if (left->sec > right->sec) + return 1; + + if (left->nsec < right->nsec) + return -1; + if (left->nsec > right->nsec) + return -1; + return 0; +} + +uint64_t freq_to_tsc(uint64_t times_per_sec) +{ + return rte_get_tsc_hz()/times_per_sec; +} + +void tsc_to_tv(struct timeval *tv, const uint64_t tsc) +{ + uint64_t hz = rte_get_tsc_hz(); + uint64_t sec = tsc/hz; + + tv->tv_sec = sec; + tv->tv_usec = ((tsc - sec * hz) * 1000000) / hz; +} + +void tv_to_tsc(const struct timeval *tv, uint64_t *tsc) +{ + uint64_t hz = rte_get_tsc_hz(); + *tsc = tv->tv_sec * hz; + *tsc += tv->tv_usec * hz / 1000000; +} + +struct timeval tv_diff(const struct timeval *cur, const struct timeval *next) +{ + uint64_t sec, usec; + + sec = next->tv_sec - cur->tv_sec; + if (next->tv_usec < cur->tv_usec) { + usec = next->tv_usec + 1000000 - cur->tv_usec; + sec -= 1; + } + else + usec = next->tv_usec - cur->tv_usec; + + struct timeval ret = { + .tv_sec = sec, + .tv_usec = usec, + }; + + return ret; +} diff --git a/VNFs/DPPD-PROX/clock.h b/VNFs/DPPD-PROX/clock.h new file mode 100644 index 00000000..719968ab --- /dev/null +++ b/VNFs/DPPD-PROX/clock.h @@ -0,0 +1,76 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _CLOCK_H_ +#define _CLOCK_H_ + +#include <inttypes.h> + +extern uint32_t rdtsc_overhead; +extern uint32_t rdtsc_overhead_stats; + +void clock_init(void); + +struct time_unit { + uint64_t sec; + uint64_t nsec; +}; + +struct time_unit_err { + struct time_unit time; + struct time_unit error; +}; + +extern uint64_t thresh; +extern uint64_t tsc_hz; + +static uint64_t val_to_rate(uint64_t val, uint64_t delta_t) +{ + if (val < thresh) { + return val * tsc_hz / delta_t; + } else if (val >> 2 < thresh) { + /* bytes per sec malls into this category ... */ + return ((val >> 2) * tsc_hz) / (delta_t >> 2); + } else { + if (delta_t < tsc_hz) + return UINT64_MAX; + else + return val / (delta_t/tsc_hz); + } +} + +/* The precision of the conversion is nano-second. 
*/ +uint64_t str_to_tsc(const char *from); +uint64_t sec_to_tsc(uint64_t sec); +uint64_t msec_to_tsc(uint64_t msec); +uint64_t usec_to_tsc(uint64_t usec); +uint64_t nsec_to_tsc(uint64_t nsec); +uint64_t freq_to_tsc(uint64_t times_per_sec); +uint64_t tsc_to_msec(uint64_t tsc); +uint64_t tsc_to_usec(uint64_t tsc); +uint64_t tsc_to_nsec(uint64_t tsc); +uint64_t tsc_to_sec(uint64_t tsc); +struct time_unit tsc_to_time_unit(uint64_t tsc); +uint64_t time_unit_to_usec(struct time_unit *time_unit); +uint64_t time_unit_to_nsec(struct time_unit *time_unit); +int time_unit_cmp(struct time_unit *left, struct time_unit *right); + +struct timeval; +void tsc_to_tv(struct timeval *tv, const uint64_t tsc); +void tv_to_tsc(const struct timeval *tv, uint64_t *tsc); +struct timeval tv_diff(const struct timeval *tv1, const struct timeval * tv2); + +#endif /* _CLOCK_H_ */ diff --git a/VNFs/DPPD-PROX/cmd_parser.c b/VNFs/DPPD-PROX/cmd_parser.c new file mode 100644 index 00000000..95688477 --- /dev/null +++ b/VNFs/DPPD-PROX/cmd_parser.c @@ -0,0 +1,2031 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <rte_cycles.h> +#include <rte_version.h> + +#include "input.h" +#include "cmd_parser.h" +#include "commands.h" +#include "run.h" +#include "display.h" +#include "log.h" +#include "prox_cfg.h" +#include "prox_port_cfg.h" +#include "task_base.h" +#include "lconf.h" +#include "main.h" +#include "parse_utils.h" +#include "stats_parser.h" +#include "stats_port.h" +#include "stats_latency.h" +#include "stats_global.h" +#include "stats_prio_task.h" + +#include "handle_routing.h" +#include "handle_qinq_decap4.h" +#include "handle_lat.h" +#include "handle_arp.h" +#include "handle_gen.h" +#include "handle_acl.h" +#include "handle_irq.h" +#include "defines.h" +#include "prox_cfg.h" +#include "version.h" +#include "stats_latency.h" +#include "handle_cgnat.h" +#include "handle_impair.h" + +static int core_task_is_valid(int lcore_id, int task_id) +{ + if (lcore_id >= RTE_MAX_LCORE) { + plog_err("Invalid core id %u (lcore ID above %d)\n", lcore_id, RTE_MAX_LCORE); + return 0; + } + else if (!prox_core_active(lcore_id, 0)) { + plog_err("Invalid core id %u (lcore is not active)\n", lcore_id); + return 0; + } + else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) { + plog_err("Invalid task id (valid task IDs for core %u are below %u)\n", + lcore_id, lcore_cfg[lcore_id].n_tasks_all); + return 0; + } + return 1; +} + +static int cores_task_are_valid(unsigned int *lcores, int task_id, unsigned int nb_cores) +{ + unsigned int lcore_id; + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (lcore_id >= RTE_MAX_LCORE) { + plog_err("Invalid core id %u (lcore ID above %d)\n", lcore_id, RTE_MAX_LCORE); + return 0; + } + else if (!prox_core_active(lcore_id, 0)) { + plog_err("Invalid core id %u (lcore is not active)\n", lcore_id); + return 0; + } + else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) { + plog_err("Invalid task id (valid task IDs for core %u are below %u)\n", + lcore_id, 
lcore_cfg[lcore_id].n_tasks_all); + return 0; + } + } + return 1; +} + +static int parse_core_task(const char *str, uint32_t *lcore_id, uint32_t *task_id, unsigned int *nb_cores) +{ + char str_lcore_id[128]; + int ret; + + if (2 != sscanf(str, "%s %u", str_lcore_id, task_id)) + return -1; + + if ((ret = parse_list_set(lcore_id, str_lcore_id, RTE_MAX_LCORE)) <= 0) { + plog_err("Invalid core while parsing command (%s)\n", get_parse_err()); + return -1; + } + *nb_cores = ret; + + return 0; +} + +static const char *strchr_skip_twice(const char *str, int chr) +{ + str = strchr(str, chr); + if (!str) + return NULL; + str = str + 1; + + str = strchr(str, chr); + if (!str) + return NULL; + return str + 1; +} + +static int parse_cmd_quit(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + quit(); + return 0; +} + +static int parse_cmd_quit_force(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + abort(); +} + +static int parse_cmd_history(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + if (input->history) { + input->history(input); + return 0; + } + plog_err("Invalid history comand "); + return -1; +} + +static int parse_cmd_echo(const char *str, struct input *input) +{ + if (strcmp(str, "") == 0) { + return -1; + } + + char resolved[2048]; + + if (parse_vars(resolved, sizeof(resolved), str)) { + return 0; + } + + if (input->reply) { + if (strlen(resolved) + 2 < sizeof(resolved)) { + resolved[strlen(resolved) + 1] = 0; + resolved[strlen(resolved)] = '\n'; + } + else + return 0; + + input->reply(input, resolved, strlen(resolved)); + } else + plog_info("%s\n", resolved); + + return 0; +} + +static int parse_cmd_reset_stats(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + stats_reset(); + return 0; +} + +static int parse_cmd_reset_lat_stats(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + 
return -1; + } + + stats_latency_reset(); + return 0; +} + +static int parse_cmd_trace(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], task_id, nb_packets, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%u", &nb_packets) != 1) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + cmd_trace(lcores[i], task_id, nb_packets); + } + } + return 0; +} + +static int parse_cmd_dump_rx(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], task_id, nb_packets, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%u", &nb_packets) != 1) { + return -1; + } + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + cmd_dump(lcores[i], task_id, nb_packets, input, 1, 0); + } + } + return 0; +} + +static int parse_cmd_pps_unit(const char *str, struct input *input) +{ + uint32_t val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + display_set_pps_unit(val); + return 0; +} + +static int parse_cmd_dump_tx(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], task_id, nb_packets, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%u", &nb_packets) != 1) { + return -1; + } + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + cmd_dump(lcores[i], task_id, nb_packets, input, 0, 1); + } + } + return 0; +} + +static int parse_cmd_rate(const char *str, struct input *input) +{ + unsigned queue, port, rate; + + if (sscanf(str, "%u %u %u", &queue, &port, &rate) != 3) { + return -1; + } + + if (port > PROX_MAX_PORTS) { + 
plog_err("Max port id allowed is %u (specified %u)\n", PROX_MAX_PORTS, port); + } + else if (!prox_port_cfg[port].active) { + plog_err("Port %u not active\n", port); + } + else if (queue >= prox_port_cfg[port].n_txq) { + plog_err("Number of active queues is %u\n", + prox_port_cfg[port].n_txq); + } + else if (rate > prox_port_cfg[port].link_speed) { + plog_err("Max rate allowed on port %u queue %u is %u Mbps\n", + port, queue, prox_port_cfg[port].link_speed); + } + else { + if (rate == 0) { + plog_info("Disabling rate limiting on port %u queue %u\n", + port, queue); + } + else { + plog_info("Setting rate limiting to %u Mbps on port %u queue %u\n", + rate, port, queue); + } + rte_eth_set_queue_rate_limit(port, queue, rate); + } + return 0; +} + +int task_is_mode(uint32_t lcore_id, uint32_t task_id, const char *mode, const char *sub_mode) +{ + struct task_init *t = lcore_cfg[lcore_id].targs[task_id].task_init; + + return !strcmp(t->mode_str, mode) && !strcmp(t->sub_mode_str, sub_mode); +} + +int task_is_sub_mode(uint32_t lcore_id, uint32_t task_id, const char *sub_mode) +{ + struct task_init *t = lcore_cfg[lcore_id].targs[task_id].task_init; + + return !strcmp(t->sub_mode_str, sub_mode); +} + +static void log_pkt_count(uint32_t count, uint32_t lcore_id, uint32_t task_id) +{ + if (count == UINT32_MAX) + plog_info("Core %u task %u will keep sending packets\n", lcore_id, task_id); + else if (count == 0) + plog_info("Core %u task %u waits for next count command\n", lcore_id, task_id); + else + plog_info("Core %u task %u stopping after %u packets\n", lcore_id, task_id, count); +} + +static int parse_cmd_count(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, count, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%u", &count) != 1) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int 
i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else { + struct task_base *task = lcore_cfg[lcore_id].tasks_all[task_id]; + + log_pkt_count(count, lcore_id, task_id); + task_gen_set_pkt_count(task, count); + } + } + } + return 0; +} + +static int parse_cmd_set_probability(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + float probability; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%f", &probability) != 1) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (!task_is_mode(lcore_id, task_id, "impair", "")) { + plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id); + } + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + task_impair_set_proba(tbase, probability); + } + } + return 0; +} + +static int parse_cmd_delay_us(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, delay_us, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%d", &delay_us) != 1) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (!task_is_mode(lcore_id, task_id, "impair", "")) { + plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id); + } + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + task_impair_set_delay_us(tbase, delay_us, 0); + } + } + return 0; +} + +static int parse_cmd_random_delay_us(const char *str, struct input *input) +{ 
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, delay_us, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%d", &delay_us) != 1) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (!task_is_mode(lcore_id, task_id, "impair", "")) { + plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id); + } + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + task_impair_set_delay_us(tbase, 0, delay_us); + } + } + return 0; +} + +static int parse_cmd_bypass(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, pkt_size, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if ((prox_cfg.flags & DSF_ENABLE_BYPASS) == 0) { + plog_err("enable bypass not set => command not supported\n"); + return -1; + } + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (bypass_task(lcore_id, task_id) != 0) + return -1; + } + } + return 0; +} + +static int parse_cmd_reconnect(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, pkt_size, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (reconnect_task(lcore_id, task_id) != 0) + return -1; + } + } + return 0; +} + +static int parse_cmd_pkt_size(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, pkt_size, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%d", &pkt_size) != 1) + return -1; + + if 
(cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + if (task_gen_set_pkt_size(tbase, pkt_size) != 0) + return -1; + } + } + return 0; +} + +static int parse_cmd_speed(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], task_id, lcore_id, nb_cores; + float speed; + unsigned i; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%f", &speed) != 1) { + return -1; + } + + if (!cores_task_are_valid(lcores, task_id, nb_cores)) { + return 0; + } + + for (i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else if (speed > 400.0f || speed < 0.0f) { + plog_err("Speed out of range (must be betweeen 0%% and 100%%)\n"); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + uint64_t bps = speed * 12500000; + + plog_info("Setting rate to %"PRIu64" Bps\n", bps); + + task_gen_set_rate(tbase, bps); + } + } + return 0; +} + +static int parse_cmd_speed_byte(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + uint64_t bps; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%"PRIu64"", &bps) != 1) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + + if ((!task_is_mode(lcore_id, task_id, 
"gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else if (bps > 1250000000) { + plog_err("Speed out of range (must be <= 1250000000)\n"); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + plog_info("Setting rate to %"PRIu64" Bps\n", bps); + task_gen_set_rate(tbase, bps); + } + } + } + return 0; +} + +static int parse_cmd_reset_randoms_all(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + unsigned task_id, lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) { + for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) { + if ((task_is_mode(lcore_id, task_id, "gen", "")) || (task_is_mode(lcore_id, task_id, "gen", "l3"))) { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + uint32_t n_rands = task_gen_get_n_randoms(tbase); + + plog_info("Resetting randoms on core %d task %d from %d randoms\n", lcore_id, task_id, n_rands); + task_gen_reset_randoms(tbase); + } + } + } + return 0; +} + +static int parse_cmd_reset_values_all(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + unsigned task_id, lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) { + for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) { + if ((task_is_mode(lcore_id, task_id, "gen", "")) || (task_is_mode(lcore_id, task_id, "gen", "l3"))) { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + plog_info("Resetting values on core %d task %d\n", lcore_id, task_id); + task_gen_reset_values(tbase); + } + } + } + return 0; +} + +static int parse_cmd_reset_values(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 
0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + plog_info("Resetting values on core %d task %d\n", lcore_id, task_id); + task_gen_reset_values(tbase); + } + } + } + return 0; +} + +static int parse_cmd_set_value(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, value, nb_cores; + unsigned short offset; + uint8_t value_len; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%hu %u %hhu", &offset, &value, &value_len) != 3) { + return -1; + } + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else if (offset > ETHER_MAX_LEN) { + plog_err("Offset out of range (must be less then %u)\n", ETHER_MAX_LEN); + } + else if (value_len > 4) { + plog_err("Length out of range (must be less then 4)\n"); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + if (task_gen_set_value(tbase, value, offset, value_len)) + plog_info("Unable to set Byte %"PRIu16" to %"PRIu8" - too many value set\n", offset, value); + else + plog_info("Setting Byte %"PRIu16" to %"PRIu32"\n", offset, value); + } + } + } + return 0; +} + +static int parse_cmd_set_random(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + unsigned short offset; + uint8_t value_len; + char rand_str[64]; + int16_t rand_id = -1; + + if (parse_core_task(str, lcores, &task_id, 
&nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%hu %32s %hhu", &offset, rand_str, &value_len) != 3) { + return -1; + } + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else if (offset > ETHER_MAX_LEN) { + plog_err("Offset out of range (must be less then %u)\n", ETHER_MAX_LEN); + } + else if (value_len > 4) { + plog_err("Length out of range (must be less then 4)\n"); + } else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + if (task_gen_add_rand(tbase, rand_str, offset, rand_id)) { + plog_warn("Random not added on core %u task %u\n", lcore_id, task_id); + } + } + } + } + return 0; +} + +static int parse_cmd_thread_info(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + for (unsigned int i = 0; i < nb_cores; i++) { + cmd_thread_info(lcores[i], task_id); + } + return 0; +} + +static int parse_cmd_verbose(const char *str, struct input *input) +{ + unsigned id; + + if (sscanf(str, "%u", &id) != 1) { + return -1; + } + + if (plog_set_lvl(id) != 0) { + plog_err("Cannot set log level to %u\n", id); + } + return 0; +} + +static int parse_cmd_arp_add(const char *str, struct input *input) +{ + struct arp_msg amsg; + struct arp_msg *pmsg = &amsg; + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + struct rte_ring *ring; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (strcmp(str, "")) + return -1; + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + if (str_to_arp_msg(&amsg, str) == 0) { + for (unsigned 
int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + ring = ctrl_rings[lcore_id*MAX_TASKS_PER_CORE + task_id]; + if (!ring) { + plog_err("No ring for control messages to core %u task %u\n", lcore_id, task_id); + } + else { +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + while (rte_ring_sp_enqueue_bulk(ring, (void *const *)&pmsg, 1)); +#else + while (rte_ring_sp_enqueue_bulk(ring, (void *const *)&pmsg, 1, NULL) == 0); +#endif + while (!rte_ring_empty(ring)); + } + } + return 0; + } + } + return -1; +} + +static int parse_cmd_rule_add(const char *str, struct input *input) +{ + struct rte_ring *ring; + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (strcmp(str, "")) + return -1; + char *fields[9]; + char str_cpy[255]; + strncpy(str_cpy, str, 255); + // example add rule command: rule add 15 0 1&0x0fff 1&0x0fff 0&0 128.0.0.0/1 128.0.0.0/1 5000-5000 5000-5000 allow + int ret = rte_strsplit(str_cpy, 255, fields, 9, ' '); + if (ret != 8) { + return -1; + } + + struct acl4_rule rule; + struct acl4_rule *prule = &rule; + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + if (str_to_rule(&rule, fields, -1, 1) == 0) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + ring = ctrl_rings[lcore_id*MAX_TASKS_PER_CORE + task_id]; + if (!ring) { + plog_err("No ring for control messages to core %u task %u\n", lcore_id, task_id); + } + else { +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + while (rte_ring_sp_enqueue_bulk(ring, (void *const *)&prule, 1)); +#else + while (rte_ring_sp_enqueue_bulk(ring, (void *const *)&prule, 1, NULL) == 0); +#endif + while (!rte_ring_empty(ring)); + } + } + return 0; + } + } + return -1; +} + +static int parse_cmd_gateway_ip(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, ip[4], nb_cores, i; + + if (parse_core_task(str, lcores, 
&task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (!strcmp(str, "")) + return -1; + if (sscanf(str, "%u.%u.%u.%u", ip, ip + 1, ip + 2, ip + 3) != 4) { + return -1; + } + for (i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if ((!task_is_mode(lcore_id, task_id, "gen", "")) && (!task_is_mode(lcore_id, task_id, "gen", "l3"))) { + plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); + } + else { + uint32_t gateway_ip = ((ip[3] & 0xFF) << 24) | ((ip[2] & 0xFF) << 16) | ((ip[1] & 0xFF) << 8) | ((ip[0] & 0xFF) << 0); + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + plog_info("Setting gateway ip to %s\n", str); + task_gen_set_gateway_ip(tbase, gateway_ip); + } + } + return 0; +} + +static int parse_cmd_local_ip(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, ip[4], nb_cores, i; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (!strcmp(str, "")) + return -1; + if (sscanf(str, "%u.%u.%u.%u", ip, ip + 1, ip + 2, ip + 3) != 4) { + return -1; + } + for (i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (!task_is_mode(lcore_id, task_id, "arp", "local")) { + plog_err("Core %u task %u is not in arp mode\n", lcore_id, task_id); + } + else { + uint32_t local_ip = ((ip[3] & 0xFF) << 24) | ((ip[2] & 0xFF) << 16) | ((ip[1] & 0xFF) << 8) | ((ip[0] & 0xFF) << 0); + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + plog_info("Setting local ip to %s\n", str); + task_arp_set_local_ip(tbase, local_ip); + } + } + return 0; +} + +static int parse_cmd_route_add(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, prefix, next_hop_idx, ip[4], nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (strcmp(str, "")) + 
return -1; + if (sscanf(str, "%u.%u.%u.%u/%u %u", ip, ip + 1, ip + 2, ip + 3, + &prefix, &next_hop_idx) != 8) { + return -1; + } + struct rte_ring *ring; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + ring = ctrl_rings[lcore_id*MAX_TASKS_PER_CORE + task_id]; + if (!ring) { + plog_err("No ring for control messages to core %u task %u\n", lcore_id, task_id); + } + else { + struct route_msg rmsg; + struct route_msg *pmsg = &rmsg; + + rmsg.ip_bytes[0] = ip[0]; + rmsg.ip_bytes[1] = ip[1]; + rmsg.ip_bytes[2] = ip[2]; + rmsg.ip_bytes[3] = ip[3]; + rmsg.prefix = prefix; + rmsg.nh = next_hop_idx; +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + while (rte_ring_sp_enqueue_bulk(ring, (void *const *)&pmsg, 1)); +#else + while (rte_ring_sp_enqueue_bulk(ring, (void *const *)&pmsg, 1, NULL) == 0); +#endif + while (!rte_ring_empty(ring)); + } + } + } + return 0; +} + +static int parse_cmd_start(const char *str, struct input *input) +{ + int task_id = -1; + + if (strncmp(str, "all", 3) == 0) { + str += 3; + sscanf(str, "%d", &task_id); + + start_core_all(task_id); + req_refresh(); + return 0; + } + + uint32_t cores[64] = {0}; + int ret; + ret = parse_list_set(cores, str, 64); + if (ret < 0) { + return -1; + } + str = strchr(str, ' '); + + if (str) { + sscanf(str, "%d", &task_id); + } + start_cores(cores, ret, task_id); + req_refresh(); + return 0; +} + +static int parse_cmd_stop(const char *str, struct input *input) +{ + int task_id = -1; + + if (strncmp(str, "all", 3) == 0) { + str += 3; + sscanf(str, "%d", &task_id); + stop_core_all(task_id); + req_refresh(); + return 0; + } + + uint32_t cores[64] = {0}; + int ret; + ret = parse_list_set(cores, str, 64); + if (ret < 0) { + return -1; + } + str = strchr(str, ' '); + + if (str) { + sscanf(str, "%d", &task_id); + } + stop_cores(cores, ret, task_id); + req_refresh(); + + return 0; +} + +static int parse_cmd_rx_distr_start(const char *str, struct input 
*input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_rx_distr_start(lcore_id[i]); + return 0; +} + +static int parse_cmd_tx_distr_start(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_tx_distr_start(lcore_id[i]); + return 0; +} + +static int parse_cmd_rx_distr_stop(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_rx_distr_stop(lcore_id[i]); + return 0; +} + +static int parse_cmd_tx_distr_stop(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_tx_distr_stop(lcore_id[i]); + return 0; +} + +static int parse_cmd_rx_distr_reset(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_rx_distr_rst(lcore_id[i]); + return 0; +} + +static int parse_cmd_tx_distr_reset(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + 
cmd_tx_distr_rst(lcore_id[i]); + return 0; +} + +static int parse_cmd_rx_distr_show(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_rx_distr_show(lcore_id[i]); + return 0; +} + +static int parse_cmd_tx_distr_show(const char *str, struct input *input) +{ + unsigned lcore_id[RTE_MAX_LCORE]; + + int nb_cores; + + nb_cores = parse_list_set(lcore_id, str, sizeof(lcore_id)/sizeof(lcore_id[0])); + + if (nb_cores <= 0) { + return -1; + } + + for (int i = 0; i < nb_cores; ++i) + cmd_tx_distr_show(lcore_id[i]); + return 0; +} + +static int parse_cmd_tot_stats(const char *str, struct input *input) +{ + if (strcmp("", str) != 0) { + return -1; + } + + struct global_stats_sample *gsl = stats_get_global_stats(1); + uint64_t tot_rx = gsl->host_rx_packets; + uint64_t tot_tx = gsl->host_tx_packets; + uint64_t last_tsc = gsl->tsc; + + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"\n", + tot_rx, tot_tx, last_tsc, rte_get_tsc_hz()); + input->reply(input, buf, strlen(buf)); + } + else { + plog_info("RX: %"PRIu64", TX: %"PRIu64"\n", tot_rx, tot_tx); + } + return 0; +} + +static int parse_cmd_update_interval(const char *str, struct input *input) +{ + unsigned val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + + if (val == 0) { + plog_err("Minimum update interval is 1 ms\n"); + } + else { + plog_info("Setting update interval to %d ms\n", val); + set_update_interval(val); + } + return 0; +} + +static int parse_cmd_mem_info(const char *str, struct input *input) +{ + if (strcmp("", str) != 0) { + return -1; + } + + cmd_mem_stats(); + cmd_mem_layout(); + return 0; +} + +static int parse_cmd_tot_ierrors_tot(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + struct 
global_stats_sample *gsl = stats_get_global_stats(1); + uint64_t tot = gsl->nics_ierrors; + uint64_t last_tsc = gsl->tsc; + + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), + "%"PRIu64",%"PRIu64",%"PRIu64"\n", + tot, last_tsc, rte_get_tsc_hz()); + input->reply(input, buf, strlen(buf)); + } + else { + plog_info("ierrors: %"PRIu64"\n", tot); + } + return 0; +} + +static int parse_cmd_tot_imissed_tot(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + struct global_stats_sample *gsl = stats_get_global_stats(1); + uint64_t tot = gsl->nics_imissed; + uint64_t last_tsc = gsl->tsc; + + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), + "%"PRIu64",%"PRIu64",%"PRIu64"\n", + tot, last_tsc, rte_get_tsc_hz()); + input->reply(input, buf, strlen(buf)); + } + else { + plog_info("imissed: %"PRIu64"\n", tot); + } + return 0; +} + +static int parse_cmd_reset_port(const char *str, struct input *input) +{ + uint32_t port_id; + + if (sscanf(str, "%u", &port_id ) != 1) { + return -1; + } + + cmd_reset_port(port_id); + return 0; +} + +static int parse_cmd_write_reg(const char *str, struct input *input) +{ + uint32_t port_id; + uint32_t id, val; + + if (sscanf(str, "%u %x %u", &port_id, &id, &val) != 3) { + return -1; + } + + cmd_write_reg(port_id, id, val); + return 0; +} + +static int parse_cmd_read_reg(const char *str, struct input *input) +{ + uint32_t port_id; + uint32_t id; + + if (sscanf(str, "%u %x", &port_id, &id) != 2) { + return -1; + } + + cmd_read_reg(port_id, id); + return 0; +} + +static int parse_cmd_cache_reset(const char *str, struct input *input) +{ + cmd_cache_reset(); + return 0; +} + +static int parse_cmd_set_cache_class_mask(const char *str, struct input *input) +{ + uint32_t lcore_id; + uint32_t set; + uint32_t val; + + if (sscanf(str, "%u %u %u", &lcore_id, &set, &val) != 3) { + return -1; + } + + cmd_set_cache_class_mask(lcore_id, set, val); + return 0; +} + +static int 
parse_cmd_set_cache_class(const char *str, struct input *input) +{ + uint32_t lcore_id; + uint32_t set; + + if (sscanf(str, "%u %u", &lcore_id, &set) != 2) { + return -1; + } + + cmd_set_cache_class(lcore_id, set); + return 0; +} + +static int parse_cmd_get_cache_class_mask(const char *str, struct input *input) +{ + uint32_t lcore_id; + uint32_t set; + uint32_t val = 0; + + if (sscanf(str, "%u %u", &lcore_id, &set) != 2) { + return -1; + } + + cmd_get_cache_class_mask(lcore_id, set, &val); + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), "%d, %d, %x\n", lcore_id, set, val); + input->reply(input, buf, strlen(buf)); + } else { + plog_info("core=%d, set=%d, mask=%x\n", lcore_id, set, val); + } + return 0; +} + +static int parse_cmd_get_cache_class(const char *str, struct input *input) +{ + uint32_t lcore_id; + uint32_t set; + uint32_t val; + + if (sscanf(str, "%u", &lcore_id) != 1) { + return -1; + } + + cmd_get_cache_class(lcore_id, &set); + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), "%d, %d\n", lcore_id, set); + input->reply(input, buf, strlen(buf)); + } else { + plog_info("core=%d, cos=%d\n", lcore_id, set); + } + return 0; +} + +static int parse_cmd_get_cache_mask(const char *str, struct input *input) +{ + uint32_t lcore_id; + uint32_t set; + uint32_t mask; + + if (sscanf(str, "%u", &lcore_id) != 1) { + return -1; + } + + cmd_get_cache_class(lcore_id, &set); + cmd_get_cache_class_mask(lcore_id, set, &mask); + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), "%d, %x\n", lcore_id, mask); + input->reply(input, buf, strlen(buf)); + } else { + plog_info("core=%d, mask=%x\n", lcore_id, mask); + } + return 0; +} + +static int parse_cmd_set_vlan_offload(const char *str, struct input *input) +{ + uint32_t port_id; + uint32_t val; + + if (sscanf(str, "%u %u", &port_id, &val) != 2) { + return -1; + } + + cmd_set_vlan_offload(port_id, val); + return 0; +} + +static int parse_cmd_set_vlan_filter(const char *str, struct 
input *input) +{ + uint32_t port_id; + uint32_t id, val; + + if (sscanf(str, "%u %d %u", &port_id, &id, &val) != 3) { + return -1; + } + + cmd_set_vlan_filter(port_id, id, val); + return 0; +} + +static int parse_cmd_ring_info_all(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + cmd_ringinfo_all(); + return 0; +} + +static int parse_cmd_port_up(const char *str, struct input *input) +{ + unsigned val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + + cmd_port_up(val); + return 0; +} + +static int parse_cmd_port_down(const char *str, struct input *input) +{ + unsigned val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + + cmd_port_down(val); + return 0; +} + +static int parse_cmd_port_link_state(const char *str, struct input *input) +{ + unsigned val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + + if (!port_is_active(val)) + return -1; + + int active = prox_port_cfg[val].link_up; + const char *state = active? "up\n" : "down\n"; + + if (input->reply) + input->reply(input, state, strlen(state)); + else + plog_info("%s", state); + + return 0; +} + +static int parse_cmd_xstats(const char *str, struct input *input) +{ + unsigned val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + + cmd_xstats(val); + return 0; +} + +static int parse_cmd_stats(const char *str, struct input *input) +{ + if (strcmp(str, "") == 0) + return -1; + + char buf[32768]; + char ret2[32768]; + char *ret = ret2; + int list = 0; + + strncpy(buf, str, sizeof(buf) - 1); + char *tok; + uint64_t stat_val; + + while ((tok = strchr(str, ','))) { + *tok = 0; + stat_val = stats_parser_get(str); + + ret += sprintf(ret, "%s%"PRIu64"", list? "," :"", stat_val); + list = 1; + str = tok + 1; + } + + stat_val = stats_parser_get(str); + ret += sprintf(ret, "%s%"PRIu64"", list? 
"," :"", stat_val); + + sprintf(ret, "\n"); + + if (input->reply) + input->reply(input, ret2, strlen(ret2)); + else + plog_info("%s", ret2); + return 0; +} + +static void replace_char(char *str, char to_replace, char by) +{ + for (size_t i = 0; str[i] != '\0'; ++i) { + if (str[i] == to_replace) + str[i] = by; + } +} + +static int parse_cmd_port_info(const char *str, struct input *input) +{ + int val; + + if (strcmp(str, "all") == 0) { + val = -1; + } + else if (sscanf(str, "%d", &val) != 1) { + return -1; + } + + char port_info[2048]; + + cmd_portinfo(val, port_info, sizeof(port_info)); + + if (input->reply) { + replace_char(port_info, '\n', ','); + port_info[strlen(port_info) - 1] = '\n'; + input->reply(input, port_info, strlen(port_info)); + } else + plog_info("%s", port_info); + + return 0; +} + +static int parse_cmd_ring_info(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + cmd_ringinfo(lcores[i], task_id); + } + } + return 0; +} + +static int parse_cmd_port_stats(const char *str, struct input *input) +{ + unsigned val; + + if (sscanf(str, "%u", &val) != 1) { + return -1; + } + + struct get_port_stats s; + if (stats_port(val, &s)) { + plog_err("Invalid port %u\n", val); + return 0; + } + char buf[256]; + snprintf(buf, sizeof(buf), + "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"," + "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"," + "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"\n", + s.no_mbufs_diff, s.ierrors_diff + s.imissed_diff, + s.rx_bytes_diff, s.tx_bytes_diff, + s.rx_pkts_diff, s.tx_pkts_diff, + s.rx_tot, s.tx_tot, + s.no_mbufs_tot, s.ierrors_tot + s.imissed_tot, + s.last_tsc, s.prev_tsc); + plog_info("%s", buf); + if (input->reply) + input->reply(input, buf, strlen(buf)); + return 0; +} + +static int parse_cmd_core_stats(const char *str, struct 
input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + uint64_t tot_rx = stats_core_task_tot_rx(lcore_id, task_id); + uint64_t tot_tx = stats_core_task_tot_tx(lcore_id, task_id); + uint64_t tot_drop = stats_core_task_tot_drop(lcore_id, task_id); + uint64_t last_tsc = stats_core_task_last_tsc(lcore_id, task_id); + + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), + "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"\n", + tot_rx, tot_tx, tot_drop, last_tsc, rte_get_tsc_hz()); + input->reply(input, buf, strlen(buf)); + } + else { + plog_info("RX: %"PRIu64", TX: %"PRIu64", DROP: %"PRIu64"\n", + tot_rx, tot_tx, tot_drop); + } + } + } + return 0; +} + +static int parse_cmd_lat_stats(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (!task_is_mode(lcore_id, task_id, "lat", "")) { + plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id); + } + else { + struct stats_latency *stats = stats_latency_find(lcore_id, task_id); + struct stats_latency *tot = stats_latency_tot_find(lcore_id, task_id); + + uint64_t last_tsc = stats_core_task_last_tsc(lcore_id, task_id); + uint64_t lat_min_usec = time_unit_to_usec(&stats->min.time); + uint64_t lat_max_usec = time_unit_to_usec(&stats->max.time); + uint64_t tot_lat_min_usec = time_unit_to_usec(&tot->min.time); + uint64_t tot_lat_max_usec = time_unit_to_usec(&tot->max.time); + uint64_t lat_avg_usec = time_unit_to_usec(&stats->avg.time); + + if (input->reply) { + char buf[128]; + snprintf(buf, sizeof(buf), + 
"%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"\n", + lat_min_usec, + lat_max_usec, + lat_avg_usec, + tot_lat_min_usec, + tot_lat_max_usec, + last_tsc, + rte_get_tsc_hz()); + input->reply(input, buf, strlen(buf)); + } + else { + plog_info("min: %"PRIu64", max: %"PRIu64", avg: %"PRIu64", min since reset: %"PRIu64", max since reset: %"PRIu64"\n", + lat_min_usec, + lat_max_usec, + lat_avg_usec, + tot_lat_min_usec, + tot_lat_max_usec); + } + } + } + } + return 0; +} + +static int parse_cmd_irq(const char *str, struct input *input) +{ + unsigned int i, c; + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (c = 0; c < nb_cores; c++) { + lcore_id = lcores[c]; + if (!task_is_mode(lcore_id, task_id, "irq", "")) { + plog_err("Core %u task %u is not in irq mode\n", lcore_id, task_id); + } else { + struct task_irq *task_irq = (struct task_irq *)(lcore_cfg[lcore_id].tasks_all[task_id]); + + task_irq_show_stats(task_irq, input); + } + } + } + return 0; +} + +static void task_lat_show_latency_histogram(uint8_t lcore_id, uint8_t task_id, struct input *input) +{ +#ifdef LATENCY_HISTOGRAM + uint64_t *buckets; + + stats_core_lat_histogram(lcore_id, task_id, &buckets); + + if (buckets == NULL) + return; + + if (input->reply) { + char buf[4096] = {0}; + for (size_t i = 0; i < 128; i++) + sprintf(buf+strlen(buf), "Bucket [%zu]: %"PRIu64"\n", i, buckets[i]); + input->reply(input, buf, strlen(buf)); + } + else { + for (size_t i = 0; i < 128; i++) + if (buckets[i]) + plog_info("Bucket [%zu]: %"PRIu64"\n", i, buckets[i]); + } +#else + plog_info("LATENCY_DETAILS disabled\n"); +#endif +} + +static int parse_cmd_lat_packets(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if 
(cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + if (!task_is_mode(lcore_id, task_id, "lat", "")) { + plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id); + } + else { + task_lat_show_latency_histogram(lcore_id, task_id, input); + } + } + } + return 0; +} + +static int parse_cmd_cgnat_public_hash(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + + if (!task_is_mode(lcore_id, task_id, "cgnat", "")) { + plog_err("Core %u task %u is not cgnat\n", lcore_id, task_id); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + task_cgnat_dump_public_hash((struct task_nat *)tbase); + } + } + } + return 0; +} + +static int parse_cmd_cgnat_private_hash(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + uint32_t val; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + + if (cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + + if (!task_is_mode(lcore_id, task_id, "cgnat", "")) { + plog_err("Core %u task %u is not cgnat\n", lcore_id, task_id); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + task_cgnat_dump_private_hash((struct task_nat *)tbase); + } + } + } + return 0; +} + +static int parse_cmd_accuracy(const char *str, struct input *input) +{ + unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores; + uint32_t val; + + if (parse_core_task(str, lcores, &task_id, &nb_cores)) + return -1; + if (!(str = strchr_skip_twice(str, ' '))) + return -1; + if (sscanf(str, "%"PRIu32"", &val) != 1) + return -1; + + if 
(cores_task_are_valid(lcores, task_id, nb_cores)) { + for (unsigned int i = 0; i < nb_cores; i++) { + lcore_id = lcores[i]; + + if (!task_is_mode(lcore_id, task_id, "lat", "")) { + plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id); + } + else { + struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id]; + + task_lat_set_accuracy_limit((struct task_lat *)tbase, val); + } + } + } + return 0; +} + +static int parse_cmd_rx_tx_info(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + cmd_rx_tx_info(); + return 0; +} + +static int parse_cmd_version(const char *str, struct input *input) +{ + if (strcmp(str, "") != 0) { + return -1; + } + + if (input->reply) { + uint64_t version = + ((uint64_t)VERSION_MAJOR) << 24 | + ((uint64_t)VERSION_MINOR) << 16 | + ((uint64_t)VERSION_REV) << 8; + + char buf[128]; + snprintf(buf, sizeof(buf), "%"PRIu64",%"PRIu64"\n", version, (uint64_t)RTE_VERSION); + input->reply(input, buf, strlen(buf)); + } + else { + plog_info("prox version: %d.%d, DPDK version: %s\n", + VERSION_MAJOR, VERSION_MINOR, + rte_version() + sizeof(RTE_VER_PREFIX)); + } + return 0; +} + +struct cmd_str { + const char *cmd; + const char *args; + const char *help; + int (*parse)(const char *args, struct input *input); +}; + +static int parse_cmd_help(const char *str, struct input *input); + +static struct cmd_str cmd_strings[] = { + {"history", "", "Print command history", parse_cmd_history}, + {"echo", "", "echo parameter, useful to resolving variables", parse_cmd_echo}, + {"quit", "", "Stop all cores and quit", parse_cmd_quit}, + {"quit_force", "", "Quit without waiting on cores to stop", parse_cmd_quit_force}, + {"help", "<substr>", "Show list of commands that have <substr> as a substring. 
If no substring is provided, all commands are shown.", parse_cmd_help}, + {"verbose", "<level>", "Set verbosity level", parse_cmd_verbose}, + {"thread info", "<core_id> <task_id>", "", parse_cmd_thread_info}, + {"mem info", "", "Show information about system memory (number of huge pages and addresses of these huge pages)", parse_cmd_mem_info}, + {"update interval", "<value>", "Update statistics refresh rate, in msec (must be >=10). Default is 1 second", parse_cmd_update_interval}, + {"rx tx info", "", "Print connections between tasks on all cores", parse_cmd_rx_tx_info}, + {"start", "<core list>|all <task_id>", "Start core <core_id> or all cores", parse_cmd_start}, + {"stop", "<core list>|all <task_id>", "Stop core <core id> or all cores", parse_cmd_stop}, + + {"dump", "<core id> <task id> <nb packets>", "Create a hex dump of <nb_packets> from <task_id> on <core_id> showing how packets have changed between RX and TX.", parse_cmd_trace}, + {"dump_rx", "<core id> <task id> <nb packets>", "Create a hex dump of <nb_packets> from <task_id> on <core_id> at RX", parse_cmd_dump_rx}, + {"dump_tx", "<core id> <task id> <nb packets>", "Create a hex dump of <nb_packets> from <task_id> on <core_id> at TX", parse_cmd_dump_tx}, + {"rx distr start", "", "Start gathering statistical distribution of received packets", parse_cmd_rx_distr_start}, + {"rx distr stop", "", "Stop gathering statistical distribution of received packets", parse_cmd_rx_distr_stop}, + {"rx distr reset", "", "Reset gathered statistical distribution of received packets", parse_cmd_rx_distr_reset}, + {"rx distr show", "", "Display gathered statistical distribution of received packets", parse_cmd_rx_distr_show}, + {"tx distr start", "", "Start gathering statistical distribution of xmitted packets", parse_cmd_tx_distr_start}, + {"tx distr stop", "", "Stop gathering statistical distribution of xmitted packets", parse_cmd_tx_distr_stop}, + {"tx distr reset", "", "Reset gathered statistical distribution of xmitted 
packets", parse_cmd_tx_distr_reset}, + {"tx distr show", "", "Display gathered statistical distribution of xmitted packets", parse_cmd_tx_distr_show}, + + {"rate", "<port id> <queue id> <rate>", "rate does not include preamble, SFD and IFG", parse_cmd_rate}, + {"count","<core id> <task id> <count>", "Generate <count> packets", parse_cmd_count}, + {"bypass", "<core_id> <task_id>", "Bypass task", parse_cmd_bypass}, + {"reconnect", "<core_id> <task_id>", "Reconnect task", parse_cmd_reconnect}, + {"pkt_size", "<core_id> <task_id> <pkt_size>", "Set the packet size to <pkt_size>", parse_cmd_pkt_size}, + {"speed", "<core_id> <task_id> <speed percentage>", "Change the speed to <speed percentage> at which packets are being generated on core <core_id> in task <task_id>.", parse_cmd_speed}, + {"speed_byte", "<core_id> <task_id> <speed>", "Change speed to <speed>. The speed is specified in units of bytes per second.", parse_cmd_speed_byte}, + {"set value", "<core_id> <task_id> <offset> <value> <value_len>", "Set <value_len> bytes to <value> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_value}, + {"set random", "<core_id> <task_id> <offset> <random_str> <value_len>", "Set <value_len> bytes to <rand_str> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_random}, + {"reset values all", "", "Undo all \"set value\" commands on all cores/tasks", parse_cmd_reset_values_all}, + {"reset randoms all", "", "Undo all \"set random\" commands on all cores/tasks", parse_cmd_reset_randoms_all}, + {"reset values", "<core id> <task id>", "Undo all \"set value\" commands on specified core/task", parse_cmd_reset_values}, + + {"arp add", "<core id> <task id> <port id> <gre id> <svlan> <cvlan> <ip addr> <mac addr> <user>", "Add a single ARP entry into a CPE table on <core id>/<task id>.", parse_cmd_arp_add}, + {"rule add", "<core id> <task id> svlan_id&mask cvlan_id&mask ip_proto&mask source_ip/prefix destination_ip/prefix range 
dport_range action", "Add a rule to the ACL table on <core id>/<task id>", parse_cmd_rule_add}, + {"route add", "<core id> <task id> <ip/prefix> <next hop id>", "Add a route to the routing table on core <core id> <task id>. Example: route add 10.0.16.0/24 9", parse_cmd_route_add}, + {"gateway ip", "<core id> <task id> <ip>", "Define/Change IP address of destination gateway on core <core id> <task id>.", parse_cmd_gateway_ip}, + {"local ip", "<core id> <task id> <ip>", "Define/Change IP address of destination gateway on core <core id> <task id>.", parse_cmd_local_ip}, + + {"pps unit", "", "Change core stats pps unit", parse_cmd_pps_unit}, + {"reset stats", "", "Reset all statistics", parse_cmd_reset_stats}, + {"reset lat stats", "", "Reset all latency statistics", parse_cmd_reset_lat_stats}, + {"tot stats", "", "Print total RX and TX packets", parse_cmd_tot_stats}, + {"tot ierrors tot", "", "Print total number of ierrors since reset", parse_cmd_tot_ierrors_tot}, + {"tot imissed tot", "", "Print total number of imissed since reset", parse_cmd_tot_imissed_tot}, + {"lat stats", "<core id> <task id>", "Print min,max,avg latency as measured during last sampling interval", parse_cmd_lat_stats}, + {"irq stats", "<core id> <task id>", "Print irq related infos", parse_cmd_irq}, + {"lat packets", "<core id> <task id>", "Print the latency for each of the last set of packets", parse_cmd_lat_packets}, + {"accuracy limit", "<core id> <task id> <nsec>", "Only consider latency of packets that were measured with an error no more than <nsec>", parse_cmd_accuracy}, + {"core stats", "<core id> <task id>", "Print rx/tx/drop for task <task id> running on core <core id>", parse_cmd_core_stats}, + {"port_stats", "<port id>", "Print rate for no_mbufs, ierrors + imissed, rx_bytes, tx_bytes, rx_pkts, tx_pkts; totals for RX, TX, no_mbufs, ierrors + imissed for port <port id>", parse_cmd_port_stats}, + {"read reg", "", "Read register", parse_cmd_read_reg}, + {"write reg", "", "Read register", 
parse_cmd_write_reg}, + {"set vlan offload", "", "Set Vlan offload", parse_cmd_set_vlan_offload}, + {"set vlan filter", "", "Set Vlan filter", parse_cmd_set_vlan_filter}, + {"reset cache", "", "Reset cache", parse_cmd_cache_reset}, + {"set cache class mask", "<core id> <class> <mask>", "Set cache class mask for <core id>", parse_cmd_set_cache_class_mask}, + {"get cache class mask", "<core id> <class>", "Get cache class mask", parse_cmd_get_cache_class_mask}, + {"set cache class", "<core id> <class>", "Set cache class", parse_cmd_set_cache_class}, + {"get cache class", "<core id>", "Get cache class", parse_cmd_get_cache_class}, + {"get cache mask", "<core id>", "Get cache mask", parse_cmd_get_cache_mask}, + {"reset port", "", "Reset port", parse_cmd_reset_port}, + {"ring info all", "", "Get information about ring, such as ring size and number of elements in the ring", parse_cmd_ring_info_all}, + {"ring info", "<core id> <task id>", "Get information about ring on core <core id> in task <task id>, such as ring size and number of elements in the ring", parse_cmd_ring_info}, + {"port info", "<port id> [brief?]", "Get port related information, such as MAC address, socket, number of descriptors..., . Adding \"brief\" after command prints short version of output.", parse_cmd_port_info}, + {"port up", "<port id>", "Set the port up", parse_cmd_port_up}, + {"port down", "<port id>", "Set the port down", parse_cmd_port_down}, + {"port link state", "<port id>", "Get link state (up or down) for port", parse_cmd_port_link_state}, + {"port xstats", "<port id>", "Get extra statistics for the port", parse_cmd_xstats}, + {"stats", "<stats_path>", "Get stats as sepcified by <stats_path>. 
A comma-separated list of <stats_path> can be supplied", parse_cmd_stats}, + {"cgnat dump public hash", "<core id> <task id>", "Dump cgnat public hash table", parse_cmd_cgnat_public_hash}, + {"cgnat dump private hash", "<core id> <task id>", "Dump cgnat private hash table", parse_cmd_cgnat_private_hash}, + {"delay_us", "<core_id> <task_id> <delay_us>", "Set the delay in usec for the impair mode to <delay_us>", parse_cmd_delay_us}, + {"random delay_us", "<core_id> <task_id> <random delay_us>", "Set the delay in usec for the impair mode to <random delay_us>", parse_cmd_random_delay_us}, + {"probability", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_probability}, + {"version", "", "Show version", parse_cmd_version}, + {0,0,0,0}, +}; + +static int parse_cmd_help(const char *str, struct input *input) +{ + /* str contains the arguments, all commands that have str as a + substring will be shown. */ + size_t len, len2, longest_cmd = 0; + for (size_t i = 0; i < cmd_parser_n_cmd(); ++i) { + if (longest_cmd <strlen(cmd_strings[i].cmd)) + longest_cmd = strlen(cmd_strings[i].cmd); + } + /* A single call to log will be executed after the help string + has been built. The reason for this is to make use of the + built-in pager. 
*/ + char buf[32768] = {0}; + + for (size_t i = 0; i < cmd_parser_n_cmd(); ++i) { + int is_substr = 0; + const size_t cmd_len = strlen(cmd_strings[i].cmd); + for (size_t j = 0; j < cmd_len; ++j) { + is_substr = 1; + for (size_t k = 0; k < strlen(str); ++k) { + if (str[k] != (cmd_strings[i].cmd + j)[k]) { + is_substr = 0; + break; + } + } + if (is_substr) + break; + } + if (!is_substr) + continue; + + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%s", cmd_strings[i].cmd); + len = strlen(cmd_strings[i].cmd); + while (len < longest_cmd) { + len++; + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " "); + } + + if (strlen(cmd_strings[i].args)) { + char tmp[256] = {0}; + strncpy(tmp, cmd_strings[i].args, 128); + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "Arguments: %s\n", tmp); + len2 = len; + if (strlen(cmd_strings[i].help)) { + while (len2) { + len2--; + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " "); + } + } + } + + if (strlen(cmd_strings[i].help)) { + int add = 0; + const char *h = cmd_strings[i].help; + do { + if (add) { + len2 = len; + while (len2) { + len2--; + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " "); + } + } + char tmp[128] = {0}; + const size_t max_len = strlen(h) > 80? 80 : strlen(h); + size_t len3 = max_len; + if (len3 == 80) { + while (len3 && h[len3] != ' ') + len3--; + if (len3 == 0) + len3 = max_len; + } + + strncpy(tmp, h, len3); + h += len3; + while (h[0] == ' ' && strlen(h)) + h++; + + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%s\n", tmp); + add = 1; + } while(strlen(h)); + } + if (strlen(cmd_strings[i].help) == 0&& strlen(cmd_strings[i].args) == 0) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "\n"); + } + } + plog_info("%s", buf); + + return 0; +} + +const char *cmd_parser_cmd(size_t i) +{ + i = i < cmd_parser_n_cmd()? 
i: cmd_parser_n_cmd(); + return cmd_strings[i].cmd; +} + +size_t cmd_parser_n_cmd(void) +{ + return sizeof(cmd_strings)/sizeof(cmd_strings[0]) - 1; +} + +void cmd_parser_parse(const char *str, struct input *input) +{ + size_t skip; + + for (size_t i = 0; i < cmd_parser_n_cmd(); ++i) { + skip = strlen(cmd_strings[i].cmd); + if (strncmp(cmd_strings[i].cmd, str, skip) == 0 && + (str[skip] == ' ' || str[skip] == 0)) { + while (str[skip] == ' ') + skip++; + + if (cmd_strings[i].parse(str + skip, input) != 0) { + plog_warn("Invalid syntax for command '%s': %s %s\n", + cmd_strings[i].cmd, cmd_strings[i].args, cmd_strings[i].help); + } + return ; + } + } + + plog_err("Unknown command: '%s'\n", str); +} diff --git a/VNFs/DPPD-PROX/cmd_parser.h b/VNFs/DPPD-PROX/cmd_parser.h new file mode 100644 index 00000000..05284bb7 --- /dev/null +++ b/VNFs/DPPD-PROX/cmd_parser.h @@ -0,0 +1,29 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _CMD_PARSER_H_ +#define _CMD_PARSER_H_ + +#include <stddef.h> + +struct input; +void cmd_parser_parse(const char *str, struct input *input); +const char *cmd_parser_cmd(size_t i); +size_t cmd_parser_n_cmd(void); +int task_is_mode(uint32_t lcore_id, uint32_t task_id, const char *mode, const char *sub_mode); +int task_is_sub_mode(uint32_t lcore_id, uint32_t task_id, const char *sub_mode); + +#endif /* _CMD_PARSER_H_ */ diff --git a/VNFs/DPPD-PROX/commands.c b/VNFs/DPPD-PROX/commands.c new file mode 100644 index 00000000..93acc62a --- /dev/null +++ b/VNFs/DPPD-PROX/commands.c @@ -0,0 +1,1016 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> +#include <rte_table_hash.h> +#include <rte_version.h> +#include <rte_malloc.h> + +#include "prox_malloc.h" +#include "display.h" +#include "commands.h" +#include "log.h" +#include "run.h" +#include "lconf.h" +#include "hash_utils.h" +#include "prox_cfg.h" +#include "prox_port_cfg.h" +#include "defines.h" +#include "handle_qos.h" +#include "handle_qinq_encap4.h" +#include "quit.h" +#include "input.h" +#include "rw_reg.h" +#include "cqm.h" +#include "stats_core.h" + +void start_core_all(int task_id) +{ + uint32_t cores[RTE_MAX_LCORE]; + uint32_t lcore_id; + char tmp[256]; + int cnt = 0; + + prox_core_to_str(tmp, sizeof(tmp), 0); + plog_info("Starting cores: %s\n", tmp); + + lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) { + cores[cnt++] = lcore_id; + } + start_cores(cores, cnt, task_id); +} + +void stop_core_all(int task_id) +{ + uint32_t cores[RTE_MAX_LCORE]; + uint32_t lcore_id; + char tmp[256]; + int cnt = 0; + + prox_core_to_str(tmp, sizeof(tmp), 0); + plog_info("Stopping cores: %s\n", tmp); + + lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) { + cores[cnt++] = lcore_id; + } + + stop_cores(cores, cnt, task_id); +} + +static void warn_inactive_cores(uint32_t *cores, int count, const char *prefix) +{ + for (int i = 0; i < count; ++i) { + if (!prox_core_active(cores[i], 0)) { + plog_warn("%s %u: core is not active\n", prefix, cores[i]); + } + } +} + +static inline int wait_command_handled(struct lcore_cfg *lconf) +{ + uint64_t t1 = rte_rdtsc(), t2; + while (lconf_is_req(lconf)) { + t2 = rte_rdtsc(); + if (t2 - t1 > 5 * rte_get_tsc_hz()) { + // Failed to handle command ... 
+ for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targs = &lconf->targs[task_id]; + if (!(targs->flags & TASK_ARG_DROP)) { + plogx_err("Failed to handle command - task is in NO_DROP and might be stuck...\n"); + return - 1; + } + } + plogx_err("Failed to handle command\n"); + return -1; + } + } + return 0; +} +void start_cores(uint32_t *cores, int count, int task_id) +{ + int n_started_cores = 0; + uint32_t started_cores[RTE_MAX_LCORE]; + + warn_inactive_cores(cores, count, "Can't start core"); + + for (int i = 0; i < count; ++i) { + struct lcore_cfg *lconf = &lcore_cfg[cores[i]]; + + if (lconf->n_tasks_run != lconf->n_tasks_all) { + + lconf->msg.type = LCONF_MSG_START; + lconf->msg.task_id = task_id; + lconf_set_req(lconf); + if (task_id == -1) + plog_info("Starting core %u (all tasks)\n", cores[i]); + else + plog_info("Starting core %u task %u\n", cores[i], task_id); + started_cores[n_started_cores++] = cores[i]; + lconf->flags |= LCONF_FLAG_RUNNING; + rte_eal_remote_launch(lconf_run, NULL, cores[i]); + } + else { + plog_warn("Core %u is already running all its tasks\n", cores[i]); + } + } + + /* This function is blocking, so detect when each core has + consumed the message. 
*/ + for (int i = 0; i < n_started_cores; ++i) { + struct lcore_cfg *lconf = &lcore_cfg[started_cores[i]]; + plog_info("Waiting for core %u to start...", started_cores[i]); + if (wait_command_handled(lconf) == -1) return; + plog_info(" OK\n"); + } +} + +void stop_cores(uint32_t *cores, int count, int task_id) +{ + int n_stopped_cores = 0; + uint32_t stopped_cores[RTE_MAX_LCORE]; + uint32_t c; + + warn_inactive_cores(cores, count, "Can't stop core"); + + for (int i = 0; i < count; ++i) { + struct lcore_cfg *lconf = &lcore_cfg[cores[i]]; + if (lconf->n_tasks_run) { + if (wait_command_handled(lconf) == -1) return; + + lconf->msg.type = LCONF_MSG_STOP; + lconf->msg.task_id = task_id; + lconf_set_req(lconf); + stopped_cores[n_stopped_cores++] = cores[i]; + } + } + + for (int i = 0; i < n_stopped_cores; ++i) { + c = stopped_cores[i]; + struct lcore_cfg *lconf = &lcore_cfg[c]; + if (wait_command_handled(lconf) == -1) return; + + if (lconf->n_tasks_run == 0) { + plog_info("All tasks stopped on core %u, waiting for core to stop...", c); + rte_eal_wait_lcore(c); + plog_info(" OK\n"); + lconf->flags &= ~LCONF_FLAG_RUNNING; + } + else { + plog_info("Stopped task %u on core %u\n", task_id, c); + } + } +} + +struct size_unit { + uint64_t val; + uint64_t frac; + char unit[8]; +}; + +static struct size_unit to_size_unit(uint64_t bytes) +{ + struct size_unit ret; + + if (bytes > 1 << 30) { + ret.val = bytes >> 30; + ret.frac = ((bytes - (ret.val << 30)) * 1000) / (1 << 30); + strcpy(ret.unit, "GB"); + } + else if (bytes > 1 << 20) { + ret.val = bytes >> 20; + ret.frac = ((bytes - (ret.val << 20)) * 1000) / (1 << 20); + strcpy(ret.unit, "MB"); + } + else if (bytes > 1 << 10) { + ret.val = bytes >> 10; + ret.frac = (bytes - (ret.val << 10)) * 1000 / (1 << 10); + strcpy(ret.unit, "KB"); + } + else { + ret.val = bytes; + ret.frac = 0; + strcpy(ret.unit, "B"); + } + + return ret; +} + +void cmd_mem_stats(void) +{ + struct rte_malloc_socket_stats sock_stats; + uint64_t v; + struct 
size_unit su; + + for (uint32_t i = 0; i < RTE_MAX_NUMA_NODES; ++i) { + if (rte_malloc_get_socket_stats(i, &sock_stats) < 0 || sock_stats.heap_totalsz_bytes == 0) + continue; + + plogx_info("Socket %u memory stats:\n", i); + su = to_size_unit(sock_stats.heap_totalsz_bytes); + plogx_info("\tHeap_size: %zu.%03zu %s\n", su.val, su.frac, su.unit); + su = to_size_unit(sock_stats.heap_freesz_bytes); + plogx_info("\tFree_size: %zu.%03zu %s\n", su.val, su.frac, su.unit); + su = to_size_unit(sock_stats.heap_allocsz_bytes); + plogx_info("\tAlloc_size: %zu.%03zu %s\n", su.val, su.frac, su.unit); + su = to_size_unit(sock_stats.greatest_free_size); + plogx_info("\tGreatest_free_size: %zu %s\n", su.val, su.unit); + plogx_info("\tAlloc_count: %u\n", sock_stats.alloc_count); + plogx_info("\tFree_count: %u\n", sock_stats.free_count); + } +} + +void cmd_mem_layout(void) +{ + const struct rte_memseg* memseg = rte_eal_get_physmem_layout(); + + plog_info("Memory layout:\n"); + for (uint32_t i = 0; i < RTE_MAX_MEMSEG; i++) { + if (memseg[i].addr == NULL) + break; + + const char *sz_str; + switch (memseg[i].hugepage_sz >> 20) { + case 2: + sz_str = "2MB"; + break; + case 1024: + sz_str = "1GB"; + break; + default: + sz_str = "??"; + } + + plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n", + i, + memseg[i].phys_addr, + memseg[i].phys_addr + memseg[i].len, + memseg[i].addr, + memseg[i].len/memseg[i].hugepage_sz, sz_str); + } +} + +void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx) +{ + plog_info("dump %u %u %u\n", lcore_id, task_id, nb_packets); + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } + else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) { + plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1); + } + else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + lconf->tasks_all[task_id]->aux->task_rt_dump.input 
= input; + + if (wait_command_handled(lconf) == -1) return; + if (rx && tx) + lconf->msg.type = LCONF_MSG_DUMP; + else if (rx) + lconf->msg.type = LCONF_MSG_DUMP_RX; + else if (tx) + lconf->msg.type = LCONF_MSG_DUMP_TX; + + if (rx || tx) { + lconf->msg.task_id = task_id; + lconf->msg.val = nb_packets; + lconf_set_req(lconf); + } + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets) +{ + plog_info("trace %u %u %u\n", lcore_id, task_id, nb_packets); + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } + else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) { + plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1); + } + else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + + lconf->msg.type = LCONF_MSG_TRACE; + lconf->msg.task_id = task_id; + lconf->msg.val = nb_packets; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_rx_bw_start(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE) { + plog_warn("rx bandwidt already on core %u\n", lcore_id); + } else { + + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_RX_BW_START; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_tx_bw_start(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE) { + plog_warn("tx bandwidth already running on core %u\n", lcore_id); + } else { + + struct lcore_cfg *lconf = 
&lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_TX_BW_START; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_rx_bw_stop(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_BW_ACTIVE)) { + plog_warn("rx bandwidth not running on core %u\n", lcore_id); + } else { + + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_RX_BW_STOP; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_tx_bw_stop(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if (!(lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_BW_ACTIVE)) { + plog_warn("tx bandwidth not running on core %u\n", lcore_id); + } else { + + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_TX_BW_STOP; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} +void cmd_rx_distr_start(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) { + plog_warn("rx distribution already xrunning on core %u\n", lcore_id); + } else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_RX_DISTR_START; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_tx_distr_start(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + 
} else if (lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) { + plog_warn("tx distribution already xrunning on core %u\n", lcore_id); + } else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_TX_DISTR_START; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_rx_distr_stop(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_RX_DISTR_ACTIVE) == 0) { + plog_warn("rx distribution not running on core %u\n", lcore_id); + } else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_RX_DISTR_STOP; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_tx_distr_stop(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else if ((lcore_cfg[lcore_id].flags & LCONF_FLAG_TX_DISTR_ACTIVE) == 0) { + plog_warn("tx distribution not running on core %u\n", lcore_id); + } else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_TX_DISTR_STOP; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_rx_distr_rst(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_RX_DISTR_RESET; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_tx_distr_rst(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + 
plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (wait_command_handled(lconf) == -1) return; + lconf->msg.type = LCONF_MSG_TX_DISTR_RESET; + lconf_set_req(lconf); + + if (lconf->n_tasks_run == 0) { + lconf_do_flags(lconf); + } + } +} + +void cmd_rx_distr_show(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else { + for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) { + struct task_base *t = lcore_cfg[lcore_id].tasks_all[i]; + plog_info("t[%u]: ", i); + for (uint32_t j = 0; j < sizeof(t->aux->rx_bucket)/sizeof(t->aux->rx_bucket[0]); ++j) { + plog_info("%u ", t->aux->rx_bucket[j]); + } + plog_info("\n"); + } + } +} +void cmd_tx_distr_show(uint32_t lcore_id) +{ + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } else { + for (uint32_t i = 0; i < lcore_cfg[lcore_id].n_tasks_all; ++i) { + struct task_base *t = lcore_cfg[lcore_id].tasks_all[i]; + uint64_t tot = 0, avg = 0; + for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) { + tot += t->aux->tx_bucket[j]; + avg += j * t->aux->tx_bucket[j]; + } + if (tot) { + avg = avg / tot; + } + plog_info("t[%u]: %lu: ", i, avg); + for (uint32_t j = 0; j < sizeof(t->aux->tx_bucket)/sizeof(t->aux->tx_bucket[0]); ++j) { + plog_info("%u ", t->aux->tx_bucket[j]); + } + plog_info("\n"); + } + } +} + +void cmd_ringinfo_all(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id = -1; + + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + cmd_ringinfo(lcore_id, task_id); + } + } +} + +void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id) +{ + struct lcore_cfg *lconf; + struct rte_ring *ring; + struct task_args* targ; + uint32_t count; + + if 
(!prox_core_active(lcore_id, 0)) { + plog_info("lcore %u is not active\n", lcore_id); + return; + } + lconf = &lcore_cfg[lcore_id]; + if (task_id >= lconf->n_tasks_all) { + plog_warn("Invalid task index %u: lcore %u has %u tasks\n", task_id, lcore_id, lconf->n_tasks_all); + return; + } + + targ = &lconf->targs[task_id]; + plog_info("Core %u task %u: %u rings\n", lcore_id, task_id, targ->nb_rxrings); + for (uint8_t i = 0; i < targ->nb_rxrings; ++i) { + ring = targ->rx_rings[i]; +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + count = ring->prod.mask + 1; +#else + count = ring->mask + 1; +#endif + plog_info("\tRing %u:\n", i); + plog_info("\t\tFlags: %s,%s\n", ring->flags & RING_F_SP_ENQ? "sp":"mp", ring->flags & RING_F_SC_DEQ? "sc":"mc"); + plog_info("\t\tMemory size: %zu bytes\n", rte_ring_get_memsize(count)); + plog_info("\t\tOccupied: %u/%u\n", rte_ring_count(ring), count); + } +} + +void cmd_port_up(uint8_t port_id) +{ + int err; + + if (!port_is_active(port_id)) { + return ; + } + + if ((err = rte_eth_dev_set_link_up(port_id)) == 0) { + plog_info("Bringing port %d up\n", port_id); + } + else { + plog_warn("Failed to bring port %d up with error %d\n", port_id, err); + } +} + +void cmd_port_down(uint8_t port_id) +{ + int err; + + if (!port_is_active(port_id)) { + return ; + } + + if ((err = rte_eth_dev_set_link_down(port_id)) == 0) { + plog_info("Bringing port %d down\n", port_id); + } + else { + plog_warn("Failed to bring port %d down with error %d\n", port_id, err); + } +} + +void cmd_xstats(uint8_t port_id) +{ +#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0) + int n_xstats; + struct rte_eth_xstat *eth_xstat = NULL; // id and value + struct rte_eth_xstat_name *eth_xstat_name = NULL; // only names + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + int rc; + + n_xstats = rte_eth_xstats_get(port_id, NULL, 0); + eth_xstat_name = prox_zmalloc(n_xstats * sizeof(*eth_xstat_name), port_cfg->socket); + PROX_ASSERT(eth_xstat_name); + rc = 
rte_eth_xstats_get_names(port_id, eth_xstat_name, n_xstats); + if ((rc < 0) || (rc > n_xstats)) { + if (rc < 0) { + plog_warn("Failed to get xstats_names on port %d with error %d\n", port_id, rc); + } else if (rc > n_xstats) { + plog_warn("Failed to get xstats_names on port %d: too many xstats (%d)\n", port_id, rc); + } + } + + eth_xstat = prox_zmalloc(n_xstats * sizeof(*eth_xstat), port_cfg->socket); + PROX_ASSERT(eth_xstat); + rc = rte_eth_xstats_get(port_id, eth_xstat, n_xstats); + if ((rc < 0) || (rc > n_xstats)) { + if (rc < 0) { + plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc); + } else if (rc > n_xstats) { + plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc); + } + } else { + for (int i=0;i<rc;i++) { + plog_info("%s: %ld\n", eth_xstat_name[i].name, eth_xstat[i].value); + } + } + if (eth_xstat_name) + prox_free(eth_xstat_name); + if (eth_xstat) + prox_free(eth_xstat); +#else +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + int n_xstats; + struct rte_eth_xstats *eth_xstats; + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + int rc; + + n_xstats = rte_eth_xstats_get(port_id, NULL, 0); + eth_xstats = prox_zmalloc(n_xstats * sizeof(*eth_xstats), port_cfg->socket); + PROX_ASSERT(eth_xstats); + rc = rte_eth_xstats_get(port_id, eth_xstats, n_xstats); + if ((rc < 0) || (rc > n_xstats)) { + if (rc < 0) { + plog_warn("Failed to get xstats on port %d with error %d\n", port_id, rc); + } else if (rc > n_xstats) { + plog_warn("Failed to get xstats on port %d: too many xstats (%d)\n", port_id, rc); + } + } else { + for (int i=0;i<rc;i++) { + plog_info("%s: %ld\n", eth_xstats[i].name, eth_xstats[i].value); + } + } + if (eth_xstats) + prox_free(eth_xstats); +#else + plog_warn("Failed to get xstats, xstats are not supported in this version of dpdk\n"); +#endif +#endif +} + +void cmd_portinfo(int port_id, char *dst, size_t max_len) +{ + char *end = dst + max_len; + + *dst = 0; + if (port_id == -1) { + uint8_t 
max_port_idx = prox_last_port_active() + 1; + + for (uint8_t port_id = 0; port_id < max_port_idx; ++port_id) { + if (!prox_port_cfg[port_id].active) { + continue; + } + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + + dst += snprintf(dst, end - dst, + "%2d:%10s; "MAC_BYTES_FMT"; %s\n", + port_id, + port_cfg->name, + MAC_BYTES(port_cfg->eth_addr.addr_bytes), + port_cfg->pci_addr); + } + return; + } + + if (!port_is_active(port_id)) { + return ; + } + + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + + dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id); + dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name); + dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name); + dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes)); + dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed); + dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up? "up" : "down"); + dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket); + dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr); + dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous? 
"yes" : "no"); + dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd); + dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq); + dst += snprintf(dst, end - dst, "\tMemory pools:\n"); + + for (uint8_t i = 0; i < 32; ++i) { + if (port_cfg->pool[i]) { + dst += snprintf(dst, end - dst, "\t\tname: %s (%p)\n", + port_cfg->pool[i]->name, port_cfg->pool[i]); + } + } +} + +void cmd_read_reg(uint8_t port_id, unsigned int id) +{ + unsigned int val, rc; + if (!port_is_active(port_id)) { + return ; + } + rc = read_reg(port_id, id, &val); + if (rc) { + plog_warn("Failed to read register %d on port %d\n", id, port_id); + } + else { + plog_info("Register 0x%08X : %08X \n", id, val); + } +} + +void cmd_reset_port(uint8_t portid) +{ + unsigned int rc; + if (!prox_port_cfg[portid].active) { + plog_info("port not active \n"); + return; + } + rte_eth_dev_stop(portid); + rc = rte_eth_dev_start(portid); + if (rc) { + plog_warn("Failed to restart port %d\n", portid); + } +} +void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val) +{ + if (!port_is_active(port_id)) { + return ; + } + + plog_info("writing 0x%08X %08X\n", id, val); + write_reg(port_id, id, val); +} + +void cmd_set_vlan_offload(uint8_t port_id, unsigned int val) +{ + if (!port_is_active(port_id)) { + return ; + } + + plog_info("setting vlan offload to %d\n", val); + if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) { + plog_info("wrong vlan offload value\n"); + } + int ret = rte_eth_dev_set_vlan_offload(port_id, val); + plog_info("rte_eth_dev_set_vlan_offload return %d\n", ret); +} + +void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val) +{ + if (!port_is_active(port_id)) { + return ; + } + + plog_info("setting vln filter for vlan %d to %d\n", id, val); + int ret = rte_eth_dev_vlan_filter(port_id, 
id, val); + plog_info("rte_eth_dev_vlan_filter return %d\n", ret); +} + +void cmd_thread_info(uint8_t lcore_id, uint8_t task_id) +{ + plog_info("thread_info %u %u \n", lcore_id, task_id); + if (lcore_id > RTE_MAX_LCORE) { + plog_warn("core_id too high, maximum allowed is: %u\n", RTE_MAX_LCORE); + } + if (!prox_core_active(lcore_id, 0)) { + plog_warn("lcore %u is not active\n", lcore_id); + return; + } + if (task_id >= lcore_cfg[lcore_id].n_tasks_all) { + plog_warn("task_id too high, should be in [0, %u]\n", lcore_cfg[lcore_id].n_tasks_all - 1); + return; + } + if (strcmp(lcore_cfg[lcore_id].targs[task_id].task_init->mode_str, "qos") == 0) { + struct task_base *task; + + task = lcore_cfg[lcore_id].tasks_all[task_id]; + plog_info("core %d, task %d: %d mbufs stored in QoS\n", lcore_id, task_id, + task_qos_n_pkts_buffered(task)); + +#ifdef ENABLE_EXTRA_USER_STATISTICS + } + else if (lcore_cfg[lcore_id].targs[task_id].mode == QINQ_ENCAP4) { + struct task_qinq_encap4 *task; + task = (struct task_qinq_encap4 *)(lcore_cfg[lcore_id].tasks_all[task_id]); + for (int i=0;i<task->n_users;i++) { + if (task->stats_per_user[i]) + plog_info("User %d: %d packets\n", i, task->stats_per_user[i]); + } +#endif + } + else { + // Only QoS thread info so far + plog_err("core %d, task %d: not a qos core (%p)\n", lcore_id, task_id, lcore_cfg[lcore_id].thread_x); + } +} + +void cmd_rx_tx_info(void) +{ + uint32_t lcore_id = -1; + while(prox_core_next(&lcore_id, 0) == 0) { + for (uint8_t task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; ++task_id) { + struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id]; + + plog_info("Core %u:", lcore_id); + if (targ->rx_port_queue[0].port != OUT_DISCARD) { + for (int i = 0; i < targ->nb_rxports; i++) { + plog_info(" RX port %u (queue %u)", targ->rx_port_queue[i].port, targ->rx_port_queue[i].queue); + } + } + else { + for (uint8_t j = 0; j < targ->nb_rxrings; ++j) { + plog_info(" RX ring[%u,%u] %p", task_id, j, targ->rx_rings[j]); + } + } + 
plog_info(" ==>"); + for (uint8_t j = 0; j < targ->nb_txports; ++j) { + plog_info(" TX port %u (queue %u)", targ->tx_port_queue[j].port, + targ->tx_port_queue[j].queue); + } + + for (uint8_t j = 0; j < targ->nb_txrings; ++j) { + plog_info(" TX ring %p", targ->tx_rings[j]); + } + + plog_info("\n"); + } + } +} +void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set) +{ + uint64_t tmp_rmid = 0; + cqm_assoc_read(lcore_id, &tmp_rmid); + *set = (uint32_t)(tmp_rmid >> 32); +} + +void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val) +{ + cat_get_class_mask(lcore_id, set, val); +} + +void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val) +{ + cat_set_class_mask(lcore_id, set, val); + lcore_cfg[lcore_id].cache_set = set; + uint32_t id = -1; + while(prox_core_next(&id, 0) == 0) { + if ((lcore_cfg[id].cache_set == set) && (rte_lcore_to_socket_id(id) == rte_lcore_to_socket_id(lcore_id))) { + plog_info("Updating mask for core %d to %d\n", id, set); + stats_update_cache_mask(id, val); + } + } +} + +void cmd_set_cache_class(uint32_t lcore_id, uint32_t set) +{ + uint64_t tmp_rmid = 0; + uint32_t val = 0; + cqm_assoc_read(lcore_id, &tmp_rmid); + cqm_assoc(lcore_id, (tmp_rmid & 0xffffffff) | ((set * 1L) << 32)); + cat_get_class_mask(lcore_id, set, &val); + stats_update_cache_mask(lcore_id, val); +} + +void cmd_cache_reset(void) +{ + uint8_t sockets[MAX_SOCKETS] = {0}; + uint8_t cores[MAX_SOCKETS] = {0}; + uint32_t mask = (1 << cat_get_num_ways()) - 1; + uint32_t lcore_id = -1, socket_id; + while(prox_core_next(&lcore_id, 0) == 0) { + cqm_assoc(lcore_id, 0); + socket_id = rte_lcore_to_socket_id(lcore_id); + if (socket_id < MAX_SOCKETS) { + sockets[socket_id] = 1; + cores[socket_id] = lcore_id; + } + stats_update_cache_mask(lcore_id, mask); + plog_info("Setting core %d to cache mask %x\n", lcore_id, mask); + lcore_cfg[lcore_id].cache_set = 0; + } + for (uint32_t s = 0; s < MAX_SOCKETS; s++) { + if (sockets[s]) + 
cat_reset_cache(cores[s]); + } + stats_lcore_assoc_rmid(); +} + +int bypass_task(uint32_t lcore_id, uint32_t task_id) +{ + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + struct task_args *targ, *starg, *dtarg; + struct rte_ring *ring = NULL; + + if (task_id >= lconf->n_tasks_all) + return -1; + + targ = &lconf->targs[task_id]; + if (targ->nb_txrings == 1) { + plog_info("Task has %d receive and 1 transmmit ring and can be bypassed, %d precedent tasks\n", targ->nb_rxrings, targ->n_prev_tasks); + // Find source task + for (unsigned int i = 0; i < targ->n_prev_tasks; i++) { + starg = targ->prev_tasks[i]; + for (unsigned int j = 0; j < starg->nb_txrings; j++) { + for (unsigned int k = 0; k < targ->nb_rxrings; k++) { + if (starg->tx_rings[j] == targ->rx_rings[k]) { + plog_info("bypassing ring %p and connecting it to %p\n", starg->tx_rings[j], targ->tx_rings[0]); + starg->tx_rings[j] = targ->tx_rings[0]; + struct task_base *tbase = starg->tbase; + tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j]; + } + } + } + } + } else { + plog_info("Task has %d receive and %d transmit ring and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings); + return -1; + } + + return 0; +} + +int reconnect_task(uint32_t lcore_id, uint32_t task_id) +{ + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + struct task_args *targ, *starg, *dtarg = NULL; + struct rte_ring *ring = NULL; + + if (task_id >= lconf->n_tasks_all) + return -1; + + targ = &lconf->targs[task_id]; + if (targ->nb_txrings == 1) { + // Find source task + for (unsigned int i = 0; i < targ->n_prev_tasks; i++) { + starg = targ->prev_tasks[i]; + for (unsigned int j = 0; j < starg->nb_txrings; j++) { + if (starg->tx_rings[j] == targ->tx_rings[0]) { + if (targ->n_prev_tasks == targ->nb_rxrings) { + starg->tx_rings[j] = targ->rx_rings[i]; + struct task_base *tbase = starg->tbase; + tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j]; + plog_info("Task has %d receive and 1 transmmit ring and can be reconnected, %d precedent 
tasks\n", targ->nb_rxrings, targ->n_prev_tasks); + } else if (targ->nb_rxrings == 1) { + starg->tx_rings[j] = targ->rx_rings[0]; + struct task_base *tbase = starg->tbase; + tbase->tx_params_sw.tx_rings[j] = starg->tx_rings[j]; + plog_info("Task has %d receive and 1 transmmit ring and ring %p can be reconnected, %d precedent tasks\n", targ->nb_rxrings, starg->tx_rings[j], targ->n_prev_tasks); + } else { + plog_err("Unexpected configuration: %d precedent tasks, %d rx rings\n", targ->n_prev_tasks, targ->nb_rxrings); + } + } + } + } + } else { + plog_info("Task has %d receive and %d transmit ring and cannot be bypassed\n", targ->nb_rxrings, targ->nb_txrings); + return -1; + } + + return 0; +} diff --git a/VNFs/DPPD-PROX/commands.h b/VNFs/DPPD-PROX/commands.h new file mode 100644 index 00000000..6c4a29a3 --- /dev/null +++ b/VNFs/DPPD-PROX/commands.h @@ -0,0 +1,70 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _COMMANDS_H_ +#define _COMMANDS_H_ + +#include <inttypes.h> + +struct input; + +/* command functions */ +void start_core_all(int task_id); +void stop_core_all(int task_id); +void start_cores(uint32_t *cores, int count, int task_id); +void stop_cores(uint32_t *cores, int count, int task_id); + +void cmd_trace(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets); +void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx); +void cmd_mem_stats(void); +void cmd_mem_layout(void); +void cmd_hashdump(uint8_t lcore_id, uint8_t task_id, uint32_t table_id); +void cmd_rx_distr_start(uint32_t lcore_id); +void cmd_rx_distr_stop(uint32_t lcore_id); +void cmd_rx_distr_rst(uint32_t lcore_id); +void cmd_rx_distr_show(uint32_t lcore_id); +void cmd_tx_distr_start(uint32_t lcore_id); +void cmd_tx_distr_stop(uint32_t lcore_id); +void cmd_tx_distr_rst(uint32_t lcore_id); +void cmd_tx_distr_show(uint32_t lcore_id); +void cmd_rx_bw_start(uint32_t lcore_id); +void cmd_tx_bw_start(uint32_t lcore_id); +void cmd_rx_bw_stop(uint32_t lcore_id); +void cmd_tx_bw_stop(uint32_t lcore_id); + +void cmd_portinfo(int port_id, char *dst, size_t max_len); +void cmd_port_up(uint8_t port_id); +void cmd_port_down(uint8_t port_id); +void cmd_xstats(uint8_t port_id); +void cmd_thread_info(uint8_t lcore_id, uint8_t task_id); +void cmd_ringinfo(uint8_t lcore_id, uint8_t task_id); +void cmd_ringinfo_all(void); +void cmd_rx_tx_info(void); +void cmd_read_reg(uint8_t port_id, uint32_t id); +void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val); +void cmd_set_vlan_filter(uint8_t port_id, unsigned int id, unsigned int val); +void cmd_set_vlan_offload(uint8_t port_id, unsigned int val); +void cmd_get_cache_class(uint32_t lcore_id, uint32_t *set); +void cmd_get_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t *val); +void cmd_set_cache_class_mask(uint32_t lcore_id, uint32_t set, uint32_t val); +void cmd_set_cache_class(uint32_t 
lcore_id, uint32_t set); +void cmd_cache_reset(void); + +void cmd_reset_port(uint8_t port_id); +int reconnect_task(uint32_t lcore_id, uint32_t task_id); +int bypass_task(uint32_t lcore_id, uint32_t task_id); + +#endif /* _COMMANDS_H_ */ diff --git a/VNFs/DPPD-PROX/config/acl_table.lua b/VNFs/DPPD-PROX/config/acl_table.lua new file mode 100644 index 00000000..cebe3b77 --- /dev/null +++ b/VNFs/DPPD-PROX/config/acl_table.lua @@ -0,0 +1,36 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +five_tuple = function(ip_proto, src, dst, sport, dport, action) + return { + ip_proto = ip_proto, + src_cidr = src, + dst_cidr = dst, + sport = sport, + dport = dport, + action = action, + } +end + +return { + five_tuple(val_mask(17, 0xff), cidr("192.168.0.0/18"), cidr("10.10.0.0/16"), val_range(0,65535), val_range(0,65535), "allow"), + five_tuple(val_mask(17, 0xff), cidr("10.10.0.0/18"), cidr("192.168.0.0/16"), val_range(0,65535), val_range(0,65535), "allow"), + five_tuple(val_mask(17, 0xff), cidr("192.168.0.0/18"), cidr("74.0.0.0/7"), val_range(0,65535), val_range(0,65535), "allow"), + five_tuple(val_mask(17, 0xff), cidr("1.1.1.0/24"), cidr("1.2.3.0/24"), val_range(0,65535), val_range(0,65535), "allow"), + five_tuple(val_mask(17, 0xff), cidr("192.168.1.0/24"), cidr("192.168.1.0/24"), val_range(0,65535), val_range(0,65535), "allow"), + five_tuple(val_mask(6, 0xf), cidr("10.0.0.0/18"), cidr("192.168.0.0/16"), val_range(0,65535), val_range(0,65535), "allow"), + five_tuple(val_mask(6, 0xf), cidr("192.168.0.0/18"), cidr("10.0.0.0/16"), val_range(0,65535), val_range(0,65535), "allow"), +} diff --git a/VNFs/DPPD-PROX/config/bng-1q-4ports.cfg b/VNFs/DPPD-PROX/config/bng-1q-4ports.cfg new file mode 100644 index 00000000..661d4aa0 --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-1q-4ports.cfg @@ -0,0 +1,130 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-4ports.cfg. The difference is that on each of the interfaces, only +; one queue is used. Use-cases for this configuration include running in a +; virtualized environment using SRIOV. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=hardware +[port 1] +name=inet0 +mac=hardware +[port 2] +name=cpe1 +mac=hardware +[port 3] +name=inet1 +mac=hardware +[variables] +;$wk=3s0,3s0h; 2 workers +;$wk=3s0-4s0,3s0h-4s0h; 4 workers +;$wk=3s0-5s0,3s0h-5s0h; 6 workers +$wk=5s0-8s0,5s0h-8s0h; 8 workers +;$wk=3s0-7s0,3s0h-7s0h; 10 workers +;$wk=3s0-8s0,3s0h-8s0h; 12 workers +[lua] +lpm4 = dofile("ipv4.lua") +user_table = dofile("user_table-65K-bng.lua") +[defaults] +mempool size=16K + +[global] +start time=20 +name=BNG (1Q) + +[core 0s0] +mode=master +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=nop +rx ring=yes +tx port=cpe0 +task=1 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp + +[core 2s0] +name=LB-inet +task=0 +mode=nop +rx ring=yes +tx port=inet0 +task=1 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 + +[core 3s0] +name=LB-cpe +task=0 +mode=nop +rx ring=yes +tx port=cpe1 +task=1 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp + +[core 4s0] +name=LB-inet +task=0 +mode=nop +rx ring=yes +tx port=inet1 +task=1 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ 
encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx cores from routing table=2s0,4s0 +route table=lpm4 +local ipv4=21.22.23.24 +user table=user_table +handle arp=yes + +task=1 +mode=qinqencapv4 +rx ring=yes +user table=user_table +tx cores from cpe table=1s0,3s0 remap=cpe0,cpe1 diff --git a/VNFs/DPPD-PROX/config/bng-4ports.cfg b/VNFs/DPPD-PROX/config/bng-4ports.cfg new file mode 100644 index 00000000..6ef195ae --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-4ports.cfg @@ -0,0 +1,125 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a simplified Border Network Gateway (BNG) on the +; first socket (socket 0). Four load balancers (two physical cores, four logical +; cores) and eight workers (four physical cores, eight logical cores) are set +; up. The number of workers can be changed by uncommenting one of the lines in +; the [variables] section. If this configuration is to be used on a system with +; few cores, the number of workers need to be reduced. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=hardware +[port 1] +name=inet0 +mac=hardware +[port 2] +name=cpe1 +mac=hardware +[port 3] +name=inet1 +mac=hardware + +[lua] +lpm4 = dofile("ipv4.lua") +user_table = dofile("user_table-65K-bng.lua") +[variables] +;uncomment one of the following to change the number of workers +;$wk=3s0,3s0h; 2 workers +;$wk=3s0-4s0,3s0h-4s0h; 4 workers +;$wk=3s0-5s0,3s0h-5s0h; 6 workers +$wk=3s0-6s0,3s0h-6s0h; 8 workers +;$wk=3s0-7s0,3s0h-7s0h; 10 workers +;$wk=3s0-8s0,3s0h-8s0h; 12 workers + +[defaults] +mempool size=16K +qinq tag=0x0081 +[global] +start time=20 +name=BNG +[core 0s0] +mode=master +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0 proto=arp +drop=no + +[core 1s0h] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=no + +[core 2s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0 proto=arp +drop=no + +[core 2s0h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 +drop=no + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1 +route table=lpm4 +local ipv4=21.22.23.24 +handle arp=yes +user table=user_table +drop=no +fast path handle arp=yes + +task=1 +mode=qinqencapv4 +rx ring=yes ; gre received from internal queue +tx ports from cpe 
table=cpe0,cpe1 +user table=user_table +drop=no diff --git a/VNFs/DPPD-PROX/config/bng-8ports-17cores.cfg b/VNFs/DPPD-PROX/config/bng-8ports-17cores.cfg new file mode 100644 index 00000000..295aa131 --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-8ports-17cores.cfg @@ -0,0 +1,222 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-qos-4ports.cfg, but on 8 ports instead of 4 and on CPU socket 1 +; instead of socket 0. 
+;; + +[eal options] +-n=6 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:01:00:00:01 +tx desc=$txd +promiscuous=$promiscuous + +[port 1] +name=inet0 +mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 2] +name=cpe1 +mac=00:00:01:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 3] +name=inet1 +mac=00:00:01:00:00:04 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 4] +name=cpe2 +mac=00:00:02:00:00:01 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 5] +name=inet2 +mac=00:00:02:00:00:02 +tx desc=$txd +promiscuous=$promiscuous + +[port 6] +name=cpe3 +mac=00:00:02:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 7] +name=inet3 +mac=00:00:02:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[variables] +$wk=9s0-16s0,9s0h-16s0h +$lb_drop=no +$wt_drop=no +$rxd=256 +$txd=256 +$promiscuous=yes +$mcs=128 +$rs=1024 + +[defaults] +mempool size=16K +qinq tag=0xa888 + +[lua] +lpm4 = dofile("ipv4-4ports.lua") +user_table = dofile("user_table-131K-bng.lua") +[global] +start time=20 +name=BNG + QoS +unique mempool per socket=yes +mp rings=yes + +[core 0s0] +mode=master + +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe2 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 4s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe3 +tx cores=(${wk})t0 
proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 5s0] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 6s0] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet1 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 7s0] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet2 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 8s0] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +local ipv4=21.22.23.24 +drop=$wt_drop +handle arp=yes +cpe table timeout ms=15000000 +ctrl path polling frequency=10000 +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx ports from cpe table=cpe0,cpe1,cpe2,cpe3 +drop=$wt_drop +ctrl path polling frequency=10000 +user table=user_table +ring size=$rs diff --git a/VNFs/DPPD-PROX/config/bng-8ports-25cores.cfg b/VNFs/DPPD-PROX/config/bng-8ports-25cores.cfg new file mode 100644 index 00000000..602e0a3c --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-8ports-25cores.cfg @@ -0,0 +1,222 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-qos-4ports.cfg, but on 8 ports instead of 4 and on CPU socket 1 +; instead of socket 0. +;; + +[eal options] +-n=6 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:01:00:00:01 +tx desc=$txd +promiscuous=$promiscuous + +[port 1] +name=inet0 +mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 2] +name=cpe1 +mac=00:00:01:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 3] +name=inet1 +mac=00:00:01:00:00:04 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 4] +name=cpe2 +mac=00:00:02:00:00:01 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 5] +name=inet2 +mac=00:00:02:00:00:02 +tx desc=$txd +promiscuous=$promiscuous + +[port 6] +name=cpe3 +mac=00:00:02:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 7] +name=inet3 +mac=00:00:02:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[variables] +$wk=9s0-24s0,9s0h-24s0h +$lb_drop=no +$wt_drop=no +$rxd=256 +$txd=256 +$promiscuous=yes +$mcs=128 +$rs=1024 + +[defaults] +mempool size=16K +qinq tag=0xa888 + +[lua] +lpm4 = dofile("ipv4-4ports.lua") +user_table = dofile("user_table-131K-bng.lua") +[global] +start time=20 +name=BNG + QoS +unique mempool per socket=yes +mp rings=yes + +[core 0s0] +mode=master + +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet 
#### +[core 1s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe2 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 4s0] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe3 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 5s0] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 6s0] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet1 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 7s0] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet2 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 8s0] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +local ipv4=21.22.23.24 +drop=$wt_drop +handle arp=yes +cpe table timeout ms=15000000 +ctrl path polling frequency=10000 +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx ports from cpe 
table=cpe0,cpe1,cpe2,cpe3 +drop=$wt_drop +ctrl path polling frequency=10000 +user table=user_table +ring size=$rs diff --git a/VNFs/DPPD-PROX/config/bng-8ports.cfg b/VNFs/DPPD-PROX/config/bng-8ports.cfg new file mode 100644 index 00000000..07d31cbd --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-8ports.cfg @@ -0,0 +1,231 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-4ports.cfg, but on 8 ports instead of 4 and on CPU socket 1 +; instead of socket 0. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[variables] +;uncomment one of the following to change the number of worker threads +$lb_drop=yes +$wk_drop=yes +$rxd=128 +$txd=128 +$promiscuous=yes +$mp=16K +$mcs=512 +$rs=128 + +[port 0] +name=cpe0 +mac=00:00:01:00:00:01 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 1] +name=inet0 +mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 2] +name=cpe1 +mac=00:00:01:00:00:03 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 3] +name=inet1 +mac=00:00:01:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 4] +name=cpe2 +mac=00:00:01:00:00:01 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 5] +name=inet2 +mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 6] +name=cpe3 +mac=00:00:01:00:00:03 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 7] +name=inet3 +mac=00:00:01:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous +[lua] +lpm4 = dofile("ipv4-4ports.lua") +dscp_table = dofile("dscp.lua") +user_table = dofile("user_table-131K-bng.lua") + +wk="5s1-9s1,5s1h-9s1h" +name="BNG (" .. task_count(wk) .. 
" workers)" + +[defaults] +mempool size=$mp +qinq tag=0xa888 + +[global] +start time=10 +duration time=0 +name=$name +unique mempool per socket=yes +shuffle=yes + +[core 0s1] +task=0 +mode=master +tx cores=(${wk})t0m + +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s1] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 1s1h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s1] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet2 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s1h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s1] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s1h] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 4s1] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe2 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 4s1h] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe3 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +;***************************************************************************************** +;#### Worker Threads receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ 
encapsulation + use learned mac +[core $wk] +name=WK +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +local ipv4=21.22.23.24 +drop=$wk_drop +handle arp=yes +cpe table timeout ms=15000 +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes ; gre received from internal queue +tx ports from cpe table=cpe0,cpe1,cpe2,cpe3 +drop=$wk_drop +user table=user_table diff --git a/VNFs/DPPD-PROX/config/bng-no-cpu-topology-4ports.cfg b/VNFs/DPPD-PROX/config/bng-no-cpu-topology-4ports.cfg new file mode 100644 index 00000000..02598098 --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-no-cpu-topology-4ports.cfg @@ -0,0 +1,102 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:03 +[port 2] +name=cpe1 +mac=00:00:00:00:00:02 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 +[variables] +$wk=3-6,9-12 + +[defaults] +mempool size=16K +[lua] +lpm4 = dofile("ipv4.lua") +user_table = dofile("user_table-65K-bng.lua") +[global] +start time=20 +name=vBNG + +[core 0] +mode=master +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp + +[core 7] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 + +[core 2] +name=LB-cpe +task=0 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp + +[core 8] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1 +route table=lpm4 +local ipv4=21.22.23.24 +handle arp=yes +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx ports from cpe table=cpe0,cpe1 +user table=user_table diff --git a/VNFs/DPPD-PROX/config/bng-ovs-usv-4ports.cfg b/VNFs/DPPD-PROX/config/bng-ovs-usv-4ports.cfg new file mode 100644 index 00000000..48321c0d --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-ovs-usv-4ports.cfg @@ -0,0 +1,137 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the 
Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration is provided for virtualized environments running on top of +; a soft-switch. Specifically, ingredients are Open vSwitch (openvswitch.org) +; and Qemu version 1.6.2. Note that since the currently supported version of +; Open vSwitch does not handle all the protocols that are used in the full BNG, +; PROX has to be recompiled to use different packet processing paths as a +; workaround. DPDK version 1.8.0 should be used with this configuration and it +; has to be compiled with COMBINE_LIBS enabled: +; make install T=$RTE_TARGET CONFIG_RTE_BUILD_COMBINE_LIBS=y CONFIG_RTE_LIBRTE_VHOST=y +; The following commands demonstrate how to set up Open vSwitch: +; git clone https://github.com/openvswitch/ovs.git +; cd ovs +; git checkout 5c62a855c7bb24424cbe7ec48ecf2f128db8b102 +; ./boot.sh && ./configure --with-dpdk=$RTE_SDK/$RTE_TARGET --disable-ssl && make +; This configuration is intended to be used in a VM with 4 virtual ports. This +; means that 4 virtual ports (with type dpdkvhost) and 4 physical ports (with +; type dpdk) will need to be added and connected through open-flow commands in +; Open vSwitch. 
After Open vSwitch has been set up on the host, PROX needs to be +; recompiled in the VM as follows before running it with this configuration: +; make BNG_QINQ=n MPLS_ROUTING=n +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:03 +[port 2] +name=cpe1 +mac=00:00:00:00:00:02 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 +[variables] +$wk=5s0,6s0 + +[defaults] +mempool size=16K +[lua] +lpm4 = dofile("ipv4.lua") +user_table =dofile("user_table-65K-bng.lua") +[global] +start time=20 +name=BNG (OVS) + +[core 0s0] +mode=master +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=nop +rx ring=yes +tx port=cpe0 +task=1 +mode=lbqinq +rx port=cpe0 +tx cores=(${wk})t0 proto=ipv4 + +[core 2s0] +name=LB-inet +task=0 +mode=nop +rx ring=yes +tx port=inet0 +task=1 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 + +[core 3s0] +name=LB-cpe +task=0 +mode=nop +rx ring=yes +tx port=cpe1 +task=1 +mode=lbqinq +rx port=cpe1 +tx cores=(${wk})t0 proto=ipv4 + +[core 4s0] +name=LB-inet +task=0 +mode=nop +rx ring=yes +tx port=inet1 +task=1 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: Upstream traffic +;#### Task 1: Downstream traffic +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx cores from routing table=2s0,4s0 +route table=lpm4 +local ipv4=21.22.23.24 +handle arp=no +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx cores from cpe table=1s0,3s0 remap=cpe0,cpe1 +user table=user_table diff --git a/VNFs/DPPD-PROX/config/bng-qos-4ports.cfg 
b/VNFs/DPPD-PROX/config/bng-qos-4ports.cfg new file mode 100644 index 00000000..cd26fd5b --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-qos-4ports.cfg @@ -0,0 +1,209 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; Compared to config/bng-4ports.cfg, this configuration sets up a BNG with QoS +; functionality. In total, an extra eight cores (four physical cores) are needed +; to run this configuration. Four cores are used for QoS, two cores are assigned +; with the task of classifying upstream packets and two cores are assigned with +; transmitting downstream packets. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=hardware +[port 1] +name=inet0 +mac=hardware +[port 2] +name=cpe1 +mac=hardware +[port 3] +name=inet1 +mac=hardware +[variables] +;uncomment one of the following to change the number of workers +;$wk=7s0,7s0h; 2 workers +;$wk=7s0-8s0,7s0h-8s0h; 4 workers +$wk=7s0-9s0,7s0h-9s0h; 6 workers +;$wk=7s0-10s0,7s0h-10s0h; 8 workers + +[defaults] +mempool size=128K +qinq tag=0xa888;0x0081 +[lua] +lpm4 = dofile("ipv4.lua") +dscp_table = dofile("dscp.lua") +user_table = dofile("user_table-65K-bng.lua") +[global] +start time=20 +name=BNG + QoS + +[core 0s0] +mode=master +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=no + +[core 1s0h] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 + +[core 2s0] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=no + +[core 2s0h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1 +route table=lpm4 +local ipv4=21.22.23.24 +handle arp=yes +drop=no +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx cores from cpe table=3s0,4s0 remap=cpe0,cpe1 ;map packets going to cpe0 to 3s0 and cpe1 to 4s0 +classify=yes 
+dscp=dscp_table +user table=user_table + +;***************************************************************************************** +;#### Downstream QoS receiving from workers +;#### classification done by workers +;#### Downstream QoS = QoS core and TX core +[core 3s0] +name=txqos0 +task=0 +mode=qos +rx ring=yes +tx cores=3s0ht0 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 3s0h] +name=txnop0 +task=0 +mode=nop +rx ring=yes +tx port=cpe0 +drop=no + +[core 4s0] +name=txqos1 +task=0 +mode=qos +rx ring=yes +tx cores=4s0ht0 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 4s0h] +name=txnop1 +task=0 +mode=nop +rx ring=yes +tx port=cpe1 +drop=no + +;***************************************************************************************** +;#### upstream QoS receiving from CPE +;#### classification done by RX, QoS core +;#### upstream QoS = RX core (classify) + QoS core +[core 5s0h] +name=rxcl0 +task=0 +mode=classify +rx port=cpe0 +tx cores=5s0t0 +dscp=dscp_table +drop=no +user table=user_table + +[core 5s0] +name=rxqos0 +task=0 +mode=qos +rx ring=yes +tx cores=1s0t0 +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table + +[core 6s0h] +name=rxcl1 +task=0 +mode=classify +rx port=cpe1 +tx cores=6s0t0 +dscp=dscp_table +drop=no +user table=user_table + +[core 6s0] +name=rxqos1 +task=0 +mode=qos +rx ring=yes +tx cores=2s0t0 +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table diff --git a/VNFs/DPPD-PROX/config/bng-qos-8ports.cfg b/VNFs/DPPD-PROX/config/bng-qos-8ports.cfg new file mode 100644 index 00000000..c191a22e --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-qos-8ports.cfg @@ -0,0 +1,323 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-qos-4ports.cfg, but on 8 ports instead of 4 and on CPU socket 1 +; instead of socket 0. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +;mac=00:00:01:00:00:01 +tx desc=$txd +promiscuous=$promiscuous + +[port 1] +name=inet0 +;mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 2] +name=cpe1 +;mac=00:00:01:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 3] +name=inet1 +;mac=00:00:01:00:00:04 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 4] +name=cpe2 +;mac=00:00:01:00:00:01 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 5] +name=inet2 +;mac=00:00:01:00:00:02 +tx desc=$txd +promiscuous=$promiscuous + +[port 6] +name=cpe3 +;mac=00:00:01:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 7] +name=inet3 +;mac=00:00:01:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[variables] +$wk=9s1,5s1h-9s1h; 6 workers +$lb_drop=no +$wt_drop=no +$rxd=256 +$txd=256 +$promiscuous=yes +$mp=6K +$mcs=128 +$rs=256 + +[defaults] +mempool size=128K +qinq tag=0xa888 + +[lua] +lpm4 = dofile("ipv4-4ports.lua") +dscp_table = dofile("dscp.lua") +user_table = dofile("user_table-131K-bng.lua") +[global] +start time=20 +name=BNG + QoS +unique mempool per socket=no + +[core 0s1] +mode=master + +; IPv4 +;***************************************************************************************** +;##### Load 
Balancing receiving from CPE and from Internet #### +[core 1s1] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +task=1 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 1s1h] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +task=1 +mode=lbnetwork +rx port=inet2 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s1] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +task=1 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s1h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +task=1 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +local ipv4=21.22.23.24 +drop=$wt_drop +handle arp=yes +cpe table timeout ms=15000000 +ctrl path polling frequency=10000 +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx cores from cpe table=3s1,3s1h,4s1,4s1h remap=cpe0,cpe1,cpe2,cpe3 ;map packets going to cpe0 to 
3s1 and cpe1 to 4s1 +classify=yes +drop=$wt_drop +ctrl path polling frequency=10000 +user table=user_table +dscp=dscp_table + +;***************************************************************************************** +;#### Downstream QoS receiving from workers +;#### classification done by workers +;#### Downstream QoS = QoS core and TX core +[core 3s1] +name=txqos0 +task=0 +mode=qos +rx ring=yes +tx port=cpe0 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 3s1h] +name=txqos1 +task=0 +mode=qos +rx ring=yes +tx port=cpe1 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 4s1] +name=txqos2 +task=0 +mode=qos +rx ring=yes +tx port=cpe2 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 4s1h] +name=txqos3 +task=0 +mode=qos +rx ring=yes +drop=no +tx port=cpe3 +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +;***************************************************************************************** +;#### upstream QoS receiving from CPE +;#### classification done by RX, QoS core +;#### upstream QoS = RX core (classify) + QoS core +[core 5s1] +name=rxqos0 +task=0 +mode=qos +rx port=cpe0 +tx cores=1s1t0 +classify=yes +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 6s1] +name=rxqos1 +task=0 +mode=qos +rx port=cpe1 +classify=yes +dscp=dscp_table +tx cores=1s1t1 +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 7s1] +name=rxqos1 +task=0 +mode=qos +rx port=cpe2 +tx cores=2s1t0 +classify=yes +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 8s1] +task=0 +mode=qos +rx port=cpe3 +tx cores=2s1t1 +classify=yes +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table diff --git a/VNFs/DPPD-PROX/config/bng-qos-8ports_17cores.cfg 
b/VNFs/DPPD-PROX/config/bng-qos-8ports_17cores.cfg new file mode 100644 index 00000000..74c45872 --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-qos-8ports_17cores.cfg @@ -0,0 +1,411 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-qos-4ports.cfg, but on 8 ports instead of 4 and on CPU socket 1 +; instead of socket 0. +;; + +[eal options] +-n=6 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:01:00:00:01 +tx desc=$txd +promiscuous=$promiscuous + +[port 1] +name=inet0 +mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 2] +name=cpe1 +mac=00:00:01:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 3] +name=inet1 +mac=00:00:01:00:00:04 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 4] +name=cpe2 +mac=00:00:02:00:00:01 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 5] +name=inet2 +mac=00:00:02:00:00:02 +tx desc=$txd +promiscuous=$promiscuous + +[port 6] +name=cpe3 +mac=00:00:02:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 7] +name=inet3 +mac=00:00:02:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[variables] +$wk=0s0h,13s0-16s0,13s0h-16s0h,4s0,4s0h +$lb_drop=no +$wt_drop=no +$rxd=256 +$txd=256 +$promiscuous=yes +$mcs=128 +$rs=1024 + +[defaults] +mempool 
size=256K +qinq tag=0xa888 + +[lua] +lpm4 = dofile("ipv4-4ports.lua") +dscp_table = dofile("dscp.lua") +user_table = dofile("user_table-131K-bng.lua") +[global] +start time=20 +name=BNG + QoS +unique mempool per socket=no +mp rings=yes +enable bypass=yes + +[core 0s0] +mode=master + +; IPv4 +;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +name=LB-cpe +task=1 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 1s0h] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +name=LB-cpe +task=1 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s0] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +name=LB-inet +task=1 +mode=lbnetwork +rx port=inet1 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s0h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet2 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +name=LB-inet +task=1 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s0] +name=classify1 +task=0 +mode=classify +rx port=cpe0 +tx cores=9s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +name=classify2 +task=1 +mode=classify +rx port=cpe1 +tx 
cores=10s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 3s0h] +name=classify3 +task=0 +mode=classify +rx port=cpe2 +tx cores=11s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +name=classify4 +task=1 +mode=classify +rx port=cpe3 +tx cores=12s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +local ipv4=21.22.23.24 +drop=$wt_drop +handle arp=yes +cpe table timeout ms=15000000 +ctrl path polling frequency=10000 +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx cores from cpe table=5s0t1,6s0t1,7s0t1,8s0t1 remap=cpe0,cpe1,cpe2,cpe3 ;map packets going to cpe0 to 3s0 and cpe1 to 4s0 +classify=yes +drop=$wt_drop +ctrl path polling frequency=10000 +user table=user_table +dscp=dscp_table +ring size=$rs + +;***************************************************************************************** +;#### Downstream QoS receiving from workers +;#### classification done by workers +;#### Downstream QoS = QoS core and TX core +[core 5s0] +name=txqos0 +task=0 +mode=nop +rx ring=yes +tx port=cpe0 +drop=no + +task=1 +mode=qos +rx ring=yes +tx cores=5s0t0 +drop=yes +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 6s0] +name=txqos1 +task=0 +mode=nop +rx ring=yes +tx port=cpe1 +drop=no + +task=1 +mode=qos +rx ring=yes +;tx port=cpe1 +tx cores=6s0t0 +drop=yes +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 7s0] 
+name=txqos2 +task=0 +mode=nop +rx ring=yes +tx port=cpe2 +drop=no + +task=1 +mode=qos +rx ring=yes +;tx port=cpe2 +tx cores=7s0t0 +drop=yes +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 8s0] +name=txqos3 +task=0 +mode=nop +rx ring=yes +tx port=cpe3 +drop=no + +task=1 +mode=qos +rx ring=yes +drop=yes +;tx port=cpe3 +tx cores=8s0t0 +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +;***************************************************************************************** +;#### upstream QoS receiving from CPE +;#### classification done by RX, QoS core +;#### upstream QoS = RX core (classify) + QoS core +[core 9s0] +name=rxqos0 +task=0 +mode=qos +;rx port=cpe0 +rx ring=yes +tx cores=1s0 +;classify=yes +;dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 10s0] +name=rxqos0 +task=0 +mode=qos +;rx port=cpe1 +rx ring=yes +;classify=yes +;dscp=dscp_table +tx cores=1s0t1 +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 11s0] +name=rxqos2 +task=0 +mode=qos +;rx port=cpe2 +rx ring=yes +tx cores=1s0h +;classify=yes +;dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 12s0] +name=rxqos3 +task=0 +mode=qos +;rx port=cpe3 +rx ring=yes +tx cores=1s0ht1 +;classify=yes +;dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table diff --git a/VNFs/DPPD-PROX/config/bng-qos-8ports_25cores.cfg b/VNFs/DPPD-PROX/config/bng-qos-8ports_25cores.cfg new file mode 100644 index 00000000..cfb58bac --- /dev/null +++ b/VNFs/DPPD-PROX/config/bng-qos-8ports_25cores.cfg @@ -0,0 +1,438 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration sets up a system that handles the same workload as +; config/bng-qos-4ports.cfg, but on 8 ports instead of 4 and on CPU socket 1 +; instead of socket 0. +;; + +[eal options] +-n=6 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:01:00:00:01 +tx desc=$txd +promiscuous=$promiscuous + +[port 1] +name=inet0 +mac=00:00:01:00:00:02 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[port 2] +name=cpe1 +mac=00:00:01:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 3] +name=inet1 +mac=00:00:01:00:00:04 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 4] +name=cpe2 +mac=00:00:02:00:00:01 +tx desc=$txd +rx desc=$rxd +promiscuous=$promiscuous + +[port 5] +name=inet2 +mac=00:00:02:00:00:02 +tx desc=$txd +promiscuous=$promiscuous + +[port 6] +name=cpe3 +mac=00:00:02:00:00:03 +tx desc=$txd +promiscuous=$promiscuous + +[port 7] +name=inet3 +mac=00:00:02:00:00:04 +rx desc=$rxd +tx desc=$txd +promiscuous=$promiscuous + +[variables] +$wk=0s0h,15s0-20s0,7s0h-20s0h +$lb_drop=no +$wt_drop=no +$rxd=256 +$txd=256 +$promiscuous=yes +$mcs=128 +$rs=1024 +$tx1=21s0 +$tx2=22s0 +$tx3=23s0 +$tx4=24s0 + +[defaults] +mempool size=256K +qinq tag=0xa888 + +[lua] +lpm4 = dofile("ipv4-4ports.lua") +dscp_table = dofile("dscp.lua") +user_table = dofile("user_table-131K-bng.lua") +[global] +start time=20 +name=BNG + QoS +unique mempool per socket=yes +mp rings=yes +enable bypass=yes + +[core 0s0] +mode=master + +; IPv4 
+;***************************************************************************************** +;##### Load Balancing receiving from CPE and from Internet #### +[core 1s0] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 1s0h] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s0] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 2s0h] +name=LB-cpe +task=0 +mode=lbqinq +rx ring=yes +tx cores=(${wk})t0 proto=ipv4 +tx cores=(${wk})t0p proto=arp +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s0] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 3s0h] +name=LB-inet +task=0 +mode=lbnetwork +rx port=inet1 +untag mpls=yes +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 4s0] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet2 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 4s0h] +name=LB-inet +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wk})t1 proto=ipv4 +drop=$lb_drop +memcache size=$mcs +ring size=$rs + +[core 5s0] +name=classify1 +task=0 +mode=classify +rx port=cpe0 +tx cores=11s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table +memcache size=$mcs + +[core 5s0h] +name=classify2 +task=0 +mode=classify +rx port=cpe1 +tx cores=12s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table +memcache size=$mcs + +[core 6s0] +name=classify3 +task=0 +mode=classify +rx port=cpe2 +tx 
cores=13s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table +memcache size=$mcs + +[core 6s0h] +name=classify4 +task=0 +mode=classify +rx port=cpe3 +tx cores=14s0 +dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table +memcache size=$mcs + +;***************************************************************************************** +;#### Workers receiving from LB +;#### Task 0: QinQ decapsulation + gre encapsulation + routing +;#### Task 1: ARP +;#### Task 2: GRE depcapsulation + QinQ encapsulation + use learned mac +[core $wk] +name=Worker +task=0 +mode=qinqdecapv4 +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +local ipv4=21.22.23.24 +drop=$wt_drop +handle arp=yes +cpe table timeout ms=15000000 +ctrl path polling frequency=10000 +user table=user_table + +task=1 +mode=qinqencapv4 +rx ring=yes +tx cores from cpe table=7s0,8s0,9s0,10s0 remap=cpe0,cpe1,cpe2,cpe3 ;map packets going to cpe0 to 3s0 and cpe1 to 4s0 +classify=yes +drop=$wt_drop +ctrl path polling frequency=10000 +user table=user_table +dscp=dscp_table +ring size=$rs + +;***************************************************************************************** +;#### Downstream QoS receiving from workers +;#### classification done by workers +;#### Downstream QoS = QoS core and TX core +[core 7s0] +name=txqos0 +task=0 +mode=qos +rx ring=yes +;tx port=cpe0 +tx cores=$tx1 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 8s0] +name=txqos0 +task=0 +mode=qos +rx ring=yes +;tx port=cpe1 +tx cores=$tx2 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 9s0] +name=txqos2 +task=0 +mode=qos +rx ring=yes +;tx port=cpe2 +tx cores=$tx3 +drop=no +pipe tc rate=125000 +pipe tb rate=125000 +user table=user_table + +[core 10s0] +name=txqos3 +task=0 +mode=qos +rx ring=yes +drop=no +;tx port=cpe3 +tx cores=$tx4 +pipe 
tc rate=125000 +pipe tb rate=125000 +user table=user_table + +;***************************************************************************************** +;#### upstream QoS receiving from CPE +;#### classification done by RX, QoS core +;#### upstream QoS = RX core (classify) + QoS core +[core 11s0] +name=rxqos0 +task=0 +mode=qos +;rx port=cpe0 +rx ring=yes +tx cores=1s0 +;classify=yes +;dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 12s0] +name=rxqos0 +task=0 +mode=qos +;rx port=cpe1 +rx ring=yes +;classify=yes +;dscp=dscp_table +tx cores=1s0h +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 13s0] +name=rxqos2 +task=0 +mode=qos +;rx port=cpe2 +rx ring=yes +tx cores=2s0 +;classify=yes +;dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core 14s0] +name=rxqos3 +task=0 +mode=qos +;rx port=cpe3 +rx ring=yes +tx cores=2s0h +;classify=yes +;dscp=dscp_table +pipe tc rate=125000 +pipe tb rate=125000 +drop=no +user table=user_table +dscp=dscp_table + +[core $tx1] +name=tx1 +task=0 +mode=read +rx ring=yes +tx port=cpe0 +ring size=$rs +drop=no + +[core $tx2] +name=tx2 +task=0 +mode=read +rx ring=yes +tx port=cpe1 +ring size=$rs +drop=no + +[core $tx3] +name=tx3 +task=0 +mode=read +rx ring=yes +tx port=cpe2 +ring size=$rs +drop=no + +[core $tx4] +name=tx4 +task=0 +mode=read +rx ring=yes +tx port=cpe3 +ring size=$rs +drop=no diff --git a/VNFs/DPPD-PROX/config/cgnat.cfg b/VNFs/DPPD-PROX/config/cgnat.cfg new file mode 100644 index 00000000..4015d3ab --- /dev/null +++ b/VNFs/DPPD-PROX/config/cgnat.cfg @@ -0,0 +1,58 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=hardware +[port 1] +name=if1 +mac=hardware + +[lua] +nat_table = dofile("cgnat_table.lua") +lpm4 = dofile("ipv4_1port.lua") + +[defaults] +mempool size=4K + +[global] +start time=5 +name=CGNAT + +[core 0s0] +mode=master + +[core 1s0] +name=nat +task=0 +mode=cgnat +private=yes +nat table=nat_table +route table=lpm4 +rx port=if0 +tx ports from routing table=if1 + +task=1 +mode=cgnat +private=no +nat table=nat_table +route table=lpm4 +rx port=if1 +tx port=if0 diff --git a/VNFs/DPPD-PROX/config/cgnat_table.lua b/VNFs/DPPD-PROX/config/cgnat_table.lua new file mode 100644 index 00000000..26ae9c7a --- /dev/null +++ b/VNFs/DPPD-PROX/config/cgnat_table.lua @@ -0,0 +1,38 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local cgnat = {} +cgnat.dynamic = { + {public_ip_range_start = ip("20.0.1.0"),public_ip_range_stop = ip("20.0.1.15"), public_port = val_range(0,65535)}, + {public_ip_range_start = ip("20.0.1.16"),public_ip_range_stop = ip("20.0.1.31"), public_port = val_range(0,65535)}, +} +cgnat.static_ip_port = { + {src_ip = ip("192.168.2.1"), src_port = 68, dst_ip = ip("20.0.2.1"), dst_port = 68}, + {src_ip = ip("192.168.2.1"), src_port = 168, dst_ip = ip("20.0.2.1"), dst_port = 5000}, + {src_ip = ip("192.168.2.1"), src_port = 268, dst_ip = ip("20.0.2.1"), dst_port = 5001}, + {src_ip = ip("192.168.2.1"), src_port = 368, dst_ip = ip("20.0.2.1"), dst_port = 5002}, +} +cgnat.static_ip = { + {src_ip = ip("192.168.3.1"), dst_ip = ip("20.0.3.1")}, + {src_ip = ip("192.168.3.2"), dst_ip = ip("20.0.3.2")}, + {src_ip = ip("192.168.3.3"), dst_ip = ip("20.0.3.3")}, + {src_ip = ip("192.168.3.4"), dst_ip = ip("20.0.3.4")}, + {src_ip = ip("192.168.3.5"), dst_ip = ip("20.0.3.5")}, + {src_ip = ip("192.168.3.6"), dst_ip = ip("20.0.3.6")}, + {src_ip = ip("192.168.3.7"), dst_ip = ip("20.0.3.7")}, + {src_ip = ip("192.168.3.8"), dst_ip = ip("20.0.3.8")}, +} +return cgnat diff --git a/VNFs/DPPD-PROX/config/cpe_table.lua b/VNFs/DPPD-PROX/config/cpe_table.lua new file mode 100644 index 00000000..305e14f2 --- /dev/null +++ b/VNFs/DPPD-PROX/config/cpe_table.lua @@ -0,0 +1,2066 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +return { + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=0, cidr = cidr("192.168.0.0/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=0, cidr = cidr("192.168.0.8/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=1, cidr = cidr("192.168.0.16/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=1, cidr = cidr("192.168.0.24/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=2, cidr = cidr("192.168.0.32/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=2, cidr = cidr("192.168.0.40/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=3, cidr = cidr("192.168.0.48/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=3, cidr = cidr("192.168.0.56/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=4, cidr = cidr("192.168.0.64/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=4, cidr = cidr("192.168.0.72/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=5, cidr = cidr("192.168.0.80/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=5, cidr = cidr("192.168.0.88/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=6, cidr = cidr("192.168.0.96/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=6, cidr = cidr("192.168.0.104/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=7, cidr = cidr("192.168.0.112/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=7, cidr = cidr("192.168.0.120/29"), mac = mac("00:00:01:00:00:07"), 
user_id=7}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=8, cidr = cidr("192.168.0.128/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=8, cidr = cidr("192.168.0.136/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=9, cidr = cidr("192.168.0.144/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=9, cidr = cidr("192.168.0.152/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=10, cidr = cidr("192.168.0.160/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=10, cidr = cidr("192.168.0.168/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=11, cidr = cidr("192.168.0.176/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=11, cidr = cidr("192.168.0.184/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=12, cidr = cidr("192.168.0.192/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=12, cidr = cidr("192.168.0.200/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=13, cidr = cidr("192.168.0.208/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=13, cidr = cidr("192.168.0.216/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=14, cidr = cidr("192.168.0.224/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=14, cidr = cidr("192.168.0.232/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=15, cidr = cidr("192.168.0.240/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=15, cidr = cidr("192.168.0.248/29"), mac = 
mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=16, cidr = cidr("192.168.1.0/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=16, cidr = cidr("192.168.1.8/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=17, cidr = cidr("192.168.1.16/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=17, cidr = cidr("192.168.1.24/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=18, cidr = cidr("192.168.1.32/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=18, cidr = cidr("192.168.1.40/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=19, cidr = cidr("192.168.1.48/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=19, cidr = cidr("192.168.1.56/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=20, cidr = cidr("192.168.1.64/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=20, cidr = cidr("192.168.1.72/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=21, cidr = cidr("192.168.1.80/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=21, cidr = cidr("192.168.1.88/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=22, cidr = cidr("192.168.1.96/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=22, cidr = cidr("192.168.1.104/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=23, cidr = cidr("192.168.1.112/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=23, cidr = 
cidr("192.168.1.120/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=24, cidr = cidr("192.168.1.128/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=24, cidr = cidr("192.168.1.136/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=25, cidr = cidr("192.168.1.144/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=25, cidr = cidr("192.168.1.152/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=26, cidr = cidr("192.168.1.160/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=26, cidr = cidr("192.168.1.168/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=27, cidr = cidr("192.168.1.176/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=27, cidr = cidr("192.168.1.184/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=28, cidr = cidr("192.168.1.192/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=28, cidr = cidr("192.168.1.200/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=29, cidr = cidr("192.168.1.208/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=29, cidr = cidr("192.168.1.216/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=30, cidr = cidr("192.168.1.224/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=30, cidr = cidr("192.168.1.232/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=31, cidr = cidr("192.168.1.240/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=0, gre_id=0, 
svlan_id=1, cvlan_id=31, cidr = cidr("192.168.1.248/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=32, cidr = cidr("192.168.2.0/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=32, cidr = cidr("192.168.2.8/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=33, cidr = cidr("192.168.2.16/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=33, cidr = cidr("192.168.2.24/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=34, cidr = cidr("192.168.2.32/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=34, cidr = cidr("192.168.2.40/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=35, cidr = cidr("192.168.2.48/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=35, cidr = cidr("192.168.2.56/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=36, cidr = cidr("192.168.2.64/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=36, cidr = cidr("192.168.2.72/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=37, cidr = cidr("192.168.2.80/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=37, cidr = cidr("192.168.2.88/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=38, cidr = cidr("192.168.2.96/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=38, cidr = cidr("192.168.2.104/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=39, cidr = cidr("192.168.2.112/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + 
{dest_id=0, gre_id=0, svlan_id=1, cvlan_id=39, cidr = cidr("192.168.2.120/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=40, cidr = cidr("192.168.2.128/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=40, cidr = cidr("192.168.2.136/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=41, cidr = cidr("192.168.2.144/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=41, cidr = cidr("192.168.2.152/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=42, cidr = cidr("192.168.2.160/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=42, cidr = cidr("192.168.2.168/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=43, cidr = cidr("192.168.2.176/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=43, cidr = cidr("192.168.2.184/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=44, cidr = cidr("192.168.2.192/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=44, cidr = cidr("192.168.2.200/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=45, cidr = cidr("192.168.2.208/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=45, cidr = cidr("192.168.2.216/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=46, cidr = cidr("192.168.2.224/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=46, cidr = cidr("192.168.2.232/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=47, cidr = cidr("192.168.2.240/29"), mac = 
mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=47, cidr = cidr("192.168.2.248/29"), mac = mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=48, cidr = cidr("192.168.3.0/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=48, cidr = cidr("192.168.3.8/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=49, cidr = cidr("192.168.3.16/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=49, cidr = cidr("192.168.3.24/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=50, cidr = cidr("192.168.3.32/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=50, cidr = cidr("192.168.3.40/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=51, cidr = cidr("192.168.3.48/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=51, cidr = cidr("192.168.3.56/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=52, cidr = cidr("192.168.3.64/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=52, cidr = cidr("192.168.3.72/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=53, cidr = cidr("192.168.3.80/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=53, cidr = cidr("192.168.3.88/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=54, cidr = cidr("192.168.3.96/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=54, cidr = cidr("192.168.3.104/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=55, cidr = 
cidr("192.168.3.112/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=55, cidr = cidr("192.168.3.120/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=56, cidr = cidr("192.168.3.128/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=56, cidr = cidr("192.168.3.136/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=57, cidr = cidr("192.168.3.144/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=57, cidr = cidr("192.168.3.152/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=58, cidr = cidr("192.168.3.160/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=58, cidr = cidr("192.168.3.168/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=59, cidr = cidr("192.168.3.176/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=59, cidr = cidr("192.168.3.184/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=60, cidr = cidr("192.168.3.192/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=60, cidr = cidr("192.168.3.200/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=61, cidr = cidr("192.168.3.208/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=61, cidr = cidr("192.168.3.216/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=62, cidr = cidr("192.168.3.224/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=62, cidr = cidr("192.168.3.232/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=0, gre_id=0, 
svlan_id=0, cvlan_id=63, cidr = cidr("192.168.3.240/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=63, cidr = cidr("192.168.3.248/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=64, cidr = cidr("192.168.4.0/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=64, cidr = cidr("192.168.4.8/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=65, cidr = cidr("192.168.4.16/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=65, cidr = cidr("192.168.4.24/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=66, cidr = cidr("192.168.4.32/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=66, cidr = cidr("192.168.4.40/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=67, cidr = cidr("192.168.4.48/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=67, cidr = cidr("192.168.4.56/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=68, cidr = cidr("192.168.4.64/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=68, cidr = cidr("192.168.4.72/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=69, cidr = cidr("192.168.4.80/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=69, cidr = cidr("192.168.4.88/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=70, cidr = cidr("192.168.4.96/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=70, cidr = cidr("192.168.4.104/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + 
{dest_id=0, gre_id=0, svlan_id=0, cvlan_id=71, cidr = cidr("192.168.4.112/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=71, cidr = cidr("192.168.4.120/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=72, cidr = cidr("192.168.4.128/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=72, cidr = cidr("192.168.4.136/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=73, cidr = cidr("192.168.4.144/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=73, cidr = cidr("192.168.4.152/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=74, cidr = cidr("192.168.4.160/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=74, cidr = cidr("192.168.4.168/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=75, cidr = cidr("192.168.4.176/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=75, cidr = cidr("192.168.4.184/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=76, cidr = cidr("192.168.4.192/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=76, cidr = cidr("192.168.4.200/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=77, cidr = cidr("192.168.4.208/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=77, cidr = cidr("192.168.4.216/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=78, cidr = cidr("192.168.4.224/29"), mac = mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=78, cidr = cidr("192.168.4.232/29"), mac = 
mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=79, cidr = cidr("192.168.4.240/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=79, cidr = cidr("192.168.4.248/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=80, cidr = cidr("192.168.5.0/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=80, cidr = cidr("192.168.5.8/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=81, cidr = cidr("192.168.5.16/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=81, cidr = cidr("192.168.5.24/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=82, cidr = cidr("192.168.5.32/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=82, cidr = cidr("192.168.5.40/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=83, cidr = cidr("192.168.5.48/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=83, cidr = cidr("192.168.5.56/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=84, cidr = cidr("192.168.5.64/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=84, cidr = cidr("192.168.5.72/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=85, cidr = cidr("192.168.5.80/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=85, cidr = cidr("192.168.5.88/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=86, cidr = cidr("192.168.5.96/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=86, cidr = 
cidr("192.168.5.104/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=87, cidr = cidr("192.168.5.112/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=87, cidr = cidr("192.168.5.120/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=88, cidr = cidr("192.168.5.128/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=88, cidr = cidr("192.168.5.136/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=89, cidr = cidr("192.168.5.144/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=89, cidr = cidr("192.168.5.152/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=90, cidr = cidr("192.168.5.160/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=90, cidr = cidr("192.168.5.168/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=91, cidr = cidr("192.168.5.176/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=91, cidr = cidr("192.168.5.184/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=92, cidr = cidr("192.168.5.192/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=92, cidr = cidr("192.168.5.200/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=93, cidr = cidr("192.168.5.208/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=93, cidr = cidr("192.168.5.216/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=94, cidr = cidr("192.168.5.224/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=0, gre_id=0, 
svlan_id=1, cvlan_id=94, cidr = cidr("192.168.5.232/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=95, cidr = cidr("192.168.5.240/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=95, cidr = cidr("192.168.5.248/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=96, cidr = cidr("192.168.6.0/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=96, cidr = cidr("192.168.6.8/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=97, cidr = cidr("192.168.6.16/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=97, cidr = cidr("192.168.6.24/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=98, cidr = cidr("192.168.6.32/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=98, cidr = cidr("192.168.6.40/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=99, cidr = cidr("192.168.6.48/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=99, cidr = cidr("192.168.6.56/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=100, cidr = cidr("192.168.6.64/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=100, cidr = cidr("192.168.6.72/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=101, cidr = cidr("192.168.6.80/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=101, cidr = cidr("192.168.6.88/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=102, cidr = cidr("192.168.6.96/29"), mac = mac("00:00:01:00:00:66"), 
user_id=102}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=102, cidr = cidr("192.168.6.104/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=103, cidr = cidr("192.168.6.112/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=103, cidr = cidr("192.168.6.120/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=104, cidr = cidr("192.168.6.128/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=104, cidr = cidr("192.168.6.136/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=105, cidr = cidr("192.168.6.144/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=105, cidr = cidr("192.168.6.152/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=106, cidr = cidr("192.168.6.160/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=106, cidr = cidr("192.168.6.168/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=107, cidr = cidr("192.168.6.176/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=107, cidr = cidr("192.168.6.184/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=108, cidr = cidr("192.168.6.192/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=108, cidr = cidr("192.168.6.200/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=109, cidr = cidr("192.168.6.208/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=109, cidr = cidr("192.168.6.216/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=110, 
cidr = cidr("192.168.6.224/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=110, cidr = cidr("192.168.6.232/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=111, cidr = cidr("192.168.6.240/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=111, cidr = cidr("192.168.6.248/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=112, cidr = cidr("192.168.7.0/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=112, cidr = cidr("192.168.7.8/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=113, cidr = cidr("192.168.7.16/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=113, cidr = cidr("192.168.7.24/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=114, cidr = cidr("192.168.7.32/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=114, cidr = cidr("192.168.7.40/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=115, cidr = cidr("192.168.7.48/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=115, cidr = cidr("192.168.7.56/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=116, cidr = cidr("192.168.7.64/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=116, cidr = cidr("192.168.7.72/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=117, cidr = cidr("192.168.7.80/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=117, cidr = cidr("192.168.7.88/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, 
+ {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=118, cidr = cidr("192.168.7.96/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=118, cidr = cidr("192.168.7.104/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=119, cidr = cidr("192.168.7.112/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=119, cidr = cidr("192.168.7.120/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=120, cidr = cidr("192.168.7.128/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=120, cidr = cidr("192.168.7.136/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=121, cidr = cidr("192.168.7.144/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=121, cidr = cidr("192.168.7.152/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=122, cidr = cidr("192.168.7.160/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=122, cidr = cidr("192.168.7.168/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=123, cidr = cidr("192.168.7.176/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=123, cidr = cidr("192.168.7.184/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=124, cidr = cidr("192.168.7.192/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=124, cidr = cidr("192.168.7.200/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=125, cidr = cidr("192.168.7.208/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=125, cidr = 
cidr("192.168.7.216/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=126, cidr = cidr("192.168.7.224/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=126, cidr = cidr("192.168.7.232/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=127, cidr = cidr("192.168.7.240/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=127, cidr = cidr("192.168.7.248/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=128, cidr = cidr("192.168.8.0/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=128, cidr = cidr("192.168.8.8/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=129, cidr = cidr("192.168.8.16/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=129, cidr = cidr("192.168.8.24/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=130, cidr = cidr("192.168.8.32/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=130, cidr = cidr("192.168.8.40/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=131, cidr = cidr("192.168.8.48/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=131, cidr = cidr("192.168.8.56/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=132, cidr = cidr("192.168.8.64/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=132, cidr = cidr("192.168.8.72/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=133, cidr = cidr("192.168.8.80/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + 
{dest_id=0, gre_id=0, svlan_id=1, cvlan_id=133, cidr = cidr("192.168.8.88/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=134, cidr = cidr("192.168.8.96/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=134, cidr = cidr("192.168.8.104/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=135, cidr = cidr("192.168.8.112/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=135, cidr = cidr("192.168.8.120/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=136, cidr = cidr("192.168.8.128/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=136, cidr = cidr("192.168.8.136/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=137, cidr = cidr("192.168.8.144/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=137, cidr = cidr("192.168.8.152/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=138, cidr = cidr("192.168.8.160/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=138, cidr = cidr("192.168.8.168/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=139, cidr = cidr("192.168.8.176/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=139, cidr = cidr("192.168.8.184/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=140, cidr = cidr("192.168.8.192/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=140, cidr = cidr("192.168.8.200/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=141, cidr = 
cidr("192.168.8.208/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=141, cidr = cidr("192.168.8.216/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=142, cidr = cidr("192.168.8.224/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=142, cidr = cidr("192.168.8.232/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=143, cidr = cidr("192.168.8.240/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=143, cidr = cidr("192.168.8.248/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=144, cidr = cidr("192.168.9.0/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=144, cidr = cidr("192.168.9.8/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=145, cidr = cidr("192.168.9.16/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=145, cidr = cidr("192.168.9.24/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=146, cidr = cidr("192.168.9.32/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=146, cidr = cidr("192.168.9.40/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=147, cidr = cidr("192.168.9.48/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=147, cidr = cidr("192.168.9.56/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=148, cidr = cidr("192.168.9.64/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=148, cidr = cidr("192.168.9.72/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + 
{dest_id=0, gre_id=0, svlan_id=0, cvlan_id=149, cidr = cidr("192.168.9.80/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=149, cidr = cidr("192.168.9.88/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=150, cidr = cidr("192.168.9.96/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=150, cidr = cidr("192.168.9.104/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=151, cidr = cidr("192.168.9.112/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=151, cidr = cidr("192.168.9.120/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=152, cidr = cidr("192.168.9.128/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=152, cidr = cidr("192.168.9.136/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=153, cidr = cidr("192.168.9.144/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=153, cidr = cidr("192.168.9.152/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=154, cidr = cidr("192.168.9.160/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=154, cidr = cidr("192.168.9.168/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=155, cidr = cidr("192.168.9.176/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=155, cidr = cidr("192.168.9.184/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=156, cidr = cidr("192.168.9.192/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=156, cidr = 
cidr("192.168.9.200/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=157, cidr = cidr("192.168.9.208/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=157, cidr = cidr("192.168.9.216/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=158, cidr = cidr("192.168.9.224/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=158, cidr = cidr("192.168.9.232/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=159, cidr = cidr("192.168.9.240/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=159, cidr = cidr("192.168.9.248/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=160, cidr = cidr("192.168.10.0/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=160, cidr = cidr("192.168.10.8/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=161, cidr = cidr("192.168.10.16/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=161, cidr = cidr("192.168.10.24/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=162, cidr = cidr("192.168.10.32/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=162, cidr = cidr("192.168.10.40/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=163, cidr = cidr("192.168.10.48/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=163, cidr = cidr("192.168.10.56/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=164, cidr = cidr("192.168.10.64/29"), mac = mac("00:00:01:00:00:a4"), 
user_id=164}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=164, cidr = cidr("192.168.10.72/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=165, cidr = cidr("192.168.10.80/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=165, cidr = cidr("192.168.10.88/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=166, cidr = cidr("192.168.10.96/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=166, cidr = cidr("192.168.10.104/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=167, cidr = cidr("192.168.10.112/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=167, cidr = cidr("192.168.10.120/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=168, cidr = cidr("192.168.10.128/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=168, cidr = cidr("192.168.10.136/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=169, cidr = cidr("192.168.10.144/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=169, cidr = cidr("192.168.10.152/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=170, cidr = cidr("192.168.10.160/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=170, cidr = cidr("192.168.10.168/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=171, cidr = cidr("192.168.10.176/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=171, cidr = cidr("192.168.10.184/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=0, gre_id=0, svlan_id=0, 
cvlan_id=172, cidr = cidr("192.168.10.192/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=172, cidr = cidr("192.168.10.200/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=173, cidr = cidr("192.168.10.208/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=173, cidr = cidr("192.168.10.216/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=174, cidr = cidr("192.168.10.224/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=174, cidr = cidr("192.168.10.232/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=175, cidr = cidr("192.168.10.240/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=175, cidr = cidr("192.168.10.248/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=176, cidr = cidr("192.168.11.0/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=176, cidr = cidr("192.168.11.8/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=177, cidr = cidr("192.168.11.16/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=177, cidr = cidr("192.168.11.24/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=178, cidr = cidr("192.168.11.32/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=178, cidr = cidr("192.168.11.40/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=179, cidr = cidr("192.168.11.48/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=179, cidr = cidr("192.168.11.56/29"), mac = 
mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=180, cidr = cidr("192.168.11.64/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=180, cidr = cidr("192.168.11.72/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=181, cidr = cidr("192.168.11.80/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=181, cidr = cidr("192.168.11.88/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=182, cidr = cidr("192.168.11.96/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=182, cidr = cidr("192.168.11.104/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=183, cidr = cidr("192.168.11.112/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=183, cidr = cidr("192.168.11.120/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=184, cidr = cidr("192.168.11.128/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=184, cidr = cidr("192.168.11.136/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=185, cidr = cidr("192.168.11.144/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=185, cidr = cidr("192.168.11.152/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=186, cidr = cidr("192.168.11.160/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=186, cidr = cidr("192.168.11.168/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=187, cidr = cidr("192.168.11.176/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=0, 
gre_id=0, svlan_id=1, cvlan_id=187, cidr = cidr("192.168.11.184/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=188, cidr = cidr("192.168.11.192/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=188, cidr = cidr("192.168.11.200/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=189, cidr = cidr("192.168.11.208/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=189, cidr = cidr("192.168.11.216/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=190, cidr = cidr("192.168.11.224/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=190, cidr = cidr("192.168.11.232/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=191, cidr = cidr("192.168.11.240/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=191, cidr = cidr("192.168.11.248/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=192, cidr = cidr("192.168.12.0/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=192, cidr = cidr("192.168.12.8/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=193, cidr = cidr("192.168.12.16/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=193, cidr = cidr("192.168.12.24/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=194, cidr = cidr("192.168.12.32/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=194, cidr = cidr("192.168.12.40/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=195, cidr = 
cidr("192.168.12.48/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=195, cidr = cidr("192.168.12.56/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=196, cidr = cidr("192.168.12.64/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=196, cidr = cidr("192.168.12.72/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=197, cidr = cidr("192.168.12.80/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=197, cidr = cidr("192.168.12.88/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=198, cidr = cidr("192.168.12.96/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=198, cidr = cidr("192.168.12.104/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=199, cidr = cidr("192.168.12.112/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=199, cidr = cidr("192.168.12.120/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=200, cidr = cidr("192.168.12.128/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=200, cidr = cidr("192.168.12.136/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=201, cidr = cidr("192.168.12.144/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=201, cidr = cidr("192.168.12.152/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=202, cidr = cidr("192.168.12.160/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=202, cidr = cidr("192.168.12.168/29"), mac = 
mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=203, cidr = cidr("192.168.12.176/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=203, cidr = cidr("192.168.12.184/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=204, cidr = cidr("192.168.12.192/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=204, cidr = cidr("192.168.12.200/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=205, cidr = cidr("192.168.12.208/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=205, cidr = cidr("192.168.12.216/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=206, cidr = cidr("192.168.12.224/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=206, cidr = cidr("192.168.12.232/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=207, cidr = cidr("192.168.12.240/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=207, cidr = cidr("192.168.12.248/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=208, cidr = cidr("192.168.13.0/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=208, cidr = cidr("192.168.13.8/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=209, cidr = cidr("192.168.13.16/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=209, cidr = cidr("192.168.13.24/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=210, cidr = cidr("192.168.13.32/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=0, 
gre_id=0, svlan_id=1, cvlan_id=210, cidr = cidr("192.168.13.40/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=211, cidr = cidr("192.168.13.48/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=211, cidr = cidr("192.168.13.56/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=212, cidr = cidr("192.168.13.64/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=212, cidr = cidr("192.168.13.72/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=213, cidr = cidr("192.168.13.80/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=213, cidr = cidr("192.168.13.88/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=214, cidr = cidr("192.168.13.96/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=214, cidr = cidr("192.168.13.104/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=215, cidr = cidr("192.168.13.112/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=215, cidr = cidr("192.168.13.120/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=216, cidr = cidr("192.168.13.128/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=216, cidr = cidr("192.168.13.136/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=217, cidr = cidr("192.168.13.144/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=217, cidr = cidr("192.168.13.152/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=218, cidr = 
cidr("192.168.13.160/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=218, cidr = cidr("192.168.13.168/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=219, cidr = cidr("192.168.13.176/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=219, cidr = cidr("192.168.13.184/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=220, cidr = cidr("192.168.13.192/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=220, cidr = cidr("192.168.13.200/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=221, cidr = cidr("192.168.13.208/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=221, cidr = cidr("192.168.13.216/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=222, cidr = cidr("192.168.13.224/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=222, cidr = cidr("192.168.13.232/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=223, cidr = cidr("192.168.13.240/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=223, cidr = cidr("192.168.13.248/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=224, cidr = cidr("192.168.14.0/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=224, cidr = cidr("192.168.14.8/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=225, cidr = cidr("192.168.14.16/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=225, cidr = cidr("192.168.14.24/29"), mac = 
mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=226, cidr = cidr("192.168.14.32/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=226, cidr = cidr("192.168.14.40/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=227, cidr = cidr("192.168.14.48/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=227, cidr = cidr("192.168.14.56/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=228, cidr = cidr("192.168.14.64/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=228, cidr = cidr("192.168.14.72/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=229, cidr = cidr("192.168.14.80/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=229, cidr = cidr("192.168.14.88/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=230, cidr = cidr("192.168.14.96/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=230, cidr = cidr("192.168.14.104/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=231, cidr = cidr("192.168.14.112/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=231, cidr = cidr("192.168.14.120/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=232, cidr = cidr("192.168.14.128/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=232, cidr = cidr("192.168.14.136/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=233, cidr = cidr("192.168.14.144/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=0, 
gre_id=0, svlan_id=1, cvlan_id=233, cidr = cidr("192.168.14.152/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=234, cidr = cidr("192.168.14.160/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=234, cidr = cidr("192.168.14.168/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=235, cidr = cidr("192.168.14.176/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=235, cidr = cidr("192.168.14.184/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=236, cidr = cidr("192.168.14.192/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=236, cidr = cidr("192.168.14.200/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=237, cidr = cidr("192.168.14.208/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=237, cidr = cidr("192.168.14.216/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=238, cidr = cidr("192.168.14.224/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=238, cidr = cidr("192.168.14.232/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=239, cidr = cidr("192.168.14.240/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=239, cidr = cidr("192.168.14.248/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=240, cidr = cidr("192.168.15.0/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=240, cidr = cidr("192.168.15.8/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=241, cidr = 
cidr("192.168.15.16/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=241, cidr = cidr("192.168.15.24/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=242, cidr = cidr("192.168.15.32/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=242, cidr = cidr("192.168.15.40/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=243, cidr = cidr("192.168.15.48/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=243, cidr = cidr("192.168.15.56/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=244, cidr = cidr("192.168.15.64/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=244, cidr = cidr("192.168.15.72/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=245, cidr = cidr("192.168.15.80/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=245, cidr = cidr("192.168.15.88/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=246, cidr = cidr("192.168.15.96/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=246, cidr = cidr("192.168.15.104/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=247, cidr = cidr("192.168.15.112/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=247, cidr = cidr("192.168.15.120/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=248, cidr = cidr("192.168.15.128/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=248, cidr = cidr("192.168.15.136/29"), mac = mac("00:00:01:00:00:f8"), 
user_id=248}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=249, cidr = cidr("192.168.15.144/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=249, cidr = cidr("192.168.15.152/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=250, cidr = cidr("192.168.15.160/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=250, cidr = cidr("192.168.15.168/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=251, cidr = cidr("192.168.15.176/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=251, cidr = cidr("192.168.15.184/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=252, cidr = cidr("192.168.15.192/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=252, cidr = cidr("192.168.15.200/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=253, cidr = cidr("192.168.15.208/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=253, cidr = cidr("192.168.15.216/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=254, cidr = cidr("192.168.15.224/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=254, cidr = cidr("192.168.15.232/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=0, gre_id=0, svlan_id=0, cvlan_id=255, cidr = cidr("192.168.15.240/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=0, gre_id=0, svlan_id=1, cvlan_id=255, cidr = cidr("192.168.15.248/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=0, cidr = cidr("192.168.16.0/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=1, gre_id=0, svlan_id=17, 
cvlan_id=0, cidr = cidr("192.168.16.8/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=1, cidr = cidr("192.168.16.16/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=1, cidr = cidr("192.168.16.24/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=2, cidr = cidr("192.168.16.32/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=2, cidr = cidr("192.168.16.40/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=3, cidr = cidr("192.168.16.48/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=3, cidr = cidr("192.168.16.56/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=4, cidr = cidr("192.168.16.64/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=4, cidr = cidr("192.168.16.72/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=5, cidr = cidr("192.168.16.80/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=5, cidr = cidr("192.168.16.88/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=6, cidr = cidr("192.168.16.96/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=6, cidr = cidr("192.168.16.104/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=7, cidr = cidr("192.168.16.112/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=7, cidr = cidr("192.168.16.120/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=8, cidr = cidr("192.168.16.128/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=1, 
gre_id=0, svlan_id=17, cvlan_id=8, cidr = cidr("192.168.16.136/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=9, cidr = cidr("192.168.16.144/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=9, cidr = cidr("192.168.16.152/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=10, cidr = cidr("192.168.16.160/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=10, cidr = cidr("192.168.16.168/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=11, cidr = cidr("192.168.16.176/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=11, cidr = cidr("192.168.16.184/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=12, cidr = cidr("192.168.16.192/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=12, cidr = cidr("192.168.16.200/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=13, cidr = cidr("192.168.16.208/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=13, cidr = cidr("192.168.16.216/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=14, cidr = cidr("192.168.16.224/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=14, cidr = cidr("192.168.16.232/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=15, cidr = cidr("192.168.16.240/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=15, cidr = cidr("192.168.16.248/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=16, cidr = cidr("192.168.17.0/29"), 
mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=16, cidr = cidr("192.168.17.8/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=17, cidr = cidr("192.168.17.16/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=17, cidr = cidr("192.168.17.24/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=18, cidr = cidr("192.168.17.32/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=18, cidr = cidr("192.168.17.40/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=19, cidr = cidr("192.168.17.48/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=19, cidr = cidr("192.168.17.56/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=20, cidr = cidr("192.168.17.64/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=20, cidr = cidr("192.168.17.72/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=21, cidr = cidr("192.168.17.80/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=21, cidr = cidr("192.168.17.88/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=22, cidr = cidr("192.168.17.96/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=22, cidr = cidr("192.168.17.104/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=23, cidr = cidr("192.168.17.112/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=23, cidr = cidr("192.168.17.120/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=1, gre_id=0, 
svlan_id=16, cvlan_id=24, cidr = cidr("192.168.17.128/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=24, cidr = cidr("192.168.17.136/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=25, cidr = cidr("192.168.17.144/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=25, cidr = cidr("192.168.17.152/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=26, cidr = cidr("192.168.17.160/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=26, cidr = cidr("192.168.17.168/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=27, cidr = cidr("192.168.17.176/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=27, cidr = cidr("192.168.17.184/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=28, cidr = cidr("192.168.17.192/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=28, cidr = cidr("192.168.17.200/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=29, cidr = cidr("192.168.17.208/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=29, cidr = cidr("192.168.17.216/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=30, cidr = cidr("192.168.17.224/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=30, cidr = cidr("192.168.17.232/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=31, cidr = cidr("192.168.17.240/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=31, cidr = cidr("192.168.17.248/29"), mac 
= mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=32, cidr = cidr("192.168.18.0/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=32, cidr = cidr("192.168.18.8/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=33, cidr = cidr("192.168.18.16/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=33, cidr = cidr("192.168.18.24/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=34, cidr = cidr("192.168.18.32/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=34, cidr = cidr("192.168.18.40/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=35, cidr = cidr("192.168.18.48/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=35, cidr = cidr("192.168.18.56/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=36, cidr = cidr("192.168.18.64/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=36, cidr = cidr("192.168.18.72/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=37, cidr = cidr("192.168.18.80/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=37, cidr = cidr("192.168.18.88/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=38, cidr = cidr("192.168.18.96/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=38, cidr = cidr("192.168.18.104/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=39, cidr = cidr("192.168.18.112/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=1, gre_id=0, svlan_id=17, 
cvlan_id=39, cidr = cidr("192.168.18.120/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=40, cidr = cidr("192.168.18.128/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=40, cidr = cidr("192.168.18.136/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=41, cidr = cidr("192.168.18.144/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=41, cidr = cidr("192.168.18.152/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=42, cidr = cidr("192.168.18.160/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=42, cidr = cidr("192.168.18.168/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=43, cidr = cidr("192.168.18.176/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=43, cidr = cidr("192.168.18.184/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=44, cidr = cidr("192.168.18.192/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=44, cidr = cidr("192.168.18.200/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=45, cidr = cidr("192.168.18.208/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=45, cidr = cidr("192.168.18.216/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=46, cidr = cidr("192.168.18.224/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=46, cidr = cidr("192.168.18.232/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=47, cidr = cidr("192.168.18.240/29"), mac = 
mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=47, cidr = cidr("192.168.18.248/29"), mac = mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=48, cidr = cidr("192.168.19.0/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=48, cidr = cidr("192.168.19.8/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=49, cidr = cidr("192.168.19.16/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=49, cidr = cidr("192.168.19.24/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=50, cidr = cidr("192.168.19.32/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=50, cidr = cidr("192.168.19.40/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=51, cidr = cidr("192.168.19.48/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=51, cidr = cidr("192.168.19.56/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=52, cidr = cidr("192.168.19.64/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=52, cidr = cidr("192.168.19.72/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=53, cidr = cidr("192.168.19.80/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=53, cidr = cidr("192.168.19.88/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=54, cidr = cidr("192.168.19.96/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=54, cidr = cidr("192.168.19.104/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=1, gre_id=0, svlan_id=16, 
cvlan_id=55, cidr = cidr("192.168.19.112/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=55, cidr = cidr("192.168.19.120/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=56, cidr = cidr("192.168.19.128/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=56, cidr = cidr("192.168.19.136/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=57, cidr = cidr("192.168.19.144/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=57, cidr = cidr("192.168.19.152/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=58, cidr = cidr("192.168.19.160/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=58, cidr = cidr("192.168.19.168/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=59, cidr = cidr("192.168.19.176/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=59, cidr = cidr("192.168.19.184/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=60, cidr = cidr("192.168.19.192/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=60, cidr = cidr("192.168.19.200/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=61, cidr = cidr("192.168.19.208/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=61, cidr = cidr("192.168.19.216/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=62, cidr = cidr("192.168.19.224/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=62, cidr = cidr("192.168.19.232/29"), mac = 
mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=63, cidr = cidr("192.168.19.240/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=63, cidr = cidr("192.168.19.248/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=64, cidr = cidr("192.168.20.0/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=64, cidr = cidr("192.168.20.8/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=65, cidr = cidr("192.168.20.16/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=65, cidr = cidr("192.168.20.24/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=66, cidr = cidr("192.168.20.32/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=66, cidr = cidr("192.168.20.40/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=67, cidr = cidr("192.168.20.48/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=67, cidr = cidr("192.168.20.56/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=68, cidr = cidr("192.168.20.64/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=68, cidr = cidr("192.168.20.72/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=69, cidr = cidr("192.168.20.80/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=69, cidr = cidr("192.168.20.88/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=70, cidr = cidr("192.168.20.96/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=1, gre_id=0, svlan_id=17, 
cvlan_id=70, cidr = cidr("192.168.20.104/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=71, cidr = cidr("192.168.20.112/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=71, cidr = cidr("192.168.20.120/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=72, cidr = cidr("192.168.20.128/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=72, cidr = cidr("192.168.20.136/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=73, cidr = cidr("192.168.20.144/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=73, cidr = cidr("192.168.20.152/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=74, cidr = cidr("192.168.20.160/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=74, cidr = cidr("192.168.20.168/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=75, cidr = cidr("192.168.20.176/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=75, cidr = cidr("192.168.20.184/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=76, cidr = cidr("192.168.20.192/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=76, cidr = cidr("192.168.20.200/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=77, cidr = cidr("192.168.20.208/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=77, cidr = cidr("192.168.20.216/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=78, cidr = cidr("192.168.20.224/29"), mac = 
mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=78, cidr = cidr("192.168.20.232/29"), mac = mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=79, cidr = cidr("192.168.20.240/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=79, cidr = cidr("192.168.20.248/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=80, cidr = cidr("192.168.21.0/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=80, cidr = cidr("192.168.21.8/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=81, cidr = cidr("192.168.21.16/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=81, cidr = cidr("192.168.21.24/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=82, cidr = cidr("192.168.21.32/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=82, cidr = cidr("192.168.21.40/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=83, cidr = cidr("192.168.21.48/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=83, cidr = cidr("192.168.21.56/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=84, cidr = cidr("192.168.21.64/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=84, cidr = cidr("192.168.21.72/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=85, cidr = cidr("192.168.21.80/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=85, cidr = cidr("192.168.21.88/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=1, gre_id=0, svlan_id=16, 
cvlan_id=86, cidr = cidr("192.168.21.96/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=86, cidr = cidr("192.168.21.104/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=87, cidr = cidr("192.168.21.112/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=87, cidr = cidr("192.168.21.120/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=88, cidr = cidr("192.168.21.128/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=88, cidr = cidr("192.168.21.136/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=89, cidr = cidr("192.168.21.144/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=89, cidr = cidr("192.168.21.152/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=90, cidr = cidr("192.168.21.160/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=90, cidr = cidr("192.168.21.168/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=91, cidr = cidr("192.168.21.176/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=91, cidr = cidr("192.168.21.184/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=92, cidr = cidr("192.168.21.192/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=92, cidr = cidr("192.168.21.200/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=93, cidr = cidr("192.168.21.208/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=93, cidr = cidr("192.168.21.216/29"), mac = 
mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=94, cidr = cidr("192.168.21.224/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=94, cidr = cidr("192.168.21.232/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=95, cidr = cidr("192.168.21.240/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=95, cidr = cidr("192.168.21.248/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=96, cidr = cidr("192.168.22.0/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=96, cidr = cidr("192.168.22.8/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=97, cidr = cidr("192.168.22.16/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=97, cidr = cidr("192.168.22.24/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=98, cidr = cidr("192.168.22.32/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=98, cidr = cidr("192.168.22.40/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=99, cidr = cidr("192.168.22.48/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=99, cidr = cidr("192.168.22.56/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=100, cidr = cidr("192.168.22.64/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=100, cidr = cidr("192.168.22.72/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=101, cidr = cidr("192.168.22.80/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=1, gre_id=0, 
svlan_id=17, cvlan_id=101, cidr = cidr("192.168.22.88/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=102, cidr = cidr("192.168.22.96/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=102, cidr = cidr("192.168.22.104/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=103, cidr = cidr("192.168.22.112/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=103, cidr = cidr("192.168.22.120/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=104, cidr = cidr("192.168.22.128/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=104, cidr = cidr("192.168.22.136/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=105, cidr = cidr("192.168.22.144/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=105, cidr = cidr("192.168.22.152/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=106, cidr = cidr("192.168.22.160/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=106, cidr = cidr("192.168.22.168/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=107, cidr = cidr("192.168.22.176/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=107, cidr = cidr("192.168.22.184/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=108, cidr = cidr("192.168.22.192/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=108, cidr = cidr("192.168.22.200/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=109, cidr = 
cidr("192.168.22.208/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=109, cidr = cidr("192.168.22.216/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=110, cidr = cidr("192.168.22.224/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=110, cidr = cidr("192.168.22.232/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=111, cidr = cidr("192.168.22.240/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=111, cidr = cidr("192.168.22.248/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=112, cidr = cidr("192.168.23.0/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=112, cidr = cidr("192.168.23.8/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=113, cidr = cidr("192.168.23.16/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=113, cidr = cidr("192.168.23.24/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=114, cidr = cidr("192.168.23.32/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=114, cidr = cidr("192.168.23.40/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=115, cidr = cidr("192.168.23.48/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=115, cidr = cidr("192.168.23.56/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=116, cidr = cidr("192.168.23.64/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=116, cidr = cidr("192.168.23.72/29"), mac = 
mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=117, cidr = cidr("192.168.23.80/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=117, cidr = cidr("192.168.23.88/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=118, cidr = cidr("192.168.23.96/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=118, cidr = cidr("192.168.23.104/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=119, cidr = cidr("192.168.23.112/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=119, cidr = cidr("192.168.23.120/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=120, cidr = cidr("192.168.23.128/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=120, cidr = cidr("192.168.23.136/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=121, cidr = cidr("192.168.23.144/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=121, cidr = cidr("192.168.23.152/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=122, cidr = cidr("192.168.23.160/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=122, cidr = cidr("192.168.23.168/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=123, cidr = cidr("192.168.23.176/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=123, cidr = cidr("192.168.23.184/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=124, cidr = cidr("192.168.23.192/29"), mac = mac("00:00:01:00:00:7c"), 
user_id=124}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=124, cidr = cidr("192.168.23.200/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=125, cidr = cidr("192.168.23.208/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=125, cidr = cidr("192.168.23.216/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=126, cidr = cidr("192.168.23.224/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=126, cidr = cidr("192.168.23.232/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=127, cidr = cidr("192.168.23.240/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=127, cidr = cidr("192.168.23.248/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=128, cidr = cidr("192.168.24.0/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=128, cidr = cidr("192.168.24.8/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=129, cidr = cidr("192.168.24.16/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=129, cidr = cidr("192.168.24.24/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=130, cidr = cidr("192.168.24.32/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=130, cidr = cidr("192.168.24.40/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=131, cidr = cidr("192.168.24.48/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=131, cidr = cidr("192.168.24.56/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=1, gre_id=0, 
svlan_id=16, cvlan_id=132, cidr = cidr("192.168.24.64/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=132, cidr = cidr("192.168.24.72/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=133, cidr = cidr("192.168.24.80/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=133, cidr = cidr("192.168.24.88/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=134, cidr = cidr("192.168.24.96/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=134, cidr = cidr("192.168.24.104/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=135, cidr = cidr("192.168.24.112/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=135, cidr = cidr("192.168.24.120/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=136, cidr = cidr("192.168.24.128/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=136, cidr = cidr("192.168.24.136/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=137, cidr = cidr("192.168.24.144/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=137, cidr = cidr("192.168.24.152/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=138, cidr = cidr("192.168.24.160/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=138, cidr = cidr("192.168.24.168/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=139, cidr = cidr("192.168.24.176/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=139, cidr = 
cidr("192.168.24.184/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=140, cidr = cidr("192.168.24.192/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=140, cidr = cidr("192.168.24.200/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=141, cidr = cidr("192.168.24.208/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=141, cidr = cidr("192.168.24.216/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=142, cidr = cidr("192.168.24.224/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=142, cidr = cidr("192.168.24.232/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=143, cidr = cidr("192.168.24.240/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=143, cidr = cidr("192.168.24.248/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=144, cidr = cidr("192.168.25.0/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=144, cidr = cidr("192.168.25.8/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=145, cidr = cidr("192.168.25.16/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=145, cidr = cidr("192.168.25.24/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=146, cidr = cidr("192.168.25.32/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=146, cidr = cidr("192.168.25.40/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=147, cidr = cidr("192.168.25.48/29"), mac = 
mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=147, cidr = cidr("192.168.25.56/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=148, cidr = cidr("192.168.25.64/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=148, cidr = cidr("192.168.25.72/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=149, cidr = cidr("192.168.25.80/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=149, cidr = cidr("192.168.25.88/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=150, cidr = cidr("192.168.25.96/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=150, cidr = cidr("192.168.25.104/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=151, cidr = cidr("192.168.25.112/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=151, cidr = cidr("192.168.25.120/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=152, cidr = cidr("192.168.25.128/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=152, cidr = cidr("192.168.25.136/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=153, cidr = cidr("192.168.25.144/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=153, cidr = cidr("192.168.25.152/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=154, cidr = cidr("192.168.25.160/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=154, cidr = cidr("192.168.25.168/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, 
+ {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=155, cidr = cidr("192.168.25.176/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=155, cidr = cidr("192.168.25.184/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=156, cidr = cidr("192.168.25.192/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=156, cidr = cidr("192.168.25.200/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=157, cidr = cidr("192.168.25.208/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=157, cidr = cidr("192.168.25.216/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=158, cidr = cidr("192.168.25.224/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=158, cidr = cidr("192.168.25.232/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=159, cidr = cidr("192.168.25.240/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=159, cidr = cidr("192.168.25.248/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=160, cidr = cidr("192.168.26.0/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=160, cidr = cidr("192.168.26.8/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=161, cidr = cidr("192.168.26.16/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=161, cidr = cidr("192.168.26.24/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=162, cidr = cidr("192.168.26.32/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=1, gre_id=0, svlan_id=17, 
cvlan_id=162, cidr = cidr("192.168.26.40/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=163, cidr = cidr("192.168.26.48/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=163, cidr = cidr("192.168.26.56/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=164, cidr = cidr("192.168.26.64/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=164, cidr = cidr("192.168.26.72/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=165, cidr = cidr("192.168.26.80/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=165, cidr = cidr("192.168.26.88/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=166, cidr = cidr("192.168.26.96/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=166, cidr = cidr("192.168.26.104/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=167, cidr = cidr("192.168.26.112/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=167, cidr = cidr("192.168.26.120/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=168, cidr = cidr("192.168.26.128/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=168, cidr = cidr("192.168.26.136/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=169, cidr = cidr("192.168.26.144/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=169, cidr = cidr("192.168.26.152/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=170, cidr = 
cidr("192.168.26.160/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=170, cidr = cidr("192.168.26.168/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=171, cidr = cidr("192.168.26.176/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=171, cidr = cidr("192.168.26.184/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=172, cidr = cidr("192.168.26.192/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=172, cidr = cidr("192.168.26.200/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=173, cidr = cidr("192.168.26.208/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=173, cidr = cidr("192.168.26.216/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=174, cidr = cidr("192.168.26.224/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=174, cidr = cidr("192.168.26.232/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=175, cidr = cidr("192.168.26.240/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=175, cidr = cidr("192.168.26.248/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=176, cidr = cidr("192.168.27.0/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=176, cidr = cidr("192.168.27.8/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=177, cidr = cidr("192.168.27.16/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=177, cidr = cidr("192.168.27.24/29"), mac = 
mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=178, cidr = cidr("192.168.27.32/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=178, cidr = cidr("192.168.27.40/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=179, cidr = cidr("192.168.27.48/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=179, cidr = cidr("192.168.27.56/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=180, cidr = cidr("192.168.27.64/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=180, cidr = cidr("192.168.27.72/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=181, cidr = cidr("192.168.27.80/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=181, cidr = cidr("192.168.27.88/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=182, cidr = cidr("192.168.27.96/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=182, cidr = cidr("192.168.27.104/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=183, cidr = cidr("192.168.27.112/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=183, cidr = cidr("192.168.27.120/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=184, cidr = cidr("192.168.27.128/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=184, cidr = cidr("192.168.27.136/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=185, cidr = cidr("192.168.27.144/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + 
{dest_id=1, gre_id=0, svlan_id=17, cvlan_id=185, cidr = cidr("192.168.27.152/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=186, cidr = cidr("192.168.27.160/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=186, cidr = cidr("192.168.27.168/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=187, cidr = cidr("192.168.27.176/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=187, cidr = cidr("192.168.27.184/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=188, cidr = cidr("192.168.27.192/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=188, cidr = cidr("192.168.27.200/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=189, cidr = cidr("192.168.27.208/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=189, cidr = cidr("192.168.27.216/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=190, cidr = cidr("192.168.27.224/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=190, cidr = cidr("192.168.27.232/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=191, cidr = cidr("192.168.27.240/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=191, cidr = cidr("192.168.27.248/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=192, cidr = cidr("192.168.28.0/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=192, cidr = cidr("192.168.28.8/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=1, gre_id=0, svlan_id=16, 
cvlan_id=193, cidr = cidr("192.168.28.16/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=193, cidr = cidr("192.168.28.24/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=194, cidr = cidr("192.168.28.32/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=194, cidr = cidr("192.168.28.40/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=195, cidr = cidr("192.168.28.48/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=195, cidr = cidr("192.168.28.56/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=196, cidr = cidr("192.168.28.64/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=196, cidr = cidr("192.168.28.72/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=197, cidr = cidr("192.168.28.80/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=197, cidr = cidr("192.168.28.88/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=198, cidr = cidr("192.168.28.96/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=198, cidr = cidr("192.168.28.104/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=199, cidr = cidr("192.168.28.112/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=199, cidr = cidr("192.168.28.120/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=200, cidr = cidr("192.168.28.128/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=200, cidr = 
cidr("192.168.28.136/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=201, cidr = cidr("192.168.28.144/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=201, cidr = cidr("192.168.28.152/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=202, cidr = cidr("192.168.28.160/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=202, cidr = cidr("192.168.28.168/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=203, cidr = cidr("192.168.28.176/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=203, cidr = cidr("192.168.28.184/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=204, cidr = cidr("192.168.28.192/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=204, cidr = cidr("192.168.28.200/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=205, cidr = cidr("192.168.28.208/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=205, cidr = cidr("192.168.28.216/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=206, cidr = cidr("192.168.28.224/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=206, cidr = cidr("192.168.28.232/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=207, cidr = cidr("192.168.28.240/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=207, cidr = cidr("192.168.28.248/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=208, cidr = cidr("192.168.29.0/29"), mac = 
mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=208, cidr = cidr("192.168.29.8/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=209, cidr = cidr("192.168.29.16/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=209, cidr = cidr("192.168.29.24/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=210, cidr = cidr("192.168.29.32/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=210, cidr = cidr("192.168.29.40/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=211, cidr = cidr("192.168.29.48/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=211, cidr = cidr("192.168.29.56/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=212, cidr = cidr("192.168.29.64/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=212, cidr = cidr("192.168.29.72/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=213, cidr = cidr("192.168.29.80/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=213, cidr = cidr("192.168.29.88/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=214, cidr = cidr("192.168.29.96/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=214, cidr = cidr("192.168.29.104/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=215, cidr = cidr("192.168.29.112/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=215, cidr = cidr("192.168.29.120/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + 
{dest_id=1, gre_id=0, svlan_id=16, cvlan_id=216, cidr = cidr("192.168.29.128/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=216, cidr = cidr("192.168.29.136/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=217, cidr = cidr("192.168.29.144/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=217, cidr = cidr("192.168.29.152/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=218, cidr = cidr("192.168.29.160/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=218, cidr = cidr("192.168.29.168/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=219, cidr = cidr("192.168.29.176/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=219, cidr = cidr("192.168.29.184/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=220, cidr = cidr("192.168.29.192/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=220, cidr = cidr("192.168.29.200/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=221, cidr = cidr("192.168.29.208/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=221, cidr = cidr("192.168.29.216/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=222, cidr = cidr("192.168.29.224/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=222, cidr = cidr("192.168.29.232/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=223, cidr = cidr("192.168.29.240/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=1, gre_id=0, svlan_id=17, 
cvlan_id=223, cidr = cidr("192.168.29.248/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=224, cidr = cidr("192.168.30.0/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=224, cidr = cidr("192.168.30.8/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=225, cidr = cidr("192.168.30.16/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=225, cidr = cidr("192.168.30.24/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=226, cidr = cidr("192.168.30.32/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=226, cidr = cidr("192.168.30.40/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=227, cidr = cidr("192.168.30.48/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=227, cidr = cidr("192.168.30.56/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=228, cidr = cidr("192.168.30.64/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=228, cidr = cidr("192.168.30.72/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=229, cidr = cidr("192.168.30.80/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=229, cidr = cidr("192.168.30.88/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=230, cidr = cidr("192.168.30.96/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=230, cidr = cidr("192.168.30.104/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=231, cidr = cidr("192.168.30.112/29"), 
mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=231, cidr = cidr("192.168.30.120/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=232, cidr = cidr("192.168.30.128/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=232, cidr = cidr("192.168.30.136/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=233, cidr = cidr("192.168.30.144/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=233, cidr = cidr("192.168.30.152/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=234, cidr = cidr("192.168.30.160/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=234, cidr = cidr("192.168.30.168/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=235, cidr = cidr("192.168.30.176/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=235, cidr = cidr("192.168.30.184/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=236, cidr = cidr("192.168.30.192/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=236, cidr = cidr("192.168.30.200/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=237, cidr = cidr("192.168.30.208/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=237, cidr = cidr("192.168.30.216/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=238, cidr = cidr("192.168.30.224/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=238, cidr = cidr("192.168.30.232/29"), mac = mac("00:00:01:00:00:ee"), 
user_id=238}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=239, cidr = cidr("192.168.30.240/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=239, cidr = cidr("192.168.30.248/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=240, cidr = cidr("192.168.31.0/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=240, cidr = cidr("192.168.31.8/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=241, cidr = cidr("192.168.31.16/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=241, cidr = cidr("192.168.31.24/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=242, cidr = cidr("192.168.31.32/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=242, cidr = cidr("192.168.31.40/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=243, cidr = cidr("192.168.31.48/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=243, cidr = cidr("192.168.31.56/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=244, cidr = cidr("192.168.31.64/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=244, cidr = cidr("192.168.31.72/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=245, cidr = cidr("192.168.31.80/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=245, cidr = cidr("192.168.31.88/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=246, cidr = cidr("192.168.31.96/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=1, gre_id=0, 
svlan_id=17, cvlan_id=246, cidr = cidr("192.168.31.104/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=247, cidr = cidr("192.168.31.112/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=247, cidr = cidr("192.168.31.120/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=248, cidr = cidr("192.168.31.128/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=248, cidr = cidr("192.168.31.136/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=249, cidr = cidr("192.168.31.144/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=249, cidr = cidr("192.168.31.152/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=250, cidr = cidr("192.168.31.160/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=250, cidr = cidr("192.168.31.168/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=251, cidr = cidr("192.168.31.176/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=251, cidr = cidr("192.168.31.184/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=252, cidr = cidr("192.168.31.192/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=252, cidr = cidr("192.168.31.200/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=253, cidr = cidr("192.168.31.208/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=253, cidr = cidr("192.168.31.216/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=254, cidr = 
cidr("192.168.31.224/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=254, cidr = cidr("192.168.31.232/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=1, gre_id=0, svlan_id=16, cvlan_id=255, cidr = cidr("192.168.31.240/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=1, gre_id=0, svlan_id=17, cvlan_id=255, cidr = cidr("192.168.31.248/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=0, cidr = cidr("192.168.32.0/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=0, cidr = cidr("192.168.32.8/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=1, cidr = cidr("192.168.32.16/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=1, cidr = cidr("192.168.32.24/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=2, cidr = cidr("192.168.32.32/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=2, cidr = cidr("192.168.32.40/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=3, cidr = cidr("192.168.32.48/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=3, cidr = cidr("192.168.32.56/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=4, cidr = cidr("192.168.32.64/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=4, cidr = cidr("192.168.32.72/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=5, cidr = cidr("192.168.32.80/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=5, cidr = cidr("192.168.32.88/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=2, gre_id=0, 
svlan_id=32, cvlan_id=6, cidr = cidr("192.168.32.96/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=6, cidr = cidr("192.168.32.104/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=7, cidr = cidr("192.168.32.112/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=7, cidr = cidr("192.168.32.120/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=8, cidr = cidr("192.168.32.128/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=8, cidr = cidr("192.168.32.136/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=9, cidr = cidr("192.168.32.144/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=9, cidr = cidr("192.168.32.152/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=10, cidr = cidr("192.168.32.160/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=10, cidr = cidr("192.168.32.168/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=11, cidr = cidr("192.168.32.176/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=11, cidr = cidr("192.168.32.184/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=12, cidr = cidr("192.168.32.192/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=12, cidr = cidr("192.168.32.200/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=13, cidr = cidr("192.168.32.208/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=13, cidr = cidr("192.168.32.216/29"), mac = 
mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=14, cidr = cidr("192.168.32.224/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=14, cidr = cidr("192.168.32.232/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=15, cidr = cidr("192.168.32.240/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=15, cidr = cidr("192.168.32.248/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=16, cidr = cidr("192.168.33.0/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=16, cidr = cidr("192.168.33.8/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=17, cidr = cidr("192.168.33.16/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=17, cidr = cidr("192.168.33.24/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=18, cidr = cidr("192.168.33.32/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=18, cidr = cidr("192.168.33.40/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=19, cidr = cidr("192.168.33.48/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=19, cidr = cidr("192.168.33.56/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=20, cidr = cidr("192.168.33.64/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=20, cidr = cidr("192.168.33.72/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=21, cidr = cidr("192.168.33.80/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=2, gre_id=0, svlan_id=33, 
cvlan_id=21, cidr = cidr("192.168.33.88/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=22, cidr = cidr("192.168.33.96/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=22, cidr = cidr("192.168.33.104/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=23, cidr = cidr("192.168.33.112/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=23, cidr = cidr("192.168.33.120/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=24, cidr = cidr("192.168.33.128/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=24, cidr = cidr("192.168.33.136/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=25, cidr = cidr("192.168.33.144/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=25, cidr = cidr("192.168.33.152/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=26, cidr = cidr("192.168.33.160/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=26, cidr = cidr("192.168.33.168/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=27, cidr = cidr("192.168.33.176/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=27, cidr = cidr("192.168.33.184/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=28, cidr = cidr("192.168.33.192/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=28, cidr = cidr("192.168.33.200/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=29, cidr = cidr("192.168.33.208/29"), mac = 
mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=29, cidr = cidr("192.168.33.216/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=30, cidr = cidr("192.168.33.224/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=30, cidr = cidr("192.168.33.232/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=31, cidr = cidr("192.168.33.240/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=31, cidr = cidr("192.168.33.248/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=32, cidr = cidr("192.168.34.0/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=32, cidr = cidr("192.168.34.8/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=33, cidr = cidr("192.168.34.16/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=33, cidr = cidr("192.168.34.24/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=34, cidr = cidr("192.168.34.32/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=34, cidr = cidr("192.168.34.40/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=35, cidr = cidr("192.168.34.48/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=35, cidr = cidr("192.168.34.56/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=36, cidr = cidr("192.168.34.64/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=36, cidr = cidr("192.168.34.72/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=2, gre_id=0, svlan_id=32, 
cvlan_id=37, cidr = cidr("192.168.34.80/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=37, cidr = cidr("192.168.34.88/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=38, cidr = cidr("192.168.34.96/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=38, cidr = cidr("192.168.34.104/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=39, cidr = cidr("192.168.34.112/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=39, cidr = cidr("192.168.34.120/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=40, cidr = cidr("192.168.34.128/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=40, cidr = cidr("192.168.34.136/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=41, cidr = cidr("192.168.34.144/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=41, cidr = cidr("192.168.34.152/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=42, cidr = cidr("192.168.34.160/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=42, cidr = cidr("192.168.34.168/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=43, cidr = cidr("192.168.34.176/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=43, cidr = cidr("192.168.34.184/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=44, cidr = cidr("192.168.34.192/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=44, cidr = cidr("192.168.34.200/29"), mac = 
mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=45, cidr = cidr("192.168.34.208/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=45, cidr = cidr("192.168.34.216/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=46, cidr = cidr("192.168.34.224/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=46, cidr = cidr("192.168.34.232/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=47, cidr = cidr("192.168.34.240/29"), mac = mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=47, cidr = cidr("192.168.34.248/29"), mac = mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=48, cidr = cidr("192.168.35.0/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=48, cidr = cidr("192.168.35.8/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=49, cidr = cidr("192.168.35.16/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=49, cidr = cidr("192.168.35.24/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=50, cidr = cidr("192.168.35.32/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=50, cidr = cidr("192.168.35.40/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=51, cidr = cidr("192.168.35.48/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=51, cidr = cidr("192.168.35.56/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=52, cidr = cidr("192.168.35.64/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=2, gre_id=0, svlan_id=33, 
cvlan_id=52, cidr = cidr("192.168.35.72/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=53, cidr = cidr("192.168.35.80/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=53, cidr = cidr("192.168.35.88/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=54, cidr = cidr("192.168.35.96/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=54, cidr = cidr("192.168.35.104/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=55, cidr = cidr("192.168.35.112/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=55, cidr = cidr("192.168.35.120/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=56, cidr = cidr("192.168.35.128/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=56, cidr = cidr("192.168.35.136/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=57, cidr = cidr("192.168.35.144/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=57, cidr = cidr("192.168.35.152/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=58, cidr = cidr("192.168.35.160/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=58, cidr = cidr("192.168.35.168/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=59, cidr = cidr("192.168.35.176/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=59, cidr = cidr("192.168.35.184/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=60, cidr = cidr("192.168.35.192/29"), mac = 
mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=60, cidr = cidr("192.168.35.200/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=61, cidr = cidr("192.168.35.208/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=61, cidr = cidr("192.168.35.216/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=62, cidr = cidr("192.168.35.224/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=62, cidr = cidr("192.168.35.232/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=63, cidr = cidr("192.168.35.240/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=63, cidr = cidr("192.168.35.248/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=64, cidr = cidr("192.168.36.0/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=64, cidr = cidr("192.168.36.8/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=65, cidr = cidr("192.168.36.16/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=65, cidr = cidr("192.168.36.24/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=66, cidr = cidr("192.168.36.32/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=66, cidr = cidr("192.168.36.40/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=67, cidr = cidr("192.168.36.48/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=67, cidr = cidr("192.168.36.56/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=2, gre_id=0, 
svlan_id=32, cvlan_id=68, cidr = cidr("192.168.36.64/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=68, cidr = cidr("192.168.36.72/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=69, cidr = cidr("192.168.36.80/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=69, cidr = cidr("192.168.36.88/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=70, cidr = cidr("192.168.36.96/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=70, cidr = cidr("192.168.36.104/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=71, cidr = cidr("192.168.36.112/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=71, cidr = cidr("192.168.36.120/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=72, cidr = cidr("192.168.36.128/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=72, cidr = cidr("192.168.36.136/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=73, cidr = cidr("192.168.36.144/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=73, cidr = cidr("192.168.36.152/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=74, cidr = cidr("192.168.36.160/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=74, cidr = cidr("192.168.36.168/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=75, cidr = cidr("192.168.36.176/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=75, cidr = cidr("192.168.36.184/29"), mac = 
mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=76, cidr = cidr("192.168.36.192/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=76, cidr = cidr("192.168.36.200/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=77, cidr = cidr("192.168.36.208/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=77, cidr = cidr("192.168.36.216/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=78, cidr = cidr("192.168.36.224/29"), mac = mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=78, cidr = cidr("192.168.36.232/29"), mac = mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=79, cidr = cidr("192.168.36.240/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=79, cidr = cidr("192.168.36.248/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=80, cidr = cidr("192.168.37.0/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=80, cidr = cidr("192.168.37.8/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=81, cidr = cidr("192.168.37.16/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=81, cidr = cidr("192.168.37.24/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=82, cidr = cidr("192.168.37.32/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=82, cidr = cidr("192.168.37.40/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=83, cidr = cidr("192.168.37.48/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=2, gre_id=0, 
svlan_id=33, cvlan_id=83, cidr = cidr("192.168.37.56/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=84, cidr = cidr("192.168.37.64/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=84, cidr = cidr("192.168.37.72/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=85, cidr = cidr("192.168.37.80/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=85, cidr = cidr("192.168.37.88/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=86, cidr = cidr("192.168.37.96/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=86, cidr = cidr("192.168.37.104/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=87, cidr = cidr("192.168.37.112/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=87, cidr = cidr("192.168.37.120/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=88, cidr = cidr("192.168.37.128/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=88, cidr = cidr("192.168.37.136/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=89, cidr = cidr("192.168.37.144/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=89, cidr = cidr("192.168.37.152/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=90, cidr = cidr("192.168.37.160/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=90, cidr = cidr("192.168.37.168/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=91, cidr = cidr("192.168.37.176/29"), mac = 
mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=91, cidr = cidr("192.168.37.184/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=92, cidr = cidr("192.168.37.192/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=92, cidr = cidr("192.168.37.200/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=93, cidr = cidr("192.168.37.208/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=93, cidr = cidr("192.168.37.216/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=94, cidr = cidr("192.168.37.224/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=94, cidr = cidr("192.168.37.232/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=95, cidr = cidr("192.168.37.240/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=95, cidr = cidr("192.168.37.248/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=96, cidr = cidr("192.168.38.0/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=96, cidr = cidr("192.168.38.8/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=97, cidr = cidr("192.168.38.16/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=97, cidr = cidr("192.168.38.24/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=98, cidr = cidr("192.168.38.32/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=98, cidr = cidr("192.168.38.40/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=2, gre_id=0, 
svlan_id=32, cvlan_id=99, cidr = cidr("192.168.38.48/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=99, cidr = cidr("192.168.38.56/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=100, cidr = cidr("192.168.38.64/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=100, cidr = cidr("192.168.38.72/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=101, cidr = cidr("192.168.38.80/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=101, cidr = cidr("192.168.38.88/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=102, cidr = cidr("192.168.38.96/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=102, cidr = cidr("192.168.38.104/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=103, cidr = cidr("192.168.38.112/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=103, cidr = cidr("192.168.38.120/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=104, cidr = cidr("192.168.38.128/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=104, cidr = cidr("192.168.38.136/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=105, cidr = cidr("192.168.38.144/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=105, cidr = cidr("192.168.38.152/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=106, cidr = cidr("192.168.38.160/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=106, cidr = 
cidr("192.168.38.168/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=107, cidr = cidr("192.168.38.176/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=107, cidr = cidr("192.168.38.184/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=108, cidr = cidr("192.168.38.192/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=108, cidr = cidr("192.168.38.200/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=109, cidr = cidr("192.168.38.208/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=109, cidr = cidr("192.168.38.216/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=110, cidr = cidr("192.168.38.224/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=110, cidr = cidr("192.168.38.232/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=111, cidr = cidr("192.168.38.240/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=111, cidr = cidr("192.168.38.248/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=112, cidr = cidr("192.168.39.0/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=112, cidr = cidr("192.168.39.8/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=113, cidr = cidr("192.168.39.16/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=113, cidr = cidr("192.168.39.24/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=114, cidr = cidr("192.168.39.32/29"), mac = 
mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=114, cidr = cidr("192.168.39.40/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=115, cidr = cidr("192.168.39.48/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=115, cidr = cidr("192.168.39.56/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=116, cidr = cidr("192.168.39.64/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=116, cidr = cidr("192.168.39.72/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=117, cidr = cidr("192.168.39.80/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=117, cidr = cidr("192.168.39.88/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=118, cidr = cidr("192.168.39.96/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=118, cidr = cidr("192.168.39.104/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=119, cidr = cidr("192.168.39.112/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=119, cidr = cidr("192.168.39.120/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=120, cidr = cidr("192.168.39.128/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=120, cidr = cidr("192.168.39.136/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=121, cidr = cidr("192.168.39.144/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=121, cidr = cidr("192.168.39.152/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + 
{dest_id=2, gre_id=0, svlan_id=32, cvlan_id=122, cidr = cidr("192.168.39.160/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=122, cidr = cidr("192.168.39.168/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=123, cidr = cidr("192.168.39.176/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=123, cidr = cidr("192.168.39.184/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=124, cidr = cidr("192.168.39.192/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=124, cidr = cidr("192.168.39.200/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=125, cidr = cidr("192.168.39.208/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=125, cidr = cidr("192.168.39.216/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=126, cidr = cidr("192.168.39.224/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=126, cidr = cidr("192.168.39.232/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=127, cidr = cidr("192.168.39.240/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=127, cidr = cidr("192.168.39.248/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=128, cidr = cidr("192.168.40.0/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=128, cidr = cidr("192.168.40.8/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=129, cidr = cidr("192.168.40.16/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=2, gre_id=0, svlan_id=33, 
cvlan_id=129, cidr = cidr("192.168.40.24/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=130, cidr = cidr("192.168.40.32/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=130, cidr = cidr("192.168.40.40/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=131, cidr = cidr("192.168.40.48/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=131, cidr = cidr("192.168.40.56/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=132, cidr = cidr("192.168.40.64/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=132, cidr = cidr("192.168.40.72/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=133, cidr = cidr("192.168.40.80/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=133, cidr = cidr("192.168.40.88/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=134, cidr = cidr("192.168.40.96/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=134, cidr = cidr("192.168.40.104/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=135, cidr = cidr("192.168.40.112/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=135, cidr = cidr("192.168.40.120/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=136, cidr = cidr("192.168.40.128/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=136, cidr = cidr("192.168.40.136/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=137, cidr = 
cidr("192.168.40.144/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=137, cidr = cidr("192.168.40.152/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=138, cidr = cidr("192.168.40.160/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=138, cidr = cidr("192.168.40.168/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=139, cidr = cidr("192.168.40.176/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=139, cidr = cidr("192.168.40.184/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=140, cidr = cidr("192.168.40.192/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=140, cidr = cidr("192.168.40.200/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=141, cidr = cidr("192.168.40.208/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=141, cidr = cidr("192.168.40.216/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=142, cidr = cidr("192.168.40.224/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=142, cidr = cidr("192.168.40.232/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=143, cidr = cidr("192.168.40.240/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=143, cidr = cidr("192.168.40.248/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=144, cidr = cidr("192.168.41.0/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=144, cidr = cidr("192.168.41.8/29"), mac = 
mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=145, cidr = cidr("192.168.41.16/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=145, cidr = cidr("192.168.41.24/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=146, cidr = cidr("192.168.41.32/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=146, cidr = cidr("192.168.41.40/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=147, cidr = cidr("192.168.41.48/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=147, cidr = cidr("192.168.41.56/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=148, cidr = cidr("192.168.41.64/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=148, cidr = cidr("192.168.41.72/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=149, cidr = cidr("192.168.41.80/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=149, cidr = cidr("192.168.41.88/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=150, cidr = cidr("192.168.41.96/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=150, cidr = cidr("192.168.41.104/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=151, cidr = cidr("192.168.41.112/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=151, cidr = cidr("192.168.41.120/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=152, cidr = cidr("192.168.41.128/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + 
{dest_id=2, gre_id=0, svlan_id=33, cvlan_id=152, cidr = cidr("192.168.41.136/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=153, cidr = cidr("192.168.41.144/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=153, cidr = cidr("192.168.41.152/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=154, cidr = cidr("192.168.41.160/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=154, cidr = cidr("192.168.41.168/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=155, cidr = cidr("192.168.41.176/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=155, cidr = cidr("192.168.41.184/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=156, cidr = cidr("192.168.41.192/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=156, cidr = cidr("192.168.41.200/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=157, cidr = cidr("192.168.41.208/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=157, cidr = cidr("192.168.41.216/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=158, cidr = cidr("192.168.41.224/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=158, cidr = cidr("192.168.41.232/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=159, cidr = cidr("192.168.41.240/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=159, cidr = cidr("192.168.41.248/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=2, gre_id=0, svlan_id=32, 
cvlan_id=160, cidr = cidr("192.168.42.0/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=160, cidr = cidr("192.168.42.8/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=161, cidr = cidr("192.168.42.16/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=161, cidr = cidr("192.168.42.24/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=162, cidr = cidr("192.168.42.32/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=162, cidr = cidr("192.168.42.40/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=163, cidr = cidr("192.168.42.48/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=163, cidr = cidr("192.168.42.56/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=164, cidr = cidr("192.168.42.64/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=164, cidr = cidr("192.168.42.72/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=165, cidr = cidr("192.168.42.80/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=165, cidr = cidr("192.168.42.88/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=166, cidr = cidr("192.168.42.96/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=166, cidr = cidr("192.168.42.104/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=167, cidr = cidr("192.168.42.112/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=167, cidr = cidr("192.168.42.120/29"), 
mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=168, cidr = cidr("192.168.42.128/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=168, cidr = cidr("192.168.42.136/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=169, cidr = cidr("192.168.42.144/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=169, cidr = cidr("192.168.42.152/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=170, cidr = cidr("192.168.42.160/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=170, cidr = cidr("192.168.42.168/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=171, cidr = cidr("192.168.42.176/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=171, cidr = cidr("192.168.42.184/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=172, cidr = cidr("192.168.42.192/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=172, cidr = cidr("192.168.42.200/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=173, cidr = cidr("192.168.42.208/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=173, cidr = cidr("192.168.42.216/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=174, cidr = cidr("192.168.42.224/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=174, cidr = cidr("192.168.42.232/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=175, cidr = cidr("192.168.42.240/29"), mac = mac("00:00:01:00:00:af"), 
user_id=175}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=175, cidr = cidr("192.168.42.248/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=176, cidr = cidr("192.168.43.0/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=176, cidr = cidr("192.168.43.8/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=177, cidr = cidr("192.168.43.16/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=177, cidr = cidr("192.168.43.24/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=178, cidr = cidr("192.168.43.32/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=178, cidr = cidr("192.168.43.40/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=179, cidr = cidr("192.168.43.48/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=179, cidr = cidr("192.168.43.56/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=180, cidr = cidr("192.168.43.64/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=180, cidr = cidr("192.168.43.72/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=181, cidr = cidr("192.168.43.80/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=181, cidr = cidr("192.168.43.88/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=182, cidr = cidr("192.168.43.96/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=182, cidr = cidr("192.168.43.104/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=2, gre_id=0, 
svlan_id=32, cvlan_id=183, cidr = cidr("192.168.43.112/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=183, cidr = cidr("192.168.43.120/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=184, cidr = cidr("192.168.43.128/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=184, cidr = cidr("192.168.43.136/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=185, cidr = cidr("192.168.43.144/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=185, cidr = cidr("192.168.43.152/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=186, cidr = cidr("192.168.43.160/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=186, cidr = cidr("192.168.43.168/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=187, cidr = cidr("192.168.43.176/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=187, cidr = cidr("192.168.43.184/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=188, cidr = cidr("192.168.43.192/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=188, cidr = cidr("192.168.43.200/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=189, cidr = cidr("192.168.43.208/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=189, cidr = cidr("192.168.43.216/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=190, cidr = cidr("192.168.43.224/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=190, cidr = 
cidr("192.168.43.232/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=191, cidr = cidr("192.168.43.240/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=191, cidr = cidr("192.168.43.248/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=192, cidr = cidr("192.168.44.0/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=192, cidr = cidr("192.168.44.8/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=193, cidr = cidr("192.168.44.16/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=193, cidr = cidr("192.168.44.24/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=194, cidr = cidr("192.168.44.32/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=194, cidr = cidr("192.168.44.40/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=195, cidr = cidr("192.168.44.48/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=195, cidr = cidr("192.168.44.56/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=196, cidr = cidr("192.168.44.64/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=196, cidr = cidr("192.168.44.72/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=197, cidr = cidr("192.168.44.80/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=197, cidr = cidr("192.168.44.88/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=198, cidr = cidr("192.168.44.96/29"), mac = 
mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=198, cidr = cidr("192.168.44.104/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=199, cidr = cidr("192.168.44.112/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=199, cidr = cidr("192.168.44.120/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=200, cidr = cidr("192.168.44.128/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=200, cidr = cidr("192.168.44.136/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=201, cidr = cidr("192.168.44.144/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=201, cidr = cidr("192.168.44.152/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=202, cidr = cidr("192.168.44.160/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=202, cidr = cidr("192.168.44.168/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=203, cidr = cidr("192.168.44.176/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=203, cidr = cidr("192.168.44.184/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=204, cidr = cidr("192.168.44.192/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=204, cidr = cidr("192.168.44.200/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=205, cidr = cidr("192.168.44.208/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=205, cidr = cidr("192.168.44.216/29"), mac = mac("00:00:01:00:00:cd"), 
user_id=205}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=206, cidr = cidr("192.168.44.224/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=206, cidr = cidr("192.168.44.232/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=207, cidr = cidr("192.168.44.240/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=207, cidr = cidr("192.168.44.248/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=208, cidr = cidr("192.168.45.0/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=208, cidr = cidr("192.168.45.8/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=209, cidr = cidr("192.168.45.16/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=209, cidr = cidr("192.168.45.24/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=210, cidr = cidr("192.168.45.32/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=210, cidr = cidr("192.168.45.40/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=211, cidr = cidr("192.168.45.48/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=211, cidr = cidr("192.168.45.56/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=212, cidr = cidr("192.168.45.64/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=212, cidr = cidr("192.168.45.72/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=213, cidr = cidr("192.168.45.80/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=2, gre_id=0, 
svlan_id=33, cvlan_id=213, cidr = cidr("192.168.45.88/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=214, cidr = cidr("192.168.45.96/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=214, cidr = cidr("192.168.45.104/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=215, cidr = cidr("192.168.45.112/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=215, cidr = cidr("192.168.45.120/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=216, cidr = cidr("192.168.45.128/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=216, cidr = cidr("192.168.45.136/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=217, cidr = cidr("192.168.45.144/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=217, cidr = cidr("192.168.45.152/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=218, cidr = cidr("192.168.45.160/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=218, cidr = cidr("192.168.45.168/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=219, cidr = cidr("192.168.45.176/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=219, cidr = cidr("192.168.45.184/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=220, cidr = cidr("192.168.45.192/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=220, cidr = cidr("192.168.45.200/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=221, cidr = 
cidr("192.168.45.208/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=221, cidr = cidr("192.168.45.216/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=222, cidr = cidr("192.168.45.224/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=222, cidr = cidr("192.168.45.232/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=223, cidr = cidr("192.168.45.240/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=223, cidr = cidr("192.168.45.248/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=224, cidr = cidr("192.168.46.0/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=224, cidr = cidr("192.168.46.8/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=225, cidr = cidr("192.168.46.16/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=225, cidr = cidr("192.168.46.24/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=226, cidr = cidr("192.168.46.32/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=226, cidr = cidr("192.168.46.40/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=227, cidr = cidr("192.168.46.48/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=227, cidr = cidr("192.168.46.56/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=228, cidr = cidr("192.168.46.64/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=228, cidr = cidr("192.168.46.72/29"), mac = 
mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=229, cidr = cidr("192.168.46.80/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=229, cidr = cidr("192.168.46.88/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=230, cidr = cidr("192.168.46.96/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=230, cidr = cidr("192.168.46.104/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=231, cidr = cidr("192.168.46.112/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=231, cidr = cidr("192.168.46.120/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=232, cidr = cidr("192.168.46.128/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=232, cidr = cidr("192.168.46.136/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=233, cidr = cidr("192.168.46.144/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=233, cidr = cidr("192.168.46.152/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=234, cidr = cidr("192.168.46.160/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=234, cidr = cidr("192.168.46.168/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=235, cidr = cidr("192.168.46.176/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=235, cidr = cidr("192.168.46.184/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=236, cidr = cidr("192.168.46.192/29"), mac = mac("00:00:01:00:00:ec"), 
user_id=236}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=236, cidr = cidr("192.168.46.200/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=237, cidr = cidr("192.168.46.208/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=237, cidr = cidr("192.168.46.216/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=238, cidr = cidr("192.168.46.224/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=238, cidr = cidr("192.168.46.232/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=239, cidr = cidr("192.168.46.240/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=239, cidr = cidr("192.168.46.248/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=240, cidr = cidr("192.168.47.0/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=240, cidr = cidr("192.168.47.8/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=241, cidr = cidr("192.168.47.16/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=241, cidr = cidr("192.168.47.24/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=242, cidr = cidr("192.168.47.32/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=242, cidr = cidr("192.168.47.40/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=243, cidr = cidr("192.168.47.48/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=243, cidr = cidr("192.168.47.56/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=2, gre_id=0, 
svlan_id=32, cvlan_id=244, cidr = cidr("192.168.47.64/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=244, cidr = cidr("192.168.47.72/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=245, cidr = cidr("192.168.47.80/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=245, cidr = cidr("192.168.47.88/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=246, cidr = cidr("192.168.47.96/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=246, cidr = cidr("192.168.47.104/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=247, cidr = cidr("192.168.47.112/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=247, cidr = cidr("192.168.47.120/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=248, cidr = cidr("192.168.47.128/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=248, cidr = cidr("192.168.47.136/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=249, cidr = cidr("192.168.47.144/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=249, cidr = cidr("192.168.47.152/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=250, cidr = cidr("192.168.47.160/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=250, cidr = cidr("192.168.47.168/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=251, cidr = cidr("192.168.47.176/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=251, cidr = 
cidr("192.168.47.184/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=252, cidr = cidr("192.168.47.192/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=252, cidr = cidr("192.168.47.200/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=253, cidr = cidr("192.168.47.208/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=253, cidr = cidr("192.168.47.216/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=254, cidr = cidr("192.168.47.224/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=254, cidr = cidr("192.168.47.232/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=2, gre_id=0, svlan_id=32, cvlan_id=255, cidr = cidr("192.168.47.240/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=2, gre_id=0, svlan_id=33, cvlan_id=255, cidr = cidr("192.168.47.248/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=0, cidr = cidr("192.168.48.0/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=0, cidr = cidr("192.168.48.8/29"), mac = mac("00:00:01:00:00:00"), user_id=0}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=1, cidr = cidr("192.168.48.16/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=1, cidr = cidr("192.168.48.24/29"), mac = mac("00:00:01:00:00:01"), user_id=1}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=2, cidr = cidr("192.168.48.32/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=2, cidr = cidr("192.168.48.40/29"), mac = mac("00:00:01:00:00:02"), user_id=2}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=3, cidr = cidr("192.168.48.48/29"), mac = mac("00:00:01:00:00:03"), 
user_id=3}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=3, cidr = cidr("192.168.48.56/29"), mac = mac("00:00:01:00:00:03"), user_id=3}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=4, cidr = cidr("192.168.48.64/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=4, cidr = cidr("192.168.48.72/29"), mac = mac("00:00:01:00:00:04"), user_id=4}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=5, cidr = cidr("192.168.48.80/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=5, cidr = cidr("192.168.48.88/29"), mac = mac("00:00:01:00:00:05"), user_id=5}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=6, cidr = cidr("192.168.48.96/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=6, cidr = cidr("192.168.48.104/29"), mac = mac("00:00:01:00:00:06"), user_id=6}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=7, cidr = cidr("192.168.48.112/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=7, cidr = cidr("192.168.48.120/29"), mac = mac("00:00:01:00:00:07"), user_id=7}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=8, cidr = cidr("192.168.48.128/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=8, cidr = cidr("192.168.48.136/29"), mac = mac("00:00:01:00:00:08"), user_id=8}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=9, cidr = cidr("192.168.48.144/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=9, cidr = cidr("192.168.48.152/29"), mac = mac("00:00:01:00:00:09"), user_id=9}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=10, cidr = cidr("192.168.48.160/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=10, cidr = cidr("192.168.48.168/29"), mac = mac("00:00:01:00:00:0a"), user_id=10}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=11, cidr = cidr("192.168.48.176/29"), 
mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=11, cidr = cidr("192.168.48.184/29"), mac = mac("00:00:01:00:00:0b"), user_id=11}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=12, cidr = cidr("192.168.48.192/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=12, cidr = cidr("192.168.48.200/29"), mac = mac("00:00:01:00:00:0c"), user_id=12}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=13, cidr = cidr("192.168.48.208/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=13, cidr = cidr("192.168.48.216/29"), mac = mac("00:00:01:00:00:0d"), user_id=13}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=14, cidr = cidr("192.168.48.224/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=14, cidr = cidr("192.168.48.232/29"), mac = mac("00:00:01:00:00:0e"), user_id=14}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=15, cidr = cidr("192.168.48.240/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=15, cidr = cidr("192.168.48.248/29"), mac = mac("00:00:01:00:00:0f"), user_id=15}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=16, cidr = cidr("192.168.49.0/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=16, cidr = cidr("192.168.49.8/29"), mac = mac("00:00:01:00:00:10"), user_id=16}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=17, cidr = cidr("192.168.49.16/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=17, cidr = cidr("192.168.49.24/29"), mac = mac("00:00:01:00:00:11"), user_id=17}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=18, cidr = cidr("192.168.49.32/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=18, cidr = cidr("192.168.49.40/29"), mac = mac("00:00:01:00:00:12"), user_id=18}, + {dest_id=3, gre_id=0, 
svlan_id=48, cvlan_id=19, cidr = cidr("192.168.49.48/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=19, cidr = cidr("192.168.49.56/29"), mac = mac("00:00:01:00:00:13"), user_id=19}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=20, cidr = cidr("192.168.49.64/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=20, cidr = cidr("192.168.49.72/29"), mac = mac("00:00:01:00:00:14"), user_id=20}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=21, cidr = cidr("192.168.49.80/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=21, cidr = cidr("192.168.49.88/29"), mac = mac("00:00:01:00:00:15"), user_id=21}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=22, cidr = cidr("192.168.49.96/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=22, cidr = cidr("192.168.49.104/29"), mac = mac("00:00:01:00:00:16"), user_id=22}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=23, cidr = cidr("192.168.49.112/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=23, cidr = cidr("192.168.49.120/29"), mac = mac("00:00:01:00:00:17"), user_id=23}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=24, cidr = cidr("192.168.49.128/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=24, cidr = cidr("192.168.49.136/29"), mac = mac("00:00:01:00:00:18"), user_id=24}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=25, cidr = cidr("192.168.49.144/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=25, cidr = cidr("192.168.49.152/29"), mac = mac("00:00:01:00:00:19"), user_id=25}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=26, cidr = cidr("192.168.49.160/29"), mac = mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=26, cidr = cidr("192.168.49.168/29"), mac = 
mac("00:00:01:00:00:1a"), user_id=26}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=27, cidr = cidr("192.168.49.176/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=27, cidr = cidr("192.168.49.184/29"), mac = mac("00:00:01:00:00:1b"), user_id=27}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=28, cidr = cidr("192.168.49.192/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=28, cidr = cidr("192.168.49.200/29"), mac = mac("00:00:01:00:00:1c"), user_id=28}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=29, cidr = cidr("192.168.49.208/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=29, cidr = cidr("192.168.49.216/29"), mac = mac("00:00:01:00:00:1d"), user_id=29}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=30, cidr = cidr("192.168.49.224/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=30, cidr = cidr("192.168.49.232/29"), mac = mac("00:00:01:00:00:1e"), user_id=30}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=31, cidr = cidr("192.168.49.240/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=31, cidr = cidr("192.168.49.248/29"), mac = mac("00:00:01:00:00:1f"), user_id=31}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=32, cidr = cidr("192.168.50.0/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=32, cidr = cidr("192.168.50.8/29"), mac = mac("00:00:01:00:00:20"), user_id=32}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=33, cidr = cidr("192.168.50.16/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=33, cidr = cidr("192.168.50.24/29"), mac = mac("00:00:01:00:00:21"), user_id=33}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=34, cidr = cidr("192.168.50.32/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=3, gre_id=0, 
svlan_id=49, cvlan_id=34, cidr = cidr("192.168.50.40/29"), mac = mac("00:00:01:00:00:22"), user_id=34}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=35, cidr = cidr("192.168.50.48/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=35, cidr = cidr("192.168.50.56/29"), mac = mac("00:00:01:00:00:23"), user_id=35}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=36, cidr = cidr("192.168.50.64/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=36, cidr = cidr("192.168.50.72/29"), mac = mac("00:00:01:00:00:24"), user_id=36}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=37, cidr = cidr("192.168.50.80/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=37, cidr = cidr("192.168.50.88/29"), mac = mac("00:00:01:00:00:25"), user_id=37}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=38, cidr = cidr("192.168.50.96/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=38, cidr = cidr("192.168.50.104/29"), mac = mac("00:00:01:00:00:26"), user_id=38}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=39, cidr = cidr("192.168.50.112/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=39, cidr = cidr("192.168.50.120/29"), mac = mac("00:00:01:00:00:27"), user_id=39}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=40, cidr = cidr("192.168.50.128/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=40, cidr = cidr("192.168.50.136/29"), mac = mac("00:00:01:00:00:28"), user_id=40}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=41, cidr = cidr("192.168.50.144/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=41, cidr = cidr("192.168.50.152/29"), mac = mac("00:00:01:00:00:29"), user_id=41}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=42, cidr = cidr("192.168.50.160/29"), mac = 
mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=42, cidr = cidr("192.168.50.168/29"), mac = mac("00:00:01:00:00:2a"), user_id=42}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=43, cidr = cidr("192.168.50.176/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=43, cidr = cidr("192.168.50.184/29"), mac = mac("00:00:01:00:00:2b"), user_id=43}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=44, cidr = cidr("192.168.50.192/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=44, cidr = cidr("192.168.50.200/29"), mac = mac("00:00:01:00:00:2c"), user_id=44}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=45, cidr = cidr("192.168.50.208/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=45, cidr = cidr("192.168.50.216/29"), mac = mac("00:00:01:00:00:2d"), user_id=45}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=46, cidr = cidr("192.168.50.224/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=46, cidr = cidr("192.168.50.232/29"), mac = mac("00:00:01:00:00:2e"), user_id=46}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=47, cidr = cidr("192.168.50.240/29"), mac = mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=47, cidr = cidr("192.168.50.248/29"), mac = mac("00:00:01:00:00:2f"), user_id=47}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=48, cidr = cidr("192.168.51.0/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=48, cidr = cidr("192.168.51.8/29"), mac = mac("00:00:01:00:00:30"), user_id=48}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=49, cidr = cidr("192.168.51.16/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=49, cidr = cidr("192.168.51.24/29"), mac = mac("00:00:01:00:00:31"), user_id=49}, + {dest_id=3, gre_id=0, 
svlan_id=48, cvlan_id=50, cidr = cidr("192.168.51.32/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=50, cidr = cidr("192.168.51.40/29"), mac = mac("00:00:01:00:00:32"), user_id=50}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=51, cidr = cidr("192.168.51.48/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=51, cidr = cidr("192.168.51.56/29"), mac = mac("00:00:01:00:00:33"), user_id=51}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=52, cidr = cidr("192.168.51.64/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=52, cidr = cidr("192.168.51.72/29"), mac = mac("00:00:01:00:00:34"), user_id=52}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=53, cidr = cidr("192.168.51.80/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=53, cidr = cidr("192.168.51.88/29"), mac = mac("00:00:01:00:00:35"), user_id=53}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=54, cidr = cidr("192.168.51.96/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=54, cidr = cidr("192.168.51.104/29"), mac = mac("00:00:01:00:00:36"), user_id=54}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=55, cidr = cidr("192.168.51.112/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=55, cidr = cidr("192.168.51.120/29"), mac = mac("00:00:01:00:00:37"), user_id=55}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=56, cidr = cidr("192.168.51.128/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=56, cidr = cidr("192.168.51.136/29"), mac = mac("00:00:01:00:00:38"), user_id=56}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=57, cidr = cidr("192.168.51.144/29"), mac = mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=57, cidr = cidr("192.168.51.152/29"), mac = 
mac("00:00:01:00:00:39"), user_id=57}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=58, cidr = cidr("192.168.51.160/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=58, cidr = cidr("192.168.51.168/29"), mac = mac("00:00:01:00:00:3a"), user_id=58}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=59, cidr = cidr("192.168.51.176/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=59, cidr = cidr("192.168.51.184/29"), mac = mac("00:00:01:00:00:3b"), user_id=59}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=60, cidr = cidr("192.168.51.192/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=60, cidr = cidr("192.168.51.200/29"), mac = mac("00:00:01:00:00:3c"), user_id=60}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=61, cidr = cidr("192.168.51.208/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=61, cidr = cidr("192.168.51.216/29"), mac = mac("00:00:01:00:00:3d"), user_id=61}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=62, cidr = cidr("192.168.51.224/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=62, cidr = cidr("192.168.51.232/29"), mac = mac("00:00:01:00:00:3e"), user_id=62}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=63, cidr = cidr("192.168.51.240/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=63, cidr = cidr("192.168.51.248/29"), mac = mac("00:00:01:00:00:3f"), user_id=63}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=64, cidr = cidr("192.168.52.0/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=64, cidr = cidr("192.168.52.8/29"), mac = mac("00:00:01:00:00:40"), user_id=64}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=65, cidr = cidr("192.168.52.16/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=3, gre_id=0, 
svlan_id=49, cvlan_id=65, cidr = cidr("192.168.52.24/29"), mac = mac("00:00:01:00:00:41"), user_id=65}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=66, cidr = cidr("192.168.52.32/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=66, cidr = cidr("192.168.52.40/29"), mac = mac("00:00:01:00:00:42"), user_id=66}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=67, cidr = cidr("192.168.52.48/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=67, cidr = cidr("192.168.52.56/29"), mac = mac("00:00:01:00:00:43"), user_id=67}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=68, cidr = cidr("192.168.52.64/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=68, cidr = cidr("192.168.52.72/29"), mac = mac("00:00:01:00:00:44"), user_id=68}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=69, cidr = cidr("192.168.52.80/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=69, cidr = cidr("192.168.52.88/29"), mac = mac("00:00:01:00:00:45"), user_id=69}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=70, cidr = cidr("192.168.52.96/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=70, cidr = cidr("192.168.52.104/29"), mac = mac("00:00:01:00:00:46"), user_id=70}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=71, cidr = cidr("192.168.52.112/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=71, cidr = cidr("192.168.52.120/29"), mac = mac("00:00:01:00:00:47"), user_id=71}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=72, cidr = cidr("192.168.52.128/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=72, cidr = cidr("192.168.52.136/29"), mac = mac("00:00:01:00:00:48"), user_id=72}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=73, cidr = cidr("192.168.52.144/29"), mac = 
mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=73, cidr = cidr("192.168.52.152/29"), mac = mac("00:00:01:00:00:49"), user_id=73}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=74, cidr = cidr("192.168.52.160/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=74, cidr = cidr("192.168.52.168/29"), mac = mac("00:00:01:00:00:4a"), user_id=74}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=75, cidr = cidr("192.168.52.176/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=75, cidr = cidr("192.168.52.184/29"), mac = mac("00:00:01:00:00:4b"), user_id=75}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=76, cidr = cidr("192.168.52.192/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=76, cidr = cidr("192.168.52.200/29"), mac = mac("00:00:01:00:00:4c"), user_id=76}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=77, cidr = cidr("192.168.52.208/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=77, cidr = cidr("192.168.52.216/29"), mac = mac("00:00:01:00:00:4d"), user_id=77}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=78, cidr = cidr("192.168.52.224/29"), mac = mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=78, cidr = cidr("192.168.52.232/29"), mac = mac("00:00:01:00:00:4e"), user_id=78}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=79, cidr = cidr("192.168.52.240/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=79, cidr = cidr("192.168.52.248/29"), mac = mac("00:00:01:00:00:4f"), user_id=79}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=80, cidr = cidr("192.168.53.0/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=80, cidr = cidr("192.168.53.8/29"), mac = mac("00:00:01:00:00:50"), user_id=80}, + {dest_id=3, gre_id=0, 
svlan_id=48, cvlan_id=81, cidr = cidr("192.168.53.16/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=81, cidr = cidr("192.168.53.24/29"), mac = mac("00:00:01:00:00:51"), user_id=81}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=82, cidr = cidr("192.168.53.32/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=82, cidr = cidr("192.168.53.40/29"), mac = mac("00:00:01:00:00:52"), user_id=82}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=83, cidr = cidr("192.168.53.48/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=83, cidr = cidr("192.168.53.56/29"), mac = mac("00:00:01:00:00:53"), user_id=83}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=84, cidr = cidr("192.168.53.64/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=84, cidr = cidr("192.168.53.72/29"), mac = mac("00:00:01:00:00:54"), user_id=84}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=85, cidr = cidr("192.168.53.80/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=85, cidr = cidr("192.168.53.88/29"), mac = mac("00:00:01:00:00:55"), user_id=85}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=86, cidr = cidr("192.168.53.96/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=86, cidr = cidr("192.168.53.104/29"), mac = mac("00:00:01:00:00:56"), user_id=86}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=87, cidr = cidr("192.168.53.112/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=87, cidr = cidr("192.168.53.120/29"), mac = mac("00:00:01:00:00:57"), user_id=87}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=88, cidr = cidr("192.168.53.128/29"), mac = mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=88, cidr = cidr("192.168.53.136/29"), mac = 
mac("00:00:01:00:00:58"), user_id=88}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=89, cidr = cidr("192.168.53.144/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=89, cidr = cidr("192.168.53.152/29"), mac = mac("00:00:01:00:00:59"), user_id=89}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=90, cidr = cidr("192.168.53.160/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=90, cidr = cidr("192.168.53.168/29"), mac = mac("00:00:01:00:00:5a"), user_id=90}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=91, cidr = cidr("192.168.53.176/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=91, cidr = cidr("192.168.53.184/29"), mac = mac("00:00:01:00:00:5b"), user_id=91}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=92, cidr = cidr("192.168.53.192/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=92, cidr = cidr("192.168.53.200/29"), mac = mac("00:00:01:00:00:5c"), user_id=92}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=93, cidr = cidr("192.168.53.208/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=93, cidr = cidr("192.168.53.216/29"), mac = mac("00:00:01:00:00:5d"), user_id=93}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=94, cidr = cidr("192.168.53.224/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=94, cidr = cidr("192.168.53.232/29"), mac = mac("00:00:01:00:00:5e"), user_id=94}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=95, cidr = cidr("192.168.53.240/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=95, cidr = cidr("192.168.53.248/29"), mac = mac("00:00:01:00:00:5f"), user_id=95}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=96, cidr = cidr("192.168.54.0/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=3, gre_id=0, 
svlan_id=49, cvlan_id=96, cidr = cidr("192.168.54.8/29"), mac = mac("00:00:01:00:00:60"), user_id=96}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=97, cidr = cidr("192.168.54.16/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=97, cidr = cidr("192.168.54.24/29"), mac = mac("00:00:01:00:00:61"), user_id=97}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=98, cidr = cidr("192.168.54.32/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=98, cidr = cidr("192.168.54.40/29"), mac = mac("00:00:01:00:00:62"), user_id=98}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=99, cidr = cidr("192.168.54.48/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=99, cidr = cidr("192.168.54.56/29"), mac = mac("00:00:01:00:00:63"), user_id=99}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=100, cidr = cidr("192.168.54.64/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=100, cidr = cidr("192.168.54.72/29"), mac = mac("00:00:01:00:00:64"), user_id=100}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=101, cidr = cidr("192.168.54.80/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=101, cidr = cidr("192.168.54.88/29"), mac = mac("00:00:01:00:00:65"), user_id=101}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=102, cidr = cidr("192.168.54.96/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=102, cidr = cidr("192.168.54.104/29"), mac = mac("00:00:01:00:00:66"), user_id=102}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=103, cidr = cidr("192.168.54.112/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=103, cidr = cidr("192.168.54.120/29"), mac = mac("00:00:01:00:00:67"), user_id=103}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=104, cidr = cidr("192.168.54.128/29"), 
mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=104, cidr = cidr("192.168.54.136/29"), mac = mac("00:00:01:00:00:68"), user_id=104}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=105, cidr = cidr("192.168.54.144/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=105, cidr = cidr("192.168.54.152/29"), mac = mac("00:00:01:00:00:69"), user_id=105}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=106, cidr = cidr("192.168.54.160/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=106, cidr = cidr("192.168.54.168/29"), mac = mac("00:00:01:00:00:6a"), user_id=106}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=107, cidr = cidr("192.168.54.176/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=107, cidr = cidr("192.168.54.184/29"), mac = mac("00:00:01:00:00:6b"), user_id=107}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=108, cidr = cidr("192.168.54.192/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=108, cidr = cidr("192.168.54.200/29"), mac = mac("00:00:01:00:00:6c"), user_id=108}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=109, cidr = cidr("192.168.54.208/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=109, cidr = cidr("192.168.54.216/29"), mac = mac("00:00:01:00:00:6d"), user_id=109}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=110, cidr = cidr("192.168.54.224/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=110, cidr = cidr("192.168.54.232/29"), mac = mac("00:00:01:00:00:6e"), user_id=110}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=111, cidr = cidr("192.168.54.240/29"), mac = mac("00:00:01:00:00:6f"), user_id=111}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=111, cidr = cidr("192.168.54.248/29"), mac = mac("00:00:01:00:00:6f"), 
user_id=111}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=112, cidr = cidr("192.168.55.0/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=112, cidr = cidr("192.168.55.8/29"), mac = mac("00:00:01:00:00:70"), user_id=112}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=113, cidr = cidr("192.168.55.16/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=113, cidr = cidr("192.168.55.24/29"), mac = mac("00:00:01:00:00:71"), user_id=113}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=114, cidr = cidr("192.168.55.32/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=114, cidr = cidr("192.168.55.40/29"), mac = mac("00:00:01:00:00:72"), user_id=114}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=115, cidr = cidr("192.168.55.48/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=115, cidr = cidr("192.168.55.56/29"), mac = mac("00:00:01:00:00:73"), user_id=115}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=116, cidr = cidr("192.168.55.64/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=116, cidr = cidr("192.168.55.72/29"), mac = mac("00:00:01:00:00:74"), user_id=116}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=117, cidr = cidr("192.168.55.80/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=117, cidr = cidr("192.168.55.88/29"), mac = mac("00:00:01:00:00:75"), user_id=117}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=118, cidr = cidr("192.168.55.96/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=118, cidr = cidr("192.168.55.104/29"), mac = mac("00:00:01:00:00:76"), user_id=118}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=119, cidr = cidr("192.168.55.112/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=3, gre_id=0, 
svlan_id=49, cvlan_id=119, cidr = cidr("192.168.55.120/29"), mac = mac("00:00:01:00:00:77"), user_id=119}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=120, cidr = cidr("192.168.55.128/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=120, cidr = cidr("192.168.55.136/29"), mac = mac("00:00:01:00:00:78"), user_id=120}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=121, cidr = cidr("192.168.55.144/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=121, cidr = cidr("192.168.55.152/29"), mac = mac("00:00:01:00:00:79"), user_id=121}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=122, cidr = cidr("192.168.55.160/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=122, cidr = cidr("192.168.55.168/29"), mac = mac("00:00:01:00:00:7a"), user_id=122}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=123, cidr = cidr("192.168.55.176/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=123, cidr = cidr("192.168.55.184/29"), mac = mac("00:00:01:00:00:7b"), user_id=123}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=124, cidr = cidr("192.168.55.192/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=124, cidr = cidr("192.168.55.200/29"), mac = mac("00:00:01:00:00:7c"), user_id=124}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=125, cidr = cidr("192.168.55.208/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=125, cidr = cidr("192.168.55.216/29"), mac = mac("00:00:01:00:00:7d"), user_id=125}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=126, cidr = cidr("192.168.55.224/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=126, cidr = cidr("192.168.55.232/29"), mac = mac("00:00:01:00:00:7e"), user_id=126}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=127, cidr = 
cidr("192.168.55.240/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=127, cidr = cidr("192.168.55.248/29"), mac = mac("00:00:01:00:00:7f"), user_id=127}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=128, cidr = cidr("192.168.56.0/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=128, cidr = cidr("192.168.56.8/29"), mac = mac("00:00:01:00:00:80"), user_id=128}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=129, cidr = cidr("192.168.56.16/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=129, cidr = cidr("192.168.56.24/29"), mac = mac("00:00:01:00:00:81"), user_id=129}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=130, cidr = cidr("192.168.56.32/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=130, cidr = cidr("192.168.56.40/29"), mac = mac("00:00:01:00:00:82"), user_id=130}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=131, cidr = cidr("192.168.56.48/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=131, cidr = cidr("192.168.56.56/29"), mac = mac("00:00:01:00:00:83"), user_id=131}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=132, cidr = cidr("192.168.56.64/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=132, cidr = cidr("192.168.56.72/29"), mac = mac("00:00:01:00:00:84"), user_id=132}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=133, cidr = cidr("192.168.56.80/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=133, cidr = cidr("192.168.56.88/29"), mac = mac("00:00:01:00:00:85"), user_id=133}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=134, cidr = cidr("192.168.56.96/29"), mac = mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=134, cidr = cidr("192.168.56.104/29"), mac = 
mac("00:00:01:00:00:86"), user_id=134}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=135, cidr = cidr("192.168.56.112/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=135, cidr = cidr("192.168.56.120/29"), mac = mac("00:00:01:00:00:87"), user_id=135}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=136, cidr = cidr("192.168.56.128/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=136, cidr = cidr("192.168.56.136/29"), mac = mac("00:00:01:00:00:88"), user_id=136}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=137, cidr = cidr("192.168.56.144/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=137, cidr = cidr("192.168.56.152/29"), mac = mac("00:00:01:00:00:89"), user_id=137}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=138, cidr = cidr("192.168.56.160/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=138, cidr = cidr("192.168.56.168/29"), mac = mac("00:00:01:00:00:8a"), user_id=138}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=139, cidr = cidr("192.168.56.176/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=139, cidr = cidr("192.168.56.184/29"), mac = mac("00:00:01:00:00:8b"), user_id=139}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=140, cidr = cidr("192.168.56.192/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=140, cidr = cidr("192.168.56.200/29"), mac = mac("00:00:01:00:00:8c"), user_id=140}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=141, cidr = cidr("192.168.56.208/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=141, cidr = cidr("192.168.56.216/29"), mac = mac("00:00:01:00:00:8d"), user_id=141}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=142, cidr = cidr("192.168.56.224/29"), mac = mac("00:00:01:00:00:8e"), 
user_id=142}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=142, cidr = cidr("192.168.56.232/29"), mac = mac("00:00:01:00:00:8e"), user_id=142}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=143, cidr = cidr("192.168.56.240/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=143, cidr = cidr("192.168.56.248/29"), mac = mac("00:00:01:00:00:8f"), user_id=143}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=144, cidr = cidr("192.168.57.0/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=144, cidr = cidr("192.168.57.8/29"), mac = mac("00:00:01:00:00:90"), user_id=144}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=145, cidr = cidr("192.168.57.16/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=145, cidr = cidr("192.168.57.24/29"), mac = mac("00:00:01:00:00:91"), user_id=145}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=146, cidr = cidr("192.168.57.32/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=146, cidr = cidr("192.168.57.40/29"), mac = mac("00:00:01:00:00:92"), user_id=146}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=147, cidr = cidr("192.168.57.48/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=147, cidr = cidr("192.168.57.56/29"), mac = mac("00:00:01:00:00:93"), user_id=147}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=148, cidr = cidr("192.168.57.64/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=148, cidr = cidr("192.168.57.72/29"), mac = mac("00:00:01:00:00:94"), user_id=148}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=149, cidr = cidr("192.168.57.80/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=149, cidr = cidr("192.168.57.88/29"), mac = mac("00:00:01:00:00:95"), user_id=149}, + {dest_id=3, gre_id=0, 
svlan_id=48, cvlan_id=150, cidr = cidr("192.168.57.96/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=150, cidr = cidr("192.168.57.104/29"), mac = mac("00:00:01:00:00:96"), user_id=150}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=151, cidr = cidr("192.168.57.112/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=151, cidr = cidr("192.168.57.120/29"), mac = mac("00:00:01:00:00:97"), user_id=151}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=152, cidr = cidr("192.168.57.128/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=152, cidr = cidr("192.168.57.136/29"), mac = mac("00:00:01:00:00:98"), user_id=152}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=153, cidr = cidr("192.168.57.144/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=153, cidr = cidr("192.168.57.152/29"), mac = mac("00:00:01:00:00:99"), user_id=153}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=154, cidr = cidr("192.168.57.160/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=154, cidr = cidr("192.168.57.168/29"), mac = mac("00:00:01:00:00:9a"), user_id=154}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=155, cidr = cidr("192.168.57.176/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=155, cidr = cidr("192.168.57.184/29"), mac = mac("00:00:01:00:00:9b"), user_id=155}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=156, cidr = cidr("192.168.57.192/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=156, cidr = cidr("192.168.57.200/29"), mac = mac("00:00:01:00:00:9c"), user_id=156}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=157, cidr = cidr("192.168.57.208/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=157, cidr = 
cidr("192.168.57.216/29"), mac = mac("00:00:01:00:00:9d"), user_id=157}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=158, cidr = cidr("192.168.57.224/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=158, cidr = cidr("192.168.57.232/29"), mac = mac("00:00:01:00:00:9e"), user_id=158}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=159, cidr = cidr("192.168.57.240/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=159, cidr = cidr("192.168.57.248/29"), mac = mac("00:00:01:00:00:9f"), user_id=159}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=160, cidr = cidr("192.168.58.0/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=160, cidr = cidr("192.168.58.8/29"), mac = mac("00:00:01:00:00:a0"), user_id=160}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=161, cidr = cidr("192.168.58.16/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=161, cidr = cidr("192.168.58.24/29"), mac = mac("00:00:01:00:00:a1"), user_id=161}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=162, cidr = cidr("192.168.58.32/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=162, cidr = cidr("192.168.58.40/29"), mac = mac("00:00:01:00:00:a2"), user_id=162}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=163, cidr = cidr("192.168.58.48/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=163, cidr = cidr("192.168.58.56/29"), mac = mac("00:00:01:00:00:a3"), user_id=163}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=164, cidr = cidr("192.168.58.64/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=164, cidr = cidr("192.168.58.72/29"), mac = mac("00:00:01:00:00:a4"), user_id=164}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=165, cidr = cidr("192.168.58.80/29"), mac = 
mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=165, cidr = cidr("192.168.58.88/29"), mac = mac("00:00:01:00:00:a5"), user_id=165}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=166, cidr = cidr("192.168.58.96/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=166, cidr = cidr("192.168.58.104/29"), mac = mac("00:00:01:00:00:a6"), user_id=166}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=167, cidr = cidr("192.168.58.112/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=167, cidr = cidr("192.168.58.120/29"), mac = mac("00:00:01:00:00:a7"), user_id=167}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=168, cidr = cidr("192.168.58.128/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=168, cidr = cidr("192.168.58.136/29"), mac = mac("00:00:01:00:00:a8"), user_id=168}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=169, cidr = cidr("192.168.58.144/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=169, cidr = cidr("192.168.58.152/29"), mac = mac("00:00:01:00:00:a9"), user_id=169}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=170, cidr = cidr("192.168.58.160/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=170, cidr = cidr("192.168.58.168/29"), mac = mac("00:00:01:00:00:aa"), user_id=170}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=171, cidr = cidr("192.168.58.176/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=171, cidr = cidr("192.168.58.184/29"), mac = mac("00:00:01:00:00:ab"), user_id=171}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=172, cidr = cidr("192.168.58.192/29"), mac = mac("00:00:01:00:00:ac"), user_id=172}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=172, cidr = cidr("192.168.58.200/29"), mac = mac("00:00:01:00:00:ac"), 
user_id=172}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=173, cidr = cidr("192.168.58.208/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=173, cidr = cidr("192.168.58.216/29"), mac = mac("00:00:01:00:00:ad"), user_id=173}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=174, cidr = cidr("192.168.58.224/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=174, cidr = cidr("192.168.58.232/29"), mac = mac("00:00:01:00:00:ae"), user_id=174}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=175, cidr = cidr("192.168.58.240/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=175, cidr = cidr("192.168.58.248/29"), mac = mac("00:00:01:00:00:af"), user_id=175}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=176, cidr = cidr("192.168.59.0/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=176, cidr = cidr("192.168.59.8/29"), mac = mac("00:00:01:00:00:b0"), user_id=176}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=177, cidr = cidr("192.168.59.16/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=177, cidr = cidr("192.168.59.24/29"), mac = mac("00:00:01:00:00:b1"), user_id=177}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=178, cidr = cidr("192.168.59.32/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=178, cidr = cidr("192.168.59.40/29"), mac = mac("00:00:01:00:00:b2"), user_id=178}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=179, cidr = cidr("192.168.59.48/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=179, cidr = cidr("192.168.59.56/29"), mac = mac("00:00:01:00:00:b3"), user_id=179}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=180, cidr = cidr("192.168.59.64/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=3, gre_id=0, 
svlan_id=49, cvlan_id=180, cidr = cidr("192.168.59.72/29"), mac = mac("00:00:01:00:00:b4"), user_id=180}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=181, cidr = cidr("192.168.59.80/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=181, cidr = cidr("192.168.59.88/29"), mac = mac("00:00:01:00:00:b5"), user_id=181}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=182, cidr = cidr("192.168.59.96/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=182, cidr = cidr("192.168.59.104/29"), mac = mac("00:00:01:00:00:b6"), user_id=182}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=183, cidr = cidr("192.168.59.112/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=183, cidr = cidr("192.168.59.120/29"), mac = mac("00:00:01:00:00:b7"), user_id=183}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=184, cidr = cidr("192.168.59.128/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=184, cidr = cidr("192.168.59.136/29"), mac = mac("00:00:01:00:00:b8"), user_id=184}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=185, cidr = cidr("192.168.59.144/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=185, cidr = cidr("192.168.59.152/29"), mac = mac("00:00:01:00:00:b9"), user_id=185}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=186, cidr = cidr("192.168.59.160/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=186, cidr = cidr("192.168.59.168/29"), mac = mac("00:00:01:00:00:ba"), user_id=186}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=187, cidr = cidr("192.168.59.176/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=187, cidr = cidr("192.168.59.184/29"), mac = mac("00:00:01:00:00:bb"), user_id=187}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=188, cidr = 
cidr("192.168.59.192/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=188, cidr = cidr("192.168.59.200/29"), mac = mac("00:00:01:00:00:bc"), user_id=188}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=189, cidr = cidr("192.168.59.208/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=189, cidr = cidr("192.168.59.216/29"), mac = mac("00:00:01:00:00:bd"), user_id=189}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=190, cidr = cidr("192.168.59.224/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=190, cidr = cidr("192.168.59.232/29"), mac = mac("00:00:01:00:00:be"), user_id=190}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=191, cidr = cidr("192.168.59.240/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=191, cidr = cidr("192.168.59.248/29"), mac = mac("00:00:01:00:00:bf"), user_id=191}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=192, cidr = cidr("192.168.60.0/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=192, cidr = cidr("192.168.60.8/29"), mac = mac("00:00:01:00:00:c0"), user_id=192}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=193, cidr = cidr("192.168.60.16/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=193, cidr = cidr("192.168.60.24/29"), mac = mac("00:00:01:00:00:c1"), user_id=193}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=194, cidr = cidr("192.168.60.32/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=194, cidr = cidr("192.168.60.40/29"), mac = mac("00:00:01:00:00:c2"), user_id=194}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=195, cidr = cidr("192.168.60.48/29"), mac = mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=195, cidr = cidr("192.168.60.56/29"), mac = 
mac("00:00:01:00:00:c3"), user_id=195}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=196, cidr = cidr("192.168.60.64/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=196, cidr = cidr("192.168.60.72/29"), mac = mac("00:00:01:00:00:c4"), user_id=196}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=197, cidr = cidr("192.168.60.80/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=197, cidr = cidr("192.168.60.88/29"), mac = mac("00:00:01:00:00:c5"), user_id=197}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=198, cidr = cidr("192.168.60.96/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=198, cidr = cidr("192.168.60.104/29"), mac = mac("00:00:01:00:00:c6"), user_id=198}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=199, cidr = cidr("192.168.60.112/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=199, cidr = cidr("192.168.60.120/29"), mac = mac("00:00:01:00:00:c7"), user_id=199}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=200, cidr = cidr("192.168.60.128/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=200, cidr = cidr("192.168.60.136/29"), mac = mac("00:00:01:00:00:c8"), user_id=200}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=201, cidr = cidr("192.168.60.144/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=201, cidr = cidr("192.168.60.152/29"), mac = mac("00:00:01:00:00:c9"), user_id=201}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=202, cidr = cidr("192.168.60.160/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=202, cidr = cidr("192.168.60.168/29"), mac = mac("00:00:01:00:00:ca"), user_id=202}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=203, cidr = cidr("192.168.60.176/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, 
+ {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=203, cidr = cidr("192.168.60.184/29"), mac = mac("00:00:01:00:00:cb"), user_id=203}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=204, cidr = cidr("192.168.60.192/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=204, cidr = cidr("192.168.60.200/29"), mac = mac("00:00:01:00:00:cc"), user_id=204}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=205, cidr = cidr("192.168.60.208/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=205, cidr = cidr("192.168.60.216/29"), mac = mac("00:00:01:00:00:cd"), user_id=205}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=206, cidr = cidr("192.168.60.224/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=206, cidr = cidr("192.168.60.232/29"), mac = mac("00:00:01:00:00:ce"), user_id=206}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=207, cidr = cidr("192.168.60.240/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=207, cidr = cidr("192.168.60.248/29"), mac = mac("00:00:01:00:00:cf"), user_id=207}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=208, cidr = cidr("192.168.61.0/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=208, cidr = cidr("192.168.61.8/29"), mac = mac("00:00:01:00:00:d0"), user_id=208}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=209, cidr = cidr("192.168.61.16/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=209, cidr = cidr("192.168.61.24/29"), mac = mac("00:00:01:00:00:d1"), user_id=209}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=210, cidr = cidr("192.168.61.32/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=210, cidr = cidr("192.168.61.40/29"), mac = mac("00:00:01:00:00:d2"), user_id=210}, + {dest_id=3, gre_id=0, svlan_id=48, 
cvlan_id=211, cidr = cidr("192.168.61.48/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=211, cidr = cidr("192.168.61.56/29"), mac = mac("00:00:01:00:00:d3"), user_id=211}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=212, cidr = cidr("192.168.61.64/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=212, cidr = cidr("192.168.61.72/29"), mac = mac("00:00:01:00:00:d4"), user_id=212}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=213, cidr = cidr("192.168.61.80/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=213, cidr = cidr("192.168.61.88/29"), mac = mac("00:00:01:00:00:d5"), user_id=213}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=214, cidr = cidr("192.168.61.96/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=214, cidr = cidr("192.168.61.104/29"), mac = mac("00:00:01:00:00:d6"), user_id=214}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=215, cidr = cidr("192.168.61.112/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=215, cidr = cidr("192.168.61.120/29"), mac = mac("00:00:01:00:00:d7"), user_id=215}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=216, cidr = cidr("192.168.61.128/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=216, cidr = cidr("192.168.61.136/29"), mac = mac("00:00:01:00:00:d8"), user_id=216}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=217, cidr = cidr("192.168.61.144/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=217, cidr = cidr("192.168.61.152/29"), mac = mac("00:00:01:00:00:d9"), user_id=217}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=218, cidr = cidr("192.168.61.160/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=218, cidr = 
cidr("192.168.61.168/29"), mac = mac("00:00:01:00:00:da"), user_id=218}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=219, cidr = cidr("192.168.61.176/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=219, cidr = cidr("192.168.61.184/29"), mac = mac("00:00:01:00:00:db"), user_id=219}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=220, cidr = cidr("192.168.61.192/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=220, cidr = cidr("192.168.61.200/29"), mac = mac("00:00:01:00:00:dc"), user_id=220}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=221, cidr = cidr("192.168.61.208/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=221, cidr = cidr("192.168.61.216/29"), mac = mac("00:00:01:00:00:dd"), user_id=221}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=222, cidr = cidr("192.168.61.224/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=222, cidr = cidr("192.168.61.232/29"), mac = mac("00:00:01:00:00:de"), user_id=222}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=223, cidr = cidr("192.168.61.240/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=223, cidr = cidr("192.168.61.248/29"), mac = mac("00:00:01:00:00:df"), user_id=223}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=224, cidr = cidr("192.168.62.0/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=224, cidr = cidr("192.168.62.8/29"), mac = mac("00:00:01:00:00:e0"), user_id=224}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=225, cidr = cidr("192.168.62.16/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=225, cidr = cidr("192.168.62.24/29"), mac = mac("00:00:01:00:00:e1"), user_id=225}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=226, cidr = cidr("192.168.62.32/29"), mac = 
mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=226, cidr = cidr("192.168.62.40/29"), mac = mac("00:00:01:00:00:e2"), user_id=226}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=227, cidr = cidr("192.168.62.48/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=227, cidr = cidr("192.168.62.56/29"), mac = mac("00:00:01:00:00:e3"), user_id=227}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=228, cidr = cidr("192.168.62.64/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=228, cidr = cidr("192.168.62.72/29"), mac = mac("00:00:01:00:00:e4"), user_id=228}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=229, cidr = cidr("192.168.62.80/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=229, cidr = cidr("192.168.62.88/29"), mac = mac("00:00:01:00:00:e5"), user_id=229}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=230, cidr = cidr("192.168.62.96/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=230, cidr = cidr("192.168.62.104/29"), mac = mac("00:00:01:00:00:e6"), user_id=230}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=231, cidr = cidr("192.168.62.112/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=231, cidr = cidr("192.168.62.120/29"), mac = mac("00:00:01:00:00:e7"), user_id=231}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=232, cidr = cidr("192.168.62.128/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=232, cidr = cidr("192.168.62.136/29"), mac = mac("00:00:01:00:00:e8"), user_id=232}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=233, cidr = cidr("192.168.62.144/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=233, cidr = cidr("192.168.62.152/29"), mac = mac("00:00:01:00:00:e9"), user_id=233}, + 
{dest_id=3, gre_id=0, svlan_id=48, cvlan_id=234, cidr = cidr("192.168.62.160/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=234, cidr = cidr("192.168.62.168/29"), mac = mac("00:00:01:00:00:ea"), user_id=234}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=235, cidr = cidr("192.168.62.176/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=235, cidr = cidr("192.168.62.184/29"), mac = mac("00:00:01:00:00:eb"), user_id=235}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=236, cidr = cidr("192.168.62.192/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=236, cidr = cidr("192.168.62.200/29"), mac = mac("00:00:01:00:00:ec"), user_id=236}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=237, cidr = cidr("192.168.62.208/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=237, cidr = cidr("192.168.62.216/29"), mac = mac("00:00:01:00:00:ed"), user_id=237}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=238, cidr = cidr("192.168.62.224/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=238, cidr = cidr("192.168.62.232/29"), mac = mac("00:00:01:00:00:ee"), user_id=238}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=239, cidr = cidr("192.168.62.240/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=239, cidr = cidr("192.168.62.248/29"), mac = mac("00:00:01:00:00:ef"), user_id=239}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=240, cidr = cidr("192.168.63.0/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=240, cidr = cidr("192.168.63.8/29"), mac = mac("00:00:01:00:00:f0"), user_id=240}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=241, cidr = cidr("192.168.63.16/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=3, gre_id=0, svlan_id=49, 
cvlan_id=241, cidr = cidr("192.168.63.24/29"), mac = mac("00:00:01:00:00:f1"), user_id=241}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=242, cidr = cidr("192.168.63.32/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=242, cidr = cidr("192.168.63.40/29"), mac = mac("00:00:01:00:00:f2"), user_id=242}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=243, cidr = cidr("192.168.63.48/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=243, cidr = cidr("192.168.63.56/29"), mac = mac("00:00:01:00:00:f3"), user_id=243}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=244, cidr = cidr("192.168.63.64/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=244, cidr = cidr("192.168.63.72/29"), mac = mac("00:00:01:00:00:f4"), user_id=244}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=245, cidr = cidr("192.168.63.80/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=245, cidr = cidr("192.168.63.88/29"), mac = mac("00:00:01:00:00:f5"), user_id=245}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=246, cidr = cidr("192.168.63.96/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=246, cidr = cidr("192.168.63.104/29"), mac = mac("00:00:01:00:00:f6"), user_id=246}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=247, cidr = cidr("192.168.63.112/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=247, cidr = cidr("192.168.63.120/29"), mac = mac("00:00:01:00:00:f7"), user_id=247}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=248, cidr = cidr("192.168.63.128/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=248, cidr = cidr("192.168.63.136/29"), mac = mac("00:00:01:00:00:f8"), user_id=248}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=249, cidr = 
cidr("192.168.63.144/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=249, cidr = cidr("192.168.63.152/29"), mac = mac("00:00:01:00:00:f9"), user_id=249}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=250, cidr = cidr("192.168.63.160/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=250, cidr = cidr("192.168.63.168/29"), mac = mac("00:00:01:00:00:fa"), user_id=250}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=251, cidr = cidr("192.168.63.176/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=251, cidr = cidr("192.168.63.184/29"), mac = mac("00:00:01:00:00:fb"), user_id=251}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=252, cidr = cidr("192.168.63.192/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=252, cidr = cidr("192.168.63.200/29"), mac = mac("00:00:01:00:00:fc"), user_id=252}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=253, cidr = cidr("192.168.63.208/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=253, cidr = cidr("192.168.63.216/29"), mac = mac("00:00:01:00:00:fd"), user_id=253}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=254, cidr = cidr("192.168.63.224/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=254, cidr = cidr("192.168.63.232/29"), mac = mac("00:00:01:00:00:fe"), user_id=254}, + {dest_id=3, gre_id=0, svlan_id=48, cvlan_id=255, cidr = cidr("192.168.63.240/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, + {dest_id=3, gre_id=0, svlan_id=49, cvlan_id=255, cidr = cidr("192.168.63.248/29"), mac = mac("00:00:01:00:00:ff"), user_id=255}, +} diff --git a/VNFs/DPPD-PROX/config/cpe_table_short.lua b/VNFs/DPPD-PROX/config/cpe_table_short.lua new file mode 100644 index 00000000..c2314897 --- /dev/null +++ b/VNFs/DPPD-PROX/config/cpe_table_short.lua @@ -0,0 +1,32 
@@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +t2={} +svlan_id=0 +ip1=2^24*192 + 2^16*168 + 2^8*0 + 0; +for id1=0,3,1 do + for cvlan_id=0,255,1 do + mac_s=string.format("00:00:01:00:00:%02x", cvlan_id); + table.insert(t2,{dest_id=id1, gre_id=0, svlan_id=svlan_id, cvlan_id=cvlan_id, cidr = {ip=ip(ip1),depth=29}, mac = mac(mac_s), user_id=cvlan_id}); + ip1=ip1+8 + table.insert(t2,{dest_id=id1, gre_id=0, svlan_id=svlan_id+1, cvlan_id=cvlan_id, cidr = {ip=ip(ip1),depth=29}, mac = mac(mac_s), user_id=cvlan_id}); + ip1=ip1+8 + end + svlan_id=svlan_id+16 +end + +return t2; + diff --git a/VNFs/DPPD-PROX/config/dscp.lua b/VNFs/DPPD-PROX/config/dscp.lua new file mode 100644 index 00000000..ff994b16 --- /dev/null +++ b/VNFs/DPPD-PROX/config/dscp.lua @@ -0,0 +1,82 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +return { + {dscp = 0, tc = 0, queue = 0}, + {dscp = 1, tc = 0, queue = 1}, + {dscp = 2, tc = 0, queue = 2}, + {dscp = 3, tc = 0, queue = 3}, + {dscp = 4, tc = 1, queue = 0}, + {dscp = 5, tc = 1, queue = 1}, + {dscp = 6, tc = 1, queue = 2}, + {dscp = 7, tc = 1, queue = 3}, + {dscp = 8, tc = 2, queue = 0}, + {dscp = 9, tc = 2, queue = 1}, + {dscp = 10, tc = 2, queue = 2}, + {dscp = 11, tc = 2, queue = 3}, + {dscp = 12, tc = 3, queue = 0}, + {dscp = 13, tc = 3, queue = 1}, + {dscp = 14, tc = 3, queue = 2}, + {dscp = 15, tc = 3, queue = 3}, + {dscp = 16, tc = 0, queue = 0}, + {dscp = 17, tc = 0, queue = 1}, + {dscp = 18, tc = 0, queue = 2}, + {dscp = 19, tc = 0, queue = 3}, + {dscp = 20, tc = 1, queue = 0}, + {dscp = 21, tc = 1, queue = 1}, + {dscp = 22, tc = 1, queue = 2}, + {dscp = 23, tc = 1, queue = 3}, + {dscp = 24, tc = 2, queue = 0}, + {dscp = 25, tc = 2, queue = 1}, + {dscp = 26, tc = 2, queue = 2}, + {dscp = 27, tc = 2, queue = 3}, + {dscp = 28, tc = 3, queue = 0}, + {dscp = 29, tc = 3, queue = 1}, + {dscp = 30, tc = 3, queue = 2}, + {dscp = 31, tc = 3, queue = 3}, + {dscp = 32, tc = 0, queue = 0}, + {dscp = 33, tc = 0, queue = 1}, + {dscp = 34, tc = 0, queue = 2}, + {dscp = 35, tc = 0, queue = 3}, + {dscp = 36, tc = 1, queue = 0}, + {dscp = 37, tc = 1, queue = 1}, + {dscp = 38, tc = 1, queue = 2}, + {dscp = 39, tc = 1, queue = 3}, + {dscp = 40, tc = 2, queue = 0}, + {dscp = 41, tc = 2, queue = 1}, + {dscp = 42, tc = 2, queue = 2}, + {dscp = 43, tc = 2, queue = 3}, + {dscp = 44, tc = 3, queue = 0}, + {dscp = 45, tc = 3, queue = 1}, + {dscp = 46, tc = 3, queue = 2}, + {dscp = 47, tc = 3, queue = 3}, + {dscp = 48, tc = 0, queue = 0}, + {dscp = 49, tc = 0, queue = 1}, + {dscp = 50, tc = 0, queue = 2}, + {dscp = 51, tc = 0, queue = 3}, + {dscp = 52, tc = 1, queue = 0}, + {dscp = 53, tc = 1, queue = 1}, + {dscp = 54, tc = 1, queue = 2}, + {dscp = 55, tc = 1, queue = 3}, + {dscp = 56, tc = 2, queue = 0}, + {dscp = 57, tc = 2, queue = 1}, + {dscp = 58, tc = 2, 
queue = 2}, + {dscp = 59, tc = 2, queue = 3}, + {dscp = 60, tc = 3, queue = 0}, + {dscp = 61, tc = 3, queue = 1}, + {dscp = 62, tc = 3, queue = 2}, + {dscp = 63, tc = 3, queue = 3}, +} diff --git a/VNFs/DPPD-PROX/config/dscp2.lua b/VNFs/DPPD-PROX/config/dscp2.lua new file mode 100644 index 00000000..0bc044a8 --- /dev/null +++ b/VNFs/DPPD-PROX/config/dscp2.lua @@ -0,0 +1,22 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local dscp = {} +for i = 1,2^6 do + dscp[i] = {dscp = i - 1, tc = 0, queue = 0} +end + +return dscp; diff --git a/VNFs/DPPD-PROX/config/ip6_tun_bind.lua b/VNFs/DPPD-PROX/config/ip6_tun_bind.lua new file mode 100644 index 00000000..75910048 --- /dev/null +++ b/VNFs/DPPD-PROX/config/ip6_tun_bind.lua @@ -0,0 +1,25 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +-- Bindings for lwaftr: lwB4 IPv6 address, next hop MAC address +-- towards lwB4, IPv4 Public address, IPv4 Public Port Set + +return { + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0000"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4608}, + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0001"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4672}, + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0002"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4736}, + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0003"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4800}, +} diff --git a/VNFs/DPPD-PROX/config/ipv4-2.lua b/VNFs/DPPD-PROX/config/ipv4-2.lua new file mode 100644 index 00000000..0283ed48 --- /dev/null +++ b/VNFs/DPPD-PROX/config/ipv4-2.lua @@ -0,0 +1,109 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local lpm4 = {} +lpm4.next_hops = { + {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x112}, + {id = 1, port_id = 1, ip = ip("2.1.1.1"), mac = mac("00:00:00:00:00:02"), mpls = 0x212}, + {id = 2, port_id = 0, ip = ip("3.1.1.1"), mac = mac("00:00:00:00:00:03"), mpls = 0x312}, + {id = 3, port_id = 1, ip = ip("4.1.1.1"), mac = mac("00:00:00:00:00:04"), mpls = 0x412}, + {id = 4, port_id = 0, ip = ip("5.1.1.1"), mac = mac("00:00:00:00:00:05"), mpls = 0x512}, + {id = 5, port_id = 1, ip = ip("6.1.1.1"), mac = mac("00:00:00:00:00:06"), mpls = 0x612}, + {id = 6, port_id = 0, ip = ip("7.1.1.1"), mac = mac("00:00:00:00:00:07"), mpls = 0x712}, + {id = 7, port_id = 1, ip = ip("8.1.1.1"), mac = mac("00:00:00:00:00:08"), mpls = 0x812}, + {id = 8, port_id = 0, ip = ip("9.1.1.1"), mac = mac("00:00:00:00:00:09"), mpls = 0x912}, + {id = 9, port_id = 1, ip = ip("10.1.1.1"), mac = mac("00:00:00:00:00:10"), mpls = 0x1012}, + {id = 10, port_id = 0, ip = ip("11.1.1.1"), mac = mac("00:00:00:00:00:11"), mpls = 0x1112}, + {id = 11, port_id = 1, ip = ip("12.1.1.1"), mac = mac("00:00:00:00:00:12"), mpls = 0x1212}, + {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac("00:00:00:00:00:13"), mpls = 0x1312}, + {id = 13, port_id = 1, ip = ip("14.1.1.1"), mac = mac("00:00:00:00:00:14"), mpls = 0x1412}, + {id = 14, port_id = 0, ip = ip("15.1.1.1"), mac = mac("00:00:00:00:00:15"), mpls = 0x1512}, + {id = 15, port_id = 1, ip = ip("16.1.1.1"), mac = mac("00:00:00:00:00:16"), mpls = 0x1612}, + {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac("00:00:00:00:00:17"), mpls = 0x1712}, + {id = 17, port_id = 1, ip = ip("18.1.1.1"), mac = mac("00:00:00:00:00:18"), mpls = 0x1812}, + {id = 18, port_id = 0, ip = ip("19.1.1.1"), mac = mac("00:00:00:00:00:19"), mpls = 0x1912}, + {id = 19, port_id = 1, ip = ip("20.1.1.1"), mac = mac("00:00:00:00:00:20"), mpls = 0x2012}, + {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac("00:00:00:00:00:21"), mpls = 0x2112}, + {id = 
21, port_id = 1, ip = ip("22.1.1.1"), mac = mac("00:00:00:00:00:22"), mpls = 0x2212}, + {id = 22, port_id = 0, ip = ip("23.1.1.1"), mac = mac("00:00:00:00:00:23"), mpls = 0x2312}, + {id = 23, port_id = 1, ip = ip("24.1.1.1"), mac = mac("00:00:00:00:00:24"), mpls = 0x2412}, + {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac("00:00:00:00:00:25"), mpls = 0x2512}, + {id = 25, port_id = 1, ip = ip("26.1.1.1"), mac = mac("00:00:00:00:00:26"), mpls = 0x2612}, + {id = 26, port_id = 0, ip = ip("27.1.1.1"), mac = mac("00:00:00:00:00:27"), mpls = 0x2712}, + {id = 27, port_id = 1, ip = ip("28.1.1.1"), mac = mac("00:00:00:00:00:28"), mpls = 0x2812}, + {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac("00:00:00:00:00:29"), mpls = 0x2912}, + {id = 29, port_id = 1, ip = ip("30.1.1.1"), mac = mac("00:00:00:00:00:30"), mpls = 0x3012}, + {id = 30, port_id = 0, ip = ip("31.1.1.1"), mac = mac("00:00:00:00:00:31"), mpls = 0x3112}, + {id = 31, port_id = 1, ip = ip("32.1.1.1"), mac = mac("00:00:00:00:00:32"), mpls = 0x3212}, + {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac("00:00:00:00:00:33"), mpls = 0x3312}, + {id = 33, port_id = 1, ip = ip("34.1.1.1"), mac = mac("00:00:00:00:00:34"), mpls = 0x3412}, + {id = 34, port_id = 0, ip = ip("35.1.1.1"), mac = mac("00:00:00:00:00:35"), mpls = 0x3512}, + {id = 35, port_id = 1, ip = ip("36.1.1.1"), mac = mac("00:00:00:00:00:36"), mpls = 0x3612}, + {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac("00:00:00:00:00:37"), mpls = 0x3712}, + {id = 37, port_id = 1, ip = ip("38.1.1.1"), mac = mac("00:00:00:00:00:38"), mpls = 0x3812}, + {id = 38, port_id = 0, ip = ip("39.1.1.1"), mac = mac("00:00:00:00:00:39"), mpls = 0x3912}, + {id = 39, port_id = 1, ip = ip("40.1.1.1"), mac = mac("00:00:00:00:00:40"), mpls = 0x4012}, + {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac("00:00:00:00:00:41"), mpls = 0x4112}, + {id = 41, port_id = 1, ip = ip("42.1.1.1"), mac = mac("00:00:00:00:00:42"), mpls = 0x4212}, + {id = 42, port_id = 0, ip = 
ip("43.1.1.1"), mac = mac("00:00:00:00:00:43"), mpls = 0x4312}, + {id = 43, port_id = 1, ip = ip("44.1.1.1"), mac = mac("00:00:00:00:00:44"), mpls = 0x4412}, + {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac("00:00:00:00:00:45"), mpls = 0x4512}, + {id = 45, port_id = 1, ip = ip("46.1.1.1"), mac = mac("00:00:00:00:00:46"), mpls = 0x4612}, + {id = 46, port_id = 0, ip = ip("47.1.1.1"), mac = mac("00:00:00:00:00:47"), mpls = 0x4712}, + {id = 47, port_id = 1, ip = ip("48.1.1.1"), mac = mac("00:00:00:00:00:48"), mpls = 0x4812}, + {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac("00:00:00:00:00:49"), mpls = 0x4912}, + {id = 49, port_id = 1, ip = ip("50.1.1.1"), mac = mac("00:00:00:00:00:50"), mpls = 0x5012}, + {id = 50, port_id = 0, ip = ip("51.1.1.1"), mac = mac("00:00:00:00:00:51"), mpls = 0x5112}, + {id = 51, port_id = 1, ip = ip("52.1.1.1"), mac = mac("00:00:00:00:00:52"), mpls = 0x5212}, + {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac("00:00:00:00:00:53"), mpls = 0x5312}, + {id = 53, port_id = 1, ip = ip("54.1.1.1"), mac = mac("00:00:00:00:00:54"), mpls = 0x5412}, + {id = 54, port_id = 0, ip = ip("55.1.1.1"), mac = mac("00:00:00:00:00:55"), mpls = 0x5512}, + {id = 55, port_id = 1, ip = ip("56.1.1.1"), mac = mac("00:00:00:00:00:56"), mpls = 0x5612}, + {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac("00:00:00:00:00:57"), mpls = 0x5712}, + {id = 57, port_id = 1, ip = ip("58.1.1.1"), mac = mac("00:00:00:00:00:58"), mpls = 0x5812}, + {id = 58, port_id = 0, ip = ip("59.1.1.1"), mac = mac("00:00:00:00:00:59"), mpls = 0x5912}, + {id = 59, port_id = 1, ip = ip("60.1.1.1"), mac = mac("00:00:00:00:00:60"), mpls = 0x6012}, + {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac("00:00:00:00:00:61"), mpls = 0x6112}, + {id = 61, port_id = 1, ip = ip("62.1.1.1"), mac = mac("00:00:00:00:00:62"), mpls = 0x6212}, + {id = 62, port_id = 0, ip = ip("63.1.1.1"), mac = mac("00:00:00:00:00:63"), mpls = 0x6312}, + {id = 63, port_id = 1, ip = ip("64.1.1.1"), mac = 
mac("00:00:00:00:00:64"), mpls = 0x6412}, +} + +lpm4.routes = {}; + +base_ip = 10 * 2^24; + +for i = 1,2^13 do + res = ip(base_ip + (1 * 2^12) * (i - 1)); + + lpm4.routes[i] = { + cidr = {ip = res, depth = 24}, + next_hop_id = (i - 1) % 64, + } +end + +base_ip = 74 * 2^24; + +for i = 1,2^13 do + res = ip(base_ip + (1 * 2^12) * (i - 1)); + + lpm4.routes[2^13 + i] = { + cidr = {ip = res, depth = 24}, + next_hop_id = (i - 1) % 64, + } +end + +return lpm4; diff --git a/VNFs/DPPD-PROX/config/ipv4-4ports.lua b/VNFs/DPPD-PROX/config/ipv4-4ports.lua new file mode 100644 index 00000000..54b102e8 --- /dev/null +++ b/VNFs/DPPD-PROX/config/ipv4-4ports.lua @@ -0,0 +1,98 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local lpm4 = {} +lpm4.next_hops = { + {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x112}, + {id = 1, port_id = 1, ip = ip("2.1.1.1"), mac = mac("00:00:00:00:00:02"), mpls = 0x212}, + {id = 2, port_id = 2, ip = ip("3.1.1.1"), mac = mac("00:00:00:00:00:03"), mpls = 0x312}, + {id = 3, port_id = 3, ip = ip("4.1.1.1"), mac = mac("00:00:00:00:00:04"), mpls = 0x412}, + {id = 4, port_id = 0, ip = ip("5.1.1.1"), mac = mac("00:00:00:00:00:05"), mpls = 0x512}, + {id = 5, port_id = 1, ip = ip("6.1.1.1"), mac = mac("00:00:00:00:00:06"), mpls = 0x612}, + {id = 6, port_id = 2, ip = ip("7.1.1.1"), mac = mac("00:00:00:00:00:07"), mpls = 0x712}, + {id = 7, port_id = 3, ip = ip("8.1.1.1"), mac = mac("00:00:00:00:00:08"), mpls = 0x812}, + {id = 8, port_id = 0, ip = ip("9.1.1.1"), mac = mac("00:00:00:00:00:09"), mpls = 0x912}, + {id = 9, port_id = 1, ip = ip("10.1.1.1"), mac = mac("00:00:00:00:00:10"), mpls = 0x1012}, + {id = 10, port_id = 2, ip = ip("11.1.1.1"), mac = mac("00:00:00:00:00:11"), mpls = 0x1112}, + {id = 11, port_id = 3, ip = ip("12.1.1.1"), mac = mac("00:00:00:00:00:12"), mpls = 0x1212}, + {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac("00:00:00:00:00:13"), mpls = 0x1312}, + {id = 13, port_id = 1, ip = ip("14.1.1.1"), mac = mac("00:00:00:00:00:14"), mpls = 0x1412}, + {id = 14, port_id = 2, ip = ip("15.1.1.1"), mac = mac("00:00:00:00:00:15"), mpls = 0x1512}, + {id = 15, port_id = 3, ip = ip("16.1.1.1"), mac = mac("00:00:00:00:00:16"), mpls = 0x1612}, + {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac("00:00:00:00:00:17"), mpls = 0x1712}, + {id = 17, port_id = 1, ip = ip("18.1.1.1"), mac = mac("00:00:00:00:00:18"), mpls = 0x1812}, + {id = 18, port_id = 2, ip = ip("19.1.1.1"), mac = mac("00:00:00:00:00:19"), mpls = 0x1912}, + {id = 19, port_id = 3, ip = ip("20.1.1.1"), mac = mac("00:00:00:00:00:20"), mpls = 0x2012}, + {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac("00:00:00:00:00:21"), mpls = 0x2112}, + {id = 
21, port_id = 1, ip = ip("22.1.1.1"), mac = mac("00:00:00:00:00:22"), mpls = 0x2212}, + {id = 22, port_id = 2, ip = ip("23.1.1.1"), mac = mac("00:00:00:00:00:23"), mpls = 0x2312}, + {id = 23, port_id = 3, ip = ip("24.1.1.1"), mac = mac("00:00:00:00:00:24"), mpls = 0x2412}, + {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac("00:00:00:00:00:25"), mpls = 0x2512}, + {id = 25, port_id = 1, ip = ip("26.1.1.1"), mac = mac("00:00:00:00:00:26"), mpls = 0x2612}, + {id = 26, port_id = 2, ip = ip("27.1.1.1"), mac = mac("00:00:00:00:00:27"), mpls = 0x2712}, + {id = 27, port_id = 3, ip = ip("28.1.1.1"), mac = mac("00:00:00:00:00:28"), mpls = 0x2812}, + {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac("00:00:00:00:00:29"), mpls = 0x2912}, + {id = 29, port_id = 1, ip = ip("30.1.1.1"), mac = mac("00:00:00:00:00:30"), mpls = 0x3012}, + {id = 30, port_id = 2, ip = ip("31.1.1.1"), mac = mac("00:00:00:00:00:31"), mpls = 0x3112}, + {id = 31, port_id = 3, ip = ip("32.1.1.1"), mac = mac("00:00:00:00:00:32"), mpls = 0x3212}, + {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac("00:00:00:00:00:33"), mpls = 0x3312}, + {id = 33, port_id = 1, ip = ip("34.1.1.1"), mac = mac("00:00:00:00:00:34"), mpls = 0x3412}, + {id = 34, port_id = 2, ip = ip("35.1.1.1"), mac = mac("00:00:00:00:00:35"), mpls = 0x3512}, + {id = 35, port_id = 3, ip = ip("36.1.1.1"), mac = mac("00:00:00:00:00:36"), mpls = 0x3612}, + {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac("00:00:00:00:00:37"), mpls = 0x3712}, + {id = 37, port_id = 1, ip = ip("38.1.1.1"), mac = mac("00:00:00:00:00:38"), mpls = 0x3812}, + {id = 38, port_id = 2, ip = ip("39.1.1.1"), mac = mac("00:00:00:00:00:39"), mpls = 0x3912}, + {id = 39, port_id = 3, ip = ip("40.1.1.1"), mac = mac("00:00:00:00:00:40"), mpls = 0x4012}, + {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac("00:00:00:00:00:41"), mpls = 0x4112}, + {id = 41, port_id = 1, ip = ip("42.1.1.1"), mac = mac("00:00:00:00:00:42"), mpls = 0x4212}, + {id = 42, port_id = 2, ip = 
ip("43.1.1.1"), mac = mac("00:00:00:00:00:43"), mpls = 0x4312}, + {id = 43, port_id = 3, ip = ip("44.1.1.1"), mac = mac("00:00:00:00:00:44"), mpls = 0x4412}, + {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac("00:00:00:00:00:45"), mpls = 0x4512}, + {id = 45, port_id = 1, ip = ip("46.1.1.1"), mac = mac("00:00:00:00:00:46"), mpls = 0x4612}, + {id = 46, port_id = 2, ip = ip("47.1.1.1"), mac = mac("00:00:00:00:00:47"), mpls = 0x4712}, + {id = 47, port_id = 3, ip = ip("48.1.1.1"), mac = mac("00:00:00:00:00:48"), mpls = 0x4812}, + {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac("00:00:00:00:00:49"), mpls = 0x4912}, + {id = 49, port_id = 1, ip = ip("50.1.1.1"), mac = mac("00:00:00:00:00:50"), mpls = 0x5012}, + {id = 50, port_id = 2, ip = ip("51.1.1.1"), mac = mac("00:00:00:00:00:51"), mpls = 0x5112}, + {id = 51, port_id = 3, ip = ip("52.1.1.1"), mac = mac("00:00:00:00:00:52"), mpls = 0x5212}, + {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac("00:00:00:00:00:53"), mpls = 0x5312}, + {id = 53, port_id = 1, ip = ip("54.1.1.1"), mac = mac("00:00:00:00:00:54"), mpls = 0x5412}, + {id = 54, port_id = 2, ip = ip("55.1.1.1"), mac = mac("00:00:00:00:00:55"), mpls = 0x5512}, + {id = 55, port_id = 3, ip = ip("56.1.1.1"), mac = mac("00:00:00:00:00:56"), mpls = 0x5612}, + {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac("00:00:00:00:00:57"), mpls = 0x5712}, + {id = 57, port_id = 1, ip = ip("58.1.1.1"), mac = mac("00:00:00:00:00:58"), mpls = 0x5812}, + {id = 58, port_id = 2, ip = ip("59.1.1.1"), mac = mac("00:00:00:00:00:59"), mpls = 0x5912}, + {id = 59, port_id = 3, ip = ip("60.1.1.1"), mac = mac("00:00:00:00:00:60"), mpls = 0x6012}, + {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac("00:00:00:00:00:61"), mpls = 0x6112}, + {id = 61, port_id = 1, ip = ip("62.1.1.1"), mac = mac("00:00:00:00:00:62"), mpls = 0x6212}, + {id = 62, port_id = 2, ip = ip("63.1.1.1"), mac = mac("00:00:00:00:00:63"), mpls = 0x6312}, + {id = 63, port_id = 3, ip = ip("64.1.1.1"), mac = 
mac("00:00:00:00:00:64"), mpls = 0x6412}, +} + +lpm4.routes = {}; + +base_ip = 10 * 2^24; + +for i = 1,2^13 do + res = ip(base_ip + (1 *2^12) * (i - 1)); + + lpm4.routes[i] = { + cidr = {ip = res, depth = 24}, + next_hop_id = (i - 1) % 64, + } +end + +return lpm4 diff --git a/VNFs/DPPD-PROX/config/ipv4.lua b/VNFs/DPPD-PROX/config/ipv4.lua new file mode 100644 index 00000000..9fb8c385 --- /dev/null +++ b/VNFs/DPPD-PROX/config/ipv4.lua @@ -0,0 +1,98 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local lpm4 = {} +lpm4.next_hops = { + {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x112}, + {id = 1, port_id = 1, ip = ip("2.1.1.1"), mac = mac("00:00:00:00:00:02"), mpls = 0x212}, + {id = 2, port_id = 0, ip = ip("3.1.1.1"), mac = mac("00:00:00:00:00:03"), mpls = 0x312}, + {id = 3, port_id = 1, ip = ip("4.1.1.1"), mac = mac("00:00:00:00:00:04"), mpls = 0x412}, + {id = 4, port_id = 0, ip = ip("5.1.1.1"), mac = mac("00:00:00:00:00:05"), mpls = 0x512}, + {id = 5, port_id = 1, ip = ip("6.1.1.1"), mac = mac("00:00:00:00:00:06"), mpls = 0x612}, + {id = 6, port_id = 0, ip = ip("7.1.1.1"), mac = mac("00:00:00:00:00:07"), mpls = 0x712}, + {id = 7, port_id = 1, ip = ip("8.1.1.1"), mac = mac("00:00:00:00:00:08"), mpls = 0x812}, + {id = 8, port_id = 0, ip = ip("9.1.1.1"), mac = mac("00:00:00:00:00:09"), mpls = 0x912}, + {id = 9, port_id = 1, ip = ip("10.1.1.1"), mac = mac("00:00:00:00:00:10"), mpls = 0x1012}, + {id = 10, port_id = 0, ip = ip("11.1.1.1"), mac = mac("00:00:00:00:00:11"), mpls = 0x1112}, + {id = 11, port_id = 1, ip = ip("12.1.1.1"), mac = mac("00:00:00:00:00:12"), mpls = 0x1212}, + {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac("00:00:00:00:00:13"), mpls = 0x1312}, + {id = 13, port_id = 1, ip = ip("14.1.1.1"), mac = mac("00:00:00:00:00:14"), mpls = 0x1412}, + {id = 14, port_id = 0, ip = ip("15.1.1.1"), mac = mac("00:00:00:00:00:15"), mpls = 0x1512}, + {id = 15, port_id = 1, ip = ip("16.1.1.1"), mac = mac("00:00:00:00:00:16"), mpls = 0x1612}, + {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac("00:00:00:00:00:17"), mpls = 0x1712}, + {id = 17, port_id = 1, ip = ip("18.1.1.1"), mac = mac("00:00:00:00:00:18"), mpls = 0x1812}, + {id = 18, port_id = 0, ip = ip("19.1.1.1"), mac = mac("00:00:00:00:00:19"), mpls = 0x1912}, + {id = 19, port_id = 1, ip = ip("20.1.1.1"), mac = mac("00:00:00:00:00:20"), mpls = 0x2012}, + {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac("00:00:00:00:00:21"), mpls = 0x2112}, + {id = 
21, port_id = 1, ip = ip("22.1.1.1"), mac = mac("00:00:00:00:00:22"), mpls = 0x2212}, + {id = 22, port_id = 0, ip = ip("23.1.1.1"), mac = mac("00:00:00:00:00:23"), mpls = 0x2312}, + {id = 23, port_id = 1, ip = ip("24.1.1.1"), mac = mac("00:00:00:00:00:24"), mpls = 0x2412}, + {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac("00:00:00:00:00:25"), mpls = 0x2512}, + {id = 25, port_id = 1, ip = ip("26.1.1.1"), mac = mac("00:00:00:00:00:26"), mpls = 0x2612}, + {id = 26, port_id = 0, ip = ip("27.1.1.1"), mac = mac("00:00:00:00:00:27"), mpls = 0x2712}, + {id = 27, port_id = 1, ip = ip("28.1.1.1"), mac = mac("00:00:00:00:00:28"), mpls = 0x2812}, + {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac("00:00:00:00:00:29"), mpls = 0x2912}, + {id = 29, port_id = 1, ip = ip("30.1.1.1"), mac = mac("00:00:00:00:00:30"), mpls = 0x3012}, + {id = 30, port_id = 0, ip = ip("31.1.1.1"), mac = mac("00:00:00:00:00:31"), mpls = 0x3112}, + {id = 31, port_id = 1, ip = ip("32.1.1.1"), mac = mac("00:00:00:00:00:32"), mpls = 0x3212}, + {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac("00:00:00:00:00:33"), mpls = 0x3312}, + {id = 33, port_id = 1, ip = ip("34.1.1.1"), mac = mac("00:00:00:00:00:34"), mpls = 0x3412}, + {id = 34, port_id = 0, ip = ip("35.1.1.1"), mac = mac("00:00:00:00:00:35"), mpls = 0x3512}, + {id = 35, port_id = 1, ip = ip("36.1.1.1"), mac = mac("00:00:00:00:00:36"), mpls = 0x3612}, + {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac("00:00:00:00:00:37"), mpls = 0x3712}, + {id = 37, port_id = 1, ip = ip("38.1.1.1"), mac = mac("00:00:00:00:00:38"), mpls = 0x3812}, + {id = 38, port_id = 0, ip = ip("39.1.1.1"), mac = mac("00:00:00:00:00:39"), mpls = 0x3912}, + {id = 39, port_id = 1, ip = ip("40.1.1.1"), mac = mac("00:00:00:00:00:40"), mpls = 0x4012}, + {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac("00:00:00:00:00:41"), mpls = 0x4112}, + {id = 41, port_id = 1, ip = ip("42.1.1.1"), mac = mac("00:00:00:00:00:42"), mpls = 0x4212}, + {id = 42, port_id = 0, ip = 
ip("43.1.1.1"), mac = mac("00:00:00:00:00:43"), mpls = 0x4312}, + {id = 43, port_id = 1, ip = ip("44.1.1.1"), mac = mac("00:00:00:00:00:44"), mpls = 0x4412}, + {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac("00:00:00:00:00:45"), mpls = 0x4512}, + {id = 45, port_id = 1, ip = ip("46.1.1.1"), mac = mac("00:00:00:00:00:46"), mpls = 0x4612}, + {id = 46, port_id = 0, ip = ip("47.1.1.1"), mac = mac("00:00:00:00:00:47"), mpls = 0x4712}, + {id = 47, port_id = 1, ip = ip("48.1.1.1"), mac = mac("00:00:00:00:00:48"), mpls = 0x4812}, + {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac("00:00:00:00:00:49"), mpls = 0x4912}, + {id = 49, port_id = 1, ip = ip("50.1.1.1"), mac = mac("00:00:00:00:00:50"), mpls = 0x5012}, + {id = 50, port_id = 0, ip = ip("51.1.1.1"), mac = mac("00:00:00:00:00:51"), mpls = 0x5112}, + {id = 51, port_id = 1, ip = ip("52.1.1.1"), mac = mac("00:00:00:00:00:52"), mpls = 0x5212}, + {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac("00:00:00:00:00:53"), mpls = 0x5312}, + {id = 53, port_id = 1, ip = ip("54.1.1.1"), mac = mac("00:00:00:00:00:54"), mpls = 0x5412}, + {id = 54, port_id = 0, ip = ip("55.1.1.1"), mac = mac("00:00:00:00:00:55"), mpls = 0x5512}, + {id = 55, port_id = 1, ip = ip("56.1.1.1"), mac = mac("00:00:00:00:00:56"), mpls = 0x5612}, + {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac("00:00:00:00:00:57"), mpls = 0x5712}, + {id = 57, port_id = 1, ip = ip("58.1.1.1"), mac = mac("00:00:00:00:00:58"), mpls = 0x5812}, + {id = 58, port_id = 0, ip = ip("59.1.1.1"), mac = mac("00:00:00:00:00:59"), mpls = 0x5912}, + {id = 59, port_id = 1, ip = ip("60.1.1.1"), mac = mac("00:00:00:00:00:60"), mpls = 0x6012}, + {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac("00:00:00:00:00:61"), mpls = 0x6112}, + {id = 61, port_id = 1, ip = ip("62.1.1.1"), mac = mac("00:00:00:00:00:62"), mpls = 0x6212}, + {id = 62, port_id = 0, ip = ip("63.1.1.1"), mac = mac("00:00:00:00:00:63"), mpls = 0x6312}, + {id = 63, port_id = 1, ip = ip("64.1.1.1"), mac = 
mac("00:00:00:00:00:64"), mpls = 0x6412}, +} + +lpm4.routes = {}; + +base_ip = 10 * 2^24; + +for i = 1,2^13 do + res = ip(base_ip + (1 * 2^12) * (i - 1)); + + lpm4.routes[i] = { + cidr = {ip = res, depth = 24}, + next_hop_id = (i - 1) % 64, + } +end + +return lpm4 diff --git a/VNFs/DPPD-PROX/config/ipv4_1port.lua b/VNFs/DPPD-PROX/config/ipv4_1port.lua new file mode 100644 index 00000000..ad5c3472 --- /dev/null +++ b/VNFs/DPPD-PROX/config/ipv4_1port.lua @@ -0,0 +1,98 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local lpm4 = {} +lpm4.next_hops = { + {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x112}, + {id = 1, port_id = 0, ip = ip("2.1.1.1"), mac = mac("00:00:00:00:00:02"), mpls = 0x212}, + {id = 2, port_id = 0, ip = ip("3.1.1.1"), mac = mac("00:00:00:00:00:03"), mpls = 0x312}, + {id = 3, port_id = 0, ip = ip("4.1.1.1"), mac = mac("00:00:00:00:00:04"), mpls = 0x412}, + {id = 4, port_id = 0, ip = ip("5.1.1.1"), mac = mac("00:00:00:00:00:05"), mpls = 0x512}, + {id = 5, port_id = 0, ip = ip("6.1.1.1"), mac = mac("00:00:00:00:00:06"), mpls = 0x612}, + {id = 6, port_id = 0, ip = ip("7.1.1.1"), mac = mac("00:00:00:00:00:07"), mpls = 0x712}, + {id = 7, port_id = 0, ip = ip("8.1.1.1"), mac = mac("00:00:00:00:00:08"), mpls = 0x812}, + {id = 8, port_id = 0, ip = ip("9.1.1.1"), mac = mac("00:00:00:00:00:09"), mpls = 0x912}, + {id = 9, port_id = 0, ip = ip("10.1.1.1"), mac = mac("00:00:00:00:00:10"), mpls = 0x1012}, + {id = 10, port_id = 0, ip = ip("11.1.1.1"), mac = mac("00:00:00:00:00:11"), mpls = 0x1112}, + {id = 11, port_id = 0, ip = ip("12.1.1.1"), mac = mac("00:00:00:00:00:12"), mpls = 0x1212}, + {id = 12, port_id = 0, ip = ip("13.1.1.1"), mac = mac("00:00:00:00:00:13"), mpls = 0x1312}, + {id = 13, port_id = 0, ip = ip("14.1.1.1"), mac = mac("00:00:00:00:00:14"), mpls = 0x1412}, + {id = 14, port_id = 0, ip = ip("15.1.1.1"), mac = mac("00:00:00:00:00:15"), mpls = 0x1512}, + {id = 15, port_id = 0, ip = ip("16.1.1.1"), mac = mac("00:00:00:00:00:16"), mpls = 0x1612}, + {id = 16, port_id = 0, ip = ip("17.1.1.1"), mac = mac("00:00:00:00:00:17"), mpls = 0x1712}, + {id = 17, port_id = 0, ip = ip("18.1.1.1"), mac = mac("00:00:00:00:00:18"), mpls = 0x1812}, + {id = 18, port_id = 0, ip = ip("19.1.1.1"), mac = mac("00:00:00:00:00:19"), mpls = 0x1912}, + {id = 19, port_id = 0, ip = ip("20.1.1.1"), mac = mac("00:00:00:00:00:20"), mpls = 0x2012}, + {id = 20, port_id = 0, ip = ip("21.1.1.1"), mac = mac("00:00:00:00:00:21"), mpls = 0x2112}, + {id = 
21, port_id = 0, ip = ip("22.1.1.1"), mac = mac("00:00:00:00:00:22"), mpls = 0x2212}, + {id = 22, port_id = 0, ip = ip("23.1.1.1"), mac = mac("00:00:00:00:00:23"), mpls = 0x2312}, + {id = 23, port_id = 0, ip = ip("24.1.1.1"), mac = mac("00:00:00:00:00:24"), mpls = 0x2412}, + {id = 24, port_id = 0, ip = ip("25.1.1.1"), mac = mac("00:00:00:00:00:25"), mpls = 0x2512}, + {id = 25, port_id = 0, ip = ip("26.1.1.1"), mac = mac("00:00:00:00:00:26"), mpls = 0x2612}, + {id = 26, port_id = 0, ip = ip("27.1.1.1"), mac = mac("00:00:00:00:00:27"), mpls = 0x2712}, + {id = 27, port_id = 0, ip = ip("28.1.1.1"), mac = mac("00:00:00:00:00:28"), mpls = 0x2812}, + {id = 28, port_id = 0, ip = ip("29.1.1.1"), mac = mac("00:00:00:00:00:29"), mpls = 0x2912}, + {id = 29, port_id = 0, ip = ip("30.1.1.1"), mac = mac("00:00:00:00:00:30"), mpls = 0x3012}, + {id = 30, port_id = 0, ip = ip("31.1.1.1"), mac = mac("00:00:00:00:00:31"), mpls = 0x3112}, + {id = 31, port_id = 0, ip = ip("32.1.1.1"), mac = mac("00:00:00:00:00:32"), mpls = 0x3212}, + {id = 32, port_id = 0, ip = ip("33.1.1.1"), mac = mac("00:00:00:00:00:33"), mpls = 0x3312}, + {id = 33, port_id = 0, ip = ip("34.1.1.1"), mac = mac("00:00:00:00:00:34"), mpls = 0x3412}, + {id = 34, port_id = 0, ip = ip("35.1.1.1"), mac = mac("00:00:00:00:00:35"), mpls = 0x3512}, + {id = 35, port_id = 0, ip = ip("36.1.1.1"), mac = mac("00:00:00:00:00:36"), mpls = 0x3612}, + {id = 36, port_id = 0, ip = ip("37.1.1.1"), mac = mac("00:00:00:00:00:37"), mpls = 0x3712}, + {id = 37, port_id = 0, ip = ip("38.1.1.1"), mac = mac("00:00:00:00:00:38"), mpls = 0x3812}, + {id = 38, port_id = 0, ip = ip("39.1.1.1"), mac = mac("00:00:00:00:00:39"), mpls = 0x3912}, + {id = 39, port_id = 0, ip = ip("40.1.1.1"), mac = mac("00:00:00:00:00:40"), mpls = 0x4012}, + {id = 40, port_id = 0, ip = ip("41.1.1.1"), mac = mac("00:00:00:00:00:41"), mpls = 0x4112}, + {id = 41, port_id = 0, ip = ip("42.1.1.1"), mac = mac("00:00:00:00:00:42"), mpls = 0x4212}, + {id = 42, port_id = 0, ip = 
ip("43.1.1.1"), mac = mac("00:00:00:00:00:43"), mpls = 0x4312}, + {id = 43, port_id = 0, ip = ip("44.1.1.1"), mac = mac("00:00:00:00:00:44"), mpls = 0x4412}, + {id = 44, port_id = 0, ip = ip("45.1.1.1"), mac = mac("00:00:00:00:00:45"), mpls = 0x4512}, + {id = 45, port_id = 0, ip = ip("46.1.1.1"), mac = mac("00:00:00:00:00:46"), mpls = 0x4612}, + {id = 46, port_id = 0, ip = ip("47.1.1.1"), mac = mac("00:00:00:00:00:47"), mpls = 0x4712}, + {id = 47, port_id = 0, ip = ip("48.1.1.1"), mac = mac("00:00:00:00:00:48"), mpls = 0x4812}, + {id = 48, port_id = 0, ip = ip("49.1.1.1"), mac = mac("00:00:00:00:00:49"), mpls = 0x4912}, + {id = 49, port_id = 0, ip = ip("50.1.1.1"), mac = mac("00:00:00:00:00:50"), mpls = 0x5012}, + {id = 50, port_id = 0, ip = ip("51.1.1.1"), mac = mac("00:00:00:00:00:51"), mpls = 0x5112}, + {id = 51, port_id = 0, ip = ip("52.1.1.1"), mac = mac("00:00:00:00:00:52"), mpls = 0x5212}, + {id = 52, port_id = 0, ip = ip("53.1.1.1"), mac = mac("00:00:00:00:00:53"), mpls = 0x5312}, + {id = 53, port_id = 0, ip = ip("54.1.1.1"), mac = mac("00:00:00:00:00:54"), mpls = 0x5412}, + {id = 54, port_id = 0, ip = ip("55.1.1.1"), mac = mac("00:00:00:00:00:55"), mpls = 0x5512}, + {id = 55, port_id = 0, ip = ip("56.1.1.1"), mac = mac("00:00:00:00:00:56"), mpls = 0x5612}, + {id = 56, port_id = 0, ip = ip("57.1.1.1"), mac = mac("00:00:00:00:00:57"), mpls = 0x5712}, + {id = 57, port_id = 0, ip = ip("58.1.1.1"), mac = mac("00:00:00:00:00:58"), mpls = 0x5812}, + {id = 58, port_id = 0, ip = ip("59.1.1.1"), mac = mac("00:00:00:00:00:59"), mpls = 0x5912}, + {id = 59, port_id = 0, ip = ip("60.1.1.1"), mac = mac("00:00:00:00:00:60"), mpls = 0x6012}, + {id = 60, port_id = 0, ip = ip("61.1.1.1"), mac = mac("00:00:00:00:00:61"), mpls = 0x6112}, + {id = 61, port_id = 0, ip = ip("62.1.1.1"), mac = mac("00:00:00:00:00:62"), mpls = 0x6212}, + {id = 62, port_id = 0, ip = ip("63.1.1.1"), mac = mac("00:00:00:00:00:63"), mpls = 0x6312}, + {id = 63, port_id = 0, ip = ip("64.1.1.1"), mac = 
mac("00:00:00:00:00:64"), mpls = 0x6412}, +} + +lpm4.routes = {}; + +base_ip = 10 * 2^24; + +for i = 1,2^13 do + res = ip(base_ip + (1 * 2^12) * (i - 1)); + + lpm4.routes[i] = { + cidr = {ip = res, depth = 24}, + next_hop_id = (i - 1) % 64, + } +end + +return lpm4 diff --git a/VNFs/DPPD-PROX/config/ipv6.lua b/VNFs/DPPD-PROX/config/ipv6.lua new file mode 100644 index 00000000..dcd6058f --- /dev/null +++ b/VNFs/DPPD-PROX/config/ipv6.lua @@ -0,0 +1,111 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +lpm6 = {} +lpm6.next_hops6 = { + {id = 0, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0000"), mac = mac("fe:80:00:00:00:00"), mpls = 4660}, + {id = 1, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0001"), mac = mac("fe:80:00:00:00:00"), mpls = 4661}, + {id = 2, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0002"), mac = mac("fe:80:00:00:00:00"), mpls = 4662}, + {id = 3, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0003"), mac = mac("fe:80:00:00:00:00"), mpls = 4663}, + {id = 4, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0004"), mac = mac("fe:80:00:00:00:00"), mpls = 4664}, + {id = 5, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0005"), mac = mac("fe:80:00:00:00:00"), mpls = 4665}, + {id = 6, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0006"), mac = mac("fe:80:00:00:00:00"), mpls = 4666}, + {id = 7, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0007"), mac = mac("fe:80:00:00:00:00"), mpls = 4667}, + {id = 8, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0008"), mac = mac("fe:80:00:00:00:00"), mpls = 4668}, + {id = 9, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0009"), mac = mac("fe:80:00:00:00:00"), mpls = 4669}, + {id = 10, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:000a"), mac = mac("fe:80:00:00:00:00"), mpls = 4670}, + {id = 11, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:000b"), mac = mac("fe:80:00:00:00:00"), mpls = 4671}, + {id = 12, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:000c"), mac = mac("fe:80:00:00:00:00"), mpls = 4672}, + {id = 13, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:000d"), mac = mac("fe:80:00:00:00:00"), mpls = 4673}, + {id = 14, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:000e"), mac = mac("fe:80:00:00:00:00"), mpls = 4674}, + {id = 15, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:000f"), mac = 
mac("fe:80:00:00:00:00"), mpls = 4675}, + {id = 16, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0010"), mac = mac("fe:80:00:00:00:00"), mpls = 4676}, + {id = 17, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0011"), mac = mac("fe:80:00:00:00:00"), mpls = 4677}, + {id = 18, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0012"), mac = mac("fe:80:00:00:00:00"), mpls = 4678}, + {id = 19, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0013"), mac = mac("fe:80:00:00:00:00"), mpls = 4679}, + {id = 20, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0014"), mac = mac("fe:80:00:00:00:00"), mpls = 4680}, + {id = 21, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0015"), mac = mac("fe:80:00:00:00:00"), mpls = 4681}, + {id = 22, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0016"), mac = mac("fe:80:00:00:00:00"), mpls = 4682}, + {id = 23, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0017"), mac = mac("fe:80:00:00:00:00"), mpls = 4683}, + {id = 24, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0018"), mac = mac("fe:80:00:00:00:00"), mpls = 4684}, + {id = 25, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0019"), mac = mac("fe:80:00:00:00:00"), mpls = 4685}, + {id = 26, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:001a"), mac = mac("fe:80:00:00:00:00"), mpls = 4686}, + {id = 27, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:001b"), mac = mac("fe:80:00:00:00:00"), mpls = 4687}, + {id = 28, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:001c"), mac = mac("fe:80:00:00:00:00"), mpls = 4688}, + {id = 29, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:001d"), mac = mac("fe:80:00:00:00:00"), mpls = 4689}, + {id = 30, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:001e"), mac = mac("fe:80:00:00:00:00"), mpls = 4690}, + {id = 31, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:001f"), mac = 
mac("fe:80:00:00:00:00"), mpls = 4691}, + {id = 32, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0020"), mac = mac("fe:80:00:00:00:00"), mpls = 4692}, + {id = 33, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0021"), mac = mac("fe:80:00:00:00:00"), mpls = 4693}, + {id = 34, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0022"), mac = mac("fe:80:00:00:00:00"), mpls = 4694}, + {id = 35, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0023"), mac = mac("fe:80:00:00:00:00"), mpls = 4695}, + {id = 36, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0024"), mac = mac("fe:80:00:00:00:00"), mpls = 4696}, + {id = 37, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0025"), mac = mac("fe:80:00:00:00:00"), mpls = 4697}, + {id = 38, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0026"), mac = mac("fe:80:00:00:00:00"), mpls = 4698}, + {id = 39, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0027"), mac = mac("fe:80:00:00:00:00"), mpls = 4699}, + {id = 40, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0028"), mac = mac("fe:80:00:00:00:00"), mpls = 4700}, + {id = 41, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0029"), mac = mac("fe:80:00:00:00:00"), mpls = 4701}, + {id = 42, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:002a"), mac = mac("fe:80:00:00:00:00"), mpls = 4702}, + {id = 43, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:002b"), mac = mac("fe:80:00:00:00:00"), mpls = 4703}, + {id = 44, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:002c"), mac = mac("fe:80:00:00:00:00"), mpls = 4704}, + {id = 45, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:002d"), mac = mac("fe:80:00:00:00:00"), mpls = 4705}, + {id = 46, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:002e"), mac = mac("fe:80:00:00:00:00"), mpls = 4706}, + {id = 47, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:002f"), mac = 
mac("fe:80:00:00:00:00"), mpls = 4707}, + {id = 48, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0030"), mac = mac("fe:80:00:00:00:00"), mpls = 4708}, + {id = 49, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0031"), mac = mac("fe:80:00:00:00:00"), mpls = 4709}, + {id = 50, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0032"), mac = mac("fe:80:00:00:00:00"), mpls = 4710}, + {id = 51, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0033"), mac = mac("fe:80:00:00:00:00"), mpls = 4711}, + {id = 52, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0034"), mac = mac("fe:80:00:00:00:00"), mpls = 4712}, + {id = 53, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0035"), mac = mac("fe:80:00:00:00:00"), mpls = 4713}, + {id = 54, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0036"), mac = mac("fe:80:00:00:00:00"), mpls = 4714}, + {id = 55, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0037"), mac = mac("fe:80:00:00:00:00"), mpls = 4715}, + {id = 56, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0038"), mac = mac("fe:80:00:00:00:00"), mpls = 4716}, + {id = 57, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0039"), mac = mac("fe:80:00:00:00:00"), mpls = 4717}, + {id = 58, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:003a"), mac = mac("fe:80:00:00:00:00"), mpls = 4718}, + {id = 59, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:003b"), mac = mac("fe:80:00:00:00:00"), mpls = 4719}, + {id = 60, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:003c"), mac = mac("fe:80:00:00:00:00"), mpls = 4720}, + {id = 61, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:003d"), mac = mac("fe:80:00:00:00:00"), mpls = 4721}, + {id = 62, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:003e"), mac = mac("fe:80:00:00:00:00"), mpls = 4722}, + {id = 63, port_id = 0, ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:003f"), mac = 
mac("fe:80:00:00:00:00"), mpls = 4723}, +} + +lpm6.routes6 = {} + +-- add 1K routes with depth /128 +for i = 1,2^10 do + lpm6.routes6[i] = { + cidr6 = cidr6("fe80:0000:0000:0000:0200:00ff:fe00:".. string.format("%04x", i - 1) .."/128"), + next_hop_id = (i - 1) % 64, + } +end + +-- add 1K routes with depth /64 +for i = 1,2^10 do + lpm6.routes6[i + 2^10] = { + cidr6 = cidr6("fe80:0000:0000:" .. string.format("%04x", i - 1) .. ":0200:00ff:fe00:03e7/64"), + next_hop_id = (i - 1) % 64, + } +end + +-- -- add fallback routes +lpm6.routes6[2^11] = { + cidr6 = cidr6("fe80:0000:0000:03e7:0200:00ff:fe00:03e7/1"), + next_hop_id = 0, +} +lpm6.routes6[2^11 + 1] = { + cidr6 = cidr6("7e80:0000:0000:03e7:0200:00ff:fe00:03e7/1"), + next_hop_id = 0, +} diff --git a/VNFs/DPPD-PROX/config/irq.cfg b/VNFs/DPPD-PROX/config/irq.cfg new file mode 100644 index 00000000..d34c7977 --- /dev/null +++ b/VNFs/DPPD-PROX/config/irq.cfg @@ -0,0 +1,46 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[global] +start time=5 +name=Interrupt (4x) + +[core 0s0] +mode=master + +[core 1s0] +name=irq +task=0 +mode=irq + +[core 2s0] +name=irq +task=0 +mode=irq + +[core 3s0] +name=irq +task=0 +mode=irq + +[core 4s0] +name=irq +task=0 +mode=irq diff --git a/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg b/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg new file mode 100644 index 00000000..27fd08e5 --- /dev/null +++ b/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg @@ -0,0 +1,74 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=50:00:00:00:00:01 +[port 1] +name=if1 +mac=50:00:00:00:00:02 +[port 2] +name=if2 +mac=50:00:00:00:00:03 +[port 3] +name=if3 +mac=50:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=L2 forward (4x) + +[core 0s0] +mode=master + +[core 1s0] +name=L2 fwd +task=0 +mode=l2fwd +rx port=if0 +tx port=if1 +drop=no + +[core 2s0] +name=L2 fwd +task=0 +mode=l2fwd +rx port=if1 +tx port=if0 +drop=no + +[core 3s0] +name=L2 fwd +task=0 +mode=l2fwd +rx port=if2 +tx port=if3 +drop=no + +[core 4s0] +name=L2 fwd +task=0 +mode=l2fwd +rx port=if3 +tx port=if2 +drop=no diff --git a/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg b/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg new file mode 100644 index 00000000..3c452b0e --- /dev/null +++ b/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg @@ -0,0 +1,81 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=50:00:00:00:00:01 +[port 1] +name=if1 +mac=50:00:00:00:00:02 +[port 2] +name=if2 +mac=50:00:00:00:00:03 +[port 3] +name=if3 +mac=50:00:00:00:00:04 + +[defaults] +mempool size=4K + +[lua] +lpm4 = dofile("ipv4.lua") + +[global] +start time=5 +name=Routing (4x) + +[core 0s0] +mode=master + +[core 1s0] +name=Routing +task=0 +mode=routing +route table=lpm4 +rx port=if0 +tx port=if0,if1 +drop=no + +[core 2s0] +name=Routing +task=0 +mode=routing +route table=lpm4 +rx port=if1 +tx port=if0,if1 +drop=no + +[core 3s0] +name=Routing +task=0 +mode=routing +route table=lpm4 +rx port=if2 +tx port=if2,if3 +drop=no + +[core 4s0] +name=Routing +task=0 +mode=routing +route table=lpm4 +rx port=if3 +tx port=if2,if3 +drop=no diff --git a/VNFs/DPPD-PROX/config/lb_5tuple.cfg b/VNFs/DPPD-PROX/config/lb_5tuple.cfg new file mode 100644 index 00000000..e958acca --- /dev/null +++ b/VNFs/DPPD-PROX/config/lb_5tuple.cfg @@ -0,0 +1,52 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[lua] +dofile("tuples.lua") + +[port 0] +name=if0 +mac=hardware +[port 1] +name=if1 +mac=hardware +[port 2] +name=if2 +mac=hardware +[port 3] +name=if3 +mac=hardware + +[defaults] +mempool size=8K + +[global] +start time=5 +name=Load balance 5-tuple + +[core 0s0] +mode=master + +[core 1s0] +name=lb 5tuple +task=0 +mode=lb5tuple +rx port=if0 +tx port=if0,if1,if2,if3 diff --git a/VNFs/DPPD-PROX/config/lw_aftr.cfg b/VNFs/DPPD-PROX/config/lw_aftr.cfg new file mode 100644 index 00000000..eaed2c5d --- /dev/null +++ b/VNFs/DPPD-PROX/config/lw_aftr.cfg @@ -0,0 +1,115 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration creates the functionality of a lwAFTR component of the +; lw4over6 architecture as described in IETF draft available at: +; http://tools.ietf.org/id/draft-ietf-softwire-lw4over6-13.txt +; The lwAFTR simply terminates IPv6 tunnels that carry IPv4 traffic for many +; customers (one tunnel per customer). It consists of two tasks: +; 1) ipv6_encap that encapsulates IPv4 packets into IPv6 and sends those tunnel +; packets towards the customer tunnel endpoint. For this, it must use a +; binding table that associates with each tunnel, a public IPv4 address and a +; set of ports. 
+; 2) ipv6_decap which handles packets arriving from the tunnel, checks they use +; a source IPv4 address and port combination that matches their originating +; tunnel (based on the same binding table as used by ipv6_encap), removes the +; IPv6 encapsulation and sends them out its "internet" interface. +; The binding table must be loaded in the [lua] section and assigned to the +; tasks using the "tun_bindings" parameter. This configuration loads its binding +; table from the provided ip6_tun_bind.lua but other binding tables can be used. +; +; Binding tables of different sizes and different ranges of addresses and ports +; can be generated by a provided helper script: +; helper-scripts/ipv6_tun/ipv6_tun_bindings.pl -n <num_entries> +; Most other parameters of the generated binding table can be tweaked through +; script command-line switches. For more details, refer to the documentation of +; the script obtained by running it with -help as argument. +; The same script can also generate tables for testing tools to generate packets +; with addresses and ports that match entries from the binding table (randomly +; selecting entries from the binding table). +; Additionally, the helper-scripts/ipv6_tun/gen_4over6.pl script can be used to +; generate pcap files with IPv6 (tunnel) and IPv4 (internet) traffic matching a +; given binding table. 
+; Example usage: +; ./helper-scripts/ipv6_tun/ipv6_tun_bindings.pl -n 100000 -suffix _100k +; ./helper-scripts/ipv6_tun/gen_4over6.pl -tun -count=200000 \ +; -in ip6_tun_bind_100k.lua -out lwAFTR_tun_100k.pcap +; ./helper-scripts/ipv6_tun/gen_4over6.pl -inet -count=200000 \ +; -in ip6_tun_bind_100k.lua -out lwAFTR_inet_100k.pcap +; The above sequence of invocations generates a binding table with 100k entries, +; written to file ip6_tun_bind_100k.lua (which the PROX configuration file needs +; to load in the [lua] section then assign using the "tun_bindings" parameter), +; and two pcap files to be used to generate traffic that will hit valid entries +; from the binding table. Each pcap file contains 200k packets of either IPv4 or +; IPv6 traffic. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=inet_0 +mac=00:00:00:00:00:01 +[port 1] +name=lwB4_0 +mac=00:00:00:00:00:03 + +[variables] +$tun_hop_limit=5 +$local_ipv6=fe80:0000:0000:0000:0100:00ff:fe00:0000 +$lookup_port_mask=0xffc0 + +[lua] +bindings = dofile("ip6_tun_bind.lua") + +[defaults] +mempool size=16K + +[global] +start time=20 +name=lwAFTR + +[core 0s0] +mode=master + +;***************************************************************************************** +;##### Send Internet IPv4 traffic into IPv6 tunnels, according to binding table #### +[core 1s0] +name=v6_encap +task=0 +mode=ipv6_encap +rx port=inet_0 +tx port=lwB4_0 +local ipv6=$local_ipv6 +tunnel hop limit=$tun_hop_limit +lookup port mask=$lookup_port_mask +tun_bindings=bindings +;***************************************************************************************** +;##### Terminate IPv6 tunnels and transmit IPv4 out to Internet #### +;# Binding table is checked to ensure src IPv4 address and port combo is allocated to the originating tunnel +[core 2s0] +name=v6_decap +task=0 +mode=ipv6_decap +rx port=lwB4_0 +tx port=inet_0 +dst mac=fe:80:00:ee:00:01 +local ipv6=$local_ipv6 
+tunnel hop limit=$tun_hop_limit +lookup port mask=$lookup_port_mask +tun_bindings=bindings diff --git a/VNFs/DPPD-PROX/config/nat_table.lua b/VNFs/DPPD-PROX/config/nat_table.lua new file mode 100644 index 00000000..4e3a78ab --- /dev/null +++ b/VNFs/DPPD-PROX/config/nat_table.lua @@ -0,0 +1,26 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return { + {from = ip("10.10.100.100"), to = ip("192.168.1.1")}, + {from = ip("192.168.1.1"), to = ip("10.10.100.100")}, + {from = ip("192.168.1.101"), to = ip("10.10.10.101")}, + {from = ip("10.10.10.101"), to = ip("192.168.1.101")}, + {from = ip("192.168.1.102"), to = ip("10.10.10.102")}, + {from = ip("10.10.10.102"), to = ip("192.168.1.102")}, + {from = ip("192.168.100.100"), to = ip("10.0.100.100")}, + {from = ip("10.0.100.100"), to = ip("192.168.100.100")}, +} diff --git a/VNFs/DPPD-PROX/config/nop-rings.cfg b/VNFs/DPPD-PROX/config/nop-rings.cfg new file mode 100644 index 00000000..000353ad --- /dev/null +++ b/VNFs/DPPD-PROX/config/nop-rings.cfg @@ -0,0 +1,109 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This configuration is similar to config/nop.cfg with the difference being the +; type of interfaces. The physical ports are replaced by DPDK rings. To use this +; functionality, RTE_TARGET must be set to x86_64-ivshmem-linuxapp-gcc before +; compiling DPDK (i.e. export RTE_TARGET=x86_64-ivshmem-linuxapp-gcc). Also, +; DPDK needs to be compiled with both CONFIG_RTE_BUILD_COMBINE_LIBS=y and +; CONFIG_RTE_LIBRTE_VHOST=y +; The configuration can then be used inside a VM running on top of Open vSwitch. +; The SHA-1 of the Open vSwitch version that has been tested is c78a00b112c9. To +; run the VM, Qemu needs to be patched to support ivshmem with multiple regions +; and the right command line arguments to be used to share memory. Download and +; patch Qemu 1.6.2 using the following commands: +; git clone git://git.qemu-project.org/qemu.git +; cd qemu +; git checkout v1.6.2 +; wget https://01.org/sites/default/files/page/qemu-v1.6.2-ivshmem-dpdk.patch +; patch -p1 < qemu-v1.6.2-ivshmem-dpdk.patch +; ./configure +; make +; After Open vSwitch has been configured with DPDK rings as ports (i.e. ports +; with type dpdkr), Qemu needs to be started with the correct command line +; arguments. Refer to Section 11.1 from the DPDK Programmer's Guide on how to +; build the Qemu command line arguments. +; This configuration uses 4 ports. This means that 8 rings (4 for TX and 4 for +; RX) will need to be shared with the VM through ivshmem. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=00:00:00:00:00:01 +rx_ring=dpdkr0_tx +tx_ring=dpdkr0_rx +[port 1] +name=if1 +mac=00:00:00:00:00:02 +rx_ring=dpdkr1_tx +tx_ring=dpdkr1_rx +[port 2] +name=if2 +mac=00:00:00:00:00:03 +rx_ring=dpdkr2_tx +tx_ring=dpdkr2_rx +[port 3] +name=if3 +mac=00:00:00:00:00:04 +rx_ring=dpdkr3_tx +tx_ring=dpdkr3_rx + +[defaults] +mempool size=4K + +[global] +start time=5 +name=NOP forwarding rings (4x) + +[core 0] +mode=master + +[core 1] +name=nop +task=0 +mode=nop +rx port=if0 +tx port=if1 +drop=no + +[core 2] +name=nop +task=0 +mode=nop +rx port=if1 +tx port=if0 +drop=no + +[core 3] +name=nop +task=0 +mode=nop +rx port=if2 +tx port=if3 +drop=no + +[core 4] +name=nop +task=0 +mode=nop +rx port=if3 +tx port=if2 +drop=no diff --git a/VNFs/DPPD-PROX/config/nop.cfg b/VNFs/DPPD-PROX/config/nop.cfg new file mode 100644 index 00000000..757b1eda --- /dev/null +++ b/VNFs/DPPD-PROX/config/nop.cfg @@ -0,0 +1,86 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +;; +; This is one of the most basic configurations. Note that this configuration +; does not perform any real work as opposed to configurations like BNG/BRAS +; or lwAFTR. This configuration sets up four interfaces and five cores (one +; master core and four worker cores). Packets are passed (i.e. 
without being +; touched) as follows: +; - interface 0 to interface 1 (handled by core 1) +; - interface 1 to interface 0 (handled by core 2) +; - interface 2 to interface 3 (handled by core 3) +; - interface 3 to interface 2 (handled by core 4) +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=hardware +[port 1] +name=if1 +mac=hardware +[port 2] +name=if2 +mac=hardware +[port 3] +name=if3 +mac=hardware + +[defaults] +mempool size=2K + +[global] +start time=5 +name=NOP forwarding (4x) + +[core 0s0] +mode=master + +[core 1s0] +name=nop +task=0 +mode=nop +rx port=if0 +tx port=if1 +drop=no + +[core 2s0] +name=nop +task=0 +mode=nop +rx port=if1 +tx port=if0 +drop=no + +[core 3s0] +name=nop +task=0 +mode=nop +rx port=if2 +tx port=if3 +drop=no + +[core 4s0] +name=nop +task=0 +mode=nop +rx port=if3 +tx port=if2 +drop=no diff --git a/VNFs/DPPD-PROX/config/nsh_acl.cfg b/VNFs/DPPD-PROX/config/nsh_acl.cfg new file mode 100644 index 00000000..2893bd4d --- /dev/null +++ b/VNFs/DPPD-PROX/config/nsh_acl.cfg @@ -0,0 +1,58 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=hardware +[port 1] +name=if1 +mac=hardware +[lua] +acl_table=dofile("acl_table.lua") + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Firewall + +[core 0s0] +mode=master + +[core 1s0] +name=firewall +task=0 +mode=decapnsh +rx port=if0 +tx cores=${self}t1 + +task=1 +mode=acl +rx ring=yes +tx cores=${self}t2 +rules=acl_table +qinq=no + +task=2 +mode=encapnsh +rx ring=yes +tx port=if0 +drop=no diff --git a/VNFs/DPPD-PROX/config/nsh_nat.cfg b/VNFs/DPPD-PROX/config/nsh_nat.cfg new file mode 100644 index 00000000..bb3bf4bc --- /dev/null +++ b/VNFs/DPPD-PROX/config/nsh_nat.cfg @@ -0,0 +1,57 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=hardware +[port 1] +name=if1 +mac=hardware +[lua] +nat_table = dofile("nat_table.lua") + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Network Address Translation + +[core 0s0] +mode=master + +[core 1s0] +name=nat +task=0 +mode=decapnsh +rx port=if0 +tx cores=${self}t1 + +task=1 +mode=nat +rx ring=yes +tx cores=${self}t2 +nat table=nat_table + +task=2 +mode=encapnsh +rx ring=yes +tx port=if0 +drop=no diff --git a/VNFs/DPPD-PROX/config/pe-4ports.cfg b/VNFs/DPPD-PROX/config/pe-4ports.cfg new file mode 100644 index 00000000..1c7556e1 --- /dev/null +++ b/VNFs/DPPD-PROX/config/pe-4ports.cfg @@ -0,0 +1,170 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=hardware +[port 1] +name=inet0 +mac=hardware +[port 2] +name=if1 +mac=hardware +[port 3] +name=inet1 +mac=hardware +[lua] +lpm4 = dofile("ipv4-2.lua") +dscp_table = dofile("dscp2.lua") +cpe_table = dofile("cpe_table.lua") +acl_table = dofile("rules-2.lua") +user_table = dofile("user_table-pe.lua") +[defaults] +mempool size=65K +[global] +start time=5 +name=PE +cpe table map=if0,if1,if0,if1 + +[variables] +;$wkd=5s1-6s1,5s1h-6s1h; 4 workers +;$wku=7s1-9s1,7s1h-9s1h; 6 workers +$wkd=5s1-6s1,5s1h-6s1h; 6 workers +$wku=7s1-9s1,7s1h-9s1h; 10 workers +[core 0s1] +task=0 +mode=master +tx cores=(${wku})t3m + +[core 1s1] +name=LB-inet0 +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wkd})t0 proto=ipv4 + +[core 1s1h] +name=LB-inet1 +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wkd})t0 proto=ipv4 + +[core 2s1] +name=LB-c0 +task=0 +mode=lbnetwork +rx port=if0 +mempool size=16K +untag mpls=no +tx cores=(${wku})t0 proto=ipv4 + +[core 2s1h] +name=LB-c1 +task=0 +mode=lbnetwork +mempool size=16K +untag mpls=no +rx port=if1 +tx cores=(${wku})t0 proto=ipv4 + +[core $wkd] +name=W-down +task=0 +mode=qinqencapv4 +sub mode=pe +rx ring=yes +tx cores from cpe table=3s1,4s1 remap=if0,if1 +user table=user_table +cpe table=cpe_table +classify=yes +dscp=dscp_table + +[core $wku] +name=W-up +task=0 +mode=acl +rx ring=yes +rules=acl_table +tx cores=${self}t1 +max rules=32768 + +task=1 +mode=police +sub mode=trtcm +police action=yellow io=green,green +police action=drop io=green,yellow +police action=drop io=green,red +police action=drop io=yellow,yellow +police action=drop io=yellow,red +police action=drop io=red,red +cir=4000000000 +pir=4000000000 +cbs=20480 +pbs=20480 +classify=yes +rx ring=yes +tx cores=${self}t2 +users=256 +mark=yes +user table=user_table + +task=2 +mode=untag +ether type=0xa888 +rx ring=yes +tx 
cores=${self}t3 + +task=3 +mode=routing +add mpls=yes +rx ring=yes +tx ports from routing table=inet0,inet1,inet0,inet1 +route table=lpm4 +mark=yes +mark green=1 +mark yellow=2 +mark red=3 + +[core 3s1] +name=qos1 +task=0 +rx ring=yes +mode=qos +tx port=if0 +pipes=256 +pipe tb rate=6250000 +pipe tc rate=6250000 +drop=no +user table=user_table + +[core 4s1] +name=qos1 +rx ring=yes +task=0 +mode=qos +tx port=if1 +pipes=256 +pipe tb rate=6250000 +pipe tc rate=6250000 +drop=no +user table=user_table diff --git a/VNFs/DPPD-PROX/config/pe-8ports.cfg b/VNFs/DPPD-PROX/config/pe-8ports.cfg new file mode 100644 index 00000000..b67c48ba --- /dev/null +++ b/VNFs/DPPD-PROX/config/pe-8ports.cfg @@ -0,0 +1,232 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=if0 +mac=hardware +[port 1] +name=inet0 +mac=hardware +[port 2] +name=if1 +mac=hardware +[port 3] +name=inet1 +mac=hardware +[port 4] +name=if2 +mac=hardware +[port 5] +name=inet2 +mac=hardware +[port 6] +name=if3 +mac=hardware +[port 7] +name=inet3 +mac=hardware +[lua] +lpm4 = dofile("ipv4-2.lua") +dscp_table = dofile("dscp2.lua") +cpe_table = dofile("cpe_table.lua") +acl_table = dofile("rules-2.lua") +user_table = dofile("user_table-pe.lua") +[defaults] +mempool size=65K +[global] +start time=5 +name=PE +cpe table map=if0,if1,if2,if3 + +[variables] +$wkd=5s1-6s1,5s1h-6s1h; 4 workers +$wku=7s1-9s1,7s1h-9s1h; 6 workers +;$wkd=5s1-6s1,5s1h-6s1h; 6 workers +;$wku=7s1-9s1,7s1h-9s1h; 10 workers +[core 0s1] +task=0 +mode=master +tx cores=(${wku})t3m + +[core 1s1] +name=LB-inet0 +task=0 +mode=lbnetwork +rx port=inet0 +untag mpls=yes +tx cores=(${wkd})t0 proto=ipv4 + +task=1 +mode=lbnetwork +rx port=inet2 +untag mpls=yes +tx cores=(${wkd})t0 proto=ipv4 + +[core 1s1h] +name=LB-inet1 +task=0 +mode=lbnetwork +untag mpls=yes +rx port=inet1 +tx cores=(${wkd})t0 proto=ipv4 + +task=1 +mode=lbnetwork +untag mpls=yes +rx port=inet3 +tx cores=(${wkd})t0 proto=ipv4 + +[core 2s1] +name=LB-c0 +task=0 +mode=lbnetwork +rx port=if0 +mempool size=16K +untag mpls=no +tx cores=(${wku})t0 proto=ipv4 + +task=1 +mode=lbnetwork +rx port=if2 +mempool size=16K +untag mpls=no +tx cores=(${wku})t0 proto=ipv4 + +[core 2s1h] +name=LB-c1 +task=0 +mode=lbnetwork +mempool size=16K +untag mpls=no +rx port=if1 +tx cores=(${wku})t0 proto=ipv4 + +task=1 +mode=lbnetwork +mempool size=16K +untag mpls=no +rx port=if3 +tx cores=(${wku})t0 proto=ipv4 + +[core $wkd] +name=W-down +task=0 +mode=qinqencapv4 +sub mode=pe +rx ring=yes +tx cores from cpe table=3s1,4s1,3s1h,4s1h remap=if0,if1,if2,if3 +user table=user_table +cpe table=cpe_table +classify=yes +dscp=dscp_table + +[core $wku] +name=W-up 
+task=0 +mode=acl +rx ring=yes +rules=acl_table +tx cores=${self}t1 +max rules=32768 + +task=1 +mode=police +sub mode=trtcm +police action=yellow io=green,green +police action=drop io=green,yellow +police action=drop io=green,red +police action=drop io=yellow,yellow +police action=drop io=yellow,red +police action=drop io=red,red +cir=4000000000 +pir=4000000000 +cbs=20480 +pbs=20480 +classify=yes +rx ring=yes +tx cores=${self}t2 +users=256 +mark=yes +user table=user_table + +task=2 +mode=untag +ether type=0xa888 +rx ring=yes +tx cores=${self}t3 + +task=3 +mode=routing +add mpls=yes +rx ring=yes +tx ports from routing table=inet0,inet1,inet2,inet3 +route table=lpm4 +mark=yes +mark green=1 +mark yellow=2 +mark red=3 + +[core 3s1] +name=qos1 +task=0 +rx ring=yes +mode=qos +tx port=if0 +pipes=256 +pipe tb rate=6250000 +pipe tc rate=6250000 +drop=no +user table=user_table + +[core 4s1] +name=qos1 +rx ring=yes +task=0 +mode=qos +tx port=if1 +pipes=256 +pipe tb rate=6250000 +pipe tc rate=6250000 +drop=no +user table=user_table + +[core 3s1h] +name=qos1 +task=0 +rx ring=yes +mode=qos +tx port=if2 +pipes=256 +pipe tb rate=6250000 +pipe tc rate=6250000 +drop=no +user table=user_table + +[core 4s1h] +name=qos1 +rx ring=yes +task=0 +mode=qos +tx port=if3 +pipes=256 +pipe tb rate=6250000 +pipe tc rate=6250000 +drop=no +user table=user_table diff --git a/VNFs/DPPD-PROX/config/rules-1.lua b/VNFs/DPPD-PROX/config/rules-1.lua new file mode 100644 index 00000000..20d33f3d --- /dev/null +++ b/VNFs/DPPD-PROX/config/rules-1.lua @@ -0,0 +1,33 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. 
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +seven_tuple = function(svlan, cvlan, ip_proto, src, dst, sport, dport, action) + return { + svlan_id = svlan, + cvlan_id = cvlan, + ip_proto = ip_proto, + src_cidr = src, + dst_cidr = dst, + sport = sport, + dport = dport, + action = action, + } +end + +return { + seven_tuple(val_mask(0, 0x0000), val_mask(0, 0x0000), val_mask(17, 0xff), cidr("192.168.0.0/18"), cidr("10.0.0.0/7"), val_range(0,65535), val_range(0,65535), "allow"), + seven_tuple(val_mask(0, 0x0000), val_mask(0, 0x0000), val_mask(17, 0xff), cidr("192.168.0.0/18"), cidr("74.0.0.0/7"), val_range(0,65535), val_range(0,65535), "allow"), +} diff --git a/VNFs/DPPD-PROX/config/rules-2.lua b/VNFs/DPPD-PROX/config/rules-2.lua new file mode 100644 index 00000000..3e2762be --- /dev/null +++ b/VNFs/DPPD-PROX/config/rules-2.lua @@ -0,0 +1,51 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+
+seven_tuple = function(svlan, cvlan, ip_proto, src, dst, sport, dport, action)
+	return {
+		svlan_id = svlan,
+		cvlan_id = cvlan,
+		ip_proto = ip_proto,
+		src_cidr = src,
+		dst_cidr = dst,
+		sport = sport,
+		dport = dport,
+		action = action,
+	}
+end
+
+rules2={};
+sports={0,2,4,6,8,10,12,14};
+src_t={{s="192.168.0.0/18", svlan1=0, svlan2=1},
+       {s="192.168.16.0/18", svlan1=16, svlan2=17},
+       {s="192.168.32.0/18", svlan1=32, svlan2=33},
+       {s="192.168.48.0/18", svlan1=48, svlan2=49},
+      };
+
+for srck,srcv in pairs(src_t) do
+   for cvlan_mask = 0,255 do
+      for spk,spv in pairs(sports) do
+	 table.insert(rules2,seven_tuple(val_mask(srcv.svlan1,0x0fff), val_mask(cvlan_mask,0x0fff), val_mask(17,0xff), cidr(srcv.s), cidr("10.0.0.0/7"), val_range(spv,spv), val_range(0,511), "allow"));
+	 table.insert(rules2,seven_tuple(val_mask(srcv.svlan1,0x0fff), val_mask(cvlan_mask,0x0fff), val_mask(17,0xff), cidr(srcv.s), cidr("74.0.0.0/7"), val_range(spv,spv), val_range(0,511), "allow"));
+	 table.insert(rules2,seven_tuple(val_mask(srcv.svlan2,0x0fff), val_mask(cvlan_mask,0x0fff), val_mask(17,0xff), cidr(srcv.s), cidr("10.0.0.0/7"), val_range(spv,spv), val_range(0,511), "allow"));
+	 table.insert(rules2,seven_tuple(val_mask(srcv.svlan2,0x0fff), val_mask(cvlan_mask,0x0fff), val_mask(17,0xff), cidr(srcv.s), cidr("74.0.0.0/7"), val_range(spv,spv), val_range(0,511), "allow"));
+	 -- table.insert(rules2,rules4); -- NOTE(review): 'rules4' is never defined in this file; table.insert(t, nil) is a no-op leftover, so the call is disabled.
+      end
+   end
+end
+
+return rules2
+
diff --git a/VNFs/DPPD-PROX/config/tuples.lua b/VNFs/DPPD-PROX/config/tuples.lua
new file mode 100644
index 00000000..268efff4
--- /dev/null
+++ b/VNFs/DPPD-PROX/config/tuples.lua
@@ -0,0 +1,28 @@
+--
+-- Copyright (c) 2010-2017 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +tuples = {}; + +for i = 0,2^23-1 do + tuples[i] = {if_out = i%4, + ip_src = i%2^5, + ip_dst = ((i-i%2^5)/2^5)%2^5, + port_src = ((i-i%2^10)/2^10)%2^5, + port_dst = ((i-i%2^15)/2^15)%2^5, + proto = ((i-i%2^20)/2^20)%2^3 * 2^5, + } +end + diff --git a/VNFs/DPPD-PROX/config/user_table-131K-bng.lua b/VNFs/DPPD-PROX/config/user_table-131K-bng.lua new file mode 100644 index 00000000..10475796 --- /dev/null +++ b/VNFs/DPPD-PROX/config/user_table-131K-bng.lua @@ -0,0 +1,74 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- This script generates a user table containing 131072 users. It is +-- meant to be used in a BNG with 4 CPE facing ports. Each of the CPE +-- facing ports has 32768 users behind it. Each user has a unique +-- svlan/cvlan combination. The only difference between the two sets +-- of users is the svlan id. Note that any arbitrary configuration is +-- possible. 
+ +local user_table = {} + +for i = 1,2^15 do + idx = i - 1 + user_table[i] = { + gre_id = idx, + -- svlan_id is 000000000XXXXXXX at the bit level + -- cvlan_id is 0000XXXX00XX00XX at the bit level + svlan_id = mask(idx, 0x7f00) / 2^8, + cvlan_id = mask(idx, 0xf0) * 2^4 + mask(idx, 0xc) * 2^2 + mask(idx, 0x3), + user_id = idx, + } +end + +for i = 1,2^15 do + idx = i - 1 + user_table[2^15 + i] = { + gre_id = 2^15 + idx, + -- svlan_id is 000000001XXXXXXX at the bit level + -- cvlan_id is 0000XXXX00XX00XX at the bit level + svlan_id = mask(idx, 0x7f00) / 2^8 + 0x80, + cvlan_id = mask(idx, 0xf0) * 2^4 + mask(idx, 0xc) * 2^2 + mask(idx, 0x3), + user_id = idx, + } +end + +for i = 1,2^15 do + idx = i - 1 + user_table[2*2^15 + i] = { + gre_id = 2*2^15 + idx, + -- svlan_id is 000000010XXXXXXX at the bit level + -- cvlan_id is 0000XXXX00XX00XX at the bit level + svlan_id = mask(idx, 0x7f00) / 2^8 + 0x100, + cvlan_id = mask(idx, 0xf0) * 2^4 + mask(idx, 0xc) * 2^2 + mask(idx, 0x3), + user_id = idx, + } +end + +for i = 1,2^15 do + idx = i - 1 + user_table[3*2^15 + i] = { + gre_id = 3*2^15 + idx, + -- svlan_id is 000000011XXXXXXX at the bit level + -- cvlan_id is 0000XXXX00XX00XX at the bit level + svlan_id = mask(idx, 0x7f00) / 2^8 + 0x180, + cvlan_id = mask(idx, 0xf0) * 2^4 + mask(idx, 0xc) * 2^2 + mask(idx, 0x3), + user_id = idx, + } +end + +return user_table diff --git a/VNFs/DPPD-PROX/config/user_table-65K-bng.lua b/VNFs/DPPD-PROX/config/user_table-65K-bng.lua new file mode 100644 index 00000000..aa62874c --- /dev/null +++ b/VNFs/DPPD-PROX/config/user_table-65K-bng.lua @@ -0,0 +1,50 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. 
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- This script generates a user table containing 65536 users. It is +-- meant to be used in a BNG with 2 CPE facing ports. Each of the CPE +-- facing ports has 32768 users behind it. Each user has a unique +-- svlan/cvlan combination. The only difference between the two sets +-- of users is the svlan id. Note that any arbitrary configuration is +-- possible. + +local user_table = {} + +for i = 1,2^15 do + idx = i - 1 + user_table[i] = { + gre_id = idx, + -- svlan_id is 000000000XXXXXXX at the bit level + -- cvlan_id is 0000XXXX00XX00XX at the bit level + svlan_id = mask(idx, 0x7f00) / 2^8, + cvlan_id = mask(idx, 0xf0) * 2^4 + mask(idx, 0xc) * 2^2 + mask(idx, 0x3), + user_id = idx, + } +end + +for i = 1,2^15 do + idx = i - 1 + user_table[2^15 + i] = { + gre_id = 2^15 + idx, + -- svlan_id is 000000001XXXXXXX at the bit level + -- cvlan_id is 0000XXXX00XX00XX at the bit level + svlan_id = mask(idx, 0x7f00) / 2^8 + 0x80, + cvlan_id = mask(idx, 0xf0) * 2^4 + mask(idx, 0xc) * 2^2 + mask(idx, 0x3), + user_id = idx, + } +end + +return user_table diff --git a/VNFs/DPPD-PROX/config/user_table-pe.lua b/VNFs/DPPD-PROX/config/user_table-pe.lua new file mode 100644 index 00000000..67afbffa --- /dev/null +++ b/VNFs/DPPD-PROX/config/user_table-pe.lua @@ -0,0 +1,2066 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. 
+-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return { + {gre_id = 0, svlan_id = 0, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 1, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 0, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 1, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 0, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 1, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 0, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 1, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 0, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 1, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 0, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 1, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 0, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 1, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 0, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 1, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 0, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 1, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 0, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 1, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 0, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 1, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 0, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 1, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 0, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 1, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 0, cvlan_id = 13, user_id = 13}, + 
{gre_id = 0, svlan_id = 1, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 0, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 1, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 0, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 1, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 0, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 1, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 0, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 1, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 0, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 1, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 0, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 1, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 0, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 1, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 0, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 1, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 0, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 1, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 0, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 1, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 0, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 1, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 0, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 1, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 0, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 1, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 0, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 1, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 0, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 1, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 0, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 1, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 0, cvlan_id = 30, user_id = 
30}, + {gre_id = 0, svlan_id = 1, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 0, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 1, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 0, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 1, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 0, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 1, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 0, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 1, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 0, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 1, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 0, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 1, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 0, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 1, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 0, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 1, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 0, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 1, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 0, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 1, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 0, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 1, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 0, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 1, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 0, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 1, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 0, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 1, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 0, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 1, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 0, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 1, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 0, cvlan_id = 47, 
user_id = 47}, + {gre_id = 0, svlan_id = 1, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 0, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 1, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 0, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 1, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 0, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 1, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 0, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 1, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 0, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 1, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 0, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 1, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 0, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 1, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 0, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 1, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 0, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 1, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 0, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 1, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 0, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 1, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 0, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 1, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 0, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 1, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 0, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 1, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 0, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 1, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 0, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 1, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 0, cvlan_id 
= 64, user_id = 64}, + {gre_id = 0, svlan_id = 1, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 0, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 1, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 0, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 1, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 0, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 1, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 0, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 1, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 0, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 1, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 0, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 1, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 0, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 1, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 0, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 1, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 0, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 1, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 0, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 1, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 0, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 1, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 0, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 1, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 0, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 1, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 0, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 1, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 0, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 1, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 0, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 1, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 0, 
cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 1, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 0, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 1, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 0, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 1, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 0, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 1, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 0, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 1, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 0, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 1, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 0, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 1, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 0, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 1, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 0, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 1, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 0, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 1, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 0, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 1, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 0, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 1, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 0, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 1, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 0, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 1, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 0, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 1, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 0, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 1, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 0, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 1, cvlan_id = 97, user_id = 97}, + {gre_id = 0, 
svlan_id = 0, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 1, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 0, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 1, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 0, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 1, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 0, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 1, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 0, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 1, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 0, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 1, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 0, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 1, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 0, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 1, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 0, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 1, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 0, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 1, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 0, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 1, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 0, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 1, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 0, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 1, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 0, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 1, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 0, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 1, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 0, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 1, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 0, cvlan_id = 114, user_id = 114}, + {gre_id 
= 0, svlan_id = 1, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 0, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 1, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 0, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 1, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 0, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 1, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 0, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 1, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 0, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 1, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 0, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 1, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 0, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 1, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 0, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 1, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 0, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 1, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 0, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 1, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 0, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 1, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 0, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 1, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 0, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 1, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 0, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 1, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 0, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 1, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 0, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 1, cvlan_id = 130, user_id = 
130}, + {gre_id = 0, svlan_id = 0, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 1, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 0, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 1, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 0, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 1, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 0, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 1, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 0, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 1, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 0, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 1, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 0, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 1, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 0, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 1, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 0, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 1, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 0, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 1, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 0, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 1, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 0, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 1, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 0, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 1, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 0, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 1, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 0, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 1, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 0, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 1, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 0, cvlan_id = 
147, user_id = 147}, + {gre_id = 0, svlan_id = 1, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 0, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 1, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 0, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 1, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 0, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 1, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 0, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 1, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 0, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 1, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 0, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 1, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 0, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 1, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 0, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 1, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 0, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 1, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 0, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 1, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 0, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 1, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 0, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 1, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 0, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 1, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 0, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 1, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 0, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 1, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 0, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 
1, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 0, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 1, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 0, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 1, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 0, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 1, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 0, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 1, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 0, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 1, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 0, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 1, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 0, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 1, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 0, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 1, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 0, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 1, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 0, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 1, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 0, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 1, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 0, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 1, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 0, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 1, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 0, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 1, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 0, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 1, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 0, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 1, cvlan_id = 179, user_id = 179}, + {gre_id = 
0, svlan_id = 0, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 1, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 0, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 1, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 0, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 1, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 0, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 1, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 0, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 1, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 0, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 1, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 0, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 1, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 0, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 1, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 0, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 1, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 0, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 1, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 0, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 1, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 0, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 1, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 0, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 1, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 0, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 1, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 0, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 1, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 0, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 1, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 0, cvlan_id = 196, user_id = 
196}, + {gre_id = 0, svlan_id = 1, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 0, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 1, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 0, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 1, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 0, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 1, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 0, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 1, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 0, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 1, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 0, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 1, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 0, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 1, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 0, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 1, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 0, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 1, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 0, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 1, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 0, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 1, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 0, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 1, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 0, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 1, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 0, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 1, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 0, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 1, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 0, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 1, cvlan_id = 
212, user_id = 212}, + {gre_id = 0, svlan_id = 0, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 1, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 0, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 1, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 0, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 1, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 0, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 1, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 0, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 1, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 0, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 1, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 0, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 1, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 0, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 1, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 0, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 1, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 0, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 1, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 0, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 1, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 0, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 1, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 0, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 1, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 0, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 1, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 0, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 1, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 0, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 1, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 
0, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 1, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 0, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 1, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 0, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 1, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 0, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 1, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 0, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 1, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 0, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 1, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 0, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 1, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 0, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 1, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 0, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 1, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 0, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 1, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 0, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 1, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 0, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 1, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 0, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 1, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 0, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 1, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 0, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 1, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 0, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 1, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 0, cvlan_id = 245, user_id = 245}, + {gre_id = 
0, svlan_id = 1, cvlan_id = 245, user_id = 245}, + {gre_id = 0, svlan_id = 0, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 1, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 0, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 1, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 0, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 1, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 0, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 1, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 0, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 1, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 0, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 1, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 0, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 1, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 0, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 1, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 0, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 1, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 0, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 1, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 16, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 17, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 16, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 17, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 16, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 17, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 16, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 17, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 16, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 17, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 16, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 17, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 16, 
cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 17, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 16, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 17, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 16, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 17, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 16, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 17, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 16, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 17, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 16, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 17, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 16, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 17, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 16, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 17, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 16, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 17, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 16, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 17, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 16, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 17, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 16, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 17, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 16, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 17, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 16, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 17, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 16, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 17, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 16, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 17, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 16, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 17, cvlan_id = 22, user_id = 22}, + 
{gre_id = 0, svlan_id = 16, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 17, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 16, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 17, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 16, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 17, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 16, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 17, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 16, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 17, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 16, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 17, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 16, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 17, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 16, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 17, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 16, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 17, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 16, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 17, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 16, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 17, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 16, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 17, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 16, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 17, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 16, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 17, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 16, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 17, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 16, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 17, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 16, cvlan_id = 39, user_id = 39}, + {gre_id = 0, 
svlan_id = 17, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 16, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 17, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 16, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 17, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 16, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 17, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 16, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 17, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 16, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 17, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 16, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 17, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 16, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 17, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 16, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 17, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 16, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 17, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 16, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 17, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 16, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 17, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 16, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 17, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 16, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 17, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 16, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 17, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 16, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 17, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 16, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 17, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 16, 
cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 17, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 16, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 17, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 16, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 17, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 16, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 17, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 16, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 17, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 16, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 17, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 16, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 17, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 16, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 17, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 16, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 17, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 16, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 17, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 16, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 17, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 16, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 17, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 16, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 17, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 16, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 17, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 16, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 17, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 16, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 17, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 16, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 17, cvlan_id = 72, 
user_id = 72}, + {gre_id = 0, svlan_id = 16, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 17, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 16, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 17, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 16, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 17, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 16, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 17, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 16, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 17, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 16, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 17, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 16, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 17, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 16, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 17, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 16, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 17, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 16, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 17, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 16, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 17, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 16, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 17, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 16, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 17, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 16, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 17, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 16, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 17, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 16, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 17, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 16, cvlan_id = 89, user_id = 89}, + 
{gre_id = 0, svlan_id = 17, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 16, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 17, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 16, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 17, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 16, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 17, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 16, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 17, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 16, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 17, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 16, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 17, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 16, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 17, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 16, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 17, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 16, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 17, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 16, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 17, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 16, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 17, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 16, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 17, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 16, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 17, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 16, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 17, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 16, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 17, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 16, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 17, cvlan_id = 105, user_id = 
105}, + {gre_id = 0, svlan_id = 16, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 17, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 16, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 17, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 16, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 17, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 16, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 17, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 16, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 17, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 16, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 17, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 16, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 17, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 16, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 17, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 16, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 17, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 16, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 17, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 16, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 17, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 16, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 17, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 16, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 17, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 16, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 17, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 16, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 17, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 16, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 17, cvlan_id = 121, user_id = 121}, + {gre_id 
= 0, svlan_id = 16, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 17, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 16, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 17, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 16, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 17, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 16, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 17, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 16, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 17, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 16, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 17, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 16, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 17, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 16, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 17, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 16, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 17, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 16, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 17, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 16, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 17, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 16, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 17, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 16, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 17, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 16, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 17, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 16, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 17, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 16, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 17, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 
16, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 17, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 16, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 17, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 16, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 17, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 16, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 17, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 16, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 17, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 16, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 17, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 16, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 17, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 16, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 17, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 16, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 17, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 16, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 17, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 16, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 17, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 16, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 17, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 16, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 17, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 16, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 17, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 16, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 17, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 16, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 17, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 16, cvlan_id = 
154, user_id = 154}, + {gre_id = 0, svlan_id = 17, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 16, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 17, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 16, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 17, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 16, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 17, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 16, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 17, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 16, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 17, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 16, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 17, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 16, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 17, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 16, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 17, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 16, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 17, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 16, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 17, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 16, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 17, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 16, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 17, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 16, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 17, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 16, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 17, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 16, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 17, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 16, cvlan_id = 170, user_id = 
170}, + {gre_id = 0, svlan_id = 17, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 16, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 17, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 16, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 17, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 16, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 17, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 16, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 17, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 16, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 17, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 16, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 17, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 16, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 17, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 16, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 17, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 16, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 17, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 16, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 17, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 16, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 17, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 16, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 17, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 16, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 17, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 16, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 17, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 16, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 17, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 16, cvlan_id = 186, user_id = 186}, + {gre_id 
= 0, svlan_id = 17, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 16, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 17, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 16, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 17, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 16, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 17, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 16, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 17, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 16, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 17, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 16, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 17, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 16, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 17, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 16, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 17, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 16, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 17, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 16, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 17, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 16, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 17, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 16, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 17, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 16, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 17, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 16, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 17, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 16, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 17, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 16, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 
17, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 16, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 17, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 16, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 17, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 16, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 17, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 16, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 17, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 16, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 17, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 16, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 17, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 16, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 17, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 16, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 17, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 16, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 17, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 16, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 17, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 16, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 17, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 16, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 17, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 16, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 17, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 16, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 17, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 16, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 17, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 16, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 17, cvlan_id = 
218, user_id = 218}, + {gre_id = 0, svlan_id = 16, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 17, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 16, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 17, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 16, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 17, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 16, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 17, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 16, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 17, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 16, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 17, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 16, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 17, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 16, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 17, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 16, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 17, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 16, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 17, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 16, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 17, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 16, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 17, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 16, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 17, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 16, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 17, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 16, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 17, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 16, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 17, cvlan_id = 234, user_id = 
234}, + {gre_id = 0, svlan_id = 16, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 17, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 16, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 17, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 16, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 17, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 16, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 17, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 16, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 17, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 16, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 17, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 16, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 17, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 16, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 17, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 16, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 17, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 16, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 17, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 16, cvlan_id = 245, user_id = 245}, + {gre_id = 0, svlan_id = 17, cvlan_id = 245, user_id = 245}, + {gre_id = 0, svlan_id = 16, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 17, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 16, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 17, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 16, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 17, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 16, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 17, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 16, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 17, cvlan_id = 250, user_id = 250}, + {gre_id 
= 0, svlan_id = 16, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 17, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 16, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 17, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 16, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 17, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 16, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 17, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 16, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 17, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 32, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 33, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 32, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 33, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 32, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 33, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 32, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 33, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 32, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 33, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 32, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 33, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 32, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 33, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 32, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 33, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 32, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 33, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 32, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 33, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 32, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 33, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 32, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 33, cvlan_id = 11, 
user_id = 11}, + {gre_id = 0, svlan_id = 32, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 33, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 32, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 33, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 32, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 33, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 32, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 33, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 32, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 33, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 32, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 33, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 32, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 33, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 32, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 33, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 32, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 33, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 32, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 33, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 32, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 33, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 32, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 33, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 32, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 33, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 32, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 33, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 32, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 33, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 32, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 33, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 32, cvlan_id = 28, user_id = 28}, + 
{gre_id = 0, svlan_id = 33, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 32, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 33, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 32, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 33, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 32, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 33, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 32, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 33, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 32, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 33, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 32, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 33, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 32, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 33, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 32, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 33, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 32, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 33, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 32, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 33, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 32, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 33, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 32, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 33, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 32, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 33, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 32, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 33, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 32, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 33, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 32, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 33, cvlan_id = 44, user_id = 44}, + {gre_id = 0, 
svlan_id = 32, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 33, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 32, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 33, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 32, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 33, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 32, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 33, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 32, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 33, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 32, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 33, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 32, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 33, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 32, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 33, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 32, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 33, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 32, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 33, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 32, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 33, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 32, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 33, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 32, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 33, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 32, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 33, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 32, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 33, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 32, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 33, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 32, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 33, 
cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 32, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 33, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 32, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 33, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 32, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 33, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 32, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 33, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 32, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 33, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 32, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 33, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 32, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 33, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 32, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 33, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 32, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 33, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 32, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 33, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 32, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 33, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 32, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 33, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 32, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 33, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 32, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 33, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 32, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 33, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 32, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 33, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 32, cvlan_id = 78, 
user_id = 78}, + {gre_id = 0, svlan_id = 33, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 32, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 33, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 32, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 33, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 32, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 33, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 32, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 33, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 32, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 33, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 32, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 33, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 32, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 33, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 32, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 33, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 32, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 33, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 32, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 33, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 32, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 33, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 32, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 33, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 32, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 33, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 32, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 33, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 32, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 33, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 32, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 33, cvlan_id = 94, user_id = 94}, + 
{gre_id = 0, svlan_id = 32, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 33, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 32, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 33, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 32, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 33, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 32, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 33, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 32, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 33, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 32, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 33, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 32, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 33, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 32, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 33, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 32, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 33, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 32, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 33, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 32, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 33, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 32, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 33, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 32, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 33, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 32, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 33, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 32, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 33, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 32, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 33, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 32, 
cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 33, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 32, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 33, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 32, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 33, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 32, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 33, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 32, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 33, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 32, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 33, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 32, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 33, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 32, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 33, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 32, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 33, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 32, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 33, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 32, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 33, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 32, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 33, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 32, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 33, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 32, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 33, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 32, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 33, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 32, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 33, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 32, cvlan_id = 127, 
user_id = 127}, + {gre_id = 0, svlan_id = 33, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 32, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 33, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 32, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 33, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 32, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 33, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 32, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 33, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 32, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 33, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 32, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 33, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 32, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 33, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 32, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 33, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 32, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 33, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 32, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 33, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 32, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 33, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 32, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 33, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 32, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 33, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 32, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 33, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 32, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 33, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 32, cvlan_id = 143, user_id = 143}, 
+ {gre_id = 0, svlan_id = 33, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 32, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 33, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 32, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 33, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 32, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 33, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 32, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 33, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 32, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 33, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 32, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 33, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 32, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 33, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 32, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 33, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 32, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 33, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 32, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 33, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 32, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 33, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 32, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 33, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 32, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 33, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 32, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 33, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 32, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 33, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 32, cvlan_id = 159, user_id = 159}, + {gre_id = 0, 
svlan_id = 33, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 32, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 33, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 32, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 33, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 32, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 33, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 32, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 33, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 32, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 33, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 32, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 33, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 32, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 33, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 32, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 33, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 32, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 33, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 32, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 33, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 32, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 33, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 32, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 33, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 32, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 33, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 32, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 33, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 32, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 33, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 32, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 33, 
cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 32, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 33, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 32, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 33, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 32, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 33, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 32, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 33, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 32, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 33, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 32, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 33, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 32, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 33, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 32, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 33, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 32, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 33, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 32, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 33, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 32, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 33, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 32, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 33, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 32, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 33, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 32, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 33, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 32, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 33, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 32, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 33, cvlan_id = 191, 
user_id = 191}, + {gre_id = 0, svlan_id = 32, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 33, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 32, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 33, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 32, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 33, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 32, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 33, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 32, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 33, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 32, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 33, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 32, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 33, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 32, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 33, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 32, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 33, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 32, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 33, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 32, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 33, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 32, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 33, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 32, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 33, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 32, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 33, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 32, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 33, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 32, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 33, cvlan_id = 207, user_id = 207}, 
+ {gre_id = 0, svlan_id = 32, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 33, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 32, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 33, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 32, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 33, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 32, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 33, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 32, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 33, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 32, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 33, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 32, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 33, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 32, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 33, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 32, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 33, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 32, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 33, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 32, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 33, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 32, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 33, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 32, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 33, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 32, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 33, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 32, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 33, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 32, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 33, cvlan_id = 223, user_id = 223}, + {gre_id = 0, 
svlan_id = 32, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 33, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 32, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 33, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 32, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 33, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 32, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 33, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 32, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 33, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 32, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 33, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 32, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 33, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 32, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 33, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 32, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 33, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 32, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 33, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 32, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 33, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 32, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 33, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 32, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 33, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 32, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 33, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 32, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 33, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 32, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 33, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 32, 
cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 33, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 32, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 33, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 32, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 33, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 32, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 33, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 32, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 33, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 32, cvlan_id = 245, user_id = 245}, + {gre_id = 0, svlan_id = 33, cvlan_id = 245, user_id = 245}, + {gre_id = 0, svlan_id = 32, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 33, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 32, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 33, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 32, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 33, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 32, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 33, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 32, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 33, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 32, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 33, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 32, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 33, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 32, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 33, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 32, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 33, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 32, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 33, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 48, cvlan_id = 0, 
user_id = 0}, + {gre_id = 0, svlan_id = 49, cvlan_id = 0, user_id = 0}, + {gre_id = 0, svlan_id = 48, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 49, cvlan_id = 1, user_id = 1}, + {gre_id = 0, svlan_id = 48, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 49, cvlan_id = 2, user_id = 2}, + {gre_id = 0, svlan_id = 48, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 49, cvlan_id = 3, user_id = 3}, + {gre_id = 0, svlan_id = 48, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 49, cvlan_id = 4, user_id = 4}, + {gre_id = 0, svlan_id = 48, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 49, cvlan_id = 5, user_id = 5}, + {gre_id = 0, svlan_id = 48, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 49, cvlan_id = 6, user_id = 6}, + {gre_id = 0, svlan_id = 48, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 49, cvlan_id = 7, user_id = 7}, + {gre_id = 0, svlan_id = 48, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 49, cvlan_id = 8, user_id = 8}, + {gre_id = 0, svlan_id = 48, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 49, cvlan_id = 9, user_id = 9}, + {gre_id = 0, svlan_id = 48, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 49, cvlan_id = 10, user_id = 10}, + {gre_id = 0, svlan_id = 48, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 49, cvlan_id = 11, user_id = 11}, + {gre_id = 0, svlan_id = 48, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 49, cvlan_id = 12, user_id = 12}, + {gre_id = 0, svlan_id = 48, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 49, cvlan_id = 13, user_id = 13}, + {gre_id = 0, svlan_id = 48, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 49, cvlan_id = 14, user_id = 14}, + {gre_id = 0, svlan_id = 48, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 49, cvlan_id = 15, user_id = 15}, + {gre_id = 0, svlan_id = 48, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 49, cvlan_id = 16, user_id = 16}, + {gre_id = 0, svlan_id = 48, cvlan_id = 
17, user_id = 17}, + {gre_id = 0, svlan_id = 49, cvlan_id = 17, user_id = 17}, + {gre_id = 0, svlan_id = 48, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 49, cvlan_id = 18, user_id = 18}, + {gre_id = 0, svlan_id = 48, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 49, cvlan_id = 19, user_id = 19}, + {gre_id = 0, svlan_id = 48, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 49, cvlan_id = 20, user_id = 20}, + {gre_id = 0, svlan_id = 48, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 49, cvlan_id = 21, user_id = 21}, + {gre_id = 0, svlan_id = 48, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 49, cvlan_id = 22, user_id = 22}, + {gre_id = 0, svlan_id = 48, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 49, cvlan_id = 23, user_id = 23}, + {gre_id = 0, svlan_id = 48, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 49, cvlan_id = 24, user_id = 24}, + {gre_id = 0, svlan_id = 48, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 49, cvlan_id = 25, user_id = 25}, + {gre_id = 0, svlan_id = 48, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 49, cvlan_id = 26, user_id = 26}, + {gre_id = 0, svlan_id = 48, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 49, cvlan_id = 27, user_id = 27}, + {gre_id = 0, svlan_id = 48, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 49, cvlan_id = 28, user_id = 28}, + {gre_id = 0, svlan_id = 48, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 49, cvlan_id = 29, user_id = 29}, + {gre_id = 0, svlan_id = 48, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 49, cvlan_id = 30, user_id = 30}, + {gre_id = 0, svlan_id = 48, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 49, cvlan_id = 31, user_id = 31}, + {gre_id = 0, svlan_id = 48, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 49, cvlan_id = 32, user_id = 32}, + {gre_id = 0, svlan_id = 48, cvlan_id = 33, user_id = 33}, + {gre_id = 0, svlan_id = 49, cvlan_id = 33, user_id = 33}, 
+ {gre_id = 0, svlan_id = 48, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 49, cvlan_id = 34, user_id = 34}, + {gre_id = 0, svlan_id = 48, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 49, cvlan_id = 35, user_id = 35}, + {gre_id = 0, svlan_id = 48, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 49, cvlan_id = 36, user_id = 36}, + {gre_id = 0, svlan_id = 48, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 49, cvlan_id = 37, user_id = 37}, + {gre_id = 0, svlan_id = 48, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 49, cvlan_id = 38, user_id = 38}, + {gre_id = 0, svlan_id = 48, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 49, cvlan_id = 39, user_id = 39}, + {gre_id = 0, svlan_id = 48, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 49, cvlan_id = 40, user_id = 40}, + {gre_id = 0, svlan_id = 48, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 49, cvlan_id = 41, user_id = 41}, + {gre_id = 0, svlan_id = 48, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 49, cvlan_id = 42, user_id = 42}, + {gre_id = 0, svlan_id = 48, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 49, cvlan_id = 43, user_id = 43}, + {gre_id = 0, svlan_id = 48, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 49, cvlan_id = 44, user_id = 44}, + {gre_id = 0, svlan_id = 48, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 49, cvlan_id = 45, user_id = 45}, + {gre_id = 0, svlan_id = 48, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 49, cvlan_id = 46, user_id = 46}, + {gre_id = 0, svlan_id = 48, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 49, cvlan_id = 47, user_id = 47}, + {gre_id = 0, svlan_id = 48, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 49, cvlan_id = 48, user_id = 48}, + {gre_id = 0, svlan_id = 48, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 49, cvlan_id = 49, user_id = 49}, + {gre_id = 0, svlan_id = 48, cvlan_id = 50, user_id = 50}, + {gre_id = 0, 
svlan_id = 49, cvlan_id = 50, user_id = 50}, + {gre_id = 0, svlan_id = 48, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 49, cvlan_id = 51, user_id = 51}, + {gre_id = 0, svlan_id = 48, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 49, cvlan_id = 52, user_id = 52}, + {gre_id = 0, svlan_id = 48, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 49, cvlan_id = 53, user_id = 53}, + {gre_id = 0, svlan_id = 48, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 49, cvlan_id = 54, user_id = 54}, + {gre_id = 0, svlan_id = 48, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 49, cvlan_id = 55, user_id = 55}, + {gre_id = 0, svlan_id = 48, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 49, cvlan_id = 56, user_id = 56}, + {gre_id = 0, svlan_id = 48, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 49, cvlan_id = 57, user_id = 57}, + {gre_id = 0, svlan_id = 48, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 49, cvlan_id = 58, user_id = 58}, + {gre_id = 0, svlan_id = 48, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 49, cvlan_id = 59, user_id = 59}, + {gre_id = 0, svlan_id = 48, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 49, cvlan_id = 60, user_id = 60}, + {gre_id = 0, svlan_id = 48, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 49, cvlan_id = 61, user_id = 61}, + {gre_id = 0, svlan_id = 48, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 49, cvlan_id = 62, user_id = 62}, + {gre_id = 0, svlan_id = 48, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 49, cvlan_id = 63, user_id = 63}, + {gre_id = 0, svlan_id = 48, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 49, cvlan_id = 64, user_id = 64}, + {gre_id = 0, svlan_id = 48, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 49, cvlan_id = 65, user_id = 65}, + {gre_id = 0, svlan_id = 48, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 49, cvlan_id = 66, user_id = 66}, + {gre_id = 0, svlan_id = 48, 
cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 49, cvlan_id = 67, user_id = 67}, + {gre_id = 0, svlan_id = 48, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 49, cvlan_id = 68, user_id = 68}, + {gre_id = 0, svlan_id = 48, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 49, cvlan_id = 69, user_id = 69}, + {gre_id = 0, svlan_id = 48, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 49, cvlan_id = 70, user_id = 70}, + {gre_id = 0, svlan_id = 48, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 49, cvlan_id = 71, user_id = 71}, + {gre_id = 0, svlan_id = 48, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 49, cvlan_id = 72, user_id = 72}, + {gre_id = 0, svlan_id = 48, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 49, cvlan_id = 73, user_id = 73}, + {gre_id = 0, svlan_id = 48, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 49, cvlan_id = 74, user_id = 74}, + {gre_id = 0, svlan_id = 48, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 49, cvlan_id = 75, user_id = 75}, + {gre_id = 0, svlan_id = 48, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 49, cvlan_id = 76, user_id = 76}, + {gre_id = 0, svlan_id = 48, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 49, cvlan_id = 77, user_id = 77}, + {gre_id = 0, svlan_id = 48, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 49, cvlan_id = 78, user_id = 78}, + {gre_id = 0, svlan_id = 48, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 49, cvlan_id = 79, user_id = 79}, + {gre_id = 0, svlan_id = 48, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 49, cvlan_id = 80, user_id = 80}, + {gre_id = 0, svlan_id = 48, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 49, cvlan_id = 81, user_id = 81}, + {gre_id = 0, svlan_id = 48, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 49, cvlan_id = 82, user_id = 82}, + {gre_id = 0, svlan_id = 48, cvlan_id = 83, user_id = 83}, + {gre_id = 0, svlan_id = 49, cvlan_id = 83, 
user_id = 83}, + {gre_id = 0, svlan_id = 48, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 49, cvlan_id = 84, user_id = 84}, + {gre_id = 0, svlan_id = 48, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 49, cvlan_id = 85, user_id = 85}, + {gre_id = 0, svlan_id = 48, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 49, cvlan_id = 86, user_id = 86}, + {gre_id = 0, svlan_id = 48, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 49, cvlan_id = 87, user_id = 87}, + {gre_id = 0, svlan_id = 48, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 49, cvlan_id = 88, user_id = 88}, + {gre_id = 0, svlan_id = 48, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 49, cvlan_id = 89, user_id = 89}, + {gre_id = 0, svlan_id = 48, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 49, cvlan_id = 90, user_id = 90}, + {gre_id = 0, svlan_id = 48, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 49, cvlan_id = 91, user_id = 91}, + {gre_id = 0, svlan_id = 48, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 49, cvlan_id = 92, user_id = 92}, + {gre_id = 0, svlan_id = 48, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 49, cvlan_id = 93, user_id = 93}, + {gre_id = 0, svlan_id = 48, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 49, cvlan_id = 94, user_id = 94}, + {gre_id = 0, svlan_id = 48, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 49, cvlan_id = 95, user_id = 95}, + {gre_id = 0, svlan_id = 48, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 49, cvlan_id = 96, user_id = 96}, + {gre_id = 0, svlan_id = 48, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 49, cvlan_id = 97, user_id = 97}, + {gre_id = 0, svlan_id = 48, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 49, cvlan_id = 98, user_id = 98}, + {gre_id = 0, svlan_id = 48, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 49, cvlan_id = 99, user_id = 99}, + {gre_id = 0, svlan_id = 48, cvlan_id = 100, user_id = 100}, + 
{gre_id = 0, svlan_id = 49, cvlan_id = 100, user_id = 100}, + {gre_id = 0, svlan_id = 48, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 49, cvlan_id = 101, user_id = 101}, + {gre_id = 0, svlan_id = 48, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 49, cvlan_id = 102, user_id = 102}, + {gre_id = 0, svlan_id = 48, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 49, cvlan_id = 103, user_id = 103}, + {gre_id = 0, svlan_id = 48, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 49, cvlan_id = 104, user_id = 104}, + {gre_id = 0, svlan_id = 48, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 49, cvlan_id = 105, user_id = 105}, + {gre_id = 0, svlan_id = 48, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 49, cvlan_id = 106, user_id = 106}, + {gre_id = 0, svlan_id = 48, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 49, cvlan_id = 107, user_id = 107}, + {gre_id = 0, svlan_id = 48, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 49, cvlan_id = 108, user_id = 108}, + {gre_id = 0, svlan_id = 48, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 49, cvlan_id = 109, user_id = 109}, + {gre_id = 0, svlan_id = 48, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 49, cvlan_id = 110, user_id = 110}, + {gre_id = 0, svlan_id = 48, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 49, cvlan_id = 111, user_id = 111}, + {gre_id = 0, svlan_id = 48, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 49, cvlan_id = 112, user_id = 112}, + {gre_id = 0, svlan_id = 48, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 49, cvlan_id = 113, user_id = 113}, + {gre_id = 0, svlan_id = 48, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 49, cvlan_id = 114, user_id = 114}, + {gre_id = 0, svlan_id = 48, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 49, cvlan_id = 115, user_id = 115}, + {gre_id = 0, svlan_id = 48, cvlan_id = 116, user_id = 116}, + {gre_id = 0, 
svlan_id = 49, cvlan_id = 116, user_id = 116}, + {gre_id = 0, svlan_id = 48, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 49, cvlan_id = 117, user_id = 117}, + {gre_id = 0, svlan_id = 48, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 49, cvlan_id = 118, user_id = 118}, + {gre_id = 0, svlan_id = 48, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 49, cvlan_id = 119, user_id = 119}, + {gre_id = 0, svlan_id = 48, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 49, cvlan_id = 120, user_id = 120}, + {gre_id = 0, svlan_id = 48, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 49, cvlan_id = 121, user_id = 121}, + {gre_id = 0, svlan_id = 48, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 49, cvlan_id = 122, user_id = 122}, + {gre_id = 0, svlan_id = 48, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 49, cvlan_id = 123, user_id = 123}, + {gre_id = 0, svlan_id = 48, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 49, cvlan_id = 124, user_id = 124}, + {gre_id = 0, svlan_id = 48, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 49, cvlan_id = 125, user_id = 125}, + {gre_id = 0, svlan_id = 48, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 49, cvlan_id = 126, user_id = 126}, + {gre_id = 0, svlan_id = 48, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 49, cvlan_id = 127, user_id = 127}, + {gre_id = 0, svlan_id = 48, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 49, cvlan_id = 128, user_id = 128}, + {gre_id = 0, svlan_id = 48, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 49, cvlan_id = 129, user_id = 129}, + {gre_id = 0, svlan_id = 48, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 49, cvlan_id = 130, user_id = 130}, + {gre_id = 0, svlan_id = 48, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 49, cvlan_id = 131, user_id = 131}, + {gre_id = 0, svlan_id = 48, cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 49, 
cvlan_id = 132, user_id = 132}, + {gre_id = 0, svlan_id = 48, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 49, cvlan_id = 133, user_id = 133}, + {gre_id = 0, svlan_id = 48, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 49, cvlan_id = 134, user_id = 134}, + {gre_id = 0, svlan_id = 48, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 49, cvlan_id = 135, user_id = 135}, + {gre_id = 0, svlan_id = 48, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 49, cvlan_id = 136, user_id = 136}, + {gre_id = 0, svlan_id = 48, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 49, cvlan_id = 137, user_id = 137}, + {gre_id = 0, svlan_id = 48, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 49, cvlan_id = 138, user_id = 138}, + {gre_id = 0, svlan_id = 48, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 49, cvlan_id = 139, user_id = 139}, + {gre_id = 0, svlan_id = 48, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 49, cvlan_id = 140, user_id = 140}, + {gre_id = 0, svlan_id = 48, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 49, cvlan_id = 141, user_id = 141}, + {gre_id = 0, svlan_id = 48, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 49, cvlan_id = 142, user_id = 142}, + {gre_id = 0, svlan_id = 48, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 49, cvlan_id = 143, user_id = 143}, + {gre_id = 0, svlan_id = 48, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 49, cvlan_id = 144, user_id = 144}, + {gre_id = 0, svlan_id = 48, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 49, cvlan_id = 145, user_id = 145}, + {gre_id = 0, svlan_id = 48, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 49, cvlan_id = 146, user_id = 146}, + {gre_id = 0, svlan_id = 48, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 49, cvlan_id = 147, user_id = 147}, + {gre_id = 0, svlan_id = 48, cvlan_id = 148, user_id = 148}, + {gre_id = 0, svlan_id = 49, cvlan_id = 148, 
user_id = 148}, + {gre_id = 0, svlan_id = 48, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 49, cvlan_id = 149, user_id = 149}, + {gre_id = 0, svlan_id = 48, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 49, cvlan_id = 150, user_id = 150}, + {gre_id = 0, svlan_id = 48, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 49, cvlan_id = 151, user_id = 151}, + {gre_id = 0, svlan_id = 48, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 49, cvlan_id = 152, user_id = 152}, + {gre_id = 0, svlan_id = 48, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 49, cvlan_id = 153, user_id = 153}, + {gre_id = 0, svlan_id = 48, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 49, cvlan_id = 154, user_id = 154}, + {gre_id = 0, svlan_id = 48, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 49, cvlan_id = 155, user_id = 155}, + {gre_id = 0, svlan_id = 48, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 49, cvlan_id = 156, user_id = 156}, + {gre_id = 0, svlan_id = 48, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 49, cvlan_id = 157, user_id = 157}, + {gre_id = 0, svlan_id = 48, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 49, cvlan_id = 158, user_id = 158}, + {gre_id = 0, svlan_id = 48, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 49, cvlan_id = 159, user_id = 159}, + {gre_id = 0, svlan_id = 48, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 49, cvlan_id = 160, user_id = 160}, + {gre_id = 0, svlan_id = 48, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 49, cvlan_id = 161, user_id = 161}, + {gre_id = 0, svlan_id = 48, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 49, cvlan_id = 162, user_id = 162}, + {gre_id = 0, svlan_id = 48, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 49, cvlan_id = 163, user_id = 163}, + {gre_id = 0, svlan_id = 48, cvlan_id = 164, user_id = 164}, + {gre_id = 0, svlan_id = 49, cvlan_id = 164, user_id = 164}, 
+ {gre_id = 0, svlan_id = 48, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 49, cvlan_id = 165, user_id = 165}, + {gre_id = 0, svlan_id = 48, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 49, cvlan_id = 166, user_id = 166}, + {gre_id = 0, svlan_id = 48, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 49, cvlan_id = 167, user_id = 167}, + {gre_id = 0, svlan_id = 48, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 49, cvlan_id = 168, user_id = 168}, + {gre_id = 0, svlan_id = 48, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 49, cvlan_id = 169, user_id = 169}, + {gre_id = 0, svlan_id = 48, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 49, cvlan_id = 170, user_id = 170}, + {gre_id = 0, svlan_id = 48, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 49, cvlan_id = 171, user_id = 171}, + {gre_id = 0, svlan_id = 48, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 49, cvlan_id = 172, user_id = 172}, + {gre_id = 0, svlan_id = 48, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 49, cvlan_id = 173, user_id = 173}, + {gre_id = 0, svlan_id = 48, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 49, cvlan_id = 174, user_id = 174}, + {gre_id = 0, svlan_id = 48, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 49, cvlan_id = 175, user_id = 175}, + {gre_id = 0, svlan_id = 48, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 49, cvlan_id = 176, user_id = 176}, + {gre_id = 0, svlan_id = 48, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 49, cvlan_id = 177, user_id = 177}, + {gre_id = 0, svlan_id = 48, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 49, cvlan_id = 178, user_id = 178}, + {gre_id = 0, svlan_id = 48, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 49, cvlan_id = 179, user_id = 179}, + {gre_id = 0, svlan_id = 48, cvlan_id = 180, user_id = 180}, + {gre_id = 0, svlan_id = 49, cvlan_id = 180, user_id = 180}, + {gre_id = 0, 
svlan_id = 48, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 49, cvlan_id = 181, user_id = 181}, + {gre_id = 0, svlan_id = 48, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 49, cvlan_id = 182, user_id = 182}, + {gre_id = 0, svlan_id = 48, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 49, cvlan_id = 183, user_id = 183}, + {gre_id = 0, svlan_id = 48, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 49, cvlan_id = 184, user_id = 184}, + {gre_id = 0, svlan_id = 48, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 49, cvlan_id = 185, user_id = 185}, + {gre_id = 0, svlan_id = 48, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 49, cvlan_id = 186, user_id = 186}, + {gre_id = 0, svlan_id = 48, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 49, cvlan_id = 187, user_id = 187}, + {gre_id = 0, svlan_id = 48, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 49, cvlan_id = 188, user_id = 188}, + {gre_id = 0, svlan_id = 48, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 49, cvlan_id = 189, user_id = 189}, + {gre_id = 0, svlan_id = 48, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 49, cvlan_id = 190, user_id = 190}, + {gre_id = 0, svlan_id = 48, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 49, cvlan_id = 191, user_id = 191}, + {gre_id = 0, svlan_id = 48, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 49, cvlan_id = 192, user_id = 192}, + {gre_id = 0, svlan_id = 48, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 49, cvlan_id = 193, user_id = 193}, + {gre_id = 0, svlan_id = 48, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 49, cvlan_id = 194, user_id = 194}, + {gre_id = 0, svlan_id = 48, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 49, cvlan_id = 195, user_id = 195}, + {gre_id = 0, svlan_id = 48, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 49, cvlan_id = 196, user_id = 196}, + {gre_id = 0, svlan_id = 48, 
cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 49, cvlan_id = 197, user_id = 197}, + {gre_id = 0, svlan_id = 48, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 49, cvlan_id = 198, user_id = 198}, + {gre_id = 0, svlan_id = 48, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 49, cvlan_id = 199, user_id = 199}, + {gre_id = 0, svlan_id = 48, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 49, cvlan_id = 200, user_id = 200}, + {gre_id = 0, svlan_id = 48, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 49, cvlan_id = 201, user_id = 201}, + {gre_id = 0, svlan_id = 48, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 49, cvlan_id = 202, user_id = 202}, + {gre_id = 0, svlan_id = 48, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 49, cvlan_id = 203, user_id = 203}, + {gre_id = 0, svlan_id = 48, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 49, cvlan_id = 204, user_id = 204}, + {gre_id = 0, svlan_id = 48, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 49, cvlan_id = 205, user_id = 205}, + {gre_id = 0, svlan_id = 48, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 49, cvlan_id = 206, user_id = 206}, + {gre_id = 0, svlan_id = 48, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 49, cvlan_id = 207, user_id = 207}, + {gre_id = 0, svlan_id = 48, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 49, cvlan_id = 208, user_id = 208}, + {gre_id = 0, svlan_id = 48, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 49, cvlan_id = 209, user_id = 209}, + {gre_id = 0, svlan_id = 48, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 49, cvlan_id = 210, user_id = 210}, + {gre_id = 0, svlan_id = 48, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 49, cvlan_id = 211, user_id = 211}, + {gre_id = 0, svlan_id = 48, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 49, cvlan_id = 212, user_id = 212}, + {gre_id = 0, svlan_id = 48, cvlan_id = 213, 
user_id = 213}, + {gre_id = 0, svlan_id = 49, cvlan_id = 213, user_id = 213}, + {gre_id = 0, svlan_id = 48, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 49, cvlan_id = 214, user_id = 214}, + {gre_id = 0, svlan_id = 48, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 49, cvlan_id = 215, user_id = 215}, + {gre_id = 0, svlan_id = 48, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 49, cvlan_id = 216, user_id = 216}, + {gre_id = 0, svlan_id = 48, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 49, cvlan_id = 217, user_id = 217}, + {gre_id = 0, svlan_id = 48, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 49, cvlan_id = 218, user_id = 218}, + {gre_id = 0, svlan_id = 48, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 49, cvlan_id = 219, user_id = 219}, + {gre_id = 0, svlan_id = 48, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 49, cvlan_id = 220, user_id = 220}, + {gre_id = 0, svlan_id = 48, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 49, cvlan_id = 221, user_id = 221}, + {gre_id = 0, svlan_id = 48, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 49, cvlan_id = 222, user_id = 222}, + {gre_id = 0, svlan_id = 48, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 49, cvlan_id = 223, user_id = 223}, + {gre_id = 0, svlan_id = 48, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 49, cvlan_id = 224, user_id = 224}, + {gre_id = 0, svlan_id = 48, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 49, cvlan_id = 225, user_id = 225}, + {gre_id = 0, svlan_id = 48, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 49, cvlan_id = 226, user_id = 226}, + {gre_id = 0, svlan_id = 48, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 49, cvlan_id = 227, user_id = 227}, + {gre_id = 0, svlan_id = 48, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 49, cvlan_id = 228, user_id = 228}, + {gre_id = 0, svlan_id = 48, cvlan_id = 229, user_id = 229}, 
+ {gre_id = 0, svlan_id = 49, cvlan_id = 229, user_id = 229}, + {gre_id = 0, svlan_id = 48, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 49, cvlan_id = 230, user_id = 230}, + {gre_id = 0, svlan_id = 48, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 49, cvlan_id = 231, user_id = 231}, + {gre_id = 0, svlan_id = 48, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 49, cvlan_id = 232, user_id = 232}, + {gre_id = 0, svlan_id = 48, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 49, cvlan_id = 233, user_id = 233}, + {gre_id = 0, svlan_id = 48, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 49, cvlan_id = 234, user_id = 234}, + {gre_id = 0, svlan_id = 48, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 49, cvlan_id = 235, user_id = 235}, + {gre_id = 0, svlan_id = 48, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 49, cvlan_id = 236, user_id = 236}, + {gre_id = 0, svlan_id = 48, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 49, cvlan_id = 237, user_id = 237}, + {gre_id = 0, svlan_id = 48, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 49, cvlan_id = 238, user_id = 238}, + {gre_id = 0, svlan_id = 48, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 49, cvlan_id = 239, user_id = 239}, + {gre_id = 0, svlan_id = 48, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 49, cvlan_id = 240, user_id = 240}, + {gre_id = 0, svlan_id = 48, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 49, cvlan_id = 241, user_id = 241}, + {gre_id = 0, svlan_id = 48, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 49, cvlan_id = 242, user_id = 242}, + {gre_id = 0, svlan_id = 48, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 49, cvlan_id = 243, user_id = 243}, + {gre_id = 0, svlan_id = 48, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 49, cvlan_id = 244, user_id = 244}, + {gre_id = 0, svlan_id = 48, cvlan_id = 245, user_id = 245}, + {gre_id = 0, 
svlan_id = 49, cvlan_id = 245, user_id = 245}, + {gre_id = 0, svlan_id = 48, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 49, cvlan_id = 246, user_id = 246}, + {gre_id = 0, svlan_id = 48, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 49, cvlan_id = 247, user_id = 247}, + {gre_id = 0, svlan_id = 48, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 49, cvlan_id = 248, user_id = 248}, + {gre_id = 0, svlan_id = 48, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 49, cvlan_id = 249, user_id = 249}, + {gre_id = 0, svlan_id = 48, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 49, cvlan_id = 250, user_id = 250}, + {gre_id = 0, svlan_id = 48, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 49, cvlan_id = 251, user_id = 251}, + {gre_id = 0, svlan_id = 48, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 49, cvlan_id = 252, user_id = 252}, + {gre_id = 0, svlan_id = 48, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 49, cvlan_id = 253, user_id = 253}, + {gre_id = 0, svlan_id = 48, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 49, cvlan_id = 254, user_id = 254}, + {gre_id = 0, svlan_id = 48, cvlan_id = 255, user_id = 255}, + {gre_id = 0, svlan_id = 49, cvlan_id = 255, user_id = 255}, +} diff --git a/VNFs/DPPD-PROX/cqm.c b/VNFs/DPPD-PROX/cqm.c new file mode 100644 index 00000000..19ea19de --- /dev/null +++ b/VNFs/DPPD-PROX/cqm.c @@ -0,0 +1,310 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <unistd.h> +#include <stdio.h> +#include <fcntl.h> + +#include "msr.h" +#include "cqm.h" +#include "log.h" +#include "prox_cfg.h" + +#define IA32_QM_EVTSEL 0xC8D +#define IA32_QM_CTR 0xC8E +#define IA32_QM_ASSOC 0xC8F +#define IA32_QM_L3CA_START 0xC90 +#define IA32_QM_L3CA_END 0xD0F + +#define L3_CACHE_OCCUPANCY 1 +#define L3_TOTAL_EXTERNAL_BANDWIDTH 2 +#define L3_LOCAL_EXTERNAL_BANDWIDTH 3 + +static struct rdt_features rdt_features; +static int cat_features = 0; + +static int stat_core; + +struct reg { + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; +}; + +static void cpuid(struct reg* r, uint32_t a, uint32_t b, uint32_t c, uint32_t d) +{ + asm volatile("cpuid" + : "=a" (r->eax), "=b" (r->ebx), "=c" (r->ecx), "=d" (r->edx) + : "a" (a), "b" (b), "c" (c), "d" (d)); +} + +void read_rdt_info(void) +{ + struct reg r; + int i; + uint64_t tmp_rmid; + int rc; + + cpuid(&r, 0x7, 0x0, 0x0, 0x0); + if ((r.ebx >> 12) & 1) { + plog_info("\tRDT-M. Supports Intel RDT Monitoring capability\n"); + rdt_features.rdtm_supported = 1; + } else { + plog_info("\tDoes not support Intel RDT Monitoring capability\n"); + return; + } + if ((r.ebx >> 15) & 1) { + plog_info("\tRDT-A. 
Supports Intel RDT Allocation capability\n"); + rdt_features.rdta_supported = 1; + } else { + plog_info("\tDoes not support Intel RDT Allocation capability\n"); + } + + cpuid(&r, 0xf, 0x0, 0x0, 0x0); + if ((r.edx >> 1) & 1) { + plog_info("\tSupports L3 Cache Intel RDT Monitoring\n"); + rdt_features.cmt_supported = 1; + } + plog_info("\tIntel RDT Monitoring has %d maximum RMID\n", r.ebx); + rdt_features.rdtm_max_rmid = r.ebx; + + cpuid(&r, 0xf, 0x0, 0x1, 0x0); + if ((r.edx >> 0) & 1) { + plog_info("\tSupports L3 occupancy monitoring\n"); + rdt_features.cmt_supported = 1; + } + if ((r.edx >> 1) & 1) { + plog_info("\tSupports L3 Total bandwidth monitoring\n"); + rdt_features.mbm_tot_supported = 1; + } + if ((r.edx >> 2) & 1) { + plog_info("\tSupports L3 Local bandwidth monitoring\n"); + rdt_features.mbm_loc_supported = 1; + } + rdt_features.cmt_max_rmid = r.ecx; + rdt_features.upscaling_factor = r.ebx; + rdt_features.event_types = r.edx; + + plog_info("\tL3 Cache Intel RDT Monitoring Capability has %d maximum RMID\n", r.ecx); + plog_info("\tUpscaling_factor = %d\n", rdt_features.upscaling_factor); + + cpuid(&r, 0x10, 0x0, 0x0, 0x0); + if ((r.ebx >> 1) & 1) { + plog_info("\tSupports L3 Cache Allocation Technology\n"); + rdt_features.l3_cat_supported = 1; + } + if ((r.ebx >> 2) & 1) { + plog_info("\tSupports L2 Cache Allocation Technology\n"); + rdt_features.l2_cat_supported = 1; + } + if ((r.ebx >> 3) & 1) { + plog_info("\tSupports MBA Allocation Technology\n"); + rdt_features.mba_supported = 1; + } + + cpuid(&r, 0x10, 0x0, 0x1, 0x0); + if ((r.ecx >> 2) & 1) + plog_info("\tCode and Data Prioritization Technology supported\n"); + plog_info("\tL3 Cache Allocation Technology Enumeration Highest COS number = %d\n", r.edx & 0xffff); + rdt_features.cat_max_rmid = r.edx & 0xffff; + rdt_features.cat_num_ways = r.eax + 1; + + cpuid(&r, 0x10, 0x0, 0x2, 0x0); + plog_info("\tL2 Cache Allocation Technology Enumeration COS number = %d\n", r.edx & 0xffff); + + cpuid(&r, 0x10, 0x0, 
0x3, 0x0); + plog_info("\tMemory Bandwidth Allocation Enumeration COS number = %d\n", r.edx & 0xffff); + rdt_features.mba_max_rmid = r.ecx; +} +int mbm_is_supported(void) +{ + return (rdt_features.rdtm_supported && rdt_features.mbm_tot_supported && rdt_features.mbm_loc_supported); +} + +int mba_is_supported(void) +{ + return (rdt_features.rdta_supported && rdt_features.mba_supported); +} + +int cmt_is_supported(void) +{ + if ((rdt_features.rdtm_supported || rdt_features.rdta_supported) && (prox_cfg.flags & DSF_DISABLE_CMT)) { + rdt_features.rdtm_supported = rdt_features.rdta_supported = 0; + plog_info("cqm and cat features disabled by config file\n"); + } + return (rdt_features.rdtm_supported && rdt_features.cmt_supported); +} + +int cat_is_supported(void) +{ + if ((rdt_features.rdtm_supported || rdt_features.rdta_supported) && (prox_cfg.flags & DSF_DISABLE_CMT)) { + rdt_features.rdtm_supported = rdt_features.rdta_supported = 0; + plog_info("cqm and cat features disabled by config file\n"); + } + return (rdt_features.rdta_supported && rdt_features.l3_cat_supported); +} + +int rdt_is_supported(void) +{ + return (cmt_is_supported() || cat_is_supported()); +} + +int rdt_get_features(struct rdt_features* feat) +{ + if (!cmt_is_supported() && !cat_is_supported()) + return 1; + + *feat = rdt_features; + return 0; +} + +int cqm_assoc(uint8_t lcore_id, uint64_t rmid) +{ + uint64_t val = 0; + int ret = 0; + ret = msr_read(&val, lcore_id, IA32_QM_ASSOC); + if (ret != 0) { + plog_err("Unable to read msr %x on core %u\n", IA32_QM_ASSOC, lcore_id); + } + plog_dbg("core %u, rmid was %lu, now setting to %lu\n", lcore_id, val & 0x3FFULL, rmid); + val &= ~0x3FFULL; + val |= (uint64_t)(rmid & 0x3FFULL); + ret = msr_write(lcore_id, val, IA32_QM_ASSOC); + if (ret != 0) { + plog_err("Unable to set msr %x on core %u to value %lx\n", IA32_QM_ASSOC, lcore_id, val); + } + return ret; +} + +int cqm_assoc_read(uint8_t lcore_id, uint64_t *rmid) +{ + return msr_read(rmid, lcore_id, IA32_QM_ASSOC); +} + 
+void rdt_init_stat_core(uint8_t lcore_id) +{ + stat_core = lcore_id; +} + +/* read a specific rmid value using core 0 */ +int cmt_read_ctr(uint64_t* ret, uint64_t rmid, uint8_t lcore_id) +{ + uint64_t event_id = L3_CACHE_OCCUPANCY; + + uint64_t es = rmid; + es = (es << 32) | event_id; + + if (msr_write(lcore_id, es, IA32_QM_EVTSEL) < 0) { + return 1; + } + + if (msr_read(ret, lcore_id, IA32_QM_CTR) < 0) { + return 2; + } + + return 0; +} + +int mbm_read_tot_bdw(uint64_t* ret, uint64_t rmid, uint8_t lcore_id) +{ + uint64_t event_id = L3_TOTAL_EXTERNAL_BANDWIDTH; + + uint64_t es = rmid; + es = (es << 32) | event_id; + + if (msr_write(lcore_id, es, IA32_QM_EVTSEL) < 0) { + return 1; + } + + if (msr_read(ret, lcore_id, IA32_QM_CTR) < 0) { + return 2; + } + return 0; +} + +int mbm_read_loc_bdw(uint64_t* ret, uint64_t rmid, uint8_t lcore_id) +{ + uint64_t event_id = L3_LOCAL_EXTERNAL_BANDWIDTH; + + uint64_t es = rmid; + es = (es << 32) | event_id; + + if (msr_write(lcore_id, es, IA32_QM_EVTSEL) < 0) { + return 1; + } + + if (msr_read(ret, lcore_id, IA32_QM_CTR) < 0) { + return 2; + } + return 0; +} + +int cat_log_init(uint8_t lcore_id) +{ + uint64_t tmp_rmid; + int rc, i = 0; + for (i = 0; i < IA32_QM_L3CA_END - IA32_QM_L3CA_START; i++) { + rc = msr_read(&tmp_rmid,lcore_id,IA32_QM_L3CA_START + i); + if (rc < 0) { + break; + } + plog_info("\tAt initialization: Cache allocation set %d (msr %x): mask %lx\n", i, IA32_QM_L3CA_START + i, tmp_rmid); + } + return i; +} + +int cat_set_class_mask(uint8_t lcore_id, uint32_t set, uint32_t mask) +{ + uint64_t tmp_rmid; + int rc; + rc = msr_write(lcore_id, mask, IA32_QM_L3CA_START + set); + if (rc < 0) { + plog_err("Failed to write Cache allocation\n"); + return -1; + } + return 0; +} + +int cat_get_class_mask(uint8_t lcore_id, uint32_t set, uint32_t *mask) +{ + uint64_t tmp_rmid; + int rc; + rc = msr_read(&tmp_rmid,lcore_id,IA32_QM_L3CA_START + set); + if (rc < 0) { + plog_err("Failed to read Cache allocation\n"); + return -1; + } + 
*mask = tmp_rmid & 0xffffffff; + return 0; +} + +void cat_reset_cache(uint32_t lcore_id) +{ + int rc; + uint32_t mask = (1 << rdt_features.cat_num_ways) -1; + for (uint32_t set = 0; set <= rdt_features.cat_max_rmid; set++) { + rc = msr_write(lcore_id, mask, IA32_QM_L3CA_START + set); + if (rc < 0) { + plog_err("Failed to reset Cache allocation\n"); + } + } +} + +int cat_get_num_ways(void) +{ + return rdt_features.cat_num_ways; +} diff --git a/VNFs/DPPD-PROX/cqm.h b/VNFs/DPPD-PROX/cqm.h new file mode 100644 index 00000000..65b1f453 --- /dev/null +++ b/VNFs/DPPD-PROX/cqm.h @@ -0,0 +1,73 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _CQM_H_ +#define _CQM_H_ + +#include <inttypes.h> +#include <stdio.h> + +#define PROX_MAX_CACHE_SET 16 + +struct rdt_features { + uint8_t rdtm_supported; + uint8_t rdta_supported; + uint8_t cmt_supported; + uint8_t mbm_tot_supported; + uint8_t mbm_loc_supported; + uint8_t l3_cat_supported; + uint8_t l2_cat_supported; + uint8_t mba_supported; + uint32_t rdtm_max_rmid; + uint32_t cmt_max_rmid; + uint32_t cat_max_rmid; + uint32_t mba_max_rmid; + uint32_t cat_num_ways; + uint32_t upscaling_factor; + uint32_t event_types; +}; + +struct prox_cache_set_cfg { + uint32_t mask; + uint32_t lcore_id; + int32_t socket_id; +}; + +int rdt_is_supported(void); +int cmt_is_supported(void); +int cat_is_supported(void); +int mbm_is_supported(void); +int mba_is_supported(void); + +int rdt_get_features(struct rdt_features* feat); + +int cqm_assoc(uint8_t lcore_id, uint64_t rmid); +int cqm_assoc_read(uint8_t lcore_id, uint64_t *rmid); + +void rdt_init_stat_core(uint8_t lcore_id); + +int cmt_read_ctr(uint64_t* ret, uint64_t rmid, uint8_t lcore_id); +int mbm_read_tot_bdw(uint64_t* ret, uint64_t rmid, uint8_t lcore_id); +int mbm_read_loc_bdw(uint64_t* ret, uint64_t rmid, uint8_t lcore_id); +void read_rdt_info(void); +extern struct prox_cache_set_cfg prox_cache_set_cfg[PROX_MAX_CACHE_SET]; +int cat_log_init(uint8_t lcore_id); +int cat_set_class_mask(uint8_t lcore_id, uint32_t set, uint32_t mask); +int cat_get_class_mask(uint8_t lcore_id, uint32_t set, uint32_t *mask); +void cat_reset_cache(uint32_t lcore_id); +int cat_get_num_ways(void); + +#endif /* _CQM_H_ */ diff --git a/VNFs/DPPD-PROX/defaults.c b/VNFs/DPPD-PROX/defaults.c new file mode 100644 index 00000000..eeb21b2d --- /dev/null +++ b/VNFs/DPPD-PROX/defaults.c @@ -0,0 +1,186 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <libgen.h> +#include <rte_sched.h> +#include <rte_version.h> + +#include "lconf.h" +#include "defaults.h" +#include "defines.h" +#include "prox_cfg.h" +#include "prox_port_cfg.h" +#include "etypes.h" +#include "toeplitz.h" + +#define TEN_GIGABIT 1250000000 +#define QUEUE_SIZES 128 +#define NB_PIPES 32768 +#define NB_MBUF 4096 +#define RING_RX_SIZE 256 +#define NB_RX_RING_DESC 256 +#define NB_TX_RING_DESC 256 + +/* 1500000 milliseconds */ +#define DEFAULT_CPE_TIMEOUT_MS 1500000 + +/**/ +#if DEFAULT_CPE_TIMEOUT_MS < (DRAIN_TIMEOUT/3000000) +#error DEFAULT_CPE_TIMEOUT_MS too small (needs to be at least 2 ms) +#endif + +static const struct rte_eth_conf default_port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /* Header Split disabled */ + .hw_ip_checksum = 0, /* IP checksum offload disabled */ + .hw_vlan_filter = 0, /* VLAN filtering disabled */ + .hw_vlan_strip = 0, /* VLAN filtering disabled */ + .jumbo_frame = 0, /* Jumbo frame support disabled */ + .hw_strip_crc = 1, /* CRC stripped by hardware --- always set to 1 in VF */ + .hw_vlan_extend = 0, + .mq_mode = 0 + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + }, + }, + .intr_conf = { + .lsc = 1, /* lsc interrupt feature enabled */ + }, +}; + +static const struct rte_eth_rxconf default_rx_conf = { + .rx_free_thresh = 32, +}; + +static struct rte_eth_txconf default_tx_conf = { + .tx_thresh = { + .pthresh = 32, + .hthresh = 8, + .wthresh = 0, + }, + .tx_free_thresh = 32, /* Use PMD default values */ + .tx_rs_thresh = 32, /* Use PMD 
default values */ +}; + +static struct rte_sched_port_params port_params_default = { + .name = "port_0", + .socket = 0, + .mtu = 6 + 6 + 4 + 4 + 2 + 1500, + .rate = 0, + .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT, + .n_subports_per_port = 1, + .n_pipes_per_subport = NB_PIPES, + .qsize = {QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES}, + .pipe_profiles = NULL, + .n_pipe_profiles = 1 /* only one profile */ +}; + +static struct rte_sched_pipe_params pipe_params_default = { + .tb_rate = TEN_GIGABIT / NB_PIPES, + .tb_size = 4000000, + + .tc_rate = {TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES}, + .tc_period = 40, + + .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, +}; + +static struct rte_sched_subport_params subport_params_default = { + .tb_rate = TEN_GIGABIT, + .tb_size = 4000000, + .tc_rate = {TEN_GIGABIT, TEN_GIGABIT, TEN_GIGABIT, TEN_GIGABIT}, + .tc_period = 40, /* default was 10 */ +}; + +void set_global_defaults(__attribute__((unused)) struct prox_cfg *prox_cfg) +{ +} + +void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_init) +{ + prox_cfg->master = RTE_MAX_LCORE; + + for (uint32_t i = 0; i < RTE_DIM(prox_cfg->cpe_table_ports); ++i) { + prox_cfg->cpe_table_ports[i] = -1; + } + + for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { + struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id]; + cur_lcore_cfg_init->id = lcore_id; + for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) { + struct task_args *targ = &cur_lcore_cfg_init->targs[task_id]; + for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) { + targ->rx_port_queue[port_id].port = OUT_DISCARD; + } + targ->flags |= TASK_ARG_DROP; + targ->flags |= TASK_ARG_QINQ_ACL; + targ->cpe_table_timeout_ms = DEFAULT_CPE_TIMEOUT_MS; + targ->n_flows = NB_PIPES; + /* configure default values for QoS (can be overwritten by config) */ + targ->qos_conf.port_params = 
port_params_default; + targ->qos_conf.pipe_params[0] = pipe_params_default; + targ->qos_conf.subport_params[0] = subport_params_default; + targ->qos_conf.port_params.pipe_profiles = targ->qos_conf.pipe_params; + targ->qos_conf.port_params.rate = TEN_GIGABIT; + targ->qinq_tag = ETYPE_8021ad; + targ->n_concur_conn = 8192*2; + + for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) { + targ->tx_port_queue[port_id].port = OUT_DISCARD; + } + + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) { + targ->mapping[i] = i; // identity + } + + targ->cbs = ETHER_MAX_LEN; + targ->ebs = ETHER_MAX_LEN; + targ->pbs = ETHER_MAX_LEN; + + targ->n_max_rules = 1024; + targ->ring_size = RING_RX_SIZE; + targ->nb_cache_mbuf = MAX_PKT_BURST * 4; + targ->overhead = ETHER_CRC_LEN + 20; + + targ->tunnel_hop_limit = 3; + targ->ctrl_freq = 1000; + targ->lb_friend_core = 0xFF; + targ->mbuf_size = MBUF_SIZE; + targ->n_pkts = 1024*64; + targ->runtime_flags |= TASK_TX_CRC; + targ->accuracy_limit_nsec = 5000; + } + } +} + +void set_port_defaults(void) +{ + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i ) { + prox_port_cfg[i].promiscuous = 1; + prox_port_cfg[i].n_rxd = NB_RX_RING_DESC; + prox_port_cfg[i].n_txd = NB_TX_RING_DESC; + prox_port_cfg[i].port_conf = default_port_conf; + prox_port_cfg[i].tx_conf = default_tx_conf; + prox_port_cfg[i].rx_conf = default_rx_conf; + prox_port_cfg[i].rx_ring[0] = '\0'; + prox_port_cfg[i].tx_ring[0] = '\0'; + prox_port_cfg[i].mtu = PROX_MTU; + } +} diff --git a/VNFs/DPPD-PROX/defaults.h b/VNFs/DPPD-PROX/defaults.h new file mode 100644 index 00000000..5fb31207 --- /dev/null +++ b/VNFs/DPPD-PROX/defaults.h @@ -0,0 +1,46 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _DEFAULTS_H_ +#define _DEFAULTS_H_ + +#include <rte_ether.h> + +struct prox_cfg; +struct lcore_cfg; + +void set_global_defaults(struct prox_cfg* prox_cfg); +void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_init); +void set_port_defaults(void); + +#define MAX_PKT_BURST 64 +#define MAX_RING_BURST 64 + +#if MAX_RING_BURST < MAX_PKT_BURST +#error MAX_RING_BURST < MAX_PKT_BURST +#endif + +#define NUM_VCPES 65536 +#define GRE_BUCKET_ENTRIES 4 +#define MAX_GRE (NUM_VCPES * GRE_BUCKET_ENTRIES) +#define MAX_RSS_QUEUE_BITS 9 + +#define PROX_VLAN_TAG_SIZE 4 +#define MBUF_SIZE (ETHER_MAX_LEN + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE) + +#define PROX_MTU (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN) + +#endif /* _DEFAULTS_H_ */ diff --git a/VNFs/DPPD-PROX/defines.h b/VNFs/DPPD-PROX/defines.h new file mode 100644 index 00000000..c2309be1 --- /dev/null +++ b/VNFs/DPPD-PROX/defines.h @@ -0,0 +1,59 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _DEFINES_H_ +#define _DEFINES_H_ + +// with 3GHz CPU +#define DRAIN_TIMEOUT __UINT64_C(6000000) // drain TX buffer every 2ms +#define TERM_TIMEOUT __UINT64_C(3000000000) // check if terminated every 1s + +/* DRAIN_TIMEOUT should be smaller than TERM_TIMEOUT as TERM_TIMEOUT + is only checked after DRAIN_TIMEOUT */ +#if TERM_TIMEOUT < DRAIN_TIMEOUT +#error TERM_TIMEOUT < DRAIN_TIMEOUT +#endif + +#ifndef IPv4_BYTES +#define IPv4_BYTES_FMT "%d.%d.%d.%d" +#define IPv4_BYTES(addr) \ + addr[0], addr[1], addr[2], addr[3] +#endif + +#ifndef IPv6_BYTES +#define IPv6_BYTES_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x" +#define IPv6_BYTES(addr) \ + addr[0], addr[1], addr[2], addr[3], \ + addr[4], addr[5], addr[6], addr[7], \ + addr[8], addr[9], addr[10], addr[11], \ + addr[12], addr[13], addr[14], addr[15] +#endif + +#ifndef MAC_BYTES +#define MAC_BYTES_FMT "%02x:%02x:%02x:%02x:%02x:%02x" + +#define MAC_BYTES(addr) \ + addr[0], addr[1], \ + addr[2], addr[3], \ + addr[4], addr[5] +#endif + +/* assume cpu byte order is little endian */ +#define PKT_TO_LUTQINQ(svlan, cvlan) ((((uint32_t)svlan) & 0x000F) << 4 | (((uint32_t)svlan) & 0xFF00) << 8 | (((uint32_t)cvlan) & 0xFF0F)) + +#define ROUTE_ERR 254 + +#endif /* _DEFINES_H_ */ diff --git a/VNFs/DPPD-PROX/display.c b/VNFs/DPPD-PROX/display.c new file mode 100644 index 00000000..2a351a09 --- /dev/null +++ b/VNFs/DPPD-PROX/display.c @@ -0,0 +1,994 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <curses.h> + +#include <rte_cycles.h> +#include <string.h> +#include <signal.h> +#include <math.h> +#include <signal.h> + +#include "display_latency.h" +#include "display_mempools.h" +#include "display_ports.h" +#include "display_priority.h" +#include "display_rings.h" +#include "display_pkt_len.h" +#include "display_l4gen.h" +#include "display_tasks.h" + +#include "cqm.h" +#include "msr.h" +#include "display.h" +#include "log.h" +#include "commands.h" +#include "main.h" +#include "stats.h" +#include "stats_port.h" +#include "stats_latency.h" +#include "stats_global.h" +#include "stats_core.h" +#include "prox_cfg.h" +#include "prox_assert.h" +#include "version.h" +#include "quit.h" +#include "prox_port_cfg.h" + +static struct screen_state screen_state = { + .pps_unit = 1000, + .chosen_screen = -1, +}; + +static struct display_screen *display_screens[16]; +static struct display_screen *current_screen; +static size_t n_screens; +static size_t longest_title; + +void display_set_pps_unit(int val) +{ + screen_state.pps_unit = val; +} + +/* Set up the display mutex as recursive. This enables threads to use + display_[un]lock() to lock the display when multiple calls to for + instance plog_info() need to be made. 
*/ +static pthread_mutex_t disp_mtx = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; + +static void stats_display_layout(uint8_t in_place); + +void display_lock(void) +{ + pthread_mutex_lock(&disp_mtx); +} + +void display_unlock(void) +{ + pthread_mutex_unlock(&disp_mtx); +} + +/* Advanced text output */ +static WINDOW *scr = NULL, *win_txt, *win_general, *win_cmd, *win_stat, *win_title, *win_tabs, *win_help; +static int win_txt_height = 1; +static int title_len; + +static uint16_t max_n_lines; + +static int cmd_cursor_pos; +static const char *cmd_cmd; +static int cmd_len; + +/* Colors used in the interface */ +enum colors { + INVALID_COLOR, + NO_COLOR, + RED_ON_BLACK, + BLACK_ON_CYAN, + BLACK_ON_GREEN, + BLACK_ON_WHITE, + BLACK_ON_YELLOW, + YELLOW_ON_BLACK, + WHITE_ON_RED, + YELLOW_ON_NOTHING, + GREEN_ON_NOTHING, + RED_ON_NOTHING, + BLUE_ON_NOTHING, + CYAN_ON_NOTHING, + MAGENTA_ON_NOTHING, + WHITE_ON_NOTHING, +}; + +int display_getch(void) +{ + int ret; + + display_lock(); + ret = wgetch(scr); + display_unlock(); + + return ret; +} + +void display_cmd(const char *cmd, int cl, int cursor_pos) +{ + cmd_len = cl; + if (cursor_pos == -1 || cursor_pos > cmd_len) + cursor_pos = cmd_len; + cmd_cursor_pos = cursor_pos; + cmd_cmd = cmd; + + display_lock(); + werase(win_cmd); + if (cursor_pos < cmd_len) { + waddnstr(win_cmd, cmd, cursor_pos); + wbkgdset(win_cmd, COLOR_PAIR(YELLOW_ON_BLACK)); + waddnstr(win_cmd, cmd + cursor_pos, 1); + wbkgdset(win_cmd, COLOR_PAIR(BLACK_ON_YELLOW)); + waddnstr(win_cmd, cmd + cursor_pos + 1, cmd_len - (cursor_pos + 1)); + } + else { + waddnstr(win_cmd, cmd, cmd_len); + wmove(win_cmd, cursor_pos, 0); + wbkgdset(win_cmd, COLOR_PAIR(YELLOW_ON_BLACK)); + waddstr(win_cmd, " "); + wbkgdset(win_cmd, COLOR_PAIR(BLACK_ON_YELLOW)); + } + + wattroff(win_stat, A_UNDERLINE); + wrefresh(win_cmd); + display_unlock(); +} + +static void refresh_cmd_win(void) +{ + display_cmd(cmd_cmd, cmd_len, cmd_cursor_pos); +} + +static WINDOW *create_subwindow(int height, int 
width, int y_pos, int x_pos) +{ + WINDOW *win = subwin(scr, height, width, y_pos, x_pos); + touchwin(scr); + return win; +} + +/* The limit parameter sets the last column that something can be + printed. If characters would be printed _past_ the limit, the last + character printed within the limit will be a '~' to signify that + the string cut off. The limit parameter will be ignored if its + value is -1 */ +static inline int mvwaddstrv(WINDOW *win, int y, int x, int limit, const char *fmt, va_list ap) +{ + char buf[1024]; + int ret; + + ret = vsnprintf(buf, sizeof(buf), fmt, ap); + int len = ret; + + wmove(win, y, x); + if (x > COLS - 1) { + return 0; + } + + /* To prevent strings from wrapping, cut the string at the end + of the screen. */ + if (x + len > COLS) { + buf[COLS - 1 - x] = 0; + len = COLS - x; + } + + if (limit != -1 && x + len > limit) { + int new_len = limit - x; + + if (new_len < 0) + return 0; + buf[new_len] = '~'; + buf[new_len + 1] = 0; + } + + waddstr(win, buf); + return ret; +} + +/* Format string capable [mv]waddstr() wrappers */ +__attribute__((format(printf, 4, 5))) static inline int mvwaddstrf(WINDOW* win, int y, int x, const char *fmt, ...) +{ + int ret; + va_list ap; + + va_start(ap, fmt); + ret = mvwaddstrv(win, y, x, -1, fmt, ap); + va_end(ap); + return ret; +} + +__attribute__((format(printf, 5, 6))) static inline int mvwaddstrf_limit(WINDOW* win, int y, int x, int limit, const char *fmt, ...) +{ + int ret; + va_list ap; + + va_start(ap, fmt); + ret = mvwaddstrv(win, y, x, limit, fmt, ap); + va_end(ap); + return ret; +} + +// red: link down; Green: link up +static short link_color(const uint8_t if_port) +{ + return COLOR_PAIR(prox_port_cfg[if_port].link_up? 
GREEN_ON_NOTHING : RED_ON_NOTHING); +} + +static void (*ncurses_sigwinch)(int); + +static void sigwinch(int in) +{ + if (ncurses_sigwinch) + ncurses_sigwinch(in); + refresh(); + stats_display_layout(0); +} + +static void set_signal_handler(void) +{ + struct sigaction old; + + sigaction(SIGWINCH, NULL, &old); + ncurses_sigwinch = old.sa_handler; + + signal(SIGWINCH, sigwinch); +} + +void display_column_port_ring(const struct display_column *display_column, int row, struct port_queue *ports, int port_count, struct rte_ring **rings, int ring_count) +{ + if (row >= max_n_lines) + return; + + int pos = display_column->offset; + int limit = pos + display_column->width; + + for (int i = 0; i < port_count && pos < limit; i++) { + wbkgdset(win_stat, link_color(ports[i].port)); + pos += mvwaddstrf_limit(win_stat, row + 2, pos, limit, "%u", ports[i].port); + wbkgdset(win_stat, COLOR_PAIR(NO_COLOR)); + + if (i != port_count - 1) + pos += mvwaddstrf_limit(win_stat, row + 2, pos, limit, " "); + } + + for (uint8_t ring_id = 0; ring_id < ring_count && pos < limit; ++ring_id) { + pos += mvwaddstrf_limit(win_stat, row + 2, pos, limit, "%s", rings[ring_id]->name); + } +} + +static void display_add_screen(struct display_screen *screen) +{ + display_screens[n_screens++] = screen; + if (longest_title < strlen(screen->title)) + longest_title = strlen(screen->title); +} + +static void display_init_screens(void) +{ + if (n_screens) + return; + + display_add_screen(display_tasks()); + display_add_screen(display_ports()); + display_add_screen(display_mempools()); + display_add_screen(display_latency()); + display_add_screen(display_rings()); + display_add_screen(display_l4gen()); + display_add_screen(display_pkt_len()); + display_add_screen(display_priority()); +} + +void display_init(void) +{ + scr = initscr(); + start_color(); + /* Assign default foreground/background colors to color number -1 */ + use_default_colors(); + + init_pair(NO_COLOR, -1, -1); + init_pair(RED_ON_BLACK, COLOR_RED, 
COLOR_BLACK); + init_pair(BLACK_ON_CYAN, COLOR_BLACK, COLOR_CYAN); + init_pair(BLACK_ON_GREEN, COLOR_BLACK, COLOR_GREEN); + init_pair(BLACK_ON_WHITE, COLOR_BLACK, COLOR_WHITE); + init_pair(BLACK_ON_YELLOW, COLOR_BLACK, COLOR_YELLOW); + init_pair(YELLOW_ON_BLACK, COLOR_YELLOW, COLOR_BLACK); + init_pair(WHITE_ON_RED, COLOR_WHITE, COLOR_RED); + init_pair(YELLOW_ON_NOTHING, COLOR_YELLOW, -1); + init_pair(GREEN_ON_NOTHING, COLOR_GREEN, -1); + init_pair(RED_ON_NOTHING, COLOR_RED, -1); + init_pair(BLUE_ON_NOTHING, COLOR_BLUE, -1); + init_pair(CYAN_ON_NOTHING, COLOR_CYAN, -1); + init_pair(MAGENTA_ON_NOTHING, COLOR_MAGENTA, -1); + init_pair(WHITE_ON_NOTHING, COLOR_WHITE, -1); + /* nodelay(scr, TRUE); */ + noecho(); + curs_set(0); + /* Create fullscreen log window. When stats are displayed + later, it is recreated with appropriate dimensions. */ + win_txt = create_subwindow(0, 0, 0, 0); + wbkgd(win_txt, COLOR_PAIR(0)); + + idlok(win_txt, FALSE); + /* Get scrolling */ + scrollok(win_txt, TRUE); + /* Leave cursor where it was */ + leaveok(win_txt, TRUE); + + refresh(); + + set_signal_handler(); + + max_n_lines = (LINES - 5 - 2 - 3); + /* core_port_height = max_n_lines < stats_get_n_tasks_tot()? 
max_n_lines : stats_get_n_tasks_tot(); */ + + display_init_screens(); + display_screen(0); + stats_display_layout(0); +} + +static void display_page_recalc_offsets(struct display_page *display_page) +{ + struct display_table *table; + struct display_column *col; + int total_offset = 0; + + for (int i = 0; i < display_page->n_tables; ++i) { + table = &display_page->tables[i]; + + if (i != 0) + total_offset += 1; + table->offset = total_offset; + for (int j = 0; j < table->n_cols; ++j) { + col = &table->cols[j]; + col->offset = total_offset; + if (j + 1 != table->n_cols) + total_offset += 1; + total_offset += col->width; + } + table->width = total_offset - table->offset; + } +} + +void display_page_init(struct display_page *display_page) +{ + struct display_table *table; + struct display_column *col; + int table_width = 0; + int table_offset = 0; + + memset(display_page, 0, sizeof(*display_page)); + display_page->n_tables = 0; + for (size_t i = 0; i < sizeof(display_page->tables)/sizeof(display_page->tables[0]); ++i) { + table = &display_page->tables[i]; + for (size_t j = 0; j < sizeof(table->cols)/sizeof(table->cols[0]); ++j) { + col = &table->cols[j]; + col->display_page = display_page; + } + } +} + +struct display_table *display_page_add_table(struct display_page *display_page) +{ + struct display_table *table = &display_page->tables[display_page->n_tables]; + + display_page->n_tables++; + return table; +} + +void display_table_init(struct display_table *table, const char *title) +{ + strcpy(table->title, title); + table->n_cols = 0; +} + +struct display_column *display_table_add_col(struct display_table *table) +{ + struct display_column *col = &table->cols[table->n_cols]; + + table->n_cols++; + return col; +} + +void display_column_init(struct display_column *display_column, const char *title, unsigned width) +{ + if (width < strlen(title)) + width = strlen(title); + + strcpy(display_column->title, title); + display_column->width = width; + 
display_page_recalc_offsets(display_column->display_page); +} + +int display_column_get_width(const struct display_column *display_column) +{ + return display_column->width; +} + +void display_page_draw_frame(const struct display_page *display_page, int height) +{ + const struct display_table *table; + const struct display_column *col; + + wattron(win_stat, A_BOLD); + wbkgdset(win_stat, COLOR_PAIR(YELLOW_ON_NOTHING)); + + for (int i = 0; i < display_page->n_tables; ++i) { + table = &display_page->tables[i]; + + if (i != 0) + mvwvline(win_stat, 0, table->offset - 1, ACS_VLINE, height + 2); + + mvwaddstrf(win_stat, 0, table->offset + table->width / 2 - strlen(table->title) / 2, "%s", table->title); + for (int j = 0; j < table->n_cols; ++j) { + col = &table->cols[j]; + + if (j != 0) + mvwvline(win_stat, 1, col->offset - 1, ACS_VLINE, height + 1); + mvwaddstrf(win_stat, 1, col->offset + col->width / 2 - strlen(col->title) / 2, "%s", col->title); + } + + if (i + 1 == display_page->n_tables) + mvwvline(win_stat, 0, table->offset + table->width, ACS_VLINE, height + 2); + } + wbkgdset(win_stat, COLOR_PAIR(NO_COLOR)); + wattroff(win_stat, A_BOLD); +} + +void display_column_print(const struct display_column *display_column, int row, const char *fmt, ...) +{ + if (row >= max_n_lines) + return; + + va_list ap; + char buffer[128] = {0}; + char *to_print = buffer + 64; + + va_start(ap, fmt); + int len = vsnprintf(to_print, sizeof(buffer) - 64, fmt, ap); + va_end(ap); + + int offset = 0; + /* If column is too long, add ~ at the end. If it is too + short, align on the right. 
*/ + if (len > display_column->width) { + to_print[display_column->width - 1] = '~'; + to_print[display_column->width] = '\0'; + } else { + int diff = display_column->width - len; + + to_print += len; + to_print -= display_column->width; + for (int i = 0; i < diff; i++) + to_print[i] = ' '; + } + + mvwaddstrf(win_stat, row + 2, display_column->offset, "%s", to_print); +} + +void display_column_print_core_task(const struct display_column *display_column, int row, struct lcore_cfg *lconf, struct task_args *targ) +{ + if (row >= max_n_lines) + return; + + if (lconf->n_tasks_run == 0) { + wattron(win_stat, A_BOLD); + wbkgdset(win_stat, COLOR_PAIR(RED_ON_NOTHING)); + } + if (targ->id == 0) + mvwaddstrf(win_stat, row + 2, display_column->offset, "%2u/", lconf->id); + if (lconf->n_tasks_run == 0) { + wattroff(win_stat, A_BOLD); + wbkgdset(win_stat, COLOR_PAIR(NO_COLOR)); + } + if (!lconf_task_is_running(lconf, targ->id)) { + wattron(win_stat, A_BOLD); + wbkgdset(win_stat, COLOR_PAIR(RED_ON_NOTHING)); + } + mvwaddstrf(win_stat, row + 2, display_column->offset + 3, "%1u", targ->id); + if (!lconf_task_is_running(lconf, targ->id)) { + wattroff(win_stat, A_BOLD); + wbkgdset(win_stat, COLOR_PAIR(NO_COLOR)); + } +} + +static void redraw_tabs(unsigned screen_id) +{ + const size_t len = longest_title + 1; + + for (size_t i = 0; i < n_screens; ++i) { + if (i == screen_id) + wbkgdset(win_tabs, COLOR_PAIR(BLACK_ON_GREEN)); + + mvwaddstrf(win_tabs, 0, i*(len + 3), "%zu ", i+1); + if (i != screen_id) + wbkgdset(win_tabs, COLOR_PAIR(GREEN_ON_NOTHING)); + mvwaddstrf(win_tabs, 0, i*(len + 3) + 2, "%s", display_screens[i]->title); + for (size_t j = strlen(display_screens[i]->title); j < len - 1; ++j) + mvwaddstrf(win_tabs, 0, i*(len + 3) + 2 + j, " "); + if (i != screen_id) + wbkgdset(win_tabs, COLOR_PAIR(NO_COLOR)); + if (i == screen_id) + wbkgdset(win_tabs, COLOR_PAIR(NO_COLOR)); + } + + wrefresh(win_tabs); +} + +static void draw_title(void) +{ + char title_str[128]; + + 
snprintf(title_str, sizeof(title_str), "%s %s: %s", PROGRAM_NAME, VERSION_STR, prox_cfg.name); + + wbkgd(win_title, COLOR_PAIR(BLACK_ON_GREEN)); + title_len = strlen(title_str); + mvwaddstrf(win_title, 0, (COLS - title_len)/2, "%s", title_str); + + redraw_tabs(screen_state.chosen_screen); +} + +static void draw_general_frame(void) +{ + if (screen_state.toggle == 0) { + wattron(win_general, A_BOLD); + wbkgdset(win_general, COLOR_PAIR(MAGENTA_ON_NOTHING)); + mvwaddstrf(win_general, 0, 9, "rx: tx: diff: rx: tx: %%:"); + mvwaddstrf(win_general, 1, 9, "rx: tx: err: rx: tx: err: %%:"); + wbkgdset(win_general, COLOR_PAIR(NO_COLOR)); + + wbkgdset(win_general, COLOR_PAIR(BLUE_ON_NOTHING)); + mvwaddstrf(win_general, 0, 0, "Host pps "); + mvwaddstrf(win_general, 1, 0, "NICs pps "); + + wbkgdset(win_general, COLOR_PAIR(CYAN_ON_NOTHING)); + mvwaddstrf(win_general, 0, 56, "avg"); + mvwaddstrf(win_general, 1, 56, "avg"); + wbkgdset(win_general, COLOR_PAIR(NO_COLOR)); + wattroff(win_general, A_BOLD); + } else { + wattron(win_general, A_BOLD); + wbkgdset(win_general, COLOR_PAIR(BLUE_ON_NOTHING)); + mvwaddstrf(win_general, 0, 9, "rx: tx: rx-tx: tx/rx: rx/tx:"); + mvwaddstrf(win_general, 1, 9, "rx: tx: err: tx/rx: rx/tx:"); + wbkgdset(win_general, COLOR_PAIR(NO_COLOR)); + + wbkgdset(win_general, COLOR_PAIR(CYAN_ON_NOTHING)); + mvwaddstrf(win_general, 0, 0, "Host tot "); + mvwaddstrf(win_general, 1, 0, "NICs tot "); + wattroff(win_general, A_BOLD); + } +} + +static void draw_status_bar(void) +{ + wbkgd(win_help, COLOR_PAIR(BLACK_ON_WHITE)); + werase(win_help); + mvwaddstrf(win_help, 0, 0, + "Enter 'help' or command, <ESC> or 'quit' to exit, " + "1-%zu to switch screens and 0 to reset stats, '=' to toggle between per-sec and total stats", + n_screens); + wrefresh(win_help); + mvwin(win_help, LINES - 1, 0); +} + +static void draw_log_window(void) +{ + idlok(win_txt, FALSE); + /* Get scrolling */ + scrollok(win_txt, TRUE); + + /* Leave cursor where it was */ + leaveok(win_txt, TRUE); + 
wbkgd(win_txt, COLOR_PAIR(BLACK_ON_CYAN));
+	wrefresh(win_txt);
+}
+
+static void stats_display_layout(uint8_t in_place)
+{
+	uint8_t cur_stats_height;
+
+	cur_stats_height = current_screen->get_height();
+	cur_stats_height = cur_stats_height > max_n_lines? max_n_lines: cur_stats_height;
+
+	display_lock();
+	if (!in_place) {
+		// moving existing windows does not work
+		delwin(win_stat); /* fix: was a duplicated delwin(win_txt), which double-deleted win_txt and leaked win_stat */
+		delwin(win_general);
+		delwin(win_title);
+		delwin(win_tabs);
+		delwin(win_cmd);
+		delwin(win_txt);
+		delwin(win_help);
+
+		clear();
+	}
+
+	if (!in_place) {
+		win_stat = create_subwindow(cur_stats_height + 2, 0, 4, 0);
+		win_tabs = create_subwindow(1, 0, 1, 0);
+		win_general = create_subwindow(2, 0, 2, 0);
+		win_title = create_subwindow(1, 0, 0, 0);
+		win_cmd = create_subwindow(1, 0, cur_stats_height + 2 + 4, 0);
+		win_txt_height = LINES - cur_stats_height - 2 - 3 - 3;
+		win_txt = create_subwindow(win_txt_height, 0, cur_stats_height + 4 + 3, 0);
+		win_help = create_subwindow(1, 0, LINES - 1, 0);
+	}
+
+	draw_title();
+	draw_general_frame();
+	/* Command line */
+	wbkgd(win_cmd, COLOR_PAIR(BLACK_ON_YELLOW));
+	idlok(win_cmd, FALSE);
+	/* Move cursor at insertion point */
+	leaveok(win_cmd, FALSE);
+
+	draw_status_bar();
+	draw_log_window();
+
+	/* Draw everything to the screen */
+	refresh();
+	current_screen->draw_frame(&screen_state);
+	display_unlock();
+
+	refresh_cmd_win();
+	display_stats();
+}
+
+void display_end(void)
+{
+	pthread_mutex_destroy(&disp_mtx);
+
+	if (scr != NULL) {
+		endwin();
+	}
+}
+
+static void pps_print(WINDOW *dst_scr, int y, int x, uint64_t val, int is_blue)
+{
+	uint64_t rx_pps_disp = val;
+	uint64_t rx_pps_disp_frac = 0;
+	uint32_t ten_pow3 = 0;
+	static const char *units = " KMG";
+	char rx_unit = ' ';
+
+	while (rx_pps_disp > 1000) { /* NOTE(review): exactly 1000 is printed un-scaled by %3lu — confirm intended */
+		rx_pps_disp /= 1000;
+		rx_pps_disp_frac = (val - rx_pps_disp*1000) / 10;
+		val /= 1000;
+		ten_pow3++;
+	}
+
+	if (ten_pow3 >= strlen(units)) {
+		wbkgdset(dst_scr, COLOR_PAIR(RED_ON_NOTHING));
+		mvwaddstrf(dst_scr, y, 
x, "---"); + wbkgdset(dst_scr, COLOR_PAIR(NO_COLOR)); + return; + } + + rx_unit = units[ten_pow3]; + + wattron(dst_scr, A_BOLD); + if (is_blue) { + wbkgdset(dst_scr, COLOR_PAIR(BLUE_ON_NOTHING)); + } + else + wbkgdset(dst_scr, COLOR_PAIR(CYAN_ON_NOTHING)); + + mvwaddstrf(dst_scr, y, x, "%3lu", rx_pps_disp); + if (rx_unit != ' ') { + mvwaddstrf(dst_scr, y, x + 3, ".%02lu", rx_pps_disp_frac); + wattroff(dst_scr, A_BOLD); + wbkgdset(dst_scr, COLOR_PAIR(WHITE_ON_NOTHING)); + wattron(dst_scr, A_BOLD); + mvwaddstrf(dst_scr, y, x + 6, "%c", rx_unit); + wattroff(dst_scr, A_BOLD); + wbkgdset(dst_scr, COLOR_PAIR(NO_COLOR)); + } + else { + mvwaddstrf(dst_scr, y, x + 3, " "); + } + wattroff(dst_scr, A_BOLD); + wbkgdset(dst_scr, COLOR_PAIR(NO_COLOR)); +} + +static void display_stats_general_per_sec(void) +{ + struct global_stats_sample *gsl = stats_get_global_stats(1); + struct global_stats_sample *gsp = stats_get_global_stats(0); + + uint64_t rx_pps = val_to_rate(gsl->host_rx_packets - gsp->host_rx_packets, gsl->tsc - gsp->tsc); + uint64_t tx_pps = val_to_rate(gsl->host_tx_packets - gsp->host_tx_packets, gsl->tsc - gsp->tsc); + /* Host: RX, TX, Diff */ + pps_print(win_general, 0, 12, rx_pps, 1); + pps_print(win_general, 0, 25, tx_pps, 1); + + uint64_t diff = 0; + if (rx_pps > tx_pps) + diff = rx_pps - tx_pps; + pps_print(win_general, 0, 40, diff, 1); + + uint64_t nics_rx_pps = val_to_rate(gsl->nics_rx_packets - gsp->nics_rx_packets, gsl->tsc - gsp->tsc); + uint64_t nics_tx_pps = val_to_rate(gsl->nics_tx_packets - gsp->nics_tx_packets, gsl->tsc - gsp->tsc); + uint64_t nics_ierrors = val_to_rate(gsl->nics_ierrors - gsp->nics_ierrors, gsl->tsc - gsp->tsc); + uint64_t nics_imissed = val_to_rate(gsl->nics_imissed - gsp->nics_imissed, gsl->tsc - gsp->tsc); + + /* NIC: RX, TX, Diff */ + pps_print(win_general, 1, 12, nics_rx_pps, 1); + pps_print(win_general, 1, 25, nics_tx_pps, 1); + pps_print(win_general, 1, 40, nics_ierrors + nics_imissed, 1); + + wbkgdset(win_general, 
COLOR_PAIR(CYAN_ON_NOTHING)); + wattron(win_general, A_BOLD); + mvwaddstrf(win_general, 0, 103, "%6.2f", tx_pps > rx_pps? 100 : tx_pps * 100.0 / rx_pps); + wattroff(win_general, A_BOLD); + wbkgdset(win_general, COLOR_PAIR(NO_COLOR)); + + struct global_stats_sample *gsb = stats_get_global_stats_beg(); + if (gsb) { + uint64_t rx_pps = val_to_rate(gsl->host_rx_packets - gsb->host_rx_packets, gsl->tsc - gsb->tsc); + uint64_t tx_pps = val_to_rate(gsl->host_tx_packets - gsb->host_tx_packets, gsl->tsc - gsb->tsc); + + uint64_t nics_rx_pps = val_to_rate(gsl->nics_rx_packets - gsb->nics_rx_packets, gsl->tsc - gsb->tsc); + uint64_t nics_tx_pps = val_to_rate(gsl->nics_tx_packets - gsb->nics_tx_packets, gsl->tsc - gsb->tsc); + uint64_t nics_ierrors = val_to_rate(gsl->nics_ierrors - gsb->nics_ierrors, gsl->tsc - gsb->tsc); + uint64_t nics_imissed = val_to_rate(gsl->nics_imissed - gsb->nics_imissed, gsl->tsc - gsb->tsc); + + pps_print(win_general, 0, 64, rx_pps, 0); + pps_print(win_general, 0, 77, tx_pps, 0); + + pps_print(win_general, 1, 64, nics_rx_pps, 0); + pps_print(win_general, 1, 77, nics_tx_pps, 0); + pps_print(win_general, 1, 91, nics_ierrors + nics_imissed, 0); + + wbkgdset(win_general, COLOR_PAIR(CYAN_ON_NOTHING)); + wattron(win_general, A_BOLD); + uint64_t nics_in = gsl->host_rx_packets - gsb->host_rx_packets + gsl->nics_ierrors - gsb->nics_ierrors + gsl->nics_imissed - gsb->nics_imissed; + uint64_t nics_out = gsl->host_tx_packets - gsb->host_tx_packets; + mvwaddstrf(win_general, 1, 103, "%6.2f", nics_out > nics_in? 
+ 100 : nics_out * 100.0 / nics_in); + wattron(win_general, A_BOLD); + wbkgdset(win_general, COLOR_PAIR(NO_COLOR)); + } +} + +static void display_stats_general_total(void) +{ + struct global_stats_sample *gsl = stats_get_global_stats(1); + + int64_t diff = (int64_t)gsl->host_rx_packets - gsl->host_tx_packets; + uint32_t percent; + + /* Host: RX, TX, Diff */ + mvwaddstrf(win_general, 0, 13, "%16lu", gsl->host_rx_packets); + mvwaddstrf(win_general, 0, 35, "%16lu", gsl->host_tx_packets); + mvwaddstrf(win_general, 0, 60, "%16"PRId64"", diff); + if (gsl->host_rx_packets == 0) + percent = 1000000; + else + percent = gsl->host_tx_packets * 1000000 / gsl->host_rx_packets; + mvwaddstrf(win_general, 0, 88, "%3u.%04u%%", percent / 10000, percent % 10000); + if (gsl->host_tx_packets == 0) + percent = 1000000; + else + percent = gsl->host_rx_packets * 1000000 / gsl->host_tx_packets; + mvwaddstrf(win_general, 0, 106, "%3u.%04u%%", percent / 10000, percent % 10000); + + mvwaddstrf(win_general, 1, 13, "%16lu", gsl->nics_rx_packets); + mvwaddstrf(win_general, 1, 35, "%16lu", gsl->nics_tx_packets); + mvwaddstrf(win_general, 1, 60, "%16lu", gsl->nics_ierrors + gsl->nics_imissed); + if (gsl->nics_rx_packets == 0) + percent = 1000000; + else + percent = gsl->nics_tx_packets * 1000000 / gsl->nics_rx_packets; + mvwaddstrf(win_general, 1, 88, "%3u.%04u%%", percent / 10000, percent % 10000); + if (gsl->nics_tx_packets == 0) + percent = 1000000; + else + percent = gsl->nics_rx_packets * 1000000 / gsl->nics_tx_packets; + mvwaddstrf(win_general, 1, 106, "%3u.%04u%%", percent / 10000, percent % 10000); +} + +static void display_stats_general(void) +{ + /* moment when stats were gathered. 
*/ + uint64_t cur_tsc = stats_get_last_tsc(); + uint64_t up_time = tsc_to_sec(cur_tsc - stats_global_start_tsc()); + uint64_t up_time2 = tsc_to_sec(cur_tsc - stats_global_beg_tsc()); + uint64_t rem_time = -1; + char title_str[128] = {0}; + + if (stats_global_end_tsc()) { + uint64_t rem_tsc = stats_global_end_tsc() > cur_tsc? stats_global_end_tsc() - cur_tsc : 0; + + rem_time = tsc_to_sec(rem_tsc); + } + + if (up_time != up_time2 && cur_tsc >= stats_global_beg_tsc()) { + if (stats_global_end_tsc()) + snprintf(title_str, sizeof(title_str), "%5lu (%lu) up, %lu rem", up_time, up_time2, rem_time); + else + snprintf(title_str, sizeof(title_str), "%5lu (%lu) up", up_time, up_time2); + } + else { + if (stats_global_end_tsc()) + snprintf(title_str, sizeof(title_str), "%5lu up, %lu rem", up_time, rem_time); + else + snprintf(title_str, sizeof(title_str), "%5lu up", up_time); + } + + /* Only print up time information if there is enough space */ + if ((int)((COLS + title_len)/2 + strlen(title_str) + 1) < COLS) { + mvwaddstrf(win_title, 0, COLS - strlen(title_str), "%s", title_str); + wrefresh(win_title); + } + + if (screen_state.toggle == 0) + display_stats_general_per_sec(); + else + display_stats_general_total(); + + wrefresh(win_general); +} + +char *print_time_unit_err_usec(char *dst, struct time_unit_err *t) +{ + uint64_t nsec_total = time_unit_to_nsec(&t->time); + + uint64_t usec = nsec_total/1000; + uint64_t nsec = nsec_total - usec*1000; + + uint64_t nsec_total_error = time_unit_to_nsec(&t->error); + + uint64_t usec_error = nsec_total_error/1000; + uint64_t nsec_error = nsec_total_error - usec_error*1000; + + sprintf(dst, "%4"PRIu64".%03"PRIu64" +/- %2"PRIu64".%03"PRIu64"", usec, nsec, usec_error, nsec_error); + return dst; +} + +char *print_time_unit_usec(char *dst, struct time_unit *t) +{ + uint64_t nsec_total = time_unit_to_nsec(t); + + uint64_t usec = nsec_total/1000; + uint64_t nsec = nsec_total - usec*1000; + + sprintf(dst, "%4"PRIu64".%03"PRIu64"", usec, nsec); 
+ return dst; +} + +void toggle_display_screen(void) +{ + screen_state.toggle = !screen_state.toggle; + stats_display_layout(0); +} + +void display_screen(unsigned screen_id) +{ + if (screen_id >= n_screens) { + plog_err("Unsupported screen %d\n", screen_id + 1); + return; + } + + if (screen_state.chosen_screen == screen_id) { + stats_display_layout(1); + } + else { + screen_state.chosen_screen = screen_id; + current_screen = display_screens[screen_id]; + stats_display_layout(0); + } +} + +void display_page_up(void) +{ +} + +void display_page_down(void) +{ +} + +void display_refresh(void) +{ + stats_display_layout(1); +} + +void display_stats(void) +{ + display_lock(); + current_screen->draw_stats(&screen_state); + display_stats_general(); + wrefresh(win_stat); + display_unlock(); +} + +static char pages[32768] = {0}; +static int cur_idx = 0; +static size_t pages_len = 0; + +void display_print_page(void) +{ + int n_lines = 0; + int cur_idx_prev = cur_idx; + + if (cur_idx >= (int)pages_len) { + return; + } + + display_lock(); + for (size_t i = cur_idx; i < pages_len; ++i) { + if (pages[i] == '\n') { + n_lines++; + if (n_lines == win_txt_height - 2) { + pages[i] = 0; + cur_idx = i + 1; + break; + } + } + } + + waddstr(win_txt, pages + cur_idx_prev); + if (cur_idx != cur_idx_prev && cur_idx < (int)pages_len) + waddstr(win_txt, "\nPRESS ENTER FOR MORE...\n"); + else { + pages_len = 0; + } + wrefresh(win_txt); + display_unlock(); +} + +void display_print(const char *str) +{ + display_lock(); + + if (scr == NULL) { + fputs(str, stdout); + fflush(stdout); + display_unlock(); + return; + } + + /* Check if the whole string can fit on the screen. 
*/ + pages_len = strlen(str); + int n_lines = 0; + memset(pages, 0, sizeof(pages)); + memcpy(pages, str, pages_len); + cur_idx = 0; + for (size_t i = 0; i < pages_len; ++i) { + if (pages[i] == '\n') { + n_lines++; + if (n_lines == win_txt_height - 2) { + pages[i] = 0; + cur_idx = i + 1; + break; + } + } + } + + waddstr(win_txt, pages); + if (cur_idx != 0) + waddstr(win_txt, "\nPRESS ENTER FOR MORE...\n"); + else + pages_len = 0; + + wrefresh(win_txt); + display_unlock(); +} diff --git a/VNFs/DPPD-PROX/display.h b/VNFs/DPPD-PROX/display.h new file mode 100644 index 00000000..4b517546 --- /dev/null +++ b/VNFs/DPPD-PROX/display.h @@ -0,0 +1,109 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _DISPLAY_H_ +#define _DISPLAY_H_ + +#include <inttypes.h> +#include <stdarg.h> +#include <stdio.h> + +#include "display_latency.h" +#include "stats_cons.h" +#include "clock.h" + +struct display_column { + char title[32]; + int offset; + int width; + struct display_page *display_page; +}; + +struct display_table { + struct display_column cols[16]; + char title[32]; + int n_cols; + int offset; + int width; +}; + +struct display_page { + struct display_table tables[8]; + int n_tables; + int width; +}; + +struct screen_state { + unsigned chosen_screen; + unsigned chosen_page; + int toggle; + int pps_unit; +}; + +struct display_screen { + void (*draw_frame)(struct screen_state *screen_state); + void (*draw_stats)(struct screen_state *screen_state); + int (*get_height)(void); + const char *title; +}; + +void display_set_pps_unit(int val); + +struct lcore_cfg; +struct task_args; + +void display_page_draw_frame(const struct display_page *display_page, int height); +int display_column_get_width(const struct display_column *display_column); +void display_column_init(struct display_column *display_column, const char *title, unsigned width); +struct display_column *display_table_add_col(struct display_table *table); +void display_table_init(struct display_table *table, const char *title); +struct display_table *display_page_add_table(struct display_page *display_page); +void display_page_init(struct display_page *display_page); +__attribute__((format(printf, 3, 4))) void display_column_print(const struct display_column *display_column, int row, const char *fmt, ...); +void display_column_print_core_task(const struct display_column *display_column, int row, struct lcore_cfg *lconf, struct task_args *targ); +void display_column_print_number(const struct display_column *display_column, int row, uint64_t number); + +char *print_time_unit_err_usec(char *dst, struct time_unit_err *t); +char *print_time_unit_usec(char *dst, struct time_unit *t); +struct port_queue; 
+struct rte_ring; +void display_column_port_ring(const struct display_column *display_column, int row, struct port_queue *ports, int port_count, struct rte_ring **rings, int ring_count); + +void display_init(void); +void display_end(void); +void display_stats(void); +void display_refresh(void); +void display_print(const char *str); +void display_cmd(const char *cmd, int cmd_len, int cursor_pos); +void display_screen(unsigned screen_id); +void toggle_display_screen(void); +void display_page_up(void); +void display_page_down(void); +void display_print_page(void); +void display_lock(void); +void display_unlock(void); + +int display_getch(void); + +static struct stats_cons display = { + .init = display_init, + .notify = display_stats, + .refresh = display_refresh, + .finish = display_end, + .flags = STATS_CONS_F_ALL, +}; + +#endif /* _DISPLAY_H_ */ diff --git a/VNFs/DPPD-PROX/display_l4gen.c b/VNFs/DPPD-PROX/display_l4gen.c new file mode 100644 index 00000000..7cc1f5f3 --- /dev/null +++ b/VNFs/DPPD-PROX/display_l4gen.c @@ -0,0 +1,172 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "display.h" +#include "display_l4gen.h" +#include "stats_l4gen.h" + +static struct display_page display_page_l4gen; + +static struct display_column *core_col; +static struct display_column *tcp_setup_col; +static struct display_column *udp_setup_col; +static struct display_column *all_setup_col; +static struct display_column *bundles_setup_col; +static struct display_column *tcp_teardown_col; +static struct display_column *tcp_teardown_retx_col; +static struct display_column *udp_teardown_col; +static struct display_column *tcp_expire_col; +static struct display_column *udp_expire_col; +static struct display_column *active_col; +static struct display_column *retx_col; + +static void display_l4gen_draw_frame(struct screen_state *state) +{ + const uint32_t n_l4gen = stats_get_n_l4gen(); + + display_page_init(&display_page_l4gen); + + struct display_table *core = display_page_add_table(&display_page_l4gen); + struct display_table *setup_rate = display_page_add_table(&display_page_l4gen); + struct display_table *teardown_rate = display_page_add_table(&display_page_l4gen); + struct display_table *expire_rate = display_page_add_table(&display_page_l4gen); + struct display_table *other = display_page_add_table(&display_page_l4gen); + + display_table_init(core, "Core"); + display_table_init(setup_rate, "Setup rate (flows/s)"); + display_table_init(teardown_rate, "Teardown rate (flows/s)"); + display_table_init(expire_rate, "Expire rate (flows/s)"); + display_table_init(other, "Other"); + + core_col = display_table_add_col(core); + display_column_init(core_col, "Nb", 4); + + tcp_setup_col = display_table_add_col(setup_rate); + display_column_init(tcp_setup_col, "TCP", 10); + udp_setup_col = display_table_add_col(setup_rate); + display_column_init(udp_setup_col, "UDP", 10); + all_setup_col = display_table_add_col(setup_rate); + display_column_init(all_setup_col, "TCP + UDP", 9); + bundles_setup_col = display_table_add_col(setup_rate); + 
display_column_init(bundles_setup_col, "Bundles", 9); + + tcp_teardown_col = display_table_add_col(teardown_rate); + display_column_init(tcp_teardown_col, "TCP w/o reTX", 12); + tcp_teardown_retx_col = display_table_add_col(teardown_rate); + display_column_init(tcp_teardown_retx_col, "TCP w/ reTX", 12); + udp_teardown_col = display_table_add_col(teardown_rate); + display_column_init(udp_teardown_col, "UDP", 12); + + tcp_expire_col = display_table_add_col(expire_rate); + display_column_init(tcp_expire_col, "TCP", 10); + udp_expire_col = display_table_add_col(expire_rate); + display_column_init(udp_expire_col, "UDP", 10); + + active_col = display_table_add_col(other); + display_column_init(active_col, "Active (#)", 10); + retx_col = display_table_add_col(other); + display_column_init(retx_col, "reTX (/s)", 10); + + display_page_draw_frame(&display_page_l4gen, n_l4gen); + + for (uint16_t i = 0; i < n_l4gen; ++i) { + struct task_l4_stats *tls = stats_get_l4_stats(i); + + display_column_print(core_col, i, "%2u/%1u", tls->lcore_id, tls->task_id); + } +} + +static void display_l4gen_draw_stats_line(int row, struct l4_stats_sample *clast, struct l4_stats_sample *cprev) +{ + struct l4_stats *last = &clast->stats; + struct l4_stats *prev = &cprev->stats; + + uint64_t delta_t = clast->tsc - cprev->tsc; + + uint64_t tcp_created = last->tcp_created - prev->tcp_created; + uint64_t udp_created = last->udp_created - prev->udp_created; + + uint64_t tcp_finished_no_retransmit = last->tcp_finished_no_retransmit - prev->tcp_finished_no_retransmit; + uint64_t tcp_finished_retransmit = last->tcp_finished_retransmit - prev->tcp_finished_retransmit; + uint64_t tcp_expired = last->tcp_expired - prev->tcp_expired; + uint64_t tcp_retransmits = last->tcp_retransmits - prev->tcp_retransmits; + uint64_t udp_finished = last->udp_finished - prev->udp_finished; + uint64_t udp_expired = last->udp_expired - prev->udp_expired; + uint64_t bundles_created = last->bundles_created - 
prev->bundles_created; + + uint64_t tcp_setup_rate = val_to_rate(tcp_created, delta_t); + uint64_t udp_setup_rate = val_to_rate(udp_created, delta_t); + uint64_t all_setup_rate = val_to_rate(tcp_created + udp_created, delta_t); + uint64_t bundle_setup_rate = val_to_rate(bundles_created, delta_t); + + uint64_t tcp_teardown_rate = val_to_rate(tcp_finished_no_retransmit, delta_t); + uint64_t tcp_teardown_retx_rate = val_to_rate(tcp_finished_retransmit, delta_t); + uint64_t udp_teardown_rate = val_to_rate(udp_finished, delta_t); + + uint64_t tcp_expire_rate = val_to_rate(tcp_expired, delta_t); + uint64_t udp_expire_rate = val_to_rate(udp_expired, delta_t); + + display_column_print(tcp_setup_col, row, "%"PRIu64"", tcp_setup_rate); + display_column_print(udp_setup_col, row, "%"PRIu64"", udp_setup_rate); + display_column_print(all_setup_col, row, "%"PRIu64"", all_setup_rate); + display_column_print(bundles_setup_col, row, "%"PRIu64"", bundle_setup_rate); + + display_column_print(tcp_teardown_col, row, "%"PRIu64"", tcp_teardown_rate); + display_column_print(tcp_teardown_retx_col, row, "%"PRIu64"", tcp_teardown_retx_rate); + display_column_print(udp_teardown_col, row, "%"PRIu64"", udp_teardown_rate); + + display_column_print(tcp_expire_col, row, "%"PRIu64"", tcp_expire_rate); + display_column_print(udp_expire_col, row, "%"PRIu64"", udp_expire_rate); + + uint64_t tot_created = last->tcp_created + last->udp_created; + uint64_t tot_finished = last->tcp_finished_retransmit + last->tcp_finished_no_retransmit + + last->udp_finished + last->udp_expired + last->tcp_expired; + + uint64_t active = tot_created - tot_finished; + uint64_t retx = tcp_retransmits; + + display_column_print(active_col, row, "%10"PRIu64"", active); + display_column_print(retx_col, row, "%10"PRIu64"", retx); +} + +static void display_l4gen_draw_stats(struct screen_state *state) +{ + const uint32_t n_l4gen = stats_get_n_l4gen(); + + for (uint16_t i = 0; i < n_l4gen; ++i) { + struct l4_stats_sample *clast = 
stats_get_l4_stats_sample(i, 1); + struct l4_stats_sample *cprev = stats_get_l4_stats_sample(i, 0); + + display_l4gen_draw_stats_line(i, clast, cprev); + } +} + +static int display_l4gen_get_height(void) +{ + return stats_get_n_l4gen(); +} + +static struct display_screen display_screen_l4gen = { + .draw_frame = display_l4gen_draw_frame, + .draw_stats = display_l4gen_draw_stats, + .get_height = display_l4gen_get_height, + .title = "l4gen", +}; + +struct display_screen *display_l4gen(void) +{ + return &display_screen_l4gen; +} diff --git a/VNFs/DPPD-PROX/display_l4gen.h b/VNFs/DPPD-PROX/display_l4gen.h new file mode 100644 index 00000000..24b6c5af --- /dev/null +++ b/VNFs/DPPD-PROX/display_l4gen.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_L4GEN_H +#define DISPLAY_L4GEN_H + +struct display_screen; +struct display_screen *display_l4gen(void); + +#endif /* DISPLAY_L4GEN_H */ diff --git a/VNFs/DPPD-PROX/display_latency.c b/VNFs/DPPD-PROX/display_latency.c new file mode 100644 index 00000000..04382e46 --- /dev/null +++ b/VNFs/DPPD-PROX/display_latency.c @@ -0,0 +1,154 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "display.h" +#include "display_latency.h" +#include "stats_latency.h" +#include "lconf.h" + +static struct display_column *min_col; +static struct display_column *max_col; +static struct display_column *avg_col; +static struct display_column *stddev_col; +static struct display_column *accuracy_limit_col; +static struct display_column *used_col; +static struct display_column *lost_col; +static struct display_page display_page_latency; + +static void display_latency_draw_frame(struct screen_state *screen_state) +{ + const uint32_t n_latency = stats_get_n_latency(); + struct display_column *core_col; + struct display_column *port_col; + + display_page_init(&display_page_latency); + + struct display_table *core = display_page_add_table(&display_page_latency); + struct display_table *port = display_page_add_table(&display_page_latency); + struct display_table *lat = display_page_add_table(&display_page_latency); + struct display_table *acc = display_page_add_table(&display_page_latency); + struct display_table *other = display_page_add_table(&display_page_latency); + + display_table_init(core, "Core"); + core_col = display_table_add_col(core); + display_column_init(core_col, "Nb", 4); + + display_table_init(port, "Port Nb"); + port_col = display_table_add_col(port); + display_column_init(port_col, "RX", 8); + + if (screen_state->toggle == 0) + display_table_init(lat, "Measured Latency per interval"); + else + display_table_init(lat, "Measured Latency since reset"); + + min_col = display_table_add_col(lat); + display_column_init(min_col, 
"Min (us)", 20); + max_col = display_table_add_col(lat); + display_column_init(max_col, "Max (us)", 20); + avg_col = display_table_add_col(lat); + display_column_init(avg_col, "Avg (us)", 20); + stddev_col = display_table_add_col(lat); + display_column_init(stddev_col, "Stddev (us)", 20); + + display_table_init(acc, "Accuracy "); + used_col = display_table_add_col(acc); + display_column_init(used_col, "Used Packets (%)", 16); + accuracy_limit_col = display_table_add_col(acc); + display_column_init(accuracy_limit_col, "limit (us)", 16); + + display_table_init(other, "Other"); + + lost_col = display_table_add_col(other); + display_column_init(lost_col, "Lost Packets", 16); + + display_page_draw_frame(&display_page_latency, n_latency); + + for (uint16_t i = 0; i < n_latency; ++i) { + uint32_t lcore_id = stats_latency_get_core_id(i); + uint32_t task_id = stats_latency_get_task_id(i); + struct task_args *targ = &lcore_cfg[lcore_id].targs[task_id]; + + display_column_print(core_col, i, "%2u/%1u", lcore_id, task_id); + display_column_port_ring(port_col, i, targ->rx_port_queue, targ->nb_rxports, targ->rx_rings, targ->nb_rxrings); + } +} + +#define AFTER_POINT 1000000 + +static void display_stats_latency_entry(int row, struct stats_latency *stats_latency) +{ + struct time_unit_err avg = stats_latency->avg; + struct time_unit_err min = stats_latency->min; + struct time_unit_err max = stats_latency->max; + struct time_unit_err stddev = stats_latency->stddev; + struct time_unit accuracy_limit = stats_latency->accuracy_limit; + + uint32_t used = 0; + + if (stats_latency->tot_all_packets) + used = stats_latency->tot_packets * (100 * AFTER_POINT) / stats_latency->tot_all_packets; + + char dst[32]; + + if (stats_latency->tot_packets) { + display_column_print(min_col, row, "%s", print_time_unit_err_usec(dst, &min)); + display_column_print(max_col, row, "%s", print_time_unit_err_usec(dst, &max)); + display_column_print(avg_col, row, "%s", print_time_unit_err_usec(dst, &avg)); + 
display_column_print(stddev_col, row, "%s", print_time_unit_err_usec(dst, &stddev)); + } else { + display_column_print(min_col, row, "%s", "N/A"); + display_column_print(max_col, row, "%s", "N/A"); + display_column_print(avg_col, row, "%s", "N/A"); + display_column_print(stddev_col, row, "%s", "N/A"); + } + + display_column_print(accuracy_limit_col, row, "%s", print_time_unit_usec(dst, &accuracy_limit)); + display_column_print(lost_col, row, "%16"PRIu64"", stats_latency->lost_packets); + display_column_print(used_col, row, "%3u.%06u", used / AFTER_POINT, used % AFTER_POINT); +} + +static void display_latency_draw_stats(struct screen_state *screen_state) +{ + const uint32_t n_latency = stats_get_n_latency(); + struct stats_latency *stats_latency; + + for (uint16_t i = 0; i < n_latency; ++i) { + if (screen_state->toggle == 0) + stats_latency = stats_latency_get(i); + else + stats_latency = stats_latency_tot_get(i); + + display_stats_latency_entry(i, stats_latency); + } +} + +static int display_latency_get_height(void) +{ + return stats_get_n_latency(); +} + +static struct display_screen display_screen_latency = { + .draw_frame = display_latency_draw_frame, + .draw_stats = display_latency_draw_stats, + .get_height = display_latency_get_height, + .title = "latency", +}; + +struct display_screen *display_latency(void) +{ + return &display_screen_latency; +} diff --git a/VNFs/DPPD-PROX/display_latency.h b/VNFs/DPPD-PROX/display_latency.h new file mode 100644 index 00000000..0821b2d9 --- /dev/null +++ b/VNFs/DPPD-PROX/display_latency.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_LATENCY_H +#define DISPLAY_LATENCY_H + +struct display_screen; +struct display_screen *display_latency(void); + +#endif /* DISPLAY_LATENCY_H */ diff --git a/VNFs/DPPD-PROX/display_mempools.c b/VNFs/DPPD-PROX/display_mempools.c new file mode 100644 index 00000000..2982104b --- /dev/null +++ b/VNFs/DPPD-PROX/display_mempools.c @@ -0,0 +1,111 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "display_mempools.h" +#include "stats_mempool.h" +#include "display.h" +#include "defaults.h" + +static struct display_page display_page_mempools; +static struct display_column *nb_col; +static struct display_column *queue_col; +static struct display_column *occup_col; +static struct display_column *used_col; +static struct display_column *free_col; +static struct display_column *total_col; +static struct display_column *mem_used_col; +static struct display_column *mem_free_col; +static struct display_column *mem_tot_col; + +static void display_mempools_draw_frame(struct screen_state *screen_state) +{ + const uint32_t n_mempools = stats_get_n_mempools(); + + display_page_init(&display_page_mempools); + + struct display_table *port = display_page_add_table(&display_page_mempools); + struct display_table *stats = display_page_add_table(&display_page_mempools); + + display_table_init(port, "Port"); + display_table_init(stats, "Sampled statistics"); + + nb_col = display_table_add_col(port); + queue_col = display_table_add_col(port); + display_column_init(nb_col, "Nb", 4); + display_column_init(queue_col, "Queue", 5); + + occup_col = display_table_add_col(stats); + display_column_init(occup_col, "Occup (%)", 9); + used_col = display_table_add_col(stats); + display_column_init(used_col, "Used (#)", 12); + free_col = display_table_add_col(stats); + display_column_init(free_col, "Free (#)", 12); + total_col = display_table_add_col(stats); + display_column_init(total_col, "Total (#)", 13); + + mem_used_col = display_table_add_col(stats); + display_column_init(mem_used_col, "Mem Used (KB)", 13); + mem_free_col = display_table_add_col(stats); + display_column_init(mem_free_col, "Mem Free (KB)", 13); + mem_tot_col = display_table_add_col(stats); + display_column_init(mem_tot_col, "Mem Tot (KB)", 12); + + display_page_draw_frame(&display_page_mempools, n_mempools); + + for (uint16_t i = 0; i < n_mempools; ++i) { + struct mempool_stats *ms = 
stats_get_mempool_stats(i); + + display_column_print(nb_col, i, "%4u", ms->port); + display_column_print(queue_col, i, "%5u", ms->queue); + display_column_print(total_col, i, "%13zu", ms->size); + display_column_print(mem_tot_col, i, "%12zu", ms->size * MBUF_SIZE/1024); + } +} + +static void display_mempools_draw_stats(struct screen_state *state) +{ + const uint32_t n_mempools = stats_get_n_mempools(); + + for (uint16_t i = 0; i < n_mempools; ++i) { + struct mempool_stats *ms = stats_get_mempool_stats(i); + const size_t used = ms->size - ms->free; + const uint32_t used_frac = used*10000/ms->size; + + display_column_print(occup_col, i, "%6u.%02u", used_frac/100, used_frac % 100); + display_column_print(used_col, i, "%12zu", used); + display_column_print(free_col, i, "%12zu", ms->free); + + display_column_print(mem_used_col, i, "%13zu", used * MBUF_SIZE/1024); + display_column_print(mem_free_col, i, "%13zu", ms->free * MBUF_SIZE/1024); + } +} + +static int display_mempools_get_height(void) +{ + return stats_get_n_mempools(); +} + +static struct display_screen display_screen_mempools = { + .draw_frame = display_mempools_draw_frame, + .draw_stats = display_mempools_draw_stats, + .get_height = display_mempools_get_height, + .title = "mempools", +}; + +struct display_screen *display_mempools(void) +{ + return &display_screen_mempools; +} diff --git a/VNFs/DPPD-PROX/display_mempools.h b/VNFs/DPPD-PROX/display_mempools.h new file mode 100644 index 00000000..b5c4d99c --- /dev/null +++ b/VNFs/DPPD-PROX/display_mempools.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_MEMPOOLS_H +#define DISPLAY_MEMPOOLS_H + +struct display_screen; +struct display_screen *display_mempools(void); + +#endif /* DISPLAY_MEMPOOLS_H */ diff --git a/VNFs/DPPD-PROX/display_pkt_len.c b/VNFs/DPPD-PROX/display_pkt_len.c new file mode 100644 index 00000000..df34616a --- /dev/null +++ b/VNFs/DPPD-PROX/display_pkt_len.c @@ -0,0 +1,138 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "prox_globals.h" +#include "display_pkt_len.h" +#include "stats_port.h" +#include "display.h" +#include "defaults.h" +#include "prox_port_cfg.h" +#include "clock.h" + +static struct display_page display_page_pkt_len; +static struct display_column *port_col; +static struct display_column *name_col; +static struct display_column *type_col; +static struct display_column *stats_col[PKT_SIZE_COUNT]; + +const char *titles[] = { + "64B (#)", + "65-127B (#)", + "128-255B (#)", + "256-511B (#)", + "512-1023B (#)", + "1024-1522B (#)", + "1523B+ (#)", +}; + +static int port_disp[PROX_MAX_PORTS]; +static int n_port_disp; + +static void display_pkt_len_draw_frame(struct screen_state *screen_state) +{ + n_port_disp = 0; + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) { + if (prox_port_cfg[i].active) { + port_disp[n_port_disp++] = i; + } + } + + display_page_init(&display_page_pkt_len); + + struct display_table *port_name = display_page_add_table(&display_page_pkt_len); + + display_table_init(port_name, "Port"); + port_col = display_table_add_col(port_name); + name_col = display_table_add_col(port_name); + type_col = display_table_add_col(port_name); + + display_column_init(port_col, "ID", 4); + display_column_init(name_col, "Name", 8); + display_column_init(type_col, "Type", 7); + + struct display_table *stats = display_page_add_table(&display_page_pkt_len); + + if (screen_state->toggle == 0) + display_table_init(stats, "Statistics per second"); + else + display_table_init(stats, "Total Statistics"); + + for (int i = 0; i < PKT_SIZE_COUNT; ++i) { + stats_col[i] = display_table_add_col(stats); + display_column_init(stats_col[i], titles[i], 13); + } + + display_page_draw_frame(&display_page_pkt_len, n_port_disp); + + for (uint8_t i = 0; i < n_port_disp; ++i) { + const uint32_t port_id = port_disp[i]; + + display_column_print(port_col, i, "%4u", port_id); + display_column_print(name_col, i, "%8s", prox_port_cfg[port_id].name); + display_column_print(type_col, i, 
"%7s", prox_port_cfg[port_id].short_name); + } +} + +static void display_pkt_len_draw_stats(struct screen_state *state) +{ + for (uint8_t i = 0; i < n_port_disp; ++i) { + const uint32_t port_id = port_disp[i]; + struct port_stats_sample *last = stats_get_port_stats_sample(port_id, 1); + struct port_stats_sample *prev = stats_get_port_stats_sample(port_id, 0); + + uint64_t delta_t = last->tsc - prev->tsc; + if (delta_t == 0) // This could happen if we just reset the screen => stats will be updated later + continue; + + if (state->toggle == 0) { + uint64_t diff; + + for (int j = 0; j < PKT_SIZE_COUNT; ++j) { + if (last->tx_pkt_size[j] == (uint64_t)-1) { + display_column_print(stats_col[j], i, " --- "); + } else { + diff = last->tx_pkt_size[j] - prev->tx_pkt_size[j]; + display_column_print(stats_col[j], i, "%13lu", val_to_rate(diff, delta_t)); + } + } + } else { + for (int j = 0; j < PKT_SIZE_COUNT; ++j) { + if (last->tx_pkt_size[j] == (uint64_t)-1) { + display_column_print(stats_col[j], i, " --- "); + } else { + display_column_print(stats_col[j], i, "%13lu", last->tx_pkt_size[j]); + } + } + } + } +} + +static int display_pkt_len_get_height(void) +{ + return stats_get_n_ports(); +} + +static struct display_screen display_screen_pkt_len = { + .draw_frame = display_pkt_len_draw_frame, + .draw_stats = display_pkt_len_draw_stats, + .get_height = display_pkt_len_get_height, + .title = "pkt_len", +}; + +struct display_screen *display_pkt_len(void) +{ + return &display_screen_pkt_len; +} diff --git a/VNFs/DPPD-PROX/display_pkt_len.h b/VNFs/DPPD-PROX/display_pkt_len.h new file mode 100644 index 00000000..2c7af420 --- /dev/null +++ b/VNFs/DPPD-PROX/display_pkt_len.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_PKT_LEN_H +#define DISPLAY_PKT_LEN_H + +struct display_screen; +struct display_screen *display_pkt_len(void); + +#endif /* DISPLAY_PKT_LEN_H */ diff --git a/VNFs/DPPD-PROX/display_ports.c b/VNFs/DPPD-PROX/display_ports.c new file mode 100644 index 00000000..b1027f93 --- /dev/null +++ b/VNFs/DPPD-PROX/display_ports.c @@ -0,0 +1,252 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> + +#include "clock.h" +#include "display_ports.h" +#include "display.h" +#include "stats_port.h" +#include "prox_globals.h" +#include "prox_port_cfg.h" + +static struct display_page display_page_ports; +static struct display_column *nb_col; +static struct display_column *name_col; +static struct display_column *type_col; + +static struct display_column *no_mbufs_col; +static struct display_column *ierrors_col; +static struct display_column *imissed_col; +static struct display_column *oerrors_col; +static struct display_column *rx_col; +static struct display_column *tx_col; +static struct display_column *rx_bytes_col; +static struct display_column *tx_bytes_col; +static struct display_column *rx_percent_col; +static struct display_column *tx_percent_col; + +static int port_disp[PROX_MAX_PORTS]; +static int n_port_disp; + +static void display_ports_draw_frame(struct screen_state *state) +{ + n_port_disp = 0; + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) { + if (prox_port_cfg[i].active) { + port_disp[n_port_disp++] = i; + } + } + + const uint32_t n_ports = stats_get_n_ports(); + char name[32]; + char *ptr; + + display_page_init(&display_page_ports); + + struct display_table *port = display_page_add_table(&display_page_ports); + struct display_table *stats = display_page_add_table(&display_page_ports); + + display_table_init(port, "Port"); + + nb_col = display_table_add_col(port); + name_col = display_table_add_col(port); + type_col = display_table_add_col(port); + + display_column_init(nb_col, "Nb", 4); + display_column_init(name_col, "Name", 8); + display_column_init(type_col, "Type", 7); + + if (state->toggle == 0) { + display_table_init(stats, "Statistics per second"); + no_mbufs_col = display_table_add_col(stats); + ierrors_col = display_table_add_col(stats); + imissed_col = display_table_add_col(stats); + oerrors_col = display_table_add_col(stats); + rx_col = display_table_add_col(stats); + tx_col = display_table_add_col(stats); + 
rx_bytes_col = display_table_add_col(stats); + tx_bytes_col = display_table_add_col(stats); + rx_percent_col = display_table_add_col(stats); + tx_percent_col = display_table_add_col(stats); + + display_column_init(no_mbufs_col, "no mbufs (#)", 12); + display_column_init(ierrors_col, "ierrors (#)", 12); + display_column_init(imissed_col, "imissed (#)", 12); + display_column_init(oerrors_col, "oerrors (#)", 12); + display_column_init(rx_col, "RX (Kpps)", 10); + display_column_init(tx_col, "TX (Kpps)", 10); + display_column_init(rx_bytes_col, "RX (Kbps)", 10); + display_column_init(tx_bytes_col, "TX (Kbps)", 10); + display_column_init(rx_percent_col, "RX (%)", 8); + display_column_init(tx_percent_col, "TX (%)", 8); + } else { + display_table_init(stats, "Total statistics"); + no_mbufs_col = display_table_add_col(stats); + ierrors_col = display_table_add_col(stats); + imissed_col = display_table_add_col(stats); + oerrors_col = display_table_add_col(stats); + rx_col = display_table_add_col(stats); + tx_col = display_table_add_col(stats); + + display_column_init(no_mbufs_col, "no mbufs (#)", 13); + display_column_init(ierrors_col, "ierrors (#)", 13); + display_column_init(imissed_col, "imissed (#)", 13); + display_column_init(oerrors_col, "oerrors (#)", 13); + display_column_init(rx_col, "RX (#)", 13); + display_column_init(tx_col, "TX (#)", 13); + } + + display_page_draw_frame(&display_page_ports, n_port_disp); + for (uint8_t i = 0; i < n_port_disp; ++i) { + const uint32_t port_id = port_disp[i]; + + display_column_print(nb_col, i, "%u", port_id); + display_column_print(name_col, i, "%s", prox_port_cfg[port_id].name); + display_column_print(type_col, i, "%s", prox_port_cfg[port_id].short_name); + } +} + +struct percent { + uint32_t percent; + uint32_t part; +}; + +static struct percent calc_percent(uint64_t val, uint64_t delta_t) +{ + struct percent ret; + uint64_t normalized = 0; + + if (val == 0) { + ret.percent = 0; + ret.part = 0; + } else if (val < thresh) { + 
ret.percent = val * tsc_hz / delta_t / 12500000; + ret.part = (val * tsc_hz / delta_t / 1250) % 10000; + } else if (delta_t > tsc_hz) { + ret.percent = val / (delta_t / tsc_hz) / 12500000; + ret.part = (val / (delta_t / tsc_hz) / 1250) % 10000; + } else { + ret.percent = 0; + ret.part = 0; + } + return ret; +} + +static void display_ports_draw_per_sec_stats(void) +{ + for (uint8_t i = 0; i < n_port_disp; ++i) { + const uint32_t port_id = port_disp[i]; + struct port_stats_sample *last = stats_get_port_stats_sample(port_id, 1); + struct port_stats_sample *prev = stats_get_port_stats_sample(port_id, 0); + + uint64_t delta_t = last->tsc - prev->tsc; + + /* This could happen if we just reset the screen. + stats will be updated later */ + if (delta_t == 0) + continue; + + uint64_t no_mbufs_rate = val_to_rate(last->no_mbufs - prev->no_mbufs, delta_t); + uint64_t ierrors_rate = val_to_rate(last->ierrors - prev->ierrors, delta_t); + uint64_t imissed_rate = val_to_rate(last->imissed - prev->imissed, delta_t); + uint64_t oerrors_rate = val_to_rate(last->oerrors - prev->oerrors, delta_t); + + uint64_t rx_kbps_rate = val_to_rate((last->rx_bytes - prev->rx_bytes) * 8, delta_t) / 1000; + uint64_t tx_kbps_rate = val_to_rate((last->tx_bytes - prev->tx_bytes) * 8, delta_t) / 1000; + + uint64_t rx_rate = val_to_rate(last->rx_tot - prev->rx_tot, delta_t) / 1000; + if (unlikely(prev->rx_tot > last->rx_tot)) + rx_rate = 0; + uint64_t tx_rate = val_to_rate(last->tx_tot - prev->tx_tot, delta_t) / 1000; + if (unlikely(prev->tx_tot > last->tx_tot)) + tx_rate = 0; + + /* Take 20 bytes overhead (or 24 if crc strip is enabled) into account */ + struct percent rx_percent; + struct percent tx_percent; + if (strcmp(prox_port_cfg[port_id].short_name, "i40e") == 0) { + if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) { + rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t); + tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes 
+ 24 * (last->tx_tot - prev->tx_tot), delta_t); + } else { + rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t); + tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t); + } + } else { + if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) { + rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t); + tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t); + } else { + rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t); + tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t); + } + } + + display_column_print(no_mbufs_col, i, "%lu", no_mbufs_rate); + display_column_print(ierrors_col, i, "%lu", ierrors_rate); + display_column_print(imissed_col, i, "%lu", imissed_rate); + display_column_print(oerrors_col, i, "%lu", oerrors_rate); + + display_column_print(rx_bytes_col, i, "%lu", rx_kbps_rate); + display_column_print(tx_bytes_col, i, "%lu", tx_kbps_rate); + display_column_print(rx_col, i, "%lu", rx_rate); + display_column_print(tx_col, i, "%lu", tx_rate); + + display_column_print(rx_percent_col, i, "%3u.%04u", rx_percent.percent, rx_percent.part); + display_column_print(tx_percent_col, i, "%3u.%04u", tx_percent.percent, tx_percent.part); + } +} + +static void display_ports_draw_total_stats(void) +{ + for (uint8_t i = 0; i < n_port_disp; ++i) { + const uint32_t port_id = port_disp[i]; + struct port_stats_sample *last = stats_get_port_stats_sample(port_id, 1); + + display_column_print(no_mbufs_col, i, "%lu", last->no_mbufs); + display_column_print(ierrors_col, i, "%lu", last->ierrors); + display_column_print(imissed_col, i, "%lu", last->imissed); + display_column_print(oerrors_col, i, "%lu", last->oerrors); + display_column_print(rx_col, i, "%lu", 
last->rx_tot); + display_column_print(tx_col, i, "%lu", last->tx_tot); + } +} + +static void display_ports_draw_stats(struct screen_state *state) +{ + if (state->toggle == 0) + display_ports_draw_per_sec_stats(); + else + display_ports_draw_total_stats(); +} + +static int display_ports_get_height(void) +{ + return stats_get_n_ports(); +} + +static struct display_screen display_screen_ports = { + .draw_frame = display_ports_draw_frame, + .draw_stats = display_ports_draw_stats, + .get_height = display_ports_get_height, + .title = "ports", +}; + +struct display_screen *display_ports(void) +{ + return &display_screen_ports; +} diff --git a/VNFs/DPPD-PROX/display_ports.h b/VNFs/DPPD-PROX/display_ports.h new file mode 100644 index 00000000..520662fb --- /dev/null +++ b/VNFs/DPPD-PROX/display_ports.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_PORTS_H +#define DISPLAY_PORTS_H + +struct display_screen; +struct display_screen *display_ports(void); + +#endif /* DISPLAY_PORTS_H */ diff --git a/VNFs/DPPD-PROX/display_priority.c b/VNFs/DPPD-PROX/display_priority.c new file mode 100644 index 00000000..c997d85d --- /dev/null +++ b/VNFs/DPPD-PROX/display_priority.c @@ -0,0 +1,144 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "display_priority.h" +#include "stats_prio_task.h" +#include "display.h" +#include "lconf.h" + +#define PRIORITY_COUNT 8 + +static struct display_page display_page_priority; +static struct display_column *stats_tx[PRIORITY_COUNT]; +static struct display_column *stats_drop[PRIORITY_COUNT]; +static struct display_column *core_col; +static struct display_column *name_col; + +static void display_priority_draw_frame(struct screen_state *state) +{ + uint32_t n_tasks = stats_get_n_prio_tasks_tot(); + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + char name[32]; + char *ptr; + + display_page_init(&display_page_priority); + + struct display_table *core_name = display_page_add_table(&display_page_priority); + + display_table_init(core_name, "Core/task"); + core_col = display_table_add_col(core_name); + name_col = display_table_add_col(core_name); + display_column_init(core_col, "Nb", 4); + display_column_init(name_col, "Name", 5); + + struct display_table *stats = display_page_add_table(&display_page_priority); + if (state->toggle == 0) { + display_table_init(stats, "Statistics per second"); + + char title[64]; + for (int i = 0; i < PRIORITY_COUNT; ++i) { + stats_tx[i] = display_table_add_col(stats); + snprintf(title, sizeof(title), "TX %d (K)", i); + display_column_init(stats_tx[i], title, 9); + + stats_drop[i] = display_table_add_col(stats); + snprintf(title, sizeof(title), "DRP %d (K)", i); + display_column_init(stats_drop[i], title, 9); + } + } else { + display_table_init(stats, "Total statistics"); + + char title[64]; + for 
(int i = 0; i < PRIORITY_COUNT; ++i) { + stats_tx[i] = display_table_add_col(stats); + snprintf(title, sizeof(title), "TX %d (#)", i); + display_column_init(stats_tx[i], title, 9); + + stats_drop[i] = display_table_add_col(stats); + snprintf(title, sizeof(title), "DRP %d (#)", i); + display_column_init(stats_drop[i], title, 9); + } + } + + display_page_draw_frame(&display_page_priority, n_tasks); + + uint32_t count = 0; + lconf = NULL; + while (core_targ_next(&lconf, &targ, 0) == 0) { + if (strcmp(targ->task_init->mode_str, "aggreg") == 0) { + display_column_print_core_task(core_col, count, lconf, targ); + if (targ->id == 0) + display_column_print(name_col, count, "%s", lconf->name); + count++; + } + } +} + +static void display_priority_draw_stats(struct screen_state *state) +{ + uint64_t rx_prio; + uint64_t drop_tx_fail_prio; + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + const uint32_t n_stats_prio = stats_get_n_prio_tasks_tot(); + + if (state->toggle == 0) { + for (uint32_t count = 0; count < n_stats_prio; ++count) { + struct prio_task_stats_sample *last = stats_get_prio_task_stats_sample(count, 1); + struct prio_task_stats_sample *prev = stats_get_prio_task_stats_sample(count, 0); + + uint64_t delta_t = (last->tsc - prev->tsc) * 1000; + if (delta_t == 0) // This could happen if we just reset the screen => stats will be updated later + continue; + + for (uint8_t i = 0; i < PRIORITY_COUNT; i++) { + rx_prio = last->rx_prio[i] - prev->rx_prio[i]; + drop_tx_fail_prio = last->drop_tx_fail_prio[i] - prev->drop_tx_fail_prio[i]; + + display_column_print(stats_tx[i], count, "%9lu", val_to_rate(rx_prio, delta_t)); + display_column_print(stats_drop[i], count, "%9lu", val_to_rate(drop_tx_fail_prio, delta_t)); + } + } + } else { + for (uint32_t count = 0; count < n_stats_prio; ++count) { + for (uint8_t i = 0; i < PRIORITY_COUNT; i++) { + rx_prio = stats_core_task_tot_rx_prio(count, i); + drop_tx_fail_prio = stats_core_task_tot_drop_tx_fail_prio(count, i); + + 
display_column_print(stats_tx[i], count, "%9lu", rx_prio); + display_column_print(stats_drop[i], count, "%9lu", drop_tx_fail_prio); + } + } + } +} + +static int display_priority_get_height(void) +{ + return stats_get_n_prio_tasks_tot(); +} + +static struct display_screen display_screen_priority = { + .draw_frame = display_priority_draw_frame, + .draw_stats = display_priority_draw_stats, + .get_height = display_priority_get_height, + .title = "priority", +}; + +struct display_screen *display_priority(void) +{ + return &display_screen_priority; +} diff --git a/VNFs/DPPD-PROX/display_priority.h b/VNFs/DPPD-PROX/display_priority.h new file mode 100644 index 00000000..a15c03e6 --- /dev/null +++ b/VNFs/DPPD-PROX/display_priority.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_PRIORITY_H +#define DISPLAY_PRIORITY_H + +struct display_screen; +struct display_screen *display_priority(void); + +#endif /* DISPLAY_PRIORITY_H */ diff --git a/VNFs/DPPD-PROX/display_rings.c b/VNFs/DPPD-PROX/display_rings.c new file mode 100644 index 00000000..618350e2 --- /dev/null +++ b/VNFs/DPPD-PROX/display_rings.c @@ -0,0 +1,111 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_ring.h> + +#include "display.h" +#include "display_rings.h" +#include "stats_ring.h" +#include "prox_port_cfg.h" + +static struct display_page display_page_rings; +static struct display_column *ring_col; +static struct display_column *occup_col; +static struct display_column *free_col; +static struct display_column *size_col; +static struct display_column *sc_col; +static struct display_column *sp_col; + +static void display_rings_draw_frame(struct screen_state *state) +{ + const uint32_t n_rings = stats_get_n_rings(); + char sc_val, sp_val; + + display_page_init(&display_page_rings); + + struct display_table *ring_table = display_page_add_table(&display_page_rings); + struct display_table *stats_table = display_page_add_table(&display_page_rings); + + display_table_init(ring_table, "Name"); + + display_table_init(stats_table, "Sampled statistics"); + + ring_col = display_table_add_col(ring_table); + display_column_init(ring_col, "Ring/Port", 11); + occup_col = display_table_add_col(stats_table); + display_column_init(occup_col, "Occup (%)", 11); + free_col = display_table_add_col(stats_table); + display_column_init(free_col, "Free", 11); + size_col = display_table_add_col(stats_table); + display_column_init(size_col, "Size", 11); + sc_col = display_table_add_col(stats_table); + display_column_init(sc_col, "SC", 2); + sp_col = display_table_add_col(stats_table); + display_column_init(sp_col, "SP", 2); + + display_page_draw_frame(&display_page_rings, n_rings); + + for (uint16_t i = 0; i < n_rings; ++i) { + struct ring_stats *rs 
= stats_get_ring_stats(i); + + if (rs->nb_ports == 0) { + display_column_print(ring_col, i, "%s", rs->ring->name); + } else { + char name[64] = {0}; + int offset = 0; + + for (uint32_t j = 0; j < rs->nb_ports; j++) + offset += sprintf(name + offset, "%s", rs->port[j]->name); + } + + sc_val = (rs->ring->flags & RING_F_SC_DEQ) ? 'y' : 'n'; + sp_val = (rs->ring->flags & RING_F_SP_ENQ) ? 'y' : 'n'; + + display_column_print(sc_col, i, " %c", sc_val); + display_column_print(sp_col, i, " %c", sp_val); + } +} + +static void display_rings_draw_stats(struct screen_state *state) +{ + const uint32_t n_rings = stats_get_n_rings(); + + for (uint32_t i = 0; i < n_rings; ++i) { + struct ring_stats *rs = stats_get_ring_stats(i); + uint32_t used = ((rs->size - rs->free)*10000)/rs->size; + + display_column_print(occup_col, i, "%8u.%02u", used/100, used%100); + display_column_print(free_col, i, "%11u", rs->free); + display_column_print(size_col, i, "%11u", rs->size); + } +} + +static int display_rings_get_height(void) +{ + return stats_get_n_rings(); +} + +static struct display_screen display_screen_rings = { + .draw_frame = display_rings_draw_frame, + .draw_stats = display_rings_draw_stats, + .get_height = display_rings_get_height, + .title = "rings", +}; + +struct display_screen *display_rings(void) +{ + return &display_screen_rings; +} diff --git a/VNFs/DPPD-PROX/display_rings.h b/VNFs/DPPD-PROX/display_rings.h new file mode 100644 index 00000000..421bb16e --- /dev/null +++ b/VNFs/DPPD-PROX/display_rings.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_RINGS_H +#define DISPLAY_RINGS_H + +struct display_screen; +struct display_screen *display_rings(void); + +#endif /* DISPLAY_RINGS_H */ diff --git a/VNFs/DPPD-PROX/display_tasks.c b/VNFs/DPPD-PROX/display_tasks.c new file mode 100644 index 00000000..75075a10 --- /dev/null +++ b/VNFs/DPPD-PROX/display_tasks.c @@ -0,0 +1,331 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "display_tasks.h" +#include "display.h" +#include "prox_globals.h" +#include "stats_task.h" +#include "stats_core.h" +#include "lconf.h" + +struct task_stats_disp { + uint32_t lcore_id; + uint32_t task_id; + uint32_t lcore_stat_id; +}; + +static int col_offset; +static struct task_stats_disp task_stats_disp[RTE_MAX_LCORE * MAX_TASKS_PER_CORE]; + +static struct display_page display_page_tasks; + +static struct display_column *nb_col; +static struct display_column *name_col; +static struct display_column *mode_col; +static struct display_column *rx_name_col; +static struct display_column *tx_name_col; +static struct display_column *idle_col; +static struct display_column *rx_col; +static struct display_column *tx_col; +static struct display_column *tx_fail_col; +static struct display_column *discard_col; +static struct display_column *handled_col; +static struct display_column *cpp_col; +static struct display_column *ghz_col; +static struct display_column *rx_col; +static struct display_column *tx_col; +static struct display_column *tx_fail_col; +static struct display_column *discard_col; +static struct display_column *handled_col; +static struct display_column *occup_col; +static struct display_column *mask_col; +static struct display_column *class_col; +static struct display_column *mbm_tot_col; +static struct display_column *mbm_loc_col; +static struct display_column *frac_col; + +static void stats_display_core_task_entry(struct lcore_cfg *lconf, struct task_args *targ, unsigned row) +{ + display_column_print_core_task(nb_col, row, lconf, targ); + + display_column_print(name_col, row, "%s", targ->id == 0 ? 
lconf->name : ""); + display_column_print(mode_col, row, "%s", targ->task_init->mode_str); + + display_column_port_ring(rx_name_col, row, targ->rx_port_queue, targ->nb_rxports, targ->rx_rings, targ->nb_rxrings); + display_column_port_ring(tx_name_col, row, targ->tx_port_queue, targ->nb_txports, targ->tx_rings, targ->nb_txrings); +} + +static void display_tasks_draw_frame(struct screen_state *state) +{ + const uint32_t n_tasks_tot = stats_get_n_tasks_tot(); + + display_page_init(&display_page_tasks); + + struct display_table *core_task = display_page_add_table(&display_page_tasks); + struct display_table *rx_tx = display_page_add_table(&display_page_tasks); + + display_table_init(core_task, "Core/Task"); + + nb_col = display_table_add_col(core_task); + display_column_init(nb_col, "Nb", 4); + name_col = display_table_add_col(core_task); + display_column_init(name_col, "Name", 7); + mode_col = display_table_add_col(core_task); + display_column_init(mode_col, "Mode", 9); + + display_table_init(rx_tx, "Port ID/Ring Name"); + rx_name_col = display_table_add_col(rx_tx); + display_column_init(rx_name_col, "RX", 9); + tx_name_col = display_table_add_col(rx_tx); + display_column_init(tx_name_col, "TX", 9); + + struct display_table *stats = display_page_add_table(&display_page_tasks); + + if (state->toggle == 0) { + display_table_init(stats, "Statistics per second"); + + idle_col = display_table_add_col(stats); + display_column_init(idle_col, "Idle (%)", 5); + + rx_col = display_table_add_col(stats); + display_column_init(rx_col, "RX (K)", 9); + + tx_col = display_table_add_col(stats); + display_column_init(tx_col, "TX (K)", 9); + + tx_fail_col = display_table_add_col(stats); + display_column_init(tx_fail_col, "TX Fail (K)", 9); + + discard_col = display_table_add_col(stats); + display_column_init(discard_col, "Discard (K)", 9); + + handled_col = display_table_add_col(stats); + display_column_init(handled_col, "Handled (K)", 9); + + if (stats_cpu_freq_enabled()) { + struct 
display_table *other = display_page_add_table(&display_page_tasks); + + display_table_init(other, "Other"); + + cpp_col = display_table_add_col(other); + display_column_init(cpp_col, "CPP", 9); + + ghz_col = display_table_add_col(other); + display_column_init(ghz_col, "Clk (GHz)", 9); + } + if (stats_mbm_enabled()) { + struct display_table *other = display_page_add_table(&display_page_tasks); + mbm_tot_col = display_table_add_col(other); + display_column_init(mbm_tot_col, "Tot Bdw(M)", 10); + mbm_loc_col = display_table_add_col(other); + display_column_init(mbm_loc_col, "Loc Bdw(M)", 10); + } + } else { + display_table_init(stats, "Total Statistics"); + + rx_col = display_table_add_col(stats); + display_column_init(rx_col, "RX (K)", 14); + + tx_col = display_table_add_col(stats); + display_column_init(tx_col, "TX (K)", 14); + + tx_fail_col = display_table_add_col(stats); + display_column_init(tx_fail_col, "TX Fail (K)", 14); + + discard_col = display_table_add_col(stats); + display_column_init(discard_col, "Discard (K)", 14); + + handled_col = display_table_add_col(stats); + display_column_init(handled_col, "Handled (K)", 14); + + if (stats_cmt_enabled()) { + struct display_table *other = display_page_add_table(&display_page_tasks); + + display_table_init(other, "Cache QoS Monitoring"); + + occup_col = display_table_add_col(other); + display_column_init(occup_col, "Occupancy (KB)", 15); + + frac_col = display_table_add_col(other); + display_column_init(frac_col, "Fraction", 9); + } + if (stats_cat_enabled()) { + struct display_table *other = display_page_add_table(&display_page_tasks); + mask_col = display_table_add_col(other); + display_column_init(mask_col, "Cache mask", 10); + class_col = display_table_add_col(other); + display_column_init(class_col, "Class", 5); + } + } + display_page_draw_frame(&display_page_tasks, n_tasks_tot); + + uint16_t element_count = 0; + + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + while (core_targ_next(&lconf, 
&targ, 0) == 0) { + PROX_ASSERT(element_count < RTE_MAX_LCORE * MAX_TASKS_PER_CORE); + + stats_display_core_task_entry(lconf, targ, element_count); + + task_stats_disp[element_count].lcore_id = lconf->id; + task_stats_disp[element_count].task_id = targ->id; + task_stats_disp[element_count].lcore_stat_id = stats_lcore_find_stat_id(lconf->id); + element_count++; + } +} + +static void print_kpps(struct display_column *col, int row, uint64_t nb_pkts, uint64_t delta_t) +{ + nb_pkts *= tsc_hz; + if (nb_pkts && nb_pkts /100 < delta_t) { + uint64_t int_part = nb_pkts/delta_t; + uint64_t frac_part = (nb_pkts - int_part * delta_t) * 1000 /delta_t; + display_column_print(col, row, "%2lu.%03lu", int_part, frac_part); + } + else { + display_column_print(col, row, "%9lu", nb_pkts / delta_t); + } +} + +static void display_core_task_stats_per_sec(const struct task_stats_disp *t, struct screen_state *state, int row) +{ + struct task_stats_sample *last = stats_get_task_stats_sample(t->lcore_id, t->task_id, 1); + struct task_stats_sample *prev = stats_get_task_stats_sample(t->lcore_id, t->task_id, 0); + + /* delta_t in units of clock ticks */ + uint64_t delta_t = last->tsc - prev->tsc; + + uint64_t empty_cycles = last->empty_cycles - prev->empty_cycles; + + if (empty_cycles > delta_t) { + empty_cycles = 10000; + } + else { + empty_cycles = empty_cycles * 10000 / delta_t; + } + + /* empty_cycles has 2 digits after point, (usefull when only a very small idle time) */ + + display_column_print(idle_col, row, "%3lu.%02lu", empty_cycles / 100, empty_cycles % 100); + + // Display per second statistics in Kpps unit + delta_t *= state->pps_unit; + + print_kpps(rx_col, row, last->rx_pkt_count - prev->rx_pkt_count, delta_t); + print_kpps(tx_col, row, last->tx_pkt_count - prev->tx_pkt_count, delta_t); + print_kpps(tx_fail_col, row, last->drop_tx_fail - prev->drop_tx_fail, delta_t); + print_kpps(discard_col, row, last->drop_discard - prev->drop_discard, delta_t); + print_kpps(handled_col, row, 
last->drop_handled - prev->drop_handled, delta_t); + + if (stats_cpu_freq_enabled()) { + uint8_t lcore_stat_id = t->lcore_stat_id; + struct lcore_stats_sample *clast = stats_get_lcore_stats_sample(lcore_stat_id, 1); + struct lcore_stats_sample *cprev = stats_get_lcore_stats_sample(lcore_stat_id, 0); + + uint64_t adiff = clast->afreq - cprev->afreq; + uint64_t mdiff = clast->mfreq - cprev->mfreq; + + uint64_t cpp = 0; + + uint64_t pkt_diff_rx = last->rx_pkt_count - prev->rx_pkt_count; + uint64_t pkt_diff_tx = last->tx_pkt_count - prev->tx_pkt_count; + + uint64_t pkt_diff = pkt_diff_tx > pkt_diff_rx? pkt_diff_tx : pkt_diff_rx; + + if (pkt_diff && mdiff) { + cpp = delta_t/pkt_diff*adiff/mdiff/1000; + } + + uint64_t mhz; + if (mdiff) + mhz = tsc_hz*adiff/mdiff/1000000; + else + mhz = 0; + + display_column_print(cpp_col, row, "%lu", cpp); + display_column_print(ghz_col, row, "%lu.%03lu", mhz/1000, mhz%1000); + } + if (stats_mbm_enabled()) { + struct lcore_stats *c = stats_get_lcore_stats(t->lcore_stat_id); + uint8_t lcore_stat_id = t->lcore_stat_id; + struct lcore_stats_sample *clast = stats_get_lcore_stats_sample(lcore_stat_id, 1); + struct lcore_stats_sample *cprev = stats_get_lcore_stats_sample(lcore_stat_id, 0); + if ((clast->mbm_tot_bytes - cprev->mbm_tot_bytes) >> 20) + display_column_print(mbm_tot_col, row, "%lu", (clast->mbm_tot_bytes - cprev->mbm_tot_bytes) >> 20); + else + display_column_print(mbm_tot_col, row, "0.%03lu", (clast->mbm_tot_bytes - cprev->mbm_tot_bytes) >> 10); + if( (clast->mbm_loc_bytes - cprev->mbm_loc_bytes) >> 20) + display_column_print(mbm_loc_col, row, "%lu", (clast->mbm_loc_bytes - cprev->mbm_loc_bytes) >> 20); + else + display_column_print(mbm_loc_col, row, "0.%03lu", (clast->mbm_loc_bytes - cprev->mbm_loc_bytes) >> 10); + } +} + +static void display_core_task_stats_tot(const struct task_stats_disp *t, struct screen_state *state, int row) +{ + struct task_stats *ts = stats_get_task_stats(t->lcore_id, t->task_id); + + 
display_column_print(rx_col, row, "%lu", ts->tot_rx_pkt_count); + display_column_print(tx_col, row, "%lu", ts->tot_tx_pkt_count); + display_column_print(tx_fail_col, row, "%lu", ts->tot_drop_tx_fail); + display_column_print(discard_col, row, "%lu", ts->tot_drop_discard); + display_column_print(handled_col, row, "%lu", ts->tot_drop_handled); + + if (stats_cmt_enabled()) { + struct lcore_stats *c = stats_get_lcore_stats(t->lcore_stat_id); + display_column_print(occup_col, row, "%lu", c->cmt_bytes >> 10); + display_column_print(frac_col, row, "%3lu.%02lu", c->cmt_fraction/100, c->cmt_fraction%100); + } + if (stats_cat_enabled()) { + struct lcore_stats *c = stats_get_lcore_stats(t->lcore_stat_id); + display_column_print(mask_col, row, "%x", c->cat_mask); + display_column_print(class_col, row, "%x", c->class); + } +} + +static void display_tasks_draw_stats(struct screen_state *state) +{ + const uint32_t n_tasks_tot = stats_get_n_tasks_tot(); + + for (uint8_t i = 0; i < n_tasks_tot; ++i) { + const struct task_stats_disp *disp = &task_stats_disp[i]; + + if (state->toggle == 0) { + display_core_task_stats_per_sec(disp, state, i); + } else { + display_core_task_stats_tot(disp, state, i); + } + } +} + +static int display_tasks_get_height(void) +{ + return stats_get_n_tasks_tot(); +} + +static struct display_screen display_screen_tasks = { + .draw_frame = display_tasks_draw_frame, + .draw_stats = display_tasks_draw_stats, + .get_height = display_tasks_get_height, + .title = "tasks", +}; + +struct display_screen *display_tasks(void) +{ + return &display_screen_tasks; +} diff --git a/VNFs/DPPD-PROX/display_tasks.h b/VNFs/DPPD-PROX/display_tasks.h new file mode 100644 index 00000000..b369b6bd --- /dev/null +++ b/VNFs/DPPD-PROX/display_tasks.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef DISPLAY_TASKS_H +#define DISPLAY_TASKS_H + +struct display_screen; +struct display_screen *display_tasks(void); + +#endif /* DISPLAY_TASKS_H */ diff --git a/VNFs/DPPD-PROX/dpi/Makefile b/VNFs/DPPD-PROX/dpi/Makefile new file mode 100644 index 00000000..fc943580 --- /dev/null +++ b/VNFs/DPPD-PROX/dpi/Makefile @@ -0,0 +1,18 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +all: + gcc -fpic -shared dpi_stub.c -o dpi_stub.so diff --git a/VNFs/DPPD-PROX/dpi/dpi.h b/VNFs/DPPD-PROX/dpi/dpi.h new file mode 100644 index 00000000..5ce1015d --- /dev/null +++ b/VNFs/DPPD-PROX/dpi/dpi.h @@ -0,0 +1,76 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _DPI_H_ +#define _DPI_H_ + +#include <sys/time.h> +#include <inttypes.h> +#include <stddef.h> + +struct flow_info { + uint32_t ip_src; + uint32_t ip_dst; + uint8_t ip_proto; + uint16_t port_src; + uint16_t port_dst; + uint8_t reservered[3]; +} __attribute__((packed)); + +struct dpi_payload { + uint8_t *payload; + uint16_t len; + uint16_t client_to_server; + struct timeval tv; +}; + +struct dpi_engine { + /* Returns 0 on success, This function is called from an + arbitrary thread before any other function in this struct + is called. */ + int (*dpi_init)(uint32_t thread_count, int argc, const char *argv[]); + /* Return the size that should be allocated in the flow + table. It is the sizeof(*flow_data) passed to + dpi_process(). */ + size_t (*dpi_get_flow_entry_size)(void); + /* Called before the flow entry is expired. */ + void (*dpi_flow_expire)(void *flow_data); + /* start function called from a DPI thread itself. The opaque + pointer returned here will be passed to dpi_thread_stop and + dpi_process. */ + void *(*dpi_thread_start)(void); + /* Stop function called from a DPI thread itself. */ + void (*dpi_thread_stop)(void *opaque); + /* Processing function to perform actual DPI work. struct + flow_info contains the 5 tuple, flow_data is the entry in + the flow table which has a size specified by + dpi_get_flow_entry_size(). The payload (together with the + time and the direction) is passed through the payload + parameter. DPI results are returned by the results + array. The function returns 0 on success. 
*/ + int (*dpi_process)(void *opaque, struct flow_info *fi, void *flow_data, + struct dpi_payload *payload, uint32_t results[], + size_t *result_len); + /* Called once at cleanup. */ + void (*dpi_finish)(void); + /* Function used for printing. */ + int (*dpi_print)(const char *fmt, ...); +}; + +/* Returns the implementation of a dpi_engine. */ +struct dpi_engine *get_dpi_engine(void); + +#endif /* _DPI_H_ */ diff --git a/VNFs/DPPD-PROX/dpi/dpi_stub.c b/VNFs/DPPD-PROX/dpi/dpi_stub.c new file mode 100644 index 00000000..8febbcb9 --- /dev/null +++ b/VNFs/DPPD-PROX/dpi/dpi_stub.c @@ -0,0 +1,57 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <stdio.h> + +#include "dpi.h" + +/* The following functions are not a real implementation of a + DPI. They serve only to create dpi_stub.so which can be loaded into + prox. 
*/ + +static int dpi_init(uint32_t thread_count, int argc, const char *argv[]) +{ + return 0; +} + +size_t dpi_get_flow_entry_size(void) {return 0;} +void flow_data_dpi_flow_expire(void *flow_data) {} +void *dpi_thread_start() {return NULL;} +void dpi_thread_stop(void *opaque) {} +void dpi_finish(void) {} + +int dpi_process(void *opaque, struct flow_info *fi, void *flow_data, + struct dpi_payload *payload, uint32_t results[], + size_t *result_len) +{ + return 0; +} + +static struct dpi_engine dpi_engine = { + .dpi_init = dpi_init, + .dpi_get_flow_entry_size = dpi_get_flow_entry_size, + .dpi_flow_expire = flow_data_dpi_flow_expire, + .dpi_thread_start = dpi_thread_start, + .dpi_thread_stop = dpi_thread_stop, + .dpi_process = dpi_process, + .dpi_finish = dpi_finish, + .dpi_print = printf, +}; + +struct dpi_engine *get_dpi_engine(void) +{ + return &dpi_engine; +} diff --git a/VNFs/DPPD-PROX/eld.h b/VNFs/DPPD-PROX/eld.h new file mode 100644 index 00000000..b5de59d7 --- /dev/null +++ b/VNFs/DPPD-PROX/eld.h @@ -0,0 +1,82 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _ELD_H_ +#define _ELD_H_ + +#define PACKET_QUEUE_BITS 14 +#define PACKET_QUEUE_SIZE (1 << PACKET_QUEUE_BITS) +#define PACKET_QUEUE_MASK (PACKET_QUEUE_SIZE - 1) + +#define QUEUE_ID_BITS (32 - PACKET_QUEUE_BITS) +#define QUEUE_ID_SIZE (1 << QUEUE_ID_BITS) +#define QUEUE_ID_MASK (QUEUE_ID_SIZE - 1) + +struct early_loss_detect { + uint32_t entries[PACKET_QUEUE_SIZE]; + uint32_t last_pkt_idx; +}; + +static void early_loss_detect_reset(struct early_loss_detect *eld) +{ + for (size_t i = 0; i < PACKET_QUEUE_SIZE; i++) { + eld->entries[i] = -1; + } +} + +static uint32_t early_loss_detect_count_remaining_loss(struct early_loss_detect *eld) +{ + uint32_t queue_id; + uint32_t n_loss; + uint32_t n_loss_total = 0; + + /* Need to check if we lost any packet before last packet + received Any packet lost AFTER the last packet received + cannot be counted. Such a packet will be counted after both + lat and gen restarted */ + queue_id = eld->last_pkt_idx >> PACKET_QUEUE_BITS; + for (uint32_t i = (eld->last_pkt_idx + 1) & PACKET_QUEUE_MASK; i < PACKET_QUEUE_SIZE; i++) { + // We ** might ** have received OOO packets; do not count them as lost next time... + if (queue_id - eld->entries[i] != 0) { + n_loss = (queue_id - eld->entries[i] - 1) & QUEUE_ID_MASK; + n_loss_total += n_loss; + } + } + for (uint32_t i = 0; i < (eld->last_pkt_idx & PACKET_QUEUE_MASK); i++) { + // We ** might ** have received OOO packets; do not count them as lost next time... 
+ if (eld->entries[i] - queue_id != 1) { + n_loss = (queue_id - eld->entries[i]) & QUEUE_ID_MASK; + n_loss_total += n_loss; + } + } + + eld->entries[eld->last_pkt_idx & PACKET_QUEUE_MASK] = -1; + return n_loss_total; +} + +static uint32_t early_loss_detect_add(struct early_loss_detect *eld, uint32_t packet_index) +{ + uint32_t old_queue_id, queue_pos, n_loss; + + eld->last_pkt_idx = packet_index; + queue_pos = packet_index & PACKET_QUEUE_MASK; + old_queue_id = eld->entries[queue_pos]; + eld->entries[queue_pos] = packet_index >> PACKET_QUEUE_BITS; + + return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK; +} + +#endif /* _ELD_H_ */ diff --git a/VNFs/DPPD-PROX/etypes.h b/VNFs/DPPD-PROX/etypes.h new file mode 100644 index 00000000..97ce5c0d --- /dev/null +++ b/VNFs/DPPD-PROX/etypes.h @@ -0,0 +1,30 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _ETYPES_H_ +#define _ETYPES_H_ + +#define ETYPE_IPv4 0x0008 /* IPv4 in little endian */ +#define ETYPE_IPv6 0xDD86 /* IPv6 in little endian */ +#define ETYPE_ARP 0x0608 /* ARP in little endian */ +#define ETYPE_VLAN 0x0081 /* 802-1aq - VLAN */ +#define ETYPE_MPLSU 0x4788 /* MPLS unicast */ +#define ETYPE_MPLSM 0x4888 /* MPLS multicast */ +#define ETYPE_8021ad 0xA888 /* Q-in-Q */ +#define ETYPE_LLDP 0xCC88 /* Link Layer Discovery Protocol (LLDP) */ +#define ETYPE_EoGRE 0x5865 /* EoGRE in little endian */ + +#endif /* _ETYPES_H_ */ diff --git a/VNFs/DPPD-PROX/expire_cpe.c b/VNFs/DPPD-PROX/expire_cpe.c new file mode 100644 index 00000000..4f2f5cd8 --- /dev/null +++ b/VNFs/DPPD-PROX/expire_cpe.c @@ -0,0 +1,43 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/
+
+#include <rte_cycles.h>
+
+#include "hash_entry_types.h"
+#include "hash_utils.h"
+#include "expire_cpe.h"
+
+#define MAX_TSC __UINT64_C(0xFFFFFFFFFFFFFFFF) /* NOTE(review): unused in this file */
+
+void check_expire_cpe(void* data) /* scan one hash bucket and delete CPE entries whose tsc deadline has passed */
+{
+	struct expire_cpe *um = (struct expire_cpe *)data;
+	uint64_t cur_tsc = rte_rdtsc(); /* entries with tsc < now are expired */
+	struct cpe_data *entries[4] = {0}; /* a key8 bucket holds up to 4 entries */
+	void *key[4] = {0};
+	uint64_t n_buckets = get_bucket_key8(um->cpe_table, um->bucket_index, key, (void**)entries);
+
+	for (uint8_t i = 0; i < 4 && entries[i]; ++i) { /* stops at the first empty slot; assumes entries[] is packed — TODO confirm */
+		if (entries[i]->tsc < cur_tsc) {
+			int key_found = 0;
+			void* entry = 0; /* NOTE(review): f_delete's last arg receives the deleted entry; passing NULL assumes the op tolerates it — confirm */
+			rte_table_hash_key8_ext_dosig_ops.f_delete(um->cpe_table, key[i], &key_found, entry);
+		}
+	}
+
+	um->bucket_index++; /* one bucket per call; the full table is swept over n_buckets calls */
+	um->bucket_index &= (n_buckets - 1); /* wrap; assumes n_buckets is a power of two — TODO confirm */
+}
diff --git a/VNFs/DPPD-PROX/expire_cpe.h b/VNFs/DPPD-PROX/expire_cpe.h
new file mode 100644
index 00000000..ad697f76
--- /dev/null
+++ b/VNFs/DPPD-PROX/expire_cpe.h
@@ -0,0 +1,30 @@
+/*
+// Copyright (c) 2010-2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/ + +#ifndef _EXPIRE_CPE_H_ +#define _EXPIRE_CPE_H_ + +#include <rte_table_hash.h> + +struct expire_cpe { + struct rte_table_hash *cpe_table; + struct cpe_data *cpe_data; + uint32_t bucket_index; +}; + +void check_expire_cpe(void *data); + +#endif /* _EXPIRE_CPE_H_ */ diff --git a/VNFs/DPPD-PROX/file_utils.c b/VNFs/DPPD-PROX/file_utils.c new file mode 100644 index 00000000..b3cf0846 --- /dev/null +++ b/VNFs/DPPD-PROX/file_utils.c @@ -0,0 +1,92 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <stdio.h> +#include <limits.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + +#include "prox_args.h" +#include "file_utils.h" + +static char file_error_string[128] = {0}; + +const char *file_get_error(void) +{ + return file_error_string; +} + +__attribute__((format(printf, 1 ,2))) static void file_set_error(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vsnprintf(file_error_string, sizeof(file_error_string), fmt, ap); + va_end(ap); +} + +static void resolve_path_cfg_dir(char *file_name, size_t len, const char *path) +{ + if (path[0] != '/') + snprintf(file_name, len, "%s/%s", get_cfg_dir(), path); + else + strncpy(file_name, path, len); +} + +long file_get_size(const char *path) +{ + char file_name[PATH_MAX]; + struct stat s; + + resolve_path_cfg_dir(file_name, sizeof(file_name), path); + + if (stat(file_name, &s)) { + file_set_error("Stat failed on '%s': %s", path, strerror(errno)); + return -1; + } + + if ((s.st_mode & S_IFMT) != S_IFREG) { + snprintf(file_error_string, sizeof(file_error_string), "'%s' is not a file", path); + return -1; + } + + return s.st_size; +} + +int file_read_content(const char *path, uint8_t *mem, size_t beg, size_t len) +{ + char file_name[PATH_MAX]; + FILE *f; + + resolve_path_cfg_dir(file_name, sizeof(file_name), path); + f = fopen(file_name, "r"); + if (!f) { + file_set_error("Failed to read '%s': %s", path, strerror(errno)); + return -1; + } + + fseek(f, beg, SEEK_SET); + + size_t ret = fread(mem, 1, len, f); + if ((uint32_t)ret != len) { + file_set_error("Failed to read '%s:%zu' for %zu bytes: got %zu\n", file_name, beg, len, ret); + return -1; + } + + fclose(f); + return 0; +} diff --git a/VNFs/DPPD-PROX/file_utils.h b/VNFs/DPPD-PROX/file_utils.h new file mode 100644 index 00000000..2458dff8 --- /dev/null +++ b/VNFs/DPPD-PROX/file_utils.h @@ -0,0 +1,27 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _FILE_UTILS_H_ +#define _FILE_UTILS_H_ + +#include <inttypes.h> +#include <stddef.h> + +long file_get_size(const char *path); +int file_read_content(const char *path, uint8_t *mem, size_t beg, size_t len); +const char *file_get_error(void); + +#endif /* _FILE_UTILS_H_ */ diff --git a/VNFs/DPPD-PROX/flow_gen/README b/VNFs/DPPD-PROX/flow_gen/README new file mode 100644 index 00000000..28f5d97c --- /dev/null +++ b/VNFs/DPPD-PROX/flow_gen/README @@ -0,0 +1,47 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +While it is possible to manually run stateful traffic generation as +described below, it is recommended to use the provided dpi scripts +available in the help-scripts directory.. + +Before running flow based generation, a traffic profile needs to be +extracted and copied into this directory. This is done by running the +flow extract tool. An example of running the tool is shown below. 
For +more details on the flow extract tool, please read the provided help +by running the tool with the -h argument. + +./build/flowextract2 -s 500000 -i input.pcap -o output_directory + +After the output has been copied to this directory, the configuration +can be launched as shown below: + +./build/prox -f flow_gen/flow_gen_4ports.cfg -e \ + -q max_setup_rate=2000 \ + -q connections=50000 \ + -q ss=19.46 \ + -q test_system_id=0 + +The parameters provided through -q depend on the traffic profile. The +following command can be used to find the maximum value of ss: + +./build/prox -f flow_gen/flow_gen_4ports.cfg -e \ + -q max_ss_and_quit=true \ + -q test_system_id=0 + +This will cause prox to read the traffic profile, calculate the maximum +value and quit immediately. No packets will be sent and the value for +ss will be printed on stdout. diff --git a/VNFs/DPPD-PROX/flow_gen/bundle_maker.lua b/VNFs/DPPD-PROX/flow_gen/bundle_maker.lua new file mode 100644 index 00000000..ca24d4bb --- /dev/null +++ b/VNFs/DPPD-PROX/flow_gen/bundle_maker.lua @@ -0,0 +1,94 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +function get_client_bundles(bundles) + local client_bundles = {}; + + for i,b in ipairs(bundles) do + client_bundles[i] = {bundle = b, imix_fraction = 1} + end + + return client_bundles; +end + +function get_server_streams(bundles) + local server_streams = {} + n_listen = 0 + for i, bundle in ipairs(bundles) do + for j, stream in ipairs(bundle) do + n_listen = n_listen + 1 + server_streams[n_listen] = stream + end + end + return server_streams; +end + +function setup_bundles(first_ip_byte, speed_scaling) + bundles = dofile("cfg.lua") + + local client_bundles = get_client_bundles(bundles); + local server_streams = get_server_streams(bundles); + + for i,e in ipairs(client_bundles) do + for j,stream in ipairs(e.bundle) do + stream.clients.ip[1] = first_ip_byte + stream.clients.port_mask = 0xffff + end + end + + for i,stream in ipairs(server_streams) do + stream.servers.ip[1] = first_ip_byte + end + + local highest_bps = 0; + for i,e in ipairs(client_bundles) do + for j,s in ipairs(e.bundle) do + if (s.up_bps ~= 1250000000 and s.dn_bps ~= 1250000000) then + if (highest_bps < s.up_bps) then + highest_bps = s.up_bps + end + if (highest_bps < s.dn_bps) then + highest_bps = s.dn_bps + end + end + end + end + + if (highest_bps == 0) then + highest_bps = 1250000000 + end + max_ss = 1250000000/highest_bps + + if (max_ss_and_quit == not nil and max_ss_and_quit == true) then + print("max ss=" .. max_ss .. "") + os.exit(0); + end + + if (speed_scaling > max_ss) then + error("Scaling too high (maximum scaling is " .. max_ss .. 
")") + end + + for i,e in ipairs(client_bundles) do + for j,s in ipairs(e.bundle) do + if (s.up_bps ~= 1250000000 and s.dn_bps ~= 1250000000) then + s.up_bps = s.up_bps * speed_scaling; + s.dn_bps = s.dn_bps * speed_scaling; + end + end + end + + return client_bundles, server_streams +end diff --git a/VNFs/DPPD-PROX/flow_gen/flow_gen_4ports.cfg b/VNFs/DPPD-PROX/flow_gen/flow_gen_4ports.cfg new file mode 100644 index 00000000..ccac3eb7 --- /dev/null +++ b/VNFs/DPPD-PROX/flow_gen/flow_gen_4ports.cfg @@ -0,0 +1,150 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 2] +name=port_a +mac=00:00:00:00:00:03 +rx desc=512 +tx desc=1024 +[port 3] +name=port_b +mac=00:00:00:00:00:04 +rx desc=512 +tx desc=1024 + +[port 4] +name=port_c +mac=00:00:00:00:00:01 +rx desc=512 +tx desc=1024 +[port 5] +name=port_d +mac=00:00:00:00:00:02 +rx desc=512 +tx desc=1024 + +[lua] +dofile("flow_gen_4ports.lua") +[variables] +$drop=no + +[defaults] +mempool size=$mempool_size + +[global] +start time=5 +name=L4 Gen + +[core 0s0] +mode=master + +[core 1s0] +task=0 +mode=lbpos +tx cores=$port_a_clients +rx port=port_a +mempool size=32K +mbuf size=2560 +byte offset=26 +drop=$drop +ring size=16384 + +[core 1s0h] +task=0 +mode=lbpos +tx cores=$port_b_servers +rx port=port_b +mbuf size=2560 +byte offset=26 +drop=$drop +ring size=16384 + +;;;------------------------------ + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[core $port_a_clients] +name=p0 +task=0 +mode=genl4 +tx port=port_a +rx ring=yes +bps=$bps +streams=c_${self} +concur conn=$conn +max setup rate=$msr + +[core $port_b_servers] +name=p0 +task=0 +mode=genl4 +sub mode=server +rx ring=yes +tx port=port_b +bps=$bps +streams=s_${self} +concur conn=$conn + +;;;;;;; socket 1 ;;;;;;;;;;;;;;;;;;;;;;; + +[core 1s1] +name=ld +task=0 +mode=lbpos +tx cores=$port_c_clients +rx port=port_c +mempool size=32K +mbuf size=2560 +byte offset=26 +drop=$drop +ring size=16384 + +[core 1s1h] +name=ld +task=0 +mode=lbpos +tx cores=$port_d_servers +rx port=port_d +mbuf size=2560 +byte offset=26 +drop=$drop +ring size=16384 + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +[core $port_c_clients] +name=p0 +task=0 +mode=genl4 +tx port=port_c +rx ring=yes +bps=$bps +streams=c_${self} +concur conn=$conn +max setup rate=$msr + +[core $port_d_servers] +name=p0 +task=0 +mode=genl4 +sub mode=server +rx ring=yes +tx port=port_d +bps=$bps +streams=s_${self} +concur 
conn=$conn diff --git a/VNFs/DPPD-PROX/flow_gen/flow_gen_4ports.lua b/VNFs/DPPD-PROX/flow_gen/flow_gen_4ports.lua new file mode 100644 index 00000000..ed674ef6 --- /dev/null +++ b/VNFs/DPPD-PROX/flow_gen/flow_gen_4ports.lua @@ -0,0 +1,83 @@ +-- +-- Copyright (c) 2010-2017 Intel Corporation +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +dofile("bundle_maker.lua") + +if (test_system_id == nil) then + error("test_system_id not set") +end + +offset = 8 * test_system_id + +c_2s0, s_3s0 = setup_bundles(128 + offset, ss) +c_4s0, s_5s0 = setup_bundles(129 + offset, ss) +c_2s0h, s_3s0h = setup_bundles(130 + offset, ss) +c_4s0h, s_5s0h = setup_bundles(131 + offset, ss) + +c_6s0, s_7s0 = setup_bundles(132 + offset, ss) +c_8s0, s_9s0 = setup_bundles(133 + offset, ss) +c_6s0h, s_7s0h = setup_bundles(134 + offset, ss) +c_8s0h, s_9s0h = setup_bundles(135 + offset, ss) + +---------------- + +c_2s1, s_3s1 = setup_bundles(64 + offset, ss) +c_4s1, s_5s1 = setup_bundles(65 + offset, ss) +c_2s1h, s_3s1h = setup_bundles(66 + offset, ss) +c_4s1h, s_5s1h = setup_bundles(67 + offset, ss) + +c_6s1, s_7s1 = setup_bundles(68 + offset, ss) +c_8s1, s_9s1 = setup_bundles(69 + offset, ss) +c_6s1h, s_7s1h = setup_bundles(70 + offset, ss) +c_8s1h, s_9s1h = setup_bundles(71 + offset, ss) + +if (max_setup_rate == nil) then + error("max_setup_rate not set") +end + +if (connections == nil) then + error("connections not set") +end + +port_a_clients="2s0,4s0,2s0h,4s0h,6s0,8s0,6s0h,8s0h" 
+port_b_servers="3s0,5s0,3s0h,5s0h,7s0,9s0,7s0h,9s0h" + + +port_c_clients="2s1,4s1,2s1h,4s1h,6s1,8s1,6s1h,8s1h" +port_d_servers="3s1,5s1,3s1h,5s1h,7s1,9s1,7s1h,9s1h" + +all_clients = port_a_clients + .. "," .. port_c_clients + +all_servers = port_b_servers + .. "," .. port_d_servers + +all_workers = all_clients .. "," .. all_servers + +all_ld = "1s0,1s0h,1s1,1s1h" + +client_port_count = 2; + +bps = 1250000000/task_count(port_a_clients) +msr = max_setup_rate/client_port_count/task_count(port_a_clients) +conn = connections/client_port_count/task_count(port_a_clients) + +mempool_size = connections +if (mempool_size > 100000) then + mempool_size = 100000 +elseif (mempool_size < 2048) then + mempool_size = 2048 +end diff --git a/VNFs/DPPD-PROX/flow_iter.h b/VNFs/DPPD-PROX/flow_iter.h new file mode 100644 index 00000000..1ff5eeeb --- /dev/null +++ b/VNFs/DPPD-PROX/flow_iter.h @@ -0,0 +1,37 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _FLOW_ITER_H_ +#define _FLOW_ITER_H_ + +struct task_args; + +struct flow_iter { + /* Returns a new iterator pointing to the beginning of the collection. */ + void (*beg)(struct flow_iter *iter, struct task_args *targ); + /* Returns non-zero when parameter is pointing past the end of the collection. */ + int (*is_end)(struct flow_iter *iter, struct task_args *targ); + /* Moves iterator parameter forward by one. 
*/ + void (*next)(struct flow_iter *iter, struct task_args *targ); + /* Access data. */ + uint16_t (*get_svlan)(struct flow_iter *iter, struct task_args *targ); + uint16_t (*get_cvlan)(struct flow_iter *iter, struct task_args *targ); + uint32_t (*get_gre_id)(struct flow_iter *iter, struct task_args *targ); + int idx; + uint8_t data; +}; + +#endif /* _FLOW_ITER_H_ */ diff --git a/VNFs/DPPD-PROX/fqueue.h b/VNFs/DPPD-PROX/fqueue.h new file mode 100644 index 00000000..ea38e85b --- /dev/null +++ b/VNFs/DPPD-PROX/fqueue.h @@ -0,0 +1,86 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _FQUEUE_H_ +#define _FQUEUE_H_ + +#include <rte_mbuf.h> + +#include <inttypes.h> + +struct fqueue { + uint32_t prod; + uint32_t cons; + uint32_t mask; + struct rte_mbuf *entries[0]; +}; + +static uint32_t fqueue_put(struct fqueue *q, struct rte_mbuf **mbufs, uint32_t count) +{ + uint32_t free_entries = q->mask + q->cons - q->prod; + uint32_t beg = q->prod & q->mask; + + count = count > free_entries? 
free_entries : count; + + if ((q->prod & q->mask) + count <= q->mask) { + rte_memcpy(&q->entries[q->prod & q->mask], mbufs, sizeof(mbufs[0]) * count); + q->prod += count; + } + else { + for (uint32_t i = 0; i < count; ++i) { + q->entries[q->prod & q->mask] = mbufs[i]; + q->prod++; + } + } + return count; +} + +static uint32_t fqueue_get(struct fqueue *q, struct rte_mbuf **mbufs, uint32_t count) +{ + uint32_t entries = q->prod - q->cons; + + count = count > entries? entries : count; + + if ((q->cons & q->mask) + count <= q->mask) { + rte_memcpy(mbufs, &q->entries[q->cons & q->mask], sizeof(mbufs[0]) * count); + q->cons += count; + } + else { + for (uint32_t i = 0; i < count; ++i) { + mbufs[i] = q->entries[q->cons & q->mask]; + q->cons++; + } + } + return count; +} + +static struct fqueue *fqueue_create(uint32_t size, int socket) +{ + size_t mem_size = 0; + + mem_size += sizeof(struct fqueue); + mem_size += sizeof(((struct fqueue *)(0))->entries[0]) * size; + + struct fqueue *ret = prox_zmalloc(mem_size, socket); + + if (!ret) + return NULL; + + ret->mask = size - 1; + return ret; +} + +#endif /* _FQUEUE_H_ */ diff --git a/VNFs/DPPD-PROX/gen/bng-4ports-gen.cfg b/VNFs/DPPD-PROX/gen/bng-4ports-gen.cfg new file mode 100644 index 00000000..ed0f0147 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/bng-4ports-gen.cfg @@ -0,0 +1,162 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:02 +[port 2] +name=cpe1 +mac=00:00:00:00:00:03 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=BNG gen +shuffle=yes +[core 0s0] +mode=master + +[core 1s0] +name=arp +task=0 +mode=gen +tx port=cpe0 + +bps=2138556 + +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000000XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 2s0] +name=cpe +task=0 +mode=gen +tx port=cpe0 +bps=1069289928 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 + +random=000000000XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 +lat pos=42 + +[core 3s0] +name=arp +task=0 +mode=gen +tx port=cpe1 + +bps=2138556 + +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000001XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 4s0] +name=cpe +task=0 +mode=gen +tx port=cpe1 +bps=1069289928 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 + +random=000000001XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 5s0] +name=inet0 +task=0 +mode=gen +tx port=inet0 +bps=1250000000; "1250000000./98" +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 
2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b + +random=0000000000000000XXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 6s0] +name=inet1 +task=0 +mode=gen +tx port=inet1 +bps=1250000000; "1250000000./98" +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=0000000000000000XXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 7s0] +name=CPE0 +task=0 +mode=lat +rx port=cpe0 +lat pos=42 + +[core 8s0] +name=CPE1 +task=0 +mode=lat +rx port=cpe1 +lat pos=42 + +[core 9s0] +name=INET0 +task=0 +mode=lat +rx port=inet0 +lat pos=66 + +[core 10s0] +name=INET1 +task=0 +mode=lat +rx port=inet1 +lat pos=66 diff --git a/VNFs/DPPD-PROX/gen/bng-8ports-gen-18cores.cfg b/VNFs/DPPD-PROX/gen/bng-8ports-gen-18cores.cfg new file mode 100644 index 00000000..7135648b --- /dev/null +++ b/VNFs/DPPD-PROX/gen/bng-8ports-gen-18cores.cfg @@ -0,0 +1,296 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=6 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:02 +[port 2] +name=cpe1 +mac=00:00:00:00:00:03 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 +[port 4] +name=cpe2 +mac=00:00:00:00:00:04 +[port 5] +name=inet2 +mac=00:00:00:00:00:04 +[port 6] +name=cpe3 +mac=00:00:00:00:00:04 +[port 7] +name=inet3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=BNG gen +shuffle=yes +[core 0s0] +mode=master + +[core 1s0] +name=arp +task=0 +mode=gen +;rx port=cpe0 +tx port=cpe0 + +bps=2138556 + +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000000XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +task=1 +mode=gen +;rx port=cpe1 +tx port=cpe1 + +bps=2138556 + +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000010XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +name=arp +task=2 +mode=gen +;rx port=cpe2 +tx port=cpe2 +bps=2138556 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000001XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +name=arp +task=3 +mode=gen +;rx port=cpe3 +tx port=cpe3 +bps=2138556 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000011XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 2s0] +name=cpe +task=0 +mode=gen +;rx port=cpe0 +tx port=cpe0 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 
81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 + +random=000000000XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 +lat pos=42 + +[core 3s0] +name=cpe +task=0 +mode=gen +;rx port=cpe1 +tx port=cpe1 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 + +random=000000010XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 4s0] +name=cpe +task=0 +mode=gen +;rx port=cpe2 +tx port=cpe2 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 +random=000000001XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 5s0] +name=cpe +task=0 +mode=gen +;rx port=cpe3 +tx port=cpe3 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 +random=000000011XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 6s0] +name=inet0 +task=0 +mode=gen +;rx port=inet0 +tx port=inet0 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b + +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 7s0] +name=inet1 +task=0 +mode=gen +;rx port=inet1 +tx port=inet1 +bps=1250000000; 
"1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 8s0] +name=inet2 +task=0 +mode=gen +;rx port=inet2 +tx port=inet2 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 9s0] +name=inet3 +task=0 +mode=gen +;rx port=inet3 +tx port=inet3 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 10s0] +name=CPE0 +task=0 +mode=lat +rx port=cpe0 +lat pos=42 + +[core 11s0] +name=CPE1 +task=0 +mode=lat +rx port=cpe1 +lat pos=42 + +[core 12s0] +name=CPE2 +task=0 +mode=lat +rx port=cpe2 +lat pos=42 + +[core 13s0] +name=CPE3 +task=0 +mode=lat +rx port=cpe3 +lat pos=42 + +[core 14s0] +name=INET0 +task=0 +mode=lat +rx port=inet0 +lat pos=66 + +[core 15s0] +name=INET1 +task=0 +mode=lat +rx port=inet1 +lat pos=66 + +[core 16s0] +name=INET2 +task=0 +mode=lat +rx port=inet2 +lat pos=66 + +[core 17s0] +name=INET3 +task=0 +mode=lat +rx port=inet3 +lat pos=66 diff --git a/VNFs/DPPD-PROX/gen/bng-8ports-gen.cfg b/VNFs/DPPD-PROX/gen/bng-8ports-gen.cfg new file mode 100644 index 00000000..a988f65c --- /dev/null +++ b/VNFs/DPPD-PROX/gen/bng-8ports-gen.cfg @@ -0,0 +1,300 @@ +;; +;; Copyright (c) 2010-2017 Intel 
Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:02 +[port 2] +name=cpe1 +mac=00:00:00:00:00:03 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 +[port 4] +name=cpe2 +mac=00:00:00:00:00:04 +[port 5] +name=inet2 +mac=00:00:00:00:00:04 +[port 6] +name=cpe3 +mac=00:00:00:00:00:04 +[port 7] +name=inet3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=BNG gen +shuffle=yes +[core 0s0] +mode=master + +[core 1s0] +name=arp +task=0 +mode=gen +;rx port=cpe0 +tx port=cpe0 + +bps=2138556 + +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000000XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 1s0h] +name=cpe +task=0 +mode=gen +;rx port=cpe0 +tx port=cpe0 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 + +random=000000000XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 +lat pos=42 + +[core 2s0] +name=arp +task=0 +mode=gen +;rx port=cpe1 +tx port=cpe1 + +bps=2138556 + +pkt inline=ff ff ff 
ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000010XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 2s0h] +name=cpe +task=0 +mode=gen +;rx port=cpe1 +tx port=cpe1 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 + +random=000000010XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 3s0] +name=arp +task=0 +mode=gen +;rx port=cpe2 +tx port=cpe2 +bps=2138556 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000001XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 3s0h] +name=cpe +task=0 +mode=gen +;rx port=cpe2 +tx port=cpe2 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 +random=000000001XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 4s0] +name=arp +task=0 +mode=gen +;rx port=cpe3 +tx port=cpe3 +bps=2138556 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 06 00 01 08 00 06 04 00 02 00 1e 67 3e b8 df c0 a8 01 01 00 00 00 00 00 00 c0 a8 01 01 +random=000000011XXXXXXX +rand_offset=14 + +random=0000XXXX00XX00XX +rand_offset=18 + +[core 4s0h] +name=cpe +task=0 +mode=gen +;rx port=cpe3 +tx port=cpe3 +bps=1069289928 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 21 +random=000000011XXXXXXX +rand_offset=14 + 
+random=0000XXXX00XX00XX +rand_offset=18 + +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +lat pos=42 + +[core 5s0] +name=inet0 +task=0 +mode=gen +;rx port=inet0 +tx port=inet0 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b + +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 6s0] +name=inet1 +task=0 +mode=gen +;rx port=inet1 +tx port=inet1 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 7s0] +name=inet2 +task=0 +mode=gen +;rx port=inet2 +tx port=inet2 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 0s0h] +name=inet3 +task=0 +mode=gen +;rx port=inet3 +tx port=inet3 +bps=1250000000; "1250000000./98" +pkt inline=ab cd ef 01 23 45 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 38 00 01 00 00 40 2f 7c 94 7f 00 00 01 7f 00 00 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 f6 b7 c0 a8 01 c7 c0 a8 01 01 00 35 00 35 00 08 7b 5b +random=000000000000000XXXXXXXXXXXXXXXXX +rand_offset=42 ; gre ID + +lat pos=66 + +[core 0s1] +name=CPE0 +task=0 +mode=lat +rx port=cpe0 +lat pos=42 + +[core 1s1] +name=CPE1 +task=0 +mode=lat +rx port=cpe1 +lat pos=42 + +[core 2s1] +name=CPE2 +task=0 +mode=lat +rx 
port=cpe2 +lat pos=42 + +[core 3s1] +name=CPE3 +task=0 +mode=lat +rx port=cpe3 +lat pos=42 + +[core 4s1] +name=INET0 +task=0 +mode=lat +rx port=inet0 +lat pos=66 + +[core 5s1] +name=INET1 +task=0 +mode=lat +rx port=inet1 +lat pos=66 + +[core 6s1] +name=INET2 +task=0 +mode=lat +rx port=inet2 +lat pos=66 + +[core 7s1] +name=INET3 +task=0 +mode=lat +rx port=inet3 +lat pos=66 diff --git a/VNFs/DPPD-PROX/gen/bng-ovs-usv-4ports-gen.cfg b/VNFs/DPPD-PROX/gen/bng-ovs-usv-4ports-gen.cfg new file mode 100644 index 00000000..13f4472c --- /dev/null +++ b/VNFs/DPPD-PROX/gen/bng-ovs-usv-4ports-gen.cfg @@ -0,0 +1,89 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +[port 1] +name=p1 +mac=00:00:00:00:00:02 +[port 2] +name=p2 +mac=00:00:00:00:00:03 +[port 3] +name=p3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=BNG OVS USV gen + +[core 0s1] +mode=master + +[core 1s1] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 05 00 00 05 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b +random=0000000000000000000001XX00XX00XX +rand_offset=26 + +random=0000101X000000000XXX000000000000 +rand_offset=30 + +[core 2s1] +name=p1 +task=0 +mode=gen +tx port=p1 +bps=1250000000 +pkt inline=00 00 02 00 00 02 00 00 06 00 00 06 08 00 45 00 00 38 00 01 00 00 40 2f f7 43 c0 a8 01 01 c0 a8 01 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 88 f5 17 18 19 1a c0 a8 01 01 13 88 13 88 00 08 e6 f2 +random=000000000XXXXXXX +rand_offset=40 + +[core 3s1] +name=p2 +task=0 +mode=gen +tx port=p2 +bps=1250000000 +pkt inline=00 00 03 00 00 03 00 00 07 00 00 07 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b + +random=0000000000000000000000XX00XX00XX +rand_offset=26 + +random=0000101X000000000XXX000000000000 +rand_offset=30 + +[core 4s1] +name=p3 +task=0 +mode=gen +tx port=p3 +bps=1250000000 +pkt inline=00 00 04 00 00 04 00 00 08 00 00 08 08 00 45 00 00 38 00 01 00 00 40 2f f7 43 c0 a8 01 01 c0 a8 01 01 20 00 08 00 00 00 00 00 45 00 00 1c 00 01 00 00 40 11 88 f5 17 18 19 1a c0 a8 01 01 13 88 13 88 00 08 e6 f2 +random=000000000XXXXXXX +rand_offset=40 diff --git a/VNFs/DPPD-PROX/gen/l3fwd-gen.cfg b/VNFs/DPPD-PROX/gen/l3fwd-gen.cfg new file mode 100644 index 00000000..4d830043 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/l3fwd-gen.cfg @@ -0,0 +1,82 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the 
"License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +[port 1] +name=p1 +mac=00:00:00:00:00:02 +[port 2] +name=p2 +mac=00:00:00:00:00:03 +[port 3] +name=p3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Routing Gen + +[core 0s1] +mode=master + +[core 1s1] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 0a 00 00 00 13 88 13 88 00 08 55 7b +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=30 + +[core 2s1] +name=p1 +task=0 +mode=gen +tx port=p1 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 0a 00 00 00 13 88 13 88 00 08 55 7b +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=30 + +[core 3s1] +name=p2 +task=0 +mode=gen +tx port=p2 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 0a 00 00 00 13 88 13 88 00 08 55 7b +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=30 + +[core 4s1] +name=p3 +task=0 +mode=gen +tx port=p3 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 0a 00 00 00 13 88 13 88 00 08 55 7b +random=0000101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=30 diff --git a/VNFs/DPPD-PROX/gen/lb_5tuple-gen.cfg 
b/VNFs/DPPD-PROX/gen/lb_5tuple-gen.cfg new file mode 100644 index 00000000..65d352a3 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/lb_5tuple-gen.cfg @@ -0,0 +1,82 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +[port 1] +name=p1 +mac=00:00:00:00:00:02 +[port 2] +name=p2 +mac=00:00:00:00:00:03 +[port 3] +name=p3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=5-tuple Gen + +[core 0s1] +mode=master + +[core 1s1] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 47 00 f7 7d 00 00 00 00 00 00 00 00 00 00 00 00 77 23 55 7b +random=XXX00000 +rand_offset=23 +random=000000000000000000000000000XXXXX +rand_offset=26 +random=000000000000000000000000000XXXXX +rand_offset=30 +random=00000000000XXXXX00000000000XXXXX +rand_offset=34 + +[core 2s1] +name=p0 +task=0 +mode=nop +rx port=p0 + +[core 3s1] +name=p1 +task=0 +mode=nop +rx port=p1 + +[core 4s1] +name=p2 +task=0 +mode=nop +rx port=p2 + +[core 5s1] +name=p3 +task=0 +mode=nop +rx port=p3 diff --git a/VNFs/DPPD-PROX/gen/lw_aftr-gen.cfg b/VNFs/DPPD-PROX/gen/lw_aftr-gen.cfg new file mode 100644 index 00000000..a9aad397 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/lw_aftr-gen.cfg @@ -0,0 +1,106 @@ +;; +;; Copyright (c) 
2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=inet_0 +mac=00:00:00:00:01:01 +[port 2] +name=lwB4_0 +mac=00:00:00:00:00:02 +[port 4] +name=inet_1 +mac=00:00:00:00:01:03 +[port 6] +name=lwB4_1 +mac=00:00:00:00:00:04 + +[variables] +$tun_pcap=./lwAFTR_tun_100k.pcap +$inet_pcap=./lwAFTR_inet_100k.pcap + +[defaults] +mempool size=16K + +[global] +start time=20 +name=Gen lwAFTR + +[core 0s0] +mode=master + +[core 1s0,2s0] +name=tun_0 +task=0 +mode=gen +tx port=lwB4_0 +pcap file=$tun_pcap +lat pos=58 + +[core 3s0,4s0] +name=inet_0 +task=0 +mode=gen +tx port=inet_0 +pcap file=$inet_pcap +lat pos=18 + +[core 1s1,2s1] +name=tun_1 +task=0 +mode=gen +tx port=lwB4_1 +pcap file=$tun_pcap +lat pos=58 + +[core 3s1,4s1] +name=inet_1 +task=0 +mode=gen +tx port=inet_1 +pcap file=$inet_pcap +lat pos=18 + +[core 5s0] +name=lat_in0 +task=0 +mode=lat +rx port=inet_0 +lat pos=18 + +[core 6s0] +name=lat_tun0 +task=0 +mode=lat +rx port=lwB4_0 +lat pos=58 + +[core 5s1] +name=lat_in1 +task=0 +mode=lat +rx port=inet_1 +lat pos=18 + +[core 6s1] +name=lat_tun1 +task=0 +mode=lat +rx port=lwB4_1 +lat pos=58 diff --git a/VNFs/DPPD-PROX/gen/nop-gen.cfg b/VNFs/DPPD-PROX/gen/nop-gen.cfg new file mode 100644 index 00000000..8c801034 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/nop-gen.cfg @@ -0,0 +1,71 @@ +;; +;; Copyright (c) 2010-2017 Intel 
Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +[port 1] +name=p1 +mac=00:00:00:00:00:02 +[port 2] +name=p2 +mac=00:00:00:00:00:03 +[port 3] +name=p3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Basic Gen + +[core 0s0] +mode=master + +[core 1s0] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b +[core 2s0] +name=p1 +task=0 +mode=gen +tx port=p1 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b +[core 3s0] +name=p2 +task=0 +mode=gen +tx port=p2 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b +[core 4s0] +name=p3 +task=0 +mode=gen +tx port=p3 +bps=1250000000 +pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d c0 a8 01 01 c0 a8 01 01 13 88 13 88 00 08 55 7b diff --git a/VNFs/DPPD-PROX/gen/nsh-gen.cfg b/VNFs/DPPD-PROX/gen/nsh-gen.cfg new file mode 100644 index 00000000..8502d0ef --- /dev/null +++ b/VNFs/DPPD-PROX/gen/nsh-gen.cfg @@ -0,0 +1,50 @@ +;; +;; Copyright (c) 2010-2017 
Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +[port 1] +name=p1 +mac=00:00:00:00:00:02 +[port 2] +name=p2 +mac=00:00:00:00:00:03 +[port 3] +name=p3 +mac=00:00:00:00:00:04 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Basic Gen + +[core 0s1] +mode=master + +[core 1s1] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=68 05 ca 30 6b d0 68 05 ca 30 6c b0 08 00 45 00 04 20 00 00 40 00 40 11 a5 fd c8 02 00 65 c8 02 00 66 9c c4 12 b6 04 0c 00 00 0c 40 00 04 00 00 00 00 40 06 01 03 00 03 e9 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 00 07 00 00 00 01 00 06 00 08 00 45 00 03 d6 00 00 00 00 40 06 48 15 0a 00 00 01 c0 a8 64 64 00 00 00 00 00 00 00 00 00 00 00 00 50 00 00 00 d9 b0 00 00 00 01 02 03 04 05 06 07 08 09 0a diff --git a/VNFs/DPPD-PROX/gen/pe-4ports-gen.cfg b/VNFs/DPPD-PROX/gen/pe-4ports-gen.cfg new file mode 100644 index 00000000..c7a01615 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/pe-4ports-gen.cfg @@ -0,0 +1,239 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:02 +[port 2] +name=cpe1 +mac=00:00:00:00:00:03 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 + + +[variables] +$up_size=60 +$dn_size=60 +[defaults] +mempool size=4K + +[global] +start time=5 +name=PE gen +[core 0s1] +mode=master + +[core 1s1] +name=cpe0 +task=0 +mode=gen +tx port=cpe0 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d +random=000000000000000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000000XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + +pkt size=$up_size +lat pos=50 + + +[core 2s1] +name=cpe1 +task=0 +mode=gen +tx port=cpe1 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d + +random=000000000010000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000010XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + + 
+pkt size=$up_size +lat pos=50 + +[core 3s1] +name=cpe0 +task=0 +mode=gen +tx port=cpe0 +bps=625000000 + +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d + +random=000000000001000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000001XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + + +pkt size=$up_size +lat pos=50 + +[core 4s1] +name=cpe1 +task=0 +mode=gen +tx port=cpe1 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d + +random=000000000011000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000011XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + +pkt size=$up_size +lat pos=50 + + +[core 5s1] +name=inet0 +task=0 +mode=gen +tx port=inet0 +bps=1250000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 20 00 01 00 00 40 11 77 fa c0 a8 80 80 c0 a8 01 01 00 35 00 35 00 08 fc 9d + +random=110000001010100000XXXXXXXXXXXXXX +rand_offset=34 + +random=XXXXXX00 +rand_offset=19 + +pkt size=$dn_size +lat pos=46 + +[core 6s1] +name=inet1 +task=0 +mode=gen +tx port=inet1 +bps=1250000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 20 00 01 00 00 40 11 77 fa c0 a8 80 80 c0 a8 01 01 00 35 00 35 00 08 fc 9d + +random=110000001010100000XXXXXXXXXXXXXX +rand_offset=34 + +random=XXXXXX00 +rand_offset=19 + +pkt size=$dn_size +lat pos=46 + +[core 7s1] +name=none +task=0 +mode=lat +rx port=cpe0 +lat pos=50 + 
+[core 8s1] +name=none +task=0 +mode=lat +rx port=cpe1 +lat pos=50 + +[core 9s1] +name=none +task=0 +mode=lat +rx port=inet0 +lat pos=46 + +[core 10s1] +name=none +task=0 +mode=lat +rx port=inet1 +lat pos=46 diff --git a/VNFs/DPPD-PROX/gen/pe-8ports-gen.cfg b/VNFs/DPPD-PROX/gen/pe-8ports-gen.cfg new file mode 100644 index 00000000..461fd4b0 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/pe-8ports-gen.cfg @@ -0,0 +1,314 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=cpe0 +mac=00:00:00:00:00:01 +[port 1] +name=inet0 +mac=00:00:00:00:00:02 +[port 2] +name=cpe1 +mac=00:00:00:00:00:03 +[port 3] +name=inet1 +mac=00:00:00:00:00:04 + + +[port 4] +name=cpe2 +mac=00:00:00:00:00:01 +[port 5] +name=inet2 +mac=00:00:00:00:00:02 +[port 6] +name=cpe3 +mac=00:00:00:00:00:03 +[port 7] +name=inet3 +mac=00:00:00:00:00:04 + +[variables] +$up_size=60 +$dn_size=60 +[defaults] +mempool size=4K + +[global] +start time=5 +name=PE gen +[core 0s1] +mode=master + +[core 1s1,1s1h] +name=cpe0 +task=0 +mode=gen +tx port=cpe0 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d +random=000000000000000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000000XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + +pkt size=$up_size +lat pos=50 + + +[core 2s1,2s1h] +name=cpe1 +task=0 +mode=gen +tx port=cpe1 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d + +random=000000000010000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000010XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + + +pkt size=$up_size +lat pos=50 + +[core 3s1,3s1h] +name=inet0 +task=0 +mode=gen +tx port=inet0 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 20 00 01 00 00 40 11 77 fa c0 a8 80 80 c0 a8 01 01 
00 35 00 35 00 08 fc 9d + +random=110000001010100000XXXXXXXXXXXXXX +rand_offset=34 + +random=XXXXXX00 +rand_offset=19 + + +pkt size=$dn_size +lat pos=46 + +[core 4s1,4s1h] +name=inet1 +task=0 +mode=gen +tx port=inet1 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 20 00 01 00 00 40 11 77 fa c0 a8 80 80 c0 a8 01 01 00 35 00 35 00 08 fc 9d + +random=110000001010100000XXXXXXXXXXXXXX +rand_offset=34 + +random=XXXXXX00 +rand_offset=19 + +pkt size=$dn_size +lat pos=46 + +[core 5s1] +name=none +task=0 +mode=lat +rx port=cpe0 +lat pos=50 + +[core 5s1h] +name=none +task=0 +mode=lat +rx port=cpe1 +lat pos=50 + +[core 6s1] +name=none +task=0 +mode=lat +rx port=inet0 +lat pos=46 + +[core 6s1h] +name=none +task=0 +mode=lat +rx port=inet1 +lat pos=46 + +[core 1s0,1s0h] +name=cpe2 +task=0 +mode=gen +tx port=cpe2 +bps=625000000 + +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d + +random=000000000001000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000001XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + + +pkt size=$up_size +lat pos=50 + +[core 2s0,2s0h] +name=cpe3 +task=0 +mode=gen +tx port=cpe3 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 a8 00 01 81 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 f7 79 c0 a8 01 01 c0 a8 01 01 00 35 00 35 00 08 7c 1d + +random=000000000011000X +rand_offset=14 + +random=00000000XXXXXXXX +rand_offset=18 + +random=11000000101010000011XXXXXXXXXXXX +rand_offset=34 + +random=0X00101XXXXXXXXXXXXX0000XXXXXXXX +rand_offset=38 + +random=XXXXXX00 +rand_offset=23 + +random=000000000000XXX0 +rand_offset=42 + +random=0000000XXXXXXXXX +rand_offset=44 + +pkt size=$up_size +lat pos=50 + +[core 
3s0,3s0h] +name=inet2 +task=0 +mode=gen +tx port=inet2 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 20 00 01 00 00 40 11 77 fa c0 a8 80 80 c0 a8 01 01 00 35 00 35 00 08 fc 9d + +random=110000001010100000XXXXXXXXXXXXXX +rand_offset=34 + +random=XXXXXX00 +rand_offset=19 + +pkt size=$dn_size +lat pos=46 + +[core 4s0,4s0h] +name=inet3 +task=0 +mode=gen +tx port=inet3 +bps=625000000 +pkt inline=ff ff ff ff ff ff 00 00 00 00 00 00 88 47 00 00 31 00 45 00 00 20 00 01 00 00 40 11 77 fa c0 a8 80 80 c0 a8 01 01 00 35 00 35 00 08 fc 9d + +random=110000001010100000XXXXXXXXXXXXXX +rand_offset=34 + +random=XXXXXX00 +rand_offset=19 + +pkt size=$dn_size +lat pos=46 + +[core 5s0] +name=none +task=0 +mode=lat +rx port=cpe2 +lat pos=50 + +[core 5s0h] +name=none +task=0 +mode=lat +rx port=cpe3 +lat pos=50 + +[core 6s0] +name=none +task=0 +mode=lat +rx port=inet2 +lat pos=46 + +[core 6s0h] +name=none +task=0 +mode=lat +rx port=inet3 +lat pos=46 diff --git a/VNFs/DPPD-PROX/gen/vRouter-gen-4ports.cfg b/VNFs/DPPD-PROX/gen/vRouter-gen-4ports.cfg new file mode 100644 index 00000000..403ac7df --- /dev/null +++ b/VNFs/DPPD-PROX/gen/vRouter-gen-4ports.cfg @@ -0,0 +1,179 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +rx desc=$rxd +tx desc=$txd +[port 1] +name=p1 +mac=00:00:00:00:00:02 +rx desc=$rxd +tx desc=$txd +[port 2] +name=p2 +mac=00:00:00:00:00:03 +rx desc=$rxd +tx desc=$txd +[port 3] +name=p3 +mac=00:00:00:00:00:04 +rx desc=$rxd +tx desc=$txd + +[variables] +$bulk=8 +$rxd=1024 +$txd=256 +$c1=1s1,1s1h,9s1,9s1h +$c2=2s1,2s1h,10s1,10s1h +$c3=3s1,3s1h,11s1,11s1h +$c4=4s1,4s1h,12s1,12s1h +$r1=5s1 +$r2=6s1 +$r3=7s1 +$r4=8s1 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Basic Gen + +[core 0s1] +mode=master + +[core $c1] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 38 00 00 01 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 00 00 11 00 00 02 42 00 00 02 13 88 13 88 00 0c 00 00 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c2] +name=p1 +task=0 +mode=gen +tx port=p1 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 39 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 19 00 00 02 43 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c3] +name=p2 +task=0 +mode=gen +tx port=p2 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 44 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 01 00 00 02 40 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c4] +name=p3 +task=0 +mode=gen +tx port=p3 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 45 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 09 00 00 02 
41 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $r1] +name=r1 +task=0 +mode=arp +rx port=p0 +tx port=p0 +tx cores=(${r1})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r2] +name=r2 +task=0 +mode=arp +rx port=p1 +tx port=p1 +tx cores=(${r2})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r3] +name=r3 +task=0 +mode=arp +rx port=p2 +tx port=p2 +tx cores=(${r3})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r4] +name=r4 +task=0 +mode=arp +rx port=p3 +tx port=p3 +tx cores=(${r4})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 diff --git a/VNFs/DPPD-PROX/gen/vRouter-gen.cfg b/VNFs/DPPD-PROX/gen/vRouter-gen.cfg new file mode 100644 index 00000000..c02183c0 --- /dev/null +++ b/VNFs/DPPD-PROX/gen/vRouter-gen.cfg @@ -0,0 +1,323 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. +;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. 
+;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[port 0] +name=p0 +mac=00:00:00:00:00:01 +rx desc=$rxd +tx desc=$txd +[port 1] +name=p1 +mac=00:00:00:00:00:02 +rx desc=$rxd +tx desc=$txd +[port 2] +name=p2 +mac=00:00:00:00:00:03 +rx desc=$rxd +tx desc=$txd +[port 3] +name=p3 +mac=00:00:00:00:00:04 +rx desc=$rxd +tx desc=$txd +[port 4] +name=p4 +mac=00:00:00:00:00:05 +rx desc=$rxd +tx desc=$txd +[port 5] +name=p5 +mac=00:00:00:00:00:06 +rx desc=$rxd +tx desc=$txd +[port 6] +name=p6 +mac=00:00:00:00:00:07 +rx desc=$rxd +tx desc=$txd +[port 7] +name=p7 +mac=00:00:00:00:00:08 +rx desc=$rxd +tx desc=$txd + +[variables] +$bulk=8 +$rxd=1024 +$txd=256 +$c1=1s1,1s1h,9s1,9s1h +$c2=2s1,2s1h,10s1,10s1h +$c3=3s1,3s1h,11s1,11s1h +$c4=4s1,4s1h,12s1,12s1h +$c5=5s1,5s1h,13s1,13s1h +$c6=6s1,6s1h,14s1,14s1h +$c7=7s1,7s1h,15s1,15s1h +$c8=8s1,8s1h,16s1,16s1h +$r1=1s0 +$r2=2s0 +$r3=3s0 +$r4=4s0 +$r5=5s0 +$r6=6s0 +$r7=7s0 +$r8=10s0 + +[defaults] +mempool size=4K + +[global] +start time=5 +name=Basic Gen + +[core 0s1] +mode=master + +[core $c1] +name=p0 +task=0 +mode=gen +tx port=p0 +bps=1250000000 +pkt inline=00 1b 21 b1 23 14 00 00 01 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 00 00 21 00 00 02 44 00 00 02 13 88 13 88 00 0c 00 00 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c2] +name=p1 +task=0 +mode=gen +tx port=p1 +bps=1250000000 +pkt inline=00 1b 21 b1 23 15 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 29 00 00 02 45 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c3] +name=p2 +task=0 +mode=gen +tx port=p2 +bps=1250000000 +pkt inline=90 e2 ba a7 64 44 00 00 02 00 00 02 08 00 45 00 00 20 00 01 
00 00 40 11 00 00 31 00 00 02 46 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c4] +name=p3 +task=0 +mode=gen +tx port=p3 +bps=1250000000 +pkt inline=90 e2 ba a7 64 45 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 39 00 00 02 47 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c5] +name=p4 +task=0 +mode=gen +tx port=p4 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 38 00 00 01 00 00 01 08 00 45 00 00 20 00 01 00 00 40 11 00 00 11 00 00 02 42 00 00 02 13 88 13 88 00 0c 00 00 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c6] +name=p5 +task=0 +mode=gen +tx port=p5 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 39 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 19 00 00 02 43 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c7] +name=p6 +task=0 +mode=gen +tx port=p6 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 44 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 01 00 00 02 40 00 00 02 13 88 13 88 00 0c 55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $c8] +name=p7 +task=0 +mode=gen +tx port=p7 +bps=1250000000 +pkt inline=90 e2 ba a5 a4 45 00 00 02 00 00 02 08 00 45 00 00 20 00 01 00 00 40 11 00 00 09 00 00 02 41 00 00 02 13 88 13 88 00 0c 
55 7b 00 00 00 00 00 00 00 00 00 +min bulk size=$bulk +max bulk size=$bulk +random=0XXXXXXXXXXXXX10 +rand_offset=34 +random=0XXXXXXXXXXXXX10 +rand_offset=36 +lat pos=42 +packet id pos=46 + +[core $r1] +name=r1 +task=0 +mode=arp +rx port=p0 +tx port=p0 +tx cores=(${r1})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r2] +name=r2 +task=0 +mode=arp +rx port=p1 +tx port=p1 +tx cores=(${r2})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r3] +name=r3 +task=0 +mode=arp +rx port=p2 +tx port=p2 +tx cores=(${r3})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r4] +name=r4 +task=0 +mode=arp +rx port=p3 +tx port=p3 +tx cores=(${r4})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r5] +name=r5 +task=0 +mode=arp +rx port=p4 +tx port=p4 +tx cores=(${r5})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r6] +name=r6 +task=0 +mode=arp +rx port=p5 +tx port=p5 +tx cores=(${r6})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r7] +name=r7 +task=0 +mode=arp +rx port=p6 +tx port=p6 +tx cores=(${r7})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 + +[core $r8] +name=r8 +task=0 +mode=arp +rx port=p7 +tx port=p7 +tx cores=(${r8})t1 +task=1 +mode=lat +rx ring=yes +lat pos=42 +packet id pos=46 diff --git a/VNFs/DPPD-PROX/genl4_bundle.c b/VNFs/DPPD-PROX/genl4_bundle.c new file mode 100644 index 00000000..7d4a0141 --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_bundle.c @@ -0,0 +1,369 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <rte_hash.h> +#include <rte_memory.h> +#include <rte_hash_crc.h> +#include <rte_cycles.h> +#include <rte_version.h> + +#include "prox_malloc.h" +#include "prox_assert.h" +#include "cdf.h" +#include "defines.h" +#include "genl4_bundle.h" +#include "log.h" +#include "pkt_parser.h" +#include "prox_lua_types.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#define RTE_CACHE_LINE_ROUNDUP CACHE_LINE_ROUNDUP +#endif + +/* zero on success */ +int bundle_ctx_pool_create(const char *name, uint32_t n_elems, struct bundle_ctx_pool *ret, uint32_t *occur, uint32_t n_occur, struct bundle_cfg *cfg, int socket_id) +{ + size_t memsize; + uint8_t *mem; + + const struct rte_hash_parameters params = { + .name = name, + .entries = rte_align32pow2(n_elems) * 8, + //.bucket_entries = 8, + .key_len = sizeof(struct pkt_tuple), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = socket_id, + }; + + ret->hash = rte_hash_create(¶ms); + if (NULL == ret->hash) + return -1; + + uint32_t rand_pool_size = 0, tot_occur = 0; + + if (occur) { + for (uint32_t i = 0; i < n_occur; ++i) { + tot_occur += occur[i]; + } + + rand_pool_size = (n_elems + (tot_occur - 1))/tot_occur*tot_occur; + } + + memsize = 0; + memsize += RTE_CACHE_LINE_ROUNDUP(params.entries * sizeof(ret->hash_entries[0])); + memsize += RTE_CACHE_LINE_ROUNDUP(n_elems * sizeof(ret->free_bundles[0])); + memsize += RTE_CACHE_LINE_ROUNDUP(n_elems * sizeof(ret->bundles[0])); + if (occur) + memsize += 
RTE_CACHE_LINE_ROUNDUP(rand_pool_size * sizeof(ret->occur)); + mem = prox_zmalloc(memsize, socket_id); + if (NULL == mem) + return -1; + + ret->hash_entries = (struct bundle_ctx **) mem; + mem += RTE_CACHE_LINE_ROUNDUP(params.entries * sizeof(ret->hash_entries[0])); + ret->free_bundles = (struct bundle_ctx **) mem; + mem += RTE_CACHE_LINE_ROUNDUP(n_elems * sizeof(ret->free_bundles[0])); + if (occur) { + ret->occur = (uint32_t *)mem; + mem += RTE_CACHE_LINE_ROUNDUP(rand_pool_size * sizeof(ret->occur)); + + ret->seed = rte_rdtsc(); + + size_t cur_occur = 0; + size_t j = 0; + + for (uint32_t i = 0; i < rand_pool_size; ++i) { + while (j >= occur[cur_occur]) { + cur_occur++; + if (cur_occur == n_occur) + cur_occur = 0; + j = 0; + } + j++; + ret->occur[i] = cur_occur; + } + ret->n_occur = rand_pool_size; + } + ret->bundles = (struct bundle_ctx *) mem; + + ret->bundle_cfg = cfg; + for (unsigned i = 0; i < n_elems; ++i) { + ret->free_bundles[i] = &ret->bundles[i]; + } + ret->n_free_bundles = n_elems; + ret->tot_bundles = n_elems; + + return 0; +} + +struct bundle_ctx *bundle_ctx_pool_get(struct bundle_ctx_pool *p) +{ + if (p->n_free_bundles > 0) + return p->free_bundles[--p->n_free_bundles]; + return NULL; +} + +static struct bundle_cfg *bundle_ctx_get_cfg(struct bundle_ctx_pool *p) +{ + uint32_t rand = 0; + + /* get rand in [0, RAND_MAX rounded down] */ + do { + rand = rand_r(&p->seed); + } while (rand >= RAND_MAX/p->n_occur*p->n_occur); + + rand /= RAND_MAX/p->n_occur; + + PROX_ASSERT(p->n_occur); + PROX_ASSERT(rand < p->n_occur); + + uint32_t r = p->occur[rand]; + p->occur[rand] = p->occur[--p->n_occur]; + + return &p->bundle_cfg[r]; +} + +static void bundle_ctx_put_cfg(struct bundle_ctx_pool *p, const struct bundle_cfg *cfg) +{ + if (p->occur) { + uint32_t r = cfg - p->bundle_cfg; + p->occur[p->n_occur++] = r; + } +} + +struct bundle_ctx *bundle_ctx_pool_get_w_cfg(struct bundle_ctx_pool *p) +{ + if (p->n_free_bundles > 0) { + struct bundle_ctx *ret = 
p->free_bundles[--p->n_free_bundles]; + ret->cfg = bundle_ctx_get_cfg(p); + return ret; + } + + return NULL; +} + +void bundle_ctx_pool_put(struct bundle_ctx_pool *p, struct bundle_ctx *bundle) +{ + bundle_ctx_put_cfg(p, bundle->cfg); + p->free_bundles[p->n_free_bundles++] = bundle; +} + +static void bundle_cleanup(struct bundle_ctx *bundle) +{ + if (bundle->heap_ref.elem != NULL) { + heap_del(bundle->heap, &bundle->heap_ref); + } +} + +static int bundle_iterate_streams(struct bundle_ctx *bundle, struct bundle_ctx_pool *pool, unsigned *seed, struct l4_stats *l4_stats) +{ + enum l4gen_peer peer; + int ret = 0, old; + + while (bundle->ctx.stream_cfg->is_ended(&bundle->ctx)) { + + if (bundle->ctx.stream_cfg->proto == IPPROTO_TCP) { + if (bundle->ctx.retransmits == 0) + l4_stats->tcp_finished_no_retransmit++; + else + l4_stats->tcp_finished_retransmit++; + } + else + l4_stats->udp_finished++; + + if (bundle->stream_idx + 1 != bundle->cfg->n_stream_cfgs) { + ret = 1; + bundle->stream_idx++; + + stream_ctx_reset_move(&bundle->ctx, bundle->cfg->stream_cfgs[bundle->stream_idx]); + + /* Update tuple */ + old = rte_hash_del_key(pool->hash, &bundle->tuple); + if (old < 0) { + plogx_err("Failed to delete key while trying to change tuple: %d (%s)\n",old, strerror(-old)); + } + plogx_dbg("Moving to stream with idx %d\n", bundle->stream_idx); + + /* In case there are multiple streams, clients + randomized but ports fixed, it is still + possible to hit an infinite loop here. The + situations is hit if a client:port is + connected to a server:port in one of the + streams while client:port is regenerated + for the first stream. There is no conflict + yet since the server:port is + different. Note that this is bug since a + client:port can only have one open + connection. 
*/ + int retries = 0; + do { + bundle_create_tuple(&bundle->tuple, &bundle->cfg->clients, bundle->ctx.stream_cfg, 0, seed); + + ret = rte_hash_lookup(pool->hash, (const void *)&bundle->tuple); + if (++retries == 1000) { + plogx_warn("Already tried 1K times\n"); + plogx_warn("Going from %d to %d\n", bundle->stream_idx -1, bundle->stream_idx); + } + } while (ret >= 0); + + ret = rte_hash_add_key(pool->hash, &bundle->tuple); + if (ret < 0) { + plogx_err("Failed to add key while moving to next stream!\n"); + return -1; + } + pool->hash_entries[ret] = pool->hash_entries[old]; + + if (bundle->ctx.stream_cfg->proto == IPPROTO_TCP) + l4_stats->tcp_created++; + else + l4_stats->udp_created++; + } + else { + int a = rte_hash_del_key(pool->hash, &bundle->tuple); + PROX_PANIC(a < 0, "Del failed (%d)! during finished all bundle (%d)\n", a, bundle->cfg->n_stream_cfgs); + bundle_cleanup(bundle); + bundle_ctx_pool_put(pool, bundle); + + return -1; + } + } + return ret; +} + +void bundle_create_tuple(struct pkt_tuple *tp, const struct host_set *clients, const struct stream_cfg *stream_cfg, int rnd_ip, unsigned *seed) +{ + tp->dst_port = clients->port; + tp->dst_port &= ~clients->port_mask; + tp->dst_port |= rand_r(seed) & clients->port_mask; + + if (rnd_ip) { + tp->dst_addr = clients->ip; + tp->dst_addr &= ~clients->ip_mask; + tp->dst_addr |= rand_r(seed) & clients->ip_mask; + } + + tp->src_addr = stream_cfg->servers.ip; + tp->src_port = stream_cfg->servers.port; + plogx_dbg("bundle_create_tuple() with proto = %x, %d\n", stream_cfg->proto, rnd_ip); + tp->proto_id = stream_cfg->proto; + + tp->l2_types[0] = 0x0008; +} + +void bundle_init_w_cfg(struct bundle_ctx *bundle, const struct bundle_cfg *cfg, struct heap *heap, enum l4gen_peer peer, unsigned *seed) +{ + bundle->cfg = cfg; + bundle_init(bundle, heap, peer, seed); +} + +void bundle_init(struct bundle_ctx *bundle, struct heap *heap, enum l4gen_peer peer, unsigned *seed) +{ + bundle->heap_ref.elem = NULL; + bundle->heap = heap; + 
memset(&bundle->ctx, 0, sizeof(bundle->ctx)); + // TODO; assert that there is at least one stream + bundle->stream_idx = 0; + + stream_ctx_init(&bundle->ctx, peer, bundle->cfg->stream_cfgs[bundle->stream_idx], &bundle->tuple); + bundle_create_tuple(&bundle->tuple, &bundle->cfg->clients, bundle->ctx.stream_cfg, peer == PEER_CLIENT, seed); +} + +void bundle_expire(struct bundle_ctx *bundle, struct bundle_ctx_pool *pool, struct l4_stats *l4_stats) +{ + struct pkt_tuple *pt = &bundle->tuple; + + plogx_dbg("Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", + IPv4_BYTES(((uint8_t*)&pt->dst_addr)), + rte_bswap16(pt->dst_port), + IPv4_BYTES(((uint8_t*)&pt->src_addr)), + rte_bswap16(pt->src_port)); + + int a = rte_hash_del_key(pool->hash, bundle); + if (a < 0) { + plogx_err("Del failed with error %d: '%s'\n", a, strerror(-a)); + plogx_err("ended = %d\n", bundle->ctx.flags & STREAM_CTX_F_TCP_ENDED); + } + + if (bundle->ctx.stream_cfg->proto == IPPROTO_TCP) + l4_stats->tcp_expired++; + else + l4_stats->udp_expired++; + + bundle_cleanup(bundle); + bundle_ctx_pool_put(pool, bundle); +} + +int bundle_proc_data(struct bundle_ctx *bundle, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struct bundle_ctx_pool *pool, unsigned *seed, struct l4_stats *l4_stats) +{ + int ret; + uint64_t next_tsc; + + if (bundle->heap_ref.elem != NULL) { + heap_del(bundle->heap, &bundle->heap_ref); + } + + if (bundle_iterate_streams(bundle, pool, seed, l4_stats) < 0) + return -1; + + uint32_t retx_before = bundle->ctx.retransmits; + next_tsc = UINT64_MAX; + ret = bundle->ctx.stream_cfg->proc(&bundle->ctx, mbuf, l4_meta, &next_tsc); + + if (bundle->ctx.flags & STREAM_CTX_F_EXPIRED) { + bundle_expire(bundle, pool, l4_stats); + return -1; + } + else if (next_tsc != UINT64_MAX) { + heap_add(bundle->heap, &bundle->heap_ref, rte_rdtsc() + next_tsc); + } + l4_stats->tcp_retransmits += bundle->ctx.retransmits - retx_before; + + if (bundle_iterate_streams(bundle, pool, seed, l4_stats) > 0) { + if 
(bundle->heap_ref.elem != NULL) { + heap_del(bundle->heap, &bundle->heap_ref); + } + heap_add(bundle->heap, &bundle->heap_ref, rte_rdtsc()); + } + + return ret; +} + +uint32_t bundle_cfg_length(struct bundle_cfg *cfg) +{ + uint32_t ret = 0; + + for (uint32_t i = 0; i < cfg->n_stream_cfgs; ++i) { + ret += cfg->stream_cfgs[i]->n_bytes; + } + + return ret; +} + +uint32_t bundle_cfg_max_n_segments(struct bundle_cfg *cfg) +{ + uint32_t ret = 0; + uint32_t cur; + + for (uint32_t i = 0; i < cfg->n_stream_cfgs; ++i) { + cur = stream_cfg_max_n_segments(cfg->stream_cfgs[i]); + ret = ret > cur? ret: cur; + } + + return ret; +} diff --git a/VNFs/DPPD-PROX/genl4_bundle.h b/VNFs/DPPD-PROX/genl4_bundle.h new file mode 100644 index 00000000..94ceed91 --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_bundle.h @@ -0,0 +1,89 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _GENL4_BUNDLE_H_ +#define _GENL4_BUNDLE_H_ + +#include "heap.h" +#include "genl4_stream.h" +#include "lconf.h" + +/* Configured once and used during packet generation. The structure + describes a single set of consecutive streams. When used at the + server side, it only contains a simple stream to represent a + service. */ +struct bundle_cfg { + struct host_set clients; + uint32_t n_stream_cfgs; + struct stream_cfg **stream_cfgs; +}; + +/* A bundle_ctx represents a an active stream between a client and a + server of servers. 
*/ +struct bundle_ctx { + struct pkt_tuple tuple; /* Client IP/PORT generated once at bundle creation time, client PORT and server IP/PORT created when stream_idx++ */ + struct heap_ref heap_ref; /* Back reference into heap */ + struct heap *heap; /* timer management */ + + const struct bundle_cfg *cfg; /* configuration time read only structure */ + + struct stream_ctx ctx; /* state management info for stream_cfg (reset when stream_idx++) */ + uint32_t stream_idx; /* iterate through cfg->straem_cfgs */ +}; + +#define BUNDLE_CTX_UPCAST(r) ((struct bundle_ctx *)((uint8_t *)r - offsetof(struct bundle_ctx, heap_ref))) + +struct bundle_ctx_pool { + struct rte_hash *hash; + struct bundle_ctx **hash_entries; + struct bundle_ctx **free_bundles; + struct bundle_ctx *bundles; /* Memory containing all communications */ + uint32_t *occur; + struct bundle_cfg *bundle_cfg; + uint32_t n_occur; + uint32_t seed; + uint32_t n_free_bundles; + uint32_t tot_bundles; +}; + +struct l4_stats { + uint64_t bundles_created; + uint64_t tcp_finished_no_retransmit; + uint64_t tcp_finished_retransmit; + uint64_t udp_finished; + uint64_t tcp_created; + uint64_t udp_created; + uint64_t tcp_expired; + uint64_t tcp_retransmits; + uint64_t udp_expired; +}; + +struct cdf; +int bundle_ctx_pool_create(const char *name, uint32_t n_elems, struct bundle_ctx_pool *ret, uint32_t *occur, uint32_t n_occur, struct bundle_cfg *cfg, int socket_id); + +struct bundle_ctx *bundle_ctx_pool_get(struct bundle_ctx_pool *p); +struct bundle_ctx *bundle_ctx_pool_get_w_cfg(struct bundle_ctx_pool *p); +void bundle_ctx_pool_put(struct bundle_ctx_pool *p, struct bundle_ctx *bundle); + +void bundle_create_tuple(struct pkt_tuple *tp, const struct host_set *clients, const struct stream_cfg *stream_cfg, int rnd_ip, unsigned *seed); +void bundle_init(struct bundle_ctx *bundle, struct heap *heap, enum l4gen_peer peer, unsigned *seed); +void bundle_init_w_cfg(struct bundle_ctx *bundle, const struct bundle_cfg *cfg, struct heap *heap, 
enum l4gen_peer peer, unsigned *seed); +void bundle_expire(struct bundle_ctx *bundle, struct bundle_ctx_pool *pool, struct l4_stats *l4_stats); +int bundle_proc_data(struct bundle_ctx *bundle, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struct bundle_ctx_pool *pool, unsigned *seed, struct l4_stats *l4_stats); +uint32_t bundle_cfg_length(struct bundle_cfg *cfg); +uint32_t bundle_cfg_max_n_segments(struct bundle_cfg *cfg); + +#endif /* _GENL4_BUNDLE_H_ */ diff --git a/VNFs/DPPD-PROX/genl4_stream.h b/VNFs/DPPD-PROX/genl4_stream.h new file mode 100644 index 00000000..b180765d --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_stream.h @@ -0,0 +1,201 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _GENL4_STREAM_H_ +#define _GENL4_STREAM_H_ + +#include "prox_lua_types.h" +#include "pkt_parser.h" +#include "token_time.h" +#include "quit.h" + +enum tcp_state { + CLOSED, + LISTEN, + SYN_SENT, + SYN_RECEIVED, + ESTABLISHED, + CLOSE_WAIT, + LAST_ACK, + FIN_WAIT, + TIME_WAIT +}; + +static const char *tcp_state_to_str(const enum tcp_state s) +{ + switch(s) { + case CLOSED: + return "CLOSED"; + case LISTEN: + return "LISTEN"; + case SYN_SENT: + return "SYN_SENT"; + case SYN_RECEIVED: + return "SYN_RECEIVED"; + case ESTABLISHED: + return "ESTABLISHED"; + case CLOSE_WAIT: + return "CLOSE_WAIT"; + case LAST_ACK: + return "LAST_ACK"; + case FIN_WAIT: + return "FIN_WAIT"; + case TIME_WAIT: + return "TIME_WAIT"; + default: + return "INVALID_STATE"; + } +} + +#define STREAM_CTX_F_EXPIRED 0x01 +#define STREAM_CTX_F_NEW_DATA 0x02 /* Set on recv to track first ACK of data */ +#define STREAM_CTX_F_TCP_ENDED 0x04 +#define STREAM_CTX_F_TCP_GOT_SYN 0x08 /* Set only once when syn has been received */ +#define STREAM_CTX_F_TCP_GOT_FIN 0x10 /* Set only once when fin has been received */ +#define STREAM_CTX_F_MORE_DATA 0x20 +#define STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS 0x40 + +/* Run-time structure to management state information associated with current stream_cfg. */ +struct stream_ctx { + enum l4gen_peer peer; + uint32_t cur_action; + uint32_t cur_pos[2]; + enum tcp_state tcp_state; + struct token_time token_time; + struct token_time token_time_other; + uint16_t flags; + uint16_t same_state; + uint32_t next_seq; + uint32_t ackd_seq; + uint32_t recv_seq; + uint32_t ackable_data_seq; + uint32_t seq_first_byte; /* seq number - seq_first_byte gives offset within content. */ + uint32_t other_seq_first_byte; /* seq number - seq_first_byte gives offset within content. 
*/ + uint32_t other_mss; + uint64_t sched_tsc; + uint32_t retransmits; + const struct stream_cfg *stream_cfg; /* Current active steam_cfg */ + struct pkt_tuple *tuple; +}; + +struct host_set { + uint32_t ip; + uint32_t ip_mask; + uint16_t port; + uint16_t port_mask; +}; + +struct stream_cfg { + struct peer_data data[2]; + struct host_set servers; // Current implementation only allows mask == 0. (i.e. single server) + struct token_time_cfg tt_cfg[2]; // bytes per period rate + uint16_t proto; + uint64_t tsc_timeout; + uint64_t tsc_timeout_time_wait; + uint32_t n_actions; + uint32_t n_pkts; + uint32_t n_bytes; + int (*proc)(struct stream_ctx *meta, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, uint64_t *next_tsc); + int (*is_ended)(struct stream_ctx *meta); + struct peer_action actions[0]; +}; + +static void scale_for_jitter(uint64_t *to_scale) +{ + (*to_scale) *= 2; +} + +static void reset_token_times(struct stream_ctx *ctx) +{ + const uint64_t now = rte_rdtsc(); + const struct stream_cfg *cfg = ctx->stream_cfg; + enum l4gen_peer peer = ctx->peer; + + token_time_init(&ctx->token_time, &cfg->tt_cfg[peer]); + token_time_reset_full(&ctx->token_time, now); + + token_time_init(&ctx->token_time_other, &cfg->tt_cfg[!peer]); + scale_for_jitter(&ctx->token_time_other.cfg.bytes_max); + token_time_reset_full(&ctx->token_time_other, now); +} + +static void stream_ctx_init(struct stream_ctx *ctx, enum l4gen_peer peer, struct stream_cfg *cfg, struct pkt_tuple *tuple) +{ + ctx->stream_cfg = cfg; + ctx->peer = peer; + ctx->tuple = tuple; + + /* Server's initial state is different from client for + TCP. For now, don't use a specific init function for + TCP/UDP since there is not a lot of difference and to avoid + an additional function pointer. */ + ctx->tcp_state = PEER_CLIENT == peer? 
CLOSED : LISTEN; + ctx->other_mss = 536; /* default 536 as per RFC 879 */ + + reset_token_times(ctx); +} + +static void stream_ctx_reset_move(struct stream_ctx *ctx, struct stream_cfg *cfg) +{ + enum l4gen_peer peer = ctx->peer; + struct pkt_tuple *tuple = ctx->tuple; + + memset(ctx, 0, sizeof(*ctx)); + stream_ctx_init(ctx, peer, cfg, tuple); +} + +static int stream_cfg_calc_max_payload_len(struct stream_cfg *cfg, enum l4gen_peer peer) +{ + const uint32_t l4_hdr_len = cfg->proto == IPPROTO_UDP? + sizeof(struct udp_hdr) : sizeof(struct tcp_hdr); + + return ETHER_MAX_LEN - ETHER_CRC_LEN - cfg->data[peer].hdr_len - l4_hdr_len; +} + +static int stream_cfg_max_n_segments(struct stream_cfg *cfg) +{ + if (cfg->proto == IPPROTO_UDP) + return 1; + + uint32_t ret = 1; + uint32_t cur; + + const uint32_t mss = stream_cfg_calc_max_payload_len(cfg, PEER_CLIENT); + + for (uint32_t i = 0; i < cfg->n_actions; ++i) { + cur = (cfg->actions[i].len + (mss - 1)) / mss; + ret = ret > cur? ret: cur; + } + + return ret; +} + +static int stream_cfg_verify_action(struct stream_cfg *cfg, struct peer_action *action) +{ + if (cfg->proto == IPPROTO_TCP) + return 0; + + uint16_t max_payload_len = stream_cfg_calc_max_payload_len(cfg, action->peer); + + PROX_PANIC(action->len > max_payload_len, + "Action %zu has length %u while for the maximum action length for UDP connections is limited to %u\n", + action - cfg->actions, + action->len, + max_payload_len); + return 0; +} + +#endif /* _GENL4_STREAM_H_ */ diff --git a/VNFs/DPPD-PROX/genl4_stream_tcp.c b/VNFs/DPPD-PROX/genl4_stream_tcp.c new file mode 100644 index 00000000..d05455b7 --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_stream_tcp.c @@ -0,0 +1,965 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_cycles.h> +#include <rte_ether.h> +#include <rte_eth_ctrl.h> + +#include "log.h" +#include "genl4_stream_tcp.h" +#include "prox_assert.h" +#include "mbuf_utils.h" + +static uint64_t tcp_retx_timeout(const struct stream_ctx *ctx) +{ + uint64_t delay = token_time_tsc_until_full(&ctx->token_time_other); + + return delay + ctx->stream_cfg->tsc_timeout; +} + +static uint64_t tcp_resched_timeout(const struct stream_ctx *ctx) +{ + uint64_t delay = token_time_tsc_until_full(&ctx->token_time); + + return delay; +} + +static void tcp_retx_timeout_start(struct stream_ctx *ctx, uint64_t *next_tsc) +{ + uint64_t now = rte_rdtsc(); + + *next_tsc = tcp_retx_timeout(ctx); + ctx->sched_tsc = now + *next_tsc; +} + +static int tcp_retx_timeout_occured(const struct stream_ctx *ctx, uint64_t now) +{ + return ctx->sched_tsc < now; +} + +static void tcp_retx_timeout_resume(const struct stream_ctx *ctx, uint64_t now, uint64_t *next_tsc) +{ + *next_tsc = ctx->sched_tsc - now; +} + +static void tcp_set_retransmit(struct stream_ctx *ctx) +{ + ctx->retransmits++; +} + +struct tcp_option { + uint8_t kind; + uint8_t len; +} __attribute__((packed)); + +void stream_tcp_create_rst(struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struct pkt_tuple *tuple) +{ + struct tcp_hdr *tcp = (struct tcp_hdr *)l4_meta->l4_hdr; + struct ipv4_hdr *ip = ((struct ipv4_hdr *)tcp) - 1; + + ip->src_addr = tuple->dst_addr; + ip->dst_addr = tuple->src_addr; + + tcp->dst_port = tuple->src_port; + tcp->src_port = tuple->dst_port; + + ip->total_length = rte_bswap16(sizeof(struct 
ipv4_hdr) + sizeof(struct tcp_hdr)); + tcp->tcp_flags = TCP_RST_FLAG; + tcp->data_off = ((sizeof(struct tcp_hdr) / 4) << 4); + rte_pktmbuf_pkt_len(mbuf) = l4_meta->payload - rte_pktmbuf_mtod(mbuf, uint8_t *); + rte_pktmbuf_data_len(mbuf) = l4_meta->payload - rte_pktmbuf_mtod(mbuf, uint8_t *); +} + +static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_t tcp_flags, int data_beg, int data_len) +{ + uint8_t *pkt; + + const struct peer_action *act = &ctx->stream_cfg->actions[ctx->cur_action]; + const struct stream_cfg *stream_cfg = ctx->stream_cfg; + + pkt = rte_pktmbuf_mtod(mbuf, uint8_t *); + rte_memcpy(pkt, stream_cfg->data[act->peer].hdr, stream_cfg->data[act->peer].hdr_len); + + struct ipv4_hdr *l3_hdr = (struct ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(struct ipv4_hdr)]; + struct tcp_hdr *l4_hdr = (struct tcp_hdr *)&pkt[stream_cfg->data[act->peer].hdr_len]; + + l3_hdr->src_addr = ctx->tuple->dst_addr; + l3_hdr->dst_addr = ctx->tuple->src_addr; + l3_hdr->next_proto_id = IPPROTO_TCP; + + l4_hdr->src_port = ctx->tuple->dst_port; + l4_hdr->dst_port = ctx->tuple->src_port; + + uint32_t tcp_len = sizeof(struct tcp_hdr); + uint32_t tcp_payload_len = 0; + uint32_t seq_len = 0; + struct tcp_option *tcp_op; + + if (tcp_flags & TCP_RST_FLAG) { + tcp_flags |= TCP_RST_FLAG; + seq_len = 1; + } + else if (tcp_flags & TCP_SYN_FLAG) { + tcp_flags |= TCP_SYN_FLAG; + /* Window scaling */ + + /* TODO: make options come from the stream. 
*/ + tcp_op = (struct tcp_option *)(l4_hdr + 1); + + tcp_op->kind = 2; + tcp_op->len = 4; + *(uint16_t *)(tcp_op + 1) = rte_bswap16(1460); /* TODO: Save this in this_mss */ + + tcp_len += 4; + seq_len = 1; + + ctx->seq_first_byte = ctx->ackd_seq + 1; + } + else if (tcp_flags & TCP_FIN_FLAG) { + tcp_flags |= TCP_FIN_FLAG; + seq_len = 1; + } + + if (tcp_flags & TCP_ACK_FLAG) { + l4_hdr->recv_ack = rte_bswap32(ctx->recv_seq); + tcp_flags |= TCP_ACK_FLAG; + } + else + l4_hdr->recv_ack = 0; + + uint16_t l4_payload_offset = stream_cfg->data[act->peer].hdr_len + tcp_len; + + if (data_len) { + seq_len = data_len; + plogx_dbg("l4 payload offset = %d\n", l4_payload_offset); + rte_memcpy(pkt + l4_payload_offset, stream_cfg->data[act->peer].content + data_beg, data_len); + } + + l4_hdr->sent_seq = rte_bswap32(ctx->next_seq); + l4_hdr->tcp_flags = tcp_flags; /* SYN */ + l4_hdr->rx_win = rte_bswap16(0x3890); // TODO: make this come from stream (config) + //l4_hdr->cksum = ...; + l4_hdr->tcp_urp = 0; + l4_hdr->data_off = ((tcp_len / 4) << 4); /* Highest 4 bits are TCP header len in units of 32 bit words */ + + /* ctx->next_seq = ctx->ackd_seq + seq_len; */ + ctx->next_seq += seq_len; + + /* No payload after TCP header. */ + rte_pktmbuf_pkt_len(mbuf) = l4_payload_offset + data_len; + rte_pktmbuf_data_len(mbuf) = l4_payload_offset + data_len; + + l3_hdr->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + tcp_len + data_len); + plogdx_dbg(mbuf, NULL); + + plogx_dbg("put tcp packet with flags: %s%s%s, (len = %d, seq = %d, ack =%d)\n", + tcp_flags & TCP_SYN_FLAG? "SYN ":"", + tcp_flags & TCP_ACK_FLAG? "ACK ":"", + tcp_flags & TCP_FIN_FLAG? "FIN ":"", + data_len, rte_bswap32(l4_hdr->sent_seq), rte_bswap32(l4_hdr->recv_ack)); +} + +/* Get the length of the reply associated for the next packet. Note + that the packet will come from the other peer. In case the next + packet belongs to the current peer (again), the reply length will + be that of an empty TCP packet (i.e. the ACK). 
*/ +uint16_t stream_tcp_reply_len(struct stream_ctx *ctx) +{ + if (stream_tcp_is_ended(ctx)) + return 0; + else if (ctx->tcp_state != ESTABLISHED) { + if (ctx->tcp_state == SYN_SENT || ctx->tcp_state == LISTEN) { + /* First packet received is a SYN packet. In + the current implementation this packet + contains the TCP option field to set the + MSS. For this, add 4 bytes. */ + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + 4; + } + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr); + } + else if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) { + /* The reply _could_ (due to races, still possibly + receive an old ack) contain data. This means that + in some cases, the prediction of the reply size + will be an overestimate. */ + uint32_t data_beg = ctx->next_seq - ctx->seq_first_byte; + const struct peer_action *act = &ctx->stream_cfg->actions[ctx->cur_action]; + + uint32_t remaining_len = act->len - (data_beg - act->beg); + + if (remaining_len == 0) { + if (ctx->cur_action + 1 != ctx->stream_cfg->n_actions) { + if (ctx->stream_cfg->actions[ctx->cur_action + 1].peer == ctx->peer) + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + else { + uint32_t seq_beg = ctx->recv_seq - ctx->other_seq_first_byte; + uint32_t end = ctx->stream_cfg->actions[ctx->cur_action + 1].beg + + ctx->stream_cfg->actions[ctx->cur_action + 1].len; + uint32_t remaining = end - seq_beg; + uint16_t data_len = remaining > 1460? 
1460: remaining; + + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + data_len; + } + } + else { + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + } + } + else { + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + } + } + else if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) { + uint32_t seq_beg = ctx->recv_seq - ctx->other_seq_first_byte; + uint32_t end = ctx->stream_cfg->actions[ctx->cur_action].beg + + ctx->stream_cfg->actions[ctx->cur_action].len; + uint32_t remaining = end - seq_beg; + uint16_t data_len = remaining > 1460? 1460: remaining; + + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + data_len; + } + else + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); +} + +static void stream_tcp_proc_in_order_data(struct stream_ctx *ctx, struct l4_meta *l4_meta, int *progress_seq) +{ + plogx_dbg("Got data with seq %d (as expected), with len %d\n", ctx->recv_seq, l4_meta->len); + + if (!l4_meta->len) + return; + + const struct peer_action *act = &ctx->stream_cfg->actions[ctx->cur_action]; + enum l4gen_peer peer = act->peer; + /* Since we have received the expected sequence number, the start address will not exceed the cfg memory buffer. 
*/ + uint8_t *content = ctx->stream_cfg->data[peer].content; + uint32_t seq_beg = ctx->recv_seq - ctx->other_seq_first_byte; + uint32_t end = ctx->stream_cfg->actions[ctx->cur_action].beg + ctx->stream_cfg->actions[ctx->cur_action].len; + uint32_t remaining = end - seq_beg; + + if (l4_meta->len > remaining) { + plogx_err("Provided data is too long:\n"); + plogx_err("action.beg = %d, action.len = %d", act->beg, act->len); + plogx_err("tcp seq points at %d in action, l4_meta->len = %d\n", seq_beg, l4_meta->len); + } + else { + if (memcmp(content + seq_beg, l4_meta->payload, l4_meta->len) == 0) { + plogx_dbg("Good payload in %d: %u -> %u\n", ctx->cur_action, ctx->recv_seq, l4_meta->len); + ctx->recv_seq += l4_meta->len; + ctx->cur_pos[peer] += l4_meta->len; + /* Move forward only when this was the last piece of data within current action (i.e. end of received data == end of action data). */ + if (seq_beg + l4_meta->len == act->beg + act->len) { + plogx_dbg("Got last piece in action %d\n", ctx->cur_action); + ctx->cur_action++; + } + else { + plogx_dbg("Got data from %d with len %d, but waiting for more (tot len = %d)!\n", seq_beg, l4_meta->len, act->len); + } + *progress_seq = 1; + ctx->flags |= STREAM_CTX_F_NEW_DATA; + } + else { + plogx_err("ackable = %d, ackd = %d\n", ctx->ackable_data_seq ,ctx->ackd_seq); + plogx_err("Bad payload action[%d]{.len = %d, .peer = %s}\n", ctx->cur_action, act->len, peer == PEER_SERVER? 
"s" : "c"); + plogx_err(" pkt payload len = %d, beginning at %u\n", l4_meta->len, seq_beg); + /* plogx_err(" Payload starts %zu bytes after beginning of l4_hdr\n", l4_meta->payload - l4_meta->l4_hdr); */ + + plogx_err(" payload[0-3] = %02x %02x %02x %02x\n", + l4_meta->payload[0], + l4_meta->payload[1], + l4_meta->payload[2], + l4_meta->payload[3]); + plogx_err(" expect[0-3] = %02x %02x %02x %02x\n", + content[seq_beg + 0], + content[seq_beg + 1], + content[seq_beg + 2], + content[seq_beg + 3]); + } + } +} + +static int stream_tcp_proc_in(struct stream_ctx *ctx, struct l4_meta *l4_meta) +{ + struct tcp_hdr *tcp = NULL; + int got_syn = 0; + int got_ack = 0; + int got_fin = 0; + int got_rst = 0; + + tcp = (struct tcp_hdr *)l4_meta->l4_hdr; + + got_syn = tcp->tcp_flags & TCP_SYN_FLAG; + got_ack = tcp->tcp_flags & TCP_ACK_FLAG; + got_fin = tcp->tcp_flags & TCP_FIN_FLAG; + got_rst = tcp->tcp_flags & TCP_RST_FLAG; + plogx_dbg("TCP, flags: %s%s%s, (len = %d, seq = %d, ack =%d)\n", got_syn? "SYN ":"", got_ack? "ACK ":"", got_fin? "FIN " : "", l4_meta->len, rte_bswap32(tcp->sent_seq), rte_bswap32(tcp->recv_ack)); + + if (got_syn) + ctx->flags |= STREAM_CTX_F_TCP_GOT_SYN; + if (got_fin) + ctx->flags |= STREAM_CTX_F_TCP_GOT_FIN; + + int progress_ack = 0, progress_seq = 0; + + /* RST => other side wants to terminate due to + inconsitent state (example: delay of retransmit of + last ACK while other side already closed the + connection. The other side will accept the packet + as a beginning of a new connection but there will + be no SYN. ) */ + if (got_rst) { + plogx_dbg("got rst\n"); + ctx->flags |= STREAM_CTX_F_TCP_ENDED; + return -1; + } + + if (got_ack) { + uint32_t ackd_seq = rte_bswap32(tcp->recv_ack); + + if (ackd_seq > ctx->ackd_seq) { + plogx_dbg("Got ACK for outstanding data, from %d to %d\n", ctx->ackd_seq, ackd_seq); + ctx->ackd_seq = ackd_seq; + plogx_dbg("ackable data = %d\n", ctx->ackable_data_seq); + /* Ackable_data_seq set to byte after + current action. 
*/ + if (ctx->ackable_data_seq == ctx->ackd_seq) { + /* Due to retransmit in + combination with late acks, + is is possible to ack + future data. In this case, + the assumption that data + was lost is not true and + the next seq is moved + forward. */ + if (ctx->next_seq < ctx->ackable_data_seq) { + ctx->next_seq = ctx->ackable_data_seq; + } + + ctx->ackable_data_seq = 0; + const struct stream_cfg *stream_cfg = ctx->stream_cfg; + const struct peer_action *act = &stream_cfg->actions[ctx->cur_action]; + + ctx->cur_pos[act->peer] += act->len; + ctx->cur_action++; + plogx_dbg("Moving to next action %u\n", ctx->ackd_seq); + } + progress_ack = 1; + } + else { + plogx_dbg("Old data acked: acked = %d, ackable =%d\n", ackd_seq, ctx->ackd_seq); + } + } + + uint32_t seq = rte_bswap32(tcp->sent_seq); + + /* update recv_seq. */ + if (got_syn) { + /* When a syn is received, immediately reset recv_seq based on seq from packet. */ + ctx->recv_seq = seq + 1; + /* Syn packets have length 1, so the first real data will start after that. */ + ctx->other_seq_first_byte = seq + 1; + progress_seq = 1; + } + else if (got_fin) { + if (ctx->recv_seq == seq) { + plogx_dbg("Got fin with correct seq\n"); + ctx->recv_seq = seq + 1; + progress_seq = 1; + } + else { + plogx_dbg("Got fin but incorrect seq\n"); + } + } + else { + /* Only expect in-order packets. */ + if (ctx->recv_seq == seq) { + stream_tcp_proc_in_order_data(ctx, l4_meta, &progress_seq); + } + else if (ctx->recv_seq < seq) { + plogx_dbg("Future data received (got = %d, expected = %d), missing data! 
(data ignored)\n", seq, ctx->recv_seq); + } + else { + plogx_dbg("Old data received again (state = %s)\n", tcp_state_to_str(ctx->tcp_state)); + plogx_dbg("expecting seq %d, got seq %d, len = %d\n",ctx->recv_seq, seq, l4_meta->len); + plogx_dbg("ackd_seq = %d, next_seq = %d, action = %d\n", ctx->ackd_seq, ctx->next_seq, ctx->cur_action); + } + } + + /* parse options */ + if (((tcp->data_off >> 4)*4) > sizeof(struct tcp_hdr)) { + struct tcp_option *tcp_op = (struct tcp_option *)(tcp + 1); + uint8_t *payload = (uint8_t *)tcp + ((tcp->data_off >> 4)*4); + + do { + if (tcp_op->kind == 2 && tcp_op->len == 4) { + uint16_t mss = rte_bswap16(*(uint16_t *)(tcp_op + 1)); + ctx->other_mss = mss; + } + + tcp_op = (struct tcp_option *)(((uint8_t*)tcp_op) + tcp_op->len); + } while (((uint8_t*)tcp_op) < payload); + } + + if (progress_ack || progress_seq) { + ctx->same_state = 0; + ctx->flags |= STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS; + } + else { + ctx->flags &= ~STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS; + } + return 0; +} + +static int stream_tcp_proc_out_closed(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + /* create SYN packet in mbuf, return 0. 
goto SYN_SENT, set timeout */ + ctx->tcp_state = SYN_SENT; + + /* Initialize: */ + ctx->next_seq = 99; + ctx->ackd_seq = 99; + + create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; +} + +static int stream_tcp_proc_out_listen(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + if (!(ctx->flags & STREAM_CTX_F_TCP_GOT_SYN)) { + // TODO: keep connection around at end to catch retransmits from client + plogx_dbg("Got packet while listening without SYN (will send RST)\n"); + pkt_tuple_debug(ctx->tuple); + + ctx->flags |= STREAM_CTX_F_TCP_ENDED; + create_tcp_pkt(ctx, mbuf, TCP_RST_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; + } + + /* if syn received _now_, send ack + syn. goto SYN_RECEIVED. */ + plogx_dbg("Got packet while listen\n"); + + ctx->next_seq = 200; + ctx->ackd_seq = 200; + + ctx->tcp_state = SYN_RECEIVED; + + create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG | TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; +} + +static int stream_tcp_proc_out_syn_sent(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + if (ctx->ackd_seq < ctx->next_seq || !(ctx->flags & STREAM_CTX_F_TCP_GOT_SYN)) { + plogx_dbg("Retransmit SYN\n"); + /* Did not get packet, send syn again and keep state (waiting for ACK). 
*/ + ++ctx->same_state; + tcp_set_retransmit(ctx); + return stream_tcp_proc_out_closed(ctx, mbuf, next_tsc); + } + + plogx_dbg("SYN_SENT and everything ACK'ed\n"); + plogx_dbg("ackd_seq = %d, next_seq = %d\n", ctx->ackd_seq, ctx->next_seq); + + /* If syn received for this stream, send ack and goto + ESTABLISHED. If first peer is this peer to send actual + data, schedule immediately. */ + + ctx->same_state = 0; + ctx->tcp_state = ESTABLISHED; + + /* third packet of three-way handshake will also contain + data. Don't send separate ACK yet. TODO: only send ACK if + data has not yet been ACK'ed. */ + if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) { + *next_tsc = tcp_resched_timeout(ctx); + plogx_dbg("immediately resched (%d)\n", ctx->cur_action); + return -1; + } + else { + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + } + return 0; +} + +static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + if (ctx->ackd_seq == ctx->next_seq) { + /* Possible from server side with ctx->cur_action == 1 + if the current packet received had ACK for syn from + server to client and also data completing the first + action. */ + + ctx->same_state = 0; + ctx->tcp_state = ESTABLISHED; + if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) { + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; + } + else { + /* While at this point, an ACK without data + any could be sent by the server, it is not + really required because the next pacekt + after reschedule will also contain an ACK + along with new data. 
+ + In this implementation, if this is the + case, the client is not only expecting an + ACK, but also actual data. For this reason, + the empty ACK packet should not be sent, + otherwise the client will retransmit its + data. + */ + + /* create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); */ + /* token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); */ + *next_tsc = tcp_resched_timeout(ctx); + return -1; + } + } + else { + /* Either this portion is executed due to a time-out + or due to packet reception, the SYN that has been + sent is not yet ACK'ed. So, retransmit the SYN/ACK. */ + plogx_dbg("Retransmit SYN/ACK\n"); + ++ctx->same_state; + tcp_set_retransmit(ctx); + ctx->next_seq = ctx->ackd_seq; + create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG | TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; + } +} + +static int stream_tcp_proc_out_estab_tx(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + const struct peer_action *act = &ctx->stream_cfg->actions[ctx->cur_action]; + + if (act->len == 0) { + plogx_dbg("Closing connection\n"); + /* This would be an ACK combined with FIN. To + send a separate ack. keep the state in + established, put_ack and expire + immediately*/ + plogx_dbg("Moving to FIN_WAIT\n"); + ctx->tcp_state = FIN_WAIT; + ctx->same_state = 0; + create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; + } + /* remaining_len2 will be zero, while in case of + act->len == 0, the connection can be closed + immediately. 
*/ + + plogx_dbg("This peer to send!\n"); + uint32_t outstanding_bytes = ctx->next_seq - ctx->ackd_seq; + + uint32_t data_beg2 = ctx->next_seq - ctx->seq_first_byte; + uint32_t remaining_len2 = act->len - (data_beg2 - act->beg); + + const uint32_t rx_win = 300000; + /* If still data to be sent and allowed by outstanding amount */ + if (outstanding_bytes <= rx_win && remaining_len2) { + plogx_dbg("Outstanding bytes = %d, and remaining_len = %d, next_seq = %d\n", outstanding_bytes, remaining_len2, ctx->next_seq); + + if (ctx->ackable_data_seq == 0) { + PROX_ASSERT(outstanding_bytes == 0); + + ctx->ackable_data_seq = ctx->next_seq + act->len; + } + else + plogx_dbg("This will not be the first part of the data within an action\n"); + } + /* still data yet to be acked || still data to be sent but blocked by RX win. */ + else { + if (ctx->flags & STREAM_CTX_F_MORE_DATA) { + /* Don't send any packet. */ + ctx->flags &= ~STREAM_CTX_F_MORE_DATA; + *next_tsc = tcp_retx_timeout(ctx); + ctx->sched_tsc = rte_rdtsc() + *next_tsc; + return -1; + } + else { + uint64_t now = rte_rdtsc(); + + if ((ctx->flags & STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS) && token_time_tsc_until_full(&ctx->token_time_other) != 0) { + tcp_retx_timeout_start(ctx, next_tsc); + ctx->flags &= ~STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS; + return -1; + } + /* This function might be called due to packet + reception. In that case, cancel here and + wait until the timeout really occurs before + reTX. */ + if (!tcp_retx_timeout_occured(ctx, now)) { + tcp_retx_timeout_resume(ctx, now, next_tsc); + return -1; + } + + ctx->same_state++; + tcp_set_retransmit(ctx); + /* This possibly means that now retransmit is resumed half-way in the action. 
*/ + plogx_dbg("Retransmit: outstanding = %d\n", outstanding_bytes); + plogx_dbg("Assuming %d->%d lost\n", ctx->ackd_seq, ctx->next_seq); + ctx->next_seq = ctx->ackd_seq; + plogx_dbg("highest seq from other side = %d\n", ctx->recv_seq); + } + /* When STREAM_CTX_F_MORE_DATA is set, real timeouts + can't occur. If this is needed, timeouts + need to carry additional information. */ + } + + /* The following code will retransmit the same data if next_seq is not moved forward. */ + uint32_t data_beg = ctx->next_seq - ctx->seq_first_byte; + uint32_t remaining_len = act->len - (data_beg - act->beg); + uint32_t data_len = remaining_len > ctx->other_mss? ctx->other_mss: remaining_len; + if (data_len == 0) + plogx_warn("data_len == 0\n"); + + if (remaining_len > ctx->other_mss) + ctx->flags |= STREAM_CTX_F_MORE_DATA; + else + ctx->flags &= ~STREAM_CTX_F_MORE_DATA; + + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, data_beg, data_len); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + if (ctx->flags & STREAM_CTX_F_MORE_DATA) + *next_tsc = tcp_resched_timeout(ctx); + else + tcp_retx_timeout_start(ctx, next_tsc); + + return 0; +} + +static int stream_tcp_proc_out_estab_rx(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + if (ctx->flags & STREAM_CTX_F_TCP_GOT_FIN) { + plogx_dbg("Got fin!\n"); + if (1) { + ctx->tcp_state = LAST_ACK; + create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; + } + else { + ctx->tcp_state = CLOSE_WAIT; + create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_resched_timeout(ctx); + return 0; + } + } + + if (ctx->flags & STREAM_CTX_F_NEW_DATA) + ctx->flags &= ~STREAM_CTX_F_NEW_DATA; + else { + ctx->same_state++; + 
tcp_set_retransmit(ctx); + plogx_dbg("state++ (ack = %d)\n", ctx->recv_seq); + } + + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; +} + +static int stream_tcp_proc_out_estab(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) { + return stream_tcp_proc_out_estab_tx(ctx, mbuf, next_tsc); + } + else { + return stream_tcp_proc_out_estab_rx(ctx, mbuf, next_tsc); + } +} + +static int stream_tcp_proc_out_close_wait(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + /* CLOSE_WAIT is an intermediary stage that is only visited + when the FIN is sent after ACK'ing the incoming FIN. In any + case, it does not matter if there was a packet or not. */ + ctx->tcp_state = LAST_ACK; + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG | TCP_FIN_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; +} + +static int stream_tcp_proc_out_last_ack(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + if (ctx->ackd_seq == ctx->next_seq) { + plogx_dbg("Last ACK received\n"); + ctx->flags |= STREAM_CTX_F_TCP_ENDED; + return -1; + } + else { + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + if (ctx->flags & STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS) { + ctx->flags &= ~STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS; + *next_tsc = tcp_retx_timeout(ctx); + return -1; + } + + plogx_dbg("Retransmit!\n"); + ctx->next_seq = ctx->ackd_seq; + ctx->same_state++; + tcp_set_retransmit(ctx); + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG | TCP_FIN_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + 
*next_tsc = tcp_retx_timeout(ctx); + return 0; + } +} + +static int stream_tcp_proc_out_fin_wait(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + if (ctx->ackd_seq == ctx->next_seq) { + if (ctx->flags & STREAM_CTX_F_TCP_GOT_FIN) { + ctx->same_state = 0; + ctx->tcp_state = TIME_WAIT; + ctx->sched_tsc = rte_rdtsc() + ctx->stream_cfg->tsc_timeout_time_wait; + plogx_dbg("from FIN_WAIT to TIME_WAIT\n"); + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = ctx->stream_cfg->tsc_timeout_time_wait; + return 0; + } + else { + /* FIN will still need to come */ + *next_tsc = tcp_retx_timeout(ctx); + return -1; + } + } + else { + if (ctx->flags & STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS) { + ctx->flags &= ~STREAM_CTX_F_LAST_RX_PKT_MADE_PROGRESS; + *next_tsc = tcp_retx_timeout(ctx); + return -1; + } + + plogx_dbg("Retransmit!\n"); + ctx->same_state++; + tcp_set_retransmit(ctx); + ctx->next_seq = ctx->ackd_seq; + create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = tcp_retx_timeout(ctx); + return 0; + } +} + +static int stream_tcp_proc_out_time_wait(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + if (ctx->sched_tsc < rte_rdtsc()) { + plogx_dbg("TIME_WAIT expired! 
for %#x\n", ctx->tuple->dst_addr); + ctx->flags |= STREAM_CTX_F_TCP_ENDED; + return -1; + } + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + *next_tsc = wait_tsc; + return -1; + } + + plogx_dbg("Got packet while in TIME_WAIT (pkt ACK reTX)\n"); + ctx->sched_tsc = rte_rdtsc() + ctx->stream_cfg->tsc_timeout_time_wait; + create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + *next_tsc = ctx->stream_cfg->tsc_timeout_time_wait; + return 0; +} + +static int stream_tcp_proc_out(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint64_t *next_tsc) +{ + if (ctx->same_state == 10) { + ctx->flags |= STREAM_CTX_F_EXPIRED; + return -1; + } + + switch (ctx->tcp_state) { + case CLOSED: /* Client initial state */ + return stream_tcp_proc_out_closed(ctx, mbuf, next_tsc); + case LISTEN: /* Server starts in this state. */ + return stream_tcp_proc_out_listen(ctx, mbuf, next_tsc); + case SYN_SENT: + return stream_tcp_proc_out_syn_sent(ctx, mbuf, next_tsc); + case SYN_RECEIVED: + return stream_tcp_proc_out_syn_recv(ctx, mbuf, next_tsc); + case ESTABLISHED: + return stream_tcp_proc_out_estab(ctx, mbuf, next_tsc); + case CLOSE_WAIT: + return stream_tcp_proc_out_close_wait(ctx, mbuf, next_tsc); + case LAST_ACK: + return stream_tcp_proc_out_last_ack(ctx, mbuf, next_tsc); + case FIN_WAIT: + return stream_tcp_proc_out_fin_wait(ctx, mbuf, next_tsc); + case TIME_WAIT: + return stream_tcp_proc_out_time_wait(ctx, mbuf, next_tsc); + } + + return -1; +} + +/* Return: zero: packet in mbuf is the reply, non-zero: data consumed, + nothing to send. The latter case might mean that the connection has + ended, or that a future event has been scheduled. l4_meta => + mbuf contains packet to be processed. 
*/ +int stream_tcp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, uint64_t *next_tsc) +{ + token_time_update(&ctx->token_time, rte_rdtsc()); + token_time_update(&ctx->token_time_other, rte_rdtsc()); + if (l4_meta) { + int ret; + + token_time_take_clamp(&ctx->token_time_other, mbuf_wire_size(mbuf)); + ret = stream_tcp_proc_in(ctx, l4_meta); + if (ret) + return ret; + } + + return stream_tcp_proc_out(ctx, mbuf, next_tsc); +} + +int stream_tcp_is_ended(struct stream_ctx *ctx) +{ + return ctx->flags & STREAM_CTX_F_TCP_ENDED; +} + +static void add_pkt_bytes(uint32_t *n_pkts, uint32_t *n_bytes, uint32_t len) +{ + len = (len < 60? 60 : len) + 20 + ETHER_CRC_LEN; + + (*n_pkts)++; + *n_bytes += len; +} + +void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_bytes) +{ + const uint32_t client_hdr_len = cfg->data[PEER_CLIENT].hdr_len; + const uint32_t server_hdr_len = cfg->data[PEER_SERVER].hdr_len; + + *n_pkts = 0; + *n_bytes = 0; + + /* Connection setup */ + add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(struct tcp_hdr) + 4); /* SYN */ + add_pkt_bytes(n_pkts, n_bytes, server_hdr_len + sizeof(struct tcp_hdr) + 4); /* SYN/ACK */ + add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(struct tcp_hdr)); /* ACK */ + + for (uint32_t i = 0; i < cfg->n_actions; ++i) { + const uint32_t mss = 1440; /* TODO: should come from peer's own mss. */ + uint32_t remaining = cfg->actions[i].len; + const uint32_t send_hdr_len = cfg->actions[i].peer == PEER_CLIENT? client_hdr_len : server_hdr_len; + const uint32_t reply_hdr_len = cfg->actions[i].peer == PEER_CLIENT? server_hdr_len : client_hdr_len; + + if (remaining == 0) + break; + + while (remaining) { + uint32_t seg = remaining > mss? 
mss: remaining; + add_pkt_bytes(n_pkts, n_bytes, send_hdr_len + sizeof(struct tcp_hdr) + seg); + remaining -= seg; + } + + add_pkt_bytes(n_pkts, n_bytes, reply_hdr_len + sizeof(struct tcp_hdr)); + } + + /* Connection Tear-down */ + enum l4gen_peer last_peer = cfg->actions[cfg->n_actions - 1].peer; + + const uint32_t init_hdr_len = last_peer == PEER_CLIENT? client_hdr_len : server_hdr_len; + const uint32_t resp_hdr_len = last_peer == PEER_CLIENT? server_hdr_len : client_hdr_len; + + add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(struct tcp_hdr)); /* FIN */ + add_pkt_bytes(n_pkts, n_bytes, resp_hdr_len + sizeof(struct tcp_hdr)); /* FIN/ACK */ + add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(struct tcp_hdr)); /* ACK */ +} diff --git a/VNFs/DPPD-PROX/genl4_stream_tcp.h b/VNFs/DPPD-PROX/genl4_stream_tcp.h new file mode 100644 index 00000000..f7b04d51 --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_stream_tcp.h @@ -0,0 +1,29 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _GENL4_STREAM_TCP_H_ +#define _GENL4_STREAM_TCP_H_ + +#include "genl4_stream.h" + +int stream_tcp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, uint64_t *next_tsc); +int stream_tcp_is_ended(struct stream_ctx *ctx); +uint16_t stream_tcp_reply_len(struct stream_ctx *ctx); +void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_bytes); + +void stream_tcp_create_rst(struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struct pkt_tuple *tuple); + +#endif /* _GENL4_STREAM_TCP_H_ */ diff --git a/VNFs/DPPD-PROX/genl4_stream_udp.c b/VNFs/DPPD-PROX/genl4_stream_udp.c new file mode 100644 index 00000000..3de2db09 --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_stream_udp.c @@ -0,0 +1,165 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "genl4_stream_udp.h" +#include "mbuf_utils.h" + +int stream_udp_is_ended(struct stream_ctx *ctx) +{ + return ctx->cur_action == ctx->stream_cfg->n_actions; +} + +static void update_token_times(struct stream_ctx *ctx) +{ + uint64_t now = rte_rdtsc(); + + token_time_update(&ctx->token_time_other, now); + token_time_update(&ctx->token_time, now); +} + +int stream_udp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, uint64_t *next_tsc) +{ + update_token_times(ctx); + + if (l4_meta) { + enum l4gen_peer peer = ctx->stream_cfg->actions[ctx->cur_action].peer; + plogx_dbg("Consuming UDP data\n"); + /* data should come from the other side */ + if (peer == ctx->peer) { + plogx_err("Wrong peer\n"); + return -1; + } + /* Fixed length data expected */ + if (ctx->stream_cfg->actions[ctx->cur_action].len != l4_meta->len) { + plogx_dbg("unexpected UDP len (expected = %u, got = %u, action = %u)\n", + ctx->stream_cfg->actions[ctx->cur_action].len, + l4_meta->len, + ctx->cur_action); + + return -1; + } + /* With specific payload */ + if (memcmp(ctx->stream_cfg->data[peer].content + ctx->stream_cfg->actions[ctx->cur_action].beg, l4_meta->payload, l4_meta->len) != 0) { + plogx_dbg("Bad payload at action_id %d, with peer = %d and pos = %d and len=%d\n", ctx->cur_action, peer, ctx->cur_pos[peer], l4_meta->len); + return -1; + } + ctx->cur_pos[peer] += l4_meta->len; + ctx->cur_action++; + + if (stream_udp_is_ended(ctx)) + return -1; + + token_time_take(&ctx->token_time_other, mbuf_wire_size(mbuf)); + /* Time before next packet is expected to + arrive. Note, addition amount of time is accounted + for due to rate limiting. */ + uint64_t wait = token_time_tsc_until_full(&ctx->token_time_other); + *next_tsc = wait + ctx->stream_cfg->tsc_timeout; + } + + if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) { + const char *other_peer_str = ctx->peer != PEER_SERVER? 
"server" : "client"; + + plogx_dbg("Expecting more UDP data from %s, will expire = %s\n", other_peer_str, l4_meta == NULL? "yes" : "no"); + if (!l4_meta) { + ctx->flags |= STREAM_CTX_F_EXPIRED; + } + return -1; + } + + uint64_t wait_tsc = token_time_tsc_until_full(&ctx->token_time); + + if (wait_tsc != 0) { + plogx_dbg("Wait = %"PRIu64"\n", wait_tsc); + *next_tsc = wait_tsc; + return -1; + } + + const struct stream_cfg *stream_cfg = ctx->stream_cfg; + + uint8_t *pkt = rte_pktmbuf_mtod(mbuf, uint8_t *); + const struct peer_action *act = &stream_cfg->actions[ctx->cur_action]; + + uint16_t pkt_len = stream_cfg->data[act->peer].hdr_len + sizeof(struct udp_hdr) + act->len; + + rte_pktmbuf_pkt_len(mbuf) = pkt_len; + rte_pktmbuf_data_len(mbuf) = pkt_len; + plogx_dbg("Creating UDP data (peer = %s, payload len = %u)\n", act->peer == PEER_CLIENT? "client" : "server", act->len); + /* Construct the packet. The template is used up to L4 header, + a gap of sizeof(l4_hdr) is skipped, followed by the payload. 
*/ + rte_memcpy(pkt, stream_cfg->data[act->peer].hdr, stream_cfg->data[act->peer].hdr_len); + rte_memcpy(pkt + stream_cfg->data[act->peer].hdr_len + sizeof(struct udp_hdr), stream_cfg->data[act->peer].content + act->beg, act->len); + + struct ipv4_hdr *l3_hdr = (struct ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(struct ipv4_hdr)]; + struct udp_hdr *l4_hdr = (struct udp_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len]; + + l3_hdr->src_addr = ctx->tuple->dst_addr; + l3_hdr->dst_addr = ctx->tuple->src_addr; + l3_hdr->next_proto_id = IPPROTO_UDP; + l4_hdr->src_port = ctx->tuple->dst_port; + l4_hdr->dst_port = ctx->tuple->src_port; + l4_hdr->dgram_len = rte_bswap16(sizeof(struct udp_hdr) + act->len); + /* TODO: UDP checksum calculation */ + l3_hdr->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + act->len); + ctx->cur_pos[ctx->peer] += act->len; + ctx->cur_action++; + + /* When the stream has ended, there is no need to schedule + another timeout (which will be unscheduled at the end of + the stream). 
*/ + if (stream_udp_is_ended(ctx)) + return 0; + + token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); + + /* Send next packet as soon as possible */ + if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) { + *next_tsc = token_time_tsc_until_full(&ctx->token_time); + } + else { + uint64_t wait = token_time_tsc_until_full(&ctx->token_time_other); + *next_tsc = wait + ctx->stream_cfg->tsc_timeout; + } + + return 0; +} + +uint16_t stream_udp_reply_len(struct stream_ctx *ctx) +{ + if (stream_udp_is_ended(ctx)) + return 0; + else if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) + return 0; + else + return ctx->stream_cfg->data[ctx->stream_cfg->actions[ctx->cur_action].peer].hdr_len + sizeof(struct udp_hdr) + + ctx->stream_cfg->actions[ctx->cur_action].len; +} + +void stream_udp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_bytes) +{ + const uint32_t client_hdr_len = cfg->data[PEER_CLIENT].hdr_len; + const uint32_t server_hdr_len = cfg->data[PEER_SERVER].hdr_len; + + *n_pkts = 0; + *n_bytes = 0; + + for (uint32_t i = 0; i < cfg->n_actions; ++i) { + const uint32_t send_hdr_len = cfg->actions[i].peer == PEER_CLIENT? client_hdr_len : server_hdr_len; + uint32_t len = send_hdr_len + sizeof(struct udp_hdr) + cfg->actions[i].len; + *n_bytes += (len < 60? 60 : len) + 24; + (*n_pkts)++; + } +} diff --git a/VNFs/DPPD-PROX/genl4_stream_udp.h b/VNFs/DPPD-PROX/genl4_stream_udp.h new file mode 100644 index 00000000..c82d7951 --- /dev/null +++ b/VNFs/DPPD-PROX/genl4_stream_udp.h @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _GENL4_STREAM_UDP_H_ +#define _GENL4_STREAM_UDP_H_ + +#include "genl4_stream.h" + +int stream_udp_is_ended(struct stream_ctx *ctx); + +int stream_udp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_meta *l4_meta, uint64_t *next_tsc); +uint16_t stream_udp_reply_len(struct stream_ctx *ctx); +void stream_udp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_bytes); + +#endif /* _GENL4_STREAM_UDP_H_ */ diff --git a/VNFs/DPPD-PROX/gre.h b/VNFs/DPPD-PROX/gre.h new file mode 100644 index 00000000..23f68b62 --- /dev/null +++ b/VNFs/DPPD-PROX/gre.h @@ -0,0 +1,35 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _GRE_H_ +#define _GRE_H_ + +#define GRE_CRC_PRESENT 0x10 +#define GRE_ROUTING_PRESENT 0x08 +#define GRE_KEY_PRESENT 0x04 +#define GRE_SEQNUM_PRESENT 0x02 +#define GRE_STRICT_ROUTE 0x01 + +struct gre_hdr { + uint8_t recur: 3; /* recur */ + uint8_t bits: 5; /* bits: Checksum, Routing, Key, Sequence Number, strict Route */ + uint8_t version: 3; /* Version: must be 0 */ + uint8_t flags: 5; /* Flags: must be 0 */ + uint16_t type; /* Protocol type */ + uint32_t gre_id; /* Key ID */ +} __attribute__((__packed__)); + +#endif /* _GRE_H_ */ diff --git a/VNFs/DPPD-PROX/handle_acl.c b/VNFs/DPPD-PROX/handle_acl.c new file mode 100644 index 00000000..03949360 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_acl.c @@ -0,0 +1,314 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_mbuf.h> +#include <rte_acl.h> +#include <rte_ip.h> +#include <rte_cycles.h> +#include <rte_version.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" + +#include "log.h" +#include "quit.h" +#include "parse_utils.h" +#include "ip_subnet.h" +#include "handle_acl.h" +#include "acl_field_def.h" +#include "task_init.h" +#include "task_base.h" +#include "lconf.h" +#include "prefetch.h" +#include "etypes.h" + +struct task_acl { + struct task_base base; + struct rte_acl_ctx *context; + const uint8_t *ptuples[64]; + + uint32_t n_rules; + uint32_t n_max_rules; + + void *field_defs; + size_t field_defs_size; + uint32_t n_field_defs; +}; + +static void set_tc(struct rte_mbuf *mbuf, uint32_t tc) +{ +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + uint32_t subport, pipe, traffic_class, queue; + enum rte_meter_color color; + + rte_sched_port_pkt_read_tree_path(mbuf, &subport, &pipe, &traffic_class, &queue); + color = rte_sched_port_pkt_read_color(mbuf); + + rte_sched_port_pkt_write(mbuf, subport, pipe, tc, queue, color); +#else + struct rte_sched_port_hierarchy *sched = + (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched; + sched->traffic_class = tc; +#endif +} + +static int handle_acl_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_acl *task = (struct task_acl *)tbase; + uint32_t results[64]; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + +#ifdef PROX_PREFETCH_OFFSET + for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *)); + } +#endif + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + /* TODO: detect version_ihl != 0x45. Extract relevant + fields of that packet and point ptuples[j] to the + extracted verion. 
Note that this is very unlikely. */ + task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + task->ptuples[j] = rte_pktmbuf_mtod(mbufs[j], uint8_t *); + } +#endif + + rte_acl_classify(task->context, (const uint8_t **)task->ptuples, results, n_pkts, 1); + + for (uint8_t i = 0; i < n_pkts; ++i) { + switch (results[i]) { + default: + case ACL_NOT_SET: + case ACL_DROP: + out[i] = OUT_DISCARD; + break; + case ACL_ALLOW: + out[i] = 0; + case ACL_RATE_LIMIT: + set_tc(mbufs[i], 3); + break; + }; + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void acl_msg(struct task_base *tbase, void **data, uint16_t n_msgs) +{ + struct task_acl *task = (struct task_acl *)tbase; + struct acl4_rule **new_rules = (struct acl4_rule **)data; + uint16_t i; + + for (i = 0; i < n_msgs; ++i) { + if (task->n_rules == task->n_max_rules) { + plog_err("Failed to add %d rule%s (already at maximum number of rules (%d))", + n_msgs - i, (n_msgs - i)? 
"s" : "", task->n_max_rules); + break; + } + + new_rules[i]->data.priority = ++task->n_rules; + rte_acl_add_rules(task->context, (struct rte_acl_rule*) new_rules[i], 1); + } + + /* No need to rebuild if no rules have been added */ + if (!i) { + return ; + } + + struct rte_acl_config acl_build_param; + /* Perform builds */ + acl_build_param.num_categories = 1; + + acl_build_param.num_fields = task->n_field_defs; + rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size); + + int ret; + PROX_PANIC((ret = rte_acl_build(task->context, &acl_build_param)), + "Failed to build ACL trie (%d)\n", ret); +} + +static void init_task_acl(struct task_base *tbase, struct task_args *targ) +{ + struct task_acl *task = (struct task_acl *)tbase; + int use_qinq = targ->flags & TASK_ARG_QINQ_ACL; + + char name[PATH_MAX]; + struct rte_acl_param acl_param; + + /* Create ACL contexts */ + snprintf(name, sizeof(name), "acl-%d-%d", targ->lconf->id, targ->task); + + if (use_qinq) { + task->n_field_defs = RTE_DIM(pkt_qinq_ipv4_udp_defs); + task->field_defs = pkt_qinq_ipv4_udp_defs; + task->field_defs_size = sizeof(pkt_qinq_ipv4_udp_defs); + } else { + task->n_field_defs = RTE_DIM(pkt_eth_ipv4_udp_defs); + task->field_defs = pkt_eth_ipv4_udp_defs; + task->field_defs_size = sizeof(pkt_eth_ipv4_udp_defs); + } + + acl_param.name = name; + acl_param.socket_id = rte_lcore_to_socket_id(targ->lconf->id); + acl_param.rule_size = RTE_ACL_RULE_SZ(task->n_field_defs); + acl_param.max_rule_num = targ->n_max_rules; + + task->n_max_rules = targ->n_max_rules; + task->context = rte_acl_create(&acl_param); + + PROX_PANIC(task->context == NULL, "Failed to create ACL context\n"); + uint32_t free_rules = targ->n_max_rules; + + PROX_PANIC(!strcmp(targ->rules, ""), "No rule specified for ACL\n"); + + int ret = lua_to_rules(prox_lua(), GLOBAL, targ->rules, task->context, &free_rules, use_qinq, targ->qinq_tag); + PROX_PANIC(ret, "Failed to read rules from config:\n%s\n", get_lua_to_errors()); + 
task->n_rules = targ->n_max_rules - free_rules; + + plog_info("Configured %d rules\n", task->n_rules); + + if (task->n_rules) { + struct rte_acl_config acl_build_param; + /* Perform builds */ + acl_build_param.num_categories = 1; +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + acl_build_param.max_size = 0; +#endif + + acl_build_param.num_fields = task->n_field_defs; + rte_memcpy(&acl_build_param.defs, task->field_defs, task->field_defs_size); + + plog_info("Building trie structure\n"); + PROX_PANIC(rte_acl_build(task->context, &acl_build_param), + "Failed to build ACL trie\n"); + } + + targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq); + targ->lconf->ctrl_func_m[targ->task] = acl_msg; +} + +int str_to_rule(struct acl4_rule *rule, char** fields, int n_rules, int use_qinq) +{ + uint32_t svlan, svlan_mask; + uint32_t cvlan, cvlan_mask; + + uint32_t ip_proto, ip_proto_mask; + + struct ip4_subnet ip_src; + struct ip4_subnet ip_dst; + + uint32_t sport_lo, sport_hi; + uint32_t dport_lo, dport_hi; + + enum acl_action class = ACL_NOT_SET; + char class_str[24]; + + PROX_PANIC(parse_int_mask(&svlan, &svlan_mask, fields[0]), "Error parsing svlan: %s\n", get_parse_err()); + PROX_PANIC(parse_int_mask(&cvlan, &cvlan_mask, fields[1]), "Error parsing cvlan: %s\n", get_parse_err()); + PROX_PANIC(parse_int_mask(&ip_proto, &ip_proto_mask, fields[2]), "Error parsing ip protocol: %s\n", get_parse_err()); + PROX_PANIC(parse_ip4_cidr(&ip_src, fields[3]), "Error parsing source IP subnet: %s\n", get_parse_err()); + PROX_PANIC(parse_ip4_cidr(&ip_dst, fields[4]), "Error parsing dest IP subnet: %s\n", get_parse_err()); + + PROX_PANIC(parse_range(&sport_lo, &sport_hi, fields[5]), "Error parsing source port range: %s\n", get_parse_err()); + PROX_PANIC(parse_range(&dport_lo, &dport_hi, fields[6]), "Error parsing destination port range: %s\n", get_parse_err()); + + PROX_PANIC(parse_str(class_str, fields[7], sizeof(class_str)), "Error parsing action: %s\n", get_parse_err()); + + if 
(!strcmp(class_str, "drop")) { + class = ACL_DROP; + } + else if (!strcmp(class_str, "allow")) { + class = ACL_ALLOW; + } + else if (!strcmp(class_str, "rate limit")) { + class = ACL_RATE_LIMIT; + } + else { + plog_err("unknown class type: %s\n", class_str); + } + + rule->data.userdata = class; /* allow, drop or ratelimit */ + rule->data.category_mask = 1; + rule->data.priority = n_rules; + + /* Configuration for rules is done in little-endian so no bswap is needed here.. */ + + rule->fields[0].value.u8 = ip_proto; + rule->fields[0].mask_range.u8 = ip_proto_mask; + rule->fields[1].value.u32 = ip_src.ip; + rule->fields[1].mask_range.u32 = ip_src.prefix; + + rule->fields[2].value.u32 = ip_dst.ip; + rule->fields[2].mask_range.u32 = ip_dst.prefix; + + rule->fields[3].value.u16 = sport_lo; + rule->fields[3].mask_range.u16 = sport_hi; + + rule->fields[4].value.u16 = dport_lo; + rule->fields[4].mask_range.u16 = dport_hi; + + if (use_qinq) { + rule->fields[5].value.u16 = rte_bswap16(ETYPE_8021ad); + rule->fields[5].mask_range.u16 = 0xffff; + + /* To mask out the TCI and only keep the VID, the mask should be 0x0fff */ + rule->fields[6].value.u16 = svlan; + rule->fields[6].mask_range.u16 = svlan_mask; + + rule->fields[7].value.u16 = rte_bswap16(ETYPE_VLAN); + rule->fields[7].mask_range.u16 = 0xffff; + + rule->fields[8].value.u16 = cvlan; + rule->fields[8].mask_range.u16 = cvlan_mask; + } + else { + /* Reuse first ethertype from vlan to check if packet is IPv4 packet */ + rule->fields[5].value.u16 = rte_bswap16(ETYPE_IPv4); + rule->fields[5].mask_range.u16 = 0xffff; + + /* Other fields are ignored */ + rule->fields[6].value.u16 = 0; + rule->fields[6].mask_range.u16 = 0; + rule->fields[7].value.u16 = 0; + rule->fields[7].mask_range.u16 = 0; + rule->fields[8].value.u16 = 0; + rule->fields[8].mask_range.u16 = 0; + } + return 0; +} + +static struct task_init task_init_acl = { + .mode_str = "acl", + .init = init_task_acl, + .handle = handle_acl_bulk, + .size = sizeof(struct 
task_acl) +}; + +__attribute__((constructor)) static void reg_task_acl(void) +{ + reg_task(&task_init_acl); +} diff --git a/VNFs/DPPD-PROX/handle_acl.h b/VNFs/DPPD-PROX/handle_acl.h new file mode 100644 index 00000000..8e4c140c --- /dev/null +++ b/VNFs/DPPD-PROX/handle_acl.h @@ -0,0 +1,29 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_ACL_H_ +#define _HANDLE_ACL_H_ + +#include <rte_acl.h> + +struct acl4_rule { + struct rte_acl_rule_data data; + struct rte_acl_field fields[9]; +}; + +int str_to_rule(struct acl4_rule *rule, char** fields, int n_rules, int use_qinq); + +#endif /* _HANDLE_ACL_H_ */ diff --git a/VNFs/DPPD-PROX/handle_aggregator.c b/VNFs/DPPD-PROX/handle_aggregator.c new file mode 100644 index 00000000..6434d759 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_aggregator.c @@ -0,0 +1,229 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_ip.h> +#include <stdio.h> +#include <string.h> +#include <rte_version.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" + +#include "lconf.h" +#include "task_base.h" +#include "task_init.h" +#include "defines.h" +#include "prefetch.h" +#include "qinq.h" +#include "prox_cfg.h" +#include "log.h" +#include "quit.h" +#include "prox_shared.h" +#include "mbuf_utils.h" +#include "handle_aggregator.h" + +#define PRIORITY_DHCP (HIGH_PRIORITY) + +#define TASK_STATS_ADD_DROP_TX_FAIL_PRIO(stats, ntx, prio) do { \ + (stats)->drop_tx_fail_prio[prio] += ntx; \ + } while(0) +#define TASK_STATS_ADD_TX_PRIO(stats, ntx, prio) do { \ + (stats)->rx_prio[prio] += ntx; \ + } while(0) \ + +static inline uint8_t detect_l4_priority(uint8_t l3_priority, const struct ipv4_hdr *ipv4_hdr) +{ + if (ipv4_hdr->next_proto_id == IPPROTO_UDP) { + const struct udp_hdr *udp = (const struct udp_hdr *)((const uint8_t *)ipv4_hdr + sizeof(struct ipv4_hdr)); + if (((udp->src_port == 0x67) && (udp->dst_port == 0x68)) || ((udp->src_port == 0x68) && (udp->dst_port == 0x67))) { + return PRIORITY_DHCP; + } + } + return l3_priority; +} + +static inline uint8_t detect_l3_priority(uint8_t l2_priority, const struct ipv4_hdr *ipv4_hdr) +{ + uint8_t dscp; + if ((ipv4_hdr->version_ihl >> 4) == 4) { + } else if ((ipv4_hdr->version_ihl >> 4) == 6) { + plog_warn("IPv6 Not implemented\n"); + return OUT_DISCARD; + } else { + plog_warn("Unexpected IP version\n"); + return OUT_DISCARD; + } + dscp = ipv4_hdr->type_of_service >> 2; + if (dscp) + return MAX_PRIORITIES - dscp - 1; + else + return l2_priority; +} + +static inline uint8_t detect_l2_priority(const struct qinq_hdr *pqinq) +{ + if (pqinq->cvlan.eth_proto != ETYPE_VLAN) { + plog_warn("Unexpected proto in QinQ = %#04x\n", pqinq->cvlan.eth_proto); + return OUT_DISCARD; + } + uint16_t svlan_priority = ntohs(pqinq->svlan.vlan_tci >> 13); + 
uint16_t cvlan_priority = ntohs(pqinq->cvlan.vlan_tci >> 13); + if (svlan_priority) + return svlan_priority; + else + return cvlan_priority; +} + +static inline void buffer_packet(struct task_aggregator *task, struct rte_mbuf *mbuf, uint8_t priority) +{ + struct task_base *tbase = (struct task_base *)task; + + struct task_buffer *prio = &task->priority[priority]; + if (prio->pkt_nb < BUFFER_LENGTH) { + prio->buffer[prio->pkt_pos] = mbuf; + prio->pkt_pos++; + if (prio->pkt_pos == BUFFER_LENGTH) + prio->pkt_pos = 0; + prio->pkt_nb++; + } else { + task->drop.buffer[task->drop.pkt_nb] = mbuf; + task->drop.pkt_nb++; + TASK_STATS_ADD_DROP_TX_FAIL_PRIO(&task->stats, 1, priority); + } +} + +static inline void handle_aggregator(struct task_aggregator *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + uint8_t priority = 0; + const struct qinq_hdr *pqinq; + const struct ipv4_hdr *ipv4_hdr; + + const uint16_t eth_type = peth->ether_type; + switch (eth_type) { + case ETYPE_MPLSU: + case ETYPE_MPLSM: + break; + case ETYPE_8021ad: + pqinq = rte_pktmbuf_mtod(mbuf, const struct qinq_hdr *); + if ((priority = detect_l2_priority(pqinq)) == OUT_DISCARD) + break; + ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + if ((priority = detect_l3_priority(priority, ipv4_hdr)) == OUT_DISCARD) + break; + if ((priority = detect_l4_priority(priority, ipv4_hdr)) == OUT_DISCARD) + break; + break; + case ETYPE_VLAN: + break; + case ETYPE_IPv4: + ipv4_hdr = (const struct ipv4_hdr *)(peth+1); + if ((priority = detect_l3_priority(LOW_PRIORITY, ipv4_hdr)) == OUT_DISCARD) + break; + if ((priority = detect_l4_priority(priority, ipv4_hdr)) == OUT_DISCARD) + break; + break; + case ETYPE_IPv6: + break; + case ETYPE_ARP: + break; + default: + break; + } + if (priority == OUT_DISCARD) { + task->drop.buffer[task->drop.pkt_nb] = mbuf; + task->drop.pkt_nb++; + return; + } + buffer_packet(task, mbuf, priority); +} + +static int handle_aggregator_bulk(struct 
task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_aggregator *task = (struct task_aggregator *)tbase; + + uint16_t j; + uint32_t drop_bytes = 0; +#ifdef PROX_PREFETCH_OFFSET + for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + prefetch_nta(mbufs[j]); + } + for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + prefetch_nta(rte_pktmbuf_mtod(mbufs[j - 1], void *)); + } +#endif + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + prefetch_nta(mbufs[j + PREFETCH_OFFSET]); + prefetch_nta(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + handle_aggregator(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + prefetch_nta(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + handle_aggregator(task, mbufs[j]); + } +#endif + + for (int i = 0 ; i < task->drop.pkt_nb; i++) { + drop_bytes += mbuf_wire_size(task->drop.buffer[i]); + rte_pktmbuf_free(task->drop.buffer[i]); + } + TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, task->drop.pkt_nb); + TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes); + task->drop.pkt_nb = 0; + + for (int priority = 0; priority < MAX_PRIORITIES; priority++) { + struct task_buffer *prio = &task->priority[priority]; + if (prio->pkt_nb) { + uint8_t n = 0; + if (prio->pkt_pos > prio->pkt_nb) { + struct rte_mbuf **buf = prio->buffer + prio->pkt_pos - prio->pkt_nb; + n = tbase->aux->tx_pkt_try(&task->base, buf, prio->pkt_nb); + } else { + struct rte_mbuf **buf = prio->buffer + BUFFER_LENGTH + prio->pkt_pos - prio->pkt_nb; + n = tbase->aux->tx_pkt_try(&task->base, buf, prio->pkt_nb - prio->pkt_pos); + if (n == (prio->pkt_nb - prio->pkt_pos)) + n += tbase->aux->tx_pkt_try(&task->base, prio->buffer, prio->pkt_pos); + } + prio->pkt_nb -=n; + TASK_STATS_ADD_TX_PRIO(&task->stats, n, priority); + if (prio->pkt_nb) + break; + } + } + return 0; +} + +static void init_task_aggregator(struct task_base *tbase, struct task_args *targ) +{ + struct 
task_aggregator *task = (struct task_aggregator *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); +} + +static struct task_init task_init_aggregator = { + .mode_str = "aggreg", + .init = init_task_aggregator, + .handle = handle_aggregator_bulk, + .flag_features = TASK_FEATURE_NEVER_DISCARDS, + .size = sizeof(struct task_aggregator) +}; + +__attribute__((constructor)) static void reg_task_aggregator(void) +{ + reg_task(&task_init_aggregator); +} diff --git a/VNFs/DPPD-PROX/handle_aggregator.h b/VNFs/DPPD-PROX/handle_aggregator.h new file mode 100644 index 00000000..1d5cd6c9 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_aggregator.h @@ -0,0 +1,42 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HANDLE_AGGREGATOR_H_ +#define _HANDLE_AGGREGATOR_H_ + +#include "task_base.h" +#include "task_init.h" +#include "stats_prio_task.h" + +#define MAX_PRIORITIES 8 +#define LOW_PRIORITY (MAX_PRIORITIES - 1) +#define HIGH_PRIORITY 0 +#define BUFFER_LENGTH 256 + +struct task_buffer { + struct rte_mbuf *buffer[BUFFER_LENGTH]; + uint16_t pkt_pos; + uint16_t pkt_nb; +}; + +struct task_aggregator { + struct task_base base; + struct prio_task_rt_stats stats; + struct task_buffer priority[MAX_PRIORITIES]; + struct task_buffer drop; +}; + +#endif /* _HANDLE_AGGREGATOR_H_ */ diff --git a/VNFs/DPPD-PROX/handle_arp.c b/VNFs/DPPD-PROX/handle_arp.c new file mode 100644 index 00000000..106e19e5 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_arp.c @@ -0,0 +1,181 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "task_init.h" +#include "task_base.h" +#include "stats.h" +#include "arp.h" +#include "etypes.h" +#include "quit.h" +#include "log.h" +#include "prox_port_cfg.h" +#include "lconf.h" +#include "cmd_parser.h" +#include "handle_arp.h" + +struct task_arp { + struct task_base base; + struct ether_addr src_mac; + uint32_t seed; + uint32_t flags; + uint32_t ip; + uint32_t tmp_ip; + uint8_t arp_replies_ring; + uint8_t other_pkts_ring; + uint8_t send_arp_requests; +}; + +static void task_update_config(struct task_arp *task) +{ + if (unlikely(task->ip != task->tmp_ip)) + task->ip = task->tmp_ip; +} + +static void handle_arp(struct task_arp *task, struct ether_hdr_arp *hdr, struct ether_addr *s_addr) +{ + prepare_arp_reply(hdr, s_addr); + memcpy(hdr->ether_hdr.d_addr.addr_bytes, hdr->ether_hdr.s_addr.addr_bytes, 6); + memcpy(hdr->ether_hdr.s_addr.addr_bytes, s_addr, 6); +} + +static int handle_arp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct ether_hdr_arp *hdr; + struct task_arp *task = (struct task_arp *)tbase; + uint8_t out[MAX_PKT_BURST] = {0}; + struct rte_mbuf *replies_mbufs[64] = {0}, *arp_pkt_mbufs[64] = {0}; + int n_arp_reply_pkts = 0, n_other_pkts = 0,n_arp_pkts = 0; + struct ether_addr s_addr; + + for (uint16_t j = 0; j < n_pkts; ++j) { + hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *); + if (hdr->ether_hdr.ether_type == ETYPE_ARP) { + if (arp_is_gratuitous(hdr)) { + out[n_other_pkts] = OUT_DISCARD; + n_other_pkts++; + plog_info("Received gratuitous packet \n"); + } else if (hdr->arp.oper == 0x100) { + if (task->arp_replies_ring != OUT_DISCARD) { + arp_pkt_mbufs[n_arp_pkts] = mbufs[j]; + out[n_arp_pkts] = task->arp_replies_ring; + n_arp_pkts++; + } else if (task->ip == 0) { + create_mac(hdr, &s_addr); + handle_arp(task, hdr, &s_addr); + replies_mbufs[n_arp_reply_pkts] = mbufs[j]; + out[n_arp_reply_pkts] = 0; + n_arp_reply_pkts++; + } else if (hdr->arp.data.tpa == task->ip) { + handle_arp(task, hdr, 
&task->src_mac); + replies_mbufs[n_arp_reply_pkts] = mbufs[j]; + out[n_arp_reply_pkts] = 0; + n_arp_reply_pkts++; + } else { + out[n_other_pkts] = OUT_DISCARD; + mbufs[n_other_pkts] = mbufs[j]; + n_other_pkts++; + plogx_dbg("Received ARP on unexpected IP %x, expecting %x\n", rte_be_to_cpu_32(hdr->arp.data.tpa), rte_be_to_cpu_32(task->ip)); + } + } else if (hdr->arp.oper == 0x200) { + arp_pkt_mbufs[n_arp_pkts] = mbufs[j]; + out[n_arp_pkts] = task->arp_replies_ring; + n_arp_pkts++; + } else { + out[n_other_pkts] = task->other_pkts_ring; + mbufs[n_other_pkts] = mbufs[j]; + n_other_pkts++; + } + } else { + out[n_other_pkts] = task->other_pkts_ring; + mbufs[n_other_pkts] = mbufs[j]; + n_other_pkts++; + } + } + int ret = 0; + + if (n_arp_reply_pkts) { + ret+=task->base.aux->tx_pkt_hw(&task->base, replies_mbufs, n_arp_reply_pkts, out); + } + if (n_arp_pkts) + ret+= task->base.tx_pkt(&task->base, arp_pkt_mbufs, n_arp_pkts, out); + ret+= task->base.tx_pkt(&task->base, mbufs, n_other_pkts, out); + task_update_config(task); + return ret; +} + +void task_arp_set_local_ip(struct task_base *tbase, uint32_t ip) +{ + struct task_arp *task = (struct task_arp *)tbase; + task->tmp_ip = ip; +} + +static void init_task_arp(struct task_base *tbase, struct task_args *targ) +{ + struct task_arp *task = (struct task_arp *)tbase; + struct task_args *dtarg; + struct core_task ct; + int port_found = 0; + task->other_pkts_ring = OUT_DISCARD; + task->arp_replies_ring = OUT_DISCARD; + + task->seed = rte_rdtsc(); + memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw_sw.tx_port_queue.port].eth_addr, sizeof(struct ether_addr)); + + task->ip = rte_cpu_to_be_32(targ->local_ipv4); + task->tmp_ip = task->ip; + + PROX_PANIC(targ->nb_txrings > targ->core_task_set[0].n_elems, "%d txrings but %d elems in task_set\n", targ->nb_txrings, targ->core_task_set[0].n_elems); + for (uint32_t i = 0; i < targ->nb_txrings; ++i) { + ct = targ->core_task_set[0].core_task[i]; + plog_info("ARP mode checking 
whether core %d task %d (i.e. ring %d) can handle arp\n", ct.core, ct.task, i); + dtarg = core_targ_get(ct.core, ct.task); + dtarg = find_reachable_task_sending_to_port(dtarg); + if ((dtarg != NULL) && (task_is_sub_mode(dtarg->lconf->id, dtarg->id, "l3"))) { + plog_info("ARP task sending ARP replies to core %d and task %d to handle them\n", ct.core, ct.task); + task->arp_replies_ring = i; + } else { + plog_info("ARP task sending (potentially other) packets to core %d and task %d\n", ct.core, ct.task); + task->other_pkts_ring = i; + } + } + + if ((targ->nb_txports == 0) && (task->arp_replies_ring == OUT_DISCARD)) { + PROX_PANIC(1, "arp mode must have a tx_port or a ring able to a task in l3 reaching tx port"); + } +} + +// Reply to ARP requests with random MAC addresses +static struct task_init task_init_cpe_arp = { + .mode_str = "arp", + .init = init_task_arp, + .handle = handle_arp_bulk, + .size = sizeof(struct task_arp) +}; + +// Reply to ARP requests with MAC address of the interface +static struct task_init task_init_arp = { + .mode_str = "arp", + .sub_mode_str = "local", + .init = init_task_arp, + .handle = handle_arp_bulk, + .size = sizeof(struct task_arp) +}; + +__attribute__((constructor)) static void reg_task_arp(void) +{ + reg_task(&task_init_cpe_arp); + reg_task(&task_init_arp); +} diff --git a/VNFs/DPPD-PROX/handle_arp.h b/VNFs/DPPD-PROX/handle_arp.h new file mode 100644 index 00000000..0cde22ae --- /dev/null +++ b/VNFs/DPPD-PROX/handle_arp.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_ARP_H_ +#define _HANDLE_ARP_H_ + +struct task_base; +void task_arp_set_local_ip(struct task_base *tbase, uint32_t ip); + +#endif /* _HANDLE_ARP_H_ */ diff --git a/VNFs/DPPD-PROX/handle_blockudp.c b/VNFs/DPPD-PROX/handle_blockudp.c new file mode 100644 index 00000000..04c945e5 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_blockudp.c @@ -0,0 +1,61 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_ip.h> +#include <rte_ether.h> + +#include "task_base.h" +#include "task_init.h" +#include "defines.h" +#include "etypes.h" +#include "prefetch.h" +#include "log.h" + +struct task_blockudp { + struct task_base base; +}; + +static int handle_blockudp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_blockudp *task = (struct task_blockudp *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + for (j = 0; j < n_pkts; ++j) { + struct ether_hdr *peth = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *); + struct ipv4_hdr *pip = (struct ipv4_hdr *) (peth + 1); + out[j] = peth->ether_type == ETYPE_IPv4 && pip->next_proto_id == 0x11 ? OUT_DISCARD : 0; + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void init_task_blockudp(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ +} + +static struct task_init task_init_blockudp = { + .mode_str = "blockudp", + .init = init_task_blockudp, + .handle = handle_blockudp_bulk, + .size = sizeof(struct task_blockudp) +}; + +__attribute__((constructor)) static void reg_task_blockudp(void) +{ + reg_task(&task_init_blockudp); +} diff --git a/VNFs/DPPD-PROX/handle_cgnat.c b/VNFs/DPPD-PROX/handle_cgnat.c new file mode 100644 index 00000000..6f176c08 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_cgnat.c @@ -0,0 +1,987 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_mbuf.h> +#include <rte_hash.h> +#include <rte_hash_crc.h> +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_version.h> +#include <rte_byteorder.h> +#include <rte_lpm.h> + +#include "prox_lua_types.h" +#include "prox_lua.h" +#include "prox_malloc.h" +#include "prox_cksum.h" +#include "prefetch.h" +#include "etypes.h" +#include "log.h" +#include "quit.h" +#include "task_init.h" +#include "task_base.h" +#include "lconf.h" +#include "log.h" +#include "prox_port_cfg.h" +#include "hash_entry_types.h" +#include "prox_shared.h" +#include "handle_cgnat.h" + +#define ALL_32_BITS 0xffffffff +#define BIT_16_TO_31 0xffff0000 +#define BIT_8_TO_15 0x0000ff00 +#define BIT_0_TO_15 0x0000ffff + +#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24 + +struct private_key { + uint32_t ip_addr; + uint16_t l4_port; +} __attribute__((packed)); + +struct private_flow_entry { + uint64_t flow_time; + uint32_t ip_addr; + uint32_t private_ip_idx; + uint16_t l4_port; +}; + +struct public_key { + uint32_t ip_addr; + uint16_t l4_port; +} __attribute__((packed)); + +struct public_entry { + uint32_t ip_addr; + uint16_t l4_port; + uint32_t private_ip_idx; + uint8_t dpdk_port; +}; + +struct public_ip_config_info { + uint32_t public_ip; + uint32_t max_port_count; + uint32_t port_free_count; + uint16_t *port_list; +}; + +struct private_ip_info { + uint64_t mac_aging_time; + uint32_t public_ip; + uint32_t public_ip_idx; + struct rte_ether *private_mac; + uint8_t static_entry; +}; + +struct task_nat { + struct task_base base; + struct rte_hash *private_ip_hash; + struct rte_hash *private_ip_port_hash; + struct rte_hash *public_ip_port_hash; + struct private_flow_entry *private_flow_entries; + struct public_entry *public_entries; + struct next_hop *next_hops; + struct lcore_cfg *lconf; + struct rte_lpm *ipv4_lpm; + uint32_t total_free_port_count; + 
uint32_t number_free_rules; + int private; + uint32_t public_ip_count; + uint32_t last_ip; + struct public_ip_config_info *public_ip_config_info; + struct private_ip_info *private_ip_info; + uint8_t runtime_flags; + int offload_crc; + uint64_t src_mac[PROX_MAX_PORTS]; + uint64_t src_mac_from_dpdk_port[PROX_MAX_PORTS]; + volatile int dump_public_hash; + volatile int dump_private_hash; +}; +static __m128i proto_ipsrc_portsrc_mask; +static __m128i proto_ipdst_portdst_mask; +struct pkt_eth_ipv4 { + struct ether_hdr ether_hdr; + struct ipv4_hdr ipv4_hdr; + struct udp_hdr udp_hdr; +} __attribute__((packed)); + +void task_cgnat_dump_public_hash(struct task_nat *task) +{ + task->dump_public_hash = 1; +} + +void task_cgnat_dump_private_hash(struct task_nat *task) +{ + task->dump_private_hash = 1; +} + +static void set_l2(struct task_nat *task, struct rte_mbuf *mbuf, uint8_t nh_idx) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + *((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes; + *((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx]; +} + +static uint8_t route_ipv4(struct task_nat *task, struct rte_mbuf *mbuf) +{ + struct pkt_eth_ipv4 *pkt = rte_pktmbuf_mtod(mbuf, struct pkt_eth_ipv4 *); + struct ipv4_hdr *ip = &pkt->ipv4_hdr; + struct ether_hdr *peth_out; + uint8_t tx_port; + uint32_t dst_ip; + + switch(ip->next_proto_id) { + case IPPROTO_TCP: + case IPPROTO_UDP: + dst_ip = ip->dst_addr; + break; + default: + /* Routing for other protocols is not implemented */ + plogx_info("Routing nit implemented for this protocol\n"); + return OUT_DISCARD; + } + +#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1) + uint32_t next_hop_index; +#else + uint8_t next_hop_index; +#endif + if (unlikely(rte_lpm_lookup(task->ipv4_lpm, rte_bswap32(dst_ip), &next_hop_index) != 0)) { + uint8_t* dst_ipp = (uint8_t*)&dst_ip; + plog_warn("lpm_lookup failed for ip %d.%d.%d.%d: rc = %d\n", + dst_ipp[0], dst_ipp[1], 
dst_ipp[2], dst_ipp[3], -ENOENT);
+		return OUT_DISCARD;
+	}
+
+	tx_port = task->next_hops[next_hop_index].mac_port.out_idx;
+	set_l2(task, mbuf, next_hop_index);
+	return tx_port;
+}
+
+// Return a public IP to the pool. Currently a stub: nothing is released and
+// 0 (success) is always returned.
+static int release_ip(struct task_nat *task, uint32_t *ip_addr, int public_ip_idx)
+{
+	return 0;
+}
+
+// Push a translation port back onto the free list of public IP public_ip_idx.
+// Returns 0 on success, -1 if the free list is already full.
+static int release_port(struct task_nat *task, uint32_t public_ip_idx, uint16_t udp_src_port)
+{
+	struct public_ip_config_info *public_ip_config_info = &task->public_ip_config_info[public_ip_idx];
+	if (public_ip_config_info->max_port_count > public_ip_config_info->port_free_count) {
+		public_ip_config_info->port_list[public_ip_config_info->port_free_count] = udp_src_port;
+		public_ip_config_info->port_free_count++;
+		task->total_free_port_count ++;
+		plogx_dbg("Now %d free ports for IP %d.%d.%d.%d\n", public_ip_config_info->port_free_count, IP4(public_ip_config_info->public_ip));
+	} else {
+		plogx_err("Unable to release port for ip index %d: max_port_count = %d, port_free_count = %d", public_ip_idx, public_ip_config_info->max_port_count, public_ip_config_info->port_free_count);
+		return -1;
+	}
+	return 0;
+}
+
+// Find a public IP that still has free ports, scanning round-robin starting
+// after last_ip (wrapping once). On success writes the IP to *ip_addr,
+// updates last_ip and returns the IP's index; returns -1 when no public IP
+// has any free port left.
+static int get_new_ip(struct task_nat *task, uint32_t *ip_addr)
+{
+	struct public_ip_config_info *ip_info;
+	if (++task->last_ip >= task->public_ip_count)
+		task->last_ip = 0;
+	for (uint32_t ip_idx = task->last_ip; ip_idx < task->public_ip_count; ip_idx++) {
+		ip_info = &task->public_ip_config_info[ip_idx];
+		plogx_dbg("Checking public IP index %d\n", ip_idx);
+		if ((ip_info->port_free_count) > 0) {
+			plogx_dbg("Public IP index %d (IP %d.%d.%d.%d) has %d free ports\n", ip_idx, IP4(ip_info->public_ip), ip_info->port_free_count);
+			*ip_addr = ip_info->public_ip;
+			task->last_ip = ip_idx;
+			return ip_idx;
+		}
+	}
+	for (uint32_t ip_idx = 0; ip_idx < task->last_ip; ip_idx++) {
+		ip_info = &task->public_ip_config_info[ip_idx];
+		if ((ip_info->port_free_count) > 0) {
+			plogx_dbg("Public IP index %d (IP %d.%d.%d.%d) has %d free ports\n", ip_idx, IP4(ip_info->public_ip), 
ip_info->port_free_count);
+			*ip_addr = ip_info->public_ip;
+			task->last_ip = ip_idx;
+			return ip_idx;
+		}
+	}
+	return -1;
+}
+
+// Pop a free translation port of public IP ip_idx into *udp_src_port.
+// Returns 0 on success, -1 when the IP has no free port.
+static int get_new_port(struct task_nat *task, uint32_t ip_idx, uint16_t *udp_src_port)
+{
+	struct public_ip_config_info *public_ip_config_info = &task->public_ip_config_info[ip_idx];
+	if (public_ip_config_info->port_free_count > 0) {
+		public_ip_config_info->port_free_count--;
+		*udp_src_port = public_ip_config_info->port_list[public_ip_config_info->port_free_count];
+		task->total_free_port_count --;
+		// Demoted to dbg: this runs once per new flow (hot path) and
+		// mirrors the identical dbg-level message in release_port().
+		plogx_dbg("Now %d free ports for IP %d.%d.%d.%d\n", public_ip_config_info->port_free_count, IP4(public_ip_config_info->public_ip));
+	} else
+		return -1;
+	return 0;
+}
+
+// Remove a private<->public mapping from both hashes and return the public
+// port to its pool. Returns 0 on success, -1 if either key was not found.
+static int delete_port_entry(struct task_nat *task, uint8_t proto, uint32_t private_ip, uint16_t private_port, uint32_t public_ip, uint16_t public_port, int public_ip_idx)
+{
+	int ret;
+	struct private_key private_key;
+	struct public_key public_key;
+//	private_key.proto = proto;
+	private_key.ip_addr = private_ip;
+	private_key.l4_port = private_port;
+	ret = rte_hash_del_key(task->private_ip_port_hash, (const void *)&private_key);
+	if (ret < 0) {
+		plogx_info("Unable delete key ip %d.%d.%d.%d / port %x in private ip_port hash\n", IP4(private_ip), private_port);
+		return -1;
+	} else {
+		plogx_dbg("Deleted ip %d.%d.%d.%d / port %x from private ip_port hash\n", IP4(private_ip), private_port);
+	}
+	public_key.ip_addr = public_ip;
+	public_key.l4_port = public_port;
+	ret = rte_hash_del_key(task->public_ip_port_hash, (const void *)&public_key);
+	if (ret < 0) {
+		plogx_info("Unable delete key ip %d.%d.%d.%d / port %x in public ip_port hash\n", IP4(public_ip), public_port);
+		return -1;
+	} else {
+		plogx_dbg("Deleted ip %d.%d.%d.%d / port %x (hash index %d) from public ip_port hash\n", IP4(public_ip), public_port, ret);
+		release_port(task, public_ip_idx, public_port);
+	}
+	return 0;
+}
+
+static int add_new_port_entry(struct task_nat *task, uint8_t 
proto, int public_ip_idx, int private_ip_idx, uint32_t private_src_ip, uint16_t private_udp_port, struct rte_mbuf *mbuf, uint64_t tsc, uint16_t *port)
+{
+	// Allocate a public port on public_ip_idx for the private endpoint and
+	// insert the mapping in both the private and the public hash. Returns
+	// the public hash index (>= 0) on success, -1 on failure; the allocated
+	// public port is written to *port.
+	struct private_key private_key;
+	struct public_key public_key;
+	uint32_t ip = task->public_ip_config_info[public_ip_idx].public_ip;
+	int ret;
+	if (get_new_port(task, public_ip_idx, port) < 0) {
+		plogx_info("Unable to find new port for IP %x\n", private_src_ip);
+		return -1;
+	}
+//	private_key.proto = proto;
+	private_key.ip_addr = private_src_ip;
+	private_key.l4_port = private_udp_port;
+	ret = rte_hash_add_key(task->private_ip_port_hash, (const void *)&private_key);
+	if (ret < 0) {
+		plogx_info("Unable add ip %d.%d.%d.%d / port %x in private ip_port hash\n", IP4(private_src_ip), private_udp_port);
+		release_port(task, public_ip_idx, *port);
+		return -1;
+	} else if (task->private_flow_entries[ret].ip_addr) {
+		// The key was already present (same flow seen twice in one burst):
+		// give back the freshly allocated port and reuse the existing entry.
+		// NOTE(review): *port is not updated to the existing entry's port in
+		// this path - confirm callers only use the returned index here.
+		plogx_dbg("Race condition properly handled: port alrerady added\n");
+		release_port(task, public_ip_idx, *port);
+		return ret;
+	} else {
+		plogx_dbg("Added ip %d.%d.%d.%d / port %x in private ip_port hash => %d.%d.%d.%d / %d - index = %d\n", IP4(private_src_ip), private_udp_port, IP4(ip), *port, ret);
+	}
+	task->private_flow_entries[ret].ip_addr = ip;
+	task->private_flow_entries[ret].l4_port = *port;
+	task->private_flow_entries[ret].flow_time = tsc;
+	task->private_flow_entries[ret].private_ip_idx = private_ip_idx;
+
+	public_key.ip_addr = ip;
+	public_key.l4_port = *port;
+	plogx_dbg("Adding key ip %d.%d.%d.%d / port %x in public ip_port hash\n", IP4(ip), *port);
+	ret = rte_hash_add_key(task->public_ip_port_hash, (const void *)&public_key);
+	if (ret < 0) {
+		plogx_info("Unable add ip %x / port %x in public ip_port hash\n", ip, *port);
+		// TODO: remove from private_ip_port_hash
+		release_port(task, public_ip_idx, *port);
+		return -1;
+	} else {
+		plogx_dbg("Added ip %d.%d.%d.%d / port %x in public ip_port hash\n", IP4(ip), *port);
+	}
+	task->public_entries[ret].ip_addr = private_src_ip;
+	
task->public_entries[ret].l4_port = private_udp_port;
+	task->public_entries[ret].dpdk_port = mbuf->port;
+	task->public_entries[ret].private_ip_idx = private_ip_idx;
+	return ret;
+}
+
+// Main packet handler. Depending on task->private it translates either the
+// private->public direction (source NAT, then route) or the public->private
+// direction (destination lookup, then send to the recorded DPDK port).
+// Returns the result of tx_pkt, or -1 if a bulk hash lookup fails.
+static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+{
+	struct task_nat *task = (struct task_nat *)tbase;
+	uint8_t out[MAX_PKT_BURST];
+	uint16_t j;
+	uint32_t *ip_addr, public_ip, private_ip;
+	uint16_t *udp_src_port, port, private_port, public_port;
+	struct pkt_eth_ipv4 *pkt[MAX_PKT_BURST];
+	int ret, private_ip_idx, public_ip_idx = -1, port_idx;
+	int new_entry = 0;
+	uint8_t proto;
+	uint64_t tsc = rte_rdtsc();
+	void *keys[MAX_PKT_BURST];
+	int32_t positions[MAX_PKT_BURST];
+	int map[MAX_PKT_BURST] = {0};
+
+	// Deferred, thread-requested dumps of the two hashes (debug aid).
+	if (unlikely(task->dump_public_hash)) {
+		const struct public_key *next_key;
+		void *next_data;
+		uint32_t iter = 0;
+		int i = 0;
+		int ret;
+
+		while ((ret = rte_hash_iterate(task->public_ip_port_hash, (const void **)&next_key, &next_data, &iter)) >= 0) {
+			plogx_info("Public entry %d (index %d): ip = %d.%d.%d.%d, port = %d ===> private entry: ip = %d.%d.%d.%d, port = %d\n", i++, ret, IP4(next_key->ip_addr), next_key->l4_port, IP4(task->public_entries[ret].ip_addr),task->public_entries[ret].l4_port);
+		}
+		task->dump_public_hash = 0;
+	}
+	if (unlikely(task->dump_private_hash)) {
+		const struct private_key *next_key;
+		void *next_data;
+		uint32_t iter = 0;
+		int i = 0;
+		int ret;
+
+		while ((ret = rte_hash_iterate(task->private_ip_port_hash, (const void **)&next_key, &next_data, &iter)) >= 0) {
+			plogx_info("Private entry %d (index %d): ip = %d.%d.%d.%d, port = %d ===> public entry: ip = %d.%d.%d.%d, port = %d\n", i++, ret, IP4(next_key->ip_addr), next_key->l4_port, IP4(task->private_flow_entries[ret].ip_addr),task->private_flow_entries[ret].l4_port);
+		}
+		task->dump_private_hash = 0;
+	}
+
+	for (j = 0; j < n_pkts; ++j) {
+		PREFETCH0(mbufs[j]);
+	}
+	for (j = 0; j < n_pkts; ++j) {
+		pkt[j] = rte_pktmbuf_mtod(mbufs[j], struct 
pkt_eth_ipv4 *);
+		PREFETCH0(pkt[j]);
+	}
+	if (task->private) {
+		// Private->public direction: key is the private src IP/port.
+		struct private_key key[MAX_PKT_BURST];
+		for (j = 0; j < n_pkts; ++j) {
+			/* Currently, only support eth/ipv4 packets */
+			if (pkt[j]->ether_hdr.ether_type != ETYPE_IPv4) {
+				plogx_info("Currently, only support eth/ipv4 packets\n");
+				out[j] = OUT_DISCARD;
+				// NOTE(review): a NULL key is handed to
+				// rte_hash_lookup_bulk below - confirm the DPDK version in
+				// use tolerates NULL keys in bulk lookups.
+				keys[j] = (void *)NULL;
+				continue;
+			}
+			key[j].ip_addr = pkt[j]->ipv4_hdr.src_addr;
+			key[j].l4_port = pkt[j]->udp_hdr.src_port;
+			keys[j] = &key[j];
+		}
+		ret = rte_hash_lookup_bulk(task->private_ip_port_hash, (const void **)&keys, n_pkts, positions);
+		if (unlikely(ret < 0)) {
+			plogx_info("lookup_bulk failed in private_ip_port_hash\n");
+			return -1;
+		}
+		int n_new_mapping = 0;
+		for (j = 0; j < n_pkts; ++j) {
+			port_idx = positions[j];
+			if (unlikely(port_idx < 0)) {
+				// Unknown flow: queue it for the new-mapping pass below;
+				// map[] remembers which packet the key belongs to.
+				plogx_dbg("ip %d.%d.%d.%d / port %x not found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
+				map[n_new_mapping] = j;
+				keys[n_new_mapping++] = (void *)&(pkt[j]->ipv4_hdr.src_addr);
+			} else {
+				// Known flow: rewrite src IP/port, refresh aging, checksum,
+				// then route.
+				ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
+				udp_src_port = &(pkt[j]->udp_hdr.src_port);
+				plogx_dbg("ip/port %d.%d.%d.%d / %x found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
+				*ip_addr = task->private_flow_entries[port_idx].ip_addr;
+				*udp_src_port = task->private_flow_entries[port_idx].l4_port;
+				uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
+				if (flow_time + tsc_hz < tsc) {
+					task->private_flow_entries[port_idx].flow_time = tsc;
+				}
+				private_ip_idx = task->private_flow_entries[port_idx].private_ip_idx;
+				if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
+					task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
+				prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+				out[j] = route_ipv4(task, mbufs[j]);
+			}
+		}
+
+		if (n_new_mapping) {
+			// Find whether at least IP is already known... 
+			ret = rte_hash_lookup_bulk(task->private_ip_hash, (const void **)&keys, n_new_mapping, positions);
+			if (unlikely(ret < 0)) {
+				// Bulk lookup itself failed: drop all queued packets.
+				plogx_info("lookup_bulk failed for private_ip_hash\n");
+				for (int k = 0; k < n_new_mapping; ++k) {
+					j = map[k];
+					out[j] = OUT_DISCARD;
+				}
+				n_new_mapping = 0;
+			}
+			for (int k = 0; k < n_new_mapping; ++k) {
+				private_ip_idx = positions[k];
+				j = map[k];
+				ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
+				proto = pkt[j]->ipv4_hdr.next_proto_id;
+				udp_src_port = &(pkt[j]->udp_hdr.src_port);
+				int new_ip_entry = 0;
+
+				if (unlikely(private_ip_idx < 0)) {
+					private_ip = *ip_addr;
+					private_port = *udp_src_port;
+					plogx_dbg("Did not find private ip %d.%d.%d.%d in ip hash table, looking for new public ip\n", IP4(*ip_addr));
+					// IP not found, need to get a new IP/port mapping
+					public_ip_idx = get_new_ip(task, &public_ip);
+					if (public_ip_idx < 0) {
+						plogx_info("Unable to find new ip/port\n");
+						out[j] = OUT_DISCARD;
+						continue;
+					} else {
+						plogx_dbg("found new public ip %d.%d.%d.%d at public IP index %d\n", IP4(public_ip), public_ip_idx);
+					}
+					private_ip_idx = rte_hash_add_key(task->private_ip_hash, (const void *)ip_addr);
+					// The key might be added multiple time - in case the same key was present in the bulk_lookup multiple times
+					// As such this is not an issue - the add_key will returns the index as for a new key
+					// This scenario should not happen often in real time use case
+					// as a for a new flow (flow renewal), probably only one packet will be sent (e.g. 
TCP SYN) + if (private_ip_idx < 0) { + release_ip(task, &public_ip, public_ip_idx); + plogx_info("Unable add ip %d.%d.%d.%d in private ip hash\n", IP4(*ip_addr)); + out[j] = OUT_DISCARD; + continue; + } else if (task->private_ip_info[private_ip_idx].public_ip) { + plogx_info("race condition properly handled : ip %d.%d.%d.%d already in private ip hash\n", IP4(*ip_addr)); + release_ip(task, &public_ip, public_ip_idx); + public_ip = task->private_ip_info[private_ip_idx].public_ip; + public_ip_idx = task->private_ip_info[private_ip_idx].public_ip_idx; + } else { + plogx_dbg("Added ip %d.%d.%d.%d in private ip hash\n", IP4(*ip_addr)); + rte_memcpy(&task->private_ip_info[private_ip_idx].private_mac, ((uint8_t *)pkt) + 6, 6); + task->private_ip_info[private_ip_idx].public_ip = public_ip; + task->private_ip_info[private_ip_idx].static_entry = 0; + task->private_ip_info[private_ip_idx].public_ip_idx = public_ip_idx; + new_ip_entry = 1; + } + } else { + public_ip = task->private_ip_info[private_ip_idx].public_ip; + public_ip_idx = task->private_ip_info[private_ip_idx].public_ip_idx; + } + port_idx = add_new_port_entry(task, proto, public_ip_idx, private_ip_idx, *ip_addr, *udp_src_port, mbufs[j], tsc, &public_port); + if (port_idx < 0) { + // TODO: delete IP in ip_hash + if ((new_ip_entry) && (task->last_ip != 0)) { + release_ip(task, &public_ip, public_ip_idx); + task->last_ip--; + } else if (new_ip_entry) { + release_ip(task, &public_ip, public_ip_idx); + task->last_ip = task->public_ip_count-1; + } + plogx_info("Failed to add new port entry\n"); + out[j] = OUT_DISCARD; + continue; + } else { + private_ip = *ip_addr; + private_port = *udp_src_port; + plogx_info("Added new ip/port: private ip/port = %d.%d.%d.%d/%x public ip/port = %d.%d.%d.%d/%x, index = %d\n", IP4(private_ip), private_port, IP4(public_ip), public_port, port_idx); + } + // task->private_flow_entries[port_idx].ip_addr = task->private_ip_info[private_ip_idx].public_ip; + plogx_info("Added new port: private 
ip/port = %d.%d.%d.%d/%x, public ip/port = %d.%d.%d.%d/%x\n", IP4(private_ip), private_port, IP4(task->private_ip_info[private_ip_idx].public_ip), public_port);
+				*ip_addr = public_ip ;
+				*udp_src_port = public_port;
+				uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
+				if (flow_time + tsc_hz < tsc) {
+					task->private_flow_entries[port_idx].flow_time = tsc;
+				}
+				if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
+					task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
+				prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+				// TODO: if route fails while just added new key in table, should we delete the key from the table?
+				// NOTE(review): new_entry is initialized to 0 at the top of
+				// this function and never set, so this rollback branch looks
+				// unreachable - confirm whether it should be set when
+				// add_new_port_entry creates a fresh mapping.
+				out[j] = route_ipv4(task, mbufs[j]);
+				if (out[j] && new_entry) {
+					delete_port_entry(task, proto, private_ip, private_port, *ip_addr, *udp_src_port, public_ip_idx);
+					plogx_info("Deleted port: private ip/port = %d.%d.%d.%d/%x, public ip/port = %d.%d.%d.%d/%x\n", IP4(private_ip), private_port, IP4(*ip_addr), *udp_src_port);
+				}
+			}
+		}
+		return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+	} else {
+		// Public->private direction: key is the public dst IP/port.
+		struct public_key public_key[MAX_PKT_BURST];
+		for (j = 0; j < n_pkts; ++j) {
+			/* Currently, only support eth/ipv4 packets */
+			if (pkt[j]->ether_hdr.ether_type != ETYPE_IPv4) {
+				plogx_info("Currently, only support eth/ipv4 packets\n");
+				out[j] = OUT_DISCARD;
+				keys[j] = (void *)NULL;
+				continue;
+			}
+			public_key[j].ip_addr = pkt[j]->ipv4_hdr.dst_addr;
+			public_key[j].l4_port = pkt[j]->udp_hdr.dst_port;
+			keys[j] = &public_key[j];
+		}
+		ret = rte_hash_lookup_bulk(task->public_ip_port_hash, (const void **)&keys, n_pkts, positions);
+		if (ret < 0) {
+			plogx_err("Failed lookup bulk public_ip_port_hash\n");
+			return -1;
+		}
+		for (j = 0; j < n_pkts; ++j) {
+			port_idx = positions[j];
+			ip_addr = &(pkt[j]->ipv4_hdr.dst_addr);
+			udp_src_port = &(pkt[j]->udp_hdr.dst_port);
+			if (port_idx < 0) {
+				plogx_err("Failed to find ip/port 
%d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port); + out[j] = OUT_DISCARD; + } else { + plogx_dbg("Found ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port); + *ip_addr = task->public_entries[port_idx].ip_addr; + *udp_src_port = task->public_entries[port_idx].l4_port; + private_ip_idx = task->public_entries[port_idx].private_ip_idx; + plogx_dbg("Found private IP info for ip %d.%d.%d.%d\n", IP4(*ip_addr)); + rte_memcpy(((uint8_t *)(pkt[j])) + 0, &task->private_ip_info[private_ip_idx].private_mac, 6); + rte_memcpy(((uint8_t *)(pkt[j])) + 6, &task->src_mac_from_dpdk_port[task->public_entries[port_idx].dpdk_port], 6); + out[j] = task->public_entries[port_idx].dpdk_port; + } + prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + } + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); + } + +} + +static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua_place from, const char *name, uint8_t socket) +{ + struct rte_hash *tmp_priv_ip_hash, *tmp_priv_hash, *tmp_pub_hash; + struct private_flow_entry *tmp_priv_flow_entries; + struct public_entry *tmp_pub_entries; + uint32_t n_entries = 0;; + uint32_t ip_from, ip_to; + uint16_t port_from, port_to; + int ret, idx, pop, pop2, pop3, n_static_entries = 0; + uint32_t dst_ip1, dst_ip2; + struct val_range dst_port; + struct public_ip_config_info *ip_info; + struct public_ip_config_info *tmp_public_ip_config_info; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + plogx_err("Can't read cgnat since data is not a table\n"); + return -1; + } + + struct tmp_public_ip { + uint32_t ip_beg; + uint32_t ip_end; + uint16_t port_beg; + uint16_t port_end; + }; + struct tmp_static_ip { + uint32_t private_ip; + uint32_t public_ip; + }; + struct tmp_static_ip_port { + uint32_t private_ip; + uint32_t public_ip; + uint32_t n_ports; + uint16_t private_port; + 
uint16_t public_port;
+		int ip_found;	// 0 if this entry's public IP is first seen here
+		uint8_t port_found;
+	};
+	uint32_t n_public_groups = 0;
+	uint32_t n_public_ip = 0;
+	uint32_t n_static_ip = 0;
+	uint32_t n_static_ip_port = 0;
+	unsigned int i = 0;
+	struct tmp_public_ip *tmp_public_ip = NULL;
+	struct tmp_static_ip *tmp_static_ip = NULL;
+	struct tmp_static_ip_port *tmp_static_ip_port = NULL;
+
+	// Look for Dynamic entries configuration
+	plogx_info("Reading dynamic NAT table\n");
+	if ((pop2 = lua_getfrom(L, TABLE, "dynamic")) < 0) {
+		plogx_info("No dynamic table found\n");
+	} else {
+		uint64_t n_ip, n_port;
+		if (!lua_istable(L, -1)) {
+			plogx_err("Can't read cgnat since data is not a table\n");
+			return -1;
+		}
+		lua_len(L, -1);
+		n_public_groups = lua_tointeger(L, -1);
+		plogx_info("%d groups of public IP\n", n_public_groups);
+		tmp_public_ip = (struct tmp_public_ip *)malloc(n_public_groups * sizeof(struct tmp_public_ip));
+		PROX_PANIC(tmp_public_ip == NULL, "Failed to allocated tmp_public_ip\n");
+		lua_pop(L, 1);
+		lua_pushnil(L);
+
+		// Each group declares an inclusive IP range and an inclusive port
+		// range; the ranges are validated before being recorded.
+		while (lua_next(L, -2)) {
+			if (lua_to_ip(L, TABLE, "public_ip_range_start", &dst_ip1) ||
+				lua_to_ip(L, TABLE, "public_ip_range_stop", &dst_ip2) ||
+				lua_to_val_range(L, TABLE, "public_port", &dst_port))
+				return -1;
+			PROX_PANIC(dst_ip2 < dst_ip1, "public_ip_range error: %d.%d.%d.%d < %d.%d.%d.%d\n", (dst_ip2 >> 24), (dst_ip2 >> 16) & 0xFF, (dst_ip2 >> 8) & 0xFF, dst_ip2 & 0xFF, dst_ip1 >> 24, (dst_ip1 >> 16) & 0xFF, (dst_ip1 >> 8) & 0xFF, dst_ip1 & 0xFF);
+			PROX_PANIC(dst_port.end < dst_port.beg, "public_port error: %d < %d\n", dst_port.end, dst_port.beg);
+			n_ip = dst_ip2 - dst_ip1 + 1;
+			n_port = dst_port.end - dst_port.beg + 1;
+			n_public_ip += n_ip;
+			plogx_info("Found IP from %d.%d.%d.%d to %d.%d.%d.%d and port from %d to %d\n", dst_ip1 >> 24, (dst_ip1 >> 16) & 0xFF, (dst_ip1 >> 8) & 0xFF, dst_ip1 & 0xFF, (dst_ip2 >> 24), (dst_ip2 >> 16) & 0xFF, (dst_ip2 >> 8) & 0xFF, dst_ip2 & 0xFF, dst_port.beg, dst_port.end);
+			tmp_public_ip[i].ip_beg = dst_ip1;
+			
tmp_public_ip[i].ip_end = dst_ip2; + tmp_public_ip[i].port_beg = dst_port.beg; + tmp_public_ip[i++].port_end = dst_port.end; + n_entries += n_ip * n_port; + lua_pop(L, 1); + } + lua_pop(L, pop2); + + } + i = 0; + if ((pop2 = lua_getfrom(L, TABLE, "static_ip")) < 0) { + plogx_info("No static ip table found\n"); + } else { + if (!lua_istable(L, -1)) { + plogx_err("Can't read cgnat since data is not a table\n"); + return -1; + } + + lua_len(L, -1); + n_static_ip = lua_tointeger(L, -1); + plogx_info("%d entries in static ip table\n", n_static_ip); + lua_pop(L, 1); + tmp_static_ip = (struct tmp_static_ip *)malloc(n_static_ip * sizeof(struct tmp_static_ip)); + PROX_PANIC(tmp_static_ip == NULL, "Failed to allocated tmp_static_ip\n"); + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_ip(L, TABLE, "src_ip", &ip_from) || + lua_to_ip(L, TABLE, "dst_ip", &ip_to)) + return -1; + ip_from = rte_bswap32(ip_from); + ip_to = rte_bswap32(ip_to); + tmp_static_ip[i].private_ip = ip_from; + tmp_static_ip[i++].public_ip = ip_to; + for (unsigned int j = 0; j < n_public_groups; j++) { + if ((tmp_public_ip[j].ip_beg <= ip_to) && (ip_to <= tmp_public_ip[j].ip_end)) { + PROX_PANIC(1, "list of static ip mapping overlap with list of dynamic IP => not supported yet\n"); + } + } + n_public_ip++; + lua_pop(L, 1); + } + lua_pop(L, pop2); + } + + i = 0; + if ((pop2 = lua_getfrom(L, TABLE, "static_ip_port")) < 0) { + plogx_info("No static table found\n"); + } else { + if (!lua_istable(L, -1)) { + plogx_err("Can't read cgnat since data is not a table\n"); + return -1; + } + + lua_len(L, -1); + n_static_ip_port = lua_tointeger(L, -1); + plogx_info("%d entries in static table\n", n_static_ip_port); + lua_pop(L, 1); + tmp_static_ip_port = (struct tmp_static_ip_port *)malloc(n_static_ip_port * sizeof(struct tmp_static_ip_port)); + PROX_PANIC(tmp_static_ip_port == NULL, "Failed to allocated tmp_static_ip_port\n"); + lua_pushnil(L); + + while (lua_next(L, -2)) { + if (lua_to_ip(L, TABLE, "src_ip", 
&ip_from) || + lua_to_ip(L, TABLE, "dst_ip", &ip_to) || + lua_to_port(L, TABLE, "src_port", &port_from) || + lua_to_port(L, TABLE, "dst_port", &port_to)) + return -1; + + ip_from = rte_bswap32(ip_from); + ip_to = rte_bswap32(ip_to); + port_from = rte_bswap16(port_from); + port_to = rte_bswap16(port_to); + tmp_static_ip_port[i].private_ip = ip_from; + tmp_static_ip_port[i].public_ip = ip_to; + tmp_static_ip_port[i].private_port = port_from; + tmp_static_ip_port[i].public_port = port_to; + tmp_static_ip_port[i].n_ports = 1; + for (unsigned int j = 0; j < n_public_groups; j++) { + if ((tmp_public_ip[j].ip_beg <= rte_bswap32(ip_to)) && (rte_bswap32(ip_to) <= tmp_public_ip[j].ip_end)) { + tmp_static_ip_port[i].ip_found = j + 11; + PROX_PANIC(1, "list of static ip/port mapping overlap with list of dynamic IP => not supported yet\n"); + } + } + for (unsigned int j = 0; j < n_static_ip; j++) { + if ((tmp_static_ip[j].public_ip == ip_to) ) { + tmp_static_ip_port[i].ip_found = j + 1; + PROX_PANIC(1, "list of static ip/port mapping overlap with list of static ip => not supported yet\n"); + } + } + for (unsigned int j = 0; j <= i; j++) { + if (ip_to == tmp_static_ip_port[j].public_ip) { + tmp_static_ip_port[i].ip_found = j + 1; + tmp_static_ip_port[j].n_ports++; + tmp_static_ip_port[i].n_ports = 0; + } + } + i++; + if (!tmp_static_ip_port[i].ip_found) { + n_public_ip++; + n_entries++; + } + lua_pop(L, 1); + } + lua_pop(L, pop2); + } + lua_pop(L, pop); + + tmp_public_ip_config_info = (struct public_ip_config_info *)prox_zmalloc(n_public_ip * sizeof(struct public_ip_config_info), socket); + PROX_PANIC(tmp_public_ip_config_info == NULL, "Failed to allocate PUBLIC IP INFO\n"); + plogx_info("%d PUBLIC IP INFO allocated\n", n_public_ip); + + struct private_ip_info *tmp_priv_ip_info = (struct private_ip_info *)prox_zmalloc(4 * n_public_ip * sizeof(struct public_ip_config_info), socket); + PROX_PANIC(tmp_priv_ip_info == NULL, "Failed to allocate PRIVATE IP INFO\n"); + plogx_info("%d 
PRIVATE IP INFO allocated\n", 4 * n_public_ip); + + uint32_t ip_free_count = 0; + for (i = 0; i < n_public_groups; i++) { + for (uint32_t ip = tmp_public_ip[i].ip_beg; ip <= tmp_public_ip[i].ip_end; ip++) { + ip_info = &tmp_public_ip_config_info[ip_free_count]; + ip_info->public_ip = rte_bswap32(ip); + ip_info->port_list = (uint16_t *)prox_zmalloc((dst_port.end - dst_port.beg) * sizeof(uint16_t), socket); + PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", ip); + for (uint32_t port = tmp_public_ip[i].port_beg; port <= tmp_public_ip[i].port_end; port++) { + ip_info->port_list[ip_info->port_free_count] = rte_bswap16(port); + ip_info->port_free_count++; + } + ip_info->max_port_count = ip_info->port_free_count; + plogx_dbg("Added IP %d.%d.%d.%d with ports from %x to %x at index %x\n", IP4(ip_info->public_ip), tmp_public_ip[i].port_beg, tmp_public_ip[i].port_end, ip_free_count); + ip_free_count++; + } + } + uint32_t public_ip_count = ip_free_count; + for (i = 0; i < n_static_ip; i++) { + ip_info = &tmp_public_ip_config_info[ip_free_count]; + ip_info->public_ip = tmp_static_ip[i].public_ip; + ip_info->port_list = NULL; + ip_info->max_port_count = 0; + ip_free_count++; + } + for (i = 0; i < n_static_ip_port; i++) { + if (!tmp_static_ip_port[i].ip_found) { + ip_info = &tmp_public_ip_config_info[ip_free_count]; + ip_info->public_ip = tmp_static_ip_port[i].public_ip; + ip_info->port_list = (uint16_t *)prox_zmalloc(tmp_static_ip_port[i].n_ports * sizeof(uint16_t), socket); + PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", tmp_static_ip_port[i].public_ip); + ip_info->port_list[ip_info->port_free_count] = tmp_static_ip_port[i].public_port; + ip_info->port_free_count++; + ip_info->max_port_count = ip_info->port_free_count; + ip_free_count++; + } else { + for (unsigned j = 0; j < ip_free_count; j++) { + ip_info = &tmp_public_ip_config_info[j]; + if (ip_info->public_ip == tmp_static_ip_port[i].public_ip) { 
+					ip_info->port_list[ip_info->port_free_count] = tmp_static_ip_port[i].public_port;
+					ip_info->port_free_count++;
+					ip_info->max_port_count = ip_info->port_free_count;
+					break;
+				}
+			}
+		}
+	}
+	plogx_info("%d entries in dynamic table\n", n_entries);
+
+	// Hash tables are oversized 4x to keep the load factor low.
+	n_entries = n_entries * 4;
+	static char hash_name[30];
+	snprintf(hash_name, sizeof(hash_name), "A%03d_hash_nat_table", targ->lconf->id);
+	struct rte_hash_parameters hash_params = {
+		.name = hash_name,
+		.entries = n_entries,
+		.key_len = sizeof(struct private_key),
+		.hash_func = rte_hash_crc,
+		.hash_func_init_val = 0,
+	};
+	plogx_info("hash table name = %s\n", hash_params.name);
+	struct private_key private_key;
+	struct public_key public_key;
+	tmp_priv_hash = rte_hash_create(&hash_params);
+	PROX_PANIC(tmp_priv_hash == NULL, "Failed to set up private hash table for NAT\n");
+	plogx_info("private hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+
+	tmp_priv_flow_entries = (struct private_flow_entry *)prox_zmalloc(n_entries * sizeof(struct private_flow_entry), socket);
+	PROX_PANIC(tmp_priv_flow_entries == NULL, "Failed to allocate memory for private NAT %u entries\n", n_entries);
+	plogx_info("private data allocated, with %d entries of size %ld\n", n_entries, sizeof(struct private_flow_entry));
+
+	// Each rte_hash needs a unique name: bump the leading letter (A->B->...).
+	hash_name[0]++;
+	//hash_params.name[0]++;
+	plogx_info("hash table name = %s\n", hash_params.name);
+	hash_params.key_len = sizeof(uint32_t);
+	hash_params.entries = 4 * ip_free_count;
+	tmp_priv_ip_hash = rte_hash_create(&hash_params);
+	PROX_PANIC(tmp_priv_ip_hash == NULL, "Failed to set up private ip hash table for NAT\n");
+	plogx_info("private ip hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+
+	hash_name[0]++;
+	//hash_params.name[0]++;
+	plogx_info("hash table name = %s\n", hash_params.name);
+	hash_params.entries = n_entries;
+	hash_params.key_len = sizeof(struct public_key);
+	
tmp_pub_hash = rte_hash_create(&hash_params);
+	PROX_PANIC(tmp_pub_hash == NULL, "Failed to set up public hash table for NAT\n");
+	plogx_info("public hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+
+	hash_name[0]++;
+	//hash_params.name[0]++;
+	tmp_pub_entries = (struct public_entry *)prox_zmalloc(n_entries * sizeof(struct public_entry), socket);
+	PROX_PANIC(tmp_pub_entries == NULL, "Failed to allocate memory for public NAT %u entries\n", n_entries);
+	plogx_info("public data allocated, with %d entries of size %ld\n", n_entries, sizeof(struct private_flow_entry));
+
+	// Pre-install every static ip/port mapping in both hashes; duplicates
+	// are configuration errors and abort via PROX_PANIC.
+	for (i = 0; i < n_static_ip_port; i++) {
+		ip_to = tmp_static_ip_port[i].public_ip;
+		ip_from = tmp_static_ip_port[i].private_ip;
+		port_to = tmp_static_ip_port[i].public_port;
+		port_from = tmp_static_ip_port[i].private_port;
+		private_key.ip_addr = ip_from;
+		private_key.l4_port = port_from;
+		ret = rte_hash_lookup(tmp_priv_hash, (const void *)&private_key);
+		PROX_PANIC(ret >= 0, "Key %x %x already exists in NAT private hash table\n", ip_from, port_from);
+
+		idx = rte_hash_add_key(tmp_priv_ip_hash, (const void *)&ip_from);
+		PROX_PANIC(idx < 0, "Failed to add ip %x to NAT private hash table\n", ip_from);
+		ret = rte_hash_add_key(tmp_priv_hash, (const void *)&private_key);
+		PROX_PANIC(ret < 0, "Failed to add Key %x %x to NAT private hash table\n", ip_from, port_from);
+		tmp_priv_flow_entries[ret].ip_addr = ip_to;
+		// NOTE(review): -1 wraps to UINT64_MAX in flow_time, presumably so
+		// static entries never look aged - confirm against the aging checks
+		// in handle_nat_bulk.
+		tmp_priv_flow_entries[ret].flow_time = -1;
+		tmp_priv_flow_entries[ret].private_ip_idx = idx;
+		tmp_priv_flow_entries[ret].l4_port = port_to;
+
+		public_key.ip_addr = ip_to;
+		public_key.l4_port = port_to;
+		ret = rte_hash_lookup(tmp_pub_hash, (const void *)&public_key);
+		PROX_PANIC(ret >= 0, "Key %d.%d.%d.%d port %x (for private IP %d.%d.%d.%d port %x) already exists in NAT public hash table fir IP %d.%d.%d.%d port %x\n", IP4(ip_to), port_to, IP4(ip_from), port_from, IP4(tmp_pub_entries[ret].ip_addr), tmp_pub_entries[ret].l4_port);
+
+ ret = rte_hash_add_key(tmp_pub_hash, (const void *)&public_key); + PROX_PANIC(ret < 0, "Failed to add Key %x %x to NAT public hash table\n", ip_to, port_to); + tmp_pub_entries[ret].ip_addr = ip_from; + tmp_pub_entries[ret].l4_port = port_from; + tmp_pub_entries[ret].private_ip_idx = idx; + } + + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + struct task_args *target_targ = (struct task_args *)&(targ->lconf->targs[task_id]); + enum task_mode smode = target_targ->mode; + if (CGNAT == smode) { + target_targ->public_ip_count = public_ip_count; + target_targ->private_ip_hash = tmp_priv_ip_hash; + target_targ->private_ip_port_hash = tmp_priv_hash; + target_targ->private_ip_info = tmp_priv_ip_info; + target_targ->private_flow_entries = tmp_priv_flow_entries; + target_targ->public_ip_port_hash = tmp_pub_hash; + target_targ->public_entries = tmp_pub_entries; + target_targ->public_ip_config_info = tmp_public_ip_config_info; + } + } + return 0; +} + +static void early_init_task_nat(struct task_args *targ) +{ + int ret; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + if (!targ->private_ip_hash) { + ret = lua_to_hash_nat(targ, prox_lua(), GLOBAL, targ->nat_table, socket_id); + PROX_PANIC(ret != 0, "Failed to load NAT table from lua:\n%s\n", get_lua_to_errors()); + } +} + +static void init_task_nat(struct task_base *tbase, struct task_args *targ) +{ + struct task_nat *task = (struct task_nat *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + /* Use destination IP by default. 
*/
+	task->private = targ->use_src;
+
+	PROX_PANIC(!strcmp(targ->nat_table, ""), "No nat table specified\n");
+	task->lconf = targ->lconf;
+	task->runtime_flags = targ->runtime_flags;
+
+	task->public_ip_count = targ->public_ip_count;
+	// last_ip is set past the end; get_new_ip pre-increments and wraps to 0.
+	task->last_ip = targ->public_ip_count;
+	task->private_ip_hash = targ->private_ip_hash;
+	task->private_ip_port_hash = targ->private_ip_port_hash;
+	task->private_ip_info = targ->private_ip_info;
+	task->private_flow_entries = targ->private_flow_entries;
+	task->public_ip_port_hash = targ->public_ip_port_hash;
+	task->public_entries = targ->public_entries;
+	task->public_ip_config_info = targ->public_ip_config_info;
+
+	proto_ipsrc_portsrc_mask = _mm_set_epi32(BIT_0_TO_15, 0, ALL_32_BITS, BIT_8_TO_15);
+	proto_ipdst_portdst_mask = _mm_set_epi32(BIT_16_TO_31, ALL_32_BITS, 0, BIT_8_TO_15);
+
+	struct lpm4 *lpm;
+
+	// Load (or reuse) the IPv4 LPM routing table for this socket.
+	PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n");
+	if (targ->flags & TASK_ARG_LOCAL_LPM) {
+		int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
+		PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
+		prox_sh_add_socket(socket_id, targ->route_table, lpm);
+		// NOTE(review): number_free_rules is assigned again a few lines
+		// below; this first assignment is redundant.
+		task->number_free_rules = lpm->n_free_rules;
+	} else {
+		lpm = prox_sh_find_socket(socket_id, targ->route_table);
+		if (!lpm) {
+			int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
+			PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
+			prox_sh_add_socket(socket_id, targ->route_table, lpm);
+		}
+	}
+	task->ipv4_lpm = lpm->rte_lpm;
+	task->next_hops = lpm->next_hops;
+	task->number_free_rules = lpm->n_free_rules;
+
+	// Sanity check: every next hop must map to an existing tx port or ring.
+	for (uint32_t i = 0; i < MAX_HOP_INDEX; i++) {
+		int tx_port = task->next_hops[i].mac_port.out_idx;
+		if ((tx_port > targ->nb_txports - 1) && (tx_port > targ->nb_txrings - 1)) {
+			PROX_PANIC(1, "Routing Table contains port %d but only %d tx port/ %d ring:\n", tx_port, targ->nb_txports, targ->nb_txrings);
+		}
+	}
+
+	if (targ->nb_txrings) { 
+ struct task_args *dtarg; + struct core_task ct; + for (uint32_t i = 0; i < targ->nb_txrings; ++i) { + ct = targ->core_task_set[0].core_task[i]; + dtarg = core_targ_get(ct.core, ct.task); + dtarg = find_reachable_task_sending_to_port(dtarg); + task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr))) | ((uint64_t)ETYPE_IPv4 << (64 - 16)); + task->src_mac_from_dpdk_port[dtarg->tx_port_queue[0].port] = task->src_mac[i]; + plogx_dbg("src_mac = %lx for port %d %d\n", task->src_mac[i], i, dtarg->tx_port_queue[0].port); + } + } else { + for (uint32_t i = 0; i < targ->nb_txports; ++i) { + task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr))) | ((uint64_t)ETYPE_IPv4 << (64 - 16)); + task->src_mac_from_dpdk_port[targ->tx_port_queue[0].port] = task->src_mac[i]; + plogx_dbg("src_mac = %lx for port %d %d\n", task->src_mac[i], i, targ->tx_port_queue[i].port); + } + } + + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + task->offload_crc = port->capabilities.tx_offload_cksum; + } +} + +/* Basic static nat. 
*/ +static struct task_init task_init_nat = { + .mode = CGNAT, + .mode_str = "cgnat", + .early_init = early_init_task_nat, + .init = init_task_nat, + .handle = handle_nat_bulk, +#ifdef SOFT_CRC + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_ROUTING|TASK_FEATURE_ZERO_RX, +#else + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_ROUTING|TASK_FEATURE_ZERO_RX, +#endif + .size = sizeof(struct task_nat), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_nat(void) +{ + reg_task(&task_init_nat); +} diff --git a/VNFs/DPPD-PROX/handle_cgnat.h b/VNFs/DPPD-PROX/handle_cgnat.h new file mode 100644 index 00000000..ab26be34 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_cgnat.h @@ -0,0 +1,25 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
*/

#ifndef _HANDLE_CGNAT_H_
#define _HANDLE_CGNAT_H_

/* Opaque per-task state; defined in handle_cgnat.c. */
struct task_nat;

/* Debug helpers: print the public/private NAT hash tables of a cgnat task. */
void task_cgnat_dump_public_hash(struct task_nat *task);
void task_cgnat_dump_private_hash(struct task_nat *task);

#endif
diff --git a/VNFs/DPPD-PROX/handle_classify.c b/VNFs/DPPD-PROX/handle_classify.c
new file mode 100644
index 00000000..f4f96aaf
--- /dev/null
+++ b/VNFs/DPPD-PROX/handle_classify.c
@@ -0,0 +1,133 @@
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <rte_ip.h>
#include <stdio.h>
#include <string.h>
#include <rte_version.h>

#include "prox_lua.h"
#include "prox_lua_types.h"

#include "lconf.h"
#include "task_base.h"
#include "task_init.h"
#include "defines.h"
#include "prefetch.h"
#include "qinq.h"
#include "prox_cfg.h"
#include "prox_shared.h"
#include "log.h"
#include "quit.h"
#include "prox_shared.h"

/* Per-task state for the "classify" mode: maps QinQ-tagged IPv4 packets
   to a QoS (traffic class, queue) based on the IP DSCP field. */
struct task_classify {
	struct task_base base;
	uint16_t *user_table; /* QinQ tags -> user id; shared per socket */
	uint8_t *dscp;        /* DSCP -> packed (tc << 2 | queue); shared per socket */
};

/* Classify one packet: write user, traffic class and queue into the
   mbuf's sched metadata for a downstream rte_sched (QoS) task. */
static inline void handle_classify(struct task_classify *task, struct rte_mbuf *mbuf)
{
	const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbuf, const struct qinq_hdr *);

	uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci);

	/* Traffic class can be set by ACL task. If this is the case,
	   don't overwrite it using dscp. Instead, use the
	   traffic class that had been set. */

	uint32_t prev_tc;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
	/* DPDK >= 1.8: read back tc through the public sched API. */
	uint32_t dummy;
	rte_sched_port_pkt_read_tree_path(mbuf, &dummy, &dummy, &prev_tc, &dummy);
#else
	/* Older DPDK: sched hierarchy lives directly in the mbuf. */
	struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
	prev_tc = sched->traffic_class;
#endif

	const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1);
	/* DSCP is the upper 6 bits of the IPv4 TOS byte. */
	uint8_t dscp = task->dscp[ipv4_hdr->type_of_service >> 2];

	/* Table entries pack queue in the low 2 bits, tc above them. */
	uint8_t queue = dscp & 0x3;
	uint8_t tc = prev_tc? prev_tc : dscp >> 2;

	rte_sched_port_pkt_write(mbuf, 0, task->user_table[qinq], tc, queue, 0);
}

/* Bulk handler: classify every packet, prefetching ahead to hide
   memory latency, then forward the whole burst unchanged. */
static int handle_classify_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_classify *task = (struct task_classify *)tbase;

	uint16_t j;
#ifdef PROX_PREFETCH_OFFSET
	for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
		prefetch_nta(mbufs[j]);
	}
	for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) {
		prefetch_nta(rte_pktmbuf_mtod(mbufs[j - 1], void *));
	}
#endif
	/* NOTE(review): loop bound uses PREFETCH_OFFSET while the prefetch
	   statements are guarded by PROX_PREFETCH_OFFSET — confirm both
	   macros are defined together (see prefetch.h / defines.h). */
	for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) {
#ifdef PROX_PREFETCH_OFFSET
		prefetch_nta(mbufs[j + PREFETCH_OFFSET]);
		prefetch_nta(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *));
#endif
		handle_classify(task, mbufs[j]);
	}
#ifdef PROX_PREFETCH_OFFSET
	prefetch_nta(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
	for (; j < n_pkts; ++j) {
		handle_classify(task, mbufs[j]);
	}
#endif

	return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
}

/* Load (or reuse) the per-socket user table and DSCP table, resolving
   them from the Lua configuration on first use. */
static void init_task_classify(struct task_base *tbase, struct task_args *targ)
{
	struct task_classify *task = (struct task_classify *)tbase;
	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);

	task->user_table = prox_sh_find_socket(socket_id, "user_table");
	if (!task->user_table) {
		PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n");
		int ret = lua_to_user_table(prox_lua(), GLOBAL, targ->user_table, socket_id, &task->user_table);
		PROX_PANIC(ret, "Failed to create user table from config:\n%s\n", get_lua_to_errors());
		prox_sh_add_socket(socket_id, "user_table", task->user_table);
	}

	PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n");
	task->dscp = prox_sh_find_socket(socket_id, targ->dscp);
	if (!task->dscp) {
		int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp);
		PROX_PANIC(ret, "Failed to create dscp table from config\n");
		prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
	}
}

/* Registration record for the "classify" task mode. */
static struct task_init task_init_classify = {
	.mode_str = "classify",
	.init = init_task_classify,
	.handle = handle_classify_bulk,
	.flag_features = TASK_FEATURE_NEVER_DISCARDS,
	.size = sizeof(struct task_classify)
};

__attribute__((constructor)) static void reg_task_classify(void)
{
	reg_task(&task_init_classify);
}
diff --git a/VNFs/DPPD-PROX/handle_dump.c b/VNFs/DPPD-PROX/handle_dump.c
new file mode 100644
index 00000000..c35a6e9e
--- /dev/null
+++ b/VNFs/DPPD-PROX/handle_dump.c
@@ -0,0 +1,131 @@
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+*/ + +#include <rte_cycles.h> +#include <pcap.h> + +#include "prox_malloc.h" +#include "clock.h" +#include "log.h" +#include "lconf.h" +#include "task_init.h" +#include "task_base.h" +#include "stats.h" + +struct task_dump { + struct task_base base; + uint32_t n_mbufs; + struct rte_mbuf **mbufs; + uint32_t n_pkts; + char pcap_file[128]; +}; + +static uint16_t buffer_packets(struct task_dump *task, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + uint16_t j = 0; + + if (task->n_mbufs == task->n_pkts) + return 0; + + for (j = 0; j < n_pkts && task->n_mbufs < task->n_pkts; ++j) { + mbufs[j]->udata64 = rte_rdtsc(); + task->mbufs[task->n_mbufs++] = mbufs[j]; + } + + return j; +} + +static int handle_dump_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_dump *task = (struct task_dump *)tbase; + const uint16_t ofs = buffer_packets(task, mbufs, n_pkts); + + for (uint16_t j = ofs; j < n_pkts; ++j) + rte_pktmbuf_free(mbufs[j]); + TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, n_pkts - ofs); + return n_pkts; +} + +static void init_task_dump(struct task_base *tbase, __attribute__((unused)) struct task_args *targ) +{ + struct task_dump *task = (struct task_dump *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->mbufs = prox_zmalloc(sizeof(*task->mbufs) * targ->n_pkts, socket_id); + task->n_pkts = targ->n_pkts; + if (!strcmp(targ->pcap_file, "")) { + strcpy(targ->pcap_file, "out.pcap"); + } + strncpy(task->pcap_file, targ->pcap_file, sizeof(task->pcap_file)); +} + +static void stop(struct task_base *tbase) +{ + struct task_dump *task = (struct task_dump *)tbase; + static pcap_dumper_t *pcap_dump_handle; + pcap_t *handle; + uint32_t n_pkts = 65536; + struct pcap_pkthdr header = {{0}, 0, 0}; + static int once = 0; + char err_str[PCAP_ERRBUF_SIZE]; + const uint64_t hz = rte_get_tsc_hz(); + struct timeval tv = {0}; + uint64_t tsc, beg = 0; + + plogx_info("Dumping %d packets to '%s'\n", task->n_mbufs, 
task->pcap_file); + handle = pcap_open_dead(DLT_EN10MB, n_pkts); + pcap_dump_handle = pcap_dump_open(handle, task->pcap_file); + + if (task->n_mbufs) { + beg = task->mbufs[0]->udata64; + } + for (uint32_t j = 0; j < task->n_mbufs; ++j) { + tsc = task->mbufs[j]->udata64 - beg; + header.len = rte_pktmbuf_pkt_len(task->mbufs[j]); + header.caplen = header.len; + tsc_to_tv(&header.ts, tsc); + pcap_dump((unsigned char *)pcap_dump_handle, &header, rte_pktmbuf_mtod(task->mbufs[j], void *)); + } + + pcap_dump_close(pcap_dump_handle); + pcap_close(handle); + plogx_info("Dump complete, releasing mbufs\n"); + + uint32_t j = 0; + + while (j + 64 < task->n_mbufs) { + tbase->tx_pkt(tbase, &task->mbufs[j], 64, NULL); + j += 64; + } + if (j < task->n_mbufs) { + tbase->tx_pkt(tbase, &task->mbufs[j], task->n_mbufs - j, NULL); + } + task->n_mbufs = 0; +} + +static struct task_init task_init_dump = { + .mode_str = "dump", + .init = init_task_dump, + .handle = handle_dump_bulk, + .stop = stop, + .flag_features = TASK_FEATURE_ZERO_RX, + .size = sizeof(struct task_dump) +}; + +__attribute__((constructor)) static void reg_task_dump(void) +{ + reg_task(&task_init_dump); +} diff --git a/VNFs/DPPD-PROX/handle_fm.c b/VNFs/DPPD-PROX/handle_fm.c new file mode 100644 index 00000000..c4a10e67 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_fm.c @@ -0,0 +1,373 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <dlfcn.h> + +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_cycles.h> +#include <rte_ether.h> +#include <rte_eth_ctrl.h> + +#include "log.h" +#include "quit.h" +#include "lconf.h" +#include "task_init.h" +#include "task_base.h" +#include "kv_store_expire.h" +#include "stats.h" +#include "prox_shared.h" +#include "etypes.h" +#include "prox_cfg.h" +#include "dpi/dpi.h" + +struct task_dpi_per_core { + void *dpi_opaque; +}; + +struct task_fm { + struct task_base base; + /* FM related fields */ + struct kv_store_expire *kv_store_expire; + void *dpi_opaque; + + struct dpi_engine dpi_engine; + struct task_dpi_per_core *dpi_shared; /* Used only during init */ +}; + +struct eth_ip4_udp { + struct ether_hdr l2; + struct ipv4_hdr l3; + union { + struct udp_hdr udp; + struct tcp_hdr tcp; + } l4; +} __attribute__((packed)); + +union pkt_type { + struct { + uint16_t etype; + uint8_t ip_byte; + uint8_t next_proto; + } __attribute__((packed)); + uint32_t val; +}; + +static union pkt_type pkt_type_udp = { + .next_proto = IPPROTO_UDP, + .ip_byte = 0x45, + .etype = ETYPE_IPv4, +}; + +static union pkt_type pkt_type_tcp = { + .next_proto = IPPROTO_TCP, + .ip_byte = 0x45, + .etype = ETYPE_IPv4, +}; + +static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct flow_info *fi_flipped, uint32_t *len, uint8_t **payload) +{ + union pkt_type pkt_type = { + .next_proto = p->l3.next_proto_id, + .ip_byte = p->l3.version_ihl, + .etype = p->l2.ether_type, + }; + + memset(fi->reservered, 0, sizeof(fi->reservered)); + memset(fi_flipped->reservered, 0, sizeof(fi_flipped->reservered)); + + if (pkt_type.val == pkt_type_udp.val) { + fi->ip_src = p->l3.src_addr; + fi->ip_dst = p->l3.dst_addr; + fi->ip_proto = p->l3.next_proto_id; + fi->port_src = p->l4.udp.src_port; + fi->port_dst = p->l4.udp.dst_port; + + fi_flipped->ip_src = p->l3.dst_addr; + fi_flipped->ip_dst = p->l3.src_addr; + fi_flipped->ip_proto = p->l3.next_proto_id; + 
fi_flipped->port_src = p->l4.udp.dst_port; + fi_flipped->port_dst = p->l4.udp.src_port; + + *len = rte_be_to_cpu_16(p->l4.udp.dgram_len) - sizeof(struct udp_hdr); + *payload = (uint8_t*)(&p->l4.udp) + sizeof(struct udp_hdr); + return 0; + } + else if (pkt_type.val == pkt_type_tcp.val) { + fi->ip_src = p->l3.src_addr; + fi->ip_dst = p->l3.dst_addr; + fi->ip_proto = p->l3.next_proto_id; + fi->port_src = p->l4.tcp.src_port; + fi->port_dst = p->l4.tcp.dst_port; + + fi_flipped->ip_src = p->l3.dst_addr; + fi_flipped->ip_dst = p->l3.src_addr; + fi_flipped->ip_proto = p->l3.next_proto_id; + fi_flipped->port_src = p->l4.tcp.dst_port; + fi_flipped->port_dst = p->l4.tcp.src_port; + + *len = rte_be_to_cpu_16(p->l3.total_length) - sizeof(struct ipv4_hdr) - ((p->l4.tcp.data_off >> 4)*4); + *payload = ((uint8_t*)&p->l4.tcp) + ((p->l4.tcp.data_off >> 4)*4); + return 0; + } + + return -1; +} + +static int is_flow_beg(const struct flow_info *fi, const struct eth_ip4_udp *p) +{ + return fi->ip_proto == IPPROTO_UDP || + (fi->ip_proto == IPPROTO_TCP && p->l4.tcp.tcp_flags & TCP_SYN_FLAG); +} + +static void *lookup_flow(struct task_fm *task, struct flow_info *fi, uint64_t now_tsc) +{ + struct kv_store_expire_entry *entry; + + entry = kv_store_expire_get(task->kv_store_expire, fi, now_tsc); + + return entry ? entry_value(task->kv_store_expire, entry) : NULL; +} + +static void *lookup_or_insert_flow(struct task_fm *task, struct flow_info *fi, uint64_t now_tsc) +{ + struct kv_store_expire_entry *entry; + + entry = kv_store_expire_get_or_put(task->kv_store_expire, fi, now_tsc); + + return entry ? 
entry_value(task->kv_store_expire, entry) : NULL; +} + +static int handle_fm(struct task_fm *task, struct rte_mbuf *mbuf, uint64_t now_tsc) +{ + struct eth_ip4_udp *p; + struct flow_info fi, fi_flipped; + void *flow_data; + uint32_t len; + uint8_t *payload; + uint32_t res[2]; + size_t res_len = 2; + int flow_beg; + struct dpi_payload dpi_payload; + int is_upstream = 0; + + p = rte_pktmbuf_mtod(mbuf, struct eth_ip4_udp *); + + if (0 != extract_flow_info(p, &fi, &fi_flipped, &len, &payload)) { + plogx_err("Unknown packet type\n"); + return OUT_DISCARD; + } + + /* First, try to see if the flow already exists where the + current packet is sent by the server. */ + if (!(flow_data = lookup_flow(task, &fi_flipped, now_tsc))) { + /* Insert a new flow, only if this is the first packet + in the flow. */ + is_upstream = 1; + if (is_flow_beg(&fi, p)) + flow_data = lookup_or_insert_flow(task, &fi, now_tsc); + else + flow_data = lookup_flow(task, &fi, now_tsc); + } + + if (!flow_data) + return OUT_DISCARD; + else if (!len) + return 0; + + dpi_payload.payload = payload; + dpi_payload.len = len; + dpi_payload.client_to_server = is_upstream; + gettimeofday(&dpi_payload.tv, NULL); + task->dpi_engine.dpi_process(task->dpi_opaque, is_upstream? 
&fi : &fi_flipped, flow_data, &dpi_payload, res, &res_len); + return OUT_HANDLED; +} + +static int handle_fm_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_fm *task = (struct task_fm *)tbase; + uint64_t now_tsc = rte_rdtsc(); + uint16_t handled = 0; + uint16_t discard = 0; + int ret; + + for (uint16_t i = 0; i < n_pkts; ++i) { + ret = handle_fm(task, mbufs[i], now_tsc); + if (ret == OUT_DISCARD) + discard++; + else if (ret == OUT_HANDLED) + handled++; + } + + for (uint16_t i = 0; i < n_pkts; ++i) + rte_pktmbuf_free(mbufs[i]); + + TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, handled); + TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, discard); + return 0; +} + +static void load_dpi_engine(const char *dpi_engine_path, struct dpi_engine *dst) +{ + void *handle = prox_sh_find_system(dpi_engine_path); + + if (handle == NULL) { + plogx_info("Loading DPI engine from '%s'\n", dpi_engine_path); + handle = dlopen(dpi_engine_path, RTLD_NOW | RTLD_GLOBAL); + + PROX_PANIC(handle == NULL, "Failed to load dpi engine from '%s' with error:\n\t\t%s\n", dpi_engine_path, dlerror()); + prox_sh_add_system(dpi_engine_path, handle); + } + + struct dpi_engine *(*get_dpi_engine)(void) = dlsym(handle, "get_dpi_engine"); + + PROX_PANIC(get_dpi_engine == NULL, "Failed to find get_dpi_engine function from '%s'\n", dpi_engine_path); + struct dpi_engine *dpi_engine = get_dpi_engine(); + + dpi_engine->dpi_print = plog_info; + rte_memcpy(dst, dpi_engine, sizeof(*dst)); +} + +static uint32_t count_fm_cores(void) +{ + uint32_t n_cores = 0; + uint32_t lcore_id = -1; + struct lcore_cfg *lconf; + + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + if (!strcmp(lconf->targs[task_id].task_init->mode_str, "fm")) { + n_cores++; + /* Only intersted in number of cores + so break here. 
*/ + break; + } + } + } + + return n_cores; +} + +static struct kv_store_expire *get_shared_flow_table(struct task_args *targ, struct dpi_engine *de) +{ + struct kv_store_expire *ret = prox_sh_find_core(targ->lconf->id, "flow_table"); + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + if (!ret) { + ret = kv_store_expire_create(rte_align32pow2(targ->flow_table_size) * 4, + sizeof(struct flow_info), + de->dpi_get_flow_entry_size(), + socket_id, + de->dpi_flow_expire, + rte_get_tsc_hz() * 60); + PROX_PANIC(ret == NULL, "Failed to allocate KV store\n"); + prox_sh_add_core(targ->lconf->id, "flow_table", ret); + } + return ret; +} + +static struct task_dpi_per_core *get_shared_dpi_shared(struct task_args *targ) +{ + static const char *name = "dpi_shared"; + struct task_dpi_per_core *ret = prox_sh_find_core(targ->lconf->id, name); + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + if (!ret) { + ret = prox_zmalloc(sizeof(*ret), socket_id); + prox_sh_add_core(targ->lconf->id, name, ret); + } + return ret; +} + +static void init_task_fm(struct task_base *tbase, struct task_args *targ) +{ + struct task_fm *task = (struct task_fm *)tbase; + static int dpi_inited = 0; + + load_dpi_engine(targ->dpi_engine_path, &task->dpi_engine); + + task->kv_store_expire = get_shared_flow_table(targ, &task->dpi_engine); + task->dpi_shared = get_shared_dpi_shared(targ); + + if (!dpi_inited) { + uint32_t n_threads = count_fm_cores(); + const char *dpi_params[16]; + + plogx_info("Initializing DPI with %u threads\n", n_threads); + dpi_inited = 1; + + PROX_PANIC(targ->n_dpi_engine_args > 16, "Too many DPI arguments"); + for (size_t i = 0; i < targ->n_dpi_engine_args && i < 16; ++i) + dpi_params[i] = targ->dpi_engine_args[i]; + + int ret = task->dpi_engine.dpi_init(n_threads, targ->n_dpi_engine_args, dpi_params); + + PROX_PANIC(ret, "Failed to initialize DPI engine\n"); + } +} + +static void start_first(struct task_base *tbase) +{ + struct task_fm *task = (struct 
task_fm *)tbase; + void *ret = task->dpi_engine.dpi_thread_start(); + + task->dpi_shared->dpi_opaque = ret; + PROX_PANIC(ret == NULL, "dpi_thread_init failed\n"); +} + +static void start(struct task_base *tbase) +{ + struct task_fm *task = (struct task_fm *)tbase; + + task->dpi_opaque = task->dpi_shared->dpi_opaque; + PROX_PANIC(task->dpi_opaque == NULL, "dpi_opaque == NULL"); +} + +static void stop(struct task_base *tbase) +{ + struct task_fm *task = (struct task_fm *)tbase; + + size_t expired = kv_store_expire_expire_all(task->kv_store_expire); + size_t size = kv_store_expire_size(task->kv_store_expire); + + plogx_info("%zu/%zu\n", expired, size); +} + +static void stop_last(struct task_base *tbase) +{ + struct task_fm *task = (struct task_fm *)tbase; + + task->dpi_engine.dpi_thread_stop(task->dpi_shared->dpi_opaque); + task->dpi_shared->dpi_opaque = NULL; +} + +static struct task_init task_init_fm = { + .mode_str = "fm", + .init = init_task_fm, + .handle = handle_fm_bulk, + .start = start, + .stop = stop, + .start_first = start_first, + .stop_last = stop_last, + .size = sizeof(struct task_fm) +}; + +__attribute__((constructor)) static void reg_task_fm(void) +{ + reg_task(&task_init_fm); +} diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c new file mode 100644 index 00000000..e5e43fca --- /dev/null +++ b/VNFs/DPPD-PROX/handle_gen.c @@ -0,0 +1,1481 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_mbuf.h> +#include <pcap.h> +#include <string.h> +#include <stdlib.h> +#include <rte_cycles.h> +#include <rte_version.h> +#include <rte_byteorder.h> +#include <rte_ether.h> + +#include "prox_shared.h" +#include "random.h" +#include "prox_malloc.h" +#include "handle_gen.h" +#include "handle_lat.h" +#include "task_init.h" +#include "task_base.h" +#include "prox_port_cfg.h" +#include "lconf.h" +#include "log.h" +#include "quit.h" +#include "prox_cfg.h" +#include "mbuf_utils.h" +#include "qinq.h" +#include "prox_cksum.h" +#include "etypes.h" +#include "prox_assert.h" +#include "prefetch.h" +#include "token_time.h" +#include "local_mbuf.h" +#include "arp.h" +#include "tx_pkt.h" +#include <rte_hash_crc.h> + +struct pkt_template { + uint64_t dst_mac; + uint32_t ip_src; + uint32_t ip_dst_pos; + uint16_t len; + uint16_t l2_len; + uint16_t l3_len; + uint8_t buf[ETHER_MAX_LEN]; +}; + +#define FLAG_DST_MAC_KNOWN 1 +#define FLAG_L3_GEN 2 +#define FLAG_RANDOM_IPS 4 + +#define MAX_TEMPLATE_INDEX 65536 +#define TEMPLATE_INDEX_MASK (MAX_TEMPLATE_INDEX - 1) +#define MBUF_ARP MAX_TEMPLATE_INDEX + +#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24 + +static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt) +{ + const uint32_t pkt_size = pkt_template->len; + + rte_pktmbuf_pkt_len(mbuf) = pkt_size; + rte_pktmbuf_data_len(mbuf) = pkt_size; + init_mbuf_seg(mbuf); + rte_memcpy(pkt, pkt_template->buf, pkt_template->len); +} + +struct task_gen_pcap { + struct task_base base; + uint64_t hz; + struct local_mbuf local_mbuf; + uint32_t pkt_idx; + struct pkt_template *proto; + uint32_t loop; + uint32_t n_pkts; + uint64_t last_tsc; + uint64_t *proto_tsc; +}; + +struct task_gen { + struct task_base base; + uint64_t hz; + uint64_t link_speed; + struct token_time token_time; + struct local_mbuf local_mbuf; + 
struct pkt_template *pkt_template; /* packet templates used at runtime */ + uint64_t write_duration_estimate; /* how long it took previously to write the time stamps in the packets */ + uint64_t earliest_tsc_next_pkt; + uint64_t new_rate_bps; + uint64_t pkt_queue_index; + uint32_t n_pkts; /* number of packets in pcap */ + uint32_t pkt_idx; /* current packet from pcap */ + uint32_t pkt_count; /* how many pakets to generate */ + uint32_t runtime_flags; + uint16_t lat_pos; + uint16_t packet_id_pos; + uint16_t accur_pos; + uint16_t sig_pos; + uint32_t sig; + uint8_t generator_id; + uint8_t n_rands; /* number of randoms */ + uint8_t min_bulk_size; + uint8_t max_bulk_size; + uint8_t lat_enabled; + uint8_t runtime_checksum_needed; + struct { + struct random state; + uint32_t rand_mask; /* since the random vals are uniform, masks don't introduce bias */ + uint32_t fixed_bits; /* length of each random (max len = 4) */ + uint16_t rand_offset; /* each random has an offset*/ + uint8_t rand_len; /* # bytes to take from random (no bias introduced) */ + } rand[64]; + uint64_t accur[64]; + uint64_t pkt_tsc_offset[64]; + struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */ + struct ether_addr gw_mac; + struct ether_addr src_mac; + struct rte_hash *mac_hash; + uint64_t *dst_mac; + uint32_t gw_ip; + uint32_t src_ip; + uint8_t flags; + uint8_t cksum_offload; +} __rte_cache_aligned; + +static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip) +{ + /* Optimize for common case of IPv4 header without options. 
*/ + if (ip->version_ihl == 0x45) + return sizeof(struct ipv4_hdr); + if (unlikely(ip->version_ihl >> 4 != 4)) { + plog_warn("IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4); + return 0; + } + return (ip->version_ihl & 0xF) * 4; +} + +static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, uint16_t len) +{ + *l2_len = sizeof(struct ether_hdr); + *l3_len = 0; + struct vlan_hdr *vlan_hdr; + struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt; + struct ipv4_hdr *ip; + uint16_t ether_type = eth_hdr->ether_type; + + // Unstack VLAN tags + while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(struct vlan_hdr) < len)) { + vlan_hdr = (struct vlan_hdr *)(pkt + *l2_len); + *l2_len +=4; + ether_type = vlan_hdr->eth_proto; + } + + // No L3 cksum offload for IPv6, but TODO L4 offload + // ETYPE_EoGRE CRC not implemented yet + + switch (ether_type) { + case ETYPE_MPLSU: + case ETYPE_MPLSM: + *l2_len +=4; + break; + case ETYPE_IPv4: + break; + case ETYPE_EoGRE: + case ETYPE_ARP: + case ETYPE_IPv6: + *l2_len = 0; + break; + default: + *l2_len = 0; + plog_warn("Unsupported packet type %x - CRC might be wrong\n", ether_type); + break; + } + + if (*l2_len) { + struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + *l2_len); + *l3_len = ipv4_get_hdr_len(ip); + } +} + +static void checksum_packet(uint8_t *hdr, struct rte_mbuf *mbuf, struct pkt_template *pkt_template, int cksum_offload) +{ + uint16_t l2_len = pkt_template->l2_len; + uint16_t l3_len = pkt_template->l3_len; + + if (l2_len) { + struct ipv4_hdr *ip = (struct ipv4_hdr*)(hdr + l2_len); + prox_ip_udp_cksum(mbuf, ip, l2_len, l3_len, cksum_offload); + } +} + +static void task_gen_reset_token_time(struct task_gen *task) +{ + token_time_set_bpp(&task->token_time, task->new_rate_bps); + token_time_reset(&task->token_time, rte_rdtsc(), 0); +} + +static void start(struct task_base *tbase) +{ + struct task_gen *task = (struct task_gen *)tbase; + task->pkt_queue_index = 
0; + + task_gen_reset_token_time(task); +} + +static void start_pcap(struct task_base *tbase) +{ + struct task_gen_pcap *task = (struct task_gen_pcap *)tbase; + /* When we start, the first packet is sent immediately. */ + task->last_tsc = rte_rdtsc() - task->proto_tsc[0]; + task->pkt_idx = 0; +} + +static void task_gen_take_count(struct task_gen *task, uint32_t send_bulk) +{ + if (task->pkt_count == (uint32_t)-1) + return ; + else { + if (task->pkt_count >= send_bulk) + task->pkt_count -= send_bulk; + else + task->pkt_count = 0; + } +} + +static int handle_gen_pcap_bulk(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts) +{ + struct task_gen_pcap *task = (struct task_gen_pcap *)tbase; + uint64_t now = rte_rdtsc(); + uint64_t send_bulk = 0; + uint32_t pkt_idx_tmp = task->pkt_idx; + + if (pkt_idx_tmp == task->n_pkts) { + PROX_ASSERT(task->loop); + return 0; + } + + for (uint16_t j = 0; j < 64; ++j) { + uint64_t tsc = task->proto_tsc[pkt_idx_tmp]; + if (task->last_tsc + tsc <= now) { + task->last_tsc += tsc; + send_bulk++; + pkt_idx_tmp++; + if (pkt_idx_tmp == task->n_pkts) { + if (task->loop) + pkt_idx_tmp = 0; + else + break; + } + } + else + break; + } + + struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk); + if (new_pkts == NULL) + return 0; + + for (uint16_t j = 0; j < send_bulk; ++j) { + struct rte_mbuf *next_pkt = new_pkts[j]; + struct pkt_template *pkt_template = &task->proto[task->pkt_idx]; + uint8_t *hdr = rte_pktmbuf_mtod(next_pkt, uint8_t *); + + pkt_template_init_mbuf(pkt_template, next_pkt, hdr); + + task->pkt_idx++; + if (task->pkt_idx == task->n_pkts) { + if (task->loop) + task->pkt_idx = 0; + else + break; + } + } + + return task->base.tx_pkt(&task->base, new_pkts, send_bulk, NULL); +} + +static uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes) +{ + const uint64_t hz = task->hz; + const uint64_t bytes_per_hz = task->link_speed; + + if (bytes_per_hz == UINT64_MAX) + return 0; + + return hz * 
bytes / bytes_per_hz; +} + +static uint32_t task_gen_next_pkt_idx(const struct task_gen *task, uint32_t pkt_idx) +{ + return pkt_idx + 1 == task->n_pkts? 0 : pkt_idx + 1; +} + +static uint32_t task_gen_offset_pkt_idx(const struct task_gen *task, uint32_t offset) +{ + return (task->pkt_idx + offset) % task->n_pkts; +} + +static uint32_t task_gen_calc_send_bulk(const struct task_gen *task, uint32_t *total_bytes) +{ + /* The biggest bulk we allow to send is task->max_bulk_size + packets. The max bulk size can also be limited by the + pkt_count field. At the same time, we are rate limiting + based on the specified speed (in bytes per second) so token + bucket based rate limiting must also be applied. The + minimum bulk size is also constrained. If the calculated + bulk size is less then the minimum, then don't send + anything. */ + + const uint32_t min_bulk = task->min_bulk_size; + uint32_t max_bulk = task->max_bulk_size; + + if (task->pkt_count != (uint32_t)-1 && task->pkt_count < max_bulk) { + max_bulk = task->pkt_count; + } + + uint32_t send_bulk = 0; + uint32_t pkt_idx_tmp = task->pkt_idx; + uint32_t would_send_bytes = 0; + uint32_t pkt_size; + + /* + * TODO - this must be improved to take into account the fact that, after applying randoms + * The packet can be replaced by an ARP + */ + for (uint16_t j = 0; j < max_bulk; ++j) { + struct pkt_template *pktpl = &task->pkt_template[pkt_idx_tmp]; + if (unlikely((task->flags & (FLAG_L3_GEN | FLAG_DST_MAC_KNOWN)) == FLAG_L3_GEN)) { + // Generator is supposed to get MAC address - MAC is still unknown for this template + // generate ARP Request to gateway instead of the intended packet + pkt_size = 60; + } else { + pkt_size = pktpl->len; + } + uint32_t pkt_len = pkt_len_to_wire_size(pkt_size); + if (pkt_len + would_send_bytes > task->token_time.bytes_now) + break; + + pkt_idx_tmp = task_gen_next_pkt_idx(task, pkt_idx_tmp); + + send_bulk++; + would_send_bytes += pkt_len; + } + + if (send_bulk < min_bulk) + return 0; + 
*total_bytes = would_send_bytes; + return send_bulk; +} + +static inline void create_arp(struct rte_mbuf *mbuf, uint8_t *pkt_hdr, uint64_t *src_mac, uint32_t ip_dst, uint32_t ip_src) +{ + uint64_t mac_bcast = 0xFFFFFFFFFFFF; + rte_pktmbuf_pkt_len(mbuf) = 42; + rte_pktmbuf_data_len(mbuf) = 42; + init_mbuf_seg(mbuf); + struct ether_hdr_arp *hdr_arp = (struct ether_hdr_arp *)pkt_hdr; + + memcpy(&hdr_arp->ether_hdr.d_addr.addr_bytes, &mac_bcast, 6); + memcpy(&hdr_arp->ether_hdr.s_addr.addr_bytes, src_mac, 6); + hdr_arp->ether_hdr.ether_type = ETYPE_ARP; + hdr_arp->arp.htype = 0x100, + hdr_arp->arp.ptype = 0x0008; + hdr_arp->arp.hlen = 6; + hdr_arp->arp.plen = 4; + hdr_arp->arp.oper = 0x100; + hdr_arp->arp.data.spa = ip_src; + hdr_arp->arp.data.tpa = ip_dst; + memset(&hdr_arp->arp.data.tha, 0, sizeof(struct ether_addr)); + memcpy(&hdr_arp->arp.data.sha, src_mac, sizeof(struct ether_addr)); +} + +static int task_gen_write_dst_mac(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + uint32_t ip_dst_pos, ip_src_pos, ip_dst, ip_src; + uint16_t i; + int ret; + + if (task->flags & FLAG_L3_GEN) { + if (task->gw_ip) { + if (unlikely((task->flags & FLAG_DST_MAC_KNOWN) == 0)) { + for (i = 0; i < count; ++i) { + struct pkt_template *pktpl = &task->pkt_template[mbufs[i]->udata64 & TEMPLATE_INDEX_MASK]; + create_arp(mbufs[i], pkt_hdr[i], (uint64_t *)&pktpl->buf[6], task->gw_ip, pktpl->ip_src); + mbufs[i]->udata64 |= MBUF_ARP; + } + } else { + for (i = 0; i < count; ++i) { + struct ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i]; + memcpy(&hdr->d_addr.addr_bytes, &task->gw_mac, 6); + } + } + } else if (unlikely((task->flags & FLAG_RANDOM_IPS) != 0) || (task->n_pkts >= 4)){ + // Find mac in lookup table. 
Send ARP if not found + int32_t positions[MAX_PKT_BURST], idx; + void *keys[MAX_PKT_BURST]; + uint32_t key[MAX_PKT_BURST]; + for (i = 0; i < count; ++i) { + uint8_t *hdr = (uint8_t *)pkt_hdr[i]; + struct pkt_template *pktpl = &task->pkt_template[mbufs[i]->udata64 & TEMPLATE_INDEX_MASK]; + ip_dst_pos = pktpl->ip_dst_pos; + ip_dst = *(uint32_t *)(hdr + ip_dst_pos); + key[i] = ip_dst; + keys[i] = &key[i]; + } + ret = rte_hash_lookup_bulk(task->mac_hash, (const void **)&keys, count, positions); + if (unlikely(ret < 0)) { + plogx_err("lookup_bulk failed in mac_hash\n"); + tx_pkt_drop_all((struct task_base *)task, mbufs, count, NULL); + return -1; + } + for (i = 0; i < count; ++i) { + idx = positions[i]; + if (unlikely(idx < 0)) { + // mac not found for this IP + struct pkt_template *pktpl = &task->pkt_template[mbufs[i]->udata64 & TEMPLATE_INDEX_MASK]; + uint8_t *hdr = (uint8_t *)pkt_hdr[i]; + ip_src_pos = pktpl->ip_dst_pos - 4; + ip_src = *(uint32_t *)(hdr + ip_src_pos); + create_arp(mbufs[i], pkt_hdr[i], (uint64_t *)&hdr[6], key[i], ip_src); + mbufs[i]->udata64 |= MBUF_ARP; + } else { + // mac found for this IP + struct ether_hdr_arp *hdr_arp = (struct ether_hdr_arp *)pkt_hdr[i]; + memcpy(&hdr_arp->ether_hdr.d_addr.addr_bytes, &task->dst_mac[idx], 6); + } + } + } else { + for (i = 0; i < count; ++i) { + uint8_t *hdr = (uint8_t *)pkt_hdr[i]; + struct pkt_template *pktpl = &task->pkt_template[mbufs[i]->udata64 & TEMPLATE_INDEX_MASK]; + + // Check if packet template already has the mac + if (unlikely(pktpl->dst_mac == 0)) { + // no random_ip, can take from from packet template but no mac (yet) + uint32_t ip_dst_pos = pktpl->ip_dst_pos; + ip_dst = *(uint32_t *)(hdr + ip_dst_pos); + create_arp(mbufs[i], pkt_hdr[i], (uint64_t *)&pktpl->buf[6], ip_dst, pktpl->ip_src); + mbufs[i]->udata64 |= MBUF_ARP; + } else { + // no random ip, mac known + struct ether_hdr_arp *hdr_arp = (struct ether_hdr_arp *)pkt_hdr[i]; + memcpy(&hdr_arp->ether_hdr.d_addr.addr_bytes, &pktpl->dst_mac, 6); 
+ } + } + } + } + return 0; +} + +static void task_gen_apply_random_fields(struct task_gen *task, uint8_t *hdr) +{ + uint32_t ret, ret_tmp; + + for (uint16_t i = 0; i < task->n_rands; ++i) { + ret = random_next(&task->rand[i].state); + ret_tmp = (ret & task->rand[i].rand_mask) | task->rand[i].fixed_bits; + + ret_tmp = rte_bswap32(ret_tmp); + /* At this point, the lower order bytes (BE) contain + the generated value. The address where the values + of interest starts is at ret_tmp + 4 - rand_len. */ + uint8_t *pret_tmp = (uint8_t*)&ret_tmp; + rte_memcpy(hdr + task->rand[i].rand_offset, pret_tmp + 4 - task->rand[i].rand_len, task->rand[i].rand_len); + } +} + +static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count) +{ + if (!task->n_rands) + return; + + for (uint16_t i = 0; i < count; ++i) + task_gen_apply_random_fields(task, pkt_hdr[i]); +} + +static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy) +{ + *(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy; +} + +static void task_gen_apply_sig(struct task_gen *task, uint8_t *pkt_hdr) +{ + *(uint32_t *)(pkt_hdr + task->sig_pos) = task->sig; +} + +static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + if (!task->accur_pos) + return; + + /* The accuracy of task->pkt_queue_index - 64 is stored in + packet task->pkt_queue_index. The ID modulo 64 is the + same. 
*/ + for (uint16_t j = 0; j < count; ++j) { + if ((mbufs[j]->udata64 & MBUF_ARP) == 0) { + uint32_t accuracy = task->accur[(task->pkt_queue_index + j) & 63]; + task_gen_apply_accur_pos(task, pkt_hdr[j], accuracy); + } + } +} + +static void task_gen_apply_all_sig(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + if (!task->sig_pos) + return; + + for (uint16_t j = 0; j < count; ++j) { + if ((mbufs[j]->udata64 & MBUF_ARP) == 0) { + task_gen_apply_sig(task, pkt_hdr[j]); + } + } +} + +static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id) +{ + struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_pos); + + *dst = *id; +} + +static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + if (!task->packet_id_pos) + return; + + for (uint16_t i = 0; i < count; ++i) { + if ((mbufs[i]->udata64 & MBUF_ARP) == 0) { + struct unique_id id; + unique_id_init(&id, task->generator_id, task->pkt_queue_index++); + task_gen_apply_unique_id(task, pkt_hdr[i], &id); + } + } +} + +static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + if (!(task->runtime_flags & TASK_TX_CRC)) + return; + + if (!task->runtime_checksum_needed) + return; + + uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - count); + for (uint16_t i = 0; i < count; ++i) { + if ((mbufs[i]->udata64 & MBUF_ARP) == 0) { + struct pkt_template *pkt_template = &task->pkt_template[pkt_idx]; + checksum_packet(pkt_hdr[i], mbufs[i], pkt_template, task->cksum_offload); + pkt_idx = task_gen_next_pkt_idx(task, pkt_idx); + } + } +} + +static void task_gen_consume_tokens(struct task_gen *task, uint32_t tokens, uint32_t send_count) +{ + /* If max burst has been sent, we can't keep up so just assume + that we can (leaving a "gap" in the packet stream on the + wire) */ + task->token_time.bytes_now -= 
tokens; + if (send_count == task->max_bulk_size && task->token_time.bytes_now > tokens) { + task->token_time.bytes_now = tokens; + } +} + +static uint64_t task_gen_calc_bulk_duration(struct task_gen *task, uint32_t count) +{ + uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - 1); + struct pkt_template *last_pkt_template = &task->pkt_template[pkt_idx]; + uint32_t last_pkt_len = pkt_len_to_wire_size(last_pkt_template->len); + uint64_t last_pkt_duration = bytes_to_tsc(task, last_pkt_len); + uint64_t bulk_duration = task->pkt_tsc_offset[count - 1] + last_pkt_duration; + + return bulk_duration; +} + +static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count) +{ + if (!task->lat_enabled) + return 0; + + uint64_t tx_tsc, delta_t; + uint64_t tsc_before_tx = 0; + + /* Just before sending the packets, apply the time stamp + relative to when the first packet will be sent. The first + packet will be sent now. The time is read for each packet + to reduce the error towards the actual time the packet will + be sent. */ + uint64_t write_tsc_after, write_tsc_before; + + write_tsc_before = rte_rdtsc(); + + /* The time it took previously to write the time stamps in the + packets is used as an estimate for how long it will take to + write the time stamps now. The estimated time at which the + packets will actually be sent will be at tx_tsc. */ + tx_tsc = write_tsc_before + task->write_duration_estimate; + + /* The offset delta_t tracks the difference between the actual + time and the time written in the packets. Adding the offset + to the actual time insures that the time written in the + packets is monotonically increasing. At the same time, + simply sleeping until delta_t is zero would leave a period + of silence on the line. The error has been introduced + earlier, but the packets have already been sent. 
*/ + if (tx_tsc < task->earliest_tsc_next_pkt) + delta_t = task->earliest_tsc_next_pkt - tx_tsc; + else + delta_t = 0; + + for (uint16_t i = 0; i < count; ++i) { + uint32_t *pos = (uint32_t *)(pkt_hdr[i] + task->lat_pos); + const uint64_t pkt_tsc = tx_tsc + delta_t + task->pkt_tsc_offset[i]; + + *pos = pkt_tsc >> LATENCY_ACCURACY; + } + + uint64_t bulk_duration = task_gen_calc_bulk_duration(task, count); + + task->earliest_tsc_next_pkt = tx_tsc + delta_t + bulk_duration; + write_tsc_after = rte_rdtsc(); + task->write_duration_estimate = write_tsc_after - write_tsc_before; + + /* Make sure that the time stamps that were written + are valid. The offset must be taken into account */ + do { + tsc_before_tx = rte_rdtsc(); + } while (tsc_before_tx < tx_tsc); + return tsc_before_tx; +} + +static void task_gen_store_accuracy(struct task_gen *task, uint32_t count, uint64_t tsc_before_tx) +{ + if (!task->accur_pos) + return; + + uint64_t accur = rte_rdtsc() - tsc_before_tx; + uint64_t first_accuracy_idx = task->pkt_queue_index - count; + + for (uint32_t i = 0; i < count; ++i) { + uint32_t accuracy_idx = (first_accuracy_idx + i) & 63; + + task->accur[accuracy_idx] = accur; + } +} + +static void task_gen_load_and_prefetch(struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + for (uint16_t i = 0; i < count; ++i) + rte_prefetch0(mbufs[i]); + for (uint16_t i = 0; i < count; ++i) + pkt_hdr[i] = rte_pktmbuf_mtod(mbufs[i], uint8_t *); + for (uint16_t i = 0; i < count; ++i) + rte_prefetch0(pkt_hdr[i]); +} + +static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +{ + uint64_t will_send_bytes = 0; + + for (uint16_t i = 0; i < count; ++i) { + struct pkt_template *pktpl = &task->pkt_template[task->pkt_idx]; + struct pkt_template *pkt_template = &task->pkt_template[task->pkt_idx]; + pkt_template_init_mbuf(pkt_template, mbufs[i], pkt_hdr[i]); + mbufs[i]->udata64 = task->pkt_idx & TEMPLATE_INDEX_MASK; + struct 
ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i]; + if (task->lat_enabled) { + task->pkt_tsc_offset[i] = bytes_to_tsc(task, will_send_bytes); + will_send_bytes += pkt_len_to_wire_size(pkt_template->len); + } + task->pkt_idx = task_gen_next_pkt_idx(task, task->pkt_idx); + } +} + +static void task_gen_update_config(struct task_gen *task) +{ + if (task->token_time.cfg.bpp != task->new_rate_bps) + task_gen_reset_token_time(task); +} + +static inline void handle_arp_pkts(struct task_gen *task, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + int j; + int ret; + struct ether_hdr_arp *hdr; + uint8_t out[MAX_PKT_BURST]; + static struct my_arp_t arp_reply = { + .htype = 0x100, + .ptype = 8, + .hlen = 6, + .plen = 4, + .oper = 0x200 + }; + static struct my_arp_t arp_request = { + .htype = 0x100, + .ptype = 8, + .hlen = 6, + .plen = 4, + .oper = 0x100 + }; + + for (j = 0; j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + for (j = 0; j < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j], void *)); + } + for (j = 0; j < n_pkts; ++j) { + hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *); + if (hdr->ether_hdr.ether_type == ETYPE_ARP) { + if (memcmp(&hdr->arp, &arp_reply, 8) == 0) { + uint32_t ip = hdr->arp.data.spa; + // plog_info("Received ARP Reply for IP %x\n",ip); + if (ip == task->gw_ip) { + memcpy(&task->gw_mac, &hdr->arp.data.sha, 6);; + task->flags |= FLAG_DST_MAC_KNOWN; + out[j] = OUT_HANDLED; + continue; + } else if ((task->n_pkts >= 4) || (task->flags & FLAG_RANDOM_IPS)) { + // Ideally, we should add the key when making the arp request, + // We should only store the mac address key was created. + // Here we are storing MAC we did not asked for... + ret = rte_hash_add_key(task->mac_hash, (const void *)&ip); + if (ret < 0) { + plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(ip)); + out[j] = OUT_DISCARD; + } else { + task->dst_mac[ret] = *(uint64_t *)&(hdr->arp.data.sha); + out[j] = OUT_HANDLED; + } + continue; + } + // Need to find template back... 
+ // Only try this if there are few templates + for (unsigned int idx = 0; idx < task->n_pkts; idx++) { + struct pkt_template *pktpl = &task->pkt_template[idx]; + uint32_t ip_dst_pos = pktpl->ip_dst_pos; + uint32_t *ip_dst = (uint32_t *)(((uint8_t *)pktpl->buf) + ip_dst_pos); + if (*ip_dst == ip) { + pktpl->dst_mac = *(uint64_t *)&(hdr->arp.data.sha); + } + out[j] = OUT_HANDLED; + } + } else if (memcmp(&hdr->arp, &arp_request, 8) == 0) { + struct ether_addr s_addr; + if (!task->src_ip) { + create_mac(hdr, &s_addr); + prepare_arp_reply(hdr, &s_addr); + memcpy(hdr->ether_hdr.d_addr.addr_bytes, hdr->ether_hdr.s_addr.addr_bytes, 6); + memcpy(hdr->ether_hdr.s_addr.addr_bytes, &s_addr, 6); + out[j] = 0; + } else if (hdr->arp.data.tpa == task->src_ip) { + prepare_arp_reply(hdr, &task->src_mac); + memcpy(hdr->ether_hdr.d_addr.addr_bytes, hdr->ether_hdr.s_addr.addr_bytes, 6); + memcpy(hdr->ether_hdr.s_addr.addr_bytes, &task->src_mac, 6); + out[j] = 0; + } else { + out[j] = OUT_DISCARD; + plogx_dbg("Received ARP on unexpected IP %x, expecting %x\n", rte_be_to_cpu_32(hdr->arp.data.tpa), rte_be_to_cpu_32(task->src_ip)); + } + } + } else { + out[j] = OUT_DISCARD; + } + } + ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_gen *task = (struct task_gen *)tbase; + uint8_t out[MAX_PKT_BURST] = {0}; + int ret; + + int i, j; + + if (unlikely((task->flags & FLAG_L3_GEN) && (n_pkts != 0))) { + handle_arp_pkts(task, mbufs, n_pkts); + } + + task_gen_update_config(task); + + if (task->pkt_count == 0) { + task_gen_reset_token_time(task); + return 0; + } + if (!task->token_time.cfg.bpp) + return 0; + + token_time_update(&task->token_time, rte_rdtsc()); + + uint32_t would_send_bytes; + const uint32_t send_bulk = task_gen_calc_send_bulk(task, &would_send_bytes); + + if (send_bulk == 0) + return 0; + task_gen_take_count(task, send_bulk); + task_gen_consume_tokens(task, 
would_send_bytes, send_bulk); + + struct rte_mbuf **new_pkts = local_mbuf_refill_and_take(&task->local_mbuf, send_bulk); + if (new_pkts == NULL) + return 0; + uint8_t *pkt_hdr[MAX_RING_BURST]; + + task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk); + task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk); + task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk); + if (task_gen_write_dst_mac(task, new_pkts, pkt_hdr, send_bulk) < 0) + return 0; + task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk); + task_gen_apply_all_sig(task, new_pkts, pkt_hdr, send_bulk); + task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk); + + uint64_t tsc_before_tx; + + tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk); + task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk); + ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out); + task_gen_store_accuracy(task, send_bulk, tsc_before_tx); + return ret; +} + +static void init_task_gen_seeds(struct task_gen *task) +{ + for (size_t i = 0; i < sizeof(task->rand)/sizeof(task->rand[0]); ++i) + random_init_seed(&task->rand[i].state); +} + +static uint32_t pcap_count_pkts(pcap_t *handle) +{ + struct pcap_pkthdr header; + const uint8_t *buf; + uint32_t ret = 0; + long pkt1_fpos = ftell(pcap_file(handle)); + + while ((buf = pcap_next(handle, &header))) { + ret++; + } + int ret2 = fseek(pcap_file(handle), pkt1_fpos, SEEK_SET); + PROX_PANIC(ret2 != 0, "Failed to reset reading pcap file\n"); + return ret; +} + +static uint64_t avg_time_stamp(uint64_t *time_stamp, uint32_t n) +{ + uint64_t tot_inter_pkt = 0; + + for (uint32_t i = 0; i < n; ++i) + tot_inter_pkt += time_stamp[i]; + return (tot_inter_pkt + n / 2)/n; +} + +static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts, struct pkt_template *proto, uint64_t *time_stamp) +{ + struct pcap_pkthdr header; + const uint8_t *buf; + size_t len; + + for (uint32_t i = 0; i < n_pkts; ++i) { + buf = pcap_next(handle, 
&header); + + PROX_PANIC(buf == NULL, "Failed to read packet %d from pcap %s\n", i, file_name); + proto[i].len = header.len; + len = RTE_MIN(header.len, sizeof(proto[i].buf)); + if (header.len > len) + plogx_warn("Packet truncated from %u to %zu bytes\n", header.len, len); + + if (time_stamp) { + static struct timeval beg; + struct timeval tv; + + if (i == 0) + beg = header.ts; + + tv = tv_diff(&beg, &header.ts); + tv_to_tsc(&tv, time_stamp + i); + } + rte_memcpy(proto[i].buf, buf, len); + } + + if (time_stamp && n_pkts) { + for (uint32_t i = n_pkts - 1; i > 0; --i) + time_stamp[i] -= time_stamp[i - 1]; + /* Since the handle function will loop the packets, + there is one time-stamp that is not provided by the + pcap file. This is the time between the last and + the first packet. This implementation takes the + average of the inter-packet times here. */ + if (n_pkts > 1) + time_stamp[0] = avg_time_stamp(time_stamp + 1, n_pkts - 1); + } + + return 0; +} + +static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic) +{ + const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr); + const uint16_t max_len = ETHER_MAX_LEN - 4; + + if (do_panic) { + PROX_PANIC(pkt_size == 0, "Invalid packet size length (no packet defined?)\n"); + PROX_PANIC(pkt_size > max_len, "pkt_size out of range (must be <= %u)\n", max_len); + PROX_PANIC(pkt_size < min_len, "pkt_size out of range (must be >= %u)\n", min_len); + return 0; + } else { + if (pkt_size == 0) { + plog_err("Invalid packet size length (no packet defined?)\n"); + return -1; + } + if (pkt_size > max_len) { + plog_err("pkt_size out of range (must be <= %u)\n", max_len); + return -1; + } + if (pkt_size < min_len) { + plog_err("pkt_size out of range (must be >= %u)\n", min_len); + return -1; + } + return 0; + } +} + +static int check_all_pkt_size(struct task_gen *task, int do_panic) +{ + int rc; + for (uint32_t i = 0; i < task->n_pkts;++i) { + if ((rc = check_pkt_size(task, 
task->pkt_template[i].len, do_panic)) != 0) + return rc; + } + return 0; +} + +static void check_fields_in_bounds(struct task_gen *task) +{ + const uint32_t pkt_size = task->pkt_template[0].len; + + if (task->lat_enabled) { + uint32_t pos_beg = task->lat_pos; + uint32_t pos_end = task->lat_pos + 3U; + + PROX_PANIC(pkt_size <= pos_end, "Writing latency at %u-%u, but packet size is %u bytes\n", + pos_beg, pos_end, pkt_size); + } + if (task->packet_id_pos) { + uint32_t pos_beg = task->packet_id_pos; + uint32_t pos_end = task->packet_id_pos + 4U; + + PROX_PANIC(pkt_size <= pos_end, "Writing packet at %u-%u, but packet size is %u bytes\n", + pos_beg, pos_end, pkt_size); + } + if (task->accur_pos) { + uint32_t pos_beg = task->accur_pos; + uint32_t pos_end = task->accur_pos + 3U; + + PROX_PANIC(pkt_size <= pos_end, "Writing accuracy at %u%-u, but packet size is %u bytes\n", + pos_beg, pos_end, pkt_size); + } +} + +static void task_gen_pkt_template_recalc_metadata(struct task_gen *task) +{ + struct pkt_template *template; + + for (size_t i = 0; i < task->n_pkts; ++i) { + template = &task->pkt_template[i]; + parse_l2_l3_len(template->buf, &template->l2_len, &template->l3_len, template->len); + } +} + +static void task_gen_pkt_template_recalc_checksum(struct task_gen *task) +{ + struct pkt_template *template; + struct ipv4_hdr *ip; + + task->runtime_checksum_needed = 0; + for (size_t i = 0; i < task->n_pkts; ++i) { + template = &task->pkt_template[i]; + if (template->l2_len == 0) + continue; + ip = (struct ipv4_hdr *)(template->buf + template->l2_len); + + ip->hdr_checksum = 0; + prox_ip_cksum_sw(ip); + uint32_t l4_len = rte_bswap16(ip->total_length) - template->l3_len; + + if (ip->next_proto_id == IPPROTO_UDP) { + struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t *)ip) + template->l3_len); + prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr); + } else if (ip->next_proto_id == IPPROTO_TCP) { + struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t *)ip) + 
template->l3_len); + prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr); + } + + /* The current implementation avoids checksum + calculation by determining that at packet + construction time, no fields are applied that would + require a recalculation of the checksum. */ + if (task->lat_enabled && task->lat_pos > template->l2_len) + task->runtime_checksum_needed = 1; + if (task->accur_pos > template->l2_len) + task->runtime_checksum_needed = 1; + if (task->packet_id_pos > template->l2_len) + task->runtime_checksum_needed = 1; + } +} + +static void task_gen_pkt_template_recalc_all(struct task_gen *task) +{ + task_gen_pkt_template_recalc_metadata(task); + task_gen_pkt_template_recalc_checksum(task); +} + +static void task_gen_reset_pkt_templates_len(struct task_gen *task) +{ + struct pkt_template *src, *dst; + + for (size_t i = 0; i < task->n_pkts; ++i) { + src = &task->pkt_template_orig[i]; + dst = &task->pkt_template[i]; + dst->len = src->len; + } +} + +static void task_gen_reset_pkt_templates_content(struct task_gen *task) +{ + struct pkt_template *src, *dst; + + for (size_t i = 0; i < task->n_pkts; ++i) { + src = &task->pkt_template_orig[i]; + dst = &task->pkt_template[i]; + memcpy(dst->buf, src->buf, dst->len); + } +} + +static void task_gen_reset_pkt_templates(struct task_gen *task) +{ + task_gen_reset_pkt_templates_len(task); + task_gen_reset_pkt_templates_content(task); + task_gen_pkt_template_recalc_all(task); +} + +static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_args *targ) +{ + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + if (targ->pkt_size > sizeof(task->pkt_template[0].buf)) + targ->pkt_size = sizeof(task->pkt_template[0].buf); + task->n_pkts = 1; + + size_t mem_size = task->n_pkts * sizeof(*task->pkt_template); + task->pkt_template = prox_zmalloc(mem_size, socket_id); + task->pkt_template_orig = prox_zmalloc(mem_size, socket_id); + + PROX_PANIC(task->pkt_template == NULL || + 
task->pkt_template_orig == NULL, + "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size); + + rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size); + task->pkt_template_orig[0].len = targ->pkt_size; + task_gen_reset_pkt_templates(task); + check_all_pkt_size(task, 1); + check_fields_in_bounds(task); +} + +static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *targ) +{ + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + char err[PCAP_ERRBUF_SIZE]; + pcap_t *handle = pcap_open_offline(targ->pcap_file, err); + PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err); + + task->n_pkts = pcap_count_pkts(handle); + plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file); + + if (targ->n_pkts) + task->n_pkts = RTE_MIN(task->n_pkts, targ->n_pkts); + PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n"); + plogx_info("Loading %u packets from pcap\n", task->n_pkts); + size_t mem_size = task->n_pkts * sizeof(*task->pkt_template); + task->pkt_template = prox_zmalloc(mem_size, socket_id); + task->pkt_template_orig = prox_zmalloc(mem_size, socket_id); + PROX_PANIC(task->pkt_template == NULL || + task->pkt_template_orig == NULL, + "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size); + + pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->pkt_template_orig, NULL); + pcap_close(handle); + task_gen_reset_pkt_templates(task); +} + +static struct rte_mempool *task_gen_create_mempool(struct task_args *targ) +{ + static char name[] = "gen_pool"; + struct rte_mempool *ret; + const int sock_id = rte_lcore_to_socket_id(targ->lconf->id); + + name[0]++; + ret = rte_mempool_create(name, targ->nb_mbuf - 1, MBUF_SIZE, + targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0, + sock_id, 0); + PROX_PANIC(ret == NULL, "Failed to allocate dummy 
memory pool on socket %u with %u elements\n", + sock_id, targ->nb_mbuf - 1); + return ret; +} + +void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count) +{ + struct task_gen *task = (struct task_gen *)tbase; + + task->pkt_count = count; +} + +int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size) +{ + struct task_gen *task = (struct task_gen *)tbase; + int rc; + + task->pkt_template[0].len = pkt_size; + if ((rc = check_all_pkt_size(task, 0)) != 0) + return rc; + check_fields_in_bounds(task); + return rc; +} + +void task_gen_set_gateway_ip(struct task_base *tbase, uint32_t ip) +{ + struct task_gen *task = (struct task_gen *)tbase; + task->gw_ip = ip; + task->flags &= ~FLAG_DST_MAC_KNOWN; +} + +void task_gen_set_rate(struct task_base *tbase, uint64_t bps) +{ + struct task_gen *task = (struct task_gen *)tbase; + + task->new_rate_bps = bps; +} + +void task_gen_reset_randoms(struct task_base *tbase) +{ + struct task_gen *task = (struct task_gen *)tbase; + + for (uint32_t i = 0; i < task->n_rands; ++i) { + task->rand[i].rand_mask = 0; + task->rand[i].fixed_bits = 0; + task->rand[i].rand_offset = 0; + } + task->n_rands = 0; + task->flags &= ~FLAG_RANDOM_IPS; +} + +int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len) +{ + struct task_gen *task = (struct task_gen *)tbase; + + for (size_t i = 0; i < task->n_pkts; ++i) { + uint32_t to_write = rte_cpu_to_be_32(value) >> ((4 - len) * 8); + uint8_t *dst = task->pkt_template[i].buf; + + rte_memcpy(dst + offset, &to_write, len); + } + + task_gen_pkt_template_recalc_all(task); + + return 0; +} + +void task_gen_reset_values(struct task_base *tbase) +{ + struct task_gen *task = (struct task_gen *)tbase; + + task_gen_reset_pkt_templates_content(task); +} + +uint32_t task_gen_get_n_randoms(struct task_base *tbase) +{ + struct task_gen *task = (struct task_gen *)tbase; + + return task->n_rands; +} + +static void init_task_gen_pcap(struct task_base *tbase, struct 
task_args *targ) +{ + struct task_gen_pcap *task = (struct task_gen_pcap *)tbase; + const uint32_t sockid = rte_lcore_to_socket_id(targ->lconf->id); + + task->loop = targ->loop; + task->pkt_idx = 0; + task->hz = rte_get_tsc_hz(); + + task->local_mbuf.mempool = task_gen_create_mempool(targ); + + PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n"); + + char err[PCAP_ERRBUF_SIZE]; + pcap_t *handle = pcap_open_offline(targ->pcap_file, err); + PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err); + + task->n_pkts = pcap_count_pkts(handle); + plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file); + + if (targ->n_pkts) { + plogx_info("Configured to load %u packets\n", targ->n_pkts); + if (task->n_pkts > targ->n_pkts) + task->n_pkts = targ->n_pkts; + } + PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n"); + + plogx_info("Loading %u packets from pcap\n", task->n_pkts); + + size_t mem_size = task->n_pkts * (sizeof(*task->proto) + sizeof(*task->proto_tsc)); + uint8_t *mem = prox_zmalloc(mem_size, sockid); + + PROX_PANIC(mem == NULL, "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size); + task->proto = (struct pkt_template *) mem; + task->proto_tsc = (uint64_t *)(mem + task->n_pkts * sizeof(*task->proto)); + + pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc); + pcap_close(handle); +} + +static int task_gen_find_random_with_offset(struct task_gen *task, uint32_t offset) +{ + for (uint32_t i = 0; i < task->n_rands; ++i) { + if (task->rand[i].rand_offset == offset) { + return i; + } + } + + return UINT32_MAX; +} + +int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id) +{ + struct task_gen *task = (struct task_gen *)tbase; + uint32_t existing_rand; + + if (rand_id == UINT32_MAX && task->n_rands == 64) { + plog_err("Too many randoms\n"); + return -1; + } + uint32_t 
mask, fixed, len; + + if (parse_random_str(&mask, &fixed, &len, rand_str)) { + plog_err("%s\n", get_parse_err()); + return -1; + } + task->runtime_checksum_needed = 1; + + existing_rand = task_gen_find_random_with_offset(task, offset); + if (existing_rand != UINT32_MAX) { + plog_warn("Random at offset %d already set => overwriting len = %d %s\n", offset, len, rand_str); + rand_id = existing_rand; + task->rand[rand_id].rand_len = len; + task->rand[rand_id].rand_offset = offset; + task->rand[rand_id].rand_mask = mask; + task->rand[rand_id].fixed_bits = fixed; + return 0; + } + + task->rand[task->n_rands].rand_len = len; + task->rand[task->n_rands].rand_offset = offset; + task->rand[task->n_rands].rand_mask = mask; + task->rand[task->n_rands].fixed_bits = fixed; + + struct pkt_template *pktpl = &task->pkt_template[0]; + if (!((offset >= pktpl->ip_dst_pos + 4) || (offset + len < pktpl->ip_dst_pos))) { + plog_info("\tUsing randoms IP destinations\n"); + task->flags |= FLAG_RANDOM_IPS; + } + + task->n_rands++; + return 0; +} + +static void init_task_gen_early(struct task_args *targ) +{ + uint8_t *generator_count = prox_sh_find_system("generator_count"); + + if (generator_count == NULL) { + generator_count = prox_zmalloc(sizeof(*generator_count), 0); + prox_sh_add_system("generator_count", generator_count); + } + targ->generator_id = *generator_count; + (*generator_count)++; +} + +static void init_task_gen(struct task_base *tbase, struct task_args *targ) +{ + struct task_gen *task = (struct task_gen *)tbase; + + task->packet_id_pos = targ->packet_id_pos; + + task->local_mbuf.mempool = task_gen_create_mempool(targ); + PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n"); + task->pkt_idx = 0; + task->hz = rte_get_tsc_hz(); + task->lat_pos = targ->lat_pos; + task->accur_pos = targ->accur_pos; + task->sig_pos = targ->sig_pos; + task->sig = targ->sig; + task->new_rate_bps = targ->rate_bps; + + struct token_time_cfg tt_cfg = 
token_time_cfg_create(1250000000, rte_get_tsc_hz(), -1); + + token_time_init(&task->token_time, &tt_cfg); + init_task_gen_seeds(task); + + task->min_bulk_size = targ->min_bulk_size; + task->max_bulk_size = targ->max_bulk_size; + if (task->min_bulk_size < 1) + task->min_bulk_size = 1; + if (task->max_bulk_size < 1) + task->max_bulk_size = 64; + PROX_PANIC(task->max_bulk_size > 64, "max_bulk_size higher than 64\n"); + PROX_PANIC(task->max_bulk_size < task->min_bulk_size, "max_bulk_size must be > than min_bulk_size\n"); + + task->pkt_count = -1; + task->lat_enabled = targ->lat_enabled; + task->runtime_flags = targ->runtime_flags; + PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat not enabled by lat pos or accur pos configured\n"); + + task->generator_id = targ->generator_id; + task->link_speed = UINT64_MAX; + if (targ->nb_txrings == 0 && targ->nb_txports == 1) + task->link_speed = 1250000000; + + if (!strcmp(targ->pcap_file, "")) { + plog_info("\tUsing inline definition of a packet\n"); + task_init_gen_load_pkt_inline(task, targ); + } else { + plog_info("Loading from pcap %s\n", targ->pcap_file); + task_init_gen_load_pcap(task, targ); + } + + if ((targ->flags & DSF_KEEP_SRC_MAC) == 0 && (targ->nb_txrings || targ->nb_txports)) { + uint8_t *src_addr = prox_port_cfg[tbase->tx_params_hw.tx_port_queue->port].eth_addr.addr_bytes; + for (uint32_t i = 0; i < task->n_pkts; ++i) { + rte_memcpy(&task->pkt_template[i].buf[6], src_addr, 6); + } + } + memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr)); + if (!strcmp(targ->task_init->sub_mode_str, "l3")) { + // In L3 GEN, we need to receive ARP replies + task->flags = FLAG_L3_GEN; + task->gw_ip = rte_cpu_to_be_32(targ->gateway_ipv4); + uint32_t n_entries; + + if (targ->number_gen_ip == 0) + n_entries = 1048576; + else + n_entries = targ->number_gen_ip; + + static char hash_name[30]; + sprintf(hash_name, "A%03d_mac_table", targ->lconf->id); 
+ + struct rte_hash_parameters hash_params = { + .name = hash_name, + .entries = n_entries, + .key_len = sizeof(uint32_t), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + }; + task->mac_hash = rte_hash_create(&hash_params); + PROX_PANIC(task->mac_hash == NULL, "Failed to set up mac hash table for %d IP\n", n_entries); + + const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id); + task->dst_mac = (uint64_t *)prox_zmalloc(n_entries * sizeof(uint64_t), socket); + PROX_PANIC(task->dst_mac == NULL, "Failed to allocate mac table for %d IP\n", n_entries); + + for (uint32_t i = 0; i < task->n_pkts; ++i) { + // For all destination IP, ARP request will need to be sent + // Store position of Destination IP in template + int ip_dst_pos = 0; + int maybe_ipv4 = 0; + int l2_len = sizeof(struct ether_hdr); + struct vlan_hdr *vlan_hdr; + uint8_t *pkt = task->pkt_template[i].buf; + struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt; + struct ipv4_hdr *ip; + uint16_t ether_type = eth_hdr->ether_type; + + // Unstack VLAN tags + while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < task->pkt_template[i].len)) { + vlan_hdr = (struct vlan_hdr *)(pkt + l2_len); + l2_len +=4; + ether_type = vlan_hdr->eth_proto; + } + if ((ether_type == ETYPE_MPLSU) || (ether_type == ETYPE_MPLSM)) { + l2_len +=4; + maybe_ipv4 = 1; + } + if ((ether_type == ETYPE_IPv4) || maybe_ipv4) { + struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + l2_len); + PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4); + // Even if IPv4 header contains options, options are after ip src and dst + ip_dst_pos = l2_len + sizeof(struct ipv4_hdr) - sizeof(uint32_t); + uint32_t *p = ((uint32_t *)(task->pkt_template[i].buf + ip_dst_pos - sizeof(uint32_t))); + task->pkt_template[i].ip_dst_pos = ip_dst_pos; + task->pkt_template[i].ip_src = *p; + uint32_t *p1 = ((uint32_t *)(task->pkt_template[i].buf + ip_dst_pos)); + 
plog_info("\tip_dst_pos = %d, ip_dst = %x\n", ip_dst_pos, *p1); + } + } + task->src_ip = rte_cpu_to_be_32(targ->local_ipv4); + } + for (uint32_t i = 0; i < targ->n_rand_str; ++i) { + PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX), + "Failed to add random\n"); + } + + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + task->cksum_offload = port->capabilities.tx_offload_cksum; + } +} + +static struct task_init task_init_gen = { + .mode_str = "gen", + .init = init_task_gen, + .handle = handle_gen_bulk, + .start = start, +#ifdef SOFT_CRC + // For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the + // vector mode is used by DPDK, resulting (theoretically) in higher performance. + .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, +#else + .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX, +#endif + .size = sizeof(struct task_gen) +}; + +static struct task_init task_init_gen_l3 = { + .mode_str = "gen", + .sub_mode_str = "l3", + .init = init_task_gen, + .handle = handle_gen_bulk, + .start = start, +#ifdef SOFT_CRC + // For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the + // vector mode is used by DPDK, resulting (theoretically) in higher performance. 
+ .flag_features = TASK_FEATURE_ZERO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_ZERO_RX, +#else + .flag_features = TASK_FEATURE_ZERO_RX, +#endif + .size = sizeof(struct task_gen) +}; + +static struct task_init task_init_gen_pcap = { + .mode_str = "gen", + .sub_mode_str = "pcap", + .init = init_task_gen_pcap, + .handle = handle_gen_pcap_bulk, + .start = start_pcap, +#ifdef SOFT_CRC + .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, +#else + .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX, +#endif + .size = sizeof(struct task_gen_pcap) +}; + +__attribute__((constructor)) static void reg_task_gen(void) +{ + reg_task(&task_init_gen); + reg_task(&task_init_gen_l3); + reg_task(&task_init_gen_pcap); +} diff --git a/VNFs/DPPD-PROX/handle_gen.h b/VNFs/DPPD-PROX/handle_gen.h new file mode 100644 index 00000000..6f00ca12 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_gen.h @@ -0,0 +1,51 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HANDLE_GEN_H_ +#define _HANDLE_GEN_H_ + +struct unique_id { + uint8_t generator_id; + uint32_t packet_id; +} __attribute__((packed)); + +static void unique_id_init(struct unique_id *unique_id, uint8_t generator_id, uint32_t packet_id) +{ + unique_id->generator_id = generator_id; + unique_id->packet_id = packet_id; +} + +static void unique_id_get(struct unique_id *unique_id, uint8_t *generator_id, uint32_t *packet_id) +{ + *generator_id = unique_id->generator_id; + *packet_id = unique_id->packet_id; +} + +struct task_base; + +void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count); +int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size); +void task_gen_set_rate(struct task_base *tbase, uint64_t bps); +void task_gen_set_gateway_ip(struct task_base *tbase, uint32_t ip); +void task_gen_reset_randoms(struct task_base *tbase); +void task_gen_reset_values(struct task_base *tbase); +int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len); +int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id); + +uint32_t task_gen_get_n_randoms(struct task_base *tbase); +uint32_t task_gen_get_n_values(struct task_base *tbase); + +#endif /* _HANDLE_GEN_H_ */ diff --git a/VNFs/DPPD-PROX/handle_genl4.c b/VNFs/DPPD-PROX/handle_genl4.c new file mode 100644 index 00000000..4c62c641 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_genl4.c @@ -0,0 +1,1139 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_mbuf.h> +#include <pcap.h> +#include <string.h> +#include <stdlib.h> +#include <rte_cycles.h> +#include <rte_version.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_hash.h> +#include <rte_hash_crc.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" +#include "prox_malloc.h" +#include "file_utils.h" +#include "hash_set.h" +#include "prox_assert.h" +#include "prox_args.h" +#include "defines.h" +#include "pkt_parser.h" +#include "handle_lat.h" +#include "task_init.h" +#include "task_base.h" +#include "prox_port_cfg.h" +#include "lconf.h" +#include "log.h" +#include "quit.h" +#include "heap.h" +#include "mbuf_utils.h" +#include "genl4_bundle.h" +#include "genl4_stream_udp.h" +#include "genl4_stream_tcp.h" +#include "cdf.h" +#include "fqueue.h" +#include "token_time.h" +#include "commands.h" +#include "prox_shared.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +struct new_tuple { + uint32_t dst_addr; + uint8_t proto_id; + uint16_t dst_port; + uint16_t l2_types[4]; +} __attribute__((packed)); + +enum handle_state {HANDLE_QUEUED, HANDLE_SCHEDULED}; + +struct task_gen_server { + struct task_base base; + struct l4_stats l4_stats; + struct rte_mempool *mempool; + struct rte_hash *listen_hash; + /* Listening bundles contain only 1 part since the state of a + multi_part comm is kept mostly at the client side*/ + struct bundle_cfg **listen_entries; + struct bundle_ctx_pool bundle_ctx_pool; + struct bundle_cfg *bundle_cfgs; /* Loaded configurations */ + struct token_time token_time; + enum handle_state handle_state; + struct heap *heap; + struct fqueue *fqueue; + struct rte_mbuf *cur_mbufs[MAX_PKT_BURST]; + uint32_t cur_mbufs_beg; + uint32_t cur_mbufs_end; + uint32_t cancelled; + uint8_t out_saved; + struct rte_mbuf *mbuf_saved; + uint64_t last_tsc; + 
unsigned seed; + /* Handle scheduled events */ + struct rte_mbuf *new_mbufs[MAX_PKT_BURST]; + uint32_t n_new_mbufs; +}; + +struct task_gen_client { + struct task_base base; + struct l4_stats l4_stats; + struct rte_mempool *mempool; + struct bundle_ctx_pool bundle_ctx_pool; + struct bundle_cfg *bundle_cfgs; /* Loaded configurations */ + struct token_time token_time; + /* Create new connections and handle scheduled events */ + struct rte_mbuf *new_mbufs[MAX_PKT_BURST]; + uint32_t new_conn_cost; + uint32_t new_conn_tokens; + uint64_t new_conn_last_tsc; + uint32_t n_new_mbufs; + uint64_t last_tsc; + struct cdf *cdf; + unsigned seed; + struct heap *heap; +}; + +static int refill_mbufs(uint32_t *n_new_mbufs, struct rte_mempool *mempool, struct rte_mbuf **mbufs) +{ + if (*n_new_mbufs == MAX_PKT_BURST) + return 0; + + if (rte_mempool_get_bulk(mempool, (void **)mbufs, MAX_PKT_BURST - *n_new_mbufs) < 0) { + plogx_err("4Mempool alloc failed for %d mbufs\n", MAX_PKT_BURST - *n_new_mbufs); + return -1; + } + + for (uint32_t i = 0; i < MAX_PKT_BURST - *n_new_mbufs; ++i) { + init_mbuf_seg(mbufs[i]); + } + + *n_new_mbufs = MAX_PKT_BURST; + + return 0; +} + +static const struct bundle_cfg *server_accept(struct task_gen_server *task, struct new_tuple *nt) +{ + int ret = rte_hash_lookup(task->listen_hash, nt); + + if (ret < 0) + return NULL; + else + return task->listen_entries[ret]; +} + +static int handle_gen_bulk_client(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_gen_client *task = (struct task_gen_client *)tbase; + uint8_t out[MAX_PKT_BURST] = {0}; + struct bundle_ctx *conn; + int ret; + + if (n_pkts) { + for (int i = 0; i < n_pkts; ++i) { + struct pkt_tuple pt; + struct l4_meta l4_meta; + + if (parse_pkt(mbufs[i], &pt, &l4_meta)) { + plogdx_err(mbufs[i], "Parsing failed\n"); + out[i] = OUT_DISCARD; + continue; + } + + ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pt); + + if (ret < 0) { + plogx_dbg("Client: packet RX 
that does not belong to connection:" + "Client = "IPv4_BYTES_FMT":%d, Server = "IPv4_BYTES_FMT":%d\n", + IPv4_BYTES(((uint8_t*)&pt.dst_addr)), + rte_bswap16(pt.dst_port), + IPv4_BYTES(((uint8_t*)&pt.src_addr)), + rte_bswap16(pt.src_port)); + + plogdx_dbg(mbufs[i], NULL); + + if (pt.proto_id == IPPROTO_TCP) { + stream_tcp_create_rst(mbufs[i], &l4_meta, &pt); + out[i] = 0; + continue; + } + else { + out[i] = OUT_DISCARD; + continue; + } + } + + conn = task->bundle_ctx_pool.hash_entries[ret]; + ret = bundle_proc_data(conn, mbufs[i], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); + out[i] = ret == 0? 0: OUT_HANDLED; + } + task->base.tx_pkt(&task->base, mbufs, n_pkts, out); + } + + /* If there is at least one callback to handle, handle at most MAX_PKT_BURST */ + if (heap_top_is_lower(task->heap, rte_rdtsc())) { + if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs)) + return 0; + + uint16_t n_called_back = 0; + while (heap_top_is_lower(task->heap, rte_rdtsc()) && n_called_back < MAX_PKT_BURST) { + conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap)); + + /* handle packet TX (retransmit or delayed transmit) */ + ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); + + if (ret == 0) { + out[n_called_back] = 0; + n_called_back++; + } + } + plogx_dbg("During callback, will send %d packets\n", n_called_back); + + task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out); + task->n_new_mbufs -= n_called_back; + } + + uint32_t n_new = task->bundle_ctx_pool.n_free_bundles; + n_new = n_new > MAX_PKT_BURST? 
MAX_PKT_BURST : n_new; + + uint64_t diff = (rte_rdtsc() - task->new_conn_last_tsc)/task->new_conn_cost; + task->new_conn_last_tsc += diff * task->new_conn_cost; + task->new_conn_tokens += diff; + + if (task->new_conn_tokens > 16) + task->new_conn_tokens = 16; + if (n_new > task->new_conn_tokens) + n_new = task->new_conn_tokens; + task->new_conn_tokens -= n_new; + if (n_new == 0) + return 0; + + if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs)) + return 0; + + for (uint32_t i = 0; i < n_new; ++i) { + struct bundle_ctx *bundle_ctx = bundle_ctx_pool_get_w_cfg(&task->bundle_ctx_pool); + PROX_ASSERT(bundle_ctx); + + struct pkt_tuple *pt = &bundle_ctx->tuple; + + int n_retries = 0; + do { + /* Note that the actual packet sent will + contain swapped addresses and ports + (i.e. pkt.src <=> tuple.dst). The incoming + packet will match this struct. */ + bundle_init(bundle_ctx, task->heap, PEER_CLIENT, &task->seed); + + ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)pt); + if (ret >= 0) { + if (n_retries++ == 1000) { + plogx_err("Already tried 1K times\n"); + } + } + } while (ret >= 0); + + ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)pt); + + if (ret < 0) { + plogx_err("Failed to add key ret = %d, n_free = %d\n", ret, task->bundle_ctx_pool.n_free_bundles); + bundle_ctx_pool_put(&task->bundle_ctx_pool, bundle_ctx); + + pkt_tuple_debug2(pt); + out[i] = OUT_DISCARD; + continue; + } + + task->bundle_ctx_pool.hash_entries[ret] = bundle_ctx; + + if (bundle_ctx->ctx.stream_cfg->proto == IPPROTO_TCP) + task->l4_stats.tcp_created++; + else + task->l4_stats.udp_created++; + + task->l4_stats.bundles_created++; + + ret = bundle_proc_data(bundle_ctx, task->new_mbufs[i], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); + out[i] = ret == 0? 
0: OUT_HANDLED; + } + + int ret2 = task->base.tx_pkt(&task->base, task->new_mbufs, n_new, out); + task->n_new_mbufs -= n_new; + return ret2; +} + +static int handle_gen_queued(struct task_gen_server *task) +{ + uint8_t out[MAX_PKT_BURST]; + struct bundle_ctx *conn; + struct pkt_tuple pkt_tuple; + struct l4_meta l4_meta; + uint16_t j; + uint16_t cancelled = 0; + int ret; + + if (task->cur_mbufs_beg == task->cur_mbufs_end) { + task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST); + task->cur_mbufs_beg = 0; + } + uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg; + struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg; + + j = task->cancelled; + if (task->cancelled) { + uint16_t pkt_len = mbuf_wire_size(mbufs[0]); + + if (token_time_take(&task->token_time, pkt_len) != 0) + return -1; + + out[0] = task->out_saved; + task->cancelled = 0; + } + + /* Main proc loop */ + for (; j < n_pkts; ++j) { + + if (parse_pkt(mbufs[j], &pkt_tuple, &l4_meta)) { + plogdx_err(mbufs[j], "Unknown packet, parsing failed\n"); + out[j] = OUT_DISCARD; + } + + conn = NULL; + ret = rte_hash_lookup(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple); + + if (ret >= 0) + conn = task->bundle_ctx_pool.hash_entries[ret]; + else { + /* If not part of existing connection, try to create a connection */ + struct new_tuple nt; + nt.dst_addr = pkt_tuple.dst_addr; + nt.proto_id = pkt_tuple.proto_id; + nt.dst_port = pkt_tuple.dst_port; + rte_memcpy(nt.l2_types, pkt_tuple.l2_types, sizeof(nt.l2_types)); + const struct bundle_cfg *n; + + if (NULL != (n = server_accept(task, &nt))) { + conn = bundle_ctx_pool_get(&task->bundle_ctx_pool); + if (!conn) { + out[j] = OUT_DISCARD; + plogx_err("No more free bundles to accept new connection\n"); + continue; + } + ret = rte_hash_add_key(task->bundle_ctx_pool.hash, (const void *)&pkt_tuple); + if (ret < 0) { + out[j] = OUT_DISCARD; + bundle_ctx_pool_put(&task->bundle_ctx_pool, conn); + plog_err("Adding key failed while 
trying to accept connection\n"); + continue; + } + + task->bundle_ctx_pool.hash_entries[ret] = conn; + + bundle_init_w_cfg(conn, n, task->heap, PEER_SERVER, &task->seed); + conn->tuple = pkt_tuple; + + if (conn->ctx.stream_cfg->proto == IPPROTO_TCP) + task->l4_stats.tcp_created++; + else + task->l4_stats.udp_created++; + } + else { + plog_err("Packet received for service that does not exist :\n" + "source ip = %0x:%u\n" + "dst ip = %0x:%u\n", + pkt_tuple.src_addr, rte_bswap16(pkt_tuple.src_port), + pkt_tuple.dst_addr, rte_bswap16(pkt_tuple.dst_port)); + } + } + + /* bundle contains either an active connection or a + newly created connection. If it is NULL, then not + listening. */ + if (NULL != conn) { + ret = bundle_proc_data(conn, mbufs[j], &l4_meta, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); + + out[j] = ret == 0? 0: OUT_HANDLED; + + if (ret == 0) { + uint16_t pkt_len = mbuf_wire_size(mbufs[j]); + + if (token_time_take(&task->token_time, pkt_len) != 0) { + task->out_saved = out[j]; + task->cancelled = 1; + task->base.tx_pkt(&task->base, mbufs, j, out); + task->cur_mbufs_beg += j; + return -1; + } + } + } + else { + pkt_tuple_debug(&pkt_tuple); + plogd_dbg(mbufs[j], NULL); + out[j] = OUT_DISCARD; + } + } + + task->base.tx_pkt(&task->base, mbufs, j, out); + + task->cur_mbufs_beg += j; + return 0; +} + +static int handle_gen_scheduled(struct task_gen_server *task) +{ + struct bundle_ctx *conn; + uint8_t out[MAX_PKT_BURST]; + int ret; + uint16_t n_called_back = 0; + + if (task->cancelled) { + struct rte_mbuf *mbuf = task->mbuf_saved; + + uint16_t pkt_len = mbuf_wire_size(mbuf); + if (token_time_take(&task->token_time, pkt_len) == 0) { + task->cancelled = 0; + out[0] = 0; + task->base.tx_pkt(&task->base, &mbuf, 1, out); + } + else { + return -1; + } + } + + if (0 != refill_mbufs(&task->n_new_mbufs, task->mempool, task->new_mbufs)) + return -1; + + conn = NULL; + while (heap_top_is_lower(task->heap, rte_rdtsc()) && n_called_back < task->n_new_mbufs) { + 
conn = BUNDLE_CTX_UPCAST(heap_pop(task->heap)); + + /* handle packet TX (retransmit or delayed transmit) */ + ret = bundle_proc_data(conn, task->new_mbufs[n_called_back], NULL, &task->bundle_ctx_pool, &task->seed, &task->l4_stats); + + if (ret == 0) { + struct rte_mbuf *mbuf = task->new_mbufs[n_called_back]; + uint16_t pkt_len = mbuf_wire_size(mbuf); + + if (token_time_take(&task->token_time, pkt_len) == 0) { + out[n_called_back] = 0; + n_called_back++; + } + else { + + struct ether_hdr *eth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + struct ipv4_hdr *ip = (struct ipv4_hdr*)(eth + 1); + struct tcp_hdr *tcp = (struct tcp_hdr*)(ip + 1); + + task->out_saved = 0; + task->cancelled = 1; + task->mbuf_saved = mbuf; + task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out); + /* The mbuf that is currently been + processed (and which has been + cancelled) is saved in + task->mbuf_saved. It will be + restored as the first mbuf when + this function is called again. */ + task->n_new_mbufs -= (n_called_back + 1); + return -1; + } + } + } + + task->base.tx_pkt(&task->base, task->new_mbufs, n_called_back, out); + task->n_new_mbufs -= n_called_back; + + return 0; +} + +static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_gen_server *task = (struct task_gen_server *)tbase; + struct bundle_ctx *conn; + int ret, ret2 = 0; + + token_time_update(&task->token_time, rte_rdtsc()); + + if ((ret = fqueue_put(task->fqueue, mbufs, n_pkts)) != n_pkts) { + uint8_t out[MAX_PKT_BURST]; + for (uint16_t j = 0; j < n_pkts - ret; ++j) + out[j] = OUT_DISCARD; + + ret2 = task->base.tx_pkt(&task->base, mbufs + ret, n_pkts - ret, out); + } + if (task->handle_state == HANDLE_QUEUED) { + if (handle_gen_queued(task) == 0) { + if (handle_gen_scheduled(task) != 0) + task->handle_state = HANDLE_SCHEDULED; + } + } + else { + if (handle_gen_scheduled(task) == 0) { + if (handle_gen_queued(task) != 0) + task->handle_state = HANDLE_QUEUED; + 
} + } + return ret2; +} + +static int lua_to_host_set(struct lua_State *L, enum lua_place from, const char *name, struct host_set *h) +{ + int pop; + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) + return -1; + + uint32_t port = 0, port_mask = 0; + + if (lua_to_ip(L, TABLE, "ip", &h->ip) || lua_to_int(L, TABLE, "port", &port)) + return -1; + + if (lua_to_int(L, TABLE, "ip_mask", &h->ip_mask)) + h->ip_mask = 0; + if (lua_to_int(L, TABLE, "port_mask", &port_mask)) + h->port_mask = 0; + + h->port = rte_bswap16(port); + h->port_mask = rte_bswap16(port_mask); + h->ip = rte_bswap32(h->ip); + h->ip_mask = rte_bswap32(h->ip_mask); + + lua_pop(L, pop); + return 0; +} + +static int file_read_cached(const char *file_name, uint8_t **mem, uint32_t beg, uint32_t len, uint32_t socket, struct hash_set *hs) +{ + if (len == 0) { + *mem = 0; + return 0; + } + + uint8_t *data_mem; + + /* Since the configuration can reference the same file from + multiple places, use prox_shared infrastructure to detect + this and return previously loaded data. */ + char name[256]; + + snprintf(name, sizeof(name), "%u-%u:%s", beg, len, file_name); + *mem = prox_sh_find_socket(socket, name); + if (*mem) + return 0; + + /* check if the file has been loaded on the other socket. */ + if (socket == 1 && (data_mem = prox_sh_find_socket(0, name))) { + uint8_t *data_find = hash_set_find(hs, data_mem, len); + if (!data_find) { + data_find = prox_zmalloc(len, socket); + PROX_PANIC(data_find == NULL, "Failed to allocate memory (%u bytes) to hold header for peer\n", len); + + rte_memcpy(data_find, data_mem, len); + hash_set_add(hs, data_find, len); + } + *mem = data_find; + prox_sh_add_socket(socket, name, *mem); + return 0; + } + + /* It is possible that a file with a different name contains + the same data. 
In that case, search all loaded files and + compare the data to reduce memory utilization.*/ + data_mem = malloc(len); + PROX_PANIC(data_mem == NULL, "Failed to allocate temporary memory to hold data\n"); + + if (file_read_content(file_name, data_mem, beg, len)) { + plog_err("%s\n", file_get_error()); + return -1; + } + + uint8_t *data_find = hash_set_find(hs, data_mem, len); + if (!data_find) { + data_find = prox_zmalloc(len, socket); + PROX_PANIC(data_find == NULL, "Failed to allocate memory (%u bytes) to hold header for peer\n", len); + + rte_memcpy(data_find, data_mem, len); + hash_set_add(hs, data_find, len); + } + + free(data_mem); + + *mem = data_find; + prox_sh_add_socket(socket, name, *mem); + return 0; +} + +static int lua_to_peer_data(struct lua_State *L, enum lua_place from, const char *name, uint32_t socket, struct peer_data *peer_data, size_t *cl, struct hash_set *hs) +{ + uint32_t hdr_len, hdr_beg, content_len, content_beg; + char hdr_file[256], content_file[256]; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) + return -1; + + if (lua_getfrom(L, TABLE, "header") < 0) + return -1; + if (lua_to_int(L, TABLE, "len", &hdr_len) < 0) + return -1; + if (lua_to_int(L, TABLE, "beg", &hdr_beg) < 0) + return -1; + if (lua_to_string(L, TABLE, "file_name", hdr_file, sizeof(hdr_file)) < 0) + return -1; + lua_pop(L, 1); + + if (lua_getfrom(L, TABLE, "content") < 0) + return -1; + if (lua_to_int(L, TABLE, "len", &content_len) < 0) + return -1; + if (lua_to_int(L, TABLE, "beg", &content_beg) < 0) + return -1; + if (lua_to_string(L, TABLE, "file_name", content_file, sizeof(content_file)) < 0) + return -1; + lua_pop(L, 1); + + if (hdr_len == UINT32_MAX) { + long ret = file_get_size(hdr_file); + + if (ret < 0) { + plog_err("%s", file_get_error()); + return -1; + } + hdr_len = ret - hdr_beg; + } + + if (content_len == UINT32_MAX) { + long ret = file_get_size(content_file); + + if (ret < 0) { + plog_err("%s", 
file_get_error()); + return -1; + } + content_len = ret - content_beg; + } + *cl = content_len; + peer_data->hdr_len = hdr_len; + + if (file_read_cached(hdr_file, &peer_data->hdr, hdr_beg, hdr_len, socket, hs)) + return -1; + if (file_read_cached(content_file, &peer_data->content, content_beg, content_len, socket, hs)) + return -1; + + lua_pop(L, pop); + return 0; +} + +static int lua_to_peer_action(struct lua_State *L, enum lua_place from, const char *name, struct peer_action *action, size_t client_contents_len, size_t server_contents_len) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) + return -1; + + uint32_t peer, beg, len; + if (lua_to_int(L, TABLE, "peer", &peer) || + lua_to_int(L, TABLE, "beg", &beg) || + lua_to_int(L, TABLE, "len", &len)) { + return -1; + } + size_t data_len = (peer == PEER_CLIENT? client_contents_len : server_contents_len); + if (len == (uint32_t)-1) + len = data_len - beg; + + PROX_PANIC(beg + len > data_len, "Accessing data past the end (starting at %u for %u bytes) while total length is %zu\n", beg, len, data_len); + + action->peer = peer; + action->beg = beg; + action->len = len; + lua_pop(L, pop); + return 0; +} + +static int lua_to_stream_cfg(struct lua_State *L, enum lua_place from, const char *name, uint32_t socket, struct stream_cfg **stream_cfg, struct hash_set *hs) +{ + int pop; + struct stream_cfg *ret; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (lua_getfrom(L, TABLE, "actions") < 0) + return -1; + + lua_len(prox_lua(), -1); + uint32_t n_actions = lua_tointeger(prox_lua(), -1); + lua_pop(prox_lua(), 1); + + lua_pop(L, 1); + + size_t mem_size = 0; + mem_size += sizeof(*ret); + /* one additional action is allocated to allow inserting an + additional "default" action to close down TCP sessions from + the client side. 
*/ + mem_size += sizeof(ret->actions[0]) * (n_actions + 1); + + ret = prox_zmalloc(sizeof(*ret) + mem_size, socket); + ret->n_actions = n_actions; + + size_t client_contents_len, server_contents_len; + char proto[16]; + uint32_t timeout_us, timeout_time_wait_us; + plogx_dbg("loading stream\n"); + if (lua_to_host_set(L, TABLE, "servers", &ret->servers)) + return -1; + if (lua_to_string(L, TABLE, "l4_proto", proto, sizeof(proto))) + return -1; + if (lua_to_peer_data(L, TABLE, "client_data", socket, &ret->data[PEER_CLIENT], &client_contents_len, hs)) + return -1; + if (lua_to_peer_data(L, TABLE, "server_data", socket, &ret->data[PEER_SERVER], &server_contents_len, hs)) + return -1; + + if (lua_to_int(L, TABLE, "timeout", &timeout_us)) { + timeout_us = 1000000; + } + + ret->tsc_timeout = usec_to_tsc(timeout_us); + + double up, dn; + + if (lua_to_double(L, TABLE, "up_bps", &up)) + up = 5000;// Default rate is 40 Mbps + + if (lua_to_double(L, TABLE, "dn_bps", &dn)) + dn = 5000;// Default rate is 40 Mbps + + const uint64_t hz = rte_get_tsc_hz(); + + ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, ETHER_MAX_LEN + 20); + ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, ETHER_MAX_LEN + 20); + + if (!strcmp(proto, "tcp")) { + ret->proto = IPPROTO_TCP; + ret->proc = stream_tcp_proc; + ret->is_ended = stream_tcp_is_ended; + + if (lua_to_int(L, TABLE, "timeout_time_wait", &timeout_time_wait_us)) { + timeout_time_wait_us = 2000000; + } + + ret->tsc_timeout_time_wait = usec_to_tsc(timeout_time_wait_us); + } + else if (!strcmp(proto, "udp")) { + plogx_dbg("loading UDP\n"); + ret->proto = IPPROTO_UDP; + ret->proc = stream_udp_proc; + ret->is_ended = stream_udp_is_ended; + } + else + return -1; + + /* get all actions */ + if (lua_getfrom(L, TABLE, "actions") < 0) + return -1; + + uint32_t idx = 0; + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_peer_action(L, STACK, NULL, &ret->actions[idx], client_contents_len, server_contents_len)) + return -1; + + 
stream_cfg_verify_action(ret, &ret->actions[idx]); + + idx++; + + lua_pop(L, 1); + } + lua_pop(L, 1); + + /* For TCP, one of the peers initiates closing down the + connection. This is signified by the last action having + with zero length. If such an action is not specified in the + configuration file, the default is for the client to close + the connection. This means that the TCP connection at the + client will go into a TIME_WAIT state and the server + releases all the resources avoiding resource starvation at + the server. */ + if (ret->proto == IPPROTO_TCP && ret->actions[ret->n_actions - 1].len != 0) { + ret->actions[ret->n_actions].len = 0; + ret->actions[ret->n_actions].beg = 0; + ret->actions[ret->n_actions].peer = PEER_CLIENT; + ret->n_actions++; + } + + if (IPPROTO_TCP == ret->proto) + stream_tcp_calc_len(ret, &ret->n_pkts, &ret->n_bytes); + else + stream_udp_calc_len(ret, &ret->n_pkts, &ret->n_bytes); + + lua_pop(L, pop); + *stream_cfg = ret; + return 0; +} + +static int lua_to_bundle_cfg(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct bundle_cfg *bundle, struct hash_set *hs) +{ + int pop, pop2, idx; + int clients_loaded = 0; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) + return -1; + + lua_len(prox_lua(), -1); + bundle->n_stream_cfgs = lua_tointeger(prox_lua(), -1); + lua_pop(prox_lua(), 1); + + bundle->stream_cfgs = prox_zmalloc(sizeof(*bundle->stream_cfgs) * bundle->n_stream_cfgs, socket); + + plogx_dbg("loading bundle cfg with %d streams\n", bundle->n_stream_cfgs); + idx = 0; + lua_pushnil(L); + while (lua_next(L, -2)) { + if (!clients_loaded) { + if (lua_to_host_set(L, TABLE, "clients", &bundle->clients)) { + return -1; + } + clients_loaded = 1; + } + if (lua_to_stream_cfg(L, STACK, NULL, socket, &bundle->stream_cfgs[idx], hs)) { + return -1; + } + + ++idx; + lua_pop(L, 1); + } + + lua_pop(L, pop); + return 0; +} + +static void init_task_gen(struct task_base *tbase, 
struct task_args *targ) +{ + struct task_gen_server *task = (struct task_gen_server *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + static char name[] = "server_mempool"; + name[0]++; + task->mempool = rte_mempool_create(name, + 4*1024 - 1, MBUF_SIZE, + targ->nb_cache_mbuf, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, 0, + socket_id, 0); + PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1); + int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams); + PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams); + + lua_len(prox_lua(), -1); + uint32_t n_listen = lua_tointeger(prox_lua(), -1); + lua_pop(prox_lua(), 1); + PROX_PANIC(n_listen == 0, "No services specified to listen on\n"); + + task->bundle_cfgs = prox_zmalloc(n_listen * sizeof(task->bundle_cfgs[0]), socket_id); + + plogx_info("n_listen = %d\n", n_listen); + + struct hash_set *hs = prox_sh_find_socket(socket_id, "genl4_streams"); + if (hs == NULL) { + /* Expected number of streams per bundle = 1, hash_set + will grow if full. 
*/ + hs = hash_set_create(n_listen, socket_id); + prox_sh_add_socket(socket_id, "genl4_streams", hs); + } + + const struct rte_hash_parameters listen_table = { + .name = name, + .entries = n_listen * 4, + .key_len = sizeof(struct new_tuple), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = socket_id, + }; + name[0]++; + + task->listen_hash = rte_hash_create(&listen_table); + task->listen_entries = prox_zmalloc(listen_table.entries * sizeof(task->listen_entries[0]), socket_id); + + int idx = 0; + lua_pushnil(prox_lua()); + while (lua_next(prox_lua(), -2)) { + task->bundle_cfgs[idx].n_stream_cfgs = 1; + task->bundle_cfgs[idx].stream_cfgs = prox_zmalloc(sizeof(*task->bundle_cfgs[idx].stream_cfgs), socket_id); + int ret = lua_to_stream_cfg(prox_lua(), STACK, NULL, socket_id, &task->bundle_cfgs[idx].stream_cfgs[0], hs); + PROX_PANIC(ret, "Failed to load stream cfg\n"); + struct stream_cfg *stream = task->bundle_cfgs[idx].stream_cfgs[0]; + + // TODO: check mask and add to hash for each host + struct new_tuple nt = { + .dst_addr = stream->servers.ip, + .proto_id = stream->proto, + .dst_port = stream->servers.port, + .l2_types[0] = 0x0008, + }; + + ret = rte_hash_add_key(task->listen_hash, &nt); + PROX_PANIC(ret < 0, "Failed to add\n"); + + task->listen_entries[ret] = &task->bundle_cfgs[idx]; + + plogx_dbg("Server = "IPv4_BYTES_FMT":%d\n", IPv4_BYTES(((uint8_t*)&nt.dst_addr)), rte_bswap16(nt.dst_port)); + ++idx; + lua_pop(prox_lua(), 1); + } + + static char name2[] = "task_gen_hash2"; + + name2[0]++; + plogx_dbg("Creating bundle ctx pool\n"); + if (bundle_ctx_pool_create(name2, targ->n_concur_conn * 2, &task->bundle_ctx_pool, NULL, 0, NULL, socket_id)) { + cmd_mem_stats(); + PROX_PANIC(1, "Failed to create conn_ctx_pool\n"); + } + + task->heap = heap_create(targ->n_concur_conn * 2, socket_id); + task->seed = rte_rdtsc(); + + /* TODO: calculate the CDF of the reply distribution and the + number of replies as the number to cover for 99% of the + 
replies. For now, assume that this is number is 2. */ + uint32_t queue_size = rte_align32pow2(targ->n_concur_conn * 2); + + PROX_PANIC(queue_size == 0, "Overflow resulted in queue size 0\n"); + task->fqueue = fqueue_create(queue_size, socket_id); + PROX_PANIC(task->fqueue == NULL, "Failed to allocate local queue\n"); + + uint32_t n_descriptors; + + if (targ->nb_txports) { + PROX_PANIC(targ->nb_txports != 1, "Need exactly one TX port for L4 generation\n"); + n_descriptors = prox_port_cfg[targ->tx_port_queue[0].port].n_txd; + } else { + PROX_PANIC(targ->nb_txrings != 1, "Need exactly one TX ring for L4 generation\n"); + n_descriptors = 256; + } + + struct token_time_cfg tt_cfg = { + .bpp = targ->rate_bps, + .period = rte_get_tsc_hz(), + .bytes_max = n_descriptors * (ETHER_MIN_LEN + 20), + }; + + token_time_init(&task->token_time, &tt_cfg); +} + +static void init_task_gen_client(struct task_base *tbase, struct task_args *targ) +{ + struct task_gen_client *task = (struct task_gen_client *)tbase; + static char name[] = "gen_pool"; + const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id); + name[0]++; + task->mempool = rte_mempool_create(name, + 4*1024 - 1, MBUF_SIZE, + targ->nb_cache_mbuf, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, 0, + socket, 0); + PROX_PANIC(task->mempool == NULL, "Failed to allocate memory pool with %u elements\n", 4*1024 - 1); + + /* streams contains a lua table. Go through it and read each + stream with associated imix_fraction. 
*/ + uint32_t imix; + uint32_t i = 0; + + int pop = lua_getfrom(prox_lua(), GLOBAL, targ->streams); + PROX_PANIC(pop < 0, "Failed to find '%s' in lua\n", targ->streams); + + lua_len(prox_lua(), -1); + uint32_t n_bundle_cfgs = lua_tointeger(prox_lua(), -1); + lua_pop(prox_lua(), 1); + PROX_PANIC(n_bundle_cfgs == 0, "No configs specified\n"); + plogx_info("loading %d bundle_cfgs\n", n_bundle_cfgs); + + struct hash_set *hs = prox_sh_find_socket(socket, "genl4_streams"); + if (hs == NULL) { + /* Expected number of streams per bundle = 8, hash_set + will grow if full. */ + hs = hash_set_create(n_bundle_cfgs * 8, socket); + prox_sh_add_socket(socket, "genl4_streams", hs); + } + + task->bundle_cfgs = prox_zmalloc(n_bundle_cfgs * sizeof(task->bundle_cfgs[0]), socket); + lua_pushnil(prox_lua()); + + int total_imix = 0; + + uint32_t *occur = prox_zmalloc(n_bundle_cfgs * sizeof(*occur), socket); + struct cdf *cdf = cdf_create(n_bundle_cfgs, socket); + + while (lua_next(prox_lua(), -2)) { + PROX_PANIC(lua_to_int(prox_lua(), TABLE, "imix_fraction", &imix) || + lua_to_bundle_cfg(prox_lua(), TABLE, "bundle", socket, &task->bundle_cfgs[i], hs), + "Failed to load bundle cfg:\n%s\n", get_lua_to_errors()); + cdf_add(cdf, imix); + occur[i] = imix; + total_imix += imix; + ++i; + lua_pop(prox_lua(), 1); + } + + lua_pop(prox_lua(), pop); + cdf_setup(cdf); + + PROX_PANIC(targ->max_setup_rate == 0, "Max setup rate not set\n"); + + task->new_conn_cost = rte_get_tsc_hz()/targ->max_setup_rate; + + static char name2[] = "task_gen_hash"; + name2[0]++; + plogx_dbg("Creating bundle ctx pool\n"); + if (bundle_ctx_pool_create(name2, targ->n_concur_conn, &task->bundle_ctx_pool, occur, n_bundle_cfgs, task->bundle_cfgs, socket)) { + cmd_mem_stats(); + PROX_PANIC(1, "Failed to create conn_ctx_pool\n"); + } + + task->heap = heap_create(targ->n_concur_conn, socket); + task->seed = rte_rdtsc(); + /* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */ + + /* To avoid overflowing the tx 
descriptors, the token bucket + size needs to be limited. The descriptors are filled most + quickly with the smallest packets. For that reason, the + token bucket size is given by "number of tx descriptors" * + "smallest Ethernet packet". */ + PROX_ASSERT(targ->nb_txports == 1); + + struct token_time_cfg tt_cfg = { + .bpp = targ->rate_bps, + .period = rte_get_tsc_hz(), + .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20), + }; + + token_time_init(&task->token_time, &tt_cfg); +} + +static void start_task_gen_client(struct task_base *tbase) +{ + struct task_gen_client *task = (struct task_gen_client *)tbase; + + token_time_reset(&task->token_time, rte_rdtsc(), 0); + + task->new_conn_tokens = 0; + task->new_conn_last_tsc = rte_rdtsc(); +} + +static void stop_task_gen_client(struct task_base *tbase) +{ + struct task_gen_client *task = (struct task_gen_client *)tbase; + struct bundle_ctx *bundle; + + while (!heap_is_empty(task->heap)) { + bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap)); + bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats); + } +} + +static void start_task_gen_server(struct task_base *tbase) +{ + struct task_gen_server *task = (struct task_gen_server *)tbase; + + token_time_reset(&task->token_time, rte_rdtsc(), 0); +} + +static void stop_task_gen_server(struct task_base *tbase) +{ + struct task_gen_server *task = (struct task_gen_server *)tbase; + struct bundle_ctx *bundle; + uint8_t out[MAX_PKT_BURST]; + + while (!heap_is_empty(task->heap)) { + bundle = BUNDLE_CTX_UPCAST(heap_pop(task->heap)); + bundle_expire(bundle, &task->bundle_ctx_pool, &task->l4_stats); + } + + if (task->cancelled) { + struct rte_mbuf *mbuf = task->mbuf_saved; + + out[0] = OUT_DISCARD; + task->cancelled = 0; + task->base.tx_pkt(&task->base, &mbuf, 1, out); + } + + do { + if (task->cur_mbufs_beg == task->cur_mbufs_end) { + task->cur_mbufs_end = fqueue_get(task->fqueue, task->cur_mbufs, MAX_PKT_BURST); + task->cur_mbufs_beg = 0; + if 
(task->cur_mbufs_end == 0) + break; + } + uint16_t n_pkts = task->cur_mbufs_end - task->cur_mbufs_beg; + struct rte_mbuf **mbufs = task->cur_mbufs + task->cur_mbufs_beg; + + if (n_pkts) { + for (uint16_t j = 0; j < n_pkts; ++j) { + out[j] = OUT_DISCARD; + } + task->base.tx_pkt(&task->base, mbufs, n_pkts, out); + } + } while (1); +} + +static struct task_init task_init_gen1 = { + .mode_str = "genl4", + .sub_mode_str = "server", + .init = init_task_gen, + .handle = handle_gen_bulk, + .start = start_task_gen_server, + .stop = stop_task_gen_server, + .flag_features = TASK_FEATURE_ZERO_RX, + .size = sizeof(struct task_gen_server), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +static struct task_init task_init_gen2 = { + .mode_str = "genl4", + .init = init_task_gen_client, + .handle = handle_gen_bulk_client, + .start = start_task_gen_client, + .stop = stop_task_gen_client, + .flag_features = TASK_FEATURE_ZERO_RX, + .size = sizeof(struct task_gen_client), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_gen(void) +{ + reg_task(&task_init_gen1); + reg_task(&task_init_gen2); +} diff --git a/VNFs/DPPD-PROX/handle_gre_decap_encap.c b/VNFs/DPPD-PROX/handle_gre_decap_encap.c new file mode 100644 index 00000000..41f6dd33 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_gre_decap_encap.c @@ -0,0 +1,462 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_byteorder.h> +#include <rte_cycles.h> +#include <rte_hash.h> +#include <rte_ip.h> + +#include "prox_malloc.h" +#include "task_init.h" +#include "lconf.h" +#include "defines.h" +#include "stats.h" +#include "tx_pkt.h" +#include "hash_entry_types.h" +#include "prefetch.h" +#include "prox_cksum.h" +#include "gre.h" +#include "etypes.h" +#include "log.h" +#include "quit.h" +#include "prox_assert.h" +#include "pkt_prototypes.h" +#include "quit.h" + +struct cpe_gre_key { + struct ether_addr clt_mac; + uint16_t pad; +} __attribute__((__packed__)); + +struct cpe_gre_data { + uint32_t gre_id; + uint32_t cpe_ip; + uint64_t tsc; +#ifdef GRE_TP + uint64_t tp_tsc; + double tp_tbsize; +#endif +} __attribute__((__packed__)); + +struct task_gre_decap { + struct task_base base; + struct rte_hash *cpe_gre_hash; + struct cpe_gre_data *cpe_gre_data; + struct lcore_cfg *lconf; + uint8_t runtime_flags; + uint8_t mapping[PROX_MAX_PORTS]; + uint32_t bucket_index; + int offload_crc; + const void* key_ptr[16]; + struct cpe_gre_key key[16]; + uint64_t cpe_timeout; +#ifdef GRE_TP + double cycles_per_byte; + uint32_t tb_size; +#endif +}; + +static void handle_gre_decap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +static void handle_gre_encap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); + +static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_mbuf *mbuf, struct cpe_gre_data *table); +static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +static inline uint8_t handle_gre_decap(struct task_gre_decap *tbase, struct rte_mbuf *mbuf); + +void update_arp_entries_gre(void *data); + +static void init_cpe_gre_hash(struct task_args *targ) +{ + char name[64]; + uint8_t socket_id; + uint8_t lcore_id; + uint8_t table_part; + + /* 
Already set up by other task */ + if (targ->cpe_gre_hash) { + return; + } + + lcore_id = targ->lconf->id; + socket_id = rte_lcore_to_socket_id(lcore_id); + sprintf(name, "core_%u_CPE_GRE_Table", targ->lconf->id); + table_part = targ->nb_slave_threads; + + if (table_part == 0) + table_part = 1; + if (!rte_is_power_of_2(table_part)) { + table_part = rte_align32pow2(table_part) >> 1; + } + + struct rte_hash_parameters hash_params = { + .name = name, + .entries = MAX_GRE / table_part, + .bucket_entries = GRE_BUCKET_ENTRIES, + .key_len = sizeof(struct cpe_gre_key), + .hash_func_init_val = 0, + .socket_id = socket_id + }; + + struct rte_hash* phash = rte_hash_create(&hash_params); + struct cpe_gre_data *cpe_gre_data = prox_zmalloc(MAX_GRE / table_part, socket_id); + + PROX_PANIC(phash == NULL, "Unable to allocate memory for IPv4 hash table on core %u\n", lcore_id); + + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + enum task_mode smode = targ->lconf->targs[task_id].mode; + if (smode == GRE_DECAP || smode == GRE_ENCAP) { + targ->lconf->targs[task_id].cpe_gre_hash = phash; + targ->lconf->targs[task_id].cpe_gre_data = cpe_gre_data; + } + } +} + +static void init_task_gre_decap(struct task_base *tbase, struct task_args *targ) +{ + struct task_gre_decap *task = (struct task_gre_decap *)tbase; + + init_cpe_gre_hash(targ); + task->cpe_gre_hash = targ->cpe_gre_hash; + task->cpe_gre_data = targ->cpe_gre_data; + task->runtime_flags = targ->runtime_flags; + task->lconf = targ->lconf; + task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms); + + targ->lconf->period_func = update_arp_entries_gre; + targ->lconf->period_data = tbase; + targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES; + + for (uint8_t i = 0; i < 16; ++i) { + task->key_ptr[i] = &task->key[i]; + } +} + +static void init_task_gre_encap(struct task_base *tbase, struct task_args *targ) +{ + struct task_gre_decap *task = (struct task_gre_decap *)tbase; + + init_cpe_gre_hash(targ); 
+ task->cpe_gre_hash = targ->cpe_gre_hash; + task->cpe_gre_data = targ->cpe_gre_data; + task->runtime_flags = targ->runtime_flags; + task->lconf = targ->lconf; + + struct port_cfg *port = find_reachable_task_sending_to_port(targ); + if (port) { + task->offload_crc = port->capabilities.tx_offload_cksum; + } + +#ifdef GRE_TP + if (targ->tb_rate) { + task->cycles_per_byte = ((double)rte_get_tsc_hz()) / ((double)targ->tb_rate); + task->tb_size = targ->tb_size != 0 ? targ->tb_size : 1520; + } + else { + /* traffic policing disabled */ + task->cycles_per_byte = 0; + } +#endif +} + +static struct task_init task_init_gre_decap = { + .mode = GRE_DECAP, + .mode_str = "gredecap", + .init = init_task_gre_decap, + .handle = handle_gre_decap_bulk, + .size = sizeof(struct task_gre_decap) +}; + +static struct task_init task_init_gre_encap = { + .mode = GRE_ENCAP, + .mode_str = "greencap", + .init = init_task_gre_encap, + .handle = handle_gre_encap_bulk, + .size = sizeof(struct task_gre_decap) +}; + +__attribute__((constructor)) static void reg_task_gre(void) +{ + reg_task(&task_init_gre_decap); + reg_task(&task_init_gre_encap); +} + +void handle_gre_decap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_gre_decap *task = (struct task_gre_decap *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_gre_decap(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_gre_decap(task, mbufs[j]); + } +#endif + + task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +struct gre_packet { + struct ether_hdr eth; + struct ipv4_hdr ip; + struct gre_hdr gre; + union { + struct ether_hdr eth2; + struct 
ipv4_hdr ip2; + }; +} __attribute__((__packed__)); + +/* Handle ipv4 over GRE and Ethernet over GRE. In case of ipv4 over + GRE remove gre and ipv4 header and retain space for ethernet + header. In case of Eth over GRE remove external eth, gre and ipv4 + headers and return pointer to payload */ +static inline struct ether_hdr *gre_decap(struct gre_hdr *pgre, struct rte_mbuf *mbuf) +{ + int16_t hsize = 0; + if (pgre->type == ETYPE_EoGRE) { + hsize = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + } + else if (pgre->type == ETYPE_IPv4) { + /* retain sizeof(struct ether_hdr) */ + hsize = sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + } + else { + return NULL; + } + + return (struct ether_hdr *)rte_pktmbuf_adj(mbuf, hsize); +} + +static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_mbuf *mbuf) +{ + struct ipv4_hdr *pip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbuf, struct ether_hdr *) + 1); + + if (pip->next_proto_id != IPPROTO_GRE) { + plog_warn("Invalid packet proto_id = 0x%x expect 0x%x\n", + pip->next_proto_id, IPPROTO_GRE); + return OUT_DISCARD; + } + + struct cpe_gre_data data; + struct cpe_gre_key key; + struct gre_hdr *pgre = (struct gre_hdr *)(pip + 1); + data.gre_id = pgre->gre_id; + data.cpe_ip = pip->src_addr; + + struct ether_hdr *peth = gre_decap(pgre, mbuf); + PROX_PANIC(peth != 0, "Failed to gre_decap"); + + pip = (struct ipv4_hdr *)(peth + 1); + +/* emulate client MAC for test purposes */ +#if 1 + if (pgre->type == ETYPE_IPv4) { + struct ether_hdr eth = { + .d_addr = {.addr_bytes = + {0x0A, 0x02, 0x0A, 0x0A, 0x00, 0x01}}, + .s_addr = {.addr_bytes = + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + .ether_type = ETYPE_IPv4 + }; + uint32_t hip = rte_bswap32(pip->src_addr); + eth.s_addr.addr_bytes[2] = (hip >> 24) & 0xFF; + eth.s_addr.addr_bytes[3] = (hip >> 16) & 0xFF; + eth.s_addr.addr_bytes[4] = (hip >> 8) & 0xFF; + eth.s_addr.addr_bytes[5] = (hip) & 0xFF; + rte_memcpy(peth, ð, sizeof(struct 
ether_hdr)); + } + ether_addr_copy(&peth->s_addr, &key.clt_mac); +#endif + + data.tsc = rte_rdtsc() + task->cpe_timeout; + + int32_t hash_index = rte_hash_add_key(task->cpe_gre_hash, &key); + if (unlikely(hash_index < 0)) { + plog_warn("Failed to add key, gre %x\n", data.gre_id); + } + else if (unlikely(hash_index >= MAX_GRE)) { + plog_warn("Failed to add: Invalid hash_index = 0x%x\n", + hash_index); + return OUT_DISCARD; + } + rte_memcpy(&task->cpe_gre_data[hash_index], &data, sizeof(data)); + if (task->runtime_flags & TASK_TX_CRC) { + prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + } + + return 0; +} + +void handle_gre_encap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_gre_decap *task = (struct task_gre_decap *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t done = 0; + + while (n_pkts) { + uint16_t chopped = RTE_MIN(n_pkts, 16); + prefetch_pkts(mbufs, chopped); + handle_gre_encap16(task, mbufs, chopped, out + done); + mbufs += chopped; + n_pkts -= chopped; + done += chopped; + } + + task->base.tx_pkt(&task->base, mbufs - done, done, out); +} + +#define DO_ENC_ETH_OVER_GRE 1 +#define DO_ENC_IP_OVER_GRE 0 + +static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + for (uint8_t i = 0; i < n_pkts; ++i) { + struct ether_hdr *peth = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *); + ether_addr_copy(&peth->d_addr, &task->key[i].clt_mac); + } + + int32_t hash_index[16]; + rte_hash_lookup_bulk(task->cpe_gre_hash, task->key_ptr, n_pkts, hash_index); + for (uint8_t i = 0; i < n_pkts; ++i ) { + if (unlikely(hash_index[i] < 0)) { + plog_warn("Invalid hash_index (<0) = 0x%x\n", hash_index[i]); + out[i] = OUT_DISCARD; + } + else if (unlikely(hash_index[i] >= MAX_GRE)) { + plog_warn("Invalid hash_index = 0x%x\n", hash_index[i]); + out[i] = OUT_DISCARD; + } + rte_prefetch0(&task->cpe_gre_data[hash_index[i]]); + } + + 
for (uint8_t i = 0; i < n_pkts; ++i ) { + if (likely(out[i] != OUT_DISCARD)) { + out[i] = handle_gre_encap(task, mbufs[i], &task->cpe_gre_data[hash_index[i]]); + } + } +} + +#ifdef DO_ENC_ETH_OVER_GRE +#define PKT_PREPEND_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr)) +#elif DO_ENC_IP_OVER_GRE +#define PKT_PREPEND_LEN (sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr)) +#else + +static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_mbuf *mbuf, struct cpe_gre_data *table) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + struct ipv4_hdr *pip = (struct ipv4_hdr *)(peth + 1); + uint16_t ip_len = rte_be_to_cpu_16(pip->total_length); + + struct cpe_gre_key key; + ether_addr_copy(&peth->d_addr, &key.clt_mac); + +#ifdef GRE_TP + /* policing enabled */ + if (task->cycles_per_byte) { + const uint16_t pkt_size = rte_pktmbuf_pkt_len(mbuf) + ETHER_CRC_LEN; + uint64_t tsc_now = rte_rdtsc(); + if (table->tp_tbsize < pkt_size) { + uint64_t cycles_diff = tsc_now - table->tp_tsc; + double dB = ((double)cycles_diff) / task->cycles_per_byte; + if (dB > (double)task->tb_size) { + dB = task->tb_size; + } + if ((table->tp_tbsize + dB) >= pkt_size) { + table->tp_tbsize += dB; + table->tp_tsc = tsc_now; + } + else { + TASK_STATS_ADD_DROP_DISCARD(&task->base.aux->stats, 1); + return OUT_DISCARD; + } + } + table->tp_tbsize -= pkt_size; + } +#endif /* GRE_TP */ + + /* reuse ethernet header from payload, retain payload (ip) in + case of DO_ENC_IP_OVER_GRE */ + peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, PKT_PREPEND_LEN); + PREFETCH0(peth); + ip_len += PKT_PREPEND_LEN; + + pip = (struct ipv4_hdr *)(peth + 1); + struct gre_hdr *pgre = (struct gre_hdr *)(pip + 1); + + struct ether_hdr eth = { + .d_addr = {.addr_bytes = {0x0A, 0x0A, 0x0A, 0xC8, 0x00, 0x02}}, + .s_addr = {.addr_bytes = {0x0A, 0x0A, 0x0A, 0xC8, 0x00, 0x01}}, + .ether_type = ETYPE_IPv4 + }; + rte_memcpy(peth, ð, sizeof(struct ether_hdr)); + 
+ rte_memcpy(pgre, &gre_hdr_proto, sizeof(struct gre_hdr)); +#if DO_ENC_ETH_OVER_GRE + pgre->type = ETYPE_EoGRE; +#elif DO_ENC_IP_OVER_GRE + pgre->type = ETYPE_IPv4; +#endif + pgre->gre_id = table->gre_id; + + rte_memcpy(pip, &tunnel_ip_proto, sizeof(struct ipv4_hdr)); + pip->src_addr = 0x02010a0a; //emulate port ip + pip->dst_addr = table->cpe_ip; + pip->total_length = rte_cpu_to_be_16(ip_len); + + if (task->runtime_flags & TASK_TX_CRC) { + prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + } + + return 0; +} + +void update_arp_entries_gre(void *data) +{ + uint64_t cur_tsc = rte_rdtsc(); + struct task_gre_decap *task = (struct task_gre_decap *)data; + +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + // rte_hash_iterate might take a long time if no entries found => we should not use it here + // struct rte_hash is now internal..... + // => Not implemented +#else + uint32_t *sig_bucket = (hash_sig_t *)&(task->cpe_gre_hash->sig_tbl[task->bucket_index * task->cpe_gre_hash->sig_tbl_bucket_size]); + uint32_t table_index = task->bucket_index * task->cpe_gre_hash->bucket_entries; + + uint8_t *entry_bucket = + (uint8_t *) & task->cpe_gre_hash->key_tbl[task->bucket_index * task->cpe_gre_hash->bucket_entries * task->cpe_gre_hash->key_tbl_key_size]; + + for (uint32_t pos = 0; pos < task->cpe_gre_hash->bucket_entries; ++pos, ++table_index) { + struct cpe_gre_entry *key = (struct cpe_gre_entry *)&entry_bucket[pos * task->cpe_gre_hash->key_tbl_key_size]; + if (task->cpe_gre_data[table_index].tsc < cur_tsc) { + sig_bucket[pos] = 0; + task->cpe_gre_data[table_index].tsc = UINT64_MAX; + } + } + ++task->bucket_index; + task->bucket_index &= task->cpe_gre_hash->bucket_bitmask; +#endif +} diff --git a/VNFs/DPPD-PROX/handle_impair.c b/VNFs/DPPD-PROX/handle_impair.c new file mode 100644 index 00000000..3f2ee0eb --- /dev/null +++ b/VNFs/DPPD-PROX/handle_impair.c @@ -0,0 +1,421 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <stdio.h> +#include <rte_cycles.h> +#include <rte_version.h> + +#include "prox_malloc.h" +#include "lconf.h" +#include "log.h" +#include "random.h" +#include "handle_impair.h" +#include "prefetch.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +#define DELAY_ACCURACY 11 // accuracy of 2048 cycles ~= 1 micro-second +#define DELAY_MAX_MASK 0x1FFFFF // Maximum 2M * 2K cycles ~1 second + +struct queue_elem { + struct rte_mbuf *mbuf; + uint64_t tsc; +}; + +struct queue { + struct queue_elem *queue_elem; + unsigned queue_head; + unsigned queue_tail; +}; + +struct task_impair { + struct task_base base; + struct queue_elem *queue; + uint32_t random_delay_us; + uint32_t delay_us; + uint64_t delay_time; + uint64_t delay_time_mask; + unsigned queue_head; + unsigned queue_tail; + unsigned queue_mask; + int tresh; + unsigned int seed; + struct random state; + uint64_t last_idx; + struct queue *buffer; + uint32_t socket_id; + int need_update; +}; + +static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); + +void task_impair_set_proba(struct task_base *tbase, float proba) +{ + struct 
task_impair *task = (struct task_impair *)tbase; + task->tresh = ((uint64_t) RAND_MAX) * (uint32_t)(proba * 10000) / 1000000; +} + +void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us) +{ + struct task_impair *task = (struct task_impair *)tbase; + task->need_update = 1; + task->random_delay_us = random_delay_us; + task->delay_us = delay_us; +} + +static void task_impair_update(struct task_base *tbase) +{ + struct task_impair *task = (struct task_impair *)tbase; + uint32_t queue_len = 0; + size_t mem_size; + if (!task->need_update) + return; + task->need_update = 0; + uint64_t now = rte_rdtsc(); + uint8_t out[MAX_PKT_BURST] = {0}; + uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK; + + if (task->random_delay_us) { + tbase->handle_bulk = handle_bulk_impair_random; + task->delay_time = usec_to_tsc(task->random_delay_us); + task->delay_time_mask = rte_align32pow2(task->delay_time) - 1; + queue_len = rte_align32pow2((1250L * task->random_delay_us) / 84 / (DELAY_MAX_MASK + 1)); + } else if (task->delay_us == 0) { + tbase->handle_bulk = handle_bulk_random_drop; + task->delay_time = 0; + } else { + tbase->handle_bulk = handle_bulk_impair; + task->delay_time = usec_to_tsc(task->delay_us); + queue_len = rte_align32pow2(1250 * task->delay_us / 84); + } + if (task->queue) { + struct rte_mbuf *new_mbufs[MAX_PKT_BURST]; + while (task->queue_tail != task->queue_head) { + now = rte_rdtsc(); + uint16_t idx = 0; + while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) { + if (task->queue[task->queue_tail].tsc <= now) { + out[idx] = rand_r(&task->seed) <= task->tresh? 
0 : OUT_DISCARD; + new_mbufs[idx++] = task->queue[task->queue_tail].mbuf; + task->queue_tail = (task->queue_tail + 1) & task->queue_mask; + } + else { + break; + } + } + if (idx) + task->base.tx_pkt(&task->base, new_mbufs, idx, out); + } + prox_free(task->queue); + task->queue = NULL; + } + if (task->buffer) { + struct rte_mbuf *new_mbufs[MAX_PKT_BURST]; + while (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK)) { + now = rte_rdtsc(); + uint16_t pkt_idx = 0; + while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) { + struct queue *queue = &task->buffer[task->last_idx]; + while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) { + out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD; + new_mbufs[pkt_idx++] = queue->queue_elem[queue->queue_tail].mbuf; + queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask; + } + task->last_idx = (task->last_idx + 1) & DELAY_MAX_MASK; + } + + if (pkt_idx) + task->base.tx_pkt(&task->base, new_mbufs, pkt_idx, out); + } + for (int i = 0; i < DELAY_MAX_MASK + 1; i++) { + if (task->buffer[i].queue_elem) + prox_free(task->buffer[i].queue_elem); + } + prox_free(task->buffer); + task->buffer = NULL; + } + + if (queue_len < MAX_PKT_BURST) + queue_len= MAX_PKT_BURST; + task->queue_mask = queue_len - 1; + if (task->queue_mask < MAX_PKT_BURST - 1) + task->queue_mask = MAX_PKT_BURST - 1; + mem_size = (task->queue_mask + 1) * sizeof(task->queue[0]); + + if (task->delay_us) { + task->queue_head = 0; + task->queue_tail = 0; + task->queue = prox_zmalloc(mem_size, task->socket_id); + if (task->queue == NULL) { + plog_err("Not enough memory to allocate queue\n"); + task->queue_mask = 0; + } + } else if (task->random_delay_us) { + size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue); + plog_info("Allocating %zd bytes\n", size); + task->buffer = prox_zmalloc(size, task->socket_id); + PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n"); + 
plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size); + + for (int i = 0; i < DELAY_MAX_MASK + 1; i++) { + task->buffer[i].queue_elem = prox_zmalloc(mem_size, task->socket_id); + PROX_PANIC(task->buffer[i].queue_elem == NULL, "Not enough memory to allocate buffer elems\n"); + } + } + random_init_seed(&task->state); +} + +static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_impair *task = (struct task_impair *)tbase; + uint8_t out[MAX_PKT_BURST]; + for (uint16_t i = 0; i < n_pkts; ++i) { + out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD; + } + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); + task_impair_update(tbase); +} + +static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_impair *task = (struct task_impair *)tbase; + uint64_t now = rte_rdtsc(); + uint8_t out[MAX_PKT_BURST] = {0}; + uint16_t enqueue_failed; + uint16_t i; + int ret = 0; + + int nb_empty_slots = (task->queue_tail - task->queue_head + task->queue_mask) & task->queue_mask; + if (likely(nb_empty_slots >= n_pkts)) { + /* We know n_pkts fits, no need to check for every packet */ + for (i = 0; i < n_pkts; ++i) { + task->queue[task->queue_head].tsc = now + task->delay_time; + task->queue[task->queue_head].mbuf = mbufs[i]; + task->queue_head = (task->queue_head + 1) & task->queue_mask; + } + } else { + for (i = 0; i < n_pkts; ++i) { + if (((task->queue_head + 1) & task->queue_mask) != task->queue_tail) { + task->queue[task->queue_head].tsc = now + task->delay_time; + task->queue[task->queue_head].mbuf = mbufs[i]; + task->queue_head = (task->queue_head + 1) & task->queue_mask; + } + else { + /* Rest does not fit, need to drop those packets. 
*/ + enqueue_failed = i; + for (;i < n_pkts; ++i) { + out[i] = OUT_DISCARD; + } + ret+= task->base.tx_pkt(&task->base, mbufs + enqueue_failed, + n_pkts - enqueue_failed, out + enqueue_failed); + break; + } + } + } + + struct rte_mbuf *new_mbufs[MAX_PKT_BURST]; + uint16_t idx = 0; + + if (task->tresh != RAND_MAX) { + while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) { + if (task->queue[task->queue_tail].tsc <= now) { + out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD; + new_mbufs[idx] = task->queue[task->queue_tail].mbuf; + PREFETCH0(new_mbufs[idx]); + PREFETCH0(&new_mbufs[idx]->cacheline1); + idx++; + task->queue_tail = (task->queue_tail + 1) & task->queue_mask; + } + else { + break; + } + } + } else { + while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) { + if (task->queue[task->queue_tail].tsc <= now) { + out[idx] = 0; + new_mbufs[idx] = task->queue[task->queue_tail].mbuf; + PREFETCH0(new_mbufs[idx]); + PREFETCH0(&new_mbufs[idx]->cacheline1); + idx++; + task->queue_tail = (task->queue_tail + 1) & task->queue_mask; + } + else { + break; + } + } + } + + if (idx) + ret+= task->base.tx_pkt(&task->base, new_mbufs, idx, out); + task_impair_update(tbase); + return ret; +} + +/* + * We want to avoid using division and mod for performance reasons. + * We also want to support up to one second delay, and express it in tsc + * So the delay in tsc needs up to 32 bits (supposing procesor freq is less than 4GHz). + * If the max_delay is smaller, we make sure we use less bits. + * Note that we lose the MSB of the xorshift - 64 bits could hold + * two or three delays in TSC - but would probably make implementation more complex + * and not huge gain expected. Maybe room for optimization. + * Using this implementation, we might have to run random more than once for a delay + * but in average this should occur less than 50% of the time. 
+*/ + +static inline uint64_t random_delay(struct random *state, uint64_t max_delay, uint64_t max_delay_mask) +{ + uint64_t val; + while(1) { + val = random_next(state); + if ((val & max_delay_mask) < max_delay) + return (val & max_delay_mask); + } +} + +static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_impair *task = (struct task_impair *)tbase; + uint64_t now = rte_rdtsc(); + uint8_t out[MAX_PKT_BURST]; + uint16_t enqueue_failed; + uint16_t i; + int ret = 0; + uint64_t packet_time, idx; + uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK; + + for (i = 0; i < n_pkts; ++i) { + packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask); + idx = (packet_time >> DELAY_ACCURACY) & DELAY_MAX_MASK; + while (idx != ((now_idx - 1) & DELAY_MAX_MASK)) { + struct queue *queue = &task->buffer[idx]; + if (((queue->queue_head + 1) & task->queue_mask) != queue->queue_tail) { + queue->queue_elem[queue->queue_head].mbuf = mbufs[i]; + queue->queue_head = (queue->queue_head + 1) & task->queue_mask; + break; + } else { + idx = (idx + 1) & DELAY_MAX_MASK; + } + } + if (idx == ((now_idx - 1) & DELAY_MAX_MASK)) { + /* Rest does not fit, need to drop packet. Note that further packets might fit as might want to be sent earlier */ + out[0] = OUT_DISCARD; + ret+= task->base.tx_pkt(&task->base, mbufs + i, 1, out); + plog_warn("Unexpectdly dropping packets\n"); + } + } + + struct rte_mbuf *new_mbufs[MAX_PKT_BURST]; + uint16_t pkt_idx = 0; + + while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) { + struct queue *queue = &task->buffer[task->last_idx]; + while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) { + out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 
0 : OUT_DISCARD; + new_mbufs[pkt_idx] = queue->queue_elem[queue->queue_tail].mbuf; + PREFETCH0(new_mbufs[pkt_idx]); + PREFETCH0(&new_mbufs[pkt_idx]->cacheline1); + pkt_idx++; + queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask; + } + task->last_idx = (task->last_idx + 1) & DELAY_MAX_MASK; + } + + if (pkt_idx) + ret+= task->base.tx_pkt(&task->base, new_mbufs, pkt_idx, out); + task_impair_update(tbase); + return ret; +} + +static void init_task(struct task_base *tbase, struct task_args *targ) +{ + struct task_impair *task = (struct task_impair *)tbase; + uint32_t queue_len = 0; + size_t mem_size; + unsigned socket_id; + uint64_t delay_us = 0; + + task->seed = rte_rdtsc(); + if (targ->probability == 0) + targ->probability = 1000000; + + task->tresh = ((uint64_t) RAND_MAX) * targ->probability / 1000000; + + if ((targ->delay_us == 0) && (targ->random_delay_us == 0)) { + tbase->handle_bulk = handle_bulk_random_drop; + task->delay_time = 0; + } else if (targ->random_delay_us) { + tbase->handle_bulk = handle_bulk_impair_random; + task->delay_time = usec_to_tsc(targ->random_delay_us); + task->delay_time_mask = rte_align32pow2(task->delay_time) - 1; + delay_us = targ->random_delay_us; + queue_len = rte_align32pow2((1250L * delay_us) / 84 / (DELAY_MAX_MASK + 1)); + } else { + task->delay_time = usec_to_tsc(targ->delay_us); + delay_us = targ->delay_us; + queue_len = rte_align32pow2(1250 * delay_us / 84); + } + /* Assume Line-rate is maximum transmit speed. + TODO: take link speed if tx is port. 
+ */ + if (queue_len < MAX_PKT_BURST) + queue_len= MAX_PKT_BURST; + task->queue_mask = queue_len - 1; + if (task->queue_mask < MAX_PKT_BURST - 1) + task->queue_mask = MAX_PKT_BURST - 1; + + mem_size = (task->queue_mask + 1) * sizeof(task->queue[0]); + socket_id = rte_lcore_to_socket_id(targ->lconf->id); + task->socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + if (targ->delay_us) { + task->queue = prox_zmalloc(mem_size, socket_id); + PROX_PANIC(task->queue == NULL, "Not enough memory to allocate queue\n"); + task->queue_head = 0; + task->queue_tail = 0; + } else if (targ->random_delay_us) { + size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue); + plog_info("Allocating %zd bytes\n", size); + task->buffer = prox_zmalloc(size, socket_id); + PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n"); + plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size); + + for (int i = 0; i < DELAY_MAX_MASK + 1; i++) { + task->buffer[i].queue_elem = prox_zmalloc(mem_size, socket_id); + PROX_PANIC(task->buffer[i].queue_elem == NULL, "Not enough memory to allocate buffer elems\n"); + } + } + random_init_seed(&task->state); +} + +static struct task_init tinit = { + .mode_str = "impair", + .init = init_task, + .handle = handle_bulk_impair, + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_ZERO_RX, + .size = sizeof(struct task_impair) +}; + +__attribute__((constructor)) static void ctor(void) +{ + reg_task(&tinit); +} diff --git a/VNFs/DPPD-PROX/handle_impair.h b/VNFs/DPPD-PROX/handle_impair.h new file mode 100644 index 00000000..162213ed --- /dev/null +++ b/VNFs/DPPD-PROX/handle_impair.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_IMPAIR_H_ +#define _HANDLE_IMPAIR_H_ + +void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us); +void task_impair_set_proba(struct task_base *tbase, float proba); + +#endif /* _HANDLE_IMPAIR_H_ */ diff --git a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c new file mode 100644 index 00000000..a92f9cdc --- /dev/null +++ b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c @@ -0,0 +1,466 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_table_hash.h> +#include <rte_ether.h> +#include <rte_version.h> +#include <rte_byteorder.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" + +#include "tx_pkt.h" +#include "task_init.h" +#include "task_base.h" +#include "prox_port_cfg.h" +#include "prefetch.h" +#include "lconf.h" +#include "hash_utils.h" +#include "etypes.h" +#include "prox_cksum.h" +#include "defines.h" +#include "log.h" +#include "quit.h" +#include "prox_cfg.h" +#include "parse_utils.h" +#include "cfgfile.h" +#include "prox_shared.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define IPPROTO_IPIP IPPROTO_IPV4 +#endif + +struct ipv6_tun_dest { + struct ipv6_addr dst_addr; + struct ether_addr dst_mac; +}; + +typedef enum ipv6_tun_dir_t { + TUNNEL_DIR_ENCAP = 0, + TUNNEL_DIR_DECAP = 1, +} ipv6_tun_dir_t; + +struct task_ipv6_tun_base { + struct task_base base; + struct ether_addr src_mac; + uint8_t core_nb; + uint64_t keys[64]; + struct rte_mbuf* fake_packets[64]; + uint16_t lookup_port_mask; // Mask used before looking up the port + void* lookup_table; // Fast lookup table for bindings + uint32_t runtime_flags; + int offload_crc; +}; + +struct task_ipv6_decap { + struct task_ipv6_tun_base base; + struct ether_addr dst_mac; +}; + +struct task_ipv6_encap { + struct task_ipv6_tun_base base; + uint32_t ipaddr; + struct ipv6_addr local_endpoint_addr; + uint8_t tunnel_hop_limit; +}; + +#define IPv6_VERSION 6 +#ifndef IPPROTO_IPV4 +#define IPPROTO_IPV4 4 +#endif + +#define MAKE_KEY_FROM_FIELDS(ipv4_addr, port, port_mask) ( ((uint64_t)ipv4_addr << 16) | (port & port_mask) ) + +static int handle_ipv6_decap_bulk(struct task_base* tbase, struct rte_mbuf** rx_mbuf, const uint16_t n_pkts); +static int handle_ipv6_encap_bulk(struct task_base* tbase, struct rte_mbuf** rx_mbuf, const uint16_t n_pkts); + +static void init_lookup_table(struct task_ipv6_tun_base* ptask, struct task_args *targ) +{ + const int socket_id = 
rte_lcore_to_socket_id(targ->lconf->id); + + /* The lookup table is a per-core data structure to reduce the + memory footprint and improve cache utilization. Since + operations on the hash table are not safe, the data + structure can't be used on a per socket or on a system wide + basis. */ + ptask->lookup_table = prox_sh_find_core(targ->lconf->id, "ipv6_binding_table"); + if (NULL == ptask->lookup_table) { + struct ipv6_tun_binding_table *table; + PROX_PANIC(!strcmp(targ->tun_bindings, ""), "No tun bindings specified\n"); + int ret = lua_to_ip6_tun_binding(prox_lua(), GLOBAL, targ->tun_bindings, socket_id, &table); + PROX_PANIC(ret, "Failed to read tun_bindings config:\n %s\n", get_lua_to_errors()); + + struct rte_table_hash_key8_ext_params table_hash_params = { + .n_entries = (table->num_binding_entries * 4), + .n_entries_ext = (table->num_binding_entries * 2) >> 1, + .f_hash = hash_crc32, + .seed = 0, + .signature_offset = HASH_METADATA_OFFSET(8), // Ignored for dosig tables + .key_offset = HASH_METADATA_OFFSET(0), + }; + plogx_info("IPv6 Tunnel allocating lookup table on socket %d\n", socket_id); + ptask->lookup_table = rte_table_hash_key8_ext_dosig_ops. 
+ f_create(&table_hash_params, socket_id, sizeof(struct ipv6_tun_dest)); + PROX_PANIC(ptask->lookup_table == NULL, "Error creating IPv6 Tunnel lookup table"); + + for (unsigned idx = 0; idx < table->num_binding_entries; idx++) { + int key_found = 0; + void* entry_in_hash = NULL; + struct ipv6_tun_dest data; + struct ipv6_tun_binding_entry* entry = &table->entry[idx]; + uint64_t key = MAKE_KEY_FROM_FIELDS(rte_cpu_to_be_32(entry->public_ipv4), entry->public_port, ptask->lookup_port_mask); + rte_memcpy(&data.dst_addr, &entry->endpoint_addr, sizeof(struct ipv6_addr)); + rte_memcpy(&data.dst_mac, &entry->next_hop_mac, sizeof(struct ether_addr)); + + int ret = rte_table_hash_key8_ext_dosig_ops.f_add(ptask->lookup_table, &key, &data, &key_found, &entry_in_hash); + PROX_PANIC(ret, "Error adding entry (%d) to binding lookup table", idx); + PROX_PANIC(key_found, "key_found!!! for idx=%d\n", idx); + +#ifdef DBG_IPV6_TUN_BINDING + plog_info("Bind: %x:0x%x (port_mask 0x%x) key=0x%"PRIx64"\n", entry->public_ipv4, entry->public_port, ptask->lookup_port_mask, key); + plog_info(" -> "IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(entry->endpoint_addr.bytes), MAC_BYTES(entry->next_hop_mac.addr_bytes)); + plog_info(" -> "IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(data.dst_addr.bytes), MAC_BYTES(data.dst_mac.addr_bytes)); + plog_info(" -> entry_in_hash=%p\n", entry_in_hash); +#endif + } + plogx_info("IPv6 Tunnel created %d lookup table entries\n", table->num_binding_entries); + + prox_sh_add_core(targ->lconf->id, "ipv6_binding_table", ptask->lookup_table); + } +} + +static void init_task_ipv6_tun_base(struct task_ipv6_tun_base* tun_base, struct task_args* targ) +{ + memcpy(&tun_base->src_mac, find_reachable_port(targ), sizeof(tun_base->src_mac)); + + tun_base->lookup_port_mask = targ->lookup_port_mask; // Mask used before looking up the port + + init_lookup_table(tun_base, targ); + + for (uint32_t i = 0; i < 64; ++i) { + tun_base->fake_packets[i] = (struct 
rte_mbuf*)((uint8_t*)&tun_base->keys[i] - sizeof (struct rte_mbuf)); + } + + plogx_info("IPv6 Tunnel MAC="MAC_BYTES_FMT" port_mask=0x%x\n", + MAC_BYTES(tun_base->src_mac.addr_bytes), tun_base->lookup_port_mask); + + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + tun_base->offload_crc = port->capabilities.tx_offload_cksum; + } +} + +static void init_task_ipv6_decap(struct task_base* tbase, struct task_args* targ) +{ + struct task_ipv6_decap* tun_task = (struct task_ipv6_decap*)tbase; + struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)tun_task; + + init_task_ipv6_tun_base(tun_base, targ); + tun_base->runtime_flags = targ->runtime_flags; + + memcpy(&tun_task->dst_mac, &targ->edaddr, sizeof(tun_task->dst_mac)); +} + +static void init_task_ipv6_encap(struct task_base* tbase, struct task_args* targ) +{ + struct task_ipv6_encap* tun_task = (struct task_ipv6_encap*)tbase; + struct task_ipv6_tun_base *tun_base = (struct task_ipv6_tun_base*)tun_task; + + init_task_ipv6_tun_base(tun_base, targ); + + rte_memcpy(&tun_task->local_endpoint_addr, &targ->local_ipv6, sizeof(tun_task->local_endpoint_addr)); + tun_task->tunnel_hop_limit = targ->tunnel_hop_limit; + tun_base->runtime_flags = targ->runtime_flags; +} + +static struct task_init task_init_ipv6_decap = { + .mode_str = "ipv6_decap", + .init = init_task_ipv6_decap, + .handle = handle_ipv6_decap_bulk, + .size = sizeof(struct task_ipv6_decap) +}; + +static struct task_init task_init_ipv6_encap = { + .mode_str = "ipv6_encap", + .init = init_task_ipv6_encap, + .handle = handle_ipv6_encap_bulk, + .size = sizeof(struct task_ipv6_encap) +}; + +__attribute__((constructor)) static void reg_task_ipv6_decap(void) +{ + reg_task(&task_init_ipv6_decap); +} + +__attribute__((constructor)) static void reg_task_ipv6_encap(void) +{ + reg_task(&task_init_ipv6_encap); +} + +static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest); 
+static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest); + +static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint32_t* pAddr, uint16_t* pPort) +{ + *pAddr = (dir == TUNNEL_DIR_DECAP) ? pip4->src_addr : pip4->dst_addr; + + if (pip4->next_proto_id == IPPROTO_UDP) { + struct udp_hdr* pudp = (struct udp_hdr *)(pip4 + 1); + *pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? pudp->src_port : pudp->dst_port); + } + else if (pip4->next_proto_id == IPPROTO_TCP) { + struct tcp_hdr* ptcp = (struct tcp_hdr *)(pip4 + 1); + *pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? ptcp->src_port : ptcp->dst_port); + } + else { + plog_warn("IPv6 Tunnel: IPv4 packet of unexpected type proto_id=0x%x\n", pip4->next_proto_id); + *pPort = 0xffff; + return -1; + } + + return 0; +} + +static inline void extract_key(struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint64_t* pkey) +{ + uint32_t lookup_addr; + uint16_t lookup_port; + + if (unlikely( extract_key_fields(ptask, pip4, dir, &lookup_addr, &lookup_port))) { + plog_warn("IPv6 Tunnel: Unable to extract fields from packet\n"); + *pkey = 0xffffffffL; + return; + } + + *pkey = MAKE_KEY_FROM_FIELDS(lookup_addr, lookup_port, ptask->lookup_port_mask); +} + +static inline struct ipv4_hdr* get_ipv4_decap(struct rte_mbuf *mbuf) +{ + struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1); + struct ipv4_hdr* pip4 = (struct ipv4_hdr*) (pip6 + 1); // TODO - Skip Option headers + + return pip4; +} + +static inline struct ipv4_hdr* get_ipv4_encap(struct rte_mbuf *mbuf) +{ + struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + + return pip4; +} + +static inline void extract_key_decap(struct task_ipv6_tun_base* ptask, 
struct rte_mbuf *mbuf, uint64_t* pkey) +{ + extract_key(ptask, get_ipv4_decap(mbuf), TUNNEL_DIR_DECAP, pkey); +} + +static inline void extract_key_decap_bulk(struct task_ipv6_tun_base* ptask, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + for (uint16_t j = 0; j < n_pkts; ++j) { + extract_key_decap(ptask, mbufs[j], &ptask->keys[j]); + } +} + +static inline void extract_key_encap(struct task_ipv6_tun_base* ptask, struct rte_mbuf *mbuf, uint64_t* pkey) +{ + extract_key(ptask, get_ipv4_encap(mbuf), TUNNEL_DIR_ENCAP, pkey); +} + +static inline void extract_key_encap_bulk(struct task_ipv6_tun_base* ptask, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + for (uint16_t j = 0; j < n_pkts; ++j) { + extract_key_encap(ptask, mbufs[j], &ptask->keys[j]); + } +} + +__attribute__((cold)) static void handle_error(struct task_ipv6_tun_base* ptask, struct rte_mbuf* mbuf, ipv6_tun_dir_t dir) +{ + uint32_t lookup_addr; + uint16_t lookup_port; + uint64_t key; + + struct ipv4_hdr* pip4 = (dir == TUNNEL_DIR_DECAP) ? get_ipv4_decap(mbuf) : get_ipv4_encap(mbuf); + extract_key_fields(ptask, pip4, dir, &lookup_addr, &lookup_port); + extract_key(ptask, pip4, dir, &key); + + plog_warn("IPv6 Tunnel (%s) lookup failed for "IPv4_BYTES_FMT":%d [key=0x%"PRIx64"]\n", + (dir == TUNNEL_DIR_DECAP) ? 
"decap" : "encap", + IPv4_BYTES(((unsigned char*)&lookup_addr)), lookup_port, key); +} + +static int handle_ipv6_decap_bulk(struct task_base* tbase, struct rte_mbuf** mbufs, const uint16_t n_pkts) +{ + struct task_ipv6_decap* task = (struct task_ipv6_decap *)tbase; + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + struct ipv6_tun_dest* entries[64]; + uint8_t out[MAX_PKT_BURST]; + uint64_t lookup_hit_mask; + uint16_t n_kept = 0; + + prefetch_pkts(mbufs, n_pkts); + + // Lookup to verify packets are valid for their respective tunnels (their sending lwB4) + extract_key_decap_bulk(&task->base, mbufs, n_pkts); + rte_table_hash_key8_ext_dosig_ops.f_lookup(task->base.lookup_table, task->base.fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries); + + if (likely(lookup_hit_mask == pkts_mask)) { + for (uint16_t j = 0; j < n_pkts; ++j) { + out[j] = handle_ipv6_decap(task, mbufs[j], entries[j]); + } + } + else { + for (uint16_t j = 0; j < n_pkts; ++j) { + if (unlikely(!((lookup_hit_mask >> j) & 0x1))) { + handle_error(&task->base, mbufs[j], TUNNEL_DIR_DECAP); + out[j] = OUT_DISCARD; + continue; + } + out[j] = handle_ipv6_decap(task, mbufs[j], entries[j]); + } + } + + return task->base.base.tx_pkt(tbase, mbufs, n_pkts, out); +} + +static int handle_ipv6_encap_bulk(struct task_base* tbase, struct rte_mbuf** mbufs, const uint16_t n_pkts) +{ + struct task_ipv6_encap* task = (struct task_ipv6_encap *)tbase; + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + struct ipv6_tun_dest* entries[64]; + uint64_t lookup_hit_mask; + uint8_t out[MAX_PKT_BURST]; + uint16_t n_kept = 0; + + prefetch_first(mbufs, n_pkts); + + extract_key_encap_bulk(&task->base, mbufs, n_pkts); + rte_table_hash_key8_ext_dosig_ops.f_lookup(task->base.lookup_table, task->base.fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries); + + if (likely(lookup_hit_mask == pkts_mask)) { + for (uint16_t j = 0; j < n_pkts; ++j) { + out[j] = handle_ipv6_encap(task, mbufs[j], entries[j]); + } + } + else { + 
/* Strip the IPv6 tunnel encapsulation from one packet.
 * Validates that the frame is IPv6 carrying IPv4-in-IPv6 (IPIP), removes the
 * IPv6 header in place, and rewrites the Ethernet header (task source MAC,
 * configured destination MAC, ethertype back to IPv4).
 * Returns the out-port (0) on success or OUT_DISCARD on malformed input.
 * tun_dest is unused here: the lookup already validated the binding.
 */
static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, __attribute__((unused)) struct ipv6_tun_dest* tun_dest)
{
	struct ether_hdr* peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *);

	if (unlikely(peth->ether_type != ETYPE_IPv6)) {
		plog_warn("Received non IPv6 packet on ipv6 tunnel port\n");
		// Drop packet
		return OUT_DISCARD;
	}

	struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
	int ipv6_hdr_len = sizeof(struct ipv6_hdr);

	// TODO - Skip over any IPv6 Extension Header:
	//	If pip6->next_header is in (0, 43, 44, 50, 51, 60, 135), skip ahead pip->hdr_ext_len
	//	bytes and repeat. Increase ipv6_hdr_len with as much, each time.

	if (unlikely(pip6->proto != IPPROTO_IPIP)) {
		plog_warn("Received non IPv4 content within IPv6 tunnel packet\n");
		// Drop packet
		return OUT_DISCARD;
	}

	// Discard IPv6 encapsulation: trim ipv6_hdr_len bytes off the front,
	// which moves the frame start; re-read the Ethernet header pointer after.
	rte_pktmbuf_adj(rx_mbuf, ipv6_hdr_len);
	peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *);

	// Restore Ethernet header (the adj above overwrote it with IPv6 bytes)
	ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
	ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
	peth->ether_type = ETYPE_IPv4;

	return 0;
}
ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1); + pip6->vtc_flow = rte_cpu_to_be_32(IPv6_VERSION << 28); + pip6->proto = IPPROTO_IPIP; + pip6->payload_len = rte_cpu_to_be_16(ipv4_length); + pip6->hop_limits = ptask->tunnel_hop_limit; + rte_memcpy(pip6->dst_addr, &tun_dest->dst_addr, sizeof(pip6->dst_addr)); + rte_memcpy(pip6->src_addr, &ptask->local_endpoint_addr, sizeof(pip6->src_addr)); + + if (tun_base->runtime_flags & TASK_TX_CRC) { + // We modified the TTL in the IPv4 header, hence have to recompute the IPv4 checksum +#define TUNNEL_L2_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr)) + prox_ip_cksum(rx_mbuf, pip4, TUNNEL_L2_LEN, sizeof(struct ipv4_hdr), ptask->base.offload_crc); + } + return 0; +} diff --git a/VNFs/DPPD-PROX/handle_irq.c b/VNFs/DPPD-PROX/handle_irq.c new file mode 100644 index 00000000..4abf84a1 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_irq.c @@ -0,0 +1,169 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> + +#include "lconf.h" +#include "task_base.h" +#include "task_init.h" +#include "handle_irq.h" +#include "log.h" +#include "unistd.h" +#include "input.h" + +#define MAX_INDEX 65535 * 16 + +struct irq_info { + uint64_t tsc; + uint64_t lat; +}; + +struct irq_bucket { + uint64_t index; + struct irq_info info[MAX_INDEX]; +}; + +struct task_irq { + struct task_base base; + uint64_t start_tsc; + uint64_t tsc; + uint64_t max_irq; + uint8_t lcore_id; + volatile uint16_t stats_use_lt; /* which lt to use, */ + volatile uint16_t task_use_lt; /* 0 or 1 depending on which of the 2 result records are used */ + struct irq_bucket buffer[2]; +}; + +#define MAX_INTERRUPT_LENGTH 500000 /* Maximum length of an interrupt is (1 / MAX_INTERRUPT_LENGTH) seconds */ + +/* + * This module is not handling any packets. + * It loops on rdtsc() and checks whether it has been interrupted + * for more than (1 / MAX_INTERRUPT_LENGTH) sec. + * This is a debugging only task, useful to check if the system h + * as been properly configured. 
+*/ + +void task_irq_show_stats(struct task_irq *task_irq, struct input *input) +{ + struct irq_bucket *bucket = &task_irq->buffer[!task_irq->task_use_lt]; + if (input->reply) { + char buf[8192] = {0}; + if (bucket->index == 0) { + sprintf(buf, "\n"); + input->reply(input, buf, strlen(buf)); + buf[0] = 0; + } + for (uint64_t i = 0; i < bucket->index; i++) { + sprintf(buf + strlen(buf), "%d; %"PRIu64"""; %ld; %ld; %ld; %ld ;", + task_irq->lcore_id, + i, + bucket->info[i].lat, + bucket->info[i].lat * 1000000 / rte_get_tsc_hz(), + bucket->info[i].tsc - task_irq->start_tsc, + (bucket->info[i].tsc - task_irq->start_tsc) * 1000 / rte_get_tsc_hz()); + sprintf(buf+strlen(buf), "\n"); + input->reply(input, buf, strlen(buf)); + buf[0] = 0; + } + } else { + for (uint64_t i = 0; i < bucket->index; i++) + if (bucket->info[i].lat) + plog_info("[%d]; Interrupt %"PRIu64": %ld cycles (%ld micro-sec) at %ld cycles (%ld msec)\n", + task_irq->lcore_id, + i, + bucket->info[i].lat, + bucket->info[i].lat * 1000000 / rte_get_tsc_hz(), + bucket->info[i].tsc - task_irq->start_tsc, + (bucket->info[i].tsc - task_irq->start_tsc) * 1000 / rte_get_tsc_hz()); + } + task_irq->stats_use_lt = !task_irq->task_use_lt; + bucket->index = 0; +} + +static void irq_stop(struct task_base *tbase) +{ + struct task_irq *task = (struct task_irq *)tbase; + uint32_t i; + uint32_t lcore_id = rte_lcore_id(); + int bucket_id; + + plog_info("Stopping core %u\n", lcore_id); + plog_info("Core ID; Interrupt (nanosec); Time (msec)\n"); + for (int j = 0; j < 2; j++) { + // Start dumping the oldest bucket first + if (task->buffer[0].info[0].tsc < task->buffer[1].info[0].tsc) + bucket_id = j; + else + bucket_id = !j; + struct irq_bucket *bucket = &task->buffer[bucket_id]; + for (i=0; i< bucket->index;i++) { + if (bucket->info[i].lat != 0) { + plog_info("%d; %ld; %ld\n", + lcore_id, + bucket->info[i].lat * 1000000000 / rte_get_tsc_hz(), + (bucket->info[i].tsc - task->start_tsc) * 1000 / rte_get_tsc_hz()); + } + } + } + 
plog_info("Core %u stopped\n", lcore_id); +} + +static inline int handle_irq_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_irq *task = (struct task_irq *)tbase; + uint64_t tsc1; + uint64_t index; + + if (task->stats_use_lt != task->task_use_lt) + task->task_use_lt = task->stats_use_lt; + struct irq_bucket *bucket = &task->buffer[task->task_use_lt]; + + tsc1 = rte_rdtsc(); + if ((task->tsc != 0) && ((tsc1 - task->tsc) > task->max_irq) && (bucket->index < MAX_INDEX)) { + bucket->info[bucket->index].tsc = tsc1; + bucket->info[bucket->index++].lat = tsc1 - task->tsc; + } + task->tsc = tsc1; + return 0; +} + +static void init_task_irq(struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ + struct task_irq *task = (struct task_irq *)tbase; + // max_irq expressed in cycles + task->max_irq = rte_get_tsc_hz() / MAX_INTERRUPT_LENGTH; + task->start_tsc = rte_rdtsc(); + task->lcore_id = targ->lconf->id; + plog_info("\tusing irq mode with max irq set to %ld cycles\n", task->max_irq); +} + +static struct task_init task_init_irq = { + .mode_str = "irq", + .init = init_task_irq, + .handle = handle_irq_bulk, + .stop = irq_stop, + .flag_features = TASK_FEATURE_NO_RX, + .size = sizeof(struct task_irq) +}; + +static struct task_init task_init_none; + +__attribute__((constructor)) static void reg_task_irq(void) +{ + reg_task(&task_init_irq); +} diff --git a/VNFs/DPPD-PROX/handle_irq.h b/VNFs/DPPD-PROX/handle_irq.h new file mode 100644 index 00000000..784bf0d6 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_irq.h @@ -0,0 +1,25 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_IRQ_H_ +#define _HANDLE_IRQ_H_ + +struct task_irq; +struct input; + +void task_irq_show_stats(struct task_irq *task_irq, struct input *input); + +#endif /* _HANDLE_IRQ_H_ */ diff --git a/VNFs/DPPD-PROX/handle_l2fwd.c b/VNFs/DPPD-PROX/handle_l2fwd.c new file mode 100644 index 00000000..79a5f02e --- /dev/null +++ b/VNFs/DPPD-PROX/handle_l2fwd.c @@ -0,0 +1,117 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
/* L2 forward: rewrite the Ethernet source/destination MACs of every packet
 * in the burst according to the runtime flags set at init, then transmit.
 * Fast path: when both MACs are hardcoded, the 12 bytes in src_dst_mac are
 * copied over dst+src in one rte_memcpy. Otherwise each flag combination is
 * handled per packet (swap, keep, or overwrite with configured value).
 */
static int handle_l2fwd_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	struct task_l2fwd *task = (struct task_l2fwd *)tbase;
	struct ether_hdr *hdr;
	struct ether_addr mac;

	if ((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) {
		/* Source and Destination mac hardcoded */
		for (uint16_t j = 0; j < n_pkts; ++j) {
			hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *);
			rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
		}
	} else {
		for (uint16_t j = 0; j < n_pkts; ++j) {
			hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *);
			if ((task->runtime_flags & (TASK_ARG_DO_NOT_SET_SRC_MAC|TASK_ARG_SRC_MAC_SET)) == 0) {
				/* dst mac will be used as src mac: save it before the
				 * dst field is possibly overwritten below */
				ether_addr_copy(&hdr->d_addr, &mac);
			}

			if (task->runtime_flags & TASK_ARG_DST_MAC_SET)
				ether_addr_copy((struct ether_addr *)&task->src_dst_mac[0], &hdr->d_addr);
			else if ((task->runtime_flags & TASK_ARG_DO_NOT_SET_DST_MAC) == 0)
				/* default: reflect the packet's source MAC as destination */
				ether_addr_copy(&hdr->s_addr, &hdr->d_addr);

			if (task->runtime_flags & TASK_ARG_SRC_MAC_SET) {
				ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
			} else if ((task->runtime_flags & TASK_ARG_DO_NOT_SET_SRC_MAC) == 0) {
				/* use the original destination MAC saved above */
				ether_addr_copy(&mac, &hdr->s_addr);
			}
		}
	}
	return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
}
config file + * - not written in case 'dst mac=no' in config file + * - (default - no 'dst mac') src mac from the packet + * Source MAC can come from + * - pre-configured mac in case 'src mac=xx:xx:xx:xx:xx:xx' in config file + * - dst mac from the packet in case 'src mac=packet' in config file + * - not written in case 'src mac=no' in config file + * - (default - no 'src mac') if (tx_port) port mac + * - (default - no 'src mac') if (no tx_port) dst mac from the packet + */ + + if (targ->flags & TASK_ARG_DST_MAC_SET) { + dst_addr = &targ->edaddr; + memcpy(&task->src_dst_mac[0], dst_addr, sizeof(*src_addr)); + } + + if (targ->flags & TASK_ARG_SRC_MAC_SET) { + src_addr = &targ->esaddr; + memcpy(&task->src_dst_mac[6], src_addr, sizeof(*dst_addr)); + plog_info("\t\tCore %d: src mac set from config file\n", targ->lconf->id); + } else if ((targ->flags & TASK_ARG_DO_NOT_SET_SRC_MAC) == 0) { + if (targ->nb_txports) { + src_addr = &prox_port_cfg[task->base.tx_params_hw.tx_port_queue[0].port].eth_addr; + targ->flags |= TASK_ARG_SRC_MAC_SET; + plog_info("\t\tCore %d: src mac set from port\n", targ->lconf->id); + memcpy(&task->src_dst_mac[6], src_addr, sizeof(*dst_addr)); + } + } + task->runtime_flags = targ->flags; +} + +static struct task_init task_init_l2fwd = { + .mode_str = "l2fwd", + .init = init_task_l2fwd, + .handle = handle_l2fwd_bulk, + .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, + .size = sizeof(struct task_l2fwd), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_l2fwd(void) +{ + reg_task(&task_init_l2fwd); +} diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c new file mode 100644 index 00000000..0b7ad561 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lat.c @@ -0,0 +1,650 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may 
not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +//#define LAT_DEBUG + +#include <rte_cycles.h> +#include <stdio.h> +#include <math.h> + +#include "handle_gen.h" +#include "prox_malloc.h" +#include "mbuf_utils.h" +#include "handle_lat.h" +#include "log.h" +#include "task_init.h" +#include "task_base.h" +#include "stats.h" +#include "lconf.h" +#include "quit.h" +#include "eld.h" +#include "prox_shared.h" + +#define DEFAULT_BUCKET_SIZE 10 + +struct lat_info { + uint32_t rx_packet_index; + uint32_t tx_packet_index; + uint32_t tx_err; + uint32_t rx_err; + uint64_t rx_time; + uint64_t tx_time; + uint16_t port_queue_id; +#ifdef LAT_DEBUG + uint16_t id_in_bulk; + uint16_t bulk_size; + uint64_t begin; + uint64_t after; + uint64_t before; +#endif +}; + +struct delayed_latency_entry { + uint32_t rx_packet_idx; + uint64_t pkt_rx_time; + uint64_t pkt_tx_time; + uint64_t rx_time_err; +}; + +struct delayed_latency { + struct delayed_latency_entry entries[64]; +}; + +static struct delayed_latency_entry *delayed_latency_get(struct delayed_latency *delayed_latency, uint32_t rx_packet_idx) +{ + if (delayed_latency->entries[rx_packet_idx % 64].rx_packet_idx == rx_packet_idx) + return &delayed_latency->entries[rx_packet_idx % 64]; + else + return NULL; +} + +static struct delayed_latency_entry *delayed_latency_create(struct delayed_latency *delayed_latency, uint32_t rx_packet_idx) +{ + delayed_latency->entries[rx_packet_idx % 64].rx_packet_idx = rx_packet_idx; + return &delayed_latency->entries[rx_packet_idx % 64]; +} + +struct rx_pkt_meta_data { + 
uint8_t *hdr; + uint32_t pkt_tx_time; + uint32_t bytes_after_in_bulk; +}; + +struct task_lat { + struct task_base base; + uint64_t limit; + uint64_t rx_packet_index; + uint64_t last_pkts_tsc; + struct delayed_latency delayed_latency; + struct lat_info *latency_buffer; + uint32_t latency_buffer_idx; + uint32_t latency_buffer_size; + uint64_t begin; + uint16_t lat_pos; + uint16_t unique_id_pos; + uint16_t accur_pos; + uint16_t sig_pos; + uint32_t sig; + volatile uint16_t use_lt; /* which lt to use, */ + volatile uint16_t using_lt; /* 0 or 1 depending on which of the 2 measurements are used */ + struct lat_test lt[2]; + struct lat_test *lat_test; + uint32_t generator_count; + struct early_loss_detect *eld; + struct rx_pkt_meta_data *rx_pkt_meta; + FILE *fp_rx; + FILE *fp_tx; +}; + +static uint32_t abs_diff(uint32_t a, uint32_t b) +{ + return a < b? UINT32_MAX - (b - a - 1) : a - b; +} + +struct lat_test *task_lat_get_latency_meassurement(struct task_lat *task) +{ + if (task->use_lt == task->using_lt) + return &task->lt[!task->using_lt]; + return NULL; +} + +void task_lat_use_other_latency_meassurement(struct task_lat *task) +{ + task->use_lt = !task->using_lt; +} + +static void task_lat_update_lat_test(struct task_lat *task) +{ + if (task->use_lt != task->using_lt) { + task->using_lt = task->use_lt; + task->lat_test = &task->lt[task->using_lt]; + task->lat_test->accuracy_limit_tsc = task->limit; + } +} + +static int compare_tx_time(const void *val1, const void *val2) +{ + const struct lat_info *ptr1 = val1; + const struct lat_info *ptr2 = val2; + + return ptr1->tx_time - ptr2->tx_time; +} + +static int compare_queue_id(const void *val1, const void *val2) +{ + return compare_tx_time(val1, val2); +} + +static void fix_latency_buffer_tx_time(struct lat_info *lat, uint32_t count) +{ + uint32_t id, time, old_id = 0, old_time = 0, n_overflow = 0; + + for (uint32_t i = 0; i < count; i++) { + id = lat->port_queue_id; + time = lat->tx_time; + if (id == old_id) { + // Same 
queue id as previous entry; time should always increase
+			if (time < old_time) {
+				n_overflow++;
+			}
+			// NOTE(review): 'lat' is never advanced in this loop, so the
+			// read/update above always touch entry 0 — confirm whether
+			// 'lat[i]' was intended here and in the reads above.
+			lat->tx_time += UINT32_MAX * n_overflow;
+			old_time = time;
+		} else {
+			// Different queue_id, time starts again at 0
+			old_id = id;
+			old_time = 0;
+			n_overflow = 0;
+		}
+	}
+}
+
+// Fold the losses still pending in each generator's early-loss detector
+// into the active lat_test counters (called when the task stops).
+static void task_lat_count_remaining_lost_packets(struct task_lat *task)
+{
+	struct lat_test *lat_test = task->lat_test;
+
+	for (uint32_t j = 0; j < task->generator_count; j++) {
+		struct early_loss_detect *eld = &task->eld[j];
+
+		lat_test->lost_packets += early_loss_detect_count_remaining_loss(eld);
+	}
+}
+
+static void task_lat_reset_eld(struct task_lat *task)
+{
+	for (uint32_t j = 0; j < task->generator_count; j++) {
+		early_loss_detect_reset(&task->eld[j]);
+	}
+}
+
+// Smallest TX timestamp in the latency buffer, rescaled to TSC units;
+// used as the time origin when dumping latencies to file.
+static uint64_t lat_latency_buffer_get_min_tsc(struct task_lat *task)
+{
+	uint64_t min_tsc = UINT64_MAX;
+
+	for (uint32_t i = 0; i < task->latency_buffer_idx; i++) {
+		if (min_tsc > task->latency_buffer[i].tx_time)
+			min_tsc = task->latency_buffer[i].tx_time;
+	}
+
+	return min_tsc << LATENCY_ACCURACY;
+}
+
+static uint64_t lat_info_get_lat_tsc(struct lat_info *lat_info)
+{
+	uint64_t lat = abs_diff(lat_info->rx_time, lat_info->tx_time);
+
+	return lat << LATENCY_ACCURACY;
+}
+
+static uint64_t lat_info_get_tx_err_tsc(const struct lat_info *lat_info)
+{
+	return ((uint64_t)lat_info->tx_err) << LATENCY_ACCURACY;
+}
+
+static uint64_t lat_info_get_rx_err_tsc(const struct lat_info *lat_info)
+{
+	return ((uint64_t)lat_info->rx_err) << LATENCY_ACCURACY;
+}
+
+static uint64_t lat_info_get_rx_tsc(const struct lat_info *lat_info)
+{
+	// Bug fix: previously shifted the pointer value itself
+	// ((uint64_t)lat_info); use the stored RX timestamp.
+	return lat_info->rx_time << LATENCY_ACCURACY;
+}
+
+static uint64_t lat_info_get_tx_tsc(const struct lat_info *lat_info)
+{
+	// Bug fix: previously shifted the pointer value itself
+	// ((uint64_t)lat_info); use the stored TX timestamp.
+	return lat_info->tx_time << LATENCY_ACCURACY;
+}
+
+static void lat_write_latency_to_file(struct task_lat *task)
+{
+	uint64_t min_tsc;
+	uint32_t n_loss;
+
+	min_tsc = lat_latency_buffer_get_min_tsc(task);
+
+	// Dumping all packet statistics
+	
fprintf(task->fp_rx, "Latency stats for %u packets, ordered by rx time\n", task->latency_buffer_idx);
+	// Header fixed to name all six printed columns (was missing rx time).
+	fprintf(task->fp_rx, "rx index;queue;tx index;lat (nsec);rx time;tx time\n");
+	for (uint32_t i = 0; i < task->latency_buffer_idx ; i++) {
+		struct lat_info *lat_info = &task->latency_buffer[i];
+		uint64_t lat_tsc = lat_info_get_lat_tsc(lat_info);
+		uint64_t rx_tsc = lat_info_get_rx_tsc(lat_info);
+		uint64_t tx_tsc = lat_info_get_tx_tsc(lat_info);
+
+		/* Bug fix: format had 5 conversions for 6 arguments
+		   ("%u%d;..." was missing a ';') and used %ld for the
+		   uint64_t nanosecond values — undefined behavior. */
+		fprintf(task->fp_rx, "%u;%d;%d;%lu;%lu;%lu\n",
+			lat_info->rx_packet_index,
+			lat_info->port_queue_id,
+			lat_info->tx_packet_index,
+			tsc_to_nsec(lat_tsc),
+			tsc_to_nsec(rx_tsc - min_tsc),
+			tsc_to_nsec(tx_tsc - min_tsc));
+	}
+
+	// To detect dropped packets, we need to sort them based on TX
+	plogx_info("Sorting packets based on queue_id\n");
+	qsort (task->latency_buffer, task->latency_buffer_idx, sizeof(struct lat_info), compare_queue_id);
+	plogx_info("Adapting tx_time\n");
+	fix_latency_buffer_tx_time(task->latency_buffer, task->latency_buffer_idx);
+	plogx_info("Sorting packets based on tx_time\n");
+	qsort (task->latency_buffer, task->latency_buffer_idx, sizeof(struct lat_info), compare_tx_time);
+	plogx_info("Sorted packets based on tx_time\n");
+
+	// A packet is marked as dropped if 2 packets received from the same queue are not consecutive
+	fprintf(task->fp_tx, "Latency stats for %u packets, sorted by tx time\n", task->latency_buffer_idx);
+	fprintf(task->fp_tx, "queue;tx index; rx index; lat (nsec);tx time; rx time; tx_err;rx_err\n");
+
+	uint32_t prev_tx_packet_index = -1;
+	for (uint32_t i = 0; i < task->latency_buffer_idx; i++) {
+		struct lat_info *lat_info = &task->latency_buffer[i];
+		uint64_t lat_tsc = lat_info_get_lat_tsc(lat_info);
+		uint64_t tx_err_tsc = lat_info_get_tx_err_tsc(lat_info);
+		uint64_t rx_err_tsc = lat_info_get_rx_err_tsc(lat_info);
+		uint64_t rx_tsc = lat_info_get_rx_tsc(lat_info);
+		uint64_t tx_tsc = lat_info_get_tx_tsc(lat_info);
+
+		/* Packet n + 64 delivers the TX error for 
packet n, + hence the last 64 packets do no have TX error. */ + if (i + 64 >= task->latency_buffer_idx) { + tx_err_tsc = 0; + } + // Log dropped packet + n_loss = lat_info->tx_packet_index - prev_tx_packet_index - 1; + if (n_loss) + fprintf(task->fp_tx, "===> %d;%d;0;0;0;0; lost %d packets <===\n", + lat_info->port_queue_id, + lat_info->tx_packet_index - n_loss, n_loss); + // Log next packet + fprintf(task->fp_tx, "%d;%d;%u;%lu;%lu;%lu;%lu;%lu\n", + lat_info->port_queue_id, + lat_info->tx_packet_index, + lat_info->rx_packet_index, + tsc_to_nsec(lat_tsc), + tsc_to_nsec(tx_tsc - min_tsc), + tsc_to_nsec(rx_tsc - min_tsc), + tsc_to_nsec(tx_err_tsc), + tsc_to_nsec(rx_err_tsc)); +#ifdef LAT_DEBUG + fprintf(task->fp_tx, ";%d from %d;%lu;%lu;%lu", + lat_info->id_in_bulk, + lat_info->bulk_size, + tsc_to_nsec(lat_info->begin - min_tsc), + tsc_to_nsec(lat_info->before - min_tsc), + tsc_to_nsec(lat_info->after - min_tsc)); +#endif + fprintf(task->fp_tx, "\n"); + prev_tx_packet_index = lat_info->tx_packet_index; + } + fflush(task->fp_rx); + fflush(task->fp_tx); + task->latency_buffer_idx = 0; +} + +static void lat_stop(struct task_base *tbase) +{ + struct task_lat *task = (struct task_lat *)tbase; + + if (task->unique_id_pos) { + task_lat_count_remaining_lost_packets(task); + task_lat_reset_eld(task); + } + if (task->latency_buffer) + lat_write_latency_to_file(task); +} + +#ifdef LAT_DEBUG +static void task_lat_store_lat_debug(struct task_lat *task, uint32_t rx_packet_index, uint32_t id_in_bulk, uint32_t bulk_size) +{ + struct lat_info *lat_info = &task->latency_buffer[rx_packet_index]; + + lat_info->bulk_size = bulk_size; + lat_info->id_in_bulk = id_in_bulk; + lat_info->begin = task->begin; + lat_info->before = task->base.aux->tsc_rx.before; + lat_info->after = task->base.aux->tsc_rx.after; +} +#endif + +static void task_lat_store_lat_buf(struct task_lat *task, uint64_t rx_packet_index, struct unique_id *unique_id, uint64_t rx_time, uint64_t tx_time, uint64_t rx_err, uint64_t 
tx_err) +{ + struct lat_info *lat_info; + uint8_t generator_id = 0; + uint32_t packet_index = 0; + + if (unique_id) + unique_id_get(unique_id, &generator_id, &packet_index); + + /* If unique_id_pos is specified then latency is stored per + packet being sent. Lost packets are detected runtime, and + latency stored for those packets will be 0 */ + lat_info = &task->latency_buffer[task->latency_buffer_idx++]; + lat_info->rx_packet_index = task->latency_buffer_idx - 1; + lat_info->tx_packet_index = packet_index; + lat_info->port_queue_id = generator_id; + lat_info->rx_time = rx_time; + lat_info->tx_time = tx_time; + lat_info->rx_err = rx_err; + lat_info->tx_err = tx_err; +} + +static uint32_t task_lat_early_loss_detect(struct task_lat *task, struct unique_id *unique_id) +{ + struct early_loss_detect *eld; + uint8_t generator_id; + uint32_t packet_index; + + unique_id_get(unique_id, &generator_id, &packet_index); + + if (generator_id >= task->generator_count) + return 0; + + eld = &task->eld[generator_id]; + + return early_loss_detect_add(eld, packet_index); +} + +static uint64_t tsc_extrapolate_backward(uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum) +{ + uint64_t tsc = tsc_from - rte_get_tsc_hz()*bytes/1250000000; + if (likely(tsc > tsc_minimum)) + return tsc; + else + return tsc_minimum; +} + +static void lat_test_histogram_add(struct lat_test *lat_test, uint64_t lat_tsc) +{ + uint64_t bucket_id = (lat_tsc >> lat_test->bucket_size); + size_t bucket_count = sizeof(lat_test->buckets)/sizeof(lat_test->buckets[0]); + + bucket_id = bucket_id < bucket_count? 
bucket_id : bucket_count - 1;
+	/* Bug fix: clamping to bucket_count indexed one past the end of
+	   buckets[]; out-of-range latencies now land in the last bucket. */
+	lat_test->buckets[bucket_id]++;
+}
+
+static void lat_test_add_lost(struct lat_test *lat_test, uint64_t lost_packets)
+{
+	lat_test->lost_packets += lost_packets;
+}
+
+// Accumulate one latency sample (TSC units) with its measurement error.
+// Samples whose error exceeds the accuracy limit are counted but excluded
+// from the min/max/avg/variance statistics.
+static void lat_test_add_latency(struct lat_test *lat_test, uint64_t lat_tsc, uint64_t error)
+{
+	lat_test->tot_all_pkts++;
+
+	if (error > lat_test->accuracy_limit_tsc)
+		return;
+	lat_test->tot_pkts++;
+
+	lat_test->tot_lat += lat_tsc;
+	lat_test->tot_lat_error += error;
+
+	/* (a +- b)^2 = a^2 +- (2ab + b^2) */
+	lat_test->var_lat += lat_tsc * lat_tsc;
+	lat_test->var_lat_error += 2 * lat_tsc * error;
+	lat_test->var_lat_error += error * error;
+
+	if (lat_tsc > lat_test->max_lat) {
+		lat_test->max_lat = lat_tsc;
+		lat_test->max_lat_error = error;
+	}
+	if (lat_tsc < lat_test->min_lat) {
+		lat_test->min_lat = lat_tsc;
+		lat_test->min_lat_error = error;
+	}
+
+#ifdef LATENCY_HISTOGRAM
+	lat_test_histogram_add(lat_test, lat_tsc);
+#endif
+}
+
+static int task_lat_can_store_latency(struct task_lat *task)
+{
+	return task->latency_buffer_idx < task->latency_buffer_size;
+}
+
+static void task_lat_store_lat(struct task_lat *task, uint64_t rx_packet_index, uint64_t rx_time, uint64_t tx_time, uint64_t rx_error, uint64_t tx_error, struct unique_id *unique_id)
+{
+	if (tx_time == 0)
+		return;
+	uint32_t lat_tsc = abs_diff(rx_time, tx_time) << LATENCY_ACCURACY;
+
+	lat_test_add_latency(task->lat_test, lat_tsc, rx_error + tx_error);
+
+	if (task_lat_can_store_latency(task)) {
+		task_lat_store_lat_buf(task, rx_packet_index, unique_id, rx_time, tx_time, rx_error, tx_error);
+	}
+}
+
+static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+{
+	struct task_lat *task = (struct task_lat *)tbase;
+	uint64_t rx_time_err;
+
+	uint32_t pkt_rx_time, pkt_tx_time;
+
+	if (n_pkts == 0) {
+		task->begin = tbase->aux->tsc_rx.before;
+		return 0;
+	}
+
+	task_lat_update_lat_test(task);
+
+	const uint64_t rx_tsc = tbase->aux->tsc_rx.after;
+	uint32_t 
tx_time_err = 0; + + /* Go once through all received packets and read them. If + packet has just been modified by another core, the cost of + latency will be partialy amortized though the bulk size */ + for (uint16_t j = 0; j < n_pkts; ++j) { + struct rte_mbuf *mbuf = mbufs[j]; + task->rx_pkt_meta[j].hdr = rte_pktmbuf_mtod(mbuf, uint8_t *); + } + for (uint16_t j = 0; j < n_pkts; ++j) { + } + + if (task->sig) { + for (uint16_t j = 0; j < n_pkts; ++j) { + if (*(uint32_t *)(task->rx_pkt_meta[j].hdr + task->sig_pos) == task->sig) + task->rx_pkt_meta[j].pkt_tx_time = *(uint32_t *)(task->rx_pkt_meta[j].hdr + task->lat_pos); + else + task->rx_pkt_meta[j].pkt_tx_time = 0; + } + } else { + for (uint16_t j = 0; j < n_pkts; ++j) { + task->rx_pkt_meta[j].pkt_tx_time = *(uint32_t *)(task->rx_pkt_meta[j].hdr + task->lat_pos); + } + } + + uint32_t bytes_total_in_bulk = 0; + // Find RX time of first packet, for RX accuracy + for (uint16_t j = 0; j < n_pkts; ++j) { + uint16_t flipped = n_pkts - 1 - j; + + task->rx_pkt_meta[flipped].bytes_after_in_bulk = bytes_total_in_bulk; + bytes_total_in_bulk += mbuf_wire_size(mbufs[flipped]); + } + + pkt_rx_time = tsc_extrapolate_backward(rx_tsc, task->rx_pkt_meta[0].bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY; + if ((uint32_t)((task->begin >> LATENCY_ACCURACY)) > pkt_rx_time) { + // Extrapolation went up to BEFORE begin => packets were stuck in the NIC but we were not seeing them + rx_time_err = pkt_rx_time - (uint32_t)(task->last_pkts_tsc >> LATENCY_ACCURACY); + } else { + rx_time_err = pkt_rx_time - (uint32_t)(task->begin >> LATENCY_ACCURACY); + } + + struct unique_id *unique_id = NULL; + struct delayed_latency_entry *delayed_latency_entry; + + for (uint16_t j = 0; j < n_pkts; ++j) { + struct rx_pkt_meta_data *rx_pkt_meta = &task->rx_pkt_meta[j]; + uint8_t *hdr = rx_pkt_meta->hdr; + + pkt_rx_time = tsc_extrapolate_backward(rx_tsc, rx_pkt_meta->bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY; + pkt_tx_time = 
rx_pkt_meta->pkt_tx_time; + + if (task->unique_id_pos) { + unique_id = (struct unique_id *)(hdr + task->unique_id_pos); + + uint32_t n_loss = task_lat_early_loss_detect(task, unique_id); + lat_test_add_lost(task->lat_test, n_loss); + } + + /* If accuracy is enabled, latency is reported with a + delay of 64 packets since the generator puts the + accuracy for packet N into packet N + 64. The delay + ensures that all reported latencies have both rx + and tx error. */ + if (task->accur_pos) { + tx_time_err = *(uint32_t *)(hdr + task->accur_pos); + + delayed_latency_entry = delayed_latency_get(&task->delayed_latency, task->rx_packet_index - 64); + + if (delayed_latency_entry) { + task_lat_store_lat(task, + task->rx_packet_index, + delayed_latency_entry->pkt_rx_time, + delayed_latency_entry->pkt_tx_time, + delayed_latency_entry->rx_time_err, + tx_time_err, + unique_id); + } + + delayed_latency_entry = delayed_latency_create(&task->delayed_latency, task->rx_packet_index); + delayed_latency_entry->pkt_rx_time = pkt_rx_time; + delayed_latency_entry->pkt_tx_time = pkt_tx_time; + delayed_latency_entry->rx_time_err = rx_time_err; + } else { + task_lat_store_lat(task, + task->rx_packet_index, + pkt_rx_time, + pkt_tx_time, + 0, + 0, + unique_id); + } + task->rx_packet_index++; + } + int ret; + ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL); + task->begin = tbase->aux->tsc_rx.before; + task->last_pkts_tsc = tbase->aux->tsc_rx.after; + return ret; +} + +static void init_task_lat_latency_buffer(struct task_lat *task, uint32_t core_id) +{ + const int socket_id = rte_lcore_to_socket_id(core_id); + char name[256]; + size_t latency_buffer_mem_size = 0; + + if (task->latency_buffer_size > UINT32_MAX - MAX_RING_BURST) + task->latency_buffer_size = UINT32_MAX - MAX_RING_BURST; + + latency_buffer_mem_size = sizeof(struct lat_info) * task->latency_buffer_size; + + task->latency_buffer = prox_zmalloc(latency_buffer_mem_size, socket_id); + PROX_PANIC(task->latency_buffer == NULL, 
"Failed to allocate %zu kbytes for latency buffer\n", latency_buffer_mem_size / 1024);
+
+	/* Bug fix: the panic message above referenced 'name' before it was
+	   ever written (uninitialized read) and used %ld for a size_t.
+	   Also switched sprintf to bounded snprintf and %u for uint32_t. */
+	snprintf(name, sizeof(name), "latency.rx_%u.txt", core_id);
+	task->fp_rx = fopen(name, "w+");
+	PROX_PANIC(task->fp_rx == NULL, "Failed to open %s\n", name);
+
+	snprintf(name, sizeof(name), "latency.tx_%u.txt", core_id);
+	task->fp_tx = fopen(name, "w+");
+	PROX_PANIC(task->fp_tx == NULL, "Failed to open %s\n", name);
+}
+
+// Read the generator count published by the gen tasks (if any) and
+// allocate one early-loss-detect state per generator.
+static void task_lat_init_eld(struct task_lat *task, uint8_t socket_id)
+{
+	uint8_t *generator_count = prox_sh_find_system("generator_count");
+	size_t eld_mem_size;
+
+	if (generator_count == NULL)
+		task->generator_count = 0;
+	else
+		task->generator_count = *generator_count;
+
+	eld_mem_size = sizeof(task->eld[0]) * task->generator_count;
+	task->eld = prox_zmalloc(eld_mem_size, socket_id);
+}
+
+void task_lat_set_accuracy_limit(struct task_lat *task, uint32_t accuracy_limit_nsec)
+{
+	task->limit = nsec_to_tsc(accuracy_limit_nsec);
+}
+
+static void init_task_lat(struct task_base *tbase, struct task_args *targ)
+{
+	struct task_lat *task = (struct task_lat *)tbase;
+	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
+
+	task->lat_pos = targ->lat_pos;
+	task->accur_pos = targ->accur_pos;
+	task->unique_id_pos = targ->packet_id_pos;
+	task->latency_buffer_size = targ->latency_buffer_size;
+
+	if (task->latency_buffer_size) {
+		init_task_lat_latency_buffer(task, targ->lconf->id);
+	}
+
+	if (targ->bucket_size < LATENCY_ACCURACY) {
+		targ->bucket_size = DEFAULT_BUCKET_SIZE;
+	}
+
+	task->lt[0].bucket_size = targ->bucket_size - LATENCY_ACCURACY;
+	task->lt[1].bucket_size = targ->bucket_size - LATENCY_ACCURACY;
+	if (task->unique_id_pos) {
+		task_lat_init_eld(task, socket_id);
+		task_lat_reset_eld(task);
+	}
+	task->lat_test = &task->lt[task->using_lt];
+
+	task_lat_set_accuracy_limit(task, targ->accuracy_limit_nsec);
+	task->rx_pkt_meta = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(*task->rx_pkt_meta), socket_id);
+	PROX_PANIC(task->rx_pkt_meta == NULL, "unable to allocate memory to 
store RX packet meta data"); +} + +static struct task_init task_init_lat = { + .mode_str = "lat", + .init = init_task_lat, + .handle = handle_lat_bulk, + .stop = lat_stop, + .flag_features = TASK_FEATURE_TSC_RX | TASK_FEATURE_RX_ALL | TASK_FEATURE_ZERO_RX | TASK_FEATURE_NEVER_DISCARDS, + .size = sizeof(struct task_lat) +}; + +__attribute__((constructor)) static void reg_task_lat(void) +{ + reg_task(&task_init_lat); +} diff --git a/VNFs/DPPD-PROX/handle_lat.h b/VNFs/DPPD-PROX/handle_lat.h new file mode 100644 index 00000000..a832a641 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lat.h @@ -0,0 +1,189 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HANDLE_LAT_H_ +#define _HANDLE_LAT_H_ + +#include <stdio.h> +#include <math.h> +#include <string.h> + +#include "task_base.h" +#include "clock.h" + +#define MAX_PACKETS_FOR_LATENCY 64 +#define LATENCY_ACCURACY 1 + +struct lat_test { + uint64_t tot_all_pkts; + uint64_t tot_pkts; + uint64_t max_lat; + uint64_t min_lat; + uint64_t tot_lat; + unsigned __int128 var_lat; /* variance */ + uint64_t accuracy_limit_tsc; + + uint64_t max_lat_error; + uint64_t min_lat_error; + uint64_t tot_lat_error; + unsigned __int128 var_lat_error; + + uint64_t buckets[128]; + uint64_t bucket_size; + uint64_t lost_packets; +}; + +static struct time_unit lat_test_get_accuracy_limit(struct lat_test *lat_test) +{ + return tsc_to_time_unit(lat_test->accuracy_limit_tsc); +} + +static struct time_unit_err lat_test_get_avg(struct lat_test *lat_test) +{ + uint64_t tsc; + uint64_t tsc_error; + + tsc = lat_test->tot_lat/lat_test->tot_pkts; + tsc_error = lat_test->tot_lat_error/lat_test->tot_pkts; + + struct time_unit_err ret = { + .time = tsc_to_time_unit(tsc), + .error = tsc_to_time_unit(tsc_error), + }; + + return ret; +} + +static struct time_unit_err lat_test_get_min(struct lat_test *lat_test) +{ + struct time_unit_err ret = { + .time = tsc_to_time_unit(lat_test->min_lat), + .error = tsc_to_time_unit(lat_test->min_lat_error), + }; + + return ret; +} + +static struct time_unit_err lat_test_get_max(struct lat_test *lat_test) +{ + struct time_unit_err ret = { + .time = tsc_to_time_unit(lat_test->max_lat), + .error = tsc_to_time_unit(lat_test->max_lat_error), + }; + + return ret; +} + +static struct time_unit_err lat_test_get_stddev(struct lat_test *lat_test) +{ + unsigned __int128 avg_tsc = lat_test->tot_lat/lat_test->tot_pkts; + unsigned __int128 avg_tsc_squared = avg_tsc * avg_tsc; + unsigned __int128 avg_squares_tsc = lat_test->var_lat/lat_test->tot_pkts; + + /* The assumption is that variance fits into 64 bits, meaning + that standard deviation fits into 32 bits. 
In other words, + the assumption is that the standard deviation is not more + than approximately 1 second. */ + uint64_t var_tsc = avg_squares_tsc - avg_tsc_squared; + uint64_t stddev_tsc = sqrt(var_tsc); + + unsigned __int128 avg_tsc_error = lat_test->tot_lat_error / lat_test->tot_pkts; + unsigned __int128 avg_tsc_squared_error = 2 * avg_tsc * avg_tsc_error + avg_tsc_error * avg_tsc_error; + unsigned __int128 avg_squares_tsc_error = lat_test->var_lat_error / lat_test->tot_pkts; + + uint64_t var_tsc_error = avg_squares_tsc_error + avg_tsc_squared_error; + + /* sqrt(a+-b) = sqrt(a) +- (-sqrt(a) + sqrt(a + b)) */ + + uint64_t stddev_tsc_error = - stddev_tsc + sqrt(var_tsc + var_tsc_error); + + struct time_unit_err ret = { + .time = tsc_to_time_unit(stddev_tsc), + .error = tsc_to_time_unit(stddev_tsc_error), + }; + + return ret; +} + +static void _lat_test_histogram_combine(struct lat_test *dst, struct lat_test *src) +{ + for (size_t i = 0; i < sizeof(dst->buckets)/sizeof(dst->buckets[0]); ++i) + dst->buckets[i] += src->buckets[i]; +} + +static void lat_test_combine(struct lat_test *dst, struct lat_test *src) +{ + dst->tot_all_pkts += src->tot_all_pkts; + + dst->tot_pkts += src->tot_pkts; + + dst->tot_lat += src->tot_lat; + dst->tot_lat_error += src->tot_lat_error; + + /* (a +- b)^2 = a^2 +- (2ab + b^2) */ + dst->var_lat += src->var_lat; + dst->var_lat_error += src->var_lat_error; + + if (src->max_lat > dst->max_lat) { + dst->max_lat = src->max_lat; + dst->max_lat_error = src->max_lat_error; + } + if (src->min_lat < dst->min_lat) { + dst->min_lat = src->min_lat; + dst->min_lat_error = src->min_lat_error; + } + + if (src->accuracy_limit_tsc > dst->accuracy_limit_tsc) + dst->accuracy_limit_tsc = src->accuracy_limit_tsc; + dst->lost_packets += src->lost_packets; + +#ifdef LATENCY_HISTOGRAM + _lat_test_histogram_combine(dst, src); +#endif +} + +static void lat_test_reset(struct lat_test *lat_test) +{ + lat_test->tot_all_pkts = 0; + lat_test->tot_pkts = 0; + 
lat_test->max_lat = 0; + lat_test->min_lat = -1; + lat_test->tot_lat = 0; + lat_test->var_lat = 0; + lat_test->max_lat_error = 0; + lat_test->min_lat_error = 0; + lat_test->tot_lat_error = 0; + lat_test->var_lat_error = 0; + lat_test->accuracy_limit_tsc = 0; + + lat_test->lost_packets = 0; + + memset(lat_test->buckets, 0, sizeof(lat_test->buckets)); +} + +static void lat_test_copy(struct lat_test *dst, struct lat_test *src) +{ + if (src->tot_all_pkts) + memcpy(dst, src, sizeof(struct lat_test)); +} + +struct task_lat; + +struct lat_test *task_lat_get_latency_meassurement(struct task_lat *task); +void task_lat_use_other_latency_meassurement(struct task_lat *task); +void task_lat_set_accuracy_limit(struct task_lat *task, uint32_t accuracy_limit_nsec); + +#endif /* _HANDLE_LAT_H_ */ diff --git a/VNFs/DPPD-PROX/handle_lb_5tuple.c b/VNFs/DPPD-PROX/handle_lb_5tuple.c new file mode 100644 index 00000000..ae973f1c --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lb_5tuple.c @@ -0,0 +1,143 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_hash.h> +#include <rte_ether.h> +#include <rte_memcpy.h> +#include <rte_mbuf.h> +#include <rte_ip.h> +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_version.h> +#include <rte_byteorder.h> + +#include "handle_lb_5tuple.h" +#include "prox_malloc.h" +#include "prox_lua.h" +#include "prox_lua_types.h" +#include "etypes.h" +#include "task_init.h" +#include "task_base.h" +#include "lconf.h" +#include "log.h" +#include "prefetch.h" +#include "prox_globals.h" +#include "defines.h" +#include "quit.h" + +#define BYTE_VALUE_MAX 256 +#define ALL_32_BITS 0xffffffff +#define BIT_8_TO_15 0x0000ff00 + +#define HASH_MAX_SIZE 4*8*1024*1024 + +struct task_lb_5tuple { + struct task_base base; + uint32_t runtime_flags; + struct rte_hash *lookup_hash; + uint8_t out_if[HASH_MAX_SIZE] __rte_cache_aligned; +}; + +static __m128i mask0; +static inline uint8_t get_ipv4_dst_port(struct task_lb_5tuple *task, void *ipv4_hdr, uint8_t portid, struct rte_hash * ipv4_l3fwd_lookup_struct) +{ + int ret = 0; + union ipv4_5tuple_host key; + + ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live); + __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr)); + /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */ + key.xmm = _mm_and_si128(data, mask0); + + /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */ + /* + rte_mov16(&key.pad0, ipv4_hdr); + key.pad0 = 0; + key.pad1 = 0; + */ + /* Find destination port */ + ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key); + return (uint8_t)((ret < 0)? 
portid : task->out_if[ret]); +} + +static inline uint8_t handle_lb_5tuple(struct task_lb_5tuple *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *eth_hdr; + struct ipv4_hdr *ipv4_hdr; + + eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + + switch (eth_hdr->ether_type) { + case ETYPE_IPv4: + /* Handle IPv4 headers.*/ + ipv4_hdr = (struct ipv4_hdr *) (eth_hdr + 1); + return get_ipv4_dst_port(task, ipv4_hdr, OUT_DISCARD, task->lookup_hash); + default: + return OUT_DISCARD; + } +} + +static int handle_lb_5tuple_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_5tuple *task = (struct task_lb_5tuple *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_lb_5tuple(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_lb_5tuple(task, mbufs[j]); + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void init_task_lb_5tuple(struct task_base *tbase, struct task_args *targ) +{ + struct task_lb_5tuple *task = (struct task_lb_5tuple *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + mask0 = _mm_set_epi32(ALL_32_BITS, ALL_32_BITS, ALL_32_BITS, BIT_8_TO_15); + + uint8_t *out_table = task->out_if; + int ret = lua_to_tuples(prox_lua(), GLOBAL, "tuples", socket_id, &task->lookup_hash, &out_table); + PROX_PANIC(ret, "Failed to read tuples from config\n"); + + task->runtime_flags = targ->flags; +} + +static struct task_init task_init_lb_5tuple = { + .mode_str = "lb5tuple", + .init = init_task_lb_5tuple, + .handle = handle_lb_5tuple_bulk, + .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, + .size = 
sizeof(struct task_lb_5tuple), +}; + +__attribute__((constructor)) static void reg_task_lb_5tuple(void) +{ + reg_task(&task_init_lb_5tuple); +} diff --git a/VNFs/DPPD-PROX/handle_lb_5tuple.h b/VNFs/DPPD-PROX/handle_lb_5tuple.h new file mode 100644 index 00000000..bb830fa7 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lb_5tuple.h @@ -0,0 +1,33 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_LB_TUP_H_ +#define _HANDLE_LB_TUP_H_ + +union ipv4_5tuple_host { + struct { + uint8_t pad0; + uint8_t proto; + uint16_t pad1; + uint32_t ip_src; + uint32_t ip_dst; + uint16_t port_src; + uint16_t port_dst; + }; + __m128i xmm; +}; + +#endif /* _HANDLE_LB_TUP_H_ */ diff --git a/VNFs/DPPD-PROX/handle_lb_net.c b/VNFs/DPPD-PROX/handle_lb_net.c new file mode 100644 index 00000000..878b8158 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lb_net.c @@ -0,0 +1,577 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> + +#include <rte_mbuf.h> +#include <rte_ip.h> +#include <rte_table_hash.h> +#include <rte_byteorder.h> +#include <rte_version.h> + +#include "prox_malloc.h" +#include "handle_lb_net.h" +#include "task_base.h" +#include "defines.h" +#include "tx_pkt.h" +#include "log.h" +#include "stats.h" +#include "mpls.h" +#include "etypes.h" +#include "gre.h" +#include "prefetch.h" +#include "qinq.h" +#include "hash_utils.h" +#include "quit.h" +#include "flow_iter.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +struct task_lb_net { + struct task_base base; + uint16_t qinq_tag; + uint8_t bit_mask; + uint8_t nb_worker_threads; + uint8_t worker_byte_offset_ipv4; + uint8_t worker_byte_offset_ipv6; + uint8_t runtime_flags; +}; + +struct task_lb_net_lut { + struct task_base base; + uint8_t nb_worker_threads; + uint8_t runtime_flags; + struct rte_table_hash *worker_hash_table; + uint8_t *worker_lut; + uint32_t keys[64]; + struct rte_mbuf *fake_packets[64]; +}; + +static inline uint8_t handle_lb_net(struct task_lb_net *task, struct rte_mbuf *mbuf); +static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, struct rte_mbuf *mbuf); + +static struct rte_table_hash *setup_gre_to_wt_lookup(struct task_args *targ, uint8_t n_workers, int socket_id) +{ + uint32_t gre_id, rss; + void* entry_in_hash; + int r, key_found = 0; + struct rte_table_hash *ret; + uint32_t count = 0; + + for (int i = 0; i < n_workers; ++i) { + struct core_task ct = targ->core_task_set[0].core_task[i]; + struct task_args *t = core_targ_get(ct.core, ct.task); + + struct flow_iter *it = &t->task_init->flow_iter; + + PROX_PANIC(t->task_init->flow_iter.beg == NULL, + "Load distributor can't find flows owned by destination worker %d\n", i); + + for (it->beg(it, t); !it->is_end(it, t); it->next(it, t)) { + count++; + } + 
} + + struct rte_table_hash_ext_params table_hash_params = { + .key_size = 4, + .n_keys = count, + .n_buckets = count, + .n_buckets_ext = count >> 1, + .f_hash = hash_crc32, + .seed = 0, + .signature_offset = HASH_METADATA_OFFSET(0), + .key_offset = HASH_METADATA_OFFSET(0), + }; + + ret = rte_table_hash_ext_dosig_ops.f_create(&table_hash_params, socket_id, sizeof(uint8_t)); + + for (int i = 0; i < n_workers; ++i) { + struct core_task ct = targ->core_task_set[0].core_task[i]; + struct task_args *t = core_targ_get(ct.core, ct.task); + + PROX_PANIC(t->task_init->flow_iter.beg == NULL, + "Load distributor can't find flows owned by destination worker %d\n", i); + + struct flow_iter *it = &t->task_init->flow_iter; + + for (it->beg(it, t); !it->is_end(it, t); it->next(it, t)) { + uint32_t gre_id = it->get_gre_id(it, t); + uint8_t dst = i; + + r = rte_table_hash_ext_dosig_ops.f_add(ret, &gre_id, &dst, &key_found, &entry_in_hash); + if (r) { + plog_err("Failed to add gre_id = %x, dest worker = %u\n", gre_id, i); + } + else { + plog_dbg("Core %u added: gre_id %x, dest woker = %u\n", targ->lconf->id, gre_id, i); + } + } + } + return ret; +} + +static uint8_t *setup_wt_indexed_table(struct task_args *targ, uint8_t n_workers, int socket_id) +{ + uint32_t gre_id, rss; + uint32_t max_gre_id = 0; + uint8_t queue; + uint8_t *ret = NULL; + void* entry_in_hash; + int key_found = 0; + + for (int i = 0; i < n_workers; ++i) { + struct core_task ct = targ->core_task_set[0].core_task[i]; + struct task_args *t = core_targ_get(ct.core, ct.task); + + struct flow_iter *it = &t->task_init->flow_iter; + + PROX_PANIC(t->task_init->flow_iter.beg == NULL, + "Load distributor can't find flows owned by destination worker %d\n", i); + + for (it->beg(it, t); !it->is_end(it, t); it->next(it, t)) { + uint32_t gre_id = it->get_gre_id(it, t); + if (gre_id > max_gre_id) + max_gre_id = gre_id; + } + } + + PROX_PANIC(max_gre_id == 0, "Failed to get maximum GRE ID from workers"); + + ret = prox_zmalloc(1 + 
max_gre_id, socket_id); + PROX_PANIC(ret == NULL, "Failed to allocate worker_lut\n"); + + for (int i = 0; i < n_workers; ++i) { + struct core_task ct = targ->core_task_set[0].core_task[i]; + struct task_args *t = core_targ_get(ct.core, ct.task); + + PROX_PANIC(t->task_init->flow_iter.beg == NULL, + "Load distributor can't find flows owned by destination worker %d\n", i); + + struct flow_iter *it = &t->task_init->flow_iter; + + for (it->beg(it, t); !it->is_end(it, t); it->next(it, t)) { + uint32_t gre_id = it->get_gre_id(it, t); + uint8_t dst = i; + + ret[gre_id] = dst; + } + } + return ret; +} + +static void init_task_lb_net(struct task_base *tbase, struct task_args *targ) +{ + struct task_lb_net *task = (struct task_lb_net *)tbase; + + task->qinq_tag = targ->qinq_tag; + task->runtime_flags = targ->runtime_flags; + task->worker_byte_offset_ipv6 = 23; + task->worker_byte_offset_ipv4 = 15; + task->nb_worker_threads = targ->nb_worker_threads; + /* The optimal configuration is when the number of worker threads + is a power of 2. In that case, a bit_mask can be used. Setting + the bitmask to 0xff disables the "optimal" usage of bitmasks + and the actual number of worker threads will be used instead. */ + task->bit_mask = rte_is_power_of_2(targ->nb_worker_threads) ? 
targ->nb_worker_threads - 1 : 0xff; +} + +static void init_task_lb_net_lut(struct task_base *tbase, struct task_args *targ) +{ + struct task_lb_net_lut *task = (struct task_lb_net_lut *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->runtime_flags = targ->runtime_flags; + task->nb_worker_threads = targ->nb_worker_threads; + for (uint32_t i = 0; i < 64; ++i) { + task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof (struct rte_mbuf)); + } + + task->worker_hash_table = setup_gre_to_wt_lookup(targ, task->nb_worker_threads, socket_id); +} + +static void init_task_lb_net_indexed_table(struct task_base *tbase, struct task_args *targ) +{ + struct task_lb_net_lut *task = (struct task_lb_net_lut *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->runtime_flags = targ->runtime_flags; + task->nb_worker_threads = targ->nb_worker_threads; + + task->worker_lut = setup_wt_indexed_table(targ, task->nb_worker_threads, socket_id); +} + +static int handle_lb_net_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_net *task = (struct task_lb_net *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_lb_net(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + + for (; j < n_pkts; ++j) { + out[j] = handle_lb_net(task, mbufs[j]); + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static int handle_lb_net_lut_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_net_lut *task = (struct task_lb_net_lut *)tbase; + uint16_t not_dropped = 0; + uint8_t out[MAX_PKT_BURST]; + // process packet, i.e. 
decide if the packet has to be dropped or not and where the packet has to go + uint16_t j; + prefetch_first(mbufs, n_pkts); + + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + uint8_t *wt[MAX_PKT_BURST]; + uint64_t lookup_hit_mask = 0; + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + if (extract_gre_key(task, &task->keys[j], mbufs[j])) { + // Packet will be dropped after lookup + pkts_mask &= ~(1 << j); + out[j] = OUT_DISCARD; + } + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + if (extract_gre_key(task, &task->keys[j], mbufs[j])) { + pkts_mask &= ~(1 << j); + out[j] = OUT_DISCARD; + rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbufs[j], 0)); + } + } +#endif + // keys have been extracted for all packets, now do the lookup + rte_table_hash_ext_dosig_ops.f_lookup(task->worker_hash_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)wt); + /* mbufs now contains the packets that have not been dropped */ + if (likely(lookup_hit_mask == RTE_LEN2MASK(n_pkts, uint64_t))) { + for (j = 0; j < n_pkts; ++j) { + out[j] = *wt[j]; + } + } + else { + for (j = 0; j < n_pkts; ++j) { + if (unlikely(!((lookup_hit_mask >> j) & 0x1))) { + plog_warn("Packet %d keys %x can not be sent to worker thread => dropped\n", j, task->keys[j]); + out[j] = OUT_DISCARD; + } + else { + out[j] = *wt[j]; + } + } + } + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static int handle_lb_net_indexed_table_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_net_lut *task = (struct task_lb_net_lut *)tbase; + uint8_t out[MAX_PKT_BURST]; + // process packet, i.e. 
decide if the packet has to be dropped or not and where the packet has to go + uint16_t j; + uint32_t gre_id; + prefetch_first(mbufs, n_pkts); + + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + if (extract_gre_key(task, &gre_id, mbufs[j])) { + // Packet will be dropped after lookup + pkts_mask &= ~(1 << j); + out[j] = OUT_DISCARD; + } else { + out[j] = task->worker_lut[rte_bswap32(gre_id)]; + } + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + if (extract_gre_key(task, &gre_id, mbufs[j])) { + pkts_mask &= ~(1 << j); + out[j] = OUT_DISCARD; + } else { + out[j] = task->worker_lut[rte_bswap32(gre_id)]; + } + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static inline uint8_t worker_from_mask(struct task_lb_net *task, uint32_t val) +{ + if (task->bit_mask != 0xff) { + return val & task->bit_mask; + } + else { + return val % task->nb_worker_threads; + } +} + +static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, struct rte_mbuf *mbuf) +{ + // For all packets, one by one, remove MPLS tag if any and fills in keys used by "fake" packets + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + // Check for MPLS TAG + struct ipv4_hdr *ip; + if (peth->ether_type == ETYPE_MPLSU) { + struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); + uint32_t mpls_len = 0; + while (!(mpls->bytes & 0x00010000)) { + mpls++; + mpls_len += sizeof(struct mpls_hdr); + } + mpls_len += sizeof(struct mpls_hdr); + ip = (struct ipv4_hdr *)(mpls + 1); + switch (ip->version_ihl >> 4) { + case 4: + // Remove MPLS Tag if requested + if (task->runtime_flags & TASK_MPLS_TAGGING) { + peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth->ether_type 
= ETYPE_IPv4; + } + break; + case 6: + plog_warn("IPv6 not supported in this mode\n"); + return 1;; + default: + plog_warn("Unexpected IP version %d\n", ip->version_ihl >> 4); + return 1; + } + } + else { + ip = (struct ipv4_hdr *)(peth + 1); + } + // Entry point for the packet => check for packet validity + // => do not use extract_key_core(mbufs[j], &task->keys[j]); + // + if (likely(ip->next_proto_id == IPPROTO_GRE)) { + struct gre_hdr *pgre = (struct gre_hdr *)(ip + 1); + if (likely(pgre->bits & GRE_KEY_PRESENT)) { + uint32_t gre_id; + if (pgre->bits & (GRE_CRC_PRESENT | GRE_ROUTING_PRESENT)) { + // gre_id = *((uint32_t *)((uint8_t *)pgre + 8)); + *key = *(uint32_t *)((uint8_t *)pgre + 8); + } + else { + // gre_id = *((uint32_t *)((uint8_t *)pgre + 4)); + *key = *(uint32_t *)((uint8_t *)pgre + 4); + } + } + else { + plog_warn("Key not present\n"); + return 1; + } + } + else { + plog_warn("Invalid protocol: GRE xas expected, got 0x%x\n", ip->next_proto_id); + return 1; + } + return 0; +} + +static inline uint8_t lb_ip4(struct task_lb_net *task, struct ipv4_hdr *ip) +{ + if (unlikely(ip->version_ihl >> 4 != 4)) { + plog_warn("Expected to receive IPv4 packet but IP version was %d\n", + ip->version_ihl >> 4); + return OUT_DISCARD; + } + + if (ip->next_proto_id == IPPROTO_GRE) { + struct gre_hdr *pgre = (struct gre_hdr *)(ip + 1); + + if (pgre->bits & GRE_KEY_PRESENT) { + uint32_t gre_id; + if (pgre->bits & (GRE_CRC_PRESENT | GRE_ROUTING_PRESENT)) { + gre_id = *((uint32_t *)((uint8_t *)pgre + 8)); + } + else { + gre_id = *((uint32_t *)((uint8_t *)pgre + 4)); + } + + gre_id = rte_be_to_cpu_32(gre_id) & 0xFFFFFFF; + uint8_t worker = worker_from_mask(task, gre_id); + plogx_dbg("gre_id = %u worker = %u\n", gre_id, worker); + return worker + task->nb_worker_threads * IPV4; + } + else { + plog_warn("Key not present\n"); + return OUT_DISCARD; + } + } + else if (ip->next_proto_id == IPPROTO_UDP) { + uint8_t worker = worker_from_mask(task, rte_bswap32(ip->dst_addr)); + 
return worker + task->nb_worker_threads * IPV4; + } + return OUT_DISCARD; +} + +static inline uint8_t lb_ip6(struct task_lb_net *task, struct ipv6_hdr *ip) +{ + if (unlikely((*(uint8_t*)ip) >> 4 != 6)) { + plog_warn("Expected to receive IPv6 packet but IP version was %d\n", + *(uint8_t*)ip >> 4); + return OUT_DISCARD; + } + + uint8_t worker = worker_from_mask(task, *((uint8_t *)ip + task->worker_byte_offset_ipv6)); + return worker + task->nb_worker_threads * IPV6; +} + +static inline uint8_t lb_mpls(struct task_lb_net *task, struct ether_hdr *peth, struct rte_mbuf *mbuf) +{ + struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); + uint32_t mpls_len = 0; + while (!(mpls->bytes & 0x00010000)) { + mpls++; + mpls_len += sizeof(struct mpls_hdr); + } + mpls_len += sizeof(struct mpls_hdr); + struct ipv4_hdr *ip = (struct ipv4_hdr *)(mpls + 1); + + switch (ip->version_ihl >> 4) { + case 4: + if (task->runtime_flags & TASK_MPLS_TAGGING) { + peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth->ether_type = ETYPE_IPv4; + } + return lb_ip4(task, ip); + case 6: + if (task->runtime_flags & TASK_MPLS_TAGGING) { + peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth->ether_type = ETYPE_IPv6; + } + return lb_ip6(task, (struct ipv6_hdr *)ip); + default: + plogd_warn(mbuf, "Failed Decoding MPLS Packet - neither IPv4 neither IPv6: version %u for packet : \n", ip->version_ihl); + return OUT_DISCARD; + } +} + +static inline uint8_t lb_qinq(struct task_lb_net *task, struct qinq_hdr *qinq) +{ + if (qinq->cvlan.eth_proto != ETYPE_VLAN) { + plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto); + return OUT_DISCARD; + } + uint32_t qinq_tags = rte_bswap16(qinq->cvlan.vlan_tci & 0xFF0F); + return worker_from_mask(task, qinq_tags); +} + +static inline uint8_t handle_lb_net(struct task_lb_net *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + const uint16_t len = rte_pktmbuf_pkt_len(mbuf); + if 
(len < 60) { + plogd_warn(mbuf, "Unexpected frame len = %d for packet : \n", len); + return OUT_DISCARD; + } + + switch (peth->ether_type) { + case ETYPE_MPLSU: + return lb_mpls(task, peth, mbuf); + case ETYPE_8021ad: + return lb_qinq(task, (struct qinq_hdr *)peth); + case ETYPE_IPv4: + return lb_ip4(task, (struct ipv4_hdr *)(peth + 1)); + case ETYPE_IPv6: + return lb_ip6(task, (struct ipv6_hdr *)(peth + 1)); + case ETYPE_LLDP: + return OUT_DISCARD; + default: + if (peth->ether_type == task->qinq_tag) + return lb_qinq(task, (struct qinq_hdr *)peth); + plogd_warn(mbuf, "Unexpected frame Ether type = %#06x for packet : \n", peth->ether_type); + return OUT_DISCARD; + } + + return 1; +} + +static struct task_init task_init_lb_net = { + .mode_str = "lbnetwork", + .init = init_task_lb_net, + .handle = handle_lb_net_bulk, + .size = sizeof(struct task_lb_net), + .flag_features = TASK_FEATURE_GRE_ID +}; + +static struct task_init task_init_lb_net_lut_qinq_rss = { + .mode_str = "lbnetwork", + .sub_mode_str = "lut_qinq_rss", + .init = init_task_lb_net_lut, + .handle = handle_lb_net_lut_bulk, + .size = sizeof(struct task_lb_net_lut), + .flag_features = TASK_FEATURE_LUT_QINQ_RSS +}; + +static struct task_init task_init_lb_net_lut_qinq_hash = { + .mode_str = "lbnetwork", + .sub_mode_str = "lut_qinq_hash", + .init = init_task_lb_net_lut, + .handle = handle_lb_net_lut_bulk, + .size = sizeof(struct task_lb_net_lut), + .flag_features = TASK_FEATURE_LUT_QINQ_HASH +}; + +static struct task_init task_init_lb_net_indexed_table_rss = { + .mode_str = "lbnetwork", + .sub_mode_str = "indexed_table_rss", + .init = init_task_lb_net_indexed_table, + .handle = handle_lb_net_indexed_table_bulk, + .size = sizeof(struct task_lb_net_lut), + .flag_features = TASK_FEATURE_LUT_QINQ_RSS +}; + +static struct task_init task_init_lb_net_indexed_table_hash = { + .mode_str = "lbnetwork", + .sub_mode_str = "indexed_table_hash", + .init = init_task_lb_net_indexed_table, + .handle = 
handle_lb_net_indexed_table_bulk, + .size = sizeof(struct task_lb_net_lut), + .flag_features = TASK_FEATURE_LUT_QINQ_HASH +}; + +__attribute__((constructor)) static void reg_task_lb_net(void) +{ + reg_task(&task_init_lb_net); + reg_task(&task_init_lb_net_lut_qinq_rss); + reg_task(&task_init_lb_net_lut_qinq_hash); + reg_task(&task_init_lb_net_indexed_table_rss); + reg_task(&task_init_lb_net_indexed_table_hash); +} diff --git a/VNFs/DPPD-PROX/handle_lb_net.h b/VNFs/DPPD-PROX/handle_lb_net.h new file mode 100644 index 00000000..4124fbd6 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lb_net.h @@ -0,0 +1,27 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_LB_NET_H_ +#define _HANDLE_LB_NET_H_ + +#include "defaults.h" + +static inline int8_t rss_to_queue(int rss, int nb_queues) +{ + return (rss & ((1 << MAX_RSS_QUEUE_BITS) - 1)) % nb_queues; +} + +#endif /* _HANDLE_LB_NET_H_ */ diff --git a/VNFs/DPPD-PROX/handle_lb_pos.c b/VNFs/DPPD-PROX/handle_lb_pos.c new file mode 100644 index 00000000..4324e94d --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lb_pos.c @@ -0,0 +1,156 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_mbuf.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_hash_crc.h> + +#include "log.h" +#include "task_base.h" +#include "defines.h" +#include "tx_pkt.h" +#include "task_init.h" +#include "quit.h" +#include "mpls.h" +#include "etypes.h" +#include "gre.h" +#include "prefetch.h" + +struct task_lb_pos { + struct task_base base; + uint16_t byte_offset; + uint8_t n_workers; +}; + +static void init_task_lb_pos(struct task_base *tbase, struct task_args *targ) +{ + struct task_lb_pos *task = (struct task_lb_pos *)tbase; + + task->n_workers = targ->nb_worker_threads; + task->byte_offset = targ->byte_offset; +} + +static int handle_lb_pos_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_pos *task = (struct task_lb_pos *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t offset = task->byte_offset; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + uint8_t* pkt = rte_pktmbuf_mtod(mbufs[j], uint8_t*); + out[j] = pkt[offset] % task->n_workers; + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + uint8_t* pkt = rte_pktmbuf_mtod(mbufs[j], uint8_t*); + out[j] = pkt[offset] % task->n_workers; + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +union ip_port { + struct { + uint32_t ip; + uint32_t port; 
+ }; + uint64_t ip_port; +}; + +struct pkt_ether_ipv4_udp { + struct ether_hdr ether; + struct ipv4_hdr ipv4; + struct udp_hdr udp; +} __attribute__((unused)); + +static uint8_t handle_lb_ip_port(struct task_lb_pos *task, struct rte_mbuf *mbuf) +{ + union ip_port ip_port; + uint8_t ret; + + struct pkt_ether_ipv4_udp *pkt = rte_pktmbuf_mtod(mbuf, void *); + + if (pkt->ether.ether_type != ETYPE_IPv4 || + (pkt->ipv4.next_proto_id != IPPROTO_TCP && + pkt->ipv4.next_proto_id != IPPROTO_UDP)) + return OUT_DISCARD; + + if (task->byte_offset == 0) { + ip_port.ip = pkt->ipv4.src_addr; + ip_port.port = pkt->udp.src_port; + } + else { + ip_port.ip = pkt->ipv4.dst_addr; + ip_port.port = pkt->udp.dst_port; + } + + return rte_hash_crc(&ip_port.ip_port, sizeof(ip_port.ip_port), 0) % task->n_workers; +} + +static int handle_lb_ip_port_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_pos *task = (struct task_lb_pos *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + uint64_t ip_port = 0; + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_lb_ip_port(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_lb_ip_port(task, mbufs[j]); + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static struct task_init task_init_lb_pos = { + .mode_str = "lbpos", + .init = init_task_lb_pos, + .handle = handle_lb_pos_bulk, + .size = sizeof(struct task_lb_pos) +}; + +static struct task_init task_init_lb_pos2 = { + .mode_str = "lbpos", + .sub_mode_str = "ip_port", + .init = init_task_lb_pos, + .handle = handle_lb_ip_port_bulk, + .size = sizeof(struct task_lb_pos) +}; + +__attribute__((constructor)) static void reg_task_lb_pos(void) +{ + 
reg_task(&task_init_lb_pos); + reg_task(&task_init_lb_pos2); +} diff --git a/VNFs/DPPD-PROX/handle_lb_qinq.c b/VNFs/DPPD-PROX/handle_lb_qinq.c new file mode 100644 index 00000000..d58703c5 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_lb_qinq.c @@ -0,0 +1,377 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> + +#include <rte_mbuf.h> +#include <rte_ip.h> +#include <rte_byteorder.h> +#include <rte_version.h> + +#include "prox_malloc.h" +#include "task_base.h" +#include "tx_pkt.h" +#include "rx_pkt.h" +#include "etypes.h" +#include "log.h" +#include "quit.h" +#include "qinq.h" +#include "lconf.h" +#include "prefetch.h" +#include "defines.h" +#include "prox_cfg.h" +#include "hash_utils.h" +#include "handle_lb_net.h" +#include "toeplitz.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +/* Load balancing based on one byte, figures out what type of packet + is passed and depending on the type, pass the packet to the correct + worker thread. If an unsupported packet type is used, the packet is + simply dropped. This Load balancer can only handling QinQ packets + (i.e. packets comming from the vCPE). 
*/ +int handle_lb_qinq_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +int handle_lb_qinq_bulk_set_port(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); + +struct task_lb_qinq { + struct task_base base; + uint8_t *worker_table; + uint8_t bit_mask; + uint8_t protocols_mask; + uint8_t nb_worker_threads; + uint16_t qinq_tag; +}; + +static void init_task_lb_qinq(struct task_base *tbase, struct task_args *targ) +{ + struct task_lb_qinq *task = (struct task_lb_qinq *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->qinq_tag = targ->qinq_tag; + task->nb_worker_threads = targ->nb_worker_threads; + task->bit_mask = rte_is_power_of_2(targ->nb_worker_threads) ? targ->nb_worker_threads - 1 : 0xff; + + /* The load distributor is sending to a set of cores. These + cores are responsible for handling a set of flows + identified by a qinq tag. The load distributor identifies + the flows and forwards them to the appropriate worker. The + mapping from flow to worker is stored within the + work_table. Build the worker_table by asking each worker + which flows are handled. 
*/ + + task->worker_table = prox_zmalloc(0x1000000, socket_id); + for (int i = 0; i < targ->nb_worker_threads; ++i) { + struct core_task ct = targ->core_task_set[0].core_task[i]; + struct task_args *t = core_targ_get(ct.core, ct.task); + + PROX_PANIC(t->task_init->flow_iter.beg == NULL, + "Load distributor can't find flows owned by destination worker %d\n", i); + + struct flow_iter *it = &t->task_init->flow_iter; + + int cnt = 0; + for (it->beg(it, t); !it->is_end(it, t); it->next(it, t)) { + uint16_t svlan = it->get_svlan(it, t); + uint16_t cvlan = it->get_cvlan(it, t); + + task->worker_table[PKT_TO_LUTQINQ(svlan, cvlan)] = i; + } + + } + + /* Check which protocols we are allowed to send to worker tasks */ + for (int i = 0; i < MAX_PROTOCOLS; ++i) { + int is_active = !!targ->core_task_set[i].n_elems; + task->protocols_mask |= is_active << i; + } + plog_info("\t\ttask_lb_qinq protocols_mask = 0x%x\n", task->protocols_mask); + + if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_RSS) + tbase->flags |= BASE_FLAG_LUT_QINQ_RSS; + if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_HASH) + tbase->flags |= BASE_FLAG_LUT_QINQ_HASH; + plog_info("\t\ttask_lb_qinq flags = 0x%x\n", tbase->flags); +} + +static struct task_init task_init_lb_qinq = { + .mode_str = "lbqinq", + .init = init_task_lb_qinq, + .handle = handle_lb_qinq_bulk, + .size = sizeof(struct task_lb_qinq) +}; + +/* + Add correct port id to mbufs coming from a DPDK ring port in the loadbalancer. + For the split-bng using DPDK rings between the vSwitch and the VMs + we need to know the port from which a packet was received. + The ring PMD in dpdk does not update the port field in the mbuf + and thus we have no control over the port numbers that are being used. + This submode allows the loadbalancer to set the port number on which it + received the mbuf. 
+*/ +static struct task_init task_init_lb_qinq_set_port = { + .mode_str = "lbqinq", + .sub_mode_str = "lut_qinq_set_port", + .init = init_task_lb_qinq, + .handle = handle_lb_qinq_bulk_set_port, + .size = sizeof(struct task_lb_qinq) +}; + +/* + Load Balance on Hash of combination of cvlan and svlan +*/ +static struct task_init task_init_lb_qinq_hash_friend = { + .mode_str = "lbqinq", + .sub_mode_str ="lut_qinq_hash_friend", + .init = init_task_lb_qinq, + .handle = handle_lb_qinq_bulk, + .flag_features = TASK_FEATURE_LUT_QINQ_HASH, + .size = sizeof(struct task_lb_qinq) +}; + +/* + Load Balance on rss of combination of cvlan and svlan. + This could be used to compare with HW implementations. +*/ +static struct task_init task_init_lb_qinq_rss_friend = { + .mode_str = "lbqinq", + .sub_mode_str ="lut_qinq_rss_friend", + .init = init_task_lb_qinq, + .handle = handle_lb_qinq_bulk, + .flag_features = TASK_FEATURE_LUT_QINQ_RSS, + .size = sizeof(struct task_lb_qinq) +}; + +__attribute__((constructor)) static void reg_task_lb_qinq(void) +{ + reg_task(&task_init_lb_qinq); + reg_task(&task_init_lb_qinq_hash_friend); + reg_task(&task_init_lb_qinq_rss_friend); + reg_task(&task_init_lb_qinq_set_port); +} + +static inline uint8_t handle_lb_qinq(struct task_lb_qinq *task, struct rte_mbuf *mbuf); + +int handle_lb_qinq_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_qinq *task = (struct task_lb_qinq *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_lb_qinq(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_lb_qinq(task, mbufs[j]); + } +#endif + + return task->base.tx_pkt(&task->base, 
mbufs, n_pkts, out); +} + +int handle_lb_qinq_bulk_set_port(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_lb_qinq *task = (struct task_lb_qinq *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) + uint32_t port_id = mbufs[0]->pkt.in_port; +#else + uint32_t port_id = mbufs[0]->port; +#endif + + if (tbase->rx_pkt == rx_pkt_hw) { + port_id = tbase->rx_params_hw.last_read_portid + tbase->rx_params_hw.nb_rxports; + port_id = ( port_id - 1 ) % tbase->rx_params_hw.nb_rxports; + port_id = tbase->rx_params_hw.rx_pq[port_id].port; + } else if (tbase->rx_pkt == rx_pkt_hw1) { + port_id = tbase->rx_params_hw1.rx_pq.port; + } + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) + mbufs[j]->pkt.in_port = port_id; +#else + mbufs[j]->port = port_id; +#endif + out[j] = handle_lb_qinq(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) + mbufs[j]->pkt.in_port = port_id; +#else + mbufs[j]->port = port_id; +#endif + out[j] = handle_lb_qinq(task, mbufs[j]); + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +struct qinq_packet { + struct qinq_hdr qinq_hdr; + union { + struct ipv4_hdr ipv4_hdr; + struct ipv6_hdr ipv6_hdr; + }; +} __attribute__((packed)); + +struct qinq_packet_data { + struct ether_addr d_addr; + struct ether_addr s_addr; + uint64_t qinq; +} __attribute__((packed)); + +struct ether_packet { + struct ether_hdr ether_hdr; + union { + struct ipv4_hdr ipv4_hdr; + struct ipv6_hdr ipv6_hdr; + }; +} __attribute__((packed)); + +struct cpe_packet { + union { + struct qinq_packet qp; + struct ether_packet ep; + struct 
qinq_packet_data qd; + }; +}; + +static inline uint8_t get_worker(struct task_lb_qinq *task, struct cpe_packet *packet) +{ + uint8_t worker = 0; + if (((struct task_base *)task)->flags & BASE_FLAG_LUT_QINQ_HASH) { + // Load Balance on Hash of combination of cvlan and svlan + uint64_t qinq_net = packet->qd.qinq; + qinq_net = qinq_net & 0xFF0F0000FF0F0000; // Mask Proto and QoS bits + if (task->bit_mask != 0xff) { + worker = hash_crc32(&qinq_net,8,0) & task->bit_mask; + } + else { + worker = hash_crc32(&qinq_net,8,0) % task->nb_worker_threads; + } + plogx_dbg("Sending packet svlan=%x, cvlan=%x, pseudo_qinq=%lx to worker %d\n", rte_bswap16(0xFF0F & packet->qp.qinq_hdr.svlan.vlan_tci), rte_bswap16(0xFF0F & packet->qp.qinq_hdr.cvlan.vlan_tci), qinq_net, worker); + } else if (((struct task_base *)task)->flags & BASE_FLAG_LUT_QINQ_RSS){ + // Load Balance on rss of combination of cvlan and svlan + uint32_t qinq = (packet->qp.qinq_hdr.cvlan.vlan_tci & 0xFF0F) << 16; + uint32_t rss = toeplitz_hash((uint8_t *)&qinq, 4); + if (task->bit_mask != 0xff) { + worker = rss & task->bit_mask; + } else { + worker = (0x1ff & rss) % task->nb_worker_threads; + } + plogx_dbg("Sending packet svlan=%x, cvlan=%x, rss_input=%x, rss=%x to worker %d\n", rte_bswap16(0xFF0F & packet->qp.qinq_hdr.svlan.vlan_tci), rte_bswap16(0xFF0F & packet->qp.qinq_hdr.cvlan.vlan_tci), qinq, rss, worker); + } else { + uint16_t svlan = packet->qp.qinq_hdr.svlan.vlan_tci; + uint16_t cvlan = packet->qp.qinq_hdr.cvlan.vlan_tci; + prefetch_nta(&task->worker_table[PKT_TO_LUTQINQ(svlan, cvlan)]); + worker = task->worker_table[PKT_TO_LUTQINQ(svlan, cvlan)]; + + const size_t pos = offsetof(struct cpe_packet, qp.qinq_hdr.cvlan.vlan_tci); + plogx_dbg("qinq = %u, worker = %u, pos = %lu\n", rte_be_to_cpu_16(cvlan), worker, pos); + } + return worker; +} + +static inline uint8_t handle_lb_qinq(struct task_lb_qinq *task, struct rte_mbuf *mbuf) +{ + struct cpe_packet *packet = rte_pktmbuf_mtod(mbuf, struct cpe_packet*); + if 
(packet->ep.ether_hdr.ether_type == ETYPE_IPv4) { + if (unlikely((packet->ep.ipv4_hdr.version_ihl >> 4) != 4)) { + plogx_err("Invalid Version %u for ETYPE_IPv4\n", packet->ep.ipv4_hdr.version_ihl); + return OUT_DISCARD; + } + /* use 24 bits from the IP, clients are from the 10.0.0.0/8 network */ + const uint32_t tmp = rte_bswap32(packet->ep.ipv4_hdr.src_addr) & 0x00FFFFFF; + const uint32_t svlan = rte_bswap16(tmp >> 12); + const uint32_t cvlan = rte_bswap16(tmp & 0x0FFF); + prefetch_nta(&task->worker_table[PKT_TO_LUTQINQ(svlan, cvlan)]); + uint8_t worker = task->worker_table[PKT_TO_LUTQINQ(svlan, cvlan)]; + return worker + IPV4 * task->nb_worker_threads; + } + else if (unlikely(packet->qp.qinq_hdr.svlan.eth_proto != task->qinq_tag)) { + /* might receive LLDP from the L2 switch... */ + if (packet->qp.qinq_hdr.svlan.eth_proto != ETYPE_LLDP) { + plogdx_err(mbuf, "Invalid packet for LB in QinQ mode\n"); + } + return OUT_DISCARD; + } + + uint8_t worker = 0; + uint8_t proto = 0xFF; + switch (packet->qp.qinq_hdr.ether_type) { + case ETYPE_IPv4: { + if (unlikely((packet->qp.ipv4_hdr.version_ihl >> 4) != 4)) { + plogx_err("Invalid Version %u for ETYPE_IPv4\n", packet->qp.ipv4_hdr.version_ihl); + return OUT_DISCARD; + } + worker = get_worker(task, packet); + proto = IPV4; + break; + } + case ETYPE_IPv6: { + if (unlikely((packet->qp.ipv4_hdr.version_ihl >> 4) != 6)) { + plogx_err("Invalid Version %u for ETYPE_IPv6\n", packet->qp.ipv4_hdr.version_ihl); + return OUT_DISCARD; + } + /* Use IP Destination when IPV6 QinQ */ + if (task->bit_mask != 0xff) { + worker = ((uint8_t *)packet)[61] & task->bit_mask; + } + else { + worker = ((uint8_t *)packet)[61] % task->nb_worker_threads; + } + proto = IPV6; + break; + } + case ETYPE_ARP: { + // We can only send to ARP ring if it exists + if (0 != (task->protocols_mask & (1 << ARP))) { + proto = ARP; + } else { + proto = IPV4; + } + worker = get_worker(task, packet); + break; + } + default: + plogx_warn("Error in ETYPE_8021ad: ether_type = 
%#06x\n", packet->qp.qinq_hdr.ether_type); + return OUT_DISCARD; + } + + return worker + proto * task->nb_worker_threads; +} diff --git a/VNFs/DPPD-PROX/handle_mirror.c b/VNFs/DPPD-PROX/handle_mirror.c new file mode 100644 index 00000000..0d764b4d --- /dev/null +++ b/VNFs/DPPD-PROX/handle_mirror.c @@ -0,0 +1,161 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <rte_mbuf.h> + +#include "mbuf_utils.h" +#include "task_init.h" +#include "task_base.h" +#include "lconf.h" +#include "log.h" +#include "prox_port_cfg.h" +#include "quit.h" + +/* Task that sends packets to multiple outputs. Note that in case of n + outputs, the output packet rate is n times the input packet + rate. Also, since the packet is duplicated by increasing the + refcnt, a change to a packet in subsequent tasks connected through + one of the outputs of this task will also change the packets as + seen by tasks connected behind through other outputs. The correct + way to resolve this is to create deep copies of the packet. 
*/ +struct task_mirror { + struct task_base base; + uint32_t n_dests; +}; + +struct task_mirror_copy { + struct task_base base; + struct rte_mempool *mempool; + uint32_t n_dests; +}; + +static int handle_mirror_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + int ret = 0; + struct task_mirror *task = (struct task_mirror *)tbase; + uint8_t out[MAX_PKT_BURST]; + struct rte_mbuf *mbufs2[MAX_PKT_BURST]; + + /* Since after calling tx_pkt the mbufs parameter of a handle + function becomes invalid and handle_mirror calls tx_pkt + multiple times, the pointers are copied first. This copy is + used in each call to tx_pkt below. */ + rte_memcpy(mbufs2, mbufs, sizeof(mbufs[0]) * n_pkts); + + for (uint16_t j = 0; j < n_pkts; ++j) { + rte_pktmbuf_refcnt_update(mbufs2[j], task->n_dests - 1); + } + for (uint16_t j = 0; j < task->n_dests; ++j) { + memset(out, j, n_pkts); + + ret+= task->base.tx_pkt(&task->base, mbufs2, n_pkts, out); + } + return ret; +} + +static int handle_mirror_bulk_copy(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_mirror_copy *task = (struct task_mirror_copy *)tbase; + uint8_t out[MAX_PKT_BURST]; + int ret = 0; + + /* Send copies of the packet to all but the first + destination */ + struct rte_mbuf *new_pkts[MAX_PKT_BURST]; + + for (uint16_t j = 1; j < task->n_dests; ++j) { + if (rte_mempool_get_bulk(task->mempool, (void **)new_pkts, n_pkts) < 0) { + continue; + } + /* Finally, forward the incoming packets. 
*/ + for (uint16_t i = 0; i < n_pkts; ++i) { + void *dst, *src; + uint16_t pkt_len; + + out[i] = j; + init_mbuf_seg(new_pkts[i]); + + pkt_len = rte_pktmbuf_pkt_len(mbufs[i]); + rte_pktmbuf_pkt_len(new_pkts[i]) = pkt_len; + rte_pktmbuf_data_len(new_pkts[i]) = pkt_len; + + dst = rte_pktmbuf_mtod(new_pkts[i], void *); + src = rte_pktmbuf_mtod(mbufs[i], void *); + + rte_memcpy(dst, src, pkt_len); + } + ret+= task->base.tx_pkt(&task->base, new_pkts, n_pkts, out); + } + + /* Finally, forward the incoming packets to the first destination. */ + memset(out, 0, n_pkts); + ret+= task->base.tx_pkt(&task->base, mbufs, n_pkts, out); + return ret; +} + +static void init_task_mirror(struct task_base *tbase, struct task_args *targ) +{ + struct task_mirror *task = (struct task_mirror *)tbase; + + task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings; +} + +static void init_task_mirror_copy(struct task_base *tbase, struct task_args *targ) +{ + static char name[] = "mirror_pool"; + struct task_mirror_copy *task = (struct task_mirror_copy *)tbase; + const int sock_id = rte_lcore_to_socket_id(targ->lconf->id); + task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings; + + name[0]++; + task->mempool = rte_mempool_create(name, + targ->nb_mbuf - 1, MBUF_SIZE, + targ->nb_cache_mbuf, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, 0, + sock_id, 0); + PROX_PANIC(task->mempool == NULL, + "Failed to allocate memory pool on socket %u with %u elements\n", + sock_id, targ->nb_mbuf - 1); + task->n_dests = targ->nb_txports? 
targ->nb_txports : targ->nb_txrings; +} + +static struct task_init task_init_mirror = { + .mode_str = "mirror", + .init = init_task_mirror, + .handle = handle_mirror_bulk, + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS | TASK_FEATURE_TXQ_FLAGS_REFCOUNT, + .size = sizeof(struct task_mirror), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +static struct task_init task_init_mirror2 = { + .mode_str = "mirror", + .sub_mode_str = "copy", + .init = init_task_mirror_copy, + .handle = handle_mirror_bulk_copy, + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, + .size = sizeof(struct task_mirror), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_mirror(void) +{ + reg_task(&task_init_mirror); + reg_task(&task_init_mirror2); +} diff --git a/VNFs/DPPD-PROX/handle_mplstag.c b/VNFs/DPPD-PROX/handle_mplstag.c new file mode 100644 index 00000000..ce5996eb --- /dev/null +++ b/VNFs/DPPD-PROX/handle_mplstag.c @@ -0,0 +1,157 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "defines.h" +#include "hash_entry_types.h" +#include "mpls.h" +#include "prefetch.h" +#include "task_base.h" +#include "tx_pkt.h" +#include "task_init.h" +#include "prox_port_cfg.h" +#include "prox_cksum.h" +#include "thread_generic.h" +#include "prefetch.h" +#include "prox_assert.h" +#include "etypes.h" +#include "log.h" +#include "mbuf_utils.h" + +struct task_unmpls { + struct task_base base; + uint8_t n_tags; +}; + +static void init_task_unmpls(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ +} + +static inline uint8_t handle_unmpls(__attribute__((unused)) struct task_unmpls *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); + uint32_t mpls_len = sizeof(struct mpls_hdr); + while (!(mpls->bytes & 0x00010000)) { + mpls++; + mpls_len += sizeof(struct mpls_hdr); + } + uint32_t tot_eth_addr_len = 2*sizeof(struct ether_addr); + rte_memcpy(((uint8_t *)peth) + mpls_len, peth, tot_eth_addr_len); + struct ipv4_hdr *ip = (struct ipv4_hdr *)(mpls + 1); + switch (ip->version_ihl >> 4) { + case 4: + peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth->ether_type = ETYPE_IPv4; + return 0; + case 6: + peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth->ether_type = ETYPE_IPv6; + return 0; + default: + plog_warn("Failed Decoding MPLS Packet - neither IPv4 nor IPv6: version %u\n", ip->version_ihl); + return OUT_DISCARD; + } +} + +static int handle_unmpls_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_unmpls *task = (struct task_unmpls *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + prefetch_first(mbufs, n_pkts); + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = 
handle_unmpls(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_unmpls(task, mbufs[j]); + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static struct task_init task_init_unmpls = { + .mode_str = "unmpls", + .init = init_task_unmpls, + .handle = handle_unmpls_bulk, + .thread_x = thread_generic, + .size = sizeof(struct task_unmpls) +}; + +struct task_tagmpls { + struct task_base base; + uint8_t n_tags; +}; + +static void init_task_tagmpls(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ +} + +static inline uint8_t handle_tagmpls(__attribute__((unused)) struct task_tagmpls *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 4); + PROX_ASSERT(peth); + rte_prefetch0(peth); + uint32_t mpls = 0; + + uint32_t tot_eth_addr_len = 2*sizeof(struct ether_addr); + rte_memcpy(peth, ((uint8_t *)peth) + sizeof(struct mpls_hdr), tot_eth_addr_len); + *((uint32_t *)(peth + 1)) = mpls | 0x00010000; // Set BoS to 1 + peth->ether_type = ETYPE_MPLSU; + return 0; +} + +static int handle_tagmpls_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_tagmpls *task = (struct task_tagmpls *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + prefetch_first(mbufs, n_pkts); + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_tagmpls(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_tagmpls(task, mbufs[j]); + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static struct task_init task_init_tagmpls = { + .mode_str = "tagmpls", + 
.init = init_task_tagmpls, + .handle = handle_tagmpls_bulk, + .size = sizeof(struct task_tagmpls) +}; + +__attribute__((constructor)) static void reg_task_mplstag(void) +{ + reg_task(&task_init_unmpls); + reg_task(&task_init_tagmpls); +} diff --git a/VNFs/DPPD-PROX/handle_nat.c b/VNFs/DPPD-PROX/handle_nat.c new file mode 100644 index 00000000..23d7ad87 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_nat.c @@ -0,0 +1,196 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_mbuf.h> +#include <rte_hash.h> +#include <rte_hash_crc.h> +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_version.h> +#include <rte_byteorder.h> + +#include "prox_lua_types.h" +#include "prox_lua.h" +#include "prox_malloc.h" +#include "prox_cksum.h" +#include "prefetch.h" +#include "etypes.h" +#include "log.h" +#include "quit.h" +#include "task_init.h" +#include "task_base.h" +#include "lconf.h" +#include "log.h" +#include "prox_port_cfg.h" + +struct task_nat { + struct task_base base; + struct rte_hash *hash; + uint32_t *entries; + int use_src; + int offload_crc; +}; + +struct pkt_eth_ipv4 { + struct ether_hdr ether_hdr; + struct ipv4_hdr ipv4_hdr; +} __attribute__((packed)); + +static int handle_nat(struct task_nat *task, struct rte_mbuf *mbuf) +{ + uint32_t *ip_addr; + struct pkt_eth_ipv4 *pkt = rte_pktmbuf_mtod(mbuf, struct pkt_eth_ipv4 *); + int ret; + + /* Currently, only support eth/ipv4 packets */ + if (pkt->ether_hdr.ether_type != ETYPE_IPv4) + return OUT_DISCARD; + if (task->use_src) + ip_addr = &(pkt->ipv4_hdr.src_addr); + else + ip_addr = &(pkt->ipv4_hdr.dst_addr); + + ret = rte_hash_lookup(task->hash, ip_addr); + + /* Drop all packets for which no translation has been + configured. 
*/ + if (ret < 0) + return OUT_DISCARD; + + *ip_addr = task->entries[ret]; + prox_ip_udp_cksum(mbuf, &pkt->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + return 0; +} + +static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_nat *task = (struct task_nat *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + prefetch_first(mbufs, n_pkts); + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_nat(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_nat(task, mbufs[j]); + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static int lua_to_hash_nat(struct lua_State *L, enum lua_place from, const char *name, + uint8_t socket, struct rte_hash **hash, uint32_t **entries) +{ + struct rte_hash *ret_hash; + uint32_t *ret_entries; + uint32_t n_entries; + uint32_t ip_from, ip_to; + int ret, pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + lua_len(L, -1); + n_entries = lua_tointeger(L, -1); + lua_pop(L, 1); + + PROX_PANIC(n_entries == 0, "No entries for NAT\n"); + + static char hash_name[30] = "000_hash_nat_table"; + + const struct rte_hash_parameters hash_params = { + .name = hash_name, + .entries = n_entries * 4, + .key_len = sizeof(ip_from), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + }; + + ret_hash = rte_hash_create(&hash_params); + PROX_PANIC(ret_hash == NULL, "Failed to set up hash table for NAT\n"); + name++; + ret_entries = prox_zmalloc(n_entries * sizeof(ip_to), socket); + PROX_PANIC(ret_entries == NULL, "Failed to allocate memory for NAT %u entries\n", n_entries); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_ip(L, TABLE, "from", &ip_from) 
|| + lua_to_ip(L, TABLE, "to", &ip_to)) + return -1; + + ip_from = rte_bswap32(ip_from); + ip_to = rte_bswap32(ip_to); + + ret = rte_hash_lookup(ret_hash, (const void *)&ip_from); + PROX_PANIC(ret >= 0, "Key %x already exists in NAT hash table\n", ip_from); + + ret = rte_hash_add_key(ret_hash, (const void *)&ip_from); + + PROX_PANIC(ret < 0, "Failed to add Key %x to NAT hash table\n", ip_from); + ret_entries[ret] = ip_to; + lua_pop(L, 1); + } + + lua_pop(L, pop); + + *hash = ret_hash; + *entries = ret_entries; + return 0; +} + +static void init_task_nat(struct task_base *tbase, struct task_args *targ) +{ + struct task_nat *task = (struct task_nat *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + int ret; + + /* Use destination IP by default. */ + task->use_src = targ->use_src; + + PROX_PANIC(!strcmp(targ->nat_table, ""), "No nat table specified\n"); + ret = lua_to_hash_nat(prox_lua(), GLOBAL, targ->nat_table, socket_id, &task->hash, &task->entries); + PROX_PANIC(ret != 0, "Failed to load NAT table from lua:\n%s\n", get_lua_to_errors()); + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + task->offload_crc = port->capabilities.tx_offload_cksum; + } + +} + +/* Basic static nat. 
*/ +static struct task_init task_init_nat = { + .mode_str = "nat", + .init = init_task_nat, + .handle = handle_nat_bulk, +#ifdef SOFT_CRC + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, +#else + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, +#endif + .size = sizeof(struct task_nat), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_nat(void) +{ + reg_task(&task_init_nat); +} diff --git a/VNFs/DPPD-PROX/handle_nop.c b/VNFs/DPPD-PROX/handle_nop.c new file mode 100644 index 00000000..b3eef54c --- /dev/null +++ b/VNFs/DPPD-PROX/handle_nop.c @@ -0,0 +1,53 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "handle_nop.h" +#include "thread_nop.h" + +static struct task_init task_init_nop_thrpt_opt = { + .mode_str = "nop", + .init = NULL, + .handle = handle_nop_bulk, + .thread_x = thread_nop, + .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_THROUGHPUT_OPT|TASK_FEATURE_MULTI_RX, + .size = sizeof(struct task_nop), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +static struct task_init task_init_nop_lat_opt = { + .mode_str = "nop", + .sub_mode_str = "latency optimized", + .init = NULL, + .handle = handle_nop_bulk, + .thread_x = thread_nop, + .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_MULTI_RX, + .size = sizeof(struct task_nop), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +static struct task_init task_init_none; + +__attribute__((constructor)) static void reg_task_nop(void) +{ + reg_task(&task_init_nop_thrpt_opt); + reg_task(&task_init_nop_lat_opt); + + /* For backwards compatibility, add none */ + task_init_none = task_init_nop_thrpt_opt; + strcpy(task_init_none.mode_str, "none"); + + reg_task(&task_init_none); +} diff --git a/VNFs/DPPD-PROX/handle_nop.h b/VNFs/DPPD-PROX/handle_nop.h new file mode 100644 index 00000000..0f84eaa3 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_nop.h @@ -0,0 +1,33 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_NOP_H_ +#define _HANDLE_NOP_H_ + +#include "task_base.h" +#include "task_init.h" + +struct task_nop { + struct task_base base; +}; + +static inline int handle_nop_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_nop *task = (struct task_nop *)tbase; + return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL); +} + +#endif /* _HANDLE_NOP_H_ */ diff --git a/VNFs/DPPD-PROX/handle_nsh.c b/VNFs/DPPD-PROX/handle_nsh.c new file mode 100644 index 00000000..65a80c3d --- /dev/null +++ b/VNFs/DPPD-PROX/handle_nsh.c @@ -0,0 +1,225 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_ethdev.h> +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_udp.h> + +#include "vxlangpe_nsh.h" +#include "task_base.h" +#include "tx_pkt.h" +#include "task_init.h" +#include "thread_generic.h" +#include "prefetch.h" +#include "log.h" + +#define VXLAN_GPE_HDR_SZ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + sizeof(struct vxlan_gpe_hdr) + sizeof(struct nsh_hdr) +#define ETHER_NSH_TYPE 0x4F89 /* 0x894F in little endian */ +#define VXLAN_GPE_NSH_TYPE 0xB612 /* 4790 in little endian */ +#define VXLAN_GPE_NP 0x4 + +uint16_t decap_nsh_packets(struct rte_mbuf **mbufs, uint16_t n_pkts); +uint16_t encap_nsh_packets(struct rte_mbuf **mbufs, uint16_t n_pkts); + +struct task_decap_nsh { + struct task_base base; +}; + +struct task_encap_nsh { + struct task_base base; +}; + +static void init_task_decap_nsh(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ + return; +} + +static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap_nsh *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *eth_hdr = NULL; + struct udp_hdr *udp_hdr = NULL; + struct vxlan_gpe_hdr *vxlan_gpe_hdr = NULL; + uint16_t hdr_len; + + eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + if (eth_hdr->ether_type == ETHER_NSH_TYPE) { + /* "decapsulate" Ethernet + NSH header by moving packet pointer */ + hdr_len = sizeof(struct ether_hdr) + sizeof(struct nsh_hdr); + + mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len); + mbuf->data_off += hdr_len; + mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len); + /* save length of header in reserved 16bits of rte_mbuf */ + mbuf->udata64 = hdr_len; + } + else { + if (mbuf->data_len < VXLAN_GPE_HDR_SZ) { + mbuf->udata64 = 0; + return 0; + } + + /* check the UDP destination port */ + udp_hdr = (struct udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr)); + if (udp_hdr->dst_port != 
VXLAN_GPE_NSH_TYPE) { + mbuf->udata64 = 0; + return 0; + } + + /* check the Next Protocol field in VxLAN-GPE header */ + vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr)); + if (vxlan_gpe_hdr->next_proto != VXLAN_GPE_NP) { + mbuf->udata64 = 0; + return 0; + } + + /* "decapsulate" VxLAN-GPE + NSH header by moving packet pointer */ + hdr_len = VXLAN_GPE_HDR_SZ; + + mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len); + mbuf->data_off += hdr_len; + mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len); + /* save length of header in reserved 16bits of rte_mbuf */ + mbuf->udata64 = hdr_len; + } + + return 0; +} + +static int handle_decap_nsh_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_decap_nsh *task = (struct task_decap_nsh *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_decap_nsh(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_decap_nsh(task, mbufs[j]); + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void init_task_encap_nsh(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ + return; +} + +static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap_nsh *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *eth_hdr = NULL; + struct nsh_hdr *nsh_hdr = NULL; + struct udp_hdr *udp_hdr = NULL; + struct vxlan_gpe_hdr *vxlan_gpe_hdr = NULL; + uint16_t hdr_len; + + if (mbuf == NULL) + return 0; + if (mbuf->udata64 == 0) + return 0; + + /* use header length saved in 
reserved 16bits of rte_mbuf to + "encapsulate" transport + NSH header by moving packet pointer */ + mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->udata64); + mbuf->data_off -= mbuf->udata64; + mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->udata64); + + eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + if (eth_hdr->ether_type == ETHER_NSH_TYPE) { + nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(struct ether_hdr)); + + /* decrement Service Index in NSH header */ + if (nsh_hdr->sf_index > 0) + nsh_hdr->sf_index -= 1; + } + else { + /* "encapsulate" VxLAN-GPE + NSH header by moving packet pointer */ + if (mbuf->data_len < VXLAN_GPE_HDR_SZ) + return 0; + + /* check the UDP destination port */ + udp_hdr = (struct udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr)); + if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) + return 0; + + /* check the Next Protocol field in VxLAN-GPE header */ + vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr)); + if (vxlan_gpe_hdr->next_proto != VXLAN_GPE_NP) + return 0; + + /* decrement Service Index in NSH header */ + nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(struct vxlan_gpe_hdr)); + if (nsh_hdr->sf_index > 0) + nsh_hdr->sf_index -= 1; + } + + return 0; +} + +static int handle_encap_nsh_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_encap_nsh *task = (struct task_encap_nsh *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_encap_nsh(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; 
++j) { + out[j] = handle_encap_nsh(task, mbufs[j]); + } +#endif + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static struct task_init task_init_decap_nsh = { + .mode_str = "decapnsh", + .init = init_task_decap_nsh, + .handle = handle_decap_nsh_bulk, + .thread_x = thread_generic, + .size = sizeof(struct task_decap_nsh) +}; + +static struct task_init task_init_encap_nsh = { + .mode_str = "encapnsh", + .init = init_task_encap_nsh, + .handle = handle_encap_nsh_bulk, + .size = sizeof(struct task_encap_nsh) +}; + +__attribute__((constructor)) static void reg_task_nshtag(void) +{ + reg_task(&task_init_decap_nsh); + reg_task(&task_init_encap_nsh); +} diff --git a/VNFs/DPPD-PROX/handle_pf_acl.c b/VNFs/DPPD-PROX/handle_pf_acl.c new file mode 100644 index 00000000..16ac0330 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_pf_acl.c @@ -0,0 +1,104 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_table_stub.h> //FIXME: ACL + +#include "log.h" +#include "quit.h" +#include "thread_pipeline.h" + +struct task_pf_acl { + struct task_pipe pipe; + //TODO +}; + +static void init_task_pf_acl(struct task_base *tbase, struct task_args *targ) +{ + struct task_pipe *tpipe = (struct task_pipe *)tbase; +// struct task_pf_acl *task = (struct task_pf_acl *)tpipe; + int err; + + /* create pipeline, input ports and output ports */ + init_pipe_create_in_out(tpipe, targ); + + /* create ACL pipeline table */ + //TODO + +//FIXME: this is not ACL ( + /* create pipeline tables */ + for (uint8_t i = 0; i < tpipe->n_ports_in; ++i) { + struct rte_pipeline_table_params table_params = { + .ops = &rte_table_stub_ops, + .arg_create = NULL, + .f_action_hit = NULL, + .f_action_miss = NULL, + .arg_ah = NULL, + .action_data_size = 0, + }; + err = rte_pipeline_table_create(tpipe->p, &table_params, + &tpipe->table_id[i]); + PROX_PANIC(err != 0, "Failed to create table %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, targ->task_init->mode_str, + targ->lconf->id, targ->task, + err); + } + tpipe->n_tables = tpipe->n_ports_in; + PROX_PANIC(tpipe->n_tables < 1, "No table created " + "for %s pipeline on core %u task %u\n", + targ->task_init->mode_str, targ->lconf->id, targ->task); + + /* add default entry to tables */ + for (uint8_t i = 0; i < tpipe->n_tables; ++i) { + struct rte_pipeline_table_entry default_entry = { + .action = RTE_PIPELINE_ACTION_PORT, + {.port_id = tpipe->port_out_id[i % tpipe->n_ports_out]}, + }; + struct rte_pipeline_table_entry *default_entry_ptr; + err = rte_pipeline_table_default_entry_add(tpipe->p, tpipe->table_id[i], + &default_entry, &default_entry_ptr); + PROX_PANIC(err != 0, "Failed to add default entry to table %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, targ->task_init->mode_str, + targ->lconf->id, targ->task, + err); + } +//FIXME: this is not ACL ) + + /* connect pipeline input ports to ACL pipeline table 
*/ + init_pipe_connect_one(tpipe, targ, tpipe->table_id[0]); + + /* enable pipeline input ports */ + init_pipe_enable(tpipe, targ); + + /* check pipeline consistency */ + init_pipe_check(tpipe, targ); +} + +static struct task_init task_init_pf_acl = { + .mode_str = "pf_acl", + .init = init_task_pf_acl, + .handle = handle_pipe, + .thread_x = thread_pipeline, + .size = sizeof(struct task_pf_acl), +}; + +__attribute__((constructor)) static void reg_task_pf_acl(void) +{ + reg_task(&task_init_pf_acl); +} diff --git a/VNFs/DPPD-PROX/handle_police.c b/VNFs/DPPD-PROX/handle_police.c new file mode 100644 index 00000000..125e8c0a --- /dev/null +++ b/VNFs/DPPD-PROX/handle_police.c @@ -0,0 +1,270 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> +#include <stdio.h> +#include <rte_mbuf.h> +#include <rte_cycles.h> +#include <rte_version.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" +#include "prox_malloc.h" +#include "task_base.h" +#include "task_init.h" +#include "lconf.h" +#include "prefetch.h" +#include "quit.h" +#include "log.h" +#include "defines.h" +#include "qinq.h" +#include "prox_cfg.h" +#include "prox_shared.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +struct task_police { + struct task_base base; + union { + struct rte_meter_srtcm *sr_flows; + struct rte_meter_trtcm *tr_flows; + }; + + uint16_t *user_table; + enum police_action police_act[3][3]; + uint16_t overhead; + uint8_t runtime_flags; +}; + +typedef uint8_t (*hp) (struct task_police *task, struct rte_mbuf *mbuf, uint64_t tsc, uint32_t user); + +static uint8_t handle_police(struct task_police *task, struct rte_mbuf *mbuf, uint64_t tsc, uint32_t user) +{ + enum rte_meter_color in_color = e_RTE_METER_GREEN; + enum rte_meter_color out_color; + uint32_t pkt_len = rte_pktmbuf_pkt_len(mbuf) + task->overhead; + out_color = rte_meter_srtcm_color_aware_check(&task->sr_flows[user], tsc, pkt_len, in_color); + + return task->police_act[in_color][out_color] == ACT_DROP? 
OUT_DISCARD : 0; +} + +static uint8_t handle_police_tr(struct task_police *task, struct rte_mbuf *mbuf, uint64_t tsc, uint32_t user) +{ + enum rte_meter_color in_color = e_RTE_METER_GREEN; + enum rte_meter_color out_color; + uint32_t pkt_len = rte_pktmbuf_pkt_len(mbuf) + task->overhead; + out_color = rte_meter_trtcm_color_aware_check(&task->tr_flows[user], tsc, pkt_len, in_color); + + if (task->runtime_flags & TASK_MARK) { +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + uint32_t subport, pipe, traffic_class, queue; + enum rte_meter_color color; + + rte_sched_port_pkt_read_tree_path(mbuf, &subport, &pipe, &traffic_class, &queue); + color = task->police_act[in_color][out_color]; + + rte_sched_port_pkt_write(mbuf, subport, pipe, traffic_class, queue, color); +#else + struct rte_sched_port_hierarchy *sched = + (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched; + sched->color = task->police_act[in_color][out_color]; +#endif + } + + return task->police_act[in_color][out_color] == ACT_DROP? 
OUT_DISCARD : 0; +} + +static inline int get_user(struct task_police *task, struct rte_mbuf *mbuf) +{ + if (task->runtime_flags & TASK_CLASSIFY) { + struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbuf, struct qinq_hdr *); + return PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci); + } + +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + uint32_t dummy; + uint32_t pipe; + + rte_sched_port_pkt_read_tree_path(mbuf, &dummy, &pipe, &dummy, &dummy); + return pipe; +#else + struct rte_sched_port_hierarchy *sched = + (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched; + return sched->pipe; +#endif +} + +#define PHASE1_DELAY PREFETCH_OFFSET +#define PHASE2_DELAY PREFETCH_OFFSET +#define PHASE3_DELAY PREFETCH_OFFSET +#define PHASE4_DELAY PREFETCH_OFFSET + +static inline int handle_pb(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, hp handle_police_func) +{ + struct task_police *task = (struct task_police *)tbase; + uint16_t j; + uint64_t cur_tsc = rte_rdtsc(); + uint32_t user[64]; + uint8_t out[MAX_PKT_BURST]; + uint32_t cur_user; + for (j = 0; j < PHASE1_DELAY && j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + + for (j = 0; j < PHASE2_DELAY && j + PHASE1_DELAY < n_pkts; ++j) { + PREFETCH0(mbufs[j + PHASE1_DELAY]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j], void*)); + } + + for (j = 0; j < PHASE3_DELAY && j + PHASE2_DELAY + PHASE1_DELAY < n_pkts; ++j) { + PREFETCH0(mbufs[j + PHASE2_DELAY + PHASE1_DELAY]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PHASE2_DELAY], void*)); + cur_user = get_user(task, mbufs[j]); + user[j] = cur_user; + PREFETCH0(&task->user_table[cur_user]); + } + + /* At this point, the whole pipeline is running */ + for (j = 0; j + PHASE3_DELAY + PHASE2_DELAY + PHASE1_DELAY < n_pkts; ++j) { + PREFETCH0(mbufs[j + PHASE3_DELAY + PHASE2_DELAY + PHASE1_DELAY]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PHASE3_DELAY + PHASE2_DELAY], void*)); + cur_user = get_user(task, mbufs[j + PHASE3_DELAY]); + user[j + PHASE3_DELAY] = cur_user; + 
PREFETCH0(&task->user_table[cur_user]); + + out[j] = handle_police_func(task, mbufs[j], cur_tsc, task->user_table[user[j]]); + } + + /* Last part of pipeline */ + for (; j + PHASE3_DELAY + PHASE2_DELAY < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PHASE3_DELAY + PHASE2_DELAY], void*)); + PREFETCH0(&task->user_table[j + PHASE3_DELAY]); + cur_user = get_user(task, mbufs[j + PHASE3_DELAY]); + user[j + PHASE3_DELAY] = cur_user; + PREFETCH0(&task->user_table[cur_user]); + + out[j] = handle_police_func(task, mbufs[j], cur_tsc, task->user_table[user[j]]); + } + + for (; j + PHASE3_DELAY < n_pkts; ++j) { + cur_user = get_user(task, mbufs[j + PHASE3_DELAY]); + user[j + PHASE3_DELAY] = cur_user; + PREFETCH0(&task->user_table[cur_user]); + + out[j] = handle_police_func(task, mbufs[j], cur_tsc, task->user_table[user[j]]); + } + + for (; j < n_pkts; ++j) { + out[j] = handle_police_func(task, mbufs[j], cur_tsc, task->user_table[user[j]]); + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static int handle_police_bulk(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts) +{ + return handle_pb(tbase, mbuf, n_pkts, handle_police); +} + +static int handle_police_tr_bulk(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts) +{ + return handle_pb(tbase, mbuf, n_pkts, handle_police_tr); +} + +static void init_task_police(struct task_base *tbase, struct task_args *targ) +{ + struct task_police *task = (struct task_police *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->overhead = targ->overhead; + task->runtime_flags = targ->runtime_flags; + + task->user_table = prox_sh_find_socket(socket_id, "user_table"); + if (!task->user_table) { + PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n"); + int ret = lua_to_user_table(prox_lua(), GLOBAL, targ->user_table, socket_id, &task->user_table); + PROX_PANIC(ret, "Failed to create user table from config:\n%s\n", get_lua_to_errors()); + 
prox_sh_add_socket(socket_id, "user_table", task->user_table); + } + + if (strcmp(targ->task_init->sub_mode_str, "trtcm")) { + task->sr_flows = prox_zmalloc(targ->n_flows * sizeof(*task->sr_flows), socket_id); + PROX_PANIC(task->sr_flows == NULL, "Failed to allocate flow contexts\n"); + PROX_PANIC(!targ->cir, "Commited information rate is set to 0\n"); + PROX_PANIC(!targ->cbs, "Commited information bucket size is set to 0\n"); + PROX_PANIC(!targ->ebs, "Execess information bucket size is set to 0\n"); + + struct rte_meter_srtcm_params params = { + .cir = targ->cir, + .cbs = targ->cbs, + .ebs = targ->ebs, + }; + + for (uint32_t i = 0; i < targ->n_flows; ++i) { + rte_meter_srtcm_config(&task->sr_flows[i], ¶ms); + } + } + else { + task->tr_flows = prox_zmalloc(targ->n_flows * sizeof(*task->tr_flows), socket_id); + PROX_PANIC(task->tr_flows == NULL, "Failed to allocate flow contexts\n"); + PROX_PANIC(!targ->pir, "Peak information rate is set to 0\n"); + PROX_PANIC(!targ->cir, "Commited information rate is set to 0\n"); + PROX_PANIC(!targ->pbs, "Peak information bucket size is set to 0\n"); + PROX_PANIC(!targ->cbs, "Commited information bucket size is set to 0\n"); + + struct rte_meter_trtcm_params params = { + .pir = targ->pir, + .pbs = targ->pbs, + .cir = targ->cir, + .cbs = targ->cbs, + }; + + for (uint32_t i = 0; i < targ->n_flows; ++i) { + rte_meter_trtcm_config(&task->tr_flows[i], ¶ms); + } + } + + for (uint32_t i = 0; i < 3; ++i) { + for (uint32_t j = 0; j < 3; ++j) { + task->police_act[i][j] = targ->police_act[i][j]; + } + } +} + +static struct task_init task_init_police = { + .mode_str = "police", + .init = init_task_police, + .handle = handle_police_bulk, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct task_police) +}; + +static struct task_init task_init_police2 = { + .mode_str = "police", + .sub_mode_str = "trtcm", + .init = init_task_police, + .handle = handle_police_tr_bulk, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct 
task_police) +}; + +__attribute__((constructor)) static void reg_task_police(void) +{ + reg_task(&task_init_police); + reg_task(&task_init_police2); +} diff --git a/VNFs/DPPD-PROX/handle_qinq_decap4.c b/VNFs/DPPD-PROX/handle_qinq_decap4.c new file mode 100644 index 00000000..f5c80227 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_decap4.c @@ -0,0 +1,659 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_byteorder.h> +#include <rte_cycles.h> +#include <rte_table_hash.h> +#include <rte_lpm.h> +#include <rte_version.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" +#include "handle_qinq_decap4.h" +#include "handle_qinq_encap4.h" +#include "stats.h" +#include "tx_pkt.h" +#include "defines.h" +#include "handle_routing.h" +#include "prox_assert.h" +#include "task_init.h" +#include "quit.h" +#include "pkt_prototypes.h" +#include "task_base.h" +#include "task_init.h" +#include "bng_pkts.h" +#include "prox_cksum.h" +#include "expire_cpe.h" +#include "prox_port_cfg.h" +#include "prefetch.h" +#include "prox_cfg.h" +#include "lconf.h" +#include "prox_cfg.h" +#include "prox_shared.h" + +struct task_qinq_decap4 { + struct task_base base; + struct rte_table_hash *cpe_table; + struct rte_table_hash *qinq_gre_table; + struct qinq_gre_data *qinq_gre_data; + struct next_hop *next_hops; + struct rte_lpm *ipv4_lpm; + uint32_t local_ipv4; + uint16_t qinq_tag; + uint8_t runtime_flags; + int 
offload_crc; + uint64_t keys[64]; + uint64_t src_mac[PROX_MAX_PORTS]; + struct rte_mbuf* fake_packets[64]; + struct expire_cpe expire_cpe; + uint64_t cpe_timeout; + uint8_t mapping[PROX_MAX_PORTS]; +}; + +static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data* entry); +/* Convert IPv4 packets to GRE and optionally store QinQ Tags */ +static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs); + +static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *targ) +{ + struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + struct lpm4 *lpm; + + task->cpe_table = targ->cpe_table; + task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms); + + PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n"); + lpm = prox_sh_find_socket(socket_id, targ->route_table); + if (!lpm) { + int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm); + PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors()); + prox_sh_add_socket(socket_id, targ->route_table, lpm); + } + task->ipv4_lpm = lpm->rte_lpm; + task->next_hops = lpm->next_hops; + + task->qinq_tag = targ->qinq_tag; + task->local_ipv4 = targ->local_ipv4; + task->runtime_flags = targ->runtime_flags; + if (strcmp(targ->task_init->sub_mode_str, "pe")) + PROX_PANIC(targ->qinq_gre_table == NULL, "can't set up qinq gre\n"); + + task->qinq_gre_table = targ->qinq_gre_table; + + if (targ->cpe_table_timeout_ms) { + targ->lconf->period_func = check_expire_cpe; + task->expire_cpe.cpe_table = task->cpe_table; + targ->lconf->period_data = &task->expire_cpe; + targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES; + } + + for (uint32_t i = 0; i < 64; ++i) { + task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof 
(struct rte_mbuf)); + } + if (task->runtime_flags & TASK_ROUTING) { + if (targ->nb_txrings) { + struct task_args *dtarg; + struct core_task ct; + + for (uint32_t i = 0; i < targ->nb_txrings; ++i) { + ct = targ->core_task_set[0].core_task[i]; + dtarg = core_targ_get(ct.core, ct.task); + dtarg = find_reachable_task_sending_to_port(dtarg); + + PROX_PANIC(dtarg == NULL, "Error finding destination port through other tasks for outgoing ring %u\n", i); + task->src_mac[i] = *(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr; + } + } + else { + for (uint32_t i = 0; i < targ->nb_txports; ++i) { + task->src_mac[i] = *(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr; + } + } + } + + if (targ->runtime_flags & TASK_CTRL_HANDLE_ARP) { + targ->lconf->ctrl_func_p[targ->task] = arp_update; + } + + /* Copy the mapping from a sibling task which is configured + with mode encap4. The mapping is constant, so it is faster + to apply it when entries are added (least common case) + instead of re-applying it for every packet (most common + case). */ + + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + enum task_mode smode = targ->lconf->targs[task_id].mode; + if (QINQ_ENCAP4 == smode) { + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) { + task->mapping[i] = targ->lconf->targs[task_id].mapping[i]; + } + } + } + + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + task->offload_crc = port->capabilities.tx_offload_cksum; + } + + // By default, calling this function 1K times per second => 64K ARP per second max + // If 4 interfaces sending to here, = ~0.1% of workload. + // If receiving more ARP, they will be dropped, or will dramatically slow down LB if in "no drop" mode. 
+ targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq); + targ->lconf->ctrl_func_m[targ->task] = arp_msg; +} + +static void early_init_table(struct task_args *targ) +{ + if (!targ->qinq_gre_table && !targ->cpe_table) { + init_qinq_gre_table(targ, get_qinq_gre_map(targ)); + init_cpe4_table(targ); + } +} + +static inline void extract_key_bulk(struct rte_mbuf **mbufs, uint16_t n_pkts, struct task_qinq_decap4 *task) +{ + for (uint16_t j = 0; j < n_pkts; ++j) { + extract_key_cpe(mbufs[j], &task->keys[j]); + } +} + +__attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf) +{ + struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *); +#ifdef USE_QINQ + uint64_t key = (*(uint64_t*)(((uint8_t *)packet) + 12)) & 0xFF0FFFFFFF0FFFFF; + uint32_t svlan = packet->qinq_hdr.svlan.vlan_tci; + uint32_t cvlan = packet->qinq_hdr.cvlan.vlan_tci; + + svlan = rte_be_to_cpu_16(svlan & 0xFF0F); + cvlan = rte_be_to_cpu_16(cvlan & 0xFF0F); +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n", + key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->vlan_tci_outer, mbuf->packet_type); +#else +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n", + key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->reserved, mbuf->packet_type); +#else + plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, flags=%x, L2Tag=%d\n", + key, svlan, cvlan, svlan, cvlan, mbuf->ol_flags, mbuf->reserved); +#endif +#endif +#else + plogx_err("Can't convert ip %x to gre_id\n", rte_bswap32(packet->ipv4_hdr.src_addr)); +#endif +} + +static int add_cpe_entry(struct rte_table_hash *hash, struct cpe_key *key, struct cpe_data *data) +{ + void* entry_in_hash; + int ret, key_found = 0; + + ret = 
rte_table_hash_key8_ext_dosig_ops. + f_add(hash, key, data, &key_found, &entry_in_hash); + if (unlikely(ret)) { + plogx_err("Failed to add key: ip %x, gre %x\n", key->ip, key->gre_id); + return 1; + } + return 0; +} + +static void extract_key_data_arp(struct rte_mbuf* mbuf, struct cpe_key* key, struct cpe_data* data, const struct qinq_gre_data* entry, uint64_t cpe_timeout, uint8_t* mapping) +{ + const struct cpe_packet_arp *packet = rte_pktmbuf_mtod(mbuf, const struct cpe_packet_arp *); + uint32_t svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F; + uint32_t cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F; + uint8_t port_id; + key->ip = packet->arp.data.spa; + key->gre_id = entry->gre_id; + + data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr)); + data->qinq_svlan = svlan; + data->qinq_cvlan = cvlan; +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + port_id = mbuf->port; + +#else + port_id = mbuf->pkt.in_port; +#endif + uint8_t mapped = mapping[port_id]; + data->mac_port.out_idx = mapping[port_id]; + + if (unlikely(mapped == 255)) { + /* This error only occurs if the system is configured incorrectly */ + plog_warn("Failed adding packet: unknown mapping for port %d", port_id); + data->mac_port.out_idx = 0; + } + + data->user = entry->user; + data->tsc = rte_rdtsc() + cpe_timeout; +} + +void arp_msg_to_str(char *str, struct arp_msg *msg) +{ + sprintf(str, "%u %u %u %u %u.%u.%u.%u %x:%x:%x:%x:%x:%x %u\n", + msg->data.mac_port.out_idx, msg->key.gre_id, msg->data.qinq_svlan, msg->data.qinq_cvlan, + msg->key.ip_bytes[0], msg->key.ip_bytes[1], msg->key.ip_bytes[2], msg->key.ip_bytes[3], + msg->data.mac_port_b[0], msg->data.mac_port_b[1], msg->data.mac_port_b[2], + msg->data.mac_port_b[3], msg->data.mac_port_b[4], msg->data.mac_port_b[5], msg->data.user); +} + +int str_to_arp_msg(struct arp_msg *msg, const char *str) +{ + uint32_t ip[4], interface, gre_id, svlan, cvlan, mac[6], user; + + int ret = sscanf(str, "%u %u %u %u %u.%u.%u.%u %x:%x:%x:%x:%x:%x %u", 
+ &interface, &gre_id, &svlan, &cvlan, + ip, ip + 1, ip + 2, ip + 3, + mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, &user); + + for (uint8_t i = 0; i < 4; ++i) + msg->key.ip_bytes[i] = ip[i]; + msg->key.gre_id = gre_id; + + for (uint8_t i = 0; i < 4; ++i) + msg->data.mac_port_b[i] = mac[i]; + msg->data.qinq_svlan = svlan; + msg->data.qinq_cvlan = cvlan; + msg->data.user = user; + msg->data.mac_port.out_idx = interface; + + return ret != 15; +} + +void arp_update_from_msg(struct rte_table_hash * cpe_table, struct arp_msg **msgs, uint16_t n_msgs, uint64_t cpe_timeout) +{ + int ret, key_found = 0; + void* entry_in_hash; + + for (uint16_t i = 0; i < n_msgs; ++i) { + msgs[i]->data.tsc = rte_rdtsc() + cpe_timeout; + ret = rte_table_hash_key8_ext_dosig_ops. + f_add(cpe_table, &msgs[i]->key, &msgs[i]->data, &key_found, &entry_in_hash); + if (unlikely(ret)) { + plogx_err("Failed to add key %x, gre %x\n", msgs[i]->key.ip, msgs[i]->key.gre_id); + } + } +} + +static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs) +{ + struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase; + struct arp_msg **msgs = (struct arp_msg **)data; + + arp_update_from_msg(task->cpe_table, msgs, n_msgs, task->cpe_timeout); +} + +static void arp_update(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase; + + prefetch_pkts(mbufs, n_pkts); + extract_key_bulk(mbufs, n_pkts, task); + + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + uint64_t lookup_hit_mask = 0; + struct qinq_gre_data* entries[64]; + rte_table_hash_key8_ext_dosig_ops.f_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries); + + TASK_STATS_ADD_RX(&task->base.aux->stats, n_pkts); + for (uint16_t j = 0; j < n_pkts; ++j) { + if (unlikely(!((lookup_hit_mask >> j) & 0x1))) { + handle_error(mbufs[j]); + rte_pktmbuf_free(mbufs[j]); + continue; + } + + struct cpe_key key; + struct 
cpe_data data; + + extract_key_data_arp(mbufs[j], &key, &data, entries[j], task->cpe_timeout, task->mapping); + + void* entry_in_hash; + int ret, key_found = 0; + + ret = rte_table_hash_key8_ext_dosig_ops. + f_add(task->cpe_table, &key, &data, &key_found, &entry_in_hash); + + if (unlikely(ret)) { + plogx_err("Failed to add key %x, gre %x\n", key.ip, key.gre_id); + TASK_STATS_ADD_DROP_DISCARD(&task->base.aux->stats, 1); + } + + /* should do ARP reply */ + TASK_STATS_ADD_DROP_HANDLED(&task->base.aux->stats, 1); + rte_pktmbuf_free(mbufs[j]); + } +} + +static int handle_qinq_decap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_decap4 *task = (struct task_qinq_decap4 *)tbase; + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + struct qinq_gre_data* entries[64]; + uint8_t out[MAX_PKT_BURST]; + uint64_t lookup_hit_mask; + prefetch_pkts(mbufs, n_pkts); + + // Prefetch headroom, as we will prepend mbuf and write to this cache line + for (uint16_t j = 0; j < n_pkts; ++j) { + PREFETCH0((rte_pktmbuf_mtod(mbufs[j], char*)-1)); + } + + extract_key_bulk(mbufs, n_pkts, task); + rte_table_hash_key8_ext_dosig_ops.f_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries); + + if (likely(lookup_hit_mask == pkts_mask)) { + for (uint16_t j = 0; j < n_pkts; ++j) { + out[j] = handle_qinq_decap4(task, mbufs[j], entries[j]); + } + } + else { + for (uint16_t j = 0; j < n_pkts; ++j) { + if (unlikely(!((lookup_hit_mask >> j) & 0x1))) { + // This might fail as the packet has not the expected QinQ or it's not an IPv4 packet + handle_error(mbufs[j]); + out[j] = OUT_DISCARD; + continue; + } + out[j] = handle_qinq_decap4(task, mbufs[j], entries[j]); + } + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +/* add gre header */ +static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id) +{ +#ifdef USE_QINQ + struct ipv4_hdr *pip = 
(struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *)); +#else + struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct ether_hdr *)); +#endif + uint16_t ip_len = rte_be_to_cpu_16(pip->total_length); + uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - 20 - ip_len - sizeof(struct qinq_hdr); + + if (padlen) { + rte_pktmbuf_trim(mbuf, padlen); + } + + PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > ETHER_MAX_LEN, + "Would need to fragment packet new size = %u - not implemented\n", + rte_pktmbuf_data_len(mbuf) - padlen + 20); + +#ifdef USE_QINQ + /* prepend only 20 bytes instead of 28, 8 bytes are present from the QinQ */ + struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 20); +#else + struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 28); +#endif + + PROX_ASSERT(peth); + PREFETCH0(peth); + if (task->runtime_flags & TASK_TX_CRC) { + /* calculate IP CRC here to avoid problems with -O3 flag with gcc */ +#ifdef MPLS_ROUTING + prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc); +#else + prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); +#endif + } + + /* new IP header */ + struct ipv4_hdr *p_tunnel_ip = (struct ipv4_hdr *)(peth + 1); + rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(struct ipv4_hdr)); + ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + p_tunnel_ip->total_length = rte_cpu_to_be_16(ip_len); + p_tunnel_ip->src_addr = src_ipv4; + + /* Add GRE Header values */ + struct gre_hdr *pgre = (struct gre_hdr *)(p_tunnel_ip + 1); + + rte_memcpy(pgre, &gre_hdr_proto, sizeof(struct gre_hdr)); + pgre->gre_id = gre_id; + peth->ether_type = ETYPE_IPv4; +} + +static inline uint16_t calc_padlen(const struct rte_mbuf *mbuf, const uint16_t ip_len) +{ + return rte_pktmbuf_pkt_len(mbuf) - DOWNSTREAM_DELTA - ip_len - offsetof(struct cpe_pkt, ipv4_hdr); +} + +static inline 
uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id, struct task_qinq_decap4 *task) +{ + PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > ETHER_MAX_LEN, + "Would need to fragment packet new size = %u - not implemented\n", + rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA); + + struct core_net_pkt_m *packet = (struct core_net_pkt_m *)rte_pktmbuf_prepend(mbuf, DOWNSTREAM_DELTA); + PROX_ASSERT(packet); + PREFETCH0(packet); + + struct ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr; + uint16_t ip_len = rte_be_to_cpu_16(pip->total_length); + + /* returns 0 on success, returns -ENOENT of failure (or -EINVAL if first or last parameter is NULL) */ +#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1) + uint32_t next_hop_index; +#else + uint8_t next_hop_index; +#endif + if (unlikely(rte_lpm_lookup(task->ipv4_lpm, rte_bswap32(pip->dst_addr), &next_hop_index) != 0)) { + plog_warn("lpm_lookup failed for ip %x: rc = %d\n", rte_bswap32(pip->dst_addr), -ENOENT); + return ROUTE_ERR; + } + PREFETCH0(&task->next_hops[next_hop_index]); + + /* calculate outer IP CRC here to avoid problems with -O3 flag with gcc */ + const uint16_t padlen = calc_padlen(mbuf, ip_len); + if (padlen) { + rte_pktmbuf_trim(mbuf, padlen); + } + const uint8_t port_id = task->next_hops[next_hop_index].mac_port.out_idx; + + *((uint64_t *)(&packet->ether_hdr.d_addr)) = task->next_hops[next_hop_index].mac_port_8bytes; + *((uint64_t *)(&packet->ether_hdr.s_addr)) = task->src_mac[task->next_hops[next_hop_index].mac_port.out_idx]; + +#ifdef MPLS_ROUTING + packet->mpls_bytes = task->next_hops[next_hop_index].mpls | 0x00010000; // Set BoS to 1 + packet->ether_hdr.ether_type = ETYPE_MPLSU; +#else + packet->ether_hdr.ether_type = ETYPE_IPv4; +#endif + + /* New IP header */ + rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(struct ipv4_hdr)); + ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + packet->tunnel_ip_hdr.total_length = 
rte_cpu_to_be_16(ip_len); + packet->tunnel_ip_hdr.src_addr = src_ipv4; + packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst; + if (task->runtime_flags & TASK_TX_CRC) { +#ifdef MPLS_ROUTING + prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc); +#else + prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); +#endif + } + + /* Add GRE Header values */ + rte_memcpy(&packet->gre_hdr, &gre_hdr_proto, sizeof(struct gre_hdr)); + packet->gre_hdr.gre_id = rte_be_to_cpu_32(gre_id); + + return port_id; +} + +static void extract_key_data(struct rte_mbuf* mbuf, struct cpe_key* key, struct cpe_data* data, const struct qinq_gre_data* entry, uint64_t cpe_timeout, uint8_t *mapping) +{ + struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt *); + uint8_t port_id; + +#ifndef USE_QINQ + const uint32_t tmp = rte_bswap32(packet->ipv4_hdr.src_addr) & 0x00FFFFFF; + const uint32_t svlan = rte_bswap16(tmp >> 12); + const uint32_t cvlan = rte_bswap16(tmp & 0x0FFF); +#endif + +#ifdef USE_QINQ + key->ip = packet->ipv4_hdr.src_addr; +#else + key->ip = 0; +#endif + key->gre_id = entry->gre_id; + +#ifdef USE_QINQ + data->mac_port_8bytes = *((const uint64_t *)(&packet->qinq_hdr.s_addr)); + data->qinq_svlan = packet->qinq_hdr.svlan.vlan_tci & 0xFF0F; + data->qinq_cvlan = packet->qinq_hdr.cvlan.vlan_tci & 0xFF0F; +#else + data->mac_port_8bytes = *((const uint64_t *)(&packet->ether_hdr.s_addr)); + data->qinq_svlan = svlan; + data->qinq_cvlan = cvlan; +#endif + +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + port_id = mbuf->port; + +#else + port_id = mbuf->pkt.in_port; +#endif + uint8_t mapped = mapping[port_id]; + data->mac_port.out_idx = mapped; + + if (unlikely(mapped == 255)) { + /* This error only occurs if the system is configured incorrectly */ + plog_warn("Failed adding packet: unknown mapping for 
port %d", port_id); + data->mac_port.out_idx = 0; + } + else { + data->mac_port.out_idx = mapped; + } + + data->user = entry->user; + data->tsc = rte_rdtsc() + cpe_timeout; +} + +static uint8_t handle_qinq_decap4(struct task_qinq_decap4 *task, struct rte_mbuf *mbuf, struct qinq_gre_data* entry) +{ + if (!(task->runtime_flags & (TASK_CTRL_HANDLE_ARP|TASK_FP_HANDLE_ARP))) { + // We learn CPE MAC addresses on every packets + struct cpe_key key; + struct cpe_data data; + extract_key_data(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping); + //plogx_err("Adding key ip=%x/gre_id=%x data (svlan|cvlan)=%x|%x, rss=%x, gre_id=%x\n", key.ip, key.gre_id, data.qinq_svlan,data.qinq_cvlan, mbuf->hash.rss, entry->gre_id); + + if (add_cpe_entry(task->cpe_table, &key, &data)) { + plog_warn("Failed to add ARP entry\n"); + return OUT_DISCARD; + } + } + if (task->runtime_flags & TASK_FP_HANDLE_ARP) { + // We learn CPE MAC addresses on ARP packets in Fast Path +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + if (mbuf->packet_type == 0xB) { + struct cpe_key key; + struct cpe_data data; + extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping); + + if (add_cpe_entry(task->cpe_table, &key, &data)) { + plog_warn("Failed to add ARP entry\n"); + return OUT_DISCARD; + } + return OUT_HANDLED; + } else +#endif + { +#ifdef USE_QINQ + struct cpe_pkt *packet = rte_pktmbuf_mtod(mbuf, struct cpe_pkt*); + if (packet->qinq_hdr.svlan.eth_proto == task->qinq_tag && + packet->qinq_hdr.ether_type == ETYPE_ARP) { + struct cpe_key key; + struct cpe_data data; + extract_key_data_arp(mbuf, &key, &data, entry, task->cpe_timeout, task->mapping); + + if (add_cpe_entry(task->cpe_table, &key, &data)) { + plog_warn("Failed to add ARP entry\n"); + return OUT_DISCARD; + } + return OUT_HANDLED; + } +#endif + } + } + if (task->runtime_flags & TASK_ROUTING) { + uint8_t tx_portid; + tx_portid = gre_encap_route(task->local_ipv4, mbuf, entry->gre_id, task); + + return tx_portid == ROUTE_ERR? 
OUT_DISCARD : tx_portid; + } + else { + gre_encap(task, task->local_ipv4, mbuf, entry->gre_id); + return 0; + } +} + +static void flow_iter_next(struct flow_iter *iter, struct task_args *targ) +{ + do { + iter->idx++; + } while (iter->idx < (int)get_qinq_gre_map(targ)->count && + get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads != targ->worker_thread_id); +} + +static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ) +{ + iter->idx = -1; + flow_iter_next(iter, targ); +} + +static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ) +{ + return iter->idx == (int)get_qinq_gre_map(targ)->count; +} + +static uint16_t flow_iter_get_svlan(struct flow_iter *iter, struct task_args *targ) +{ + return get_qinq_gre_map(targ)->entries[iter->idx].svlan; +} + +static uint16_t flow_iter_get_cvlan(struct flow_iter *iter, struct task_args *targ) +{ + return get_qinq_gre_map(targ)->entries[iter->idx].cvlan; +} + +static struct task_init task_init_qinq_decapv4_table = { + .mode = QINQ_DECAP4, + .mode_str = "qinqdecapv4", + .early_init = early_init_table, + .init = init_task_qinq_decap4, + .handle = handle_qinq_decap4_bulk, + .flag_features = TASK_FEATURE_ROUTING, + .flow_iter = { + .beg = flow_iter_beg, + .is_end = flow_iter_is_end, + .next = flow_iter_next, + .get_svlan = flow_iter_get_svlan, + .get_cvlan = flow_iter_get_cvlan, + }, + .size = sizeof(struct task_qinq_decap4) +}; + +__attribute__((constructor)) static void reg_task_qinq_decap4(void) +{ + reg_task(&task_init_qinq_decapv4_table); +} diff --git a/VNFs/DPPD-PROX/handle_qinq_decap4.h b/VNFs/DPPD-PROX/handle_qinq_decap4.h new file mode 100644 index 00000000..ae2475d2 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_decap4.h @@ -0,0 +1,34 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_QINQ_DECAP4_H_ +#define _HANDLE_QINQ_DECAP4_H_ + +#include "hash_entry_types.h" + +struct rte_table_hash; + +struct arp_msg { + struct cpe_key key; + struct cpe_data data; +}; + +void arp_msg_to_str(char *str, struct arp_msg *msg); +int str_to_arp_msg(struct arp_msg *msg, const char *str); + +void arp_update_from_msg(struct rte_table_hash * cpe_table, struct arp_msg **msgs, uint16_t n_msgs, uint64_t cpe_timeout); + +#endif /* _HANDLE_QINQ_DECAP4_H_ */ diff --git a/VNFs/DPPD-PROX/handle_qinq_decap6.c b/VNFs/DPPD-PROX/handle_qinq_decap6.c new file mode 100644 index 00000000..d876c733 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_decap6.c @@ -0,0 +1,197 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> +#include <rte_table_hash.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" + +#include "handle_qinq_encap6.h" +#include "log.h" +#include "lconf.h" +#include "task_init.h" +#include "task_base.h" +#include "tx_pkt.h" +#include "defines.h" +#include "pkt_prototypes.h" +#include "prox_assert.h" +#include "hash_utils.h" +#include "task_base.h" +#include "prefetch.h" +#include "hash_entry_types.h" +#include "prox_cfg.h" +#include "log.h" +#include "quit.h" +#include "prox_shared.h" + +/* Packets must all be IPv6, always store QinQ tags for lookup (not configurable) */ +struct task_qinq_decap6 { + struct task_base base; + struct rte_table_hash *cpe_table; + uint16_t *user_table; + uint32_t bucket_index; + struct ether_addr edaddr; + struct rte_lpm6 *rte_lpm6; + void* period_data; /* used if using dual stack*/ + void (*period_func)(void* data); + uint64_t cpe_timeout; +}; + +void update_arp_entries6(void* data); + +static void init_task_qinq_decap6(struct task_base *tbase, struct task_args *targ) +{ + struct task_qinq_decap6 *task = (struct task_qinq_decap6 *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->edaddr = targ->edaddr; + task->cpe_table = targ->cpe_table; + task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms); + + if (targ->cpe_table_timeout_ms) { + if (targ->lconf->period_func) { + task->period_func = targ->lconf->period_func; + task->period_data = targ->lconf->period_data; + } + targ->lconf->period_func = update_arp_entries6; + targ->lconf->period_data = tbase; + targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES; + } + + task->user_table = prox_sh_find_socket(socket_id, "user_table"); + if (!task->user_table) { + PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n"); + int ret = lua_to_user_table(prox_lua(), GLOBAL, targ->user_table, socket_id, &task->user_table); + PROX_PANIC(ret, "Failed to create user table from config:\n%s\n", get_lua_to_errors()); + 
prox_sh_add_socket(socket_id, "user_table", task->user_table); + } + + struct lpm6 *lpm = prox_sh_find_socket(socket_id, "lpm6"); + if (!lpm) { + struct lpm6 *lpm6; + int ret; + + ret = lua_to_lpm6(prox_lua(), GLOBAL, "lpm6", socket_id, &lpm6); + PROX_PANIC(ret, "Failed to read lpm6 from config:\n%s\n", get_lua_to_errors()); + prox_sh_add_socket(socket_id, "lpm6", lpm6); + } + task->rte_lpm6 = lpm->rte_lpm6; +} + +static void early_init(struct task_args *targ) +{ + if (!targ->cpe_table) { + init_cpe6_table(targ); + } +} + +static inline uint8_t handle_qinq_decap6(struct task_qinq_decap6 *task, struct rte_mbuf *mbuf) +{ + struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbuf, struct qinq_hdr *); + struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1); + + uint16_t svlan = pqinq->svlan.vlan_tci & 0xFF0F; + uint16_t cvlan = pqinq->cvlan.vlan_tci & 0xFF0F; + + struct cpe_data entry; + entry.mac_port_8bytes = *((uint64_t *)(((uint8_t *)pqinq) + 5)) << 16; + entry.qinq_svlan = svlan; + entry.qinq_cvlan = cvlan; + entry.user = task->user_table[PKT_TO_LUTQINQ(svlan, cvlan)]; + entry.tsc = rte_rdtsc() + task->cpe_timeout; + + int key_found = 0; + void* entry_in_hash = NULL; + int ret = rte_table_hash_ext_dosig_ops. 
+ f_add(task->cpe_table, pip6->src_addr, &entry, &key_found, &entry_in_hash); + + if (unlikely(ret)) { + plogx_err("Failed to add key " IPv6_BYTES_FMT "\n", IPv6_BYTES(pip6->src_addr)); + return OUT_DISCARD; + } + + pqinq = (struct qinq_hdr *)rte_pktmbuf_adj(mbuf, 2 * sizeof(struct vlan_hdr)); + PROX_ASSERT(pqinq); + pqinq->ether_type = ETYPE_IPv6; + // Dest MAC addresses + ether_addr_copy(&task->edaddr, &pqinq->d_addr); + return 0; +} + +static int handle_qinq_decap6_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_decap6 *task = (struct task_qinq_decap6 *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_qinq_decap6(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_qinq_decap6(task, mbufs[j]); + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +void update_arp_entries6(void* data) +{ + uint64_t cur_tsc = rte_rdtsc(); + struct task_qinq_decap6 *task = (struct task_qinq_decap6 *)data; + + struct cpe_data *entries[4] = {0}; + void *key[4] = {0}; + uint64_t n_buckets = get_bucket(task->cpe_table, task->bucket_index, key, (void**)entries); + + for (uint8_t i = 0; i < 4 && entries[i]; ++i) { + if (entries[i]->tsc < cur_tsc) { + int key_found = 0; + void* entry = 0; + rte_table_hash_ext_dosig_ops.f_delete(task->cpe_table, key[i], &key_found, entry); + } + } + + task->bucket_index++; + task->bucket_index &= (n_buckets - 1); + + if (task->period_func) { + task->period_func(task->period_data); + } +} + +static struct task_init task_init_qinq_decap6 = { + .mode = QINQ_DECAP6, + .mode_str = "qinqdecapv6", + .early_init = early_init, + .init = 
init_task_qinq_decap6, + .handle = handle_qinq_decap6_bulk, + .size = sizeof(struct task_qinq_decap6) +}; + +__attribute__((constructor)) static void reg_task_qinq_decap6(void) +{ + reg_task(&task_init_qinq_decap6); +} diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.c b/VNFs/DPPD-PROX/handle_qinq_encap4.c new file mode 100644 index 00000000..24181959 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_encap4.c @@ -0,0 +1,662 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_table_hash.h> +#include <rte_cycles.h> + +#include "mbuf_utils.h" +#include "prox_malloc.h" +#include "prox_lua.h" +#include "prox_lua_types.h" +#include "handle_qinq_encap4.h" +#include "handle_qinq_decap4.h" +#include "prox_args.h" +#include "defines.h" +#include "tx_pkt.h" +#include "prefetch.h" +#include "pkt_prototypes.h" +#include "hash_entry_types.h" +#include "task_init.h" +#include "bng_pkts.h" +#include "prox_cksum.h" +#include "hash_utils.h" +#include "quit.h" +#include "prox_port_cfg.h" +#include "handle_lb_net.h" +#include "prox_cfg.h" +#include "cfgfile.h" +#include "toeplitz.h" +#include "prox_shared.h" + +static struct cpe_table_data *read_cpe_table_config(const char *name, uint8_t socket) +{ + struct lua_State *L = prox_lua(); + struct cpe_table_data *ret = NULL; + + lua_getglobal(L, name); + PROX_PANIC(lua_isnil(L, -1), "Coudn't find cpe_table data\n"); + + return ret; +} + +struct qinq_gre_map *get_qinq_gre_map(struct task_args *targ) +{ + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + struct qinq_gre_map *ret = prox_sh_find_socket(socket_id, "qinq_gre_map"); + + if (!ret) { + PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n"); + int rv = lua_to_qinq_gre_map(prox_lua(), GLOBAL, targ->user_table, socket_id, &ret); + PROX_PANIC(rv, "Error reading mapping between qinq and gre from qinq_gre_map: \n%s\n", + get_lua_to_errors()); + prox_sh_add_socket(socket_id, "qinq_gre_map", ret); + } + return ret; +} + +/* Encapsulate IPv4 packets in QinQ. QinQ tags are derived from gre_id. 
*/ +int handle_qinq_encap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs); + +static void fill_table(struct task_args *targ, struct rte_table_hash *table) +{ + struct cpe_table_data *cpe_table_data; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + int ret = lua_to_cpe_table_data(prox_lua(), GLOBAL, targ->cpe_table_name, socket_id, &cpe_table_data); + const uint8_t n_slaves = targ->nb_slave_threads; + const uint8_t worker_id = targ->worker_thread_id; + + for (uint32_t i = 0; i < cpe_table_data->n_entries; ++i) { + if (rte_bswap32(cpe_table_data->entries[i].ip) % n_slaves != worker_id) { + continue; + } + struct cpe_table_entry *entry = &cpe_table_data->entries[i]; + + uint32_t port_idx = prox_cfg.cpe_table_ports[entry->port_idx]; + PROX_PANIC(targ->mapping[port_idx] == 255, "Error reading cpe table: Mapping for port %d is missing", port_idx); + + struct cpe_key key = { + .ip = entry->ip, + .gre_id = entry->gre_id, + }; + + struct cpe_data data = { + .qinq_svlan = entry->svlan, + .qinq_cvlan = entry->cvlan, + .user = entry->user, + .mac_port = { + .mac = entry->eth_addr, + .out_idx = targ->mapping[port_idx], + }, + .tsc = UINT64_MAX, + }; + + int key_found; + void* entry_in_hash; + rte_table_hash_key8_ext_dosig_ops.f_add(table, &key, &data, &key_found, &entry_in_hash); + } +} + +static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *targ) +{ + struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)(tbase); + int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + + task->qinq_tag = targ->qinq_tag; + task->cpe_table = targ->cpe_table; + task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms); + + if (!strcmp(targ->task_init->sub_mode_str, "pe")) { + PROX_PANIC(!strcmp(targ->cpe_table_name, ""), "CPE table not configured\n"); + fill_table(targ, task->cpe_table); + } + +#ifdef ENABLE_EXTRA_USER_STATISTICS + 
task->n_users = targ->n_users; + task->stats_per_user = prox_zmalloc(targ->n_users * sizeof(uint32_t), socket_id); +#endif + if (targ->runtime_flags & TASK_CLASSIFY) { + PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n"); + task->dscp = prox_sh_find_socket(socket_id, targ->dscp); + if (!task->dscp) { + int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, &task->dscp); + PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n", + get_lua_to_errors()); + prox_sh_add_socket(socket_id, targ->dscp, task->dscp); + } + } + + task->runtime_flags = targ->runtime_flags; + + for (uint32_t i = 0; i < 64; ++i) { + task->fake_packets[i] = (struct rte_mbuf*)((uint8_t*)&task->keys[i] - sizeof (struct rte_mbuf)); + } + + targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq); + targ->lconf->ctrl_func_m[targ->task] = arp_msg; + + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + task->offload_crc = port->capabilities.tx_offload_cksum; + } + + /* TODO: check if it is not necessary to limit reverse mapping + for the elements that have been changing in mapping? 
*/ + + for (uint32_t i =0 ; i < sizeof(targ->mapping)/sizeof(targ->mapping[0]); ++i) { + task->src_mac[targ->mapping[i]] = *(uint64_t*)&prox_port_cfg[i].eth_addr; + } + + /* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */ +} + +static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs) +{ + struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase; + struct arp_msg **msgs = (struct arp_msg **)data; + + arp_update_from_msg(task->cpe_table, msgs, n_msgs, task->cpe_timeout); +} + +static inline void add_key(struct task_args *targ, struct qinq_gre_map *qinq_gre_map, struct rte_table_hash* qinq_gre_table, uint32_t i, uint32_t *count) +{ + struct qinq_gre_data entry = { + .gre_id = qinq_gre_map->entries[i].gre_id, + .user = qinq_gre_map->entries[i].user, + }; + +#ifdef USE_QINQ + struct vlans qinq2 = { + .svlan = {.eth_proto = targ->qinq_tag, .vlan_tci = qinq_gre_map->entries[i].svlan}, + .cvlan = {.eth_proto = ETYPE_VLAN, .vlan_tci = qinq_gre_map->entries[i].cvlan} + }; + + int key_found = 0; + void* entry_in_hash = NULL; + rte_table_hash_key8_ext_dosig_ops.f_add(qinq_gre_table, &qinq2, &entry, &key_found, &entry_in_hash); + + plog_dbg("Core %u adding user %u (tag %x svlan %x cvlan %x), rss=%x\n", + targ->lconf->id, qinq_gre_map->entries[i].user, qinq2.svlan.eth_proto, + rte_bswap16(qinq_gre_map->entries[i].svlan), + rte_bswap16(qinq_gre_map->entries[i].cvlan), + qinq_gre_map->entries[i].rss); +#else + /* lower 3 bytes of IPv4 address contain svlan/cvlan. 
*/ + uint64_t ip = ((uint32_t)rte_bswap16(qinq_gre_map->entries[i].svlan) << 12) | + rte_bswap16(qinq_gre_map->entries[i].cvlan); + int key_found = 0; + void* entry_in_hash = NULL; + rte_table_hash_key8_ext_dosig_ops.f_add(qinq_gre_table, &ip, &entry, &key_found, &entry_in_hash); + + plog_dbg("Core %u hash table add: key = %016"PRIx64"\n", + targ->lconf->id, ip); +#endif + (*count)++; +} + +void init_qinq_gre_table(struct task_args *targ, struct qinq_gre_map *qinq_gre_map) +{ + struct rte_table_hash* qinq_gre_table; + uint8_t table_part = targ->nb_slave_threads; + if (!rte_is_power_of_2(table_part)) { + table_part = rte_align32pow2(table_part) >> 1; + } + + if (table_part == 0) + table_part = 1; + + uint32_t n_entries = MAX_GRE / table_part; + + struct rte_table_hash_key8_ext_params table_hash_params = { + .n_entries = n_entries, + .n_entries_ext = n_entries >> 1, + .f_hash = hash_crc32, + .seed = 0, + .signature_offset = HASH_METADATA_OFFSET(8), + .key_offset = HASH_METADATA_OFFSET(0), + }; + + qinq_gre_table = rte_table_hash_key8_ext_dosig_ops. + f_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), sizeof(struct qinq_gre_data)); + + // LB configuration known from Network Load Balancer + // Find LB network Load balancer, i.e. ENCAP friend. + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + enum task_mode smode = targ->lconf->targs[task_id].mode; + if (QINQ_ENCAP4 == smode) { + targ->lb_friend_core = targ->lconf->targs[task_id].lb_friend_core; + targ->lb_friend_task = targ->lconf->targs[task_id].lb_friend_task; + } + } + // Packet coming from Load balancer. 
LB could balance on gre_id LSB, qinq hash or qinq RSS + uint32_t flag_features = 0; + if (targ->lb_friend_core != 0xFF) { + struct task_args *lb_targ = &lcore_cfg[targ->lb_friend_core].targs[targ->lb_friend_task]; + flag_features = lb_targ->task_init->flag_features; + plog_info("\t\tWT %d Updated features to %x from friend %d\n", targ->lconf->id, flag_features, targ->lb_friend_core); + } else { + plog_info("\t\tWT %d has no friend\n", targ->lconf->id); + } + if (targ->nb_slave_threads == 0) { + // No slave threads, i.e. using RSS + plog_info("feature was %x is now %x\n", flag_features, TASK_FEATURE_LUT_QINQ_RSS); + flag_features = TASK_FEATURE_LUT_QINQ_RSS; + } + if ((flag_features & (TASK_FEATURE_GRE_ID|TASK_FEATURE_LUT_QINQ_RSS|TASK_FEATURE_LUT_QINQ_HASH)) == 0) { + plog_info("\t\tCould not find flag feature from Load balancer => supposing TASK_FEATURE_GRE_ID\n"); + flag_features = TASK_FEATURE_GRE_ID; + } + + /* Only store QinQ <-> GRE mapping for packets that are handled by this worker thread */ + uint32_t count = 0; + if (flag_features & TASK_FEATURE_LUT_QINQ_RSS) { + // If there is a load balancer, number of worker thread is indicated by targ->nb_slave_threads and n_rxq = 0 + // If there is no load balancers, number of worker thread is indicated by n_rxq and nb_slave_threads = 0 + uint8_t nb_worker_threads, worker_thread_id; + if (targ->nb_slave_threads) { + nb_worker_threads = targ->nb_slave_threads; + worker_thread_id = targ->worker_thread_id; + } else if (prox_port_cfg[targ->rx_port_queue[0].port].n_rxq) { + nb_worker_threads = prox_port_cfg[targ->rx_port_queue[0].port].n_rxq; + worker_thread_id = targ->rx_port_queue[0].queue; + } else { + PROX_PANIC(1, "Unexpected: unknown number of worker thread\n"); + } + plog_info("\t\tUsing %d worker_threads id %d\n", nb_worker_threads, worker_thread_id); + for (uint32_t i = 0; i < qinq_gre_map->count; ++i) { + if (targ->nb_slave_threads == 0 || rss_to_queue(qinq_gre_map->entries[i].rss, nb_worker_threads) == 
worker_thread_id) { + add_key(targ, qinq_gre_map, qinq_gre_table, i, &count); + //plog_info("Queue %d adding key %16lx, svlan %x cvlan %x, rss=%x\n", targ->rx_queue, *(uint64_t *)q, qinq_to_gre_lookup[i].svlan, qinq_to_gre_lookup[i].cvlan, qinq_to_gre_lookup[i].rss); + } + } + plog_info("\t\tAdded %d entries to worker thread %d\n", count, worker_thread_id); + } else if (flag_features & TASK_FEATURE_LUT_QINQ_HASH) { + for (uint32_t i = 0; i < qinq_gre_map->count; ++i) { + uint64_t cvlan = rte_bswap16(qinq_gre_map->entries[i].cvlan & 0xFF0F); + uint64_t svlan = rte_bswap16((qinq_gre_map->entries[i].svlan & 0xFF0F)); + uint64_t qinq = rte_bswap64((svlan << 32) | cvlan); + uint8_t queue = hash_crc32(&qinq, 8, 0) % targ->nb_slave_threads; + if (queue == targ->worker_thread_id) { + add_key(targ, qinq_gre_map, qinq_gre_table, i, &count); + } + } + plog_info("\t\tAdded %d entries to WT %d\n", count, targ->worker_thread_id); + } else if (flag_features & TASK_FEATURE_GRE_ID) { + for (uint32_t i = 0; i < qinq_gre_map->count; ++i) { + if (qinq_gre_map->entries[i].gre_id % targ->nb_slave_threads == targ->worker_thread_id) { + add_key(targ, qinq_gre_map, qinq_gre_table, i, &count); + } + } + } + + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + enum task_mode smode = targ->lconf->targs[task_id].mode; + if (QINQ_DECAP4 == smode) { + targ->lconf->targs[task_id].qinq_gre_table = qinq_gre_table; + } + + } +} + +void init_cpe4_table(struct task_args *targ) +{ + char name[64]; + sprintf(name, "core_%u_CPEv4Table", targ->lconf->id); + + uint8_t table_part = targ->nb_slave_threads; + if (!rte_is_power_of_2(table_part)) { + table_part = rte_align32pow2(table_part) >> 1; + } + + if (table_part == 0) + table_part = 1; + + uint32_t n_entries = MAX_GRE / table_part; + struct rte_table_hash_key8_ext_params table_hash_params = { + .n_entries = n_entries, + .n_entries_ext = n_entries >> 1, + .f_hash = hash_crc32, + .seed = 0, + .signature_offset = 
HASH_METADATA_OFFSET(8), + .key_offset = HASH_METADATA_OFFSET(0), + }; + size_t entry_size = sizeof(struct cpe_data); + if (!rte_is_power_of_2(entry_size)) { + entry_size = rte_align32pow2(entry_size); + } + + struct rte_table_hash* phash = rte_table_hash_key8_ext_dosig_ops. + f_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), entry_size); + PROX_PANIC(NULL == phash, "Unable to allocate memory for IPv4 hash table on core %u\n", targ->lconf->id); + + /* for locality, copy the pointer to the port structure where it is needed at packet handling time */ + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + enum task_mode smode = targ->lconf->targs[task_id].mode; + if (QINQ_ENCAP4 == smode || QINQ_DECAP4 == smode) { + targ->lconf->targs[task_id].cpe_table = phash; + } + } +} + +static void early_init_table(struct task_args* targ) +{ + if (!targ->cpe_table) { + init_cpe4_table(targ); + } +} + +static inline void restore_cpe(struct cpe_pkt *packet, struct cpe_data *table, __attribute__((unused)) uint16_t qinq_tag, uint64_t *src_mac) +{ +#ifdef USE_QINQ + struct qinq_hdr *pqinq = &packet->qinq_hdr; + rte_memcpy(pqinq, &qinq_proto, sizeof(struct qinq_hdr)); + (*(uint64_t *)(&pqinq->d_addr)) = table->mac_port_8bytes; + /* set source as well now */ + *((uint64_t *)(&pqinq->s_addr)) = *((uint64_t *)&src_mac[table->mac_port.out_idx]); + pqinq->svlan.vlan_tci = table->qinq_svlan; + pqinq->cvlan.vlan_tci = table->qinq_cvlan; + pqinq->svlan.eth_proto = qinq_tag; + pqinq->cvlan.eth_proto = ETYPE_VLAN; + pqinq->ether_type = ETYPE_IPv4; +#else + (*(uint64_t *)(&packet->ether_hdr.d_addr)) = table->mac_port_8bytes; + /* set source as well now */ + *((uint64_t *)(&packet->ether_hdr.s_addr)) = *((uint64_t *)&src_mac[table->mac_port.out_idx]); + packet->ether_hdr.ether_type = ETYPE_IPv4; + + packet->ipv4_hdr.dst_addr = rte_bswap32(10 << 24 | rte_bswap16(table->qinq_svlan) << 12 | rte_bswap16(table->qinq_cvlan)); +#endif +} + +static inline 
uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct cpe_pkt *cpe_pkt, struct rte_mbuf *mbuf, struct cpe_data *entry); + +/* Same functionality as handle_qinq_encap_v4_bulk but untag MPLS as well. */ +static int handle_qinq_encap4_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase; + uint8_t out[MAX_PKT_BURST]; + prefetch_pkts(mbufs, n_pkts); + + for (uint16_t j = 0; j < n_pkts; ++j) { + if (likely(mpls_untag(mbufs[j]))) { + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA); + out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], NULL); + } + else { + out[j] = OUT_DISCARD; + } + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static inline void extract_key_bulk(struct task_qinq_encap4 *task, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + for (uint16_t j = 0; j < n_pkts; ++j) { + extract_key_core(mbufs[j], &task->keys[j]); + } +} + +__attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf) +{ + struct core_net_pkt* core_pkt = rte_pktmbuf_mtod(mbuf, struct core_net_pkt *); + uint32_t dst_ip = core_pkt->ip_hdr.dst_addr; + uint32_t le_gre_id = rte_be_to_cpu_32(core_pkt->gre_hdr.gre_id); + + plogx_dbg("Unknown IP %x/gre_id %x\n", dst_ip, le_gre_id); +} + +static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase; + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + struct cpe_data* entries[64]; + uint8_t out[MAX_PKT_BURST]; + uint64_t lookup_hit_mask; + + prefetch_pkts(mbufs, n_pkts); + + for (uint16_t j = 0; j < n_pkts; ++j) { + struct ipv4_hdr* ip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *) + 1); + task->keys[j] = (uint64_t)ip->dst_addr; + } + rte_table_hash_key8_ext_dosig_ops.f_lookup(task->cpe_table, task->fake_packets, pkts_mask, 
&lookup_hit_mask, (void**)entries); + + if (likely(lookup_hit_mask == pkts_mask)) { + for (uint16_t j = 0; j < n_pkts; ++j) { + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)); + uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr); + + if (padlen) { + rte_pktmbuf_trim(mbufs[j], padlen); + } + out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]); + } + } + else { + for (uint16_t j = 0; j < n_pkts; ++j) { + if (unlikely(!((lookup_hit_mask >> j) & 0x1))) { + handle_error(mbufs[j]); + out[j] = OUT_DISCARD; + continue; + } + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)); + uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr); + + if (padlen) { + rte_pktmbuf_trim(mbufs[j], padlen); + } + out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]); + } + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} +int handle_qinq_encap4_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_encap4 *task = (struct task_qinq_encap4 *)tbase; + uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t); + struct cpe_data* entries[64]; + uint8_t out[MAX_PKT_BURST]; + uint64_t lookup_hit_mask; + + prefetch_pkts(mbufs, n_pkts); + + // From GRE ID and IP address, retrieve QinQ and MAC addresses + extract_key_bulk(task, mbufs, n_pkts); + rte_table_hash_key8_ext_dosig_ops.f_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries); + + if (likely(lookup_hit_mask == pkts_mask)) { + for (uint16_t j = 0; j < n_pkts; ++j) { + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA); + // We are receiving GRE tunnelled packets (and removing UPSTRAM_DELTA bytes), whose length is > 64 bytes + // So there should be no padding, but in case the is one, remove it + uint16_t 
padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr); + + if (padlen) { + rte_pktmbuf_trim(mbufs[j], padlen); + } + out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]); + } + } + else { + for (uint16_t j = 0; j < n_pkts; ++j) { + if (unlikely(!((lookup_hit_mask >> j) & 0x1))) { + handle_error(mbufs[j]); + out[j] = OUT_DISCARD; + continue; + } + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_adj(mbufs[j], UPSTREAM_DELTA); + uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr); + + if (padlen) { + rte_pktmbuf_trim(mbufs[j], padlen); + } + out[j] = handle_qinq_encap4(task, cpe_pkt, mbufs[j], entries[j]); + } + } + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct cpe_pkt *cpe_pkt, struct rte_mbuf *mbuf, struct cpe_data *entry) +{ + PROX_ASSERT(cpe_pkt); + + if (cpe_pkt->ipv4_hdr.time_to_live) { + cpe_pkt->ipv4_hdr.time_to_live--; + } + else { + plog_info("TTL = 0 => Dropping\n"); + return OUT_DISCARD; + } + cpe_pkt->ipv4_hdr.hdr_checksum = 0; + + restore_cpe(cpe_pkt, entry, task->qinq_tag, task->src_mac); + + if (task->runtime_flags & TASK_CLASSIFY) { + uint8_t queue = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] & 0x3; + uint8_t tc = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] >> 2; + + rte_sched_port_pkt_write(mbuf, 0, entry->user, tc, queue, 0); + } +#ifdef ENABLE_EXTRA_USER_STATISTICS + task->stats_per_user[entry->user]++; +#endif + if (task->runtime_flags & TASK_TX_CRC) { + prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + } + return entry->mac_port.out_idx; +} + +static void flow_iter_next(struct flow_iter *iter, struct task_args *targ) +{ + do { + iter->idx++; + uint8_t flag_features = iter->data; + + if (flag_features & TASK_FEATURE_LUT_QINQ_RSS) { + // If there is a load balancer, number of worker thread is indicated by 
targ->nb_slave_threads and n_rxq = 0 + // If there is no load balancers, number of worker thread is indicated by n_rxq and nb_slave_threads = 0 + uint8_t nb_worker_threads, worker_thread_id; + nb_worker_threads = 1; + worker_thread_id = 1; + if (targ->nb_slave_threads) { + nb_worker_threads = targ->nb_slave_threads; + worker_thread_id = targ->worker_thread_id; + } else if (prox_port_cfg[targ->rx_port_queue[0].port].n_rxq) { + nb_worker_threads = prox_port_cfg[targ->rx_port_queue[0].port].n_rxq; + worker_thread_id = targ->rx_port_queue[0].queue; + } else { + plog_err("Unexpected: unknown number of worker thread\n"); + } + + if (targ->nb_slave_threads == 0 || rss_to_queue(get_qinq_gre_map(targ)->entries[iter->idx].rss, nb_worker_threads) == worker_thread_id) + break; + } else if (flag_features & TASK_FEATURE_LUT_QINQ_HASH) { + uint64_t cvlan = rte_bswap16(get_qinq_gre_map(targ)->entries[iter->idx].cvlan & 0xFF0F); + uint64_t svlan = rte_bswap16(get_qinq_gre_map(targ)->entries[iter->idx].svlan & 0xFF0F); + uint64_t qinq = rte_bswap64((svlan << 32) | cvlan); + uint8_t queue = hash_crc32(&qinq, 8, 0) % targ->nb_slave_threads; + if (queue == targ->worker_thread_id) + break; + } else if (flag_features & TASK_FEATURE_GRE_ID) { + if (get_qinq_gre_map(targ)->entries[iter->idx].gre_id % targ->nb_slave_threads == targ->worker_thread_id) + break; + } + } while (iter->idx != (int)get_qinq_gre_map(targ)->count); +} + +static void flow_iter_beg(struct flow_iter *iter, struct task_args *targ) +{ + uint32_t flag_features = 0; + if (targ->lb_friend_core != 0xFF) { + struct task_args *lb_targ = &lcore_cfg[targ->lb_friend_core].targs[targ->lb_friend_task]; + flag_features = lb_targ->task_init->flag_features; + plog_info("\t\tWT %d Updated features to %x from friend %d\n", targ->lconf->id, flag_features, targ->lb_friend_core); + } else { + plog_info("\t\tWT %d has no friend\n", targ->lconf->id); + } + if (targ->nb_slave_threads == 0) { + // No slave threads, i.e. 
using RSS + plog_info("feature was %x is now %x\n", flag_features, TASK_FEATURE_LUT_QINQ_RSS); + flag_features = TASK_FEATURE_LUT_QINQ_RSS; + } + if ((flag_features & (TASK_FEATURE_GRE_ID|TASK_FEATURE_LUT_QINQ_RSS|TASK_FEATURE_LUT_QINQ_HASH)) == 0) { + plog_info("\t\tCould not find flag feature from Load balancer => supposing TASK_FEATURE_GRE_ID\n"); + flag_features = TASK_FEATURE_GRE_ID; + } + + iter->idx = -1; + flow_iter_next(iter, targ); +} + +static int flow_iter_is_end(struct flow_iter *iter, struct task_args *targ) +{ + return iter->idx == (int)get_qinq_gre_map(targ)->count; +} + +static uint32_t flow_iter_get_gre_id(struct flow_iter *iter, struct task_args *targ) +{ + return get_qinq_gre_map(targ)->entries[iter->idx].gre_id; +} + +static struct task_init task_init_qinq_encap4_table = { + .mode = QINQ_ENCAP4, + .mode_str = "qinqencapv4", + .early_init = early_init_table, + .init = init_task_qinq_encap4, + .handle = handle_qinq_encap4_bulk, + /* In this case user in qinq_lookup table is the QoS user + (from user_table), i.e. 
usually from 0 to 32K Otherwise it + would have been a user from (0 to n_interface x 32K) */ + .flow_iter = { + .beg = flow_iter_beg, + .is_end = flow_iter_is_end, + .next = flow_iter_next, + .get_gre_id = flow_iter_get_gre_id, + }, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct task_qinq_encap4) +}; + +static struct task_init task_init_qinq_encap4_table_pe = { + .mode = QINQ_ENCAP4, + .mode_str = "qinqencapv4", + .sub_mode_str = "pe", + .early_init = early_init_table, + .init = init_task_qinq_encap4, + .handle = handle_qinq_encap4_bulk_pe, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct task_qinq_encap4) +}; + +static struct task_init task_init_qinq_encap4_untag = { + .mode = QINQ_ENCAP4, + .sub_mode_str = "unmpls", + .mode_str = "qinqencapv4", + .init = init_task_qinq_encap4, + .handle = handle_qinq_encap4_untag_bulk, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct task_qinq_encap4) +}; + +__attribute__((constructor)) static void reg_task_qinq_encap4(void) +{ + reg_task(&task_init_qinq_encap4_table); + reg_task(&task_init_qinq_encap4_table_pe); + reg_task(&task_init_qinq_encap4_untag); +} diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.h b/VNFs/DPPD-PROX/handle_qinq_encap4.h new file mode 100644 index 00000000..639135e0 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_encap4.h @@ -0,0 +1,103 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HANDLE_QINQ_ENCAP4_H_ +#define _HANDLE_QINQ_ENCAP4_H_ + +#include <rte_ip.h> +#include <rte_ether.h> + +#include "log.h" +#include "prox_assert.h" +#include "etypes.h" +#include "mpls.h" +#include "task_init.h" + +struct task_qinq_encap4 { + struct task_base base; + struct rte_table_hash *cpe_table; + uint16_t qinq_tag; + uint64_t src_mac[PROX_MAX_PORTS]; + int offload_crc; + uint8_t runtime_flags; + uint8_t *dscp; + uint64_t keys[64]; + struct rte_mbuf* fake_packets[64]; + uint64_t cpe_timeout; +#ifdef ENABLE_EXTRA_USER_STATISTICS + uint32_t *stats_per_user; + uint32_t n_users; +#endif +}; + +struct qinq_gre_entry { + uint16_t svlan; + uint16_t cvlan; + uint32_t gre_id; + uint32_t user; + uint32_t rss; // RSS based on Toeplitz_hash(svlan and cvlan) +}; + +struct qinq_gre_map { + uint32_t count; + struct qinq_gre_entry entries[0]; +}; + +struct qinq_gre_map *get_qinq_gre_map(struct task_args *targ); + +struct task_args; +struct prox_shared; + +void init_qinq_gre_table(struct task_args *targ, struct qinq_gre_map *qinq_gre_map); +void init_qinq_gre_hash(struct task_args *targ, struct qinq_gre_map *qinq_gre_map); +void init_cpe4_table(struct task_args *targ); +void init_cpe4_hash(struct task_args *targ); + +static inline uint8_t mpls_untag(struct rte_mbuf *mbuf) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + const uint16_t eth_type = peth->ether_type; + + if (eth_type == ETYPE_MPLSU) { + struct ether_hdr *pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + const struct mpls_hdr *mpls = (const struct mpls_hdr *)(peth + 1); + + if (mpls->bos == 0) { + // Double MPLS tag + pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + PROX_ASSERT(pneweth); + } + + const struct ipv4_hdr *pip = (const struct ipv4_hdr *)(pneweth + 1); + if ((pip->version_ihl >> 4) == 4) { + pneweth->ether_type = ETYPE_IPv4; + return 1; + } + else if ((pip->version_ihl >> 4) == 6) { + pneweth->ether_type = ETYPE_IPv6; + return 1; + } + + 
plog_info("Error removing MPLS: unexpected IP version: %d\n", pip->version_ihl >> 4); + return 0; + } + if (eth_type != ETYPE_LLDP) { + plog_info("Error Removing MPLS: ether_type = %#06x\n", eth_type); + } + return 0; +} + +#endif /* _HANDLE_QINQ_ENCAP4_H_ */ diff --git a/VNFs/DPPD-PROX/handle_qinq_encap6.c b/VNFs/DPPD-PROX/handle_qinq_encap6.c new file mode 100644 index 00000000..e5b774da --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_encap6.c @@ -0,0 +1,224 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_table_hash.h> + +#include "handle_qinq_encap6.h" +#include "handle_qinq_encap4.h" +#include "task_base.h" +#include "qinq.h" +#include "defines.h" +#include "tx_pkt.h" +#include "hash_entry_types.h" +#include "prefetch.h" +#include "log.h" +#include "lconf.h" +#include "mpls.h" +#include "hash_utils.h" +#include "quit.h" + +struct task_qinq_encap6 { + struct task_base base; + uint16_t qinq_tag; + uint8_t tx_portid; + uint8_t runtime_flags; + struct rte_table_hash *cpe_table; +}; + +static void init_task_qinq_encap6(struct task_base *tbase, struct task_args *targ) +{ + struct task_qinq_encap6 *task = (struct task_qinq_encap6 *)tbase; + + task->qinq_tag = targ->qinq_tag; + task->cpe_table = targ->cpe_table; + task->runtime_flags = targ->runtime_flags; +} + +/* Encapsulate IPv6 packet in QinQ where the QinQ is derived from the IPv6 address */ +static inline uint8_t handle_qinq_encap6(struct rte_mbuf *mbuf, struct task_qinq_encap6 *task) +{ + struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(struct vlan_hdr)); + + PROX_ASSERT(pqinq); + struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1); + + if (pip6->hop_limits) { + pip6->hop_limits--; + } + else { + plog_info("TTL = 0 => Dropping\n"); + return OUT_DISCARD; + } + + // TODO: optimize to use bulk as intended with the rte_table_library + uint64_t pkts_mask = RTE_LEN2MASK(1, uint64_t); + uint64_t lookup_hit_mask; + struct cpe_data* entries[64]; // TODO: use bulk size + rte_table_hash_ext_dosig_ops.f_lookup(task->cpe_table, &mbuf, pkts_mask, &lookup_hit_mask, (void**)entries); + + if (lookup_hit_mask == 0x1) { + /* will also overwrite part of the destination addr */ + (*(uint64_t *)pqinq) = entries[0]->mac_port_8bytes; + pqinq->svlan.eth_proto = task->qinq_tag; + pqinq->cvlan.eth_proto = ETYPE_VLAN; + pqinq->svlan.vlan_tci = entries[0]->qinq_svlan; + pqinq->cvlan.vlan_tci = entries[0]->qinq_cvlan; + pqinq->ether_type = ETYPE_IPv6; + + /* classification can only be done 
from this point */ + if (task->runtime_flags & TASK_CLASSIFY) { + rte_sched_port_pkt_write(mbuf, 0, entries[0]->user, 0, 0, 0); + } + return 0; + } + else { + plogx_err("Unknown IP " IPv6_BYTES_FMT "\n", IPv6_BYTES(pip6->dst_addr)); + return OUT_DISCARD; + } +} + +void init_cpe6_table(struct task_args *targ) +{ + char name[64]; + sprintf(name, "core_%u_CPEv6Table", targ->lconf->id); + + uint8_t table_part = targ->nb_slave_threads; + if (!rte_is_power_of_2(table_part)) { + table_part = rte_align32pow2(table_part) >> 1; + } + + uint32_t n_entries = MAX_GRE / table_part; + struct rte_table_hash_ext_params table_hash_params = { + .key_size = sizeof(struct ipv6_addr), + .n_keys = n_entries, + .n_buckets = n_entries >> 2, + .n_buckets_ext = n_entries >> 3, + .f_hash = hash_crc32, + .seed = 0, + .signature_offset = HASH_METADATA_OFFSET(0), + .key_offset = HASH_METADATA_OFFSET(0), + }; + + size_t entry_size = sizeof(struct cpe_data); + if (!rte_is_power_of_2(entry_size)) { + entry_size = rte_align32pow2(entry_size); + } + + struct rte_table_hash* phash = rte_table_hash_ext_dosig_ops. 
+ f_create(&table_hash_params, rte_lcore_to_socket_id(targ->lconf->id), entry_size); + PROX_PANIC(phash == NULL, "Unable to allocate memory for IPv6 hash table on core %u\n", targ->lconf->id); + + for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) { + enum task_mode smode = targ->lconf->targs[task_id].mode; + if (smode == QINQ_DECAP6 || smode == QINQ_ENCAP6) { + targ->lconf->targs[task_id].cpe_table = phash; + } + } +} + +static void early_init(struct task_args *targ) +{ + if (!targ->cpe_table) { + init_cpe6_table(targ); + } +} + +static int handle_qinq_encap6_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_encap6 *task = (struct task_qinq_encap6 *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_qinq_encap6(mbufs[j], task); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_qinq_encap6(mbufs[j], task); + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static int handle_qinq_encap6_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qinq_encap6 *task = (struct task_qinq_encap6 *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + if (likely(mpls_untag(mbufs[j]))) { + out[j] = handle_qinq_encap6(mbufs[j], task); + } + else { + out[j] = OUT_DISCARD; + } + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < 
n_pkts; ++j) { + if (likely(mpls_untag(mbufs[j]))) { + out[j] = handle_qinq_encap6(mbufs[j], task); + } + else { + out[j] = OUT_DISCARD; + } + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static struct task_init task_init_qinq_encap6 = { + .mode = QINQ_ENCAP6, + .mode_str = "qinqencapv6", + .init = init_task_qinq_encap6, + .early_init = early_init, + .handle = handle_qinq_encap6_bulk, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct task_qinq_encap6) +}; + +static struct task_init task_init_qinq_encap6_untag = { + .mode = QINQ_ENCAP6, + .mode_str = "qinqencapv6", + .sub_mode_str = "unmpls", + .early_init = early_init, + .init = init_task_qinq_encap6, + .handle = handle_qinq_encap6_untag_bulk, + .flag_features = TASK_FEATURE_CLASSIFY, + .size = sizeof(struct task_qinq_encap6) +}; + +__attribute__((constructor)) static void reg_task_qinq_encap6(void) +{ + reg_task(&task_init_qinq_encap6); + reg_task(&task_init_qinq_encap6_untag); +} diff --git a/VNFs/DPPD-PROX/handle_qinq_encap6.h b/VNFs/DPPD-PROX/handle_qinq_encap6.h new file mode 100644 index 00000000..1f72b53c --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qinq_encap6.h @@ -0,0 +1,24 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HANDLE_QINQ_ENCAP6_H_ +#define _HANDLE_QINQ_ENCAP6_H_ + +struct task_args; + +void init_cpe6_table(struct task_args *targ); + +#endif /* _HANDLE_QINQ_ENCAP6_H_ */ diff --git a/VNFs/DPPD-PROX/handle_qos.c b/VNFs/DPPD-PROX/handle_qos.c new file mode 100644 index 00000000..eef64796 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qos.c @@ -0,0 +1,179 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_ip.h> +#include <rte_mbuf.h> +#include <rte_sched.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" + +#include "etypes.h" +#include "stats.h" +#include "task_init.h" +#include "lconf.h" +#include "task_base.h" +#include "defines.h" +#include "prefetch.h" +#include "handle_qos.h" +#include "log.h" +#include "quit.h" +#include "qinq.h" +#include "prox_cfg.h" +#include "prox_shared.h" + +struct task_qos { + struct task_base base; + struct rte_sched_port *sched_port; + uint16_t *user_table; + uint8_t *dscp; + uint32_t nb_buffered_pkts; + uint8_t runtime_flags; +}; + +uint32_t task_qos_n_pkts_buffered(struct task_base *tbase) +{ + struct task_qos *task = (struct task_qos *)tbase; + + return task->nb_buffered_pkts; +} + +static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_qos *task = (struct task_qos *)tbase; + int ret = 0; + + if (n_pkts) { + if (task->runtime_flags & TASK_CLASSIFY) { + uint16_t j; +#ifdef 
PROX_PREFETCH_OFFSET + for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + prefetch_nta(mbufs[j]); + } + for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + prefetch_nta(rte_pktmbuf_mtod(mbufs[j - 1], void *)); + } +#endif + uint8_t queue = 0; + uint8_t tc = 0; + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { + prefetch_nta(mbufs[j + PREFETCH_OFFSET]); + prefetch_nta(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); + const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbufs[j], const struct qinq_hdr *); + uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci); + if (pqinq->ether_type == ETYPE_IPv4) { + const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + queue = task->dscp[ipv4_hdr->type_of_service >> 2] & 0x3; + tc = task->dscp[ipv4_hdr->type_of_service >> 2] >> 2; + } else { + // Keep queue and tc = 0 for other packet types like ARP + queue = 0; + tc = 0; + } + + rte_sched_port_pkt_write(mbufs[j], 0, task->user_table[qinq], tc, queue, 0); + } +#ifdef PROX_PREFETCH_OFFSET + prefetch_nta(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbufs[j], const struct qinq_hdr *); + uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci); + if (pqinq->ether_type == ETYPE_IPv4) { + const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + queue = task->dscp[ipv4_hdr->type_of_service >> 2] & 0x3; + tc = task->dscp[ipv4_hdr->type_of_service >> 2] >> 2; + } else { + // Keep queue and tc = 0 for other packet types like ARP + queue = 0; + tc = 0; + } + + rte_sched_port_pkt_write(mbufs[j], 0, task->user_table[qinq], tc, queue, 0); + } +#endif + } + int16_t ret = rte_sched_port_enqueue(task->sched_port, mbufs, n_pkts); + task->nb_buffered_pkts += ret; + TASK_STATS_ADD_IDLE(&task->base.aux->stats, n_pkts - ret); + } + + if (task->nb_buffered_pkts) { + n_pkts = rte_sched_port_dequeue(task->sched_port, 
mbufs, 32); + if (likely(n_pkts)) { + task->nb_buffered_pkts -= n_pkts; + ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL); + } + } + return ret; +} + +static void init_task_qos(struct task_base *tbase, struct task_args *targ) +{ + struct task_qos *task = (struct task_qos *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + char name[64]; + + snprintf(name, sizeof(name), "qos_sched_port_%u_%u", targ->lconf->id, 0); + + targ->qos_conf.port_params.name = name; + targ->qos_conf.port_params.socket = socket_id; + task->sched_port = rte_sched_port_config(&targ->qos_conf.port_params); + + PROX_PANIC(task->sched_port == NULL, "failed to create sched_port"); + + plog_info("number of pipes: %d\n\n", targ->qos_conf.port_params.n_pipes_per_subport); + int err = rte_sched_subport_config(task->sched_port, 0, targ->qos_conf.subport_params); + PROX_PANIC(err != 0, "Failed setting up sched_port subport, error: %d", err); + + /* only single subport and single pipe profile is supported */ + for (uint32_t pipe = 0; pipe < targ->qos_conf.port_params.n_pipes_per_subport; ++pipe) { + err = rte_sched_pipe_config(task->sched_port, 0 , pipe, 0); + PROX_PANIC(err != 0, "failed setting up sched port pipe, error: %d", err); + } + + task->runtime_flags = targ->runtime_flags; + + task->user_table = prox_sh_find_socket(socket_id, "user_table"); + if (!task->user_table) { + PROX_PANIC(!strcmp(targ->user_table, ""), "No user table defined\n"); + int ret = lua_to_user_table(prox_lua(), GLOBAL, targ->user_table, socket_id, &task->user_table); + PROX_PANIC(ret, "Failed to create user table from config:\n%s\n", get_lua_to_errors()); + prox_sh_add_socket(socket_id, "user_table", task->user_table); + } + + if (task->runtime_flags & TASK_CLASSIFY) { + PROX_PANIC(!strcmp(targ->dscp, ""), "DSCP table not specified\n"); + task->dscp = prox_sh_find_socket(socket_id, targ->dscp); + if (!task->dscp) { + int ret = lua_to_dscp(prox_lua(), GLOBAL, targ->dscp, socket_id, 
&task->dscp); + PROX_PANIC(ret, "Failed to create dscp table from config:\n%s\n", get_lua_to_errors()); + prox_sh_add_socket(socket_id, targ->dscp, task->dscp); + } + } +} + +static struct task_init task_init_qos = { + .mode_str = "qos", + .init = init_task_qos, + .handle = handle_qos_bulk, + .flag_features = TASK_FEATURE_CLASSIFY | TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_MULTI_RX | TASK_FEATURE_ZERO_RX, + .size = sizeof(struct task_qos) +}; + +__attribute__((constructor)) static void reg_task_qos(void) +{ + reg_task(&task_init_qos); +} diff --git a/VNFs/DPPD-PROX/handle_qos.h b/VNFs/DPPD-PROX/handle_qos.h new file mode 100644 index 00000000..0fabe9b8 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_qos.h @@ -0,0 +1,26 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_QOS_H_ +#define _HANDLE_QOS_H_ + +#include <inttypes.h> + +struct task_base; + +uint32_t task_qos_n_pkts_buffered(struct task_base *tbase); + +#endif /* _HANDLE_QOS_H_ */ diff --git a/VNFs/DPPD-PROX/handle_read.c b/VNFs/DPPD-PROX/handle_read.c new file mode 100644 index 00000000..9a06a2b1 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_read.c @@ -0,0 +1,78 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_ip.h> + +#include "task_base.h" +#include "task_init.h" +#include "defines.h" +#include "prefetch.h" +#include "log.h" + +struct task_read { + struct task_base base; +}; + +static int handle_read_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_read *task = (struct task_read *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + uint64_t *first; + +#ifdef PROX_PREFETCH_OFFSET + for (j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + for (j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *)); + } +#endif + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + first = rte_pktmbuf_mtod(mbufs[j], uint64_t *); + out[j] = *first != 0? 0: OUT_DISCARD; + } +#ifdef PROX_PREFETCH_OFFSET + prefetch_nta(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + first = rte_pktmbuf_mtod(mbufs[j], uint64_t *); + out[j] = *first != 0? 
0: OUT_DISCARD; + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void init_task_read(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct task_args *targ) +{ +} + +static struct task_init task_init_read = { + .mode_str = "read", + .init = init_task_read, + .handle = handle_read_bulk, + .size = sizeof(struct task_read) +}; + +__attribute__((constructor)) static void reg_task_read(void) +{ + reg_task(&task_init_read); +} diff --git a/VNFs/DPPD-PROX/handle_routing.c b/VNFs/DPPD-PROX/handle_routing.c new file mode 100644 index 00000000..9dd45ed8 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_routing.c @@ -0,0 +1,321 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_lpm.h> +#include <rte_cycles.h> +#include <string.h> +#include <rte_version.h> +#include <rte_ip.h> +#include <rte_byteorder.h> + +#include "prox_lua.h" +#include "prox_lua_types.h" + +#include "quit.h" +#include "log.h" +#include "handle_routing.h" +#include "tx_pkt.h" +#include "gre.h" +#include "lconf.h" +#include "prox_port_cfg.h" +#include "etypes.h" +#include "prefetch.h" +#include "hash_entry_types.h" +#include "mpls.h" +#include "qinq.h" +#include "prox_cfg.h" +#include "ip6_addr.h" +#include "prox_shared.h" +#include "prox_cksum.h" +#include "mbuf_utils.h" + +struct task_routing { + struct task_base base; + uint8_t runtime_flags; + struct lcore_cfg *lconf; + struct rte_lpm *ipv4_lpm; + struct next_hop *next_hops; + int offload_crc; + uint32_t number_free_rules; + uint16_t qinq_tag; + uint32_t marking[4]; + uint64_t src_mac[PROX_MAX_PORTS]; +}; + +static void routing_update(struct task_base *tbase, void **data, uint16_t n_msgs) +{ + struct task_routing *task = (struct task_routing *)tbase; + struct route_msg *msg; + + for (uint16_t i = 0; i < n_msgs; ++i) { + msg = (struct route_msg *)data[i]; + + if (task->number_free_rules == 0) { + plog_warn("Failed adding route: %u.%u.%u.%u/%u: lpm table full\n", + msg->ip_bytes[0], msg->ip_bytes[1], msg->ip_bytes[2], + msg->ip_bytes[3], msg->prefix); + } else { + if (rte_lpm_add(task->ipv4_lpm, rte_bswap32(msg->ip), msg->prefix, msg->nh)) { + plog_warn("Failed adding route: %u.%u.%u.%u/%u\n", + msg->ip_bytes[0], msg->ip_bytes[1], msg->ip_bytes[2], + msg->ip_bytes[3], msg->prefix); + } else { + task->number_free_rules--; + } + } + } +} + +static void init_task_routing(struct task_base *tbase, struct task_args *targ) +{ + struct task_routing *task = (struct task_routing *)tbase; + const int socket_id = rte_lcore_to_socket_id(targ->lconf->id); + struct lpm4 *lpm; + + task->lconf = targ->lconf; + task->qinq_tag = targ->qinq_tag; + task->runtime_flags = targ->runtime_flags; + + 
PROX_PANIC(!strcmp(targ->route_table, ""), "route table not specified\n"); + if (targ->flags & TASK_ARG_LOCAL_LPM) { + int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm); + PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors()); + prox_sh_add_socket(socket_id, targ->route_table, lpm); + + task->number_free_rules = lpm->n_free_rules; + } + else { + lpm = prox_sh_find_socket(socket_id, targ->route_table); + if (!lpm) { + int ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm); + PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors()); + prox_sh_add_socket(socket_id, targ->route_table, lpm); + } + } + task->ipv4_lpm = lpm->rte_lpm; + task->next_hops = lpm->next_hops; + task->number_free_rules = lpm->n_free_rules; + + for (uint32_t i = 0; i < MAX_HOP_INDEX; i++) { + int tx_port = task->next_hops[i].mac_port.out_idx; + if ((tx_port > targ->nb_txports - 1) && (tx_port > targ->nb_txrings - 1)) { + PROX_PANIC(1, "Routing Table contains port %d but only %d tx port/ %d ring:\n", tx_port, targ->nb_txports, targ->nb_txrings); + } + } + + if (targ->nb_txrings) { + struct task_args *dtarg; + struct core_task ct; + for (uint32_t i = 0; i < targ->nb_txrings; ++i) { + ct = targ->core_task_set[0].core_task[i]; + dtarg = core_targ_get(ct.core, ct.task); + dtarg = find_reachable_task_sending_to_port(dtarg); + if (task->runtime_flags & TASK_MPLS_TAGGING) { + task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr))) | ((uint64_t)ETYPE_MPLSU << (64 - 16)); + } else { + task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[dtarg->tx_port_queue[0].port].eth_addr))) | ((uint64_t)ETYPE_IPv4 << (64 - 16)); + } + } + } else { + for (uint32_t i = 0; i < targ->nb_txports; ++i) { + if (task->runtime_flags & TASK_MPLS_TAGGING) { + task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr))) | 
((uint64_t)ETYPE_MPLSU << (64 - 16)); + } else { + task->src_mac[i] = (0x0000ffffffffffff & ((*(uint64_t*)&prox_port_cfg[targ->tx_port_queue[i].port].eth_addr))) | ((uint64_t)ETYPE_IPv4 << (64 - 16)); + } + } + } + + for (uint32_t i = 0; i < 4; ++i) { + task->marking[i] = rte_bswap32(targ->marking[i] << 9); + } + + struct prox_port_cfg *port = find_reachable_port(targ); + if (port) { + task->offload_crc = port->capabilities.tx_offload_cksum; + } + + targ->lconf->ctrl_func_m[targ->task] = routing_update; + targ->lconf->ctrl_timeout = freq_to_tsc(20); +} + +static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf *mbuf); + +static int handle_routing_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_routing *task = (struct task_routing *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_routing(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_routing(task, mbufs[j]); + } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void set_l2(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + *((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes; + *((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx]; +} + +static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx, uint16_t l2_len) +{ + struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, sizeof(struct mpls_hdr)); + l2_len += sizeof(struct mpls_hdr); + prox_ip_cksum(mbuf, (struct ipv4_hdr 
*)((uint8_t *)peth + l2_len), l2_len, sizeof(struct ipv4_hdr), task->offload_crc); + + *((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes; + *((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx]; + /* MPLSU ether_type written as high word of 64bit src_mac prepared by init_task_routing */ + struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); + + if (task->runtime_flags & TASK_MARK) { + enum rte_meter_color color = rte_sched_port_pkt_read_color(mbuf); + + *(uint32_t *)mpls = task->next_hops[nh_idx].mpls | task->marking[color] | 0x00010000; // Set BoS to 1 + } + else { + *(uint32_t *)mpls = task->next_hops[nh_idx].mpls | 0x00010000; // Set BoS to 1 + } +} + +static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_offset, struct rte_mbuf *mbuf) +{ + struct ipv4_hdr *ip = (struct ipv4_hdr*)(beg + ip_offset); + struct ether_hdr *peth_out; + uint8_t tx_port; + uint32_t dst_ip; + + if (unlikely(ip->version_ihl >> 4 != 4)) { + plog_warn("Offset: %d\n", ip_offset); + plog_warn("Expected to receive IPv4 packet but IP version was %d\n", + ip->version_ihl >> 4); + return OUT_DISCARD; + } + + switch(ip->next_proto_id) { + case IPPROTO_GRE: { + struct gre_hdr *pgre = (struct gre_hdr *)(ip + 1); + dst_ip = ((struct ipv4_hdr *)(pgre + 1))->dst_addr; + break; + } + case IPPROTO_TCP: + case IPPROTO_UDP: + dst_ip = ip->dst_addr; + break; + default: + /* Routing for other protocols is not implemented */ + return OUT_DISCARD; + } + +#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1) + uint32_t next_hop_index; +#else + uint8_t next_hop_index; +#endif + if (unlikely(rte_lpm_lookup(task->ipv4_lpm, rte_bswap32(dst_ip), &next_hop_index) != 0)) { + uint8_t* dst_ipp = (uint8_t*)&dst_ip; + plog_warn("lpm_lookup failed for ip %d.%d.%d.%d: rc = %d\n", + dst_ipp[0], dst_ipp[1], dst_ipp[2], dst_ipp[3], -ENOENT); + return OUT_DISCARD; + } + + tx_port = task->next_hops[next_hop_index].mac_port.out_idx; + if 
(task->runtime_flags & TASK_MPLS_TAGGING) { + uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - rte_be_to_cpu_16(ip->total_length) - ip_offset; + if (padlen) { + rte_pktmbuf_trim(mbuf, padlen); + } + + set_l2_mpls(task, mbuf, next_hop_index, ip_offset); + } + else { + set_l2(task, mbuf, next_hop_index); + } + return tx_port; +} + +static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf *mbuf) +{ + struct qinq_hdr *qinq; + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + + switch (peth->ether_type) { + case ETYPE_8021ad: { + struct qinq_hdr *qinq = (struct qinq_hdr *)peth; + if ((qinq->cvlan.eth_proto != ETYPE_VLAN)) { + plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto); + return OUT_DISCARD; + } + + return route_ipv4(task, (uint8_t*)qinq, sizeof(*qinq), mbuf); + } + case ETYPE_IPv4: + return route_ipv4(task, (uint8_t*)peth, sizeof(*peth), mbuf); + case ETYPE_MPLSU: { + /* skip MPLS headers if any for routing */ + struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); + uint32_t count = sizeof(struct ether_hdr); + while (!(mpls->bytes & 0x00010000)) { + mpls++; + count += sizeof(struct mpls_hdr); + } + count += sizeof(struct mpls_hdr); + + return route_ipv4(task, (uint8_t*)peth, count, mbuf); + } + default: + if (peth->ether_type == task->qinq_tag) { + struct qinq_hdr *qinq = (struct qinq_hdr *)peth; + if ((qinq->cvlan.eth_proto != ETYPE_VLAN)) { + plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto); + return OUT_DISCARD; + } + + return route_ipv4(task, (uint8_t*)qinq, sizeof(*qinq), mbuf); + } + plog_warn("Failed routing packet: ether_type %#06x is unknown\n", peth->ether_type); + return OUT_DISCARD; + } +} + +static struct task_init task_init_routing = { + .mode_str = "routing", + .init = init_task_routing, + .handle = handle_routing_bulk, + .flag_features = TASK_FEATURE_ROUTING, + .size = sizeof(struct task_routing) +}; + +__attribute__((constructor)) static void 
reg_task_routing(void) +{ + reg_task(&task_init_routing); +} diff --git a/VNFs/DPPD-PROX/handle_routing.h b/VNFs/DPPD-PROX/handle_routing.h new file mode 100644 index 00000000..e3dde93d --- /dev/null +++ b/VNFs/DPPD-PROX/handle_routing.h @@ -0,0 +1,29 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HANDLE_ROUTING_H_ +#define _HANDLE_ROUTING_H_ + +struct route_msg { + union { + uint32_t ip; + uint8_t ip_bytes[4]; + }; + uint32_t prefix; + uint32_t nh; +}; + +#endif /* _HANDLE_ROUTING_H_ */ diff --git a/VNFs/DPPD-PROX/handle_swap.c b/VNFs/DPPD-PROX/handle_swap.c new file mode 100644 index 00000000..8e5a94ce --- /dev/null +++ b/VNFs/DPPD-PROX/handle_swap.c @@ -0,0 +1,291 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_mbuf.h> +#include <rte_udp.h> + +#include "task_init.h" +#include "task_base.h" +#include "lconf.h" +#include "log.h" +#include "arp.h" +#include "handle_swap.h" +#include "prox_port_cfg.h" +#include "mpls.h" +#include "qinq.h" +#include "gre.h" +#include "prefetch.h" + +struct task_swap { + struct task_base base; + uint8_t src_dst_mac[12]; + uint32_t runtime_flags; + uint32_t tmp_ip; + uint32_t ip; +}; + +static void task_update_config(struct task_swap *task) +{ + if (unlikely(task->ip != task->tmp_ip)) + task->ip = task->tmp_ip; +} + +static void write_src_and_dst_mac(struct task_swap *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *hdr; + struct ether_addr mac; + + if (unlikely((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET))) { + /* Source and Destination mac hardcoded */ + hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac)); + } else { + hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + if (likely((task->runtime_flags & TASK_ARG_SRC_MAC_SET) == 0)) { + /* dst mac will be used as src mac */ + ether_addr_copy(&hdr->d_addr, &mac); + } + + if (unlikely(task->runtime_flags & TASK_ARG_DST_MAC_SET)) + ether_addr_copy((struct ether_addr *)&task->src_dst_mac[0], &hdr->d_addr); + else + ether_addr_copy(&hdr->s_addr, &hdr->d_addr); + + if (unlikely(task->runtime_flags & TASK_ARG_SRC_MAC_SET)) { + ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); + } else { + ether_addr_copy(&mac, &hdr->s_addr); + } + } +} +static inline int handle_arp_request(struct task_swap *task, struct ether_hdr_arp *hdr_arp, struct ether_addr *s_addr, uint32_t ip) +{ + if ((hdr_arp->arp.data.tpa == ip) || (ip == 0)) { + prepare_arp_reply(hdr_arp, s_addr); + memcpy(hdr_arp->ether_hdr.d_addr.addr_bytes, hdr_arp->ether_hdr.s_addr.addr_bytes, 6); + memcpy(hdr_arp->ether_hdr.s_addr.addr_bytes, s_addr, 6); + return 0; + } else 
if (task->runtime_flags & TASK_MULTIPLE_MAC) { + struct ether_addr tmp_s_addr; + create_mac(hdr_arp, &tmp_s_addr); + prepare_arp_reply(hdr_arp, &tmp_s_addr); + memcpy(hdr_arp->ether_hdr.d_addr.addr_bytes, hdr_arp->ether_hdr.s_addr.addr_bytes, 6); + memcpy(hdr_arp->ether_hdr.s_addr.addr_bytes, &tmp_s_addr, 6); + return 0; + } else { + plogx_dbg("Received ARP on unexpected IP %x, expecting %x\n", rte_be_to_cpu_32(hdr_arp->arp.data.tpa), rte_be_to_cpu_32(ip)); + return OUT_DISCARD; + } +} + +/* + * swap mode does not send arp requests, so does not expect arp replies + * Need to understand later whether we must send arp requests + */ +static inline int handle_arp_replies(struct task_swap *task, struct ether_hdr_arp *hdr_arp) +{ + return OUT_DISCARD; +} + +static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_swap *task = (struct task_swap *)tbase; + struct ether_hdr *hdr; + struct ether_addr mac; + struct ipv4_hdr *ip_hdr; + struct udp_hdr *udp_hdr; + uint32_t ip; + uint16_t port; + uint8_t out[64] = {0}; + struct mpls_hdr *mpls; + uint32_t mpls_len = 0; + struct qinq_hdr *qinq; + struct vlan_hdr *vlan; + struct ether_hdr_arp *hdr_arp; + uint16_t j; + + for (j = 0; j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + for (j = 0; j < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j], void *)); + } + + for (uint16_t j = 0; j < n_pkts; ++j) { + hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *); + switch (hdr->ether_type) { + case ETYPE_ARP: + hdr_arp = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *); + if (arp_is_gratuitous(hdr_arp)) { + plog_info("Received gratuitous packet \n"); + out[j] = OUT_DISCARD; + } else if (hdr_arp->arp.oper == ARP_REQUEST) { + out[j] = handle_arp_request(task, hdr_arp, (struct ether_addr *)&task->src_dst_mac[6], task->ip); + } else if (hdr_arp->arp.oper == ARP_REPLY) { + out[j] = handle_arp_replies(task, hdr_arp); + } else { + plog_info("Received unexpected ARP operation %d\n", 
hdr_arp->arp.oper); + out[j] = OUT_DISCARD; + } + continue; + case ETYPE_MPLSU: + mpls = (struct mpls_hdr *)(hdr + 1); + while (!(mpls->bytes & 0x00010000)) { + mpls++; + mpls_len += sizeof(struct mpls_hdr); + } + mpls_len += sizeof(struct mpls_hdr); + ip_hdr = (struct ipv4_hdr *)(mpls + 1); + break; + case ETYPE_8021ad: + qinq = (struct qinq_hdr *)hdr; + if (qinq->cvlan.eth_proto != ETYPE_VLAN) { + plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto); + out[j] = OUT_DISCARD; + continue; + } + ip_hdr = (struct ipv4_hdr *)(qinq + 1); + break; + case ETYPE_VLAN: + vlan = (struct vlan_hdr *)(hdr + 1); + if (vlan->eth_proto == ETYPE_IPv4) { + ip_hdr = (struct ipv4_hdr *)(vlan + 1); + } else if (vlan->eth_proto == ETYPE_VLAN) { + vlan = (struct vlan_hdr *)(vlan + 1); + if (vlan->eth_proto == ETYPE_IPv4) { + ip_hdr = (struct ipv4_hdr *)(vlan + 1); + } + else if (vlan->eth_proto == ETYPE_IPv6) { + plog_warn("Unsupported IPv6\n"); + out[j] = OUT_DISCARD; + continue; + } + else { + plog_warn("Unsupported packet type\n"); + out[j] = OUT_DISCARD; + continue; + } + } else { + plog_warn("Unsupported packet type\n"); + out[j] = OUT_DISCARD; + continue; + } + break; + case ETYPE_IPv4: + ip_hdr = (struct ipv4_hdr *)(hdr + 1); + break; + case ETYPE_IPv6: + plog_warn("Unsupported IPv6\n"); + out[j] = OUT_DISCARD; + continue; + case ETYPE_LLDP: + out[j] = OUT_DISCARD; + continue; + default: + plog_warn("Unsupported ether_type 0x%x\n", hdr->ether_type); + out[j] = OUT_DISCARD; + continue; + } + udp_hdr = (struct udp_hdr *)(ip_hdr + 1); + ip = ip_hdr->dst_addr; + ip_hdr->dst_addr = ip_hdr->src_addr; + ip_hdr->src_addr = ip; + if (ip_hdr->next_proto_id == IPPROTO_GRE) { + struct gre_hdr *pgre = (struct gre_hdr *)(ip_hdr + 1); + struct ipv4_hdr *inner_ip_hdr = ((struct ipv4_hdr *)(pgre + 1)); + ip = inner_ip_hdr->dst_addr; + inner_ip_hdr->dst_addr = inner_ip_hdr->src_addr; + inner_ip_hdr->src_addr = ip; + udp_hdr = (struct udp_hdr *)(inner_ip_hdr + 1); + port = 
udp_hdr->dst_port; + udp_hdr->dst_port = udp_hdr->src_port; + udp_hdr->src_port = port; + } else { + port = udp_hdr->dst_port; + udp_hdr->dst_port = udp_hdr->src_port; + udp_hdr->src_port = port; + } + write_src_and_dst_mac(task, mbufs[j]); + } + task_update_config(task); + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static void init_task_swap(struct task_base *tbase, struct task_args *targ) +{ + struct task_swap *task = (struct task_swap *)tbase; + struct ether_addr *src_addr, *dst_addr; + + /* + * Destination MAC can come from + * - pre-configured mac in case 'dst mac=xx:xx:xx:xx:xx:xx' in config file + * - src mac from the packet in case 'dst mac=packet' in config file + * - not written in case 'dst mac=no' in config file + * - (default - no 'dst mac') src mac from the packet + * Source MAC can come from + * - pre-configured mac in case 'src mac=xx:xx:xx:xx:xx:xx' in config file + * - dst mac from the packet in case 'src mac=packet' in config file + * - not written in case 'src mac=no' in config file + * - (default - no 'src mac') if (tx_port) port mac + * - (default - no 'src mac') if (no tx_port) dst mac from the packet + */ + + if (targ->flags & TASK_ARG_DST_MAC_SET) { + dst_addr = &targ->edaddr; + memcpy(&task->src_dst_mac[0], dst_addr, sizeof(*src_addr)); + } + + if (targ->flags & TASK_ARG_SRC_MAC_SET) { + src_addr = &targ->esaddr; + memcpy(&task->src_dst_mac[6], src_addr, sizeof(*dst_addr)); + plog_info("\t\tCore %d: src mac set from config file\n", targ->lconf->id); + } else if (targ->nb_txports) { + src_addr = &prox_port_cfg[task->base.tx_params_hw.tx_port_queue[0].port].eth_addr; + memcpy(&task->src_dst_mac[6], src_addr, sizeof(*dst_addr)); + if (targ->flags & TASK_ARG_HW_SRC_MAC){ + targ->flags |= TASK_ARG_SRC_MAC_SET; + plog_info("\t\tCore %d: src mac set from port\n", targ->lconf->id); + } + } + task->runtime_flags = targ->flags; + task->ip = rte_cpu_to_be_32(targ->local_ipv4); + task->tmp_ip = task->ip; +} + +static struct 
task_init task_init_swap = { + .mode_str = "swap", + .init = init_task_swap, + .handle = handle_swap_bulk, + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, + .size = sizeof(struct task_swap), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +static struct task_init task_init_swap_arp = { + .mode_str = "swap", + .sub_mode_str = "l3", + .init = init_task_swap, + .handle = handle_swap_bulk, + .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, + .size = sizeof(struct task_swap), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_swap(void) +{ + reg_task(&task_init_swap); + reg_task(&task_init_swap_arp); +} diff --git a/VNFs/DPPD-PROX/handle_swap.h b/VNFs/DPPD-PROX/handle_swap.h new file mode 100644 index 00000000..ef2fee04 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_swap.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HANDLE_SWAP_H_ +#define _HANDLE_SWAP_H_ + +struct task_base; +void task_swap_set_local_ip(struct task_base *tbase, uint32_t ip); + +#endif /* _HANDLE_SWAP_H_ */ diff --git a/VNFs/DPPD-PROX/handle_tsc.c b/VNFs/DPPD-PROX/handle_tsc.c new file mode 100644 index 00000000..e686aaa2 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_tsc.c @@ -0,0 +1,51 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_mbuf.h> +#include <rte_cycles.h> + +#include "task_base.h" +#include "task_init.h" +#include "thread_generic.h" + +struct task_tsc { + struct task_base base; +}; + +static int handle_bulk_tsc(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_tsc *task = (struct task_tsc *)tbase; + const uint64_t rx_tsc = rte_rdtsc(); + + for (uint16_t j = 0; j < n_pkts; ++j) + mbufs[j]->udata64 = rx_tsc; + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL); +} + +static struct task_init task_init = { + .mode_str = "tsc", + .init = NULL, + .handle = handle_bulk_tsc, + .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_THROUGHPUT_OPT, + .size = sizeof(struct task_tsc), + .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM, +}; + +__attribute__((constructor)) static void reg_task_nop(void) +{ + reg_task(&task_init); +} diff --git a/VNFs/DPPD-PROX/handle_untag.c 
b/VNFs/DPPD-PROX/handle_untag.c new file mode 100644 index 00000000..2fc8fe64 --- /dev/null +++ b/VNFs/DPPD-PROX/handle_untag.c @@ -0,0 +1,144 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_ip.h> + +#include "log.h" +#include "tx_pkt.h" +#include "task_base.h" +#include "task_init.h" +#include "mpls.h" +#include "defines.h" +#include "prefetch.h" +#include "qinq.h" +#include "prox_assert.h" +#include "etypes.h" + +struct task_untag { + struct task_base base; + uint16_t etype; +}; + +static void init_task_untag(struct task_base *tbase, __attribute__((unused)) struct task_args *targ) +{ + struct task_untag *task = (struct task_untag *)tbase; + task->etype = targ->etype; +} + +static inline uint8_t handle_untag(struct task_untag *task, struct rte_mbuf *mbuf); + +static int handle_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + struct task_untag *task = (struct task_untag *)tbase; + uint8_t out[MAX_PKT_BURST]; + uint16_t j; + + prefetch_first(mbufs, n_pkts); + + for (j = 0; j + PREFETCH_OFFSET < n_pkts; ++j) { +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(mbufs[j + PREFETCH_OFFSET]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j + PREFETCH_OFFSET - 1], void *)); +#endif + out[j] = handle_untag(task, mbufs[j]); + } +#ifdef PROX_PREFETCH_OFFSET + PREFETCH0(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *)); + for (; j < n_pkts; ++j) { + out[j] = handle_untag(task, mbufs[j]); 
+ } +#endif + + return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); +} + +static inline uint8_t untag_mpls(struct rte_mbuf *mbuf, struct ether_hdr *peth) +{ + struct ether_hdr *pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + const struct mpls_hdr *mpls = (const struct mpls_hdr *)(peth + 1); + const struct ipv4_hdr *pip = (const struct ipv4_hdr *)(mpls + 1); + PROX_ASSERT(pneweth); + + if (mpls->bos == 0) { + // Double MPLS tag + pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + PROX_ASSERT(pneweth); + } + + if ((pip->version_ihl >> 4) == 4) { + pneweth->ether_type = ETYPE_IPv4; + return 0; + } + else if ((pip->version_ihl >> 4) == 6) { + pneweth->ether_type = ETYPE_IPv6; + return 0; + } + + plog_warn("Failed Decoding MPLS Packet - neither IPv4 neither IPv6: version %u\n", pip->version_ihl); + return OUT_DISCARD; +} + +static uint8_t untag_qinq(struct rte_mbuf *mbuf, struct qinq_hdr *qinq) +{ + if ((qinq->cvlan.eth_proto != ETYPE_VLAN)) { + plog_warn("Unexpected proto in QinQ = %#04x\n", qinq->cvlan.eth_proto); + return OUT_DISCARD; + } + + rte_pktmbuf_adj(mbuf, sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)); + return 0; +} + +static inline uint8_t handle_untag(struct task_untag *task, struct rte_mbuf *mbuf) +{ + struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + const uint16_t etype = peth->ether_type; + + if (etype != task->etype) { + plog_warn("Failed Removing %04x tag: ether_type = %#06x\n", task->etype, peth->ether_type); + return OUT_DISCARD; + } + + switch (etype) { + case ETYPE_MPLSU: + /* MPLS Decapsulation */ + return untag_mpls(mbuf, peth); + case ETYPE_LLDP: + return OUT_DISCARD; + case ETYPE_IPv6: + return 0; + case ETYPE_IPv4: + return 0; + case ETYPE_8021ad: + case ETYPE_VLAN: + return untag_qinq(mbuf, (struct qinq_hdr *)peth); + default: + plog_warn("Failed untagging header: ether_type = %#06x is not supported\n", peth->ether_type); + return OUT_DISCARD; + } +} + +static struct task_init 
task_init_untag = { + .mode_str = "untag", + .init = init_task_untag, + .handle = handle_untag_bulk, + .size = sizeof(struct task_untag) +}; + +__attribute__((constructor)) static void reg_task_untag(void) +{ + reg_task(&task_init_untag); +} diff --git a/VNFs/DPPD-PROX/hash_entry_types.h b/VNFs/DPPD-PROX/hash_entry_types.h new file mode 100644 index 00000000..e2cbcb3c --- /dev/null +++ b/VNFs/DPPD-PROX/hash_entry_types.h @@ -0,0 +1,71 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _HASH_ENTRY_TYPES_H_ +#define _HASH_ENTRY_TYPES_H_ + +#include <rte_ether.h> + +struct ether_addr_port { + struct ether_addr mac; + uint8_t pad; + uint8_t out_idx; +}; + +struct next_hop { + uint32_t ip_dst; + uint32_t mpls; + union { + uint64_t mac_port_8bytes; + struct ether_addr_port mac_port; + }; +}; + +struct next_hop6 { + uint8_t ip_dst[16]; + uint32_t mpls; + union { + uint64_t mac_port_8bytes; + struct ether_addr_port mac_port; + }; +}; + +struct cpe_data { + uint16_t qinq_svlan; + uint16_t qinq_cvlan; + uint32_t user; + union { + uint64_t mac_port_8bytes; + struct ether_addr_port mac_port; + uint8_t mac_port_b[8]; + }; + uint64_t tsc; +}; + +struct cpe_key { + union { + uint32_t ip; + uint8_t ip_bytes[4]; + }; + uint32_t gre_id; +} __attribute__((__packed__)); + +struct qinq_gre_data { + uint32_t gre_id; + uint32_t user; +} __attribute__((__packed__)); + +#endif /* _HASH_ENTRY_TYPES_H_ */ diff --git a/VNFs/DPPD-PROX/hash_set.c b/VNFs/DPPD-PROX/hash_set.c new file mode 100644 index 00000000..5ea93e96 --- /dev/null +++ b/VNFs/DPPD-PROX/hash_set.c @@ -0,0 +1,105 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_hash_crc.h> +#include <string.h> + +#include "prox_malloc.h" +#include "prox_assert.h" +#include "hash_set.h" + +#define HASH_SET_ALLOC_CHUNCK 1024 +#define HASH_SET_ALLOC_CHUNCK_MEM (sizeof(struct hash_set_entry) * 1024) + +struct hash_set_entry { + uint32_t crc; + void *data; + size_t len; + struct hash_set_entry *next; +}; + +struct hash_set { + uint32_t n_buckets; + int socket_id; + struct hash_set_entry *alloc; + size_t alloc_count; + struct hash_set_entry *mem[0]; +}; + +static struct hash_set_entry *hash_set_alloc_entry(struct hash_set *hs) +{ + struct hash_set_entry *ret; + + if (hs->alloc_count == 0) { + size_t mem_size = HASH_SET_ALLOC_CHUNCK * + sizeof(struct hash_set_entry); + + hs->alloc = prox_zmalloc(mem_size, hs->socket_id); + hs->alloc_count = HASH_SET_ALLOC_CHUNCK; + } + + ret = hs->alloc; + hs->alloc++; + hs->alloc_count--; + return ret; +} + +struct hash_set *hash_set_create(uint32_t n_buckets, int socket_id) +{ + struct hash_set *ret; + size_t mem_size = sizeof(*ret) + sizeof(ret->mem[0]) * n_buckets; + + ret = prox_zmalloc(mem_size, socket_id); + ret->n_buckets = n_buckets; + ret->socket_id = socket_id; + + return ret; +} + +void *hash_set_find(struct hash_set *hs, void *data, size_t len) +{ + uint32_t crc = rte_hash_crc(data, len, 0); + + struct hash_set_entry *entry = hs->mem[crc % hs->n_buckets]; + + while (entry) { + if (entry->crc == crc && entry->len == len && + memcmp(entry->data, data, len) == 0) + return entry->data; + entry = entry->next; + } + return NULL; +} + +void hash_set_add(struct hash_set *hs, void *data, size_t len) +{ + uint32_t crc = rte_hash_crc(data, len, 0); + struct hash_set_entry *new = hash_set_alloc_entry(hs); + + new->data = data; + new->len = len; + new->crc = crc; + + if (hs->mem[crc % hs->n_buckets]) { + struct hash_set_entry *entry = hs->mem[crc % hs->n_buckets]; + while (entry->next) + entry = entry->next; + entry->next = new; + } + else { + hs->mem[crc % hs->n_buckets] = new; + } +} diff 
--git a/VNFs/DPPD-PROX/hash_set.h b/VNFs/DPPD-PROX/hash_set.h new file mode 100644 index 00000000..72345215 --- /dev/null +++ b/VNFs/DPPD-PROX/hash_set.h @@ -0,0 +1,26 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HASH_SET_H_ +#define _HASH_SET_H_ + +struct hash_set; + +struct hash_set *hash_set_create(uint32_t n_buckets, int socket_id); +void *hash_set_find(struct hash_set *hs, void *data, size_t len); +void hash_set_add(struct hash_set *hs, void *data, size_t len); + +#endif /* _HASH_SET_H_ */ diff --git a/VNFs/DPPD-PROX/hash_utils.c b/VNFs/DPPD-PROX/hash_utils.c new file mode 100644 index 00000000..4ebab94e --- /dev/null +++ b/VNFs/DPPD-PROX/hash_utils.c @@ -0,0 +1,184 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> +#include <rte_hash_crc.h> +#include <rte_table_hash.h> +#include <rte_version.h> + +#include "hash_utils.h" + +/* These opaque structure definitions were copied from DPDK lib/librte_table/rte_table_hash_key8.c */ + +struct rte_bucket_4_8 { + /* Cache line 0 */ + uint64_t signature; + uint64_t lru_list; + struct rte_bucket_4_8 *next; + uint64_t next_valid; + + uint64_t key[4]; + + /* Cache line 1 */ + uint8_t data[0]; +}; + +struct rte_table_hash_key8 { +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + struct rte_table_stats stats; +#endif + /* Input parameters */ + uint32_t n_buckets; + uint32_t n_entries_per_bucket; + uint32_t key_size; + uint32_t entry_size; + uint32_t bucket_size; + uint32_t signature_offset; + uint32_t key_offset; +#if RTE_VERSION >= RTE_VERSION_NUM(2,2,0,0) + uint64_t key_mask; +#endif + rte_table_hash_op_hash f_hash; + uint64_t seed; + + /* Extendible buckets */ + uint32_t n_buckets_ext; + uint32_t stack_pos; + uint32_t *stack; + + /* Lookup table */ + uint8_t memory[0] __rte_cache_aligned; +}; + +/* These opaque structure definitions were copied from DPDK lib/librte_table/rte_table_hash_ext.c */ + +struct bucket { + union { + uintptr_t next; + uint64_t lru_list; + }; + uint16_t sig[4]; + uint32_t key_pos[4]; +}; + +#define BUCKET_NEXT(bucket) \ + ((void *) ((bucket)->next & (~1LU))) + +struct grinder { + struct bucket *bkt; + uint64_t sig; + uint64_t match; + uint32_t key_index; +}; + +struct rte_table_hash_ext { +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + struct rte_table_stats stats; +#endif + /* Input parameters */ + uint32_t key_size; + uint32_t entry_size; + uint32_t n_keys; + uint32_t n_buckets; + uint32_t n_buckets_ext; + rte_table_hash_op_hash f_hash; + uint64_t seed; + uint32_t signature_offset; + uint32_t key_offset; + + /* Internal */ + uint64_t bucket_mask; + uint32_t key_size_shl; + uint32_t data_size_shl; + uint32_t key_stack_tos; + uint32_t bkt_ext_stack_tos; + + /* Grinder */ + struct grinder 
grinders[64]; + + /* Tables */ + struct bucket *buckets; + struct bucket *buckets_ext; + uint8_t *key_mem; + uint8_t *data_mem; + uint32_t *key_stack; + uint32_t *bkt_ext_stack; + + /* Table memory */ + uint8_t memory[0] __rte_cache_aligned; +}; + +uint64_t get_bucket(void* table, uint32_t bucket_idx, void** key, void** entries) +{ + struct rte_table_hash_ext *t = (struct rte_table_hash_ext *) table; + struct bucket *bkt0, *bkt, *bkt_prev; + uint64_t sig; + uint32_t bkt_index, i; + uint8_t n = 0; + bkt_index = bucket_idx & t->bucket_mask; + bkt0 = &t->buckets[bkt_index]; + sig = (bucket_idx >> 16) | 1LLU; + + /* Key is present in the bucket */ + for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt)) { + for (i = 0; i < 4; i++) { + uint64_t bkt_sig = (uint64_t) bkt->sig[i]; + uint32_t bkt_key_index = bkt->key_pos[i]; + uint8_t *bkt_key = + &t->key_mem[bkt_key_index << t->key_size_shl]; + + if (sig == bkt_sig) { + key[n] = bkt_key; + entries[n++] = &t->data_mem[bkt_key_index << t->data_size_shl]; + /* Assume no more than 4 entries in total (including extended state) */ + if (n == 4) + return t->n_buckets; + } + } + } + return t->n_buckets; +} + +uint64_t get_bucket_key8(void* table, uint32_t bucket_idx, void** key, void** entries) +{ + struct rte_bucket_4_8 *bucket, *bucket0; + struct rte_table_hash_key8* f = table; + uint8_t n = 0; + + bucket0 = (struct rte_bucket_4_8 *) &f->memory[bucket_idx * f->bucket_size]; + for (bucket = bucket0; bucket != NULL; bucket = bucket->next) { + uint64_t mask; + + for (uint8_t i = 0, mask = 1LLU; i < 4; i++, mask <<= 1) { + uint64_t bucket_signature = bucket->signature; + + if (bucket_signature & mask) { + key[n] = &bucket->key[i]; + entries[n++] = &bucket->data[i *f->entry_size]; + /* Assume no more than 4 entries + in total (including extended state) */ + if (n == 4) + return f->n_buckets; + } + } + } + return f->n_buckets; +} + +uint64_t hash_crc32(void* key, uint32_t key_size, uint64_t seed) +{ + return rte_hash_crc(key, key_size, 
seed); +} diff --git a/VNFs/DPPD-PROX/hash_utils.h b/VNFs/DPPD-PROX/hash_utils.h new file mode 100644 index 00000000..a2536ffb --- /dev/null +++ b/VNFs/DPPD-PROX/hash_utils.h @@ -0,0 +1,42 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HASH_UTILS_H_ +#define _HASH_UTILS_H_ + +#include <rte_common.h> +#include <rte_version.h> + +struct rte_table_hash; + +/* Take DPDK 2.2.0 ABI change into account: offset 0 now means first byte of mbuf struct + * see http://www.dpdk.org/browse/dpdk/commit/?id=ba92d511ddacf863fafaaa14c0577f30ee57d092 + */ +#if RTE_VERSION >= RTE_VERSION_NUM(2,2,0,0) +#define HASH_METADATA_OFFSET(offset) (sizeof(struct rte_mbuf) + (offset)) +#else +#define HASH_METADATA_OFFSET(offset) (offset) +#endif + +/* Wrap crc32 hash function to match that required for rte_table */ +uint64_t hash_crc32(void* key, uint32_t key_size, uint64_t seed); + +void print_hash_table_size(const struct rte_table_hash *h); +void print_hash_table(const struct rte_table_hash *h); + +uint64_t get_bucket_key8(void* table, uint32_t bucket_idx, void** key, void** entries); +uint64_t get_bucket(void* table, uint32_t bucket_idx, void** key, void** entries); +#endif /* _HASH_UTILS_H_ */ diff --git a/VNFs/DPPD-PROX/heap.c b/VNFs/DPPD-PROX/heap.c new file mode 100644 index 00000000..69b0736e --- /dev/null +++ b/VNFs/DPPD-PROX/heap.c @@ -0,0 +1,515 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <stdio.h> +#include <stddef.h> +#include <rte_version.h> +#include <rte_prefetch.h> +#include <rte_memory.h> + +#include "prox_malloc.h" +#include "prox_assert.h" +#include "heap.h" +#include "log.h" + +#include <string.h> +#include <stddef.h> +#include <stdlib.h> + +struct heap_elem { + uint64_t priority; + struct heap_ref *ref; + struct heap_elem *prev; + struct heap_elem *next; + struct heap_elem *child; +}; + +struct strl { + char *str; + size_t len; +}; + +int heap_top_is_lower(struct heap *h, uint64_t prio) +{ + return !heap_is_empty(h) && h->top->priority < prio; +} + +static int heap_elem_check(struct heap_elem *e, int is_top) +{ + if (!e) + return 1; + if (e != e->prev && + e != e->next && + e != e->child) + return 1; + else + return 0; + + if (is_top && e->prev != NULL) + return 0; + if (!is_top && e->prev == NULL) + return 0; + + if (e->next) { + if (e->next->prev != e) + return 0; + + if (heap_elem_check(e->next, 0)) + return 1; + else + return 0; + } + + if (e->child) { + if (e->child->prev != e) + return 0; + + if (heap_elem_check(e->child, 0)) + return 1; + else + return 0; + } + + return 1; +} + +static int heap_elem_in_heap_elem(struct heap_elem *in, struct heap_elem *find) +{ + if (in == find) + return 1; + + if (in->next) { + if (heap_elem_in_heap_elem(in->next, find)) + return 1; + } + if (in->child) { + if (heap_elem_in_heap_elem(in->child, find)) + return 1; + 
} + + return 0; +} + +static int heap_elem_in_heap(struct heap *h, struct heap_elem *e) +{ + if (h->top == NULL) + return 0; + + return heap_elem_in_heap_elem(h->top, e); +} + +static int heap_elem_is_avail(struct heap *h, struct heap_elem *e) +{ + for (uint32_t i = 0; i < h->n_avail; ++i) { + if (h->avail[i] == e) + return 1; + } + return 0; +} + +static uint32_t heap_elem_calc_size(struct heap_elem *e) +{ + int ret = 0; + + if (e) + ret++; + else + return ret; + + if (e->next) + ret += heap_elem_calc_size(e->next); + if (e->child) + ret += heap_elem_calc_size(e->child); + return ret; +} + +static uint32_t heap_calc_size(struct heap *h) +{ + return heap_elem_calc_size(h->top); +} + +static void cat_indent(struct strl *s, int indent) +{ + size_t r; + + if (s->len < 50) + return ; + + for (int i = 0; i < indent; ++i) { + r = snprintf(s->str, s->len, " "); + s->str += r; + s->len -= r; + } +} + +static void cat_priority(struct strl *s, uint64_t priority) +{ + size_t r; + + if (s->len < 50) + return ; + + r = snprintf(s->str, s->len, "%"PRIu64"\n", priority); + s->str += r; + s->len -= r; +} + +static void heap_print2(struct heap_elem *e, int indent, struct strl *s) +{ + size_t r; + + cat_indent(s, indent); + cat_priority(s, e->priority); + + struct heap_elem *child = e->child; + + while (child) { + heap_print2(child, indent + 1, s); + child = child->next; + } +} + +static void heap_print3(struct heap_elem *e, char *result, size_t buf_len) +{ + struct strl s; + + s.str = result; + s.len = buf_len; + + heap_print2(e, 0, &s); +} + +void heap_print(struct heap *h, char *result, size_t buf_len) +{ + if (h->n_elems == 0) { + *result = 0; + return ; + } + + heap_print3(h->top, result, buf_len); +} + +struct heap *heap_create(uint32_t max_elems, int socket_id) +{ + struct heap *ret; + size_t mem_size = 0; + size_t elem_mem = 0; + struct heap_elem *e; + + /* max_elems + 1 since index start at 1. Store total number of + elements in the first entry (which is unused otherwise). 
*/ + mem_size += sizeof(struct heap); + mem_size += sizeof(((struct heap *)0)->top) * max_elems; + mem_size = RTE_CACHE_LINE_ROUNDUP(mem_size); + elem_mem = mem_size; + mem_size += sizeof(*((struct heap *)0)->top) * max_elems; + ret = prox_zmalloc(mem_size, socket_id); + if (!ret) + return NULL; + + e = (struct heap_elem *)(((uint8_t *)ret) + elem_mem); + PROX_ASSERT((void *)&e[max_elems] <= (void *)ret + mem_size); + + for (uint32_t i = 0; i < max_elems; ++i) { + PROX_ASSERT(e->priority == 0); + PROX_ASSERT(e->ref == 0); + PROX_ASSERT(e->prev == 0); + PROX_ASSERT(e->next == 0); + PROX_ASSERT(e->child == 0); + + ret->avail[ret->n_avail++] = e++; + } + + PROX_ASSERT(ret->n_elems + ret->n_avail == max_elems); + return ret; +} + +static struct heap_elem *heap_get(struct heap *h) +{ + PROX_ASSERT(h->n_avail); + + return h->avail[--h->n_avail]; +} + +static void heap_put(struct heap *h, struct heap_elem *e) +{ + h->avail[h->n_avail++] = e; +} + +void heap_add(struct heap *h, struct heap_ref *ref, uint64_t priority) +{ + PROX_ASSERT(h); + PROX_ASSERT(ref); + PROX_ASSERT(ref->elem == NULL); + PROX_ASSERT(heap_elem_check(h->top, 1)); + PROX_ASSERT(h->n_elems == heap_calc_size(h)); + + if (h->n_elems == 0) { + h->n_elems++; + h->top = heap_get(h); + + h->top->priority = priority; + h->top->ref = ref; + ref->elem = h->top; + h->top->prev = NULL; + h->top->next = NULL; + h->top->child = NULL; + + PROX_ASSERT(heap_elem_check(h->top, 1)); + PROX_ASSERT(h->n_elems == heap_calc_size(h)); + return ; + } + + h->n_elems++; + /* New element becomes new top */ + if (h->top->priority > priority) { + struct heap_elem *n = heap_get(h); + + n->priority = priority; + n->ref = ref; + ref->elem = n; + n->prev = NULL; + n->next = NULL; + n->child = h->top; + + h->top->prev = n; + h->top = n; + } + /* New element is added as first sibling */ + else { + struct heap_elem *n = heap_get(h); + n->priority = priority; + n->ref = ref; + ref->elem = n; + n->prev = h->top; + n->next = h->top->child; + 
if (h->top->child) + h->top->child->prev = n; + n->child = NULL; + h->top->child = n; + } + + PROX_ASSERT(heap_elem_check(h->top, 1)); + PROX_ASSERT(h->n_elems == heap_calc_size(h)); +} + +static void heap_merge_tops_left(struct heap_elem *left, struct heap_elem *right) +{ + PROX_ASSERT(left->priority <= right->priority); + PROX_ASSERT(left != right); + + /* right moves down and becomes first child of left. */ + left->next = right->next; + if (right->next) + right->next->prev = left; + + right->next = left->child; + if (left->child) + left->child->prev = right; + + /* right->prev is now referring to parent since right is the + new first child. */ + left->child = right; +} + +static void heap_merge_tops_right(struct heap_elem *left, struct heap_elem *right) +{ + PROX_ASSERT(left->priority >= right->priority); + PROX_ASSERT(left != right); + + /* Left goes down one layer */ + right->prev = left->prev; + if (left->prev) + left->prev->next = right; + + left->next = right->child; + if (right->child) + right->child->prev = left; + + left->prev = right; + right->child = left; +} + +static struct heap_elem *heap_merge_children(struct heap_elem *e) +{ + struct heap_elem *next = e->next; + struct heap_elem *tmp; + struct heap_elem *prev; + struct heap_elem *first; + + PROX_ASSERT(e); + int cnt = 0; + /* TODO: is this really needed? 
*/ + if (!next) + return e; + + if (e->priority < next->priority) + first = e; + else + first = next; + + /* Forward pass */ + do { + cnt++; + tmp = next->next; + rte_prefetch0(tmp); + if (e->priority < next->priority) { + heap_merge_tops_left(e, next); + prev = e; + PROX_ASSERT(e->child == next); + } + else { + heap_merge_tops_right(e, next); + PROX_ASSERT(next->child == e); + prev = next; + } + + if (tmp) { + tmp->prev = prev; + e = tmp; + /* Next could be empty, (uneven # children) */ + if (!tmp->next) + break; + next = tmp->next; + } + else { + /* Even number of nodes, after breaking set e + to the last merged pair top */ + if (e->priority >= next->priority) + e = next; + break; + } + } while (1); + /* Backward pass, merge everything with the right until the + first child */ + while (first != e) { + prev = e->prev; + + if (e->priority < prev->priority) { + heap_merge_tops_right(prev, e); + if (prev == first) { + first = e; + break; + } + } + else { + heap_merge_tops_left(prev, e); + e = prev; + } + } + return first; +} + +static int heap_elem_first_sibling(const struct heap_elem *e) +{ + return e->prev->child == e; +} + +void heap_del(struct heap *h, struct heap_ref *d) +{ + struct heap_elem *del = d->elem; + + PROX_ASSERT(del); + PROX_ASSERT(heap_elem_in_heap(h, del)); + PROX_ASSERT(!heap_elem_is_avail(h, del)); + PROX_ASSERT(h->n_elems == heap_calc_size(h)); + PROX_ASSERT(heap_elem_check(h->top, 1)); + PROX_ASSERT(h->top->next == NULL); + PROX_ASSERT(h->top->prev == NULL); + + d->elem = NULL; + /* Del is at the top */ + if (del->prev == NULL) { + PROX_ASSERT(del == h->top); + if (del->child) { + del->child->prev = NULL; + h->top = heap_merge_children(del->child); + PROX_ASSERT(h->top); + } + else { + h->top = NULL; + } + + h->n_elems--; + heap_put(h, del); + PROX_ASSERT(heap_elem_check(h->top, 1)); + PROX_ASSERT(h->n_elems == 0 || h->top != NULL); + PROX_ASSERT(h->n_elems == heap_calc_size(h)); + return ; + } + PROX_ASSERT(del != h->top); + + /* Del is 
somewhere in a lower layer. If it the first child, + need to fix the parent differently. */ + if (heap_elem_first_sibling(del)) { + del->prev->child = del->next; + if (del->next) + del->next->prev = del->prev; + } + else { + del->prev->next = del->next; + if (del->next) + del->next->prev = del->prev; + } + + struct heap_elem *top2 = del->child; + + /* If the node to be deleted has children, there is more work: + merge the children into a single heap and merge with + top. If there are no children, then the disconnection above + is enough. */ + if (top2) { + top2->prev = NULL; + top2 = heap_merge_children(top2); + + /* Merge top2 with h->top */ + if (h->top->priority < top2->priority) { + top2->next = h->top->child; + top2->prev = h->top; + if (h->top->child) + h->top->child->prev = top2; + + h->top->child = top2; + } + else { + h->top->next = top2->child; + h->top->prev = top2; + if (top2->child) + top2->child->prev = h->top; + + top2->child = h->top; + h->top = top2; + } + + } + h->n_elems--; + heap_put(h, del); + + PROX_ASSERT(heap_elem_check(h->top, 1)); + PROX_ASSERT(h->n_elems == heap_calc_size(h)); +} + +struct heap_ref *heap_pop(struct heap *h) +{ + if (h->n_elems == 0) + return NULL; + + struct heap_ref *ret = h->top->ref; + + heap_del(h, h->top->ref); + return ret; +} diff --git a/VNFs/DPPD-PROX/heap.h b/VNFs/DPPD-PROX/heap.h new file mode 100644 index 00000000..08e5f1a5 --- /dev/null +++ b/VNFs/DPPD-PROX/heap.h @@ -0,0 +1,53 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _HEAP_H_ +#define _HEAP_H_ + +#include <inttypes.h> +#include <stdlib.h> + +struct heap_ref { + struct heap_elem *elem; /* timer management */ +}; + +struct heap { + uint64_t n_elems; + struct heap_elem *top; + uint64_t n_avail; + struct heap_elem *avail[0]; +}; + +static inline uint64_t heap_n_elems(const struct heap *h) +{ + return h->n_elems; +} + +static inline int heap_is_empty(const struct heap *h) +{ + return !h->n_elems; +} + +int heap_top_is_lower(struct heap *h, uint64_t prio); + +void heap_print(struct heap *h, char *result, size_t buf_len); + +struct heap *heap_create(uint32_t max_elems, int socket_id); +void heap_add(struct heap *h, struct heap_ref *ref, uint64_t priority); +void heap_del(struct heap *h, struct heap_ref *del); +struct heap_ref *heap_pop(struct heap *h); + +#endif /* _HEAP_H_ */ diff --git a/VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py new file mode 100644 index 00000000..f9250d21 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/prox.py @@ -0,0 +1,53 @@ +#!/bin/env python2.7 + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+## + +import socket + +class prox: + def __init__(self, ip): + self._ip = ip; + self._dat = "" + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + sock.connect((self._ip, 8474)) + except: + raise Exception("Failed to connect to PROX on " + self._ip + ":8474") + self._sock = sock; + + def send(self, msg): + self._sock.sendall(msg + "\n"); + return self + def recv(self): + ret_str = ""; + done = 0; + while done == 0: + if (len(self._dat) == 0): + self._dat = self._sock.recv(256); + + while(len(self._dat)): + if (self._dat[0] == '\n'): + done = 1 + self._dat = self._dat[1:] + break; + else: + ret_str += self._dat[0]; + self._dat = self._dat[1:] + return ret_str; + + def wait_cmd_finished(self): + self.send("stats hz").recv(); diff --git a/VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py new file mode 100644 index 00000000..112b583a --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/demo-scripts/tx_rate.py @@ -0,0 +1,74 @@ +#!/bin/env python2.7 + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +from prox import * +from decimal import * +from time import * + +class data_point: + value = 0; + tsc = 0; + def __init__(self, value, tsc): + self.value = value; + self.tsc = tsc; + +def measure_tx(prox_instance, port_id): + port_tx_pkt = "port(" + str(port_id) + ").tx.packets" + port_tsc = "port(" + str(port_id) + ").tsc"; + cmd = "stats " + port_tx_pkt + "," + port_tsc; + reply = prox_instance.send(cmd).recv().split(","); + + return data_point(int(reply[0]), int(reply[1])); + +def get_rate(first, second, hz): + tsc_diff = second.tsc - first.tsc; + value_diff = second.value - first.value; + + return int(Decimal(value_diff * hz) / tsc_diff) + +# make sure that prox has been started with the -t parameter +prox_instance = prox("127.0.0.1") +print "Connected to prox" + +hz = int(prox_instance.send("stats hz").recv()); + +print "System is running at " + str(hz) + " Hz" + +print "Showing TX pps on port 0" + +update_interval = 0.1 + +print "Requesting new data every " + str(update_interval) + "s" + +measure = measure_tx(prox_instance, 0); +while (True): + sleep(update_interval) + measure2 = measure_tx(prox_instance, 0); + + # since PROX takes measurements at a configured rate (through + # update interval command or throw -r command line parameter), it + # might be possible that two consecutive measurements report the + # same. To get updates at a frequency higher than 1 Hz, + # reconfigure prox as mentioned above. + + if (measure.tsc == measure2.tsc): + continue; + + print get_rate(measure, measure2, hz); + + measure = measure2; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/README b/VNFs/DPPD-PROX/helper-scripts/dpi/README new file mode 100644 index 00000000..f1100757 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/README @@ -0,0 +1,41 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +The scripts in this directory characterize a DPI-enabled VNF. The +characterization is split up into two steps. The first step (dpi1.py) +searches for the traffic profile parameter boundaries. The second step +(dpi2.py) takes as input the output of the first step and searches for +the maximum sustainable throughput of a DPI-enabled VNF. + +To run the first script, use: + + python2.7 ./dpi1.py -t TEST_SYSTEM_DESCRIPTIONS -o OUTPUT1 + +TEST_SYSTEM_DESCRIPTIONS is a comma-separated list of systems where +the syntax of defining each system is shown below: + + user@ip:proxDir:cfgDir + +To run the second script, use: + + python2.7 ./dpi2.py -t TEST_SYSTEM_DESCRIPTIONS \ + -s SYSTEM_UNDER_TEST_DESCRIPTIONS \ + -o OUTPUT2 -d \ + -i OUTPUT1 + +Finally, the results can be processed using the following command: + + python2.7 ./maketable.py -i OUTPUT1 -j OUTPUT2 -o FINAL_TABLE diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/config.py b/VNFs/DPPD-PROX/helper-scripts/dpi/config.py new file mode 100644 index 00000000..ee3f04c6 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/config.py @@ -0,0 +1,178 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import getopt +import sys +from systemconfig import * + +class Config: + _debug = False; + _test_systems = []; + _output_file_name = None; + _input_file_name = None + _input_file_name2 = None + _max_port_rate = 0.85 + _sut = None + _accuracy = 2; + _threshold = 0.95 + _once = None + _skipTime = 10 + _testLength = 120 + _dpiCoreList = range(1, 5) + _checkConditions = False; + _interCheckDuration = float(1) + + def getInputFileName(self): + return self._input_file_name + + def getInputFileName2(self): + return self._input_file_name2 + + def toString(self): + ret = "" + ret += "Test systems: \n" + for ts in self._test_systems: + ret += ts.toString(); + + if (self._sut is not None): + ret += "SUT: \n" + ret += self._sut.toString(); + + ret += "Output file name: " + str(self._output_file_name) + "\n" + ret += "Max port rate: " + str(self._max_port_rate) + "\n" + ret += "Accuracy: " + str(self._accuracy) + " digits after point" + return ret + + def getErrorTestOne(self): + if (len(self._test_systems) == 0): + return "Missing test systems"; + if (self._output_file_name is None): + return "No output file or input file defined"; + return None + + def getErrorTestTwo(self): + if (self._input_file_name is None): + return "Input file is missing" + if (self._input_file_name == self._output_file_name): + return "Input file and output file are the same" + return self.getErrorTestOne(); + + def getErrorMakeTable(self): + if (self._input_file_name is None): + return "Missing input file" + if (self._input_file_name2 is None): + return "Missing file with 
performance resuilts" + if (self._output_file_name is None): + return "No output file or input file defined"; + if (self._input_file_name2 == self._input_file_name): + return "Input file used multiple times" + if (self._input_file_name == self._output_file_name): + return "output file is the same as the input file" + if (self._input_file_name2 == self._output_file_name): + return "output file is the same as the input file 2" + + return None + + def usageAndExit(self, argv0): + print "Usage: " + str(argv0) + print "-t Add a test system, syntax: " + SystemConfig.expectedSyntax() + print "-s Add SUT, syntax: " + SystemConfig.expectedSyntax() + print "-o Ouput file name" + print "-a Accuracy, number of digits after point" + print "-i Input file" + print "-j File with performance results" + print "-m Maximum per port rate, by default 0.85 (85%)" + print "-d Enable debugging" + print "-w Fraction of connections to reach, by default is 0.95 (95%)" + print "-h Show help" + print "-q Run a single test iteration, syntax of argument " + print "-b Skip time, by default 10 sec" + print "-l Test length, by default 120 sec" + print "-n Maximum number of DPI cores to test" + print "-k Period between checking conditions, 1 second by default" + print "-c Check conditions during 10 second period after convergence" + print " is msr,conn,ss (i.e. 
-q 4000,100000,38.91)" + exit(-1); + + def parse(self, programName, args): + try: + opts, args = getopt.getopt(args, "t:s:o:a:i:q:m:dhw:j:b:l:n:k:c") + except getopt.GetoptError as err: + print str(err) + return; + for option, arg in opts: + if(option == "-t"): + for ts in arg.split(","): + syntaxErr = SystemConfig.checkSyntax(ts) + if (syntaxErr != ""): + print syntaxErr + exit(-1); + self._test_systems.append(SystemConfig(ts)); + elif(option == "-s"): + syntaxErr = SystemConfig.checkSyntax(ts) + if (syntaxErr != ""): + print syntaxErr + exit(-1); + self._sut = SystemConfig(arg); + elif(option == "-w"): + self._threshold = float(arg) + elif(option == "-o"): + self._output_file_name = arg; + elif(option == '-a'): + self._accuracy = int(arg); + elif(option == "-i"): + self._input_file_name = arg; + elif(option == "-j"): + self._input_file_name2 = arg; + elif(option == "-q"): + self._once = arg.split(",") + elif(option == "-c"): + self._checkConditions = True; + elif(option == "-m"): + self._max_port_rate = float(arg); + elif(option == "-k"): + self._interCheckDuration = float(arg); + elif(option == "-d"): + self._debug = True + elif(option == '-h'): + self.usageAndExit(programName) + elif(option == '-b'): + self._skipTime = int(arg) + elif(option == '-l'): + self._testLength = int(arg) + elif(option == '-n'): + self._dpiCoreList = self.strToList(arg) + else: + self.usageAndExit(programName); + + def strToList(self, arg): + elements = []; + tokens = arg.split(","); + + for a in tokens: + if (a.count('-') == 0): + elements.append(int(a)) + elif (a.count('-') == 1): + beg = int(a.split('-')[0]); + end = int(a.split('-')[1]); + if (beg > end): + raise Exception("Invalid list input format") + elements += range(beg, end + 1); + else: + raise Exception("Invalid list input format") + return elements; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py b/VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py new file mode 100644 index 00000000..b0b650dc --- /dev/null +++ 
b/VNFs/DPPD-PROX/helper-scripts/dpi/csvreader.py @@ -0,0 +1,78 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +from decimal import * + +class CsvReaderError: + def __init__(self, msg): + self._msg = msg; + + def __str__(self): + return self._msg; + +class CsvReader: + def __init__(self, fieldTypes = None): + self._file_name = None; + self._fieldTypes = fieldTypes; + + def open(self, file_name): + self._file = open(file_name, 'r'); + self._file_name = file_name; + + def read(self): + line = "#" + while (len(line) != 0 and line[0] == "#"): + line = self._file.readline(); + + if (len(line) != 0): + return self._lineToEntry(line) + else: + return None; + + def _lineToEntry(self, line): + split = line.strip().split(','); + if (self._fieldTypes is None): + return split; + have = len(split) + expected = len(self._fieldTypes) + if (have != expected): + raise CsvReaderError("Invalid number of fields %d != %d" % (have, expected)) + + entry = {}; + for i in range(len(self._fieldTypes)): + curFieldType = self._fieldTypes[i][1] + curFieldName = self._fieldTypes[i][0]; + if (curFieldType == "int"): + entry[curFieldName] = int(split[i]) + elif (curFieldType == "Decimal"): + entry[curFieldName] = Decimal(split[i]) + else: + raise CsvReaderError("Invalid field type %s" % curFieldType); + return entry; + + def readAll(self): + ret = [] + line = self.read(); + while (line != None): + 
ret.append(line); + line = self.read(); + return ret; + + def close(self): + self._file.close(); + self._file = None; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py b/VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py new file mode 100644 index 00000000..a5f055e8 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/csvwriter.py @@ -0,0 +1,35 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +class CsvWriter: + def __init__(self): + self._file_name = None; + + def open(self, file_name): + self._file = open(file_name, 'w'); + self._file_name = file_name; + + def write(self, elements): + elements_str = map(lambda x: str(x), elements); + line = ",".join(elements_str); + self._file.write(line + "\n"); + self._file.flush(); + + def close(self): + self._file.close(); + self._file = None; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py new file mode 100644 index 00000000..ec3e4a03 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi1.py @@ -0,0 +1,243 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +from testerset import * +from time import sleep +from time import time +from decimal import * +import copy +from os import system +import socket +from itertools import chain +from math import * +from csvwriter import * +from config import * +from progress import * +from proxmaxssprobe import * + +def runTest(minSetupRate, testParam): + print "Running test with following parameters:" + print testParam.toString(); + + testers = testerSet(config._test_systems, config._max_port_rate, testParam); + + thresh = testParam.getConnections(); + p = Progress(thresh, ["connections", "setup rate", "reTX"], False); + loop_count = 0; + converged = False; + + testers.startForkJoin(); + testers.wait_links_up(); + testers.start_cores(); + + print "Running until convergence (%s connections)" % str(thresh) + while (not converged): + sleep(config._interCheckDuration) + testers.update_stats(); + tot = testers.get_total_connections(); + tot_retx = testers.get_total_retx(); + rates = testers.get_rates(); + curSetupRate = testers.get_setup_rate(); + ierrors = testers.getIerrors(); + + converged = tot >= thresh; + if (not converged): + if (loop_count > 0 and curSetupRate < minSetupRate): + reason = str(curSetupRate) + " < " + str(minSetupRate); + print "Current setup rate is lower than min setup rate: " + reason + testers.killProx(); + return False, []; + if (not testers.conditionsGood()): + print "conditions are bad: " + testers.getReason(); + testers.killProx(); + return False, []; + + if (config._debug): + p.setProgress(tot, [tot, curSetupRate, tot_retx]); + print 
p.toString(); + loop_count += 1; + print "converged" + + skipTime = config._skipTime + print "Connection threshold reached, waiting for " + str(skipTime) + "s, conditions checked = " + str(config._checkConditions) + while (skipTime > 0): + skipTime -= config._interCheckDuration + sleep(config._interCheckDuration) + testers.update_stats(); + if (config._checkConditions and not testers.conditionsGood()): + print "conditions are bad: " + testers.getReason(); + testers.killProx(); + return False, []; + + testers.tx_rate_meassurement(); + + testLength = config._testLength + print "Waiting final " + str(testLength) + "s" + while (testLength > 0): + testLength -= config._interCheckDuration + sleep(config._interCheckDuration) + testers.update_stats(); + if (not testers.conditionsGood()): + print "conditions are bad: " + testers.getReason(); + testers.killProx(); + return False, []; + + rates = testers.tx_rate_meassurement(); + + testers.killProx(); + return True, rates; + +def find_ss(tot_conn, maxSetupRate, ss_max): + iterationCount = 0; + valid_ss = [] + speed_ss = []; + + # The setup rate must be in [0.2% of total connections, maxSetupRate] + # Also, it must not be hihger than 50% of the total connections + min_setup_rate = tot_conn / 500; + + if (min_setup_rate > maxSetupRate): + print "min setup rate > max setup rate: " + str(min_setup_rate) + " > " + str(maxSetupRate); + return valid_ss, speed_ss; + if (maxSetupRate > tot_conn / 2): + print "maximum setup rate (" + str(maxSetupRate) + ") is more than 50% of " + str(tot_conn) + return valid_ss, speed_ss; + + accuracy = 10**config._accuracy + ss_lo = 1 + ss_hi = int(round(ss_max * accuracy,0)) + + iterationOverride = [ss_hi, ss_lo]; + # Binary search for highest speed scaling + while (ss_lo <= ss_hi): + if (iterationCount < len(iterationOverride)): + ss = iterationOverride[iterationCount] + else: + ss = (ss_lo + ss_hi)/2; + + testParam = TestParameters(maxSetupRate, tot_conn, float(ss)/accuracy); + + success, rates = 
runTest(min_setup_rate, testParam); + print "success = " + str(success) + ", rates = " + str(rates) + if (success == True): + valid_ss.append(float(ss)/accuracy); + speed_ss.append(sum(rates)/len(rates)) + ss_lo = ss + 1 + else: + ss_hi = ss - 1; + iterationCount += 1 + return valid_ss, speed_ss; + +def get_highest_ss_and_speed(valid_ss, speed_ss): + highest_ss = None; + highest_speed = None; + + for i in range(len(valid_ss)): + if(highest_ss == None or highest_ss < valid_ss[i]): + highest_ss = valid_ss[i]; + highest_speed = speed_ss[i]; + return highest_ss, highest_speed; + +def get_max_ss(): + ts = config._test_systems[0]; + test_system = ProxMaxSSProbe(ts); + max_ss = test_system.getMaxSS(); + + return floor((max_ss * (10**config._accuracy)))/(10**config._accuracy) + +config = Config(); +config.parse(sys.argv[0], sys.argv[1:]) + +err = config.getErrorTestOne(); +if (err is not None): + print "Invalid configuration: " + err; + exit(-1); +else: + print config.toString() + +if (config._once is not None): + maxSetupRate = int(config._once[0]) + minSetupRate = maxSetupRate/500 + connections = int(config._once[1]) + speedScaling = float(config._once[2]) + + testParam = TestParameters(maxSetupRate, connections, speedScaling) + success, rates = runTest(minSetupRate, testParam) + print "success = " + str(success) + ", port rates = " + str(rates) + exit(0); + +msr_list = [] +msr_list += range(4000, 20000, 2000) +msr_list += range(20000, 100000, 20000) +msr_list += range(100000, 300000, 50000) +msr_list += range(300000, 800001, 100000); + +conn_list = [1*10**5, 2*10**5, 4*10**5, 8*10**5, 1*10**6, 2*10**6] + +summary_file = CsvWriter() +summary_file.open(config._output_file_name) + +tot_it = 0; +for tot_conn in conn_list: + for msr in msr_list: + if (msr >= tot_conn/2): + break; + tot_it += 1 + +cnt = -1; +print "Search will include " + str(tot_it) + " parameter combinations" +print "Will search for highest link utilization" + +# If the lowest msr was a for n connections, 
then the lowest msr +# for n + 1 connections can't be lower than a. +low_sr = msr_list[0]; + +max_ss = get_max_ss() + +high_ss = Decimal(max_ss) + +globalProgress = Progress(tot_it) +globalProgress.setProgress(0); +for tot_conn in conn_list: + had_success = False; + all_ss = [] + for msr in msr_list: + globalProgress.incrProgress(); + + if (msr < low_sr): + print "skipping " + str(msr) + " since it is lower than " + str(low_sr) + continue; + + print globalProgress.toString(); + + valid_ss, speed_ss = find_ss(tot_conn, msr, high_ss) + print "valid ss = " + str(valid_ss) + print "valid speeds = " + str(speed_ss) + + if (len(valid_ss) > 0): + highest_ss, highest_speed = get_highest_ss_and_speed(valid_ss, speed_ss); + summary_file.write([msr, tot_conn, highest_ss, highest_speed]); + + if (not had_success): + low_sr = msr; + + had_success = True; + all_ss = all_ss + valid_ss; + + if (len(all_ss) > 0): + high_ss = max(all_ss); diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py new file mode 100644 index 00000000..65473f61 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/dpi2.py @@ -0,0 +1,229 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +from testerset import * +from proxdpisut import * +from statsconsfile import * +from time import sleep +from time import time +from decimal import * +import copy +from os import system +import socket +from itertools import chain +from math import * +from csvwriter import * +from csvreader import * +from config import * +from progress import * +from resultprocessor import * + +def runTest(coreCount, testParam): + print "Running test with following parameters:" + print testParam.toString(); + + + testers = testerSet(config._test_systems, config._max_port_rate, testParam); + + ret = TestResult(testers.getCount()); + thresh = testParam.getConnections() * config._threshold; + converged = False; + + sut = ProxDpiSut(config._sut, coreCount); + + testers.startFork(); + sut.startFork(); + testers.startJoin(); + sut.startJoin(); + testers.wait_links_up(); + sut.startAllCores(); + sut.waitCmdFinished(); + testers.start_cores(); + + ret.addTimeTS(testers.getTsc()); + ret.addTimeSUT(sut.getTsc()); + + print "Running until convergence (%s connections)" % str(thresh) + p = Progress(thresh, ["connections", "setup rate", "reTX"], False); + while (not converged): + sleep(config._interCheckDuration) + testers.update_stats(); + + tot = testers.get_total_connections(); + tot_retx = testers.get_total_retx(); + rates = testers.get_rates(); + cur_setup_rate = testers.get_setup_rate(); + ierrors = testers.getIerrors(); + converged = tot >= thresh; + + if (not converged and not testers.conditionsGood()): + print "conditions are bad: " + testers.getReason(); + sut.forceQuit(); + sut.killProx(); + testers.killProx(); + return None; + + if (sut.getIerrors() != 0): + testers.killProx(); + print "Sending quit" + try: + sut.forceQuit(); + except: + print "Sending quit failed" + sut.killProx(); + return None; + + if (config._debug): + p.setProgress(tot, [tot, cur_setup_rate, tot_retx]); + print p.toString(); + + skipTime = config._skipTime + print "Connection threshold reached, waiting for 
" + str(skipTime) + "s, conditions checked = " + str(config._checkConditions) + while (skipTime > 0): + skipTime -= config._interCheckDuration + sleep(config._interCheckDuration) + testers.update_stats(); + if (config._checkConditions and not testers.conditionsGood()): + print "conditions are bad: " + testers.getReason(); + sut.forceQuit(); + sut.killProx(); + testers.killProx(); + return False, []; + + ret.addTimeTS(testers.getTsc()); + ret.addTimeSUT(sut.getTsc()); + + testers.tx_rate_meassurement(); + + testLength = config._testLength + print "Waiting final " + str(testLength) + "s" + while (testLength > 0): + testLength -= config._interCheckDuration + testers.update_stats(); + if (not testers.conditionsGood()): + print "conditions are bad: " + testers.getReason(); + sut.forceQuit(); + sut.killProx(); + testers.killProx(); + return None; + + if (sut.getIerrors() != 0): + testers.killProx(); + print "Sending quit" + try: + sut.forceQuit(); + except: + print "Sending quit failed" + sut.killProx(); + return None; + + sleep(config._interCheckDuration) + + rates = testers.tx_rate_meassurement(); + ret.addTimeTS(testers.getTsc()); + ret.addTimeSUT(sut.getTsc()); + + print "Quiting Prox on SUT" + # make sure stats are flushed + sut.quitProx(); + print "Quiting Prox on test system(s)" + testers.quitProx() + + ret.rates = rates + + sutStatsDump = "stats_dump_sut" + tsStatsDumpBaseName = "stats_dump_ts" + + sut.scpStatsDump(sutStatsDump); + tsStatsDump = testers.scpStatsDump(tsStatsDumpBaseName); + + ret.setTSStatsDump(tsStatsDump); + ret.setSUTStatsDump(sutStatsDump); + return ret + +def meassurePerf(coreCount, maxSetupRate, total_connections, ss_hi): + iterationCount = 0; + accuracy = 10**config._accuracy + ss_lo = 1 + ss_hi = int(round(ss_hi * accuracy, 0)) + success = True; + + downrate = float(0) + highest_ss = 0 + iterationOverride = [ss_hi, ss_lo]; + while (ss_lo <= ss_hi): + if (iterationCount < len(iterationOverride)): + ss = iterationOverride[iterationCount] + 
else: + ss = (ss_lo + ss_hi)/2; + + testParam = TestParameters(maxSetupRate, total_connections, float(ss)/accuracy); + + result = runTest(coreCount, testParam); + + if (result is None): + success = False + else: + rp = ResultProcessor(result) + rp.process(); + success = rp.percentHandled() > 0.99999 + + print "test result = " + str(success) + if (success): + ss_lo = ss + 1; + highest_ss = max(highest_ss, ss); + print result.rates + downrate = sum(result.rates)/len(result.rates) + else: + ss_hi = ss - 1; + iterationCount += 1 + + return downrate, float(highest_ss)/accuracy + +config = Config(); +config.parse(sys.argv[0], sys.argv[1:]) + +err = config.getErrorTestTwo(); +if (err is not None): + print "Invalid configuration: " + err; + exit(-1); +else: + print config.toString() + +infileFields = [] +infileFields += [("msr", "int")] +infileFields += [("conn", "int")] +infileFields += [("ss", "Decimal")] +infileFields += [("bw", "Decimal")] + +infile = CsvReader(infileFields); +infile.open(config.getInputFileName()) +inputs = infile.readAll() +infile.close(); + +summary = CsvWriter(); +summary.open(config._output_file_name); + +print "Will test up SUT config with " + str(config._dpiCoreList) + " DPI cores" + +for a in inputs: + for coreCount in config._dpiCoreList: + downrate, ss = meassurePerf(coreCount, a["msr"], a["conn"], a["ss"]); + summary.write([coreCount, a["msr"], a["conn"], ss, downrate]); + +summary.close() diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py b/VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py new file mode 100644 index 00000000..f8b7bdc0 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/maketable.py @@ -0,0 +1,140 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import sys +from config import * +from csvreader import * +from sets import Set +from csvwriter import * + +class ResultEntry: + def __init__(self): + self.boundary = None; + self.cores = {} + + def setBoundary(self, val): + self.boundary = val; + + def addCoreResult(self, core, val): + self.cores[core] = val + + def getCoreResult(self, core): + if (core in self.cores): + return self.cores[core]; + return None; + + def getBoundary(self): + return self.boundary; + + def getCores(self): + return self.cores + + def getMsr(self): + return self.msr; + +class DictEntry: + def __init__(self, key): + self.dictionary = {} + self.entries = [] + self.key = key; + +config = Config(); +config.parse(sys.argv[0], sys.argv[1:]) + +err = config.getErrorMakeTable(); + +if (err is not None): + print err + exit(-1); + +if (config._debug): + print "Performance data: " + config.getInputFileName2() + print "Boundaries: " + config.getInputFileName() + +allData = {} + +infileFields = [] +infileFields += [("msr", "int")] +infileFields += [("conn", "int")] +infileFields += [("ss", "Decimal")] +infileFields += [("bw", "Decimal")] + +boundariesFile = CsvReader(infileFields) +boundariesFile.open(config.getInputFileName()); +boundaries = boundariesFile.readAll(); + +cores = Set() + +orderedResults = [] +finalResults = {} + +for a in boundaries: + key = a["conn"] + if (key not in finalResults): + newDict = DictEntry(key) + finalResults[key] = newDict + orderedResults.append(newDict) + +for a in boundaries: + table = finalResults[a["conn"]] + key = a["msr"] + value = 
ResultEntry() + value.msr = a["msr"] + value.conn = a["conn"] + value.boundary = a["bw"] + table.dictionary[key] = value + table.entries.append(value) + +infileFields2 = [] +infileFields2 += [("cores", "int")] +infileFields2 += [("msr", "int")] +infileFields2 += [("conn", "int")] +infileFields2 += [("ss", "Decimal")] +infileFields2 += [("down", "Decimal")] + +resultsFile = CsvReader(infileFields2) +resultsFile.open(config.getInputFileName2()) + +for a in resultsFile.readAll(): + table = finalResults[a["conn"]] + key = a["msr"] + table.dictionary[key].addCoreResult(a["cores"], a["down"]) + cores.add(a["cores"]); + + +outputFile = CsvWriter() + +outputFile.open(config._output_file_name) + +title = ["setup rate", "maximum"] +for e in sorted(cores): + title += [str(e)] + +for a in orderedResults: + outputFile.write(["connections = " + str(a.key)]) + outputFile.write(title) + + for e in a.entries: + line = [str(e.getMsr())] + line += [str(e.getBoundary())] + for c in sorted(cores): + if (e.getCoreResult(c) is not None): + line += [str(e.getCoreResult(c))] + else: + line += [""] + outputFile.write(line) diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/progress.py b/VNFs/DPPD-PROX/helper-scripts/dpi/progress.py new file mode 100644 index 00000000..5e44c678 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/progress.py @@ -0,0 +1,67 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +from decimal import * +from time import time + +class Progress: + def __init__(self, limit, fieldNames = [], overallETA = True): + self._fieldNames = fieldNames; + self._limit = limit; + self._progress = 0; + self._prevProgress = 0; + self._prevTime = 0; + self._progressSetCount = 0; + self._time = 0; + self._overallETA = overallETA; + + def setProgress(self, progress, fieldValues = []): + self._fieldValues = fieldValues; + if (self._overallETA == True): + self._progress = progress + self._time = time(); + if (self._progressSetCount == 0): + self._prevProgress = self._progress; + self._prevTime = self._time; + else: + self._prevProgress = self._progress; + self._prevTime = self._time; + self._progress = progress; + self._time = time(); + self._progressSetCount += 1 + + def incrProgress(self): + self.setProgress(self._progress + 1); + + def toString(self): + ret = "" + ret += str(self._getETA()) + " seconds left" + for f,v in zip(self._fieldNames, self._fieldValues): + ret += ", %s=%s" % (str(f),str(v)) + return ret; + + def _getETA(self): + if (self._progressSetCount < 2): + return "N/A" + diff = self._progress - self._prevProgress; + t_diff = Decimal(self._time - self._prevTime); + if (t_diff < 0.001 or diff <= 0): + return "N/A" + rate = Decimal(diff)/t_diff + remaining = Decimal(self._limit - self._progress); + return round(remaining/rate, 2); diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/prox.py b/VNFs/DPPD-PROX/helper-scripts/dpi/prox.py new file mode 100644 index 00000000..60ef7592 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/prox.py @@ -0,0 +1,253 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import threading +from time import * +from proxsocket import * +from remotesystem import * + +class ProxStarter: + def __init__(self, remoteSystem, cmd): + self._remoteSystem = remoteSystem + self._cmd = cmd + self._thread = None + self._prox = None; + self._result = None; + self._startDuration = None + + def startThreaded(self): + self._start_thread = threading.Thread(target = self._run, args = (self, 1)) + self._start_thread.start(); + + def joinThreaded(self): + self._start_thread.join(); + return self._result; + + def getResult(self): + return self._result; + + def getStartDuration(self): + return self._startDuration; + def getProx(self): + return self._prox; + + def _run(self, a, b): + before = time.time() + self._remoteSystem.run("sudo killall -w -q -9 prox") + + self._result = self._remoteSystem.run(self._cmd); + + sleep(1) + after = time.time() + self._startDuration = after - before; + +class StatsCmd(object): + def __init__(self, prox): + self._cmd = "" + self._parts = [] + self._beforeParts = [] + self._prox = prox; + + def sendRecv(self): + cmd = self.getCmd() + reply = self._prox._send(cmd)._recv() + self.setReply(reply) + + def add(self, stats): + if (len(self._cmd) != 0): + self._cmd += "," + self._cmd += stats + + if (len(self._parts) == 0): + self._beforeParts += [0] + else: + before = self._parts[-1] + self._beforeParts[-1]; + self._beforeParts += [before] + + self._parts += [stats.count(",") + 1]; + + def getCmd(self): + return "stats " + self._cmd; + + def setReply(self, reply): + self._reply = reply.split(","); + + def 
getResult(self, idx): + start = self._beforeParts[idx]; + end = start + self._parts[idx]; + return self._reply[start:end] + +class Prox(object): + def __init__(self, systemConfig): + self._systemConfig = systemConfig; + self._proxStarter = None + + user = self._systemConfig._user + ip = self._systemConfig._ip + self._remoteSystem = remoteSystem(user, ip); + + self.resetArguments() + + def resetArguments(self): + self._args = [] + + def addArgument(self, arg): + self._args.append(arg); + + def startFork(self): + cmd = self.getCmd(); + self._proxStarter = ProxStarter(self._remoteSystem, cmd) + self._proxStarter.startThreaded(); + + def startJoin(self): + ret = self.startJoinNoConnect(); + self._connectSocket(); + self._querySetup(); + return self._proxStarter.getStartDuration(); + + def startJoinNoConnect(self): + return self._proxStarter.joinThreaded(); + + def getCmd(self): + proxDir = self._systemConfig.getProxDir(); + cfgFile = self._systemConfig.getCfgFile(); + + cmd = "cd " + proxDir + "; " + cmd += "sudo ./build/prox " + cmd += "-f " + cfgFile + + for arg in self._args: + cmd += " " + arg + return cmd + + def getLog(self): + proxDir = self._systemConfig.getProxDir() + cmd = "cat " + proxDir + "/prox.log"; + return self._remoteSystem.run(cmd)["out"]; + + def getIP(self): + return self._systemConfig._ip; + + def getHz(self): + return self._hz; + + def getBeg(self): + return self._beg; + + def getPorts(self): + return self._ports; + + def getIerrors(self): + sc = StatsCmd(self) + sc.add(self._buildIerrorsCmd()); + sc.sendRecv() + return self._parseIerrorsReply(sc.getResult(0)); + + def _parseIerrorsReply(self, rep): + tot_ierrors = 0; + for e in rep: + tot_ierrors += int(e); + return tot_ierrors; + + def _buildIerrorsCmd(self): + cmd = "" + for port in self._ports: + if (len(cmd)): + cmd += "," + cmd += "port(%s).ierrors" % str(port) + return cmd; + + def waitCmdFinished(self): + self._send("stats hz")._recv(); + + def waitAllLinksUp(self): + link_down = True; + 
while (link_down): + link_down = False; + for port in self._ports: + cmd = "port link state %s" % str(port) + link_state = self._send(cmd)._recv(); + if (link_state == "down"): + link_down = True; + print "Link down on port " + str(port) + ", waiting one second" + break; + sleep(1); + + def startAllCores(self): + self._send("start all"); + + def stopAllCores(self): + self._send("stop all"); + + def forceQuit(self): + self._send("quit_force")._recv(); + + def killProx(self): + self._remoteSystem.run("sudo killall -w -q -9 prox") + + def getTsc(self): + return self._getTsc(); + + def _getTsc(self): + return int(self._send("stats global.tsc")._recv()); + + def scpStatsDump(self, dst): + proxDir = self._systemConfig.getProxDir() + + src = proxDir + "/stats_dump"; + print "Copying " + src + " to " + dst + self._remoteSystem.scp(src, dst); + + def _querySetup(self): + print "Query setup on " + str(self.getIP()) + self._queryHz() + self._queryBeg() + self._queryPorts() + self._querySetup2() + + def _querySetup2(self): + print "running query 2" + pass + + def quitProx(self): + self._send("quit")._recv(); + + def _queryHz(self): + self._hz = int(self._send("stats hz")._recv()); + + def _queryBeg(self): + self._beg = self._getTsc(); + + def _queryPorts(self): + self._ports = [] + port_info_all = self._send("port info all")._recv(); + port_info_list = port_info_all.split(','); + + for port_info in port_info_list: + if (len(port_info) > 0): + self._ports.append(int(port_info.split(":")[0])); + + def _connectSocket(self): + self._proxSocket = ProxSocket(self.getIP()) + + def _send(self, msg): + self._proxSocket.send(msg); + return self + + def _recv(self): + return self._proxSocket.recv(); diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py new file mode 100644 index 00000000..aae900b0 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpisut.py @@ -0,0 +1,61 @@ +#!/bin/env python + +## +## Copyright (c) 
2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +from prox import * +from remotesystem import * +from time import * +from decimal import * + +class ProxDpiSut(Prox): + def __init__(self, ts, coreCount): + super(ProxDpiSut, self).__init__(ts) + + self._setDefaultArguments(); + self._setDpiCoreCount(coreCount); + + def _setDefaultArguments(self): + self.addArgument("-e"); + self.addArgument("-t"); + self.addArgument("-k"); + self.addArgument("-d"); + self.addArgument("-r 0.01"); + + def _setDpiCoreCount(self, count): + self.addArgument("-q dpi_core_count=" + str(count)) + + def _querySetup2(self): + self._query_cores(); + + def _query_cores(self): + print "querying cores" + self._wk = self._get_core_list("$wk"); + + def _get_core_list(self, var): + ret = [] + result = self._send("echo " + var)._recv(); + for e in result.split(","): + ret += [e]; + return ret; + + def getTsc(self): + cmd = "stats task.core(%s).task(0).tsc" % self._wk[-1] + res = int(self._send(cmd)._recv()); + if (res == 0): + return self._getTsc(); + else: + return res; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py new file mode 100644 index 00000000..19b08c92 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxdpitester.py @@ -0,0 +1,258 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); 
##

from prox import *
from remotesystem import *
from time import *
from decimal import *
from timeseriespoint import *

class TestParameters:
    """Test knobs: maximum connection setup rate, total connection count
    and the speed-scaling (ss) factor."""
    def __init__(self, max_setup_rate, total_connections, ss):
        self.max_setup_rate = max_setup_rate;
        self.total_connections = total_connections;
        self.ss = ss;

    def toString(self):
        ret = ""
        ret += "\tMaximum setup rate = %d\n" % self.max_setup_rate
        ret += "\tTotal number of connections = %d\n" % self.total_connections
        ret += "\tSpeed scaling = %s\n" % str(self.ss)
        return ret;

    def getPerSystem(self, count):
        # divide the load evenly across `count` test systems
        msr = self.max_setup_rate / count
        cnn = self.total_connections / count
        return TestParameters(msr, cnn, self.ss);

    def getConnections(self):
        return self.total_connections;

class ProxDpiTester(Prox):
    """Prox wrapper for a traffic-generating test system: tracks
    connection counts, retransmits, ierrors and per-port byte rates."""

    # bytes per second on a 10 Gb/s link
    TENGIGABITBYTESPERSECOND = 1250000000

    def __init__(self, ts, testParam, ID):
        super(ProxDpiTester, self).__init__(ts)

        self._sc = None
        self._lastTot = None
        self._prevTot = None;
        self._prevBytesClient = None
        self._lastBytesClient = None
        self._prevBytesTxMeassurement = None
        self._lastBytesTxMeassurement = None

        self._setDefaultArguments();
        self._setMsr(testParam.max_setup_rate)
        self._setConnections(testParam.total_connections);
        self._setSpeedScaling(testParam.ss);
        self._setID(ID);

    def _setDefaultArguments(self):
        self.addArgument("-e")
        self.addArgument("-t")
        self.addArgument("-k")
        self.addArgument("-d")
        self.addArgument("-r 0.01");

    def _setMsr(self, msr):
        self.addArgument("-q max_setup_rate=" + str(msr))

    def _setConnections(self, connections):
        self.addArgument("-q connections=" + str(connections))

    def _setID(self, ID):
        self.addArgument("-q test_system_id=" + str(ID))

    def _setSpeedScaling(self, ss):
        self.addArgument("-q ss=" + str(ss))

    def _querySetup2(self):
        self._query_client_ports();
        self._query_server_ports();
        self._query_cores();

    def _query_client_ports(self):
        # even-indexed ports face the clients
        self._client_ports = []
        for i in range(0, len(self._ports), 2):
            self._client_ports.append(self._ports[i]);

    def _query_server_ports(self):
        # odd-indexed ports face the servers
        self._server_ports = []
        for i in range(1, len(self._ports), 2):
            self._server_ports.append(self._ports[i]);

    def _query_cores(self):
        self._query_ld();
        self._query_servers();
        self._query_clients();

    def _query_ld(self):
        self._ld = self._get_core_list("$all_ld");

    def _query_servers(self):
        self._servers = self._get_core_list("$all_servers")

    def _query_clients(self):
        self._clients = self._get_core_list("$all_clients")

    def _get_core_list(self, var):
        ret = []
        result = self._send("echo " + var)._recv();
        for e in result.split(","):
            ret += [e];
        return ret;

    def start_all_ld(self):
        self._send("start $all_ld");

    def start_all_workers(self):
        self._send("start $all_workers");

    def stop_all_ld(self):
        self._send("stop $all_ld");

    def stop_all_workers(self):
        self._send("stop $all_workers");

    def update_stats(self):
        # the batched stats command is built once and reused
        if (self._sc is None):
            self._sc = StatsCmd(self)
            self._sc.add(self._buildTotalConnectionsCmd())
            self._sc.add(self._buildReTXCmd())
            self._sc.add(self._buildIerrorsCmd())
            self._sc.add(self._buildBytesPerPortCmd(self._client_ports, "rx"));

        self._sc.sendRecv()

        self._updateTotalConnections(self._sc.getResult(0))
        self._updateReTX(self._sc.getResult(1))
        self._updateIerrors(self._sc.getResult(2))
        self._update_rates_client_ports(self._sc.getResult(3));

    def _buildTotalConnectionsCmd(self):
        # first field is a tsc timestamp, then (created, finished) per core
        cmd = "l4gen(%s).tsc" % str(self._clients[0])

        for core in self._clients:
            if (len(cmd) > 0):
                cmd += ","
            cmd += "l4gen(%s).created,l4gen(%s).finished" % (str(core), str(core))
        return cmd;

    def _updateTotalConnections(self, rep):
        instant = Decimal(int(rep[0]) - self._beg)/self._hz
        rep = rep[1:]
        tot = 0;
        # active connections = created - finished, summed over client cores
        for i in range(0,len(rep), 2):
            tot += int(rep[i]) - int(rep[i + 1]);

        prev = self._lastTot;
        last = TimeSeriesPoint(tot, instant);

        if (prev == None):
            prev = last;

        self._prevTot = prev
        self._lastTot = last;

    def _buildReTXCmd(self):
        cmd = ""
        for core in self._clients + self._servers:
            if (len(cmd) > 0):
                cmd += ","
            cmd += "l4gen(%s).retx" % str(core)
        return cmd;

    def _updateReTX(self, rep):
        retx = 0;
        for i in rep:
            retx += int(i);
        self._retx = retx;

    def _updateIerrors(self, rep):
        self._ierrors = self._parseIerrorsReply(rep)

    def get_total_connections(self):
        return self._lastTot.getValue()

    def getCurrentSetupRate(self):
        return int(self._lastTot.getRateOfChange(self._prevTot));

    def get_total_retx(self):
        return self._retx

    def get_rates_client_ports(self):
        return self._calcLinkUtilization(self._prevBytesClient, self._lastBytesClient);

    def getIerrorsCached(self):
        return self._ierrors;

    def _update_rates_client_ports(self, rep):
        prevBytes = self._lastBytesClient
        lastBytes = self._parseTimeSeries(rep);

        if (prevBytes == None):
            prevBytes = lastBytes;

        self._prevBytesClient = prevBytes;
        self._lastBytesClient = lastBytes;

    def _getBytesPerPort(self, ports, rxOrTx):
        sc = StatsCmd(self);
        sc.add(self._buildBytesPerPortCmd(ports, rxOrTx))
        sc.sendRecv();

        rep = sc.getResult(0);

        return self._parseTimeSeries(rep);

    def _buildBytesPerPortCmd(self, ports, rxOrTx):
        cmd = ""
        for port in ports:
            if (len(cmd) > 0):
                cmd += ","
            cmd += "port(%s).%s.bytes,port(%s).tsc" % (str(port), rxOrTx, str(port));
        return cmd

    def tx_rate_meassurement(self):
        prev = self._lastBytesTxMeassurement
        last = self._getBytesPerPort(self._server_ports, "tx");

        if (prev == None):
            prev = last;

        self._prevBytesTxMeassurement = prev
        self._lastBytesTxMeassurement = last

        return self._calcLinkUtilization(prev, last);

    def _parseTimeSeries(self, rep):
        # rep is (bytes, tsc) pairs, one pair per port.
        # BUGFIX: index with rep[i]/rep[i + 1]; the original read
        # rep[0]/rep[1] on every iteration, so every port got port 0's
        # sample.
        ret = []
        for i in range(0, len(rep), 2):
            val = int(rep[i])
            instant = Decimal(int(rep[i + 1]) - self._beg)/self._hz
            ret.append(TimeSeriesPoint(val, instant));
        return ret;

    def _calcLinkUtilization(self, prev, last):
        # fraction of a 10 Gb/s link used per port, rounded to 2 decimals
        ret = []
        for i in range(0, len(prev)):
            bytesPerSecond = last[i].getRateOfChange(prev[i]);
            linkFraction = Decimal(bytesPerSecond)/self.TENGIGABITBYTESPERSECOND
            ret.append(round(linkFraction,2));
        return ret;
+## + +from decimal import * +from prox import * + +class ProxMaxSSProbe(Prox): + def __init__(self, ts): + super(ProxMaxSSProbe, self).__init__(ts) + + def getMaxSS(self): + self.addArgument("-q max_ss_and_quit=true"); + self.addArgument("-q test_system_id=0"); + self.startFork(); + ret = self.startJoinNoConnect(); + last_occur = ret["out"].rfind("\n") + 1; + last_line = ret["out"][last_occur:]; + + return Decimal(last_line.split("=")[1]) diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py b/VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py new file mode 100644 index 00000000..fd4cc737 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/proxsocket.py @@ -0,0 +1,54 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +import socket + +class ProxSocket: + def __init__(self, ip): + self._ip = ip; + self._dat = "" + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + sock.connect((self._ip, 8474)) + except: + raise Exception("Failed to connect to prox on " + self._ip) + self._sock = sock; + + def send(self, msg): + self._sock.sendall(msg + "\n"); + return self + + def recv(self): + ret_str = ""; + done = 0; + while done == 0: + if (len(self._dat) == 0): + self._dat = self._sock.recv(256); + if (self._dat == ''): + return ''; + + while(len(self._dat)): + if (self._dat[0] == '\n'): + done = 1 + self._dat = self._dat[1:] + break; + else: + ret_str += self._dat[0]; + self._dat = self._dat[1:] + return ret_str; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py b/VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py new file mode 100644 index 00000000..41d8ad53 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/ratedistribution.py @@ -0,0 +1,69 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import sys +from decimal import * + +def usage(progName): + print "usage: " + progName + " config [up|down]" + print " The script reads a lua configuration " + print " and outputs a histogram wit 21 buckets." + print " The first 20 buckets contain 70th percentile." + print " The last bucket contains the remaining items." 
+ exit(-1); + +if (len(sys.argv) != 3): + usage(sys.argv[0]) + +if (sys.argv[2] == "down"): + match = "dn_bps" +elif (sys.argv[2] == "up"): + match = "up_bps" +else: + usage(sys.argv[0]) + +values = [] +for line in open(sys.argv[1]).readlines(): + line = line.strip(); + + if line.find(match) != -1: + v = line.split(" = ")[1].strip(",") + values.append(Decimal(v)); + +values = sorted(values) + +treshold = values[int(len(values)*0.7)] + +buckets = [0]*21; + +for v in values: + if (v > treshold): + buckets[20] += 1 + else: + buckets[int(v * 20 / treshold)] += 1 + +stepSize = treshold / 20; + +print "# bucket range, count" +for i in range(len(buckets) - 1): + beg = str(int(i * stepSize)) + end = str(int((i + 1) * stepSize - 1)) + print beg + "-" + end + "," + str(buckets[i]) + +i = len(buckets) - 1 +print beg + "+," + str(buckets[i]) diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py b/VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py new file mode 100644 index 00000000..adbb288c --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/remotesystem.py @@ -0,0 +1,58 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +import os +import time +import socket + +def ssh(user, ip, cmd): + # print cmd; + ssh_options = "" + ssh_options += "-o StrictHostKeyChecking=no " + ssh_options += "-o UserKnownHostsFile=/dev/null " + ssh_options += "-o LogLevel=quiet " + running = os.popen("ssh " + ssh_options + " " + user + "@" + ip + " \"" + cmd + "\""); + ret = {}; + ret['out'] = running.read().strip(); + ret['ret'] = running.close(); + if (ret['ret'] == None): + ret['ret'] = 0; + + return ret; + +def ssh_check_quit(obj, user, ip, cmd): + ret = ssh(user, ip, cmd); + if (ret['ret'] != 0): + obj._err = True; + obj._err_str = ret['out']; + exit(-1); + +class remoteSystem: + def __init__(self, user, ip): + self._ip = ip; + self._user = user; + + def run(self, cmd): + return ssh(self._user, self._ip, cmd); + + def scp(self, src, dst): + running = os.popen("scp " + self._user + "@" + self._ip + ":" + src + " " + dst); + return running.close(); + + def getIP(self): + return self._ip diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py b/VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py new file mode 100644 index 00000000..ad196035 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/resultprocessor.py @@ -0,0 +1,210 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +from sutstatsconsfile import * +from tsstatsconsfile import * +from csvwriter import * + +class TestResult: + class Times: + def __init__(self): + self.serie = [] + def addTime(self, val): + self.serie.append(val) + def getTime(self, i): + return self.serie[i] + + def __init__(self, testSystemCount): + self.rates = None; + self.tsStatsDump = []; + self.tsTimes = []; + for i in range(testSystemCount): + self.tsStatsDump.append(""); + self.tsTimes.append(TestResult.Times()); + + self.sutStatsDump = None; + self.sutTime = TestResult.Times(); + + def getTSCount(self): + return len(self.tsTimes) + + def setTSStatsDump(self, filePaths): + self.tsStatsDump = filePaths; + + def setSUTStatsDump(self, filePath): + self.sutStatsDump = filePath; + + def getTSStatsDump(self): + return self.tsStatsDump; + + def getSUTStatsDump(self): + return self.sutStatsDump; + + def addTimeTS(self, times): + for i in range(len(times)): + self.tsTimes[i].addTime(times[i]) + + def addTimeSUT(self, time): + self.sutTime.addTime(time); + + +class ResultProcessor: + def __init__(self, testResult): + self._testResults = testResult; + + def process(self): + self._readStatsConsLogs(); + self._mergeTsStats(); + self._calcSetupRate(); + + def percentHandled(self): + converged_tsc = self._testResults.sutTime.getTime(1) - self._testResults.sutTime.getTime(0) + end_tsc = self._testResults.sutTime.getTime(2) - self._testResults.sutTime.getTime(0) + + converged = converged_tsc/Decimal(self._sutHz) + end = end_tsc/Decimal(self._sutHz); + + rx_converged = -1 + tx_converged = -1 + rx_end = -1 + tx_end = -1 + + for entry in self._sutStats: + timeStamp = entry[3] + if (rx_converged == -1): + if (timeStamp > converged): + rx_converged = entry[0] + tx_converged = entry[1] - entry[2] + else: + continue; + else: + if (timeStamp > end): + rx_end = entry[0] + tx_end = entry[1] - entry[2] + break; + return (tx_end - tx_converged)/Decimal(rx_end - rx_converged) + + def toFile(self, fileName): + outFile = 
CsvWriter(); + + outFile.open(fileName) + + for entry in self._sutStats: + timeStamp = round(entry[3], 3); + rx = entry[0] + tx = entry[1] + drop = entry[2] + + outFile.write([timeStamp, rx, tx, drop, "", ""]) + + for entry in self._tsStats: + timeStamp = round(entry[-1], 3); + connections = entry[0] + setupRate = entry[3] + outFile.write([timeStamp,"","","", connections, setupRate]); + outFile.close(); + + def _readStatsConsLogs(self): + print "Reading SUT stats" + self._sutStats = self._readSutStats(); + print "Reading TS stats" + self._tsAllStats = self._readAllTSStats(); + + def _mergeTsStats(self): + # The first test system is the reference system. The totals + # will be accumulated by repeatedly taking the closest + # available data from other systems + ret = [] + for entry in self._tsAllStats[0]: + ret.append(entry) + + interSampleTime = ret[1][-1] - ret[0][-1]; + + mergedSampleCount = 0; + if (len(self._tsAllStats) == 1): + mergedSampleCount = len(ret) + + for i in range(0, len(self._tsAllStats) - 1): + prev = 0; + for entry in ret: + timeStamp = entry[-1] + found = False; + + for idx in range(prev, len(self._tsAllStats[i])): + diff = abs(self._tsAllStats[i][idx][-1] - timeStamp) + if (diff < interSampleTime): + found = True; + prev = idx; + break; + + if (found): + entry[0] += self._tsAllStats[i][prev][0] + entry[1] += self._tsAllStats[i][prev][1] + mergedSampleCount += 1; + else: + break; + + self._tsStats = ret[0: mergedSampleCount]; + + def _calcSetupRate(self): + for i in range(0, len(self._tsStats)): + prevCreated = 0 + prevTime = 0 + if (i > 0): + prevCreated = self._tsStats[i - 1][1]; + prevTime = self._tsStats[i - 1][-1]; + curCreated = self._tsStats[i][1]; + curTime = self._tsStats[i][-1]; + + setupRate = (curCreated - prevCreated)/(curTime - prevTime) + + self._tsStats[i].append(setupRate); + + + def _readSutStats(self): + ret = [] + fileName = self._testResults.getSUTStatsDump(); + beg = self._testResults.sutTime.getTime(0); + f = 
SutStatsConsFile(fileName, beg); + entry = f.readNext(); + self._sutHz = f.getHz(); + while (entry is not None): + ret.append(entry); + entry = f.readNext(); + f.close(); + return ret; + + def _readAllTSStats(self): + stats = [] + for i in range(self._testResults.getTSCount()): + fileName = self._testResults.getTSStatsDump()[i] + beg = self._testResults.tsTimes[i].getTime(0) + tsStat = self._readTSStats(fileName, beg) + stats.append(tsStat); + return stats; + + def _readTSStats(self, fileName, beg): + ret = [] + f = TSStatsConsFile(fileName, beg) + + entry = f.readNext() + while (entry is not None): + ret.append(entry); + entry = f.readNext(); + f.close() + return ret; diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py b/VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py new file mode 100644 index 00000000..a25c1232 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/statsconsfile.py @@ -0,0 +1,84 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +import os +import struct + +class StatsConsFile: + def __init__(self, file_name, tsc = None): + self._file = open(file_name, "rb"); + try: + data = self._file.read(4*8); + dataUnpacked = struct.unpack("<qqqq", data); + + self._hz = dataUnpacked[0] + if (tsc is None): + self._tsc = dataUnpacked[1] + else: + self._tsc = tsc; + + self._entryCount = dataUnpacked[2] + fieldCount = dataUnpacked[3] + + data = self._file.read(fieldCount); + fmt = "b" * fieldCount; + + dataUnpacked = struct.unpack("<" + fmt, data); + self._entryFmt = "<"; + self._entrySize = 0; + + for e in dataUnpacked: + if (e == 4): + self._entryFmt += "i" + elif (e == 8): + self._entryFmt += "q" + else: + raise Exception("Unknown field format: " + str(e)) + self._entrySize += e + except: + print "except" + self._file.close(); + + def setBeg(self, tsc): + self._tsc = tsc + + def getBeg(self): + return self._tsc; + + def getHz(self): + return self._hz + + def readNext(self): + ret = [] + for i in range(self._entryCount): + entry = self._readNextEntry() + if (entry == None): + return None; + ret.append(entry); + return ret; + + def _readNextEntry(self): + try: + entry = self._file.read(self._entrySize); + entryUnpacked = struct.unpack(self._entryFmt, entry); + return list(entryUnpacked) + except: + return None; + + def close(self): + self._file.close(); diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py b/VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py new file mode 100644 index 00000000..82bca9a8 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/sutstatsconsfile.py @@ -0,0 +1,61 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +from statsconsfile import * +from decimal import * + +class SutStatsConsFile: + def __init__(self, fileName, offset): + self.offset = offset; + self.statsConsFile = StatsConsFile(fileName) + + def readNext(self): + entry = self._readNextEntry(); + + if (entry is None): + return None; + + while (entry is not None and entry[-1] <= 0): + entry = self._readNextEntry(); + return entry; + + def getHz(self): + return self.statsConsFile.getHz(); + + def _readNextEntry(self): + entry = self.statsConsFile.readNext(); + if (entry is None): + return None; + + rx = 0; + tx = 0; + drop = 0; + last_tsc = 0; + + for i in range(0, len(entry), 2): + rx += entry[i][2] + tx += entry[i][3] + drop += entry[i][4] + last_tsc = entry[i][5] + + last_tsc -= self.offset; + last_tsc = Decimal(last_tsc) / self.statsConsFile.getHz(); + return [rx, tx, drop, last_tsc]; + + def close(self): + self.statsConsFile.close(); diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py b/VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py new file mode 100644 index 00000000..9e35576f --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/systemconfig.py @@ -0,0 +1,73 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +class SystemConfig: + _user = None + _ip = None + _proxDir = None + _cfgFile = None + def __init__(self, user, ip, proxDir, configDir): + self._user = user; + self._ip = ip; + self._proxDir = proxDir; + self._cfgFile = configDir; + def __init__(self, text): + self._user = text.split("@")[0]; + text = text.split("@")[1]; + self._ip = text.split(":")[0]; + self._proxDir = text.split(":")[1]; + self._cfgFile = text.split(":")[2]; + + def getUser(self): + return self._user; + + def getIP(self): + return self._ip; + + def getProxDir(self): + return self._proxDir; + + def getCfgFile(self): + return self._cfgFile; + + @staticmethod + def checkSyntax(text): + split = text.split("@"); + if (len(split) != 2): + return SystemConfig.getSyntaxError(text); + after = split[1].split(":"); + if (len(after) != 3): + return SystemConfig.getSyntaxError(text); + return "" + def toString(self): + ret = ""; + ret += " " + self._user + "@" + self._ip + "\n" + ret += " " + "prox dir: " + self._proxDir + "\n" + ret += " " + "cfg dir: " + self._cfgFile + "\n" + return ret; + + @staticmethod + def getSyntaxError(text): + ret = "Invaild system syntax" + ret += ", got: " + str(text) + ret += ", expected: " + str(SystemConfig.expectedSyntax()) + return ret; + + @staticmethod + def expectedSyntax(): + return "user@ip:proxDir:cfgFile" diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py b/VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py new file mode 100644 index 00000000..fe3dce72 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/testerset.py @@ -0,0 +1,176 @@ 
+#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +from proxdpitester import * + +class testerSet: + def __init__(self, test_systems, maxRate, testParam): + self._test_systems = []; + self._reason = "" + self._maxRate = maxRate + + testParamPerSystem = testParam.getPerSystem(len(test_systems)); + + for i in range(len(test_systems)): + ts = test_systems[i]; + to_add = ProxDpiTester(ts, testParamPerSystem, i); + self.add_test_system(to_add); + + def getCount(self): + return len(self._test_systems); + + def add_test_system(self, test_system): + self._test_systems.append(test_system); + + def startFork(self): + print "Starting test systems:" + for ts in self._test_systems: + print "\t" + str(ts.getIP()) + ts.startFork(); + + def startJoin(self): + for ts in self._test_systems: + elapsed = ts.startJoin(); + if (elapsed == None): + print "Failed to start on " + str(ts.getIP()) + else: + print "Started on " + str(ts.getIP()) + sleep(1); + + def startForkJoin(self): + self.startFork(); + self.startJoin(); + + def update_stats(self): + for ts in self._test_systems: + ts.update_stats(); + + def wait_links_up(self): + for ts in self._test_systems: + ts.waitAllLinksUp(); + sleep(1); + + def start_cores(self): + for ts in self._test_systems: + ts.start_all_ld(); + ts.waitCmdFinished(); + for ts in self._test_systems: + ts.start_all_workers(); + for ts in self._test_systems: + 
ts.waitCmdFinished(); + + def stop_cores(self): + for ts in self._test_systems: + ts.stop_all_workers(); + ts.stop_all_ld(); + + for ts in self._test_systems: + ts.waitCmdFinished(); + + def getTsc(self): + ret = [] + for ts in self._test_systems: + ret += [ts.getTsc()] + return ret; + + def get_setup_rate(self): + total = 0; + for ts in self._test_systems: + total += ts.getCurrentSetupRate(); + return total + + def get_total_connections(self): + total = 0; + for ts in self._test_systems: + ts_tot_conn = ts.get_total_connections(); + total += ts_tot_conn + + return total; + + def get_total_retx(self): + total = 0; + for ts in self._test_systems: + total += ts.get_total_retx(); + return total; + + def getIerrors(self): + total = 0; + for ts in self._test_systems: + total += ts.getIerrorsCached(); + return total; + + def get_rates(self): + rates = []; + for ts in self._test_systems: + rates += ts.get_rates_client_ports(); + return rates; + + def tx_rate_meassurement(self): + rates = [] + for ts in self._test_systems: + rates += ts.tx_rate_meassurement(); + return rates; + + def scpStatsDump(self, dst): + ret = [] + for i in range(len(self._test_systems)): + dstFileName = dst + str(i); + ret.append(dstFileName); + self._test_systems[i].scpStatsDump(dstFileName) + return ret; + + def conditionsGood(self): + tot_retx = self.get_total_retx(); + rates = self.get_rates(); + ierrors = self.getIerrors(); + + if (tot_retx > 100): + self._reason = "Too many reTX (" + str(tot_retx) + ")" + return False; + if (ierrors > 0): + self._reason = "Too many ierrors (" + str(ierrors) + ")" + return False; + for i in range(0, len(rates)): + if (rates[i] > self._maxRate): + self._setReason(i, rates) + return False; + return True; + + def _setReason(self, port, rates): + portStr = str(port); + rateStr = str(rates[port]) + maxRateStr = str(self._maxRate); + allRatesStr = str(rates); + + fmt = "Rate on port %s = %s > %s, rate on all = %s" + self._reason = fmt % (portStr, rateStr, maxRateStr, 
allRatesStr) + + def getReason(self): + return self._reason; + + def quitProx(self): + for ts in self._test_systems: + ts.quitProx(); + + def killProx(self): + for ts in self._test_systems: + ts.stop_all_workers(); + for ts in self._test_systems: + ts.stop_all_ld(); + for ts in self._test_systems: + ts.killProx(); diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py b/VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py new file mode 100644 index 00000000..521a0893 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/timeseriespoint.py @@ -0,0 +1,39 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +from decimal import * + +class TimeSeriesPoint: + def __init__(self, value, instant): + self._value = value; + self._instant = instant; + + def getValue(self): + return self._value; + + def getInstant(self): + return self._instant; + + def getRateOfChange(self, other): + diff = self.getValue() - other.getValue(); + t_diff = self.getInstant() - other.getInstant(); + + if (diff == 0 or abs(t_diff) <= 0.00001): + return Decimal(0) + else: + return Decimal(diff)/t_diff diff --git a/VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py b/VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py new file mode 100644 index 00000000..10e48a68 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/dpi/tsstatsconsfile.py @@ -0,0 +1,60 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +from statsconsfile import * +from decimal import * + +class TSStatsConsFile: + def __init__(self, fileName, offset): + self.offset = offset; + self.statsConsFile = StatsConsFile(fileName) + + def readNext(self): + entry = self._readNextEntry(); + if (entry is None): + return None; + + while (entry is not None and entry[-1] <= 0): + entry = self._readNextEntry(); + + return entry; + + def _readNextEntry(self): + entry = self.statsConsFile.readNext(); + if (entry is None): + return None; + + rx = 0; + tx = 0; + active = 0; + created = 0; + last_tsc = 0; + for i in range(0, len(entry), 2): + active += entry[i][2] + created += entry[i][3] + rx += entry[i][4] + tx += entry[i][5] + last_tsc = entry[i][6] + + last_tsc -= self.offset; + last_tsc = Decimal(last_tsc) / self.statsConsFile.getHz(); + + return [active, created, rx, tx, last_tsc]; + + def close(self): + self.statsConsFile.close(); diff --git a/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl new file mode 100755 index 00000000..8e42eeba --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/gen_4over6.pl @@ -0,0 +1,271 @@ +#!/usr/bin/perl + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +use strict vars; +use Getopt::Long; +use Pod::Usage; +use Net::Pcap; +use Net::Frame::Layer; +use Net::Frame::Layer::ETH qw(:consts); +use Net::Frame::Layer::IPv6 qw(:consts); +use Net::Frame::Layer::IPv4 qw(:consts); +use Net::Frame::Layer::UDP; +use Socket qw(AF_INET AF_INET6 inet_ntop inet_pton); + +use constant NUM_PACKETS => 30000; + +use constant ETHER_ADDR_LEN => 6; +use constant ETHER_TYPE_LEN => 2; +use constant ETHER_HDR_LEN => ( 2 * ETHER_ADDR_LEN ) + ETHER_TYPE_LEN; +use constant ETHER_STATIC_MAC => "78acdddddddd"; + +use constant UDP_HDR_LEN => 8; +use constant UDP_STATIC_PORT => 0x6666; + +use constant IPv6_HOP_LIMIT => 4; +use constant IPv6_STATIC_IP => "2222:2222:2222:2222:2222:2222:2222:2222"; + +use constant IPv4_TIME_TO_LIVE => 32; +use constant IPv4_STATIC_IP => "68.68.68.68"; + +srand; + +my $type = 'tun'; +my $pkt_count = NUM_PACKETS; + +GetOptions( + 'inet' => sub { $type = 'inet'}, + 'tun' => sub { $type = 'tun'}, + 'count=i' => \$pkt_count, + 'in=s' => \(my $in = 'ip6_tun_bind.lua'), + 'out=s' => \(my $out = 'output.pcap'), + 'size=s' => \(my $size = 0) +) or exit; + +my $pcap = pcap_open_dead( DLT_EN10MB, 65535 ); +my $dumper = pcap_dump_open($pcap, $out ) or die 'Could not create output file: ' . 
$out; + +if( $type eq 'inet' ) { + gen_inet_pcap( $in, $pkt_count ); +} +if( $type eq 'tun' ) { + gen_tun_pcap( $in, $pkt_count ); +} + +pcap_close( $pcap ); + +# Trim string +sub trim { + my ( $str ) = @_; + + $str =~ s/^\s+|\s+$//g; + + return $str; +} + +# Generate random port based on $port and $port_mask +sub rand_port { + my ( $port, $port_mask ) = @_; + + return ( $port | int( rand( 0xFFFF ) & $port_mask ) ); +} + +# Generate packet originating from CPE +sub gen_tun_packet { + my ( $sz, $ether, $ipv6, $ipv4, $udp ) = @_; + + my $hdr_ether = Net::Frame::Layer::ETH->new( + src => $ether->{'src'}, + dst => $ether->{'dst'}, + type => NF_ETH_TYPE_IPv6 + )->pack; + + my $hdr_ipv6 = Net::Frame::Layer::IPv6->new( + nextHeader => NF_IPv6_PROTOCOL_IPIP, + hopLimit => IPv6_HOP_LIMIT, + src => $ipv6->{'src'}, + dst => $ipv6->{'dst'}, + payloadLength => $sz + NF_IPv4_HDR_LEN + UDP_HDR_LEN + )->pack; + + my $hdr_ipv4 = Net::Frame::Layer::IPv4->new( + length => $sz + UDP_HDR_LEN + NF_IPv4_HDR_LEN, + ttl => IPv4_TIME_TO_LIVE, + protocol => NF_IPv4_PROTOCOL_UDP, + src => $ipv4->{'src'}, + dst => $ipv4->{'dst'} + )->pack; + + my $hdr_udp = Net::Frame::Layer::UDP->new( + src => $udp->{'src'}, + dst => $udp->{'dst'}, + length => $sz + UDP_HDR_LEN + )->pack; + + my $pkt = pack( "H*", "de" x $sz ); + $pkt = $hdr_ether . $hdr_ipv6 . $hdr_ipv4 . $hdr_udp . 
$pkt; + + my $pkt_size = length( $pkt ); + + my $hdr = { + tv_sec => 0, + tv_usec => 0, + len => $pkt_size, + caplen => $pkt_size + }; + + return ( $hdr, $pkt ); +} + +# Generate packet originating from the internet +sub gen_inet_packet { + my ( $sz, $ether, $ipv4, $udp ) = @_; + + my $hdr_ether = Net::Frame::Layer::ETH->new( + src => $ether->{'src'}, + dst => $ether->{'dst'}, + type => NF_ETH_TYPE_IPv4 + )->pack; + + my $hdr_ipv4 = Net::Frame::Layer::IPv4->new( + length => $sz + UDP_HDR_LEN + NF_IPv4_HDR_LEN, + ttl => IPv4_TIME_TO_LIVE, + protocol => NF_IPv4_PROTOCOL_UDP, + src => $ipv4->{'src'}, + dst => $ipv4->{'dst'} + )->pack; + + my $hdr_udp = Net::Frame::Layer::UDP->new( + src => $udp->{'src'}, + dst => $udp->{'dst'}, + length => $sz + UDP_HDR_LEN + )->pack; + + my $pkt = pack( "H*", "de" x $sz ); + $pkt = $hdr_ether . $hdr_ipv4 . $hdr_udp . $pkt; + + my $pkt_size = length( $pkt ); + + my $hdr = { + tv_sec => 0, + tv_usec => 0, + len => $pkt_size, + caplen => $pkt_size + }; + + return ( $hdr, $pkt ); +} + +# Read bindings file +sub read_bindings { + my ( $file ) = @_; + + print "Reading bindings file...\n"; + + my @rows; + + open my $fh, "<:encoding(utf8)", $file or die $file . ": $!"; +LINE: while ( my $line = <$fh> ) { + next if ($line =~ /^--.*/); # Skip comments + + my ($ip6, $mac, $ip4, $port); + if ($line =~ /\s*\{.*\},\s*$/) { # Weak check for a data line... 
+ + $line =~ /ip6\s*=\s*ip6\("([^\)]*)"\)/ && do { $ip6 = trim($1); }; + unless ( inet_pton( AF_INET6, $ip6 ) ) { print "ERROR - Invalid ipv6: $ip6\n"; next LINE; } + + $line =~ /ip\s*=\s*ip\("([^\)]*)"\)/ && do { $ip4 = trim($1); }; + unless ( inet_pton( AF_INET, $ip4 ) ) { print "ERROR - Invalid ipv4: $ip4\n"; next LINE; } + + $line =~ /mac\s*=\s*mac\("([^\)]*)"\)/ && do { $mac = trim($1); }; + unless ( $mac =~ /^([0-9a-f]{2}([:-]|$)){6}$/i ) { print "ERROR - Invalid mac: $mac\n"; next LINE; } + + $line =~ /port\s*=\s*([0-9]*)/ && do { $port = trim($1); }; + unless ( int($port) ) { print "ERROR - Invalid port number: $port\n"; next LINE; } + + push @rows, { + ipv6 => $ip6, + mac => $mac, + ipv4 => $ip4, + port => $port + } + } + } + close $fh; + + return @rows; +} + +# Generate packets originating from CPE +sub gen_tun_pcap { + my ( $binding_file, $pkt_count ) = @_; + my @bind = read_bindings($binding_file); + my $idx = 0; + my $row; + my $public_port = 0; + + print "Generating $pkt_count Tunnel packets...\n"; + + my $max = @bind; + for( my $i=0; $i<$pkt_count; $i++ ) { + + $idx = rand $max; + $row = @bind[$idx]; + + $public_port = rand_port( $row->{port}, 0x3f ); + + my ( $hdr, $pkt ) = gen_tun_packet( + $size, + { src => $row->{mac}, dst => ETHER_STATIC_MAC }, + { src => $row->{ipv6}, dst => IPv6_STATIC_IP }, + { src => $row->{ipv4}, dst => IPv4_STATIC_IP }, + { src => $public_port, dst => UDP_STATIC_PORT } + ); + + pcap_dump( $dumper, $hdr, $pkt ); + } +} + +# Generate packets originating from the internet +sub gen_inet_pcap { + my ( $binding_file, $pkt_count ) = @_; + my @bind = read_bindings($binding_file); + my $idx = 0; + my $row; + my $public_port = 0; + + print "Generating $pkt_count Internet packets...\n"; + + my $max = @bind; + for( my $i=0; $i<$pkt_count; $i++ ) { + + $idx = rand $max; + $row = @bind[$idx]; + + $public_port = rand_port( $row->{port}, 0x3f ); + + my ( $hdr, $pkt ) = gen_inet_packet( + $size, + { src => ETHER_STATIC_MAC, dst => 
$row->{mac} }, + { src => IPv4_STATIC_IP, dst => $row->{ipv4} }, + { src => UDP_STATIC_PORT, dst => $public_port } + ); + + pcap_dump( $dumper, $hdr, $pkt ); + } +} diff --git a/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl new file mode 100755 index 00000000..02af5103 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/ipv6_tun/ipv6_tun_bindings.pl @@ -0,0 +1,266 @@ +#!/usr/bin/perl + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +=head1 NAME + +ipv6_tun_bindings.pl + +=head1 SYNOPSIS + + ipv6_tun_bindings.pl [-n <num_entries>] [-tun_ip <ipv6>] [-mac <next_hop_mac>] + [-pub_ip <ipv4>] [-port <begin>-<end>] [-set <num_ports>] + [-suffix <suffix>] [-test <num_entries>] [-sym|-nosym] + [-help] + +=head1 DESCRIPTION + +This script can be used to generate a binding table for the IPv6 Tunnel +task implemented in PROX (ipv6_encap and ipv6_decap). +The entries in this table bind a specific tunnel endpoint (lwB4 in lw4over6 +architecture) to a public IPv4 address and port set. +The port set is actually derived from the port specified in the table +and a port bitmask in the PROX task configuration ("lookup port mask"). + +The ipv6_encap task uses the binding table to know where to tunnel IPv4 +traffic to. 
The ipv6_decap task uses the table to verify tunnel packets +have a valid public IPv4 and port combination for the originating tunnel. + +The table uses the Lua syntax so it can be loaded into PROX. Example: +return { + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0000"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4608}, + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0001"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4672}, + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0002"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4736}, + {ip6 = ip6("fe80:0000:0000:0000:0200:00ff:fe00:0003"), mac = mac("fe:80:00:00:00:00"), ip = ip("171.205.239.1"), port = 4800}, +} + +The script generates consecutive entries, starting from a given IP address +and assigning ports within a given range, increasing the port number by a +fixed amount which should correspond to the port lookup mask being used. + +UDF table: In addition to the binding table itself, the script can optionally +generate accompanying UDF tables for generating test traffic matching the +binding table. Such UDF tables can then be used in a traffic generation tool. 
+ +=head1 OPTIONS + +=over 22 + +=item -n <num_entries> + +How many entries in the binding table + +=item -tun_ip <ipv6> + +Starting tunnel endpoint IPv6 address (will be incremented) + +=item -mac <next_hop_mac> + +MAC address of the next hop to reach the tunnel endpoints + +=item -pub_ip <ipv4> + +Starting public IPv4 address + +=item -port <begin>-<end> + +Range of ports where to assign Port Sets + +=item -set <num_ports> + +Number of ports in set (should be a power of 2 because bitmasking is used +in lwAFTR) + +=item -suffix <suffix> + +Filename suffix to use for the generated file(s) + +=item -test <num_entries> + +Number of random entries to put into test UDF table + +=item -sym + +Whether the same random entry from the table should be inserted into both +traffic sides or if different entries should be used + +=item -help + +Shows the full script documentation. + +=back + +=head1 AUTHOR + + Copyright(c) 2010-2017 Intel Corporation. + All rights reserved. + +=cut + + +use strict vars; +use Getopt::Long; +use Pod::Usage; +use Socket qw(AF_INET AF_INET6 inet_ntop inet_pton); + +sub parse_ip +{ + my ($str, $ip_ref, $family) = @_; + + my $packed = inet_pton($family, $str); + return 0 if (!defined($packed)); + + if ($family == AF_INET6) { + #print unpack("H*", $packed). 
"\n"; + my @w = unpack("NNNN", $packed); + my ($high, $low) = (($w[0] << 32) | $w[1], ($w[2] << 32) | $w[3]); + @$ip_ref = ($high, $low); + } + else { + $$ip_ref = unpack("N", $packed); + } + return 1; +} + +sub ntop6 +{ + my ($in) = @_; + my $packed = pack('NNNN', $in->[0] >> 32, $in->[0] & 0xffffffff, + $in->[1] >> 32, $in->[1] & 0xffffffff); + return inet_ntop(AF_INET6, $packed); +} + +sub ntop6_expanded +{ + my ($in) = @_; + return sprintf('%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x', + ($in->[0] >> 48) & 0xffff, ($in->[0] >> 32) & 0xffff, + ($in->[0] >> 16) & 0xffff, ($in->[0] ) & 0xffff, + ($in->[1] >> 48) & 0xffff, ($in->[1] >> 32) & 0xffff, + ($in->[1] >> 16) & 0xffff, ($in->[1] ) & 0xffff); +} + +my ($tun_ip_str, $pub_ip_str, $ports_str); + +GetOptions( + 'help' => sub () { Pod::Usage::pod2usage( -verbose => 2 ); exit; }, + 'n=i' => \(my $num_B4s = 10), + 'tun_ip=s' => \(my $tun_ip_str = 'fe80:0000:0000:0000:0200:00ff:0000:0000'), + 'pub_ip=s' => \(my $pub_ip_str = '171.205.239.1'), + 'mac=s' => \(my $next_hop_mac = 'fe:80:00:00:00:00'), + 'port=s' => \(my $ports_str='4608-11968'), + 'set=n' => \(my $port_set_sz = 64), + 'suffix=s' => \(my $suffix = ''), + 'test=n' => \(my $num_test_lines = 200000), + 'sym!' => \(my $symmetric_traffic = TRUE), +) or pod2usage(-verbose => 1) && exit; + +my @tun_ip; +parse_ip($tun_ip_str, \@tun_ip, AF_INET6) or print("Invalid starting tunnel IP: $tun_ip_str\n") && pod2usage(-verbose => 1) && exit; +parse_ip($pub_ip_str, \(my $pub_ip), AF_INET) or print("Invalid starting public IP: $pub_ip_str\n") && pod2usage(-verbose => 1) && exit; +my @port_range; +if ($ports_str =~ /^([^d]+)\s*\-\s*([^d]+)$/) { + @port_range = ($1, $2); +} +else { print "Invalid port range: $ports_str\n"; pod2usage(-verbose => 1); exit } + +# Summary of input data +print "File suffix: $suffix\n" if ($suffix); +print "Starting Tunnel IP: " . ntop6(\@tun_ip) . "\n"; +print "Starting Public IP: ".inet_ntop(AF_INET, pack("N", $pub_ip)) . 
"\n"; +print "Public Port Range: $port_range[0]-$port_range[1] by blocks of $port_set_sz\n"; + +my @data; # Holds generated binding table, so we can later generate test traffic for it + +# Binding table for PROX IPv6 Tunnel +my $filename = 'ip6_tun_bind'.$suffix.'.lua'; +print "\nGenerating binding table with $num_B4s entries into $filename ... "; +open(my $fh, '>', $filename) or die "Could not open file '$filename' $!"; +print $fh "-- Bindings for lwaftr: lwB4 IPv6 address, next hop MAC address\n"; +print $fh "-- towards lwB4, IPv4 Public address, IPv4 Public Port Set\n"; +print $fh "\n"; +print $fh "return {" . "\n"; +my $port = $port_range[0]; +for (my $B4_id = 0; $B4_id < $num_B4s; $B4_id++) { + $data[$B4_id]{'b4_ipv6'} = ntop6_expanded(\@tun_ip); + $data[$B4_id]{'pub_ipv4'} = "" . (($pub_ip >> 24) & 0xff) . "." . (($pub_ip >> 16) & 0xff) . "." . (($pub_ip >> 8) & 0xff) . "." . ($pub_ip & 0xff); + $data[$B4_id]{'pub_port'} = $port; + $data[$B4_id]{'next_hop_mac'} = $next_hop_mac; + + print $fh " {"; + print $fh "ip6 = ip6(\"" . $data[$B4_id]{'b4_ipv6'} . "\")"; + print $fh ", mac = mac(\"" . $data[$B4_id]{'next_hop_mac'} . "\")"; + print $fh ", ip = ip(\"" . $data[$B4_id]{'pub_ipv4'} . "\")"; + print $fh ", port = " . $data[$B4_id]{'pub_port'}; + print $fh "},\n"; + + $port += $port_set_sz; + if ($port > $port_range[1]) { + $pub_ip++; + $port = $port_range[0]; + } + + # Move to next Tunnel address + if (@tun_ip[1] < 0xffffffffffffffff) { + @tun_ip[1]++; + } else { + @tun_ip[0]++; + @tun_ip[1] = 0; + } +} +print $fh "}" . "\n"; +close $fh; +print "[DONE]\n"; + +# Test traffic "UDF Tables" +if ($num_test_lines) { + print "Generating $num_test_lines lines of test UDF table into lwAFTR_tun|inet".$suffix.".csv ... "; + + # Tunnel Packets from B4 to lwAFTR + my $filename = 'lwAFTR_tun' . $suffix . 
'.csv'; + open(my $fh_tun, '>', $filename) or die "Could not open file '$filename' $!"; + print $fh_tun "b4_ip,pub_ip,pub_port\n"; + print $fh_tun "22,66,74\n"; # Offsets + print $fh_tun "16,4,2\n"; # Sizes + print $fh_tun "6,5,3\n"; # Format (IPv6, IPv4, Decimal) + print $fh_tun ",,\n"; + + # Internet Packets towards the lwAFTR, to be sent to corresp lwB4 over tunnel + my $filename = 'lwAFTR_inet' . $suffix . '.csv'; + open(my $fh_inet, '>', $filename) or die "Could not open file '$filename' $!"; + print $fh_inet "pub_ip,pub_port\n"; + print $fh_inet "30,36\n"; # Offsets + print $fh_inet "4,2\n"; # Sizes + print $fh_inet "5,3\n"; # Format (IPv6, IPv4, Decimal) + print $fh_inet ",,\n"; + + for (my $i = 0; $i < $num_test_lines; $i++) { + my $B4_id = int(rand($num_B4s)); + my $port = $data[$B4_id]{'pub_port'} + int(rand($port_set_sz)); + printf $fh_tun $data[$B4_id]{'b4_ipv6'} . "," . $data[$B4_id]{'pub_ipv4'} . "," . $port . "\n"; + + if (! $symmetric_traffic) { + $B4_id = int(rand($num_B4s)); + $port = $data[$B4_id]{'pub_port'} + int(rand($port_set_sz)); + } + printf $fh_inet $data[$B4_id]{'pub_ipv4'} . "," . $port . "\n"; + } + + close $fh_tun; + close $fh_inet; + print "[DONE]\n"; +} diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README new file mode 100644 index 00000000..49d819d8 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README @@ -0,0 +1,57 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+## See the License for the specific language governing permissions and +## limitations under the License. +## + +rapid (Rapid Automated Performance Indication for Dataplane) +************************************************************ + +rapid is a set of files offering an easy way to do a sanity check of the +dataplane performance of an OpenStack environment. + +Copy the files in a directory on a machine that can run the OpenStack CLI +commands and that can reach the OpenStack public network. Also create a qcow2 +image in the same directory with the following characteristics: +* Name of the qcow2 file should be: rapidVM.qcow2 + This default name can be changed on the rapid command line +* Should have DPDK and PROX installed. PROX should be in /root/prox/ directory +* Image should have cloud-init installed + +Source the openrc file of the OpenStack environment so that the OpenStack CLI +commands can be run: + # source openrc +Now you can run the rapid.py file. Use help for more info on the usage: + # ./rapid.py --help + +rapid will use the OpenStack CLI to create the flavor, key-pair, network, image, +stack, ... +Then it will connect to the 2 VMs that have been instantiated and it will launch +PROX in both VMs. +Once that is done it will connect to the PROX tcp socket and start sending +commands to run the actual test. +It will print test results on the screen while running. +The PROX instance in the Generator VM will generate packets which will arrive in +the PROX instance running on the SUT (System Under Test) VM. The SUT will then +send the packets back to the generator by swapping source and destination. + +Notes about prox_gen_user_data.sh and prox_sut_user_data.sh scripts: +- These scripts contain commands that will be executed using cloud-init at + startup of the VMs. They contain a hard-coded PCI address for the DPDK + interface that will be used by PROX. You might want to check that this is + actually the right PCI address. 
+- These scripts also assume some specific DPDK directory and tools which might
+ change over different DPDK releases. They have been tested with DPDK-17.02.
+- These scripts are also assuming that this interface is on the "dpdk-network"
+ network managed by OpenStack.
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg
new file mode 100644
index 00000000..522eb801
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg
@@ -0,0 +1,64 @@
+;;
+;; Copyright (c) 2010-2017 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+dofile("parameters.lua")
+
+[port 0]
+name=p0
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=4K
+
+[global]
+name=Basic Gen
+
+[core 0]
+mode=master
+
+[core 1]
+name=p0
+task=0
+mode=gen
+sub mode=l3
+rx ring=yes
+tx port=p0
+bps=1250000000
+pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d ${gen_hex_ip} ${sut_hex_ip} 0b b8 0b b9 00 08 55 7b
+gateway ipv4=${sut_ip}
+local ipv4=${gen_ip}
+min bulk size=$mbs
+;random=XXXXXXXXXXXXXXXX
+;random=0000000000XXXXXX ; 64 possibilities
+;rand_offset=34 ; SOURCE UDP PORT
+;random=XXXXXXXXXXXXXXXX
+;random=000000000XXXXXXX ; 128
+;rand_offset=36 ; DESTINATION UDP PORT
+
+[core 2]
+task=0
+mode=arp
+rx port=p0,p0,p0,p0
+tx port=p0
+tx cores=1t0
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
new file mode 100644
index 00000000..b384e9f0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
@@ -0,0 +1,218 @@
+##
+## Copyright (c) 2010-2017 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+## + +from __future__ import print_function + +import os +import subprocess +import socket + +class prox_ctrl(object): + def __init__(self, ip, key=None, user=None): + self._ip = ip + self._key = key + self._user = user + self._children = [] + self._proxsock = [] + + def ip(self): + return self._ip + + def connect(self): + """Simply try to run 'true' over ssh on remote system. + On failure, raise RuntimeWarning exception when possibly worth + retrying, and raise RuntimeError exception otherwise. + """ + return self.run_cmd('true', True) + + def close(self): + """Must be called before program termination.""" + for prox in self._proxsock: + prox.quit() + children = len(self._children) + if children == 0: + return + if children > 1: + print('Waiting for %d child processes to complete ...' % children) + for child in self._children: + ret = os.waitpid(child[0], os.WNOHANG) + if ret[0] == 0: + print("Waiting for child process '%s' to complete ..." % child[1]) + ret = os.waitpid(child[0], 0) + rc = ret[1] + if os.WIFEXITED(rc): + if os.WEXITSTATUS(rc) == 0: + print("Child process '%s' completed successfully" % child[1]) + else: + print("Child process '%s' returned exit status %d" % ( + child[1], os.WEXITSTATUS(rc))) + elif os.WIFSIGNALED(rc): + print("Child process '%s' exited on signal %d" % ( + child[1], os.WTERMSIG(rc))) + else: + print("Wait status for child process '%s' is 0x%04x" % ( + child[1], rc)) + + def run_cmd(self, command, _connect=False): + """Execute command over ssh on remote system. + Wait for remote command completion. + Return command output (combined stdout and stderr). + _connect argument is reserved for connect() method. 
+ """ + cmd = self._build_ssh(command) + try: + return subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as ex: + if _connect and ex.returncode == 255: + raise RuntimeWarning(ex.output.strip()) + raise RuntimeError('ssh returned exit status %d:\n%s' + % (ex.returncode, ex.output.strip())) + + def fork_cmd(self, command, name=None): + """Execute command over ssh on remote system, in a child process. + Do not wait for remote command completion. + Return child process id. + """ + if name is None: + name = command + cmd = self._build_ssh(command) + pid = os.fork() + if (pid != 0): + # In the parent process + self._children.append((pid, name)) + return pid + # In the child process: use os._exit to terminate + try: + # Actually ignore output on success, but capture stderr on failure + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as ex: + raise RuntimeError("Child process '%s' failed:\n" + 'ssh returned exit status %d:\n%s' + % (name, ex.returncode, ex.output.strip())) + os._exit(0) + + def prox_sock(self, port=8474): + """Connect to the PROX instance on remote system. + Return a prox_sock object on success, None on failure. 
+ """ + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + try: + sock.connect((self._ip, port)) + prox = prox_sock(sock) + self._proxsock.append(prox) + return prox + except: + return None + + def scp_put(self, src, dst): + """Copy src file from local system to dst on remote system.""" + cmd = [ 'scp', + '-B', + '-oStrictHostKeyChecking=no', + '-oUserKnownHostsFile=/dev/null', + '-oLogLevel=ERROR' ] + if self._key is not None: + cmd.extend(['-i', self._key]) + cmd.append(src) + remote = '' + if self._user is not None: + remote += self._user + '@' + remote += self._ip + ':' + dst + cmd.append(remote) + try: + # Actually ignore output on success, but capture stderr on failure + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as ex: + raise RuntimeError('scp returned exit status %d:\n%s' + % (ex.returncode, ex.output.strip())) + + def _build_ssh(self, command): + cmd = [ 'ssh', + '-oBatchMode=yes', + '-oStrictHostKeyChecking=no', + '-oUserKnownHostsFile=/dev/null', + '-oLogLevel=ERROR' ] + if self._key is not None: + cmd.extend(['-i', self._key]) + remote = '' + if self._user is not None: + remote += self._user + '@' + remote += self._ip + cmd.append(remote) + cmd.append(command) + return cmd + +class prox_sock(object): + def __init__(self, sock): + self._sock = sock + self._rcvd = b'' + + def quit(self): + if self._sock is not None: + self._send('quit') + self._sock.close() + self._sock = None + + def start(self, cores): + self._send('start %s' % ','.join(map(str, cores))) + + def stop(self, cores): + self._send('stop %s' % ','.join(map(str, cores))) + + def speed(self, speed, cores, tasks=None): + if tasks is None: + tasks = [ 0 ] * len(cores) + elif len(tasks) != len(cores): + raise ValueError('cores and tasks must have the same len') + for (core, task) in zip(cores, tasks): + self._send('speed %s %s %s' % (core, task, speed)) + + def reset_stats(self): + self._send('reset stats') + + def core_stats(self, 
cores, task=0): + rx = tx = drop = tsc = hz = 0 + self._send('core stats %s %s' % (','.join(map(str, cores)), task)) + for core in cores: + stats = self._recv().split(',') + rx += int(stats[0]) + tx += int(stats[1]) + drop += int(stats[2]) + tsc = int(stats[3]) + hz = int(stats[4]) + return rx, tx, drop, tsc, hz + + def set_random(self, cores, task, offset, mask, length): + self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, mask, length)) + + def _send(self, cmd): + """Append LF and send command to the PROX instance.""" + if self._sock is None: + raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd) + self._sock.sendall(cmd.encode() + b'\n') + + def _recv(self): + """Receive response from PROX instance, and return it with LF removed.""" + if self._sock is None: + raise RuntimeError("PROX socket closed, cannot receive anymore") + pos = self._rcvd.find(b'\n') + while pos == -1: + self._rcvd += self._sock.recv(256) + pos = self._rcvd.find(b'\n') + rsp = self._rcvd[:pos] + self._rcvd = self._rcvd[pos+1:] + return rsp.decode() + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh new file mode 100644 index 00000000..e7f58a9f --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_gen_user_data.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+## See the License for the specific language governing permissions and +## limitations under the License. +## + +echo 128 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages +mount -t hugetlbfs nodev /mnt/huge +modprobe uio +insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko +/root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio 00:04.0 +iptables -F diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh new file mode 100644 index 00000000..e7f58a9f --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_sut_user_data.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +echo 128 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages +mount -t hugetlbfs nodev /mnt/huge +modprobe uio +insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko +/root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio 00:04.0 +iptables -F diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py new file mode 100755 index 00000000..1a0ea41c --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.py @@ -0,0 +1,445 @@ +#!/usr/bin/python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import sys +import time +import subprocess +import getopt +from prox_ctrl import prox_ctrl + +version="17.04.19" +stack = "rapidTestEnv" #Default string for stack +yaml = "rapid.yaml" #Default string for yaml file +key = "prox" # This is also the default in the yaml file.... +flavor = "prox_flavor" # This is also the default in the yaml file.... +image = "rapidVM" # This is also the default in the yaml file.... +image_file = "rapidVM.qcow2" +network = "dpdk-network" # This is also the default in the yaml file.... 
+subnet = "dpdk-subnet" #Hardcoded at this moment + +def usage(): + print("usage: rapid [--version] [-v]") + print(" [--stack STACK_NAME]") + print(" [--yaml YAML_FILE]") + print(" [--key KEY_NAME]") + print(" [--flavor FLAVOR_NAME]") + print(" [--image IMAGE_NAME]") + print(" [--image_file IMAGE_FILE]") + print(" [--network NETWORK]") + print(" [-h] [--help]") + print("") + print("Command-line interface to RAPID") + print("") + print("optional arguments:") + print(" -v, --version Show program's version number and exit") + print(" --stack STACK_NAME Specify a name for the heat stack. Default is rapidTestEnv.") + print(" --yaml YAML_FILE Specify the yaml file to be used. Default is rapid.yaml.") + print(" --key KEY_NAME Specify the key to be used. Default is prox.") + print(" --flavor FLAVOR_NAME Specify the flavor to be used. Default is prox_flavor.") + print(" --image IMAGE_NAME Specify the image to be used. Default is rapidVM.") + print(" --image_file IMAGE_FILE Specify the image qcow2 file to be used. Default is rapidVM.qcow2.") + print(" --network NETWORK Specify the network name to be used for the dataplane. Default is dpdk-network.") + print(" -h, --help Show help message and exit.") + print("") + print("To delete the rapid stack, type the following command") + print(" openstack stack delete --yes --wait DPTestEnv") + print("Note that rapidTestEnv is the default stack name. 
Replace with STACK_NAME if needed") + +try: + opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "yaml=","stack=","key=","flavor=","image=","network="]) +except getopt.GetoptError as err: + print("===========================================") + print str(err) + print("===========================================") + usage() + sys.exit(2) +if args: + usage() + sys.exit(2) +for opt, arg in opts: + if opt in ("-h", "--help"): + usage() + sys.exit() + if opt in ("-v", "--version"): + print("Rapid Automated Performance Indication for Dataplane "+version) + sys.exit() + if opt in ("--stack"): + stack = arg + print ("Using '"+stack+"' as name for the stack") + elif opt in ("--yaml"): + yaml = arg + print ("Using stack: "+yaml) + elif opt in ("--key"): + key = arg + print ("Using key: "+key) + elif opt in ("--flavor"): + flavor = arg + print ("Using flavor: "+flavor) + elif opt in ("--image"): + image = arg + print ("Using image: "+image) + elif opt in ("--image_file"): + image_file = arg + print ("Using qcow2 file: "+image_file) + elif opt in ("--network"): + network = arg + print ("Using network: "+ network) + +print("Checking image: "+image) +cmd = 'openstack image show '+image+' |grep "status " | tr -s " " | cut -d" " -f 4' +ImageExist = subprocess.check_output(cmd , shell=True).strip() +if ImageExist == 'active': + print("Image already available") +else: + print('Creating image ...') + cmd = 'openstack image create --disk-format qcow2 --container-format bare --public --file ./'+image_file+ ' ' +image+' |grep "status " | tr -s " " | cut -d" " -f 4' + ImageExist = subprocess.check_output(cmd , shell=True).strip() + if ImageExist == 'active': + print('Image created and active') + cmd = 'openstack image set --property hw_vif_multiqueue_enabled="true" ' +image + subprocess.check_call(cmd , shell=True) + else : + raise Exception("Failed to create image") + +print("Checking key: "+key) +cmd = 'openstack keypair show '+key+' |grep "name " | tr -s " " | cut -d" " 
-f 4' +KeyExist = subprocess.check_output(cmd , shell=True).strip() +if KeyExist == key: + print("Key already installed") +else: + print('Creating key ...') + cmd = 'openstack keypair create '+ key + '>' +key+'.pem' + subprocess.check_call(cmd , shell=True) + cmd = 'chmod 600 ' +key+'.pem' + subprocess.check_call(cmd , shell=True) + cmd = 'openstack keypair show '+key+' |grep "name " | tr -s " " | cut -d" " -f 4' + KeyExist = subprocess.check_output(cmd , shell=True).strip() + if KeyExist == key: + print("Key created") + else : + raise Exception("Failed to create key: " + key) + +print("Checking flavor: "+flavor) +cmd = 'openstack flavor show '+flavor+' |grep "name " | tr -s " " | cut -d" " -f 4' +FlavorExist = subprocess.check_output(cmd , shell=True).strip() +if FlavorExist == flavor: + print("Flavor already installed") +else: + print('Creating flavor ...') + cmd = 'openstack flavor create '+flavor+' --ram 8192 --disk 80 --vcpus 4 |grep "name " | tr -s " " | cut -d" " -f 4' + FlavorExist = subprocess.check_output(cmd , shell=True).strip() + if FlavorExist == flavor: + cmd = 'openstack flavor set '+ flavor +' --property hw:mem_page_size="large" --property hw:cpu_policy="dedicated" --property hw:cpu_threads_policy="isolate"' + subprocess.check_call(cmd , shell=True) + print("Flavor created") + else : + raise Exception("Failed to create flavor: " + flavor) + +print("Checking network: "+network) +cmd = 'openstack network show '+network+' |grep "status " | tr -s " " | cut -d" " -f 4' +NetworkExist = subprocess.check_output(cmd , shell=True).strip() +if NetworkExist == 'ACTIVE': + print("Network already active") +else: + print('Creating network ...') + cmd = 'openstack network create '+network+' |grep "status " | tr -s " " | cut -d" " -f 4' + NetworkExist = subprocess.check_output(cmd , shell=True).strip() + if NetworkExist == 'ACTIVE': + print("Network created") + else : + raise Exception("Failed to create network: " + network) + +print("Checking subnet: "+subnet) 
+cmd = 'neutron subnet-show '+ subnet+' |grep "name " | tr -s " " | cut -d" " -f 4' +SubnetExist = subprocess.check_output(cmd , shell=True).strip() +if SubnetExist == subnet: + print("Subnet already exists") +else: + print('Creating subnet ...') + cmd = 'neutron subnet-create --name '+ subnet+ ' ' +network+' 10.10.10.0/24 |grep "name " | tr -s " " | cut -d" " -f 4' + SubnetExist = subprocess.check_output(cmd , shell=True).strip() + if SubnetExist == subnet: + print("Subnet created") + else : + raise Exception("Failed to create subnet: " + subnet) + +print("Checking Stack: "+stack) +cmd = 'openstack stack show '+stack+' |grep "stack_status " | tr -s " " | cut -d" " -f 4' +StackRunning = subprocess.check_output(cmd , shell=True).strip() +if StackRunning == '': + print('Creating Stack ...') + cmd = 'openstack stack create -t '+ yaml + ' --parameter flavor="'+flavor +'" --parameter key="'+ key + '" --parameter image="'+image + '" --parameter dpdk_network="'+network+'" --wait '+stack +' |grep "stack_status " | tr -s " " | cut -d" " -f 4' + StackRunning = subprocess.check_output(cmd , shell=True).strip() +if StackRunning != 'CREATE_COMPLETE': + raise Exception("Failed to create stack") + +print('Stack running') +genName=stack+'-gen' +sutName=stack+'-sut' +cmd = 'nova list | grep '+ genName +' | tr -s " " | cut -d " " -f 4' +genVMName = subprocess.check_output(cmd , shell=True).strip() +print('Generator: '+ genVMName) +cmd = 'nova list | grep '+ sutName +' | tr -s " " | cut -d " " -f 4' +sutVMName = subprocess.check_output(cmd , shell=True).strip() +print('SUT: '+ sutVMName) +cmd='nova show ' + genVMName + ' | grep "dpdk-network" | tr -s " " | cut -d" " -f 5' +genDPIP = subprocess.check_output(cmd , shell=True).strip() +cmd='nova show ' + genVMName + ' | grep "admin_internal_net" | tr -s " " | cut -d" " -f 6' +genAdminIP = subprocess.check_output(cmd , shell=True).strip() +cmd='nova show ' + sutVMName + ' | grep "dpdk-network" | tr -s " " | cut -d" " -f 5' +sutDPIP = 
subprocess.check_output(cmd , shell=True).strip() +cmd='nova show ' + sutVMName + ' | grep "admin_internal_net" | tr -s " " | cut -d" " -f 6' +sutAdminIP = subprocess.check_output(cmd , shell=True).strip() + +#======================================================================== +def connect_socket(client): + attempts = 1 + print("Trying to connect to PROX (just launched) on %s, attempt: %d" + % (client.ip(), attempts)) + sock = None + while True: + sock = client.prox_sock() + if sock is not None: + break + attempts += 1 + if attempts > 20: + raise Exception("Failed to connect to PROX on %s after %d attempts" + % (client.ip(), attempts)) + time.sleep(10) + print("Trying to connect to PROX (just launched) on %s, attempt: %d" + % (client.ip(), attempts)) + print("Connected to PROX on %s" % client.ip()) + return sock + +def connect_client(client): + attempts = 1 + print ("Trying to connect to VM which was just launched on %s, attempt: %d" + % (client.ip(), attempts)) + while True: + try: + client.connect() + break + except RuntimeWarning, ex: + attempts += 1 + if attempts > 20: + raise Exception("Failed to connect to VM after %d attempts:\n%s" + % (attempts, ex)) + time.sleep(15) + print ("Trying to connect to VM which was just launched on %s, attempt: %d" + % (client.ip(), attempts)) + print("Connected to VM on %s" % client.ip()) + + +def run_testA(): + global genclient + global sutclient + ip = genDPIP.split('.') + hexgenDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2) + ip = sutDPIP.split('.') + hexsutDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2) + with open("parameters.lua", "w") as f: + f.write('gen_hex_ip="'+hexgenDPIP+'"\n') + f.write('sut_hex_ip="'+hexsutDPIP+'"\n') + f.write('gen_ip="'+genDPIP+'"\n') + f.write('sut_ip="'+sutDPIP+'"\n') + f.close + 
genclient.scp_put('./gen.cfg', '/root/gen.cfg') + sutclient.scp_put('./sut.cfg', '/root/sut.cfg') + genclient.scp_put('./parameters.lua', '/root/parameters.lua') + sutclient.scp_put('./parameters.lua', '/root/parameters.lua') + print("Config files copied") + cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg' + genclient.fork_cmd(cmd, 'PROX GEN') + cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg' + sutclient.fork_cmd(cmd, 'PROX SUT') + gensock = connect_socket(genclient) + sutsock = connect_socket(sutclient) + new_speed = 100 + attempts = 0 + cores = [1,2] + gencores = [1] + gensock.reset_stats() + sutsock.reset_stats() + gensock.start([2]) + print("+---------------------------------------------------------------------------------------------------------+") + print("| Generator is sending UDP (1 flow) packets (64 bytes) to SUT. SUT sends packets back |") + print("+------+-----------------+----------------+----------------+----------------+----------------+------------+") + print("| Test | Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. 
by Gen | Result |") + print("+------+-----------------+----------------+----------------+----------------+----------------+------------+") + while (new_speed > 0.1): + attempts += 1 + # Start generating packets at requested speed (in % of a 10Gb/s link) + gensock.speed(new_speed, gencores) + gensock.start(gencores) + time.sleep(1) + # Get statistics now that the generation is stable and NO ARP messages any more + old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats([1]) + old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(cores) + time.sleep(10) + # Get statistics after some execution time + new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(cores) + new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats([1]) + time.sleep(1) + # Stop generating + gensock.stop(gencores) + drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM + rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM + tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface + tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles. 
+ sut_rx = new_sut_rx - old_sut_rx + sut_tx = new_sut_tx - old_sut_tx + sut_tsc = new_sut_tsc - old_sut_tsc + if (tx == 0): + raise Exception("TX = 0") + drop_rate = round(((drop-rx) * 100.0)/(tx+drop-rx),1) + pps_req_tx = round((tx+drop-rx)*tsc_hz*1.0/(tsc*1000000),5) + pps_tx = round(tx*tsc_hz*1.0/(tsc*1000000),5) + pps_rx = round(rx*tsc_hz*1.0/(tsc*1000000),5) + pps_sut_tx = round(sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000),5) + if ((drop_rate) < 1): + # This will stop the test when number of dropped packets is below a certain percentage + print("+------+-----------------+----------------+----------------+----------------+----------------+------------+") + print('|{:>5}'.format(str(attempts))+" | "+ '{:>14}'.format(str(new_speed)) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(str(pps_sut_tx)) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps | SUCCESS |") + print("+------+-----------------+----------------+----------------+----------------+----------------+------------+") + break + else: + print('|{:>5}'.format(str(attempts))+" | "+ '{:>14}'.format(str(new_speed)) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(str(pps_sut_tx)) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps | FAILED |") + # Following calculates the ratio for the new speed to be applied + # On the Y axis, we will find the ratio, a number between 0 and 1 + # On the x axis, we find the % of dropped packets, a number between 0 and 100 + # 2 lines are drawn and we take the minumun of these lines to calculate the ratio + # One line goes through (0,y0) and (p,q) + # The second line goes through (p,q) and (100,y100) + y0=0.99 + y100=0.1 + p=15 + q=.9 + ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100)) + new_speed = (int(new_speed*ratio*100)+0.5)/100 + gensock.quit() + sutsock.quit() + time.sleep(2) + print("") + +def run_testB(): + global genclient + 
global sutclient + ip = genDPIP.split('.') + hexgenDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2) + ip = sutDPIP.split('.') + hexsutDPIP=hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2) + with open("parameters.lua", "w") as f: + f.write('gen_hex_ip="'+hexgenDPIP+'"\n') + f.write('sut_hex_ip="'+hexsutDPIP+'"\n') + f.write('gen_ip="'+genDPIP+'"\n') + f.write('sut_ip="'+sutDPIP+'"\n') + f.close + genclient.scp_put('./gen.cfg', '/root/gen.cfg') + sutclient.scp_put('./sut.cfg', '/root/sut.cfg') + genclient.scp_put('./parameters.lua', '/root/parameters.lua') + sutclient.scp_put('./parameters.lua', '/root/parameters.lua') + print("Config files copied") + cmd = '/root/prox/build/prox -e -t -o cli -f /root/gen.cfg' + genclient.fork_cmd(cmd, 'PROX GEN') + cmd = '/root/prox/build/prox -t -o cli -f /root/sut.cfg' + sutclient.fork_cmd(cmd, 'PROX SUT') + gensock = connect_socket(genclient) + sutsock = connect_socket(sutclient) + print("+----------------------------------------------------------------------------------------------+") + print("| UDP, 64 bytes, different number of flows by randomizing SRC & DST UDP port |") + print("+--------+-----------------+----------------+----------------+----------------+----------------+") + print("| Flows | Speed requested | Req to Generate| Sent by Gen | Forward by SUT | Rec. by Gen |") + print("+--------+-----------------+----------------+----------------+----------------+----------------+") + cores = [1,2] + gencores = [1] + gensock.start([2]) + new_speed = 100 + # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable. 
+ flows={128:['0000000000000XXX','000000000000XXXX'],1024:['00000000000XXXXX','00000000000XXXXX'],8192:['0000000000XXXXXX','000000000XXXXXXX'],65535:['00000000XXXXXXXX','00000000XXXXXXXX'],524280:['0000000XXXXXXXXX','000000XXXXXXXXXX']} + for flow_number in sorted(flows.iterkeys()): + #new_speed = 100 Commented out: Not starting from 100% since we are trying more flows, so speed will not be higher than the speed achieved in previous loop + attempts = 0 + gensock.reset_stats() + sutsock.reset_stats() + source_port,destination_port = flows[flow_number] + gensock.set_random(gencores,0,34,source_port,2) + gensock.set_random(gencores,0,36,destination_port,2) + while (new_speed > 0.1): + attempts += 1 + # Start generating packets at requested speed (in % of a 10Gb/s link) + gensock.speed(new_speed, gencores) + gensock.start(gencores) + time.sleep(1) + # Get statistics now that the generation is stable and NO ARP messages any more + old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats([1]) + old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(cores) + time.sleep(10) + # Get statistics after some execution time + new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(cores) + new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats([1]) + time.sleep(1) + # Stop generating + gensock.stop(gencores) + drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM + rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM + tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface + tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles. 
+ sut_rx = new_sut_rx - old_sut_rx + sut_tx = new_sut_tx - old_sut_tx + sut_tsc = new_sut_tsc - old_sut_tsc + if (tx == 0): + raise Exception("TX = 0") + drop_rate = round(((drop-rx) * 100.0)/(tx+drop-rx),1) + pps_req_tx = round((tx+drop-rx)*tsc_hz*1.0/(tsc*1000000),5) + pps_tx = round(tx*tsc_hz*1.0/(tsc*1000000),5) + pps_rx = round(rx*tsc_hz*1.0/(tsc*1000000),5) + pps_sut_tx = round(sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000),5) + if ((drop_rate) < 1): + # This will stop the test when number of dropped packets is below a certain percentage + print('|{:>7}'.format(str(flow_number))+" | "+ '{:>14}'.format(str(new_speed)) + '% | '+ '{:>9}'.format(str(pps_req_tx))+' Mpps | '+ '{:>9}'.format(str(pps_tx)) +' Mpps | ' + '{:>9}'.format(str(pps_sut_tx)) +' Mpps | '+ '{:>9}'.format(str(pps_rx))+" Mpps |") + print("+--------+-----------------+----------------+----------------+----------------+----------------+") + break + # Following calculates the ratio for the new speed to be applied + # On the Y axis, we will find the ratio, a number between 0 and 1 + # On the x axis, we find the % of dropped packets, a number between 0 and 100 + # 2 lines are drawn and we take the minumun of these lines to calculate the ratio + # One line goes through (0,y0) and (p,q) + # The second line goes through (p,q) and (100,y100) + y0=0.99 + y100=0.1 + p=15 + q=.9 + ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100)) + new_speed = (int(new_speed*ratio*100)+0.5)/100 + gensock.quit() + sutsock.quit() + time.sleep(2) + print("") + +#======================================================================== +genclient = prox_ctrl(genAdminIP, key+'.pem') +connect_client(genclient) +sutclient = prox_ctrl(sutAdminIP, key+'.pem') +connect_client(sutclient) +##################################################################################### +run_testA() +run_testB() +##################################################################################### +genclient.close() 
+sutclient.close() + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml new file mode 100644 index 00000000..eab957f5 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapid.yaml @@ -0,0 +1,105 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +heat_template_version: 2016-04-08 +description: RAPID stack (Rapid Automated Performance Indication for Dataplane) +parameters: + image: + type: string + label: Image name or ID + description: Image to be used for compute instance + default: RapidVM + flavor: + type: string + label: Flavor + description: Type of instance (flavor) to be used + default: prox_flavor + key: + type: string + label: Key name + description: Name of key-pair to be used for compute instance + default: prox + dpdk_network: + type: string + label: Private network name or ID + description: Network to attach instance to. + default: dpdk-network + private_network: + type: string + label: Private network name or ID + description: Network to attach instance to. + default: admin_internal_net + availability_zone: + type: string + description: The Availability Zone to launch the instance. 
+ default: nova + +resources: + sut: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_sut_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - network: { get_param: private_network } + - network: { get_param: dpdk_network } + gen: + type: OS::Nova::Server + properties: + availability_zone: { get_param: availability_zone } + user_data: + get_file: prox_gen_user_data.sh + key_name: { get_param: key } + image: { get_param: image } + flavor: { get_param: flavor } + networks: + - network: { get_param: private_network } + - network: { get_param: dpdk_network } + + sut_floating_ip: + type: OS::Nova::FloatingIP + properties: + pool: admin_floating_net + + gen_floating_ip: + type: OS::Nova::FloatingIP + properties: + pool: admin_floating_net + + sut_association: + type: OS::Nova::FloatingIPAssociation + properties: + floating_ip: { get_resource: sut_floating_ip } + server_id: { get_resource: sut } + + gen_association: + type: OS::Nova::FloatingIPAssociation + properties: + floating_ip: { get_resource: gen_floating_ip } + server_id: { get_resource: gen } + +outputs: + sut_ip: + description: IP address of the instance + value: { get_attr: [sut, first_address] } + gen_ip: + description: IP address of the instance + value: { get_attr: [gen, first_address] } + diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg new file mode 100644 index 00000000..2937a749 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/sut.cfg @@ -0,0 +1,51 @@ +;; +;; Copyright (c) 2010-2017 Intel Corporation +;; +;; Licensed under the Apache License, Version 2.0 (the "License"); +;; you may not use this file except in compliance with the License. 
+;; You may obtain a copy of the License at +;; +;; http://www.apache.org/licenses/LICENSE-2.0 +;; +;; Unless required by applicable law or agreed to in writing, software +;; distributed under the License is distributed on an "AS IS" BASIS, +;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;; See the License for the specific language governing permissions and +;; limitations under the License. +;; + +[eal options] +-n=4 ; force number of memory channels +no-output=no ; disable DPDK debug output + +[lua] +dofile("parameters.lua") + +[port 0] +name=if0 +mac=hardware + +[defaults] +mempool size=2K + +[global] +name=NOP forwarding + +[core 0] +mode=master + +[core 1] +name=swap +task=0 +mode=arp +sub mode=local +rx port=if0 +tx port=if0 +tx cores=1t1 +local ipv4=${sut_ip} +task=1 +mode=swap +rx ring=yes +tx port=if0 +drop=no + diff --git a/VNFs/DPPD-PROX/helper-scripts/start_vm.py b/VNFs/DPPD-PROX/helper-scripts/start_vm.py new file mode 100755 index 00000000..7af7df9c --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/start_vm.py @@ -0,0 +1,143 @@ +#!/bin/env python2.7 + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +from os import system +from os import fork, _exit +from subprocess import check_output +import socket +from time import sleep +import json +import sys + +# This script starts qemu with the CPU layout specified by the cores +# array below. 
Each element in the array represents a core. To enable +# hyper-threading (i.e. two logical cores per core), each element in +# the array should be an array of length two. The values stored inside +# the array define to which host cores the guest cores should be +# affinitized. All arguments of this script are passed to qemu +# directly. Porting an existing qemu command line setup to make use of +# this script requires removing the -smp parameters and -qmp +# parameters if those were used. These are built by the script based +# on the cores array. + +# After successfully starting qemu, this script will connect through +# QMP and affinitize all cores within the VM to match cores on the +# host. + +execfile("./vm-cores.py") + +def build_mask(cores): + ret = 0; + for core in cores: + for thread in core: + ret += 1 << thread; + return ret; + +n_cores = len(cores); +n_threads = len(cores[0]); + +mask = str(hex((build_mask(cores)))) + +smp_str = str(n_cores*n_threads) +smp_str += ",cores=" + str(n_cores) +smp_str += ",sockets=1" +smp_str += ",threads=" + str(n_threads) + +try: + qmp_sock = check_output(["mktemp", "--tmpdir", "qmp-sock-XXXX"]).strip() +except: + qmp_sock = "/tmp/qmp-sock" + +qemu_cmdline = "" +qemu_cmdline += "taskset " + mask + " qemu-system-x86_64 -smp " + smp_str +qemu_cmdline += " -qmp unix:" + qmp_sock + ",server,nowait" +qemu_cmdline += " -daemonize" + +for a in sys.argv[1:]: + qemu_cmdline += " " + a + +try: + pid = fork() +except OSError, e: + sys.exit("Failed to fork: " + e.strerror) + +if (pid != 0): + # In the parent process + ret = system(qemu_cmdline) + if (ret != 0): + sys.exit("Failed to run QEMU: exit status " + str(ret) + ". 
Command line was:\n" + qemu_cmdline) + # Parent process done + sys.exit(0) + +# In the child process: use _exit to terminate +retry = 0 +s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +while (retry < 10): + sleep(1); + try: + s.connect(qmp_sock) + print "Connected to QMP" + break; + except: + pass + retry = retry + 1 + print "Failed to connect to QMP, attempt " + str(retry) +if (retry >= 10): + print "Failed to connect to QMP" + _exit(1) + +# skip info about protocol +dat = s.recv(100000) +# need to run qmp_capabilities before next command works +s.send("{\"execute\" : \"qmp_capabilities\" }") +dat = s.recv(100000) +# Get the PID for each guest core +s.send("{\"execute\" : \"query-cpus\"}") +dat = s.recv(100000) +a = json.loads(dat)["return"]; + +if (len(a) != n_cores*n_threads): + print "Configuration mismatch: " + str(len(a)) + " vCPU reported by QMP, instead of expected " + str(n_cores*n_threads) + _exit(1) +print "QMP reported " + str(len(a)) + " vCPU, as expected" + +if (n_threads == 1): + idx = 0; + for core in a: + cm = str(hex(1 << cores[idx][0])) + pid = str(core["thread_id"]) + system("taskset -p " + cm + " " + pid + " > /dev/null") + idx = idx + 1 +elif (n_threads == 2): + idx = 0; + prev = 0; + for core in a: + cm = str(hex(1 << cores[idx][prev])) + pid = str(core["thread_id"]) + system("taskset -p " + cm + " " + pid + " > /dev/null") + prev = prev + 1; + if (prev == 2): + idx = idx + 1; + prev = 0 +else: + print "Not implemented yet: more than 2 threads per core" + _exit(1) + +print "Core affinitization completed" +_exit(0) + diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py new file mode 100755 index 00000000..f26d0db6 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_BNG_8ports.py @@ -0,0 +1,457 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 
2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import socket +import sys +import os +from time import * +from datetime import datetime +from optparse import OptionParser +import time +from remote_system import * +from math import log + +# General parameters +accuracy = 0.1 # in percent of line rate +max_dropped = 0.1 # in percent +all_pkt_size = [64,128,256,512,1024,1280,1494] +all_ip_src = [0,6,12,18] +all_ip_dst = [0,6,12,18] + +# Stear parameters +step_time = 0.001 # in seconds +step_delta = 10 # in percent of line rate + +##### Use case 1: packet loss and latency ##### +low_steps_delta_for_loss = 0.01 # Use increment of 0.01% from 0 to low_steps +medium_steps_delta_for_loss = 0.1 # Use increment of 0.1% from low_steps to medium_steps +normal_steps_delta_for_loss = 1.0 # Use increment of 1% from medium_steps till 100% +low_steps = 0.1 +medium_steps = 1.0 + +# Prox parameters +tx_port0 = [4] +tx_port1 = [6] +tx_port2 = [8] +tx_port3 = [10] +tx_port4 = [12] +tx_port5 = [14] +tx_port6 = [16] +tx_port7 = [18] +tx_task = 0 + +all_rx_cores = [20,22,24,26,28,30,32,34] +rx_lat_cores = [20,22,24,26,28,30,32,34] +rx_task = 0 + +# Some variables, do not change + +# Program arguments +parser = OptionParser() +parser.add_option("-d", "--duration", dest="test_duration", help="Duration of each steps", metavar="integer", default=10) +parser.add_option("-s", "--speed", dest="init_speed", help="Initial speed", metavar="integer", default=100) +parser.add_option("-r", "--run", dest="run", help="Run test", 
metavar="integer", default=0) +parser.add_option("-c", "--configure", dest="configure", help="Configure Test", metavar="integer", default=0) +(options, args) = parser.parse_args() + +init_speed = int(options.init_speed) +test_duration = int(options.test_duration) +configure = int(options.configure) +run = int(options.run) + +nb_cores_per_interface = len(tx_port0) +max_speed = (100.0/nb_cores_per_interface) +init_speed = (init_speed * 1.0/nb_cores_per_interface) +accuracy = (accuracy * 1.0/nb_cores_per_interface) +normal_steps_delta_for_loss = (normal_steps_delta_for_loss /nb_cores_per_interface) +medium_steps_delta_for_loss = (medium_steps_delta_for_loss /nb_cores_per_interface) +low_steps_delta_for_loss = (low_steps_delta_for_loss /nb_cores_per_interface) +medium_steps = (medium_steps /nb_cores_per_interface) +low_steps = (low_steps /nb_cores_per_interface) + +max_dropped = max_dropped / 100 + +def to_str(arr): + ret = "" + first = 1; + for a in arr: + if (first == 0): + ret += "," + + ret += str(a) + first = 0; + return ret; + +tx_cores_cpe = tx_port0 + tx_port1 + tx_port2 + tx_port3 +tx_cores_inet = tx_port4 + tx_port5 + tx_port6 + tx_port7 +tx_cores = tx_cores_cpe + tx_cores_inet + +def send_all_pkt_size(cores, pkt_size): + for c in cores: + sock.sendall("pkt_size " + str(c) + " 0 " + str(pkt_size) + "\n"); + +def send_all_value(cores, offset, value, len): + for c in cores: + sock.sendall("set value " + str(c) + " 0 " + str(offset) + " " + str(value) + " " + str(len)+ "\n"); + +def send_all_random(cores, offset, rand_str, len): + for c in cores: + sock.sendall("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n"); + #print("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n"); + +def send_all_speed(cores, speed_perc): + for c in cores: + sock.sendall("speed " + str(c) + " 0 " + str(speed_perc) + "\n"); + +def send_reset_random(): + sock.sendall("reset randoms all" + "\n"); + +def 
send_reset_value(): + sock.sendall("reset values all" + "\n"); + +def rx_stats(tx_cores, tx_task, rx_cores, rx_task): + rx = tx = drop = tsc = tsc_hs = ierrors = 0 + for e in tx_cores: + sock.sendall("core stats " + str(e) + " " + str(tx_task) + "\n") + recv = recv_once() + rx += int(recv.split(",")[0]) + tx += int(recv.split(",")[1]) + drop += int(recv.split(",")[2]) + tsc = int(recv.split(",")[3]) + tsc_hz = int(recv.split(",")[4]) + for e in rx_cores: + sock.sendall("core stats " + str(e) + " " + str(rx_task) + "\n") + recv = recv_once() + rx += int(recv.split(",")[0]) + tx += int(recv.split(",")[1]) + drop += int(recv.split(",")[2]) + tsc = int(recv.split(",")[3]) + tsc_hz = int(recv.split(",")[4]) + # Also get the ierrors as generators might be the bottleneck... + sock.sendall("tot ierrors tot\n") + recv = recv_once() + ierrors += int(recv.split(",")[0]) + rx+=ierrors + return rx,tx,drop,tsc,tsc_hz + +def lat_stats(cores,task): + lat_min = [0 for e in range(127)] + lat_max = [0 for e in range(127)] + lat_avg = [0 for e in range(127)] + for e in cores: + sock.sendall("lat stats " + str(e) + " " + str(task) + " " + "\n") + recv = recv_once() + lat_min[e] = int(recv.split(",")[0]) + lat_max[e] = int(recv.split(",")[1]) + lat_avg[e] = int(recv.split(",")[2]) + return lat_min, lat_max, lat_avg + +def recv_once(): + ret_str = ""; + done = 0; + while done == 0: + dat = sock.recv(256); + i = 0; + while(i < len(dat)): + if (dat[i] == '\n'): + done = 1 + else: + ret_str += dat[i]; + i = i + 1; + return ret_str + +def set_pkt_sizes(tx_cores, p): + send_all_pkt_size(tx_cores, p-4) + # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size + send_all_value(tx_cores, 16, p - 18, 2) # 14 for MAC (12) EthType (2) + send_all_value(tx_cores, 38, p - 38, 2) # 34 for MAC (12) EthType (2) IP (20) + +def set_pkt_sizes_cpe(tx_cores, p): + send_all_pkt_size(tx_cores, p-4) + # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to 
pkt size + send_all_value(tx_cores, 24, p - 26, 2) # 22 for QinQ (8) MAC (12) EthType (2) + send_all_value(tx_cores, 46, p - 46, 2) # 42 for QinQ (8) MAC (12) EthType (2) IP (20) + +def set_pkt_sizes_inet(tx_cores, p): + send_all_pkt_size(tx_cores, p+24-4) + # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size + send_all_value(tx_cores, 20, p + 2, 2) # 14 for MAC (12) EthType (2) + send_all_value(tx_cores, 48, p - 26, 2) # 14 for MAC (12) EthType (2) + send_all_value(tx_cores, 70, p - 46, 2) # 34 for MAC (12) EthType (2) IP (20) + +def run_measure_throughput(speed, speed_cpe): + done = 0 + # Intialize tests by stopping cores and resetting stats + step=0 + steps_done = 0 + sock.sendall("start " + to_str(all_rx_cores) + "\n") + sleep(2) + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + sock.sendall("reset stats\n") + print "Speed = " + str(speed * nb_cores_per_interface) + sleep(1); + + send_all_speed(tx_cores, step); + + # Now starting the steps. First go to the common speed, then increase steps for the faster one. + sock.sendall("start " + to_str(tx_cores) + "," + to_str(rx_lat_cores) + "\n") + while (steps_done == 0): + sleep(step_time) + if (step + step_delta <= speed): + step+=step_delta + else: + steps_done = 1; + send_all_speed(tx_cores, step) + + # Steps are now OK. Set speed + send_all_speed(tx_cores_inet, speed); + send_all_speed(tx_cores_cpe, speed_cpe); + sleep(2); + + # Getting statistics to calculate PPS at right speed.... + rx_pps_beg,tx_pps_beg,drop_pps_beg,tsc_pps_beg,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + sleep(test_duration); + + # Collect statistics before test stops...and stop the test. Important to get stats before stopping as stops take some time... 
+ rx_pps_end,tx_pps_end,drop_pps_end,tsc_pps_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + lat_min,lat_max,lat_avg = lat_stats(rx_lat_cores, rx_task) + sock.sendall("stop " + to_str(tx_cores) + "\n") + sock.sendall("start " + to_str(all_rx_cores) + "\n") + sleep(3); + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + + rx_end, tx_end,drop_end,tsc_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + rx = rx_pps_end - rx_pps_beg + tsc = tsc_pps_end - tsc_pps_beg + mpps = rx / (tsc/float(tsc_hz)) / 1000000 + tx = tx_pps_end - tx_pps_beg + tx_mpps = tx / (tsc/float(tsc_hz)) / 1000000 + + #print "Runtime = " + str((tsc)/float(tsc_hz)); + if (tx_end == 0): + dropped_tot = tx_end - rx_end + dropped_pct = 0 + else: + dropped_tot = tx_end - rx_end + dropped_pct = ((dropped_tot) * 1.0) / tx_end + + if (dropped_tot > 0): + if (dropped_pct >= max_dropped): + print "** FAILED **: lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + else: + print "OK but lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + else: + if (dropped_tot < 0): + print "Something wrong happened - received more packets than transmitted" + else: + print "** OK **: RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + print "MPPS = " + str(mpps) + print "====================================================" + return dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg + +def write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg): + f.write(str(pkt_size) + "; " + str(tx_mpps) + "; " + str(mpps) + "; " + str(100 * dropped_pct) + "; " + str(dropped_tot) + "; " + str(speed * nb_cores_per_interface) + "; " + str(number_flows) + "; " ) + for e in rx_lat_cores: + f.write(str(lat_min[e]) + "; " + 
str(lat_max[e]) + "; " + str(lat_avg[e]) + "; ") + f.write("\n"); + f.flush() + +def run_dicho_search(number_flows, pkt_size): + previous_success_speed = 0.0 + previous_error_speed = max_speed + speed = init_speed * 1.0 + done = 0; + good_tx_mpps = 0 + good_mpps = 0 + good_dropped_pct = 0 + good_dropped_tot = 0 + good_speed = 0 + good_lat_min = [0 for e in range(127)] + good_lat_max = [0 for e in range(127)] + good_lat_avg = [0 for e in range(127)] + + while done == 0: + speed_cpe = (speed * (pkt_size + 20)) / (pkt_size + 24 + 20) + dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed, speed_cpe) + if ((dropped_tot >= 0) and (dropped_pct <= max_dropped)): + good_tx_mpps = tx_mpps + good_mpps = mpps + good_dropped_pct = dropped_pct + good_dropped_tot = dropped_tot + good_speed = speed + good_lat_min = lat_min + good_lat_max = lat_max + good_lat_avg = lat_avg + write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg); + write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg); + else: + write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg); + + if ((speed == max_speed) and (dropped_pct <= max_dropped)): + write_results(f_minimal, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_flows, lat_min, lat_max, lat_avg); + done = 1 + if (dropped_pct <= max_dropped): + previous_success_speed = speed + if (speed > max_speed - accuracy): + speed = max_speed + else: + if (previous_error_speed - speed < accuracy): + write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_flows, good_lat_min, good_lat_max, good_lat_avg); + done = 1 + else: + speed = speed + 
(previous_error_speed - speed)/2; + else: + previous_error_speed = speed + if (speed - previous_success_speed < accuracy): + write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_flows, good_lat_min, good_lat_max, good_lat_avg); + done = 1 + else: + speed = speed - (speed - previous_success_speed) / 2; + + +def set_source_destination_ip(nb_sources, nb_destinations): + # Destination addressese: "00XXXXXX" "XXXXXXXX" "XXXXXXXX" "XXXXXX10" + # Starting with 00 to be in class A and skipping 0.x.y.z and 127.x.y.z + # Ending with 10 to avoid x.y.z.0 and x.y.z.255 + + dst_mask = "10" + for i in range (nb_destinations): + dst_mask = "X" + str(dst_mask) + for i in range (32 - nb_destinations - 2): + dst_mask = "0" + str(dst_mask) + + src_mask = "10" + for i in range (nb_sources): + src_mask = "X" + str(src_mask) + for i in range (32 - nb_sources - 2): + src_mask = "0" + str(src_mask) + + for c in tx_port0: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port1: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port2: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port3: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port4: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port5: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port6: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + for c in tx_port7: + send_all_random([c], 26, src_mask, 4) + send_all_random([c], 30, dst_mask, 4) + +#======================================================================== +class TestDefinition(): + "Stores test parameters" + def __init__(self, number_ip_src, number_ip_dst, pkt_size): + self.number_ip_src = 
number_ip_src + self.number_ip_dst = number_ip_dst + self.pkt_size = pkt_size + +#======================================================================== +def run_use_case(number_ip_src, number_ip_dst, pkt_size): + number_flows = (2 ** number_ip_src) * (2 ** number_ip_dst) +# send_reset_random() +# send_reset_value() +# set_source_destination_ip(number_ip_src, number_ip_dst) + set_pkt_sizes_inet(tx_cores_inet, pkt_size) + set_pkt_sizes_cpe(tx_cores_cpe, pkt_size) + print "Running test with pkt size= " + str(pkt_size) + " number_ip_src = " + str(number_ip_src) + " number_ip_dst = " + str(number_ip_dst) + " Number flows = " + str(number_flows) + "; \n" + run_dicho_search(number_flows, pkt_size) + sleep(3) + +#======================================================================== +def run_all_use_cases(): + use_case_nb = 1 + # Connect to dppd + file_path = '/tmp/prox.sock' + sock.connect(file_path) + + f.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f_all.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f_minimal.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f.flush(); + f_all.flush(); + f_minimal.flush(); + + # Starting tests + print "Stopping all cores and resetting all values and randoms before starting\n" + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + sock.sendall("stop " + to_str(tx_cores) + "\n") + #sock.sendall("stop all") + sock.sendall("reset stats\n") + sleep(3); + for line in file_tests: + info = line.split(';') + if (info[0][0] == '#'): + continue + if (info[0][0] == ''): + break + number_ip_src = int(info[0]) + number_ip_dst = int(info[1]) + pkt_size = int(info[2]) + run_use_case(number_ip_src, number_ip_dst, pkt_size) + +#======================================================================== +def configure_use_case(): + Tests = [] + number_ip_dst = 0 + number_ip_src = 0 + for 
pkt_size in all_pkt_size: + Tests.append(TestDefinition(number_ip_src, number_ip_dst, pkt_size)) + + pkt_size = 64 + while (pkt_size < 1494): + Tests.append(TestDefinition(number_ip_src, number_ip_dst, pkt_size)) + pkt_size = (pkt_size *11) / 10 + + file_tests = open('test_description.txt', 'w') + file_tests.write("# Number_ip_src; number_ip_dst; pkt_size; \n") + for test in Tests: + file_tests.write(str(test.number_ip_src) + "; " + str(test.number_ip_dst) + "; " + str(test.pkt_size) + "; " + ";\n") + file_tests.close() + +#======================================================================== +if ((configure == 0) and (run == 0)): + print "Nothing to do - please use -r 1 or -c 1" +if (configure == 1): + configure_use_case() +if (run == 1): + print "****************************************************************************************************************" + print "** Running Characterization with " + str(test_duration) + " seconds steps and starting at " + str(init_speed) + " percent of line rate **" + print "****************************************************************************************************************" + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + f_all = open('all_results.txt', 'w') + f = open('detailed_results.txt', 'w') + f_minimal = open('minimal_results.txt', 'w') + file_tests = open('test_description.txt', 'r') + run_all_use_cases() + f.close(); + sock.close(); + diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py new file mode 100755 index 00000000..f4d211f6 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter.py @@ -0,0 +1,681 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +import socket +import sys +import os +from time import * +from datetime import datetime +from optparse import OptionParser +import time +from remote_system import * +from math import log + +# General parameters +accuracy = 0.1 # in percent of line rate +max_dropped = 0.001 # in percent +all_pkt_size = [64,128,256,512,1024,1280,1518] +#all_pkt_size = [64] + +# vRouter parameters, in case commands must be sent +vRouter_host = "192.168.1.96" + +# Stear parameters +step_time = 0.01 # in seconds +step_delta = 0.025 # in percent of line rate + +# Use case dependent parameters +##### Use case 0: influence of number of routes and next hops ##### +max_number_next_hops = 256 # Maximum number of next-hops per interface +max_number_routes = 8192 # Maximum number of routes per interface +max_number_addresses_local_network = 262144 + +##### Use case 1: packet loss and latency ##### +low_steps_delta_for_loss = 0.01 # Use increment of 0.01% from 0 to low_steps +medium_steps_delta_for_loss = 0.1 # Use increment of 0.1% from low_steps to medium_steps +normal_steps_delta_for_loss = 1.0 # Use increment of 1% from medium_steps till 100% +low_steps = 0.1 +medium_steps = 1.0 + +# Prox parameters +tx_port4 = [19,27,55,63] +tx_port5 = [20,28,56,64] +tx_port6 = [21,29,57,65] +tx_port7 = [22,30,58,66] +tx_port2 = [23,31,59,67] +tx_port3 = [24,32,60,68] +tx_port0 = [25,33,61,69] +tx_port1 = [26,34,62,70] +tx_task = 0 + +all_rx_cores = [1,2,3,4,5,6,7,10] +rx_lat_cores = [1,2,3,4,5,6,7,10] +rx_task = 1 + +# Some variables, do not change + +# Program arguments +parser = 
OptionParser() +parser.add_option("-d", "--duration", dest="test_duration", help="Duration of each steps", metavar="integer", default=10) +parser.add_option("-s", "--speed", dest="init_speed", help="Initial speed", metavar="integer", default=100) +parser.add_option("-u", "--use-case", dest="use_case", help="Use Case Number", metavar="integer", default=0) +parser.add_option("-r", "--run", dest="run", help="Run test", metavar="integer", default=0) +parser.add_option("-c", "--configure", dest="configure", help="Configure Test", metavar="integer", default=0) +(options, args) = parser.parse_args() + +init_speed = int(options.init_speed) +test_duration = int(options.test_duration) +use_case = int(options.use_case) +configure = int(options.configure) +run = int(options.run) + +nb_cores_per_interface = len(tx_port0) +max_speed = (100.0/nb_cores_per_interface) +init_speed = (init_speed * 1.0/nb_cores_per_interface) +accuracy = (accuracy * 1.0/nb_cores_per_interface) +normal_steps_delta_for_loss = (normal_steps_delta_for_loss /nb_cores_per_interface) +medium_steps_delta_for_loss = (medium_steps_delta_for_loss /nb_cores_per_interface) +low_steps_delta_for_loss = (low_steps_delta_for_loss /nb_cores_per_interface) +medium_steps = (medium_steps /nb_cores_per_interface) +low_steps = (low_steps /nb_cores_per_interface) + +max_dropped = max_dropped / 100 + +def to_str(arr): + ret = "" + first = 1; + for a in arr: + if (first == 0): + ret += "," + + ret += str(a) + first = 0; + return ret; + +tx_cores = tx_port0 + tx_port1 + tx_port2 + tx_port3 + tx_port4 + tx_port5 + tx_port6 + tx_port7 + +def send_all_pkt_size(cores, pkt_size): + for c in cores: + sock.sendall("pkt_size " + str(c) + " 0 " + str(pkt_size) + "\n"); + +def send_all_value(cores, offset, value, len): + for c in cores: + sock.sendall("set value " + str(c) + " 0 " + str(offset) + " " + str(value) + " " + str(len)+ "\n"); + +def send_all_random(cores, offset, rand_str, len): + for c in cores: + sock.sendall("set random " 
+ str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n"); + #print("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n"); + +def send_all_speed(cores, speed_perc): + for c in cores: + sock.sendall("speed " + str(c) + " 0 " + str(speed_perc) + "\n"); + +def send_reset_random(): + sock.sendall("reset randoms all" + "\n"); + +def send_reset_value(): + sock.sendall("reset values all" + "\n"); + +def rx_stats(tx_cores, tx_task, rx_cores, rx_task): + rx = tx = drop = tsc = tsc_hs = ierrors = 0 + for e in tx_cores: + sock.sendall("core stats " + str(e) + " " + str(tx_task) + "\n") + recv = recv_once() + rx += int(recv.split(",")[0]) + tx += int(recv.split(",")[1]) + drop += int(recv.split(",")[2]) + tsc = int(recv.split(",")[3]) + tsc_hz = int(recv.split(",")[4]) + for e in rx_cores: + sock.sendall("core stats " + str(e) + " " + str(rx_task) + "\n") + recv = recv_once() + rx += int(recv.split(",")[0]) + tx += int(recv.split(",")[1]) + drop += int(recv.split(",")[2]) + tsc = int(recv.split(",")[3]) + tsc_hz = int(recv.split(",")[4]) + # Also get the ierrors as generators might be the bottleneck... 
+ sock.sendall("tot ierrors tot\n") + recv = recv_once() + ierrors += int(recv.split(",")[0]) + rx+=ierrors + return rx,tx,drop,tsc,tsc_hz + +def lat_stats(cores,task): + lat_min = [0 for e in range(127)] + lat_max = [0 for e in range(127)] + lat_avg = [0 for e in range(127)] + for e in cores: + sock.sendall("lat stats " + str(e) + " " + str(task) + " " + "\n") + recv = recv_once() + lat_min[e] = int(recv.split(",")[0]) + lat_max[e] = int(recv.split(",")[1]) + lat_avg[e] = int(recv.split(",")[2]) + return lat_min, lat_max, lat_avg + +def recv_once(): + ret_str = ""; + done = 0; + while done == 0: + dat = sock.recv(256); + i = 0; + while(i < len(dat)): + if (dat[i] == '\n'): + done = 1 + else: + ret_str += dat[i]; + i = i + 1; + return ret_str + +def wait_vRouter_restarted(host): + while (1): + ret = os.system("ping " + host + " -c 1 > /dev/null") + if ret == 0: + print "still up..." + else: + break; + sleep(1) + + while (1): + ret = os.system("ping " + host + " -c 1 > /dev/null") + if (ret == 0): + print "UP" + break; + else: + print "still down..." + sleep(1) + +def reload_vRouter_config(config): + print "connecting to vRouter...and copying " + str(config) + sut = remote_system("root", vRouter_host) + cmd = "cp /config/prox/" + str(config) + " /config/config.boot" + sut.run(cmd) + print "Rebooting system at " + str(datetime.now().time()) + sut.run_forked("reboot") + sleep(5) + wait_vRouter_restarted(vRouter_host) + print "Waiting for last startup scripts to start..." 
+ last_script = "l2tp" + while(1): + dmesg = str(sut.run("dmesg")) + if last_script in dmesg: + print "found l2tp - UP" + break; + sleep(1) + print "vRouter started - waiting 5 last seconds before starting test" + sleep(5) + print datetime.now().time() + +def set_pkt_sizes(tx_cores, p): + send_all_pkt_size(tx_cores, p-4) + # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size + send_all_value(tx_cores, 16, p - 18, 2) # 14 for MAC (12) EthType (2) + send_all_value(tx_cores, 38, p - 38, 2) # 34 for MAC (12) EthType (2) IP (20) + +def run_measure_throughput(speed): + done = 0 + # Intialize tests by stopping cores and resetting stats + step=0 + steps_done = 0 + sock.sendall("start " + to_str(all_rx_cores) + "\n") + sleep(2) + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + sock.sendall("reset stats\n") + print "Speed = " + str(speed * nb_cores_per_interface) + sleep(1); + + send_all_speed(tx_cores, step); + + # Now starting the steps. First go to the common speed, then increase steps for the faster one. + sock.sendall("start " + to_str(tx_cores) + "," + to_str(rx_lat_cores) + "\n") + while (steps_done == 0): + sleep(step_time) + if (step + step_delta <= speed): + step+=step_delta + else: + steps_done = 1; + send_all_speed(tx_cores, step) + + # Steps are now OK. Set speed + send_all_speed(tx_cores, speed); + sleep(2); + + # Getting statistics to calculate PPS at right speed.... + rx_pps_beg,tx_pps_beg,drop_pps_beg,tsc_pps_beg,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + sleep(test_duration); + + # Collect statistics before test stops...and stop the test. Important to get stats before stopping as stops take some time... 
+ rx_pps_end,tx_pps_end,drop_pps_end,tsc_pps_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + lat_min,lat_max,lat_avg = lat_stats(rx_lat_cores, rx_task) + sock.sendall("stop " + "," + to_str(tx_cores) + "\n") + sock.sendall("start " + to_str(all_rx_cores) + "\n") + sleep(3); + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + + rx_end, tx_end,drop_end,tsc_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + rx = rx_pps_end - rx_pps_beg + tsc = tsc_pps_end - tsc_pps_beg + mpps = rx / (tsc/float(tsc_hz)) / 1000000 + tx = tx_pps_end - tx_pps_beg + tx_mpps = tx / (tsc/float(tsc_hz)) / 1000000 + + #print "Runtime = " + str((tsc)/float(tsc_hz)); + if (tx_end == 0): + dropped_tot = tx_end - rx_end + dropped_pct = 0 + else: + dropped_tot = tx_end - rx_end + dropped_pct = ((dropped_tot) * 1.0) / tx_end + + if (dropped_tot > 0): + if (dropped_pct >= max_dropped): + print "** FAILED **: lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + else: + print "OK but lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + else: + if (dropped_tot < 0): + print "Something wrong happened - received more packets than transmitted" + else: + print "** OK **: RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + print "MPPS = " + str(mpps) + print "====================================================" + return dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg + +def write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg): + f.write(str(pkt_size) + "; " + str(tx_mpps) + "; " + str(mpps) + "; " + str(100 * dropped_pct) + "; " + str(dropped_tot) + "; " + str(speed * nb_cores_per_interface) + "; " + str(number_next_hops) + "; " + str(number_routes) + "; 
" + str(traffic) + "; ") + for e in rx_lat_cores: + f.write(str(lat_min[e]) + "; " + str(lat_max[e]) + "; " + str(lat_avg[e]) + "; ") + f.write("\n"); + f.flush() + +def run_loss_graph(number_next_hops, number_routes, pkt_size, traffic): + speed = init_speed * 1.0 + done = 0; + while done == 0: + dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed) + write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + if (speed <= low_steps_delta_for_loss): + done = 1 + return + if (speed >= (medium_steps+normal_steps_delta_for_loss)): + speed -= normal_steps_delta_for_loss + else: + if (speed >= (low_steps+medium_steps_delta_for_loss)): + speed -= medium_steps_delta_for_loss + else: + speed -= low_steps_delta_for_loss + +def run_dicho_search(number_next_hops, number_routes, pkt_size, traffic): + previous_success_speed = 0.0 + previous_error_speed = max_speed + speed = init_speed * 1.0 + done = 0; + good_tx_mpps = 0 + good_mpps = 0 + good_dropped_pct = 0 + good_dropped_tot = 0 + good_speed = 0 + good_lat_min = [0 for e in range(127)] + good_lat_max = [0 for e in range(127)] + good_lat_avg = [0 for e in range(127)] + + while done == 0: + dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed) + if ((dropped_tot >= 0) and (dropped_pct <= max_dropped)): + good_tx_mpps = tx_mpps + good_mpps = mpps + good_dropped_pct = dropped_pct + good_dropped_tot = dropped_tot + good_speed = speed + good_lat_min = lat_min + good_lat_max = lat_max + good_lat_avg = lat_avg + write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, 
lat_avg); + else: + write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + + if ((speed == max_speed) and (dropped_pct <= max_dropped)): + write_results(f_minimal, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + done = 1 + if (dropped_pct <= max_dropped): + previous_success_speed = speed + if (speed > max_speed - accuracy): + speed = max_speed + else: + if (previous_error_speed - speed < accuracy): + write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg); + done = 1 + else: + speed = speed + (previous_error_speed - speed)/2; + else: + previous_error_speed = speed + if (speed - previous_success_speed < accuracy): + write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg); + done = 1 + else: + speed = speed - (speed - previous_success_speed) / 2; + + +def set_destination_ip(use_case, nb_destinations, traffic): + # minimmum 8 routes i.e. 1 per interface + # Destination addressese: "00XXXYY1" "Z00ZZ0ZZ" "AA0AA0AA" "BBBBBB10" + # Where X = interface id. Starting with 00 to be in class A and skipping 0.x.y.z and 127.x.y.z + # Y, Z and A = additional routes + # B = IP in routes. 10 to avoid x.y.z.0 and x.y.z.255 + # Gaps in A and B to void "too good" distributions e.g. 
using LPM and + # First changing Y + + mask = "" + for i in range (2): + mask = str(mask)+"0" + end_mask = "" + if (use_case != 2): + end_mask = "XXXXXX10" # Last 8 bits + + if (nb_destinations == 1): + end_mask = "0010000000000000000" + str(end_mask) + if (nb_destinations == 2): + end_mask = "X010000000000000000" + str(end_mask) + if (nb_destinations == 4): + end_mask = "XX10000000000000000" + str(end_mask) + if (nb_destinations == 8): + end_mask = "XX1X000000000000000" + str(end_mask) + elif (nb_destinations == 16): + end_mask = "XX1X00X000000000000" + str(end_mask) + elif (nb_destinations == 32): + end_mask = "XX1X00XX00000000000" + str(end_mask) + elif (nb_destinations == 64): + end_mask = "XX1X00XX0X000000000" + str(end_mask) + elif (nb_destinations == 128): + end_mask = "XX1X00XX0XX00000000" + str(end_mask) + elif (nb_destinations == 256): + end_mask = "XX1X00XX0XXX0000000" + str(end_mask) + elif (nb_destinations == 512): + end_mask = "XX1X00XX0XXXX000000" + str(end_mask) + elif (nb_destinations == 1024): + end_mask = "XX1X00XX0XXXX0X0000" + str(end_mask) + elif (nb_destinations == 2048): + end_mask = "XX1X00XX0XXXX0XX000" + str(end_mask) + elif (nb_destinations == 4096): + end_mask = "XX1X00XX0XXXX0XX0X0" + str(end_mask) + elif (nb_destinations == 8192): + end_mask = "XX1X00XX0XXXX0XX0XX" + str(end_mask) + else: + if (nb_destinations <= 64 * 1): + end_mask = "0010000000000000000" + n_dest = int(log(nb_destinations, 2)) + for i in range (n_dest): + end_mask = str(end_mask) + "X" + for i in range (6 - n_dest): + end_mask = str(end_mask) + "0" + end_mask = str(end_mask) + "10" + else: + end_mask = "XXXXXX10" # Last 8 bits + + if (nb_destinations == 64 * 2): + end_mask = "001X000000000000000" + str(end_mask) + elif (nb_destinations == 64 * 4): + end_mask = "001X00X000000000000" + str(end_mask) + elif (nb_destinations == 64 * 8): + end_mask = "001X00XX00000000000" + str(end_mask) + elif (nb_destinations == 64 * 16): + end_mask = "001X00XX0X000000000" + 
str(end_mask) + elif (nb_destinations == 64 * 32): + end_mask = "001X00XX0XX00000000" + str(end_mask) + elif (nb_destinations == 64 * 64): + end_mask = "001X00XX0XXX0000000" + str(end_mask) + elif (nb_destinations == 64 * 128): + end_mask = "001X00XX0XXXX000000" + str(end_mask) + elif (nb_destinations == 64 * 256): + end_mask = "001X00XX0XXXX0X0000" + str(end_mask) + elif (nb_destinations == 64 * 512): + end_mask = "001X00XX0XXXX0XX000" + str(end_mask) + elif (nb_destinations == 64 * 1024): + end_mask = "001X00XX0XXXX0XX0X0" + str(end_mask) + elif (nb_destinations == 64 * 2048): + end_mask = "001X00XX0XXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 4096): + end_mask = "001XX0XX0XXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 8192): + end_mask = "001XXXXX0XXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 16384): + end_mask = "001XXXXXXXXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 32768): + end_mask = "001XXXXXXXXXXXXX0XX" + str(end_mask) + elif (nb_destinations == 64 * 65536): + end_mask = "001XXXXXXXXXXXXXXXX" + str(end_mask) + + if (traffic == 0): # One-to-one. From odd interface to even interface and vice versa, no QPI cross + mask1 = str(mask) + "001" + str(end_mask) + mask2 = str(mask) + "000" + str(end_mask) + mask3 = str(mask) + "011" + str(end_mask) + mask4 = str(mask) + "010" + str(end_mask) + mask5 = str(mask) + "101" + str(end_mask) + mask6 = str(mask) + "100" + str(end_mask) + mask7 = str(mask) + "111" + str(end_mask) + mask8 = str(mask) + "110" + str(end_mask) + + elif (traffic == 1): # Full mesh within QPI (i.e. 
1 to 4) + mask1 = str(mask) + "0XX" + str(end_mask) + mask2 = str(mask) + "0XX" + str(end_mask) + mask3 = str(mask) + "0XX" + str(end_mask) + mask4 = str(mask) + "0XX" + str(end_mask) + mask5 = str(mask) + "1XX" + str(end_mask) + mask6 = str(mask) + "1XX" + str(end_mask) + mask7 = str(mask) + "1XX" + str(end_mask) + mask8 = str(mask) + "1XX" + str(end_mask) + + elif (traffic == 2): # One to one, crossing QPI (100% QPI) + mask1 = str(mask) + "100" + str(end_mask) + mask2 = str(mask) + "101" + str(end_mask) + mask3 = str(mask) + "110" + str(end_mask) + mask4 = str(mask) + "111" + str(end_mask) + mask5 = str(mask) + "000" + str(end_mask) + mask6 = str(mask) + "001" + str(end_mask) + mask7 = str(mask) + "010" + str(end_mask) + mask8 = str(mask) + "011" + str(end_mask) + + elif (traffic == 3): # 1 to 4 crossing QPI (100% QPI) + mask1 = str(mask) + "1XX" + str(end_mask) + mask2 = str(mask) + "1XX" + str(end_mask) + mask3 = str(mask) + "1XX" + str(end_mask) + mask4 = str(mask) + "1XX" + str(end_mask) + mask5 = str(mask) + "0XX" + str(end_mask) + mask6 = str(mask) + "0XX" + str(end_mask) + mask7 = str(mask) + "0XX" + str(end_mask) + mask8 = str(mask) + "0XX" + str(end_mask) + + elif (traffic == 4): # 1 to 4 (50% QPI) + mask1 = str(mask) + "XX1" + str(end_mask) + mask2 = str(mask) + "XX0" + str(end_mask) + mask3 = str(mask) + "XX1" + str(end_mask) + mask4 = str(mask) + "XX0" + str(end_mask) + mask5 = str(mask) + "XX1" + str(end_mask) + mask6 = str(mask) + "XX0" + str(end_mask) + mask7 = str(mask) + "XX1" + str(end_mask) + mask8 = str(mask) + "XX0" + str(end_mask) + + elif (traffic == 5): # Full mesh (50% QPI) + mask1 = str(mask) + "XXX" + str(end_mask) + mask2 = str(mask) + "XXX" + str(end_mask) + mask3 = str(mask) + "XXX" + str(end_mask) + mask4 = str(mask) + "XXX" + str(end_mask) + mask5 = str(mask) + "XXX" + str(end_mask) + mask6 = str(mask) + "XXX" + str(end_mask) + mask7 = str(mask) + "XXX" + str(end_mask) + mask8 = str(mask) + "XXX" + str(end_mask) + + for c in 
tx_port0: + send_all_random([c], 30, mask1, 4) + for c in tx_port1: + send_all_random([c], 30, mask2, 4) + for c in tx_port2: + send_all_random([c], 30, mask3, 4) + for c in tx_port3: + send_all_random([c], 30, mask4, 4) + for c in tx_port4: + send_all_random([c], 30, mask5, 4) + for c in tx_port5: + send_all_random([c], 30, mask6, 4) + for c in tx_port6: + send_all_random([c], 30, mask7, 4) + for c in tx_port7: + send_all_random([c], 30, mask8, 4) + for c in tx_cores: + send_all_random([c], 34, "0XXXXXXXXXXXXX10", 2) + send_all_random([c], 36, "0XXXXXXXXXXXXX10", 2) + +#======================================================================== +class TestDefinition(): + "Stores test parameters" + def __init__(self, use_case, next_hops, number_routes, pkt_size, traffic, reload): + self.use_case = use_case + self.next_hops = next_hops + self.number_routes = number_routes + self.pkt_size = pkt_size + self.traffic = traffic + self.reload = reload + +#======================================================================== +# Use case 0 increases input load and measure output load => show dropped packets at low loads, show overload behavior +# Use case 1 and use case 2 run dichotomic searches, searching for 0 packet loss (or whaever loss is configured) +# Use case 1 shows the effect of number of routes and next-hops +# Use case 2 shows the effect of the number of destination, using a fixed (low) number of routes and next-hops +#======================================================================== +def run_use_case(use_case, number_next_hops, number_routes, pkt_size, traffic, reload): + if (reload): + if (use_case == 2): + config = "config.1_1" + "_" + str(use_case) + ".boot" + else: + config = "config." 
+ str(number_routes) + "_" + str(number_next_hops) + ".boot" + reload_vRouter_config(config) + send_reset_random() + send_reset_value() + set_destination_ip(use_case, number_routes, traffic) + set_pkt_sizes(tx_cores, pkt_size) + print "Running test with pkt size= " + str(pkt_size) + " Next hops = " + str(number_next_hops) + "; number of routes = " + str(number_routes) + "; Traffic = " + str(traffic) + " \n" + if (use_case == 0): + run_loss_graph(number_next_hops, number_routes, pkt_size, traffic) + else: + run_dicho_search(number_next_hops, number_routes, pkt_size, traffic) + sleep(3) + +#======================================================================== +def run_all_use_cases(): + use_case_nb = 1 + # Connect to dppd + file_path = '/tmp/prox.sock' + sock.connect(file_path) + + f.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f_all.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f_minimal.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f.flush(); + f_all.flush(); + f_minimal.flush(); + + # Starting tests + print "Stopping all cores and resetting all values and randoms before starting\n" + sock.sendall("stop all") + sock.sendall("reset stats\n") + sleep(3); + for line in file_tests: + info = line.split(';') + if (info[0][0] == '#'): + continue + if (info[0][0] == ''): + break + use_case = int(info[0]) + next_hops = int(info[1]) + number_routes = int(info[2]) + pkt_size = int(info[3]) + traffic = int(info[4]) + reload = int(info[5]) + print str(use_case_nb) + " : Running use case " + str(use_case) + " next_hops = " + str(next_hops) + " routes = " + str(number_routes) + " pkt_size = " + str(pkt_size) + " traffic = " + str(traffic) + " reload = " + str(reload) + run_use_case(use_case, next_hops, number_routes, pkt_size, traffic, reload) + use_case_nb = use_case_nb + 1 + 
+#======================================================================== +def configure_use_case(use_case): + Tests = [] + if (use_case == 0): + for pkt_size in all_pkt_size: + Tests.append(TestDefinition("0", "1", "1", pkt_size, "0", "1")) + for pkt_size in all_pkt_size: + Tests.append(TestDefinition("0", "1", "1", pkt_size, "1", "1")) + if (use_case == 1): + number_next_hops = 1 + reload = 0 + + number_routes = number_next_hops # At least same number of routes that number of next hops + while number_routes <= max_number_routes: + reload = 1 + for traffic in range(6): + for pkt_size in all_pkt_size: + Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload)) + reload = 0 + if (number_routes < max_number_routes / 2): + number_routes = number_routes * 4 + else: + number_routes = number_routes * 2 + + number_routes = max_number_next_hops + while number_next_hops <= max_number_next_hops: + reload = 1 + for traffic in range(6): + for pkt_size in all_pkt_size: + Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload)) + reload = 0 + number_next_hops = number_next_hops * 2 + if (use_case == 2): + number_next_hops = 1 + reload = 1 + for traffic in range(6): + nb_destinations = 1 + while nb_destinations <= max_number_addresses_local_network: + for pkt_size in all_pkt_size: + Tests.append(TestDefinition(use_case, number_next_hops, nb_destinations, pkt_size, traffic, reload)) + reload = 0 + nb_destinations = nb_destinations * 2 + reload = 1 + + file_tests = open('test_description.txt', 'w') + file_tests.write("# Use case; next_hops; routes; pkt_size; traffic; reload;\n") + for test in Tests: + file_tests.write(str(test.use_case) + "; " + str(test.next_hops) + "; " + str(test.number_routes) + "; " + str(test.pkt_size) + "; " + str(test.traffic) + "; " + str(test.reload) + ";\n") + file_tests.close() + +#======================================================================== +if ((configure 
== 0) and (run == 0)): + print "Nothing to do - please use -r 1 or -c 1" +if (configure == 1): + configure_use_case(use_case) +if (run == 1): + print "****************************************************************************************************************" + print "** Running vRouter Characterization with " + str(test_duration) + " seconds steps and starting at " + str(init_speed) + " percent of line rate **" + print "****************************************************************************************************************" + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + f_all = open('all_results.txt', 'w') + f = open('detailed_results.txt', 'w') + f_minimal = open('minimal_results.txt', 'w') + file_tests = open('test_description.txt', 'r') + run_all_use_cases() + f.close(); + sock.close(); diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py new file mode 100755 index 00000000..95eb9811 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/characterize_vRouter_4_ports.py @@ -0,0 +1,681 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +import socket +import sys +import os +from time import * +from datetime import datetime +from optparse import OptionParser +import time +from remote_system import * +from math import log + +# General parameters +accuracy = 0.1 # in percent of line rate +max_dropped = 0.001 # in percent +all_pkt_size = [64,128,256,512,1024,1280,1518] +#all_pkt_size = [64] + +# vRouter parameters, in case commands must be sent +vRouter_host = "192.168.1.96" + +# Stear parameters +step_time = 0.01 # in seconds +step_delta = 0.025 # in percent of line rate + +# Use case dependent parameters +##### Use case 0: influence of number of routes and next hops ##### +max_number_next_hops = 256 # Maximum number of next-hops per interface +max_number_routes = 8192 # Maximum number of routes per interface +max_number_addresses_local_network = 262144 + +##### Use case 1: packet loss and latency ##### +low_steps_delta_for_loss = 0.01 # Use increment of 0.01% from 0 to low_steps +medium_steps_delta_for_loss = 0.1 # Use increment of 0.1% from low_steps to medium_steps +normal_steps_delta_for_loss = 1.0 # Use increment of 1% from medium_steps till 100% +low_steps = 0.1 +medium_steps = 1.0 + +# Prox parameters +tx_port0 = [19,27,55,63] +tx_port1 = [20,28,56,64] +tx_port2 = [21,29,57,65] +tx_port3 = [22,30,58,66] +tx_port4 = [] +tx_port5 = [] +tx_port6 = [] +tx_port7 = [] +tx_task = 0 + +all_rx_cores = [23,24,25,26] +rx_lat_cores = [23,24,25,26] +rx_task = 1 + +# Some variables, do not change + +# Program arguments +parser = OptionParser() +parser.add_option("-d", "--duration", dest="test_duration", help="Duration of each steps", metavar="integer", default=10) +parser.add_option("-s", "--speed", dest="init_speed", help="Initial speed", metavar="integer", default=100) +parser.add_option("-u", "--use-case", dest="use_case", help="Use Case Number", metavar="integer", default=0) +parser.add_option("-r", "--run", dest="run", help="Run test", metavar="integer", default=0) +parser.add_option("-c", 
"--configure", dest="configure", help="Configure Test", metavar="integer", default=0) +(options, args) = parser.parse_args() + +init_speed = int(options.init_speed) +test_duration = int(options.test_duration) +use_case = int(options.use_case) +configure = int(options.configure) +run = int(options.run) + +nb_cores_per_interface = len(tx_port0) +max_speed = (100.0/nb_cores_per_interface) +init_speed = (init_speed * 1.0/nb_cores_per_interface) +accuracy = (accuracy * 1.0/nb_cores_per_interface) +normal_steps_delta_for_loss = (normal_steps_delta_for_loss /nb_cores_per_interface) +medium_steps_delta_for_loss = (medium_steps_delta_for_loss /nb_cores_per_interface) +low_steps_delta_for_loss = (low_steps_delta_for_loss /nb_cores_per_interface) +medium_steps = (medium_steps /nb_cores_per_interface) +low_steps = (low_steps /nb_cores_per_interface) + +max_dropped = max_dropped / 100 + +def to_str(arr): + ret = "" + first = 1; + for a in arr: + if (first == 0): + ret += "," + + ret += str(a) + first = 0; + return ret; + +tx_cores = tx_port0 + tx_port1 + tx_port2 + tx_port3 + tx_port4 + tx_port5 + tx_port6 + tx_port7 + +def send_all_pkt_size(cores, pkt_size): + for c in cores: + sock.sendall("pkt_size " + str(c) + " 0 " + str(pkt_size) + "\n"); + +def send_all_value(cores, offset, value, len): + for c in cores: + sock.sendall("set value " + str(c) + " 0 " + str(offset) + " " + str(value) + " " + str(len)+ "\n"); + +def send_all_random(cores, offset, rand_str, len): + for c in cores: + sock.sendall("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n"); + #print("set random " + str(c) + " 0 " + str(offset) + " " + str(rand_str) + " " + str(len)+ "\n"); + +def send_all_speed(cores, speed_perc): + for c in cores: + sock.sendall("speed " + str(c) + " 0 " + str(speed_perc) + "\n"); + +def send_reset_random(): + sock.sendall("reset randoms all" + "\n"); + +def send_reset_value(): + sock.sendall("reset values all" + "\n"); + +def rx_stats(tx_cores, 
tx_task, rx_cores, rx_task): + rx = tx = drop = tsc = tsc_hs = ierrors = 0 + for e in tx_cores: + sock.sendall("core stats " + str(e) + " " + str(tx_task) + "\n") + recv = recv_once() + rx += int(recv.split(",")[0]) + tx += int(recv.split(",")[1]) + drop += int(recv.split(",")[2]) + tsc = int(recv.split(",")[3]) + tsc_hz = int(recv.split(",")[4]) + for e in rx_cores: + sock.sendall("core stats " + str(e) + " " + str(rx_task) + "\n") + recv = recv_once() + rx += int(recv.split(",")[0]) + tx += int(recv.split(",")[1]) + drop += int(recv.split(",")[2]) + tsc = int(recv.split(",")[3]) + tsc_hz = int(recv.split(",")[4]) + # Also get the ierrors as generators might be the bottleneck... + sock.sendall("tot ierrors tot\n") + recv = recv_once() + ierrors += int(recv.split(",")[0]) + rx+=ierrors + return rx,tx,drop,tsc,tsc_hz + +def lat_stats(cores,task): + lat_min = [0 for e in range(127)] + lat_max = [0 for e in range(127)] + lat_avg = [0 for e in range(127)] + for e in cores: + sock.sendall("lat stats " + str(e) + " " + str(task) + " " + "\n") + recv = recv_once() + lat_min[e] = int(recv.split(",")[0]) + lat_max[e] = int(recv.split(",")[1]) + lat_avg[e] = int(recv.split(",")[2]) + return lat_min, lat_max, lat_avg + +def recv_once(): + ret_str = ""; + done = 0; + while done == 0: + dat = sock.recv(256); + i = 0; + while(i < len(dat)): + if (dat[i] == '\n'): + done = 1 + else: + ret_str += dat[i]; + i = i + 1; + return ret_str + +def wait_vRouter_restarted(host): + while (1): + ret = os.system("ping " + host + " -c 1 > /dev/null") + if ret == 0: + print "still up..." + else: + break; + sleep(1) + + while (1): + ret = os.system("ping " + host + " -c 1 > /dev/null") + if (ret == 0): + print "UP" + break; + else: + print "still down..." 
+ sleep(1) + +def reload_vRouter_config(config): + print "connecting to vRouter...and copying " + str(config) + sut = remote_system("root", vRouter_host) + cmd = "cp /config/prox/" + str(config) + " /config/config.boot" + sut.run(cmd) + print "Rebooting system at " + str(datetime.now().time()) + sut.run_forked("reboot") + sleep(5) + wait_vRouter_restarted(vRouter_host) + print "Waiting for last startup scripts to start..." + last_script = "l2tp" + while(1): + dmesg = str(sut.run("dmesg")) + if last_script in dmesg: + print "found l2tp - UP" + break; + sleep(1) + print "vRouter started - waiting 5 last seconds before starting test" + sleep(5) + print datetime.now().time() + +def set_pkt_sizes(tx_cores, p): + send_all_pkt_size(tx_cores, p-4) + # For all cores, need to adapt IP Length (byte 16) and UDP Length (byte 38) to pkt size + send_all_value(tx_cores, 16, p - 18, 2) # 14 for MAC (12) EthType (2) + send_all_value(tx_cores, 38, p - 38, 2) # 34 for MAC (12) EthType (2) IP (20) + +def run_measure_throughput(speed): + done = 0 + # Intialize tests by stopping cores and resetting stats + step=0 + steps_done = 0 + sock.sendall("start " + to_str(all_rx_cores) + "\n") + sleep(2) + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + sock.sendall("reset stats\n") + print "Speed = " + str(speed * nb_cores_per_interface) + sleep(1); + + send_all_speed(tx_cores, step); + + # Now starting the steps. First go to the common speed, then increase steps for the faster one. + sock.sendall("start " + to_str(tx_cores) + "," + to_str(rx_lat_cores) + "\n") + while (steps_done == 0): + sleep(step_time) + if (step + step_delta <= speed): + step+=step_delta + else: + steps_done = 1; + send_all_speed(tx_cores, step) + + # Steps are now OK. Set speed + send_all_speed(tx_cores, speed); + sleep(2); + + # Getting statistics to calculate PPS at right speed.... 
+ rx_pps_beg,tx_pps_beg,drop_pps_beg,tsc_pps_beg,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + sleep(test_duration); + + # Collect statistics before test stops...and stop the test. Important to get stats before stopping as stops take some time... + rx_pps_end,tx_pps_end,drop_pps_end,tsc_pps_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + lat_min,lat_max,lat_avg = lat_stats(rx_lat_cores, rx_task) + sock.sendall("stop " + "," + to_str(tx_cores) + "\n") + sock.sendall("start " + to_str(all_rx_cores) + "\n") + sleep(3); + sock.sendall("stop " + to_str(all_rx_cores) + "\n") + + rx_end, tx_end,drop_end,tsc_end,tsc_hz = rx_stats(tx_cores, tx_task, all_rx_cores, rx_task); + rx = rx_pps_end - rx_pps_beg + tsc = tsc_pps_end - tsc_pps_beg + mpps = rx / (tsc/float(tsc_hz)) / 1000000 + tx = tx_pps_end - tx_pps_beg + tx_mpps = tx / (tsc/float(tsc_hz)) / 1000000 + + #print "Runtime = " + str((tsc)/float(tsc_hz)); + if (tx_end == 0): + dropped_tot = tx_end - rx_end + dropped_pct = 0 + else: + dropped_tot = tx_end - rx_end + dropped_pct = ((dropped_tot) * 1.0) / tx_end + + if (dropped_tot > 0): + if (dropped_pct >= max_dropped): + print "** FAILED **: lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + else: + print "OK but lost " + str(100*dropped_pct) + "% packets RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + else: + if (dropped_tot < 0): + print "Something wrong happened - received more packets than transmitted" + else: + print "** OK **: RX = " + str(rx_end) + " TX = " + str(tx_end) + " DROPPED = " + str(tx_end - rx_end) + print "MPPS = " + str(mpps) + print "====================================================" + return dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg + +def write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, 
traffic, lat_min, lat_max, lat_avg): + f.write(str(pkt_size) + "; " + str(tx_mpps) + "; " + str(mpps) + "; " + str(100 * dropped_pct) + "; " + str(dropped_tot) + "; " + str(speed * nb_cores_per_interface) + "; " + str(number_next_hops) + "; " + str(number_routes) + "; " + str(traffic) + "; ") + for e in rx_lat_cores: + f.write(str(lat_min[e]) + "; " + str(lat_max[e]) + "; " + str(lat_avg[e]) + "; ") + f.write("\n"); + f.flush() + +def run_loss_graph(number_next_hops, number_routes, pkt_size, traffic): + speed = init_speed * 1.0 + done = 0; + while done == 0: + dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed) + write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + if (speed <= low_steps_delta_for_loss): + done = 1 + return + if (speed >= (medium_steps+normal_steps_delta_for_loss)): + speed -= normal_steps_delta_for_loss + else: + if (speed >= (low_steps+medium_steps_delta_for_loss)): + speed -= medium_steps_delta_for_loss + else: + speed -= low_steps_delta_for_loss + +def run_dicho_search(number_next_hops, number_routes, pkt_size, traffic): + previous_success_speed = 0.0 + previous_error_speed = max_speed + speed = init_speed * 1.0 + done = 0; + good_tx_mpps = 0 + good_mpps = 0 + good_dropped_pct = 0 + good_dropped_tot = 0 + good_speed = 0 + good_lat_min = [0 for e in range(127)] + good_lat_max = [0 for e in range(127)] + good_lat_avg = [0 for e in range(127)] + + while done == 0: + dropped_pct, mpps, tx_mpps, dropped_tot,lat_min,lat_max,lat_avg = run_measure_throughput(speed) + if ((dropped_tot >= 0) and (dropped_pct <= max_dropped)): + good_tx_mpps = tx_mpps + good_mpps = mpps + good_dropped_pct = dropped_pct + good_dropped_tot = dropped_tot + good_speed = speed + good_lat_min = lat_min + good_lat_max = lat_max + good_lat_avg = lat_avg + write_results(f, pkt_size, tx_mpps, mpps, dropped_pct, 
dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + else: + write_results(f_all, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + + if ((speed == max_speed) and (dropped_pct <= max_dropped)): + write_results(f_minimal, pkt_size, tx_mpps, mpps, dropped_pct, dropped_tot, speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, lat_min, lat_max, lat_avg); + done = 1 + if (dropped_pct <= max_dropped): + previous_success_speed = speed + if (speed > max_speed - accuracy): + speed = max_speed + else: + if (previous_error_speed - speed < accuracy): + write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg); + done = 1 + else: + speed = speed + (previous_error_speed - speed)/2; + else: + previous_error_speed = speed + if (speed - previous_success_speed < accuracy): + write_results(f_minimal, pkt_size, good_tx_mpps, good_mpps, good_dropped_pct, good_dropped_tot, good_speed, nb_cores_per_interface, number_next_hops, number_routes, traffic, good_lat_min, good_lat_max, good_lat_avg); + done = 1 + else: + speed = speed - (speed - previous_success_speed) / 2; + + +def set_destination_ip(use_case, nb_destinations, traffic): + # minimmum 8 routes i.e. 1 per interface + # Destination addressese: "00XXXYY1" "Z00ZZ0ZZ" "AA0AA0AA" "BBBBBB10" + # Where X = interface id. Starting with 00 to be in class A and skipping 0.x.y.z and 127.x.y.z + # Y, Z and A = additional routes + # B = IP in routes. 
10 to avoid x.y.z.0 and x.y.z.255 + # Gaps in A and B to void "too good" distributions e.g. using LPM and + # First changing Y + + mask = "" + for i in range (2): + mask = str(mask)+"0" + end_mask = "" + if (use_case != 2): + end_mask = "XXXXXX10" # Last 8 bits + + if (nb_destinations == 1): + end_mask = "0010000000000000000" + str(end_mask) + if (nb_destinations == 2): + end_mask = "X010000000000000000" + str(end_mask) + if (nb_destinations == 4): + end_mask = "XX10000000000000000" + str(end_mask) + if (nb_destinations == 8): + end_mask = "XX1X000000000000000" + str(end_mask) + elif (nb_destinations == 16): + end_mask = "XX1X00X000000000000" + str(end_mask) + elif (nb_destinations == 32): + end_mask = "XX1X00XX00000000000" + str(end_mask) + elif (nb_destinations == 64): + end_mask = "XX1X00XX0X000000000" + str(end_mask) + elif (nb_destinations == 128): + end_mask = "XX1X00XX0XX00000000" + str(end_mask) + elif (nb_destinations == 256): + end_mask = "XX1X00XX0XXX0000000" + str(end_mask) + elif (nb_destinations == 512): + end_mask = "XX1X00XX0XXXX000000" + str(end_mask) + elif (nb_destinations == 1024): + end_mask = "XX1X00XX0XXXX0X0000" + str(end_mask) + elif (nb_destinations == 2048): + end_mask = "XX1X00XX0XXXX0XX000" + str(end_mask) + elif (nb_destinations == 4096): + end_mask = "XX1X00XX0XXXX0XX0X0" + str(end_mask) + elif (nb_destinations == 8192): + end_mask = "XX1X00XX0XXXX0XX0XX" + str(end_mask) + else: + if (nb_destinations <= 64 * 1): + end_mask = "0010000000000000000" + n_dest = int(log(nb_destinations, 2)) + for i in range (n_dest): + end_mask = str(end_mask) + "X" + for i in range (6 - n_dest): + end_mask = str(end_mask) + "0" + end_mask = str(end_mask) + "10" + else: + end_mask = "XXXXXX10" # Last 8 bits + + if (nb_destinations == 64 * 2): + end_mask = "001X000000000000000" + str(end_mask) + elif (nb_destinations == 64 * 4): + end_mask = "001X00X000000000000" + str(end_mask) + elif (nb_destinations == 64 * 8): + end_mask = "001X00XX00000000000" + 
str(end_mask) + elif (nb_destinations == 64 * 16): + end_mask = "001X00XX0X000000000" + str(end_mask) + elif (nb_destinations == 64 * 32): + end_mask = "001X00XX0XX00000000" + str(end_mask) + elif (nb_destinations == 64 * 64): + end_mask = "001X00XX0XXX0000000" + str(end_mask) + elif (nb_destinations == 64 * 128): + end_mask = "001X00XX0XXXX000000" + str(end_mask) + elif (nb_destinations == 64 * 256): + end_mask = "001X00XX0XXXX0X0000" + str(end_mask) + elif (nb_destinations == 64 * 512): + end_mask = "001X00XX0XXXX0XX000" + str(end_mask) + elif (nb_destinations == 64 * 1024): + end_mask = "001X00XX0XXXX0XX0X0" + str(end_mask) + elif (nb_destinations == 64 * 2048): + end_mask = "001X00XX0XXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 4096): + end_mask = "001XX0XX0XXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 8192): + end_mask = "001XXXXX0XXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 16384): + end_mask = "001XXXXXXXXXX0XX0XX" + str(end_mask) + elif (nb_destinations == 64 * 32768): + end_mask = "001XXXXXXXXXXXXX0XX" + str(end_mask) + elif (nb_destinations == 64 * 65536): + end_mask = "001XXXXXXXXXXXXXXXX" + str(end_mask) + + if (traffic == 0): # One-to-one. From odd interface to even interface and vice versa, no QPI cross + mask1 = str(mask) + "001" + str(end_mask) + mask2 = str(mask) + "000" + str(end_mask) + mask3 = str(mask) + "011" + str(end_mask) + mask4 = str(mask) + "010" + str(end_mask) + mask5 = str(mask) + "101" + str(end_mask) + mask6 = str(mask) + "100" + str(end_mask) + mask7 = str(mask) + "111" + str(end_mask) + mask8 = str(mask) + "110" + str(end_mask) + + elif (traffic == 1): # Full mesh within QPI (i.e. 
1 to 4) + mask1 = str(mask) + "0XX" + str(end_mask) + mask2 = str(mask) + "0XX" + str(end_mask) + mask3 = str(mask) + "0XX" + str(end_mask) + mask4 = str(mask) + "0XX" + str(end_mask) + mask5 = str(mask) + "1XX" + str(end_mask) + mask6 = str(mask) + "1XX" + str(end_mask) + mask7 = str(mask) + "1XX" + str(end_mask) + mask8 = str(mask) + "1XX" + str(end_mask) + + elif (traffic == 2): # One to one, crossing QPI (100% QPI) + mask1 = str(mask) + "100" + str(end_mask) + mask2 = str(mask) + "101" + str(end_mask) + mask3 = str(mask) + "110" + str(end_mask) + mask4 = str(mask) + "111" + str(end_mask) + mask5 = str(mask) + "000" + str(end_mask) + mask6 = str(mask) + "001" + str(end_mask) + mask7 = str(mask) + "010" + str(end_mask) + mask8 = str(mask) + "011" + str(end_mask) + + elif (traffic == 3): # 1 to 4 crossing QPI (100% QPI) + mask1 = str(mask) + "1XX" + str(end_mask) + mask2 = str(mask) + "1XX" + str(end_mask) + mask3 = str(mask) + "1XX" + str(end_mask) + mask4 = str(mask) + "1XX" + str(end_mask) + mask5 = str(mask) + "0XX" + str(end_mask) + mask6 = str(mask) + "0XX" + str(end_mask) + mask7 = str(mask) + "0XX" + str(end_mask) + mask8 = str(mask) + "0XX" + str(end_mask) + + elif (traffic == 4): # 1 to 4 (50% QPI) + mask1 = str(mask) + "XX1" + str(end_mask) + mask2 = str(mask) + "XX0" + str(end_mask) + mask3 = str(mask) + "XX1" + str(end_mask) + mask4 = str(mask) + "XX0" + str(end_mask) + mask5 = str(mask) + "XX1" + str(end_mask) + mask6 = str(mask) + "XX0" + str(end_mask) + mask7 = str(mask) + "XX1" + str(end_mask) + mask8 = str(mask) + "XX0" + str(end_mask) + + elif (traffic == 5): # Full mesh (50% QPI) + mask1 = str(mask) + "XXX" + str(end_mask) + mask2 = str(mask) + "XXX" + str(end_mask) + mask3 = str(mask) + "XXX" + str(end_mask) + mask4 = str(mask) + "XXX" + str(end_mask) + mask5 = str(mask) + "XXX" + str(end_mask) + mask6 = str(mask) + "XXX" + str(end_mask) + mask7 = str(mask) + "XXX" + str(end_mask) + mask8 = str(mask) + "XXX" + str(end_mask) + + for c in 
tx_port0: + send_all_random([c], 30, mask1, 4) + for c in tx_port1: + send_all_random([c], 30, mask2, 4) + for c in tx_port2: + send_all_random([c], 30, mask3, 4) + for c in tx_port3: + send_all_random([c], 30, mask4, 4) + for c in tx_port4: + send_all_random([c], 30, mask5, 4) + for c in tx_port5: + send_all_random([c], 30, mask6, 4) + for c in tx_port6: + send_all_random([c], 30, mask7, 4) + for c in tx_port7: + send_all_random([c], 30, mask8, 4) + for c in tx_cores: + send_all_random([c], 34, "0XXXXXXXXXXXXX10", 2) + send_all_random([c], 36, "0XXXXXXXXXXXXX10", 2) + +#======================================================================== +class TestDefinition(): + "Stores test parameters" + def __init__(self, use_case, next_hops, number_routes, pkt_size, traffic, reload): + self.use_case = use_case + self.next_hops = next_hops + self.number_routes = number_routes + self.pkt_size = pkt_size + self.traffic = traffic + self.reload = reload + +#======================================================================== +# Use case 0 increases input load and measure output load => show dropped packets at low loads, show overload behavior +# Use case 1 and use case 2 run dichotomic searches, searching for 0 packet loss (or whaever loss is configured) +# Use case 1 shows the effect of number of routes and next-hops +# Use case 2 shows the effect of the number of destination, using a fixed (low) number of routes and next-hops +#======================================================================== +def run_use_case(use_case, number_next_hops, number_routes, pkt_size, traffic, reload): + if (reload): + if (use_case == 2): + config = "config.1_1" + "_" + str(use_case) + ".boot" + else: + config = "config." 
+ str(number_routes) + "_" + str(number_next_hops) + ".boot" + reload_vRouter_config(config) + send_reset_random() + send_reset_value() + set_destination_ip(use_case, number_routes, traffic) + set_pkt_sizes(tx_cores, pkt_size) + print "Running test with pkt size= " + str(pkt_size) + " Next hops = " + str(number_next_hops) + "; number of routes = " + str(number_routes) + "; Traffic = " + str(traffic) + " \n" + if (use_case == 0): + run_loss_graph(number_next_hops, number_routes, pkt_size, traffic) + else: + run_dicho_search(number_next_hops, number_routes, pkt_size, traffic) + sleep(3) + +#======================================================================== +def run_all_use_cases(): + use_case_nb = 1 + # Connect to dppd + file_path = '/tmp/prox.sock' + sock.connect(file_path) + + f.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f_all.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f_minimal.write("pkt_size; tx_mpps; rx_mpps; dropped_pct; dropped_tot; percent_line_rate; latency per core\n") + f.flush(); + f_all.flush(); + f_minimal.flush(); + + # Starting tests + print "Stopping all cores and resetting all values and randoms before starting\n" + sock.sendall("stop all") + sock.sendall("reset stats\n") + sleep(3); + for line in file_tests: + info = line.split(';') + if (info[0][0] == '#'): + continue + if (info[0][0] == ''): + break + use_case = int(info[0]) + next_hops = int(info[1]) + number_routes = int(info[2]) + pkt_size = int(info[3]) + traffic = int(info[4]) + reload = int(info[5]) + print str(use_case_nb) + " : Running use case " + str(use_case) + " next_hops = " + str(next_hops) + " routes = " + str(number_routes) + " pkt_size = " + str(pkt_size) + " traffic = " + str(traffic) + " reload = " + str(reload) + run_use_case(use_case, next_hops, number_routes, pkt_size, traffic, reload) + use_case_nb = use_case_nb + 1 + 
+#======================================================================== +def configure_use_case(use_case): + Tests = [] + if (use_case == 0): + for pkt_size in all_pkt_size: + Tests.append(TestDefinition("0", "1", "1", pkt_size, "0", "1")) + for pkt_size in all_pkt_size: + Tests.append(TestDefinition("0", "1", "1", pkt_size, "1", "1")) + if (use_case == 1): + number_next_hops = 1 + reload = 0 + + number_routes = number_next_hops # At least same number of routes that number of next hops + while number_routes <= max_number_routes: + reload = 1 + for traffic in range(6): + for pkt_size in all_pkt_size: + Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload)) + reload = 0 + if (number_routes < max_number_routes / 2): + number_routes = number_routes * 4 + else: + number_routes = number_routes * 2 + + number_routes = max_number_next_hops + while number_next_hops <= max_number_next_hops: + reload = 1 + for traffic in range(6): + for pkt_size in all_pkt_size: + Tests.append(TestDefinition(use_case, number_next_hops, number_routes, pkt_size, traffic, reload)) + reload = 0 + number_next_hops = number_next_hops * 2 + if (use_case == 2): + number_next_hops = 1 + reload = 1 + for traffic in range(6): + nb_destinations = 1 + while nb_destinations <= max_number_addresses_local_network: + for pkt_size in all_pkt_size: + Tests.append(TestDefinition(use_case, number_next_hops, nb_destinations, pkt_size, traffic, reload)) + reload = 0 + nb_destinations = nb_destinations * 2 + reload = 1 + + file_tests = open('test_description.txt', 'w') + file_tests.write("# Use case; next_hops; routes; pkt_size; traffic; reload;\n") + for test in Tests: + file_tests.write(str(test.use_case) + "; " + str(test.next_hops) + "; " + str(test.number_routes) + "; " + str(test.pkt_size) + "; " + str(test.traffic) + "; " + str(test.reload) + ";\n") + file_tests.close() + +#======================================================================== +if ((configure 
== 0) and (run == 0)): + print "Nothing to do - please use -r 1 or -c 1" +if (configure == 1): + configure_use_case(use_case) +if (run == 1): + print "****************************************************************************************************************" + print "** Running vRouter Characterization with " + str(test_duration) + " seconds steps and starting at " + str(init_speed) + " percent of line rate **" + print "****************************************************************************************************************" + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + f_all = open('all_results.txt', 'w') + f = open('detailed_results.txt', 'w') + f_minimal = open('minimal_results.txt', 'w') + file_tests = open('test_description.txt', 'r') + run_all_use_cases() + f.close(); + sock.close(); diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl b/VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl new file mode 100755 index 00000000..b8baa46b --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/create_interfaces_and_routes.pl @@ -0,0 +1,90 @@ +#!/bin/env perl + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +# This script creates four sets of files: 2 sets for use case 0 and 1 +# (which use the same configuration) and 2 for use case 2. +# Each use case is defined by 2 sets of configuration files. 
+# interface.txt contains the IP addresses of the DPDK fast path interfaces. +# route.x.y.txt contains the routing table for different configurations +# with x being number of routes and y number of next_hops. +# Those interface.txt and route.x.y.txt files should then be converted +# to fit the syntax of vRouter configuration files. + +use strict; +my $max_nb_routes = 8192; +my $max_nb_next_hops = 1024; +my $max_nb_interfaces = 4; +my $nb_next_hops = 1; +my ($interface, $a1, $a2, $a3, $a4, $fh, $output_route); + +# Create interface configuration for use case 0 and 1 +my $interface_config = "interface.txt"; +open($fh, '>', $interface_config) or die "Could not open file '$interface_config' $!"; +print $fh "# interface IP address/prefix\n"; +for ($interface = 0; $interface < $max_nb_interfaces; $interface++) { + print $fh ($interface+64).".0.0.240/24\n"; +} +close $fh; + +# Create interface configuration for use case 2 +my $interface_config = "interface_use_case_2.txt"; +open($fh, '>', $interface_config) or die "Could not open file '$interface_config' $!"; +print $fh "# interface IP address/prefix\n"; +for ($interface = 0; $interface < $max_nb_interfaces; $interface++) { + print $fh ($interface * 8 + 1).".0.0.240/5\n"; +} +close $fh; + +# Create routes configuration for use case 0 and 1 +while ($nb_next_hops <= $max_nb_next_hops) { + my $nb_routes_per_interface = $nb_next_hops; + while ($nb_routes_per_interface <= $max_nb_routes) { + $output_route = "route.".$nb_routes_per_interface.".".$nb_next_hops.".txt"; + open($fh, '>', $output_route) or die "Could not open file '$output_route' $!"; + print $fh "# destination/prefix;nex-hop\n"; + + for (my $route_nb = 0; $route_nb < $nb_routes_per_interface; $route_nb++) { + for ($interface = 0; $interface < $max_nb_interfaces; $interface++) { + $a1 = $interface * 8 + 1 + (($route_nb & 1) << 2) + ($route_nb & 2); + $a2 = (($route_nb & 4) << 5) + (($route_nb & 8) << 1) + (($route_nb & 0x10) >> 1) + (($route_nb & 0x20) >> 4) + 
(($route_nb & 0x40) >> 6); + $a3 = (($route_nb & 0x80)) + (($route_nb & 0x100) >> 2) + (($route_nb & 0x200) >> 5) + (($route_nb & 0x400) >> 7) + (($route_nb & 0x800) >> 10) + (($route_nb & 0x1000) >> 12); + $a4 = 0; + print $fh $a1.".".$a2.".".$a3.".".$a4."/24;"; + print $fh ($interface+64).".0.".(($route_nb % $nb_next_hops) >> 7).".".(1 + (($route_nb % $nb_next_hops) & 0x7f)) ."\n"; + } + } + $nb_routes_per_interface = $nb_routes_per_interface * 2; + } + $nb_next_hops = $nb_next_hops * 2; +} +close $fh; + +# Create routes configuration for use case 2 +$output_route = "route.1.1.use_case_2.txt"; +open($fh, '>', $output_route) or die "Could not open file '$output_route' $!"; +print $fh "# destination/prefix;nex-hop\n"; + +for ($interface = 0; $interface < $max_nb_interfaces; $interface++) { + $a1 = $interface + 64 ; + $a2 = 0; + $a3 = 0; + $a4 = 0; + print $fh $a1.".".$a2.".".$a3.".".$a4."/24;"; + print $fh ($interface * 8 + 1).".0.0.1\n"; +} +close $fh; diff --git a/VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py b/VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py new file mode 100755 index 00000000..f00ab77b --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/testvRouter/remote_system.py @@ -0,0 +1,57 @@ +#!/bin/env python + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +import os +import thread +import time +import socket + +def ssh(user, ip, cmd): + # print cmd; + ssh_options = "" + ssh_options += "-o StrictHostKeyChecking=no " + ssh_options += "-o UserKnownHostsFile=/dev/null " + ssh_options += "-o LogLevel=quiet " + running = os.popen("ssh " + ssh_options + " " + user + "@" + ip + " \"" + cmd + "\""); + ret = {}; + ret['out'] = running.read().strip(); + ret['ret'] = running.close(); + if (ret['ret'] == None): + ret['ret'] = 0; + + return ret; + +def ssh_check_quit(obj, user, ip, cmd): + ret = ssh(user, ip, cmd); + if (ret['ret'] != 0): + obj._err = True; + obj._err_str = ret['out']; + exit(-1); + +class remote_system: + def __init__(self, user, ip): + self._ip = ip; + self._user = user; + def run(self, cmd): + return ssh(self._user, self._ip, cmd); + def run_forked(self, cmd): + thread.start_new_thread(ssh, (self._user, self._ip, cmd)); + return 0; + def scp(self, src, dst): + running = os.popen("scp " + self._user + "@" + self._ip + ":" + src + " " + dst); + return running.close(); diff --git a/VNFs/DPPD-PROX/helper-scripts/trailing.sh b/VNFs/DPPD-PROX/helper-scripts/trailing.sh new file mode 100755 index 00000000..5b64b1d7 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/trailing.sh @@ -0,0 +1,69 @@ +#!/bin/bash + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +bad_lines=$(grep -nHr -e "[[:space:]]$" *.c *.h gen/*.cfg config/*.cfg) + +if [ -n "$bad_lines" ]; then + echo "Found trailing white-spaces:" + echo $bad_lines + exit 1; +fi + +for f in *.c *.h gen/*.cfg config/*.cfg; do + result=$(tail -n 1 $f | grep "^$" | wc -l) + + if [ "$result" == "1" ]; then + echo "Trailing newlines at end of file $f" + exit 1 + fi +done; + +prev="dummy" +function findDuplicate() { + line=1 + while read p; do + if [ "$prev" == "" ]; then + if [ "$p" == "" ]; then + echo "duplicate empty line at $1:$line" + bad=1 + fi + fi + prev=$p + let "line+=1" + done <$1 +} + +bad=0 +for f in *.c *.h; do + findDuplicate $f +done; + +if [ "$bad" != "0" ]; then + exit 1 +fi + +tab=" " +bad_lines=$(grep -nHr -e "^$tab$tab$tab$tab$tab$tab$tab" *.c *.h | head -n1) + +if [ -n "$bad_lines" ]; then + echo "Code nested too deep:" + echo $bad_lines + exit 1; +fi + +exit 0 diff --git a/VNFs/DPPD-PROX/helper-scripts/vm-cores.py b/VNFs/DPPD-PROX/helper-scripts/vm-cores.py new file mode 100644 index 00000000..de794998 --- /dev/null +++ b/VNFs/DPPD-PROX/helper-scripts/vm-cores.py @@ -0,0 +1,20 @@ +#!/bin/env python2.7 + +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. 
+## + +cores = [[0,20], [1,21], [2,22], [3,23], [4,24], [5,25], [6,26], [7,27], [8,28], [9,29]] + diff --git a/VNFs/DPPD-PROX/input.c b/VNFs/DPPD-PROX/input.c new file mode 100644 index 00000000..bb956bcd --- /dev/null +++ b/VNFs/DPPD-PROX/input.c @@ -0,0 +1,105 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_cycles.h> +#include <rte_common.h> + +#include "clock.h" +#include "input.h" + +static struct input *inputs[32]; +static int n_inputs; +static int max_input_fd; + +int reg_input(struct input *in) +{ + if (n_inputs == sizeof(inputs)/sizeof(inputs[0])) + return -1; + + for (int i = 0; i < n_inputs; ++i) { + if (inputs[i] == in) + return -1; + } + inputs[n_inputs++] = in; + max_input_fd = RTE_MAX(in->fd, max_input_fd); + + return 0; +} + +void unreg_input(struct input *in) +{ + int rm, i; + + for (rm = 0; rm < n_inputs; ++rm) { + if (inputs[rm] == in) { + break; + } + } + + if (rm == n_inputs) + return ; + + for (i = rm + 1; i < n_inputs; ++i) { + inputs[i - 1] = inputs[i]; + } + + n_inputs--; + max_input_fd = 0; + for (i = 0; i < n_inputs; ++i) { + max_input_fd = RTE_MAX(inputs[i]->fd, max_input_fd); + } +} + +static int tsc_diff_to_tv(uint64_t beg, uint64_t end, struct timeval *tv) +{ + if (end < beg) { + return -1; + } + + uint64_t diff = end - beg; + tsc_to_tv(tv, diff); + return 0; +} + +void input_proc_until(uint64_t deadline) +{ + struct timeval tv; + fd_set in_fd; + int ret 
= 1; + + /* Keep checking for input until select() returned 0 (timeout + occurred before input was read) or current time has passed + the deadline (which occurs when time progresses past the + deadline between return of select() and the next + iteration). */ + while (ret != 0 && tsc_diff_to_tv(rte_rdtsc(), deadline, &tv) == 0) { + FD_ZERO(&in_fd); + + for (int i = 0; i < n_inputs; ++i) { + FD_SET(inputs[i]->fd, &in_fd); + } + + ret = select(max_input_fd + 1, &in_fd, NULL, NULL, &tv); + + if (ret > 0) { + for (int i = 0; i < n_inputs; ++i) { + if (FD_ISSET(inputs[i]->fd, &in_fd)) { + inputs[i]->proc_input(inputs[i]); + } + } + } + } +} diff --git a/VNFs/DPPD-PROX/input.h b/VNFs/DPPD-PROX/input.h new file mode 100644 index 00000000..06f6b653 --- /dev/null +++ b/VNFs/DPPD-PROX/input.h @@ -0,0 +1,35 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _INPUT_H_ +#define _INPUT_H_ + +#include <inttypes.h> + +struct input { + int fd; + /* Function to be called when data is available on the fd */ + void (*proc_input)(struct input *input); + void (*reply)(struct input *input, const char *buf, size_t len); + void (*history)(struct input *input); +}; + +int reg_input(struct input *in); +void unreg_input(struct input *in); + +void input_proc_until(uint64_t deadline); + +#endif /* _INPUT_H_ */ diff --git a/VNFs/DPPD-PROX/input_conn.c b/VNFs/DPPD-PROX/input_conn.c new file mode 100644 index 00000000..63e6511e --- /dev/null +++ b/VNFs/DPPD-PROX/input_conn.c @@ -0,0 +1,236 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> +#include <netinet/in.h> +#include <sys/socket.h> +#include <sys/un.h> +#include <unistd.h> + +#include "input_conn.h" +#include "input.h" +#include "run.h" +#include "cmd_parser.h" + +static struct input tcp_server; +int tcp_server_started; +static struct input uds_server; +int uds_server_started; + +/* Active clients */ +struct client_conn { + struct input input; + int enabled; + int n_buf; + char buf[32768]; +}; + +struct client_conn clients[32]; + +static int start_listen_tcp(void) +{ + struct sockaddr_in server; + int ret, sock; + int optval = 1; + + memset(&server, 0, sizeof(server)); + sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); + + if (sock == -1) + return -1; + + server.sin_family = AF_INET; + server.sin_port = ntohs(8474); + server.sin_addr.s_addr = ntohl(INADDR_ANY); + + ret = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(int)); + + if (ret) + return -1; + + if (bind(sock, (struct sockaddr *) &server, sizeof(server)) == -1) + return -1; + + if (listen(sock, 1) == -1) + return -1; + + return sock; +} + +static int start_listen_uds(void) +{ + int sock; + struct sockaddr_un server = { + .sun_path = "/tmp/prox.sock", + .sun_family = AF_UNIX + }; + + sock = socket(AF_UNIX, SOCK_STREAM, 0); + if (sock == -1) + return -1; + + /* Unlink can fail, i.e. when /tmp/prox.sock does not + exists. This is not fatal. 
*/ + unlink(server.sun_path); + + if (bind(sock, (struct sockaddr *) &server, sizeof(server)) == -1) + return -1; + + if (listen(sock, 1) == -1) + return -1; + + return sock; +} + +static void write_client(struct input *input, const char *buf, size_t len) +{ + int ret; + + while ((ret = write(input->fd, buf, len)) != (int)len) { + buf += ret; + len -= ret; + } +} + +static void handle_client(struct input* client_input) +{ + char cur[1024]; + size_t i; + int ret; + struct client_conn *c = NULL; + + /* Get the client structure that uses this input */ + for (i = 0; i < sizeof(clients)/sizeof(clients[0]); ++i) { + if (&clients[i].input == client_input) { + c = &clients[i]; + break; + } + } + + /* handle_client function called non-tcp client */ + if (c == NULL) + return ; + + ret = read(c->input.fd, cur, sizeof(cur)); + + if (ret == 0) { + c->enabled = 0; + unreg_input(&c->input); + return ; + } + + /* Scan in data until \n (\r skipped if followed by \n) */ + for (int i = 0; i < ret; ++i) { + if (cur[i] == '\r' && i + 1 < ret && cur[i + 1] == '\n') + continue; + + if (cur[i] == '\n') { + c->buf[c->n_buf] = 0; + if (c->n_buf) + cmd_parser_parse(c->buf, client_input); + c->n_buf = 0; + } + else if (c->n_buf + 1 < (int)sizeof(c->buf)) + c->buf[c->n_buf++] = cur[i]; + else + c->n_buf = 0; + } +} + +static void handle_new_client(struct input* server) +{ + size_t i; + + int new_client = accept(server->fd, NULL, NULL); + + for (i = 0; i < sizeof(clients)/sizeof(clients[0]); ++i) { + if (clients[i].enabled == 0) { + break; + } + } + + if (i == sizeof(clients)/sizeof(clients[0])) { + close(new_client); + return ; + } + + clients[i].enabled = 1; + clients[i].n_buf = 0; + clients[i].input.fd = new_client; + clients[i].input.reply = server->reply; + clients[i].input.proc_input = handle_client; + + reg_input(&clients[i].input); +} + +int reg_input_tcp(void) +{ + int fd; + + if (tcp_server_started) + return -1; + if ((fd = start_listen_tcp()) < 0) + return -1; + + tcp_server.fd = fd; 
+ tcp_server.proc_input = handle_new_client; + tcp_server.reply = write_client; + if (reg_input(&tcp_server) != 0) { + close(fd); + return -1; + } + tcp_server_started = 1; + return 0; +} + +int reg_input_uds(void) +{ + int fd; + + if (uds_server_started) + return -1; + + if ((fd = start_listen_uds()) < 0) + return -1; + + uds_server.fd = fd; + uds_server.proc_input = handle_new_client; + uds_server.reply = write_client; + if (reg_input(&uds_server) != 0) { + close(fd); + return -1; + } + uds_server_started = 1; + return 0; +} + +void unreg_input_tcp(void) +{ + if (!tcp_server_started) + return; + tcp_server_started = 0; + close(tcp_server.fd); + unreg_input(&tcp_server); +} + +void unreg_input_uds(void) +{ + if (!uds_server_started) + return; + uds_server_started = 0; + close(uds_server.fd); + unreg_input(&uds_server); +} diff --git a/VNFs/DPPD-PROX/input_conn.h b/VNFs/DPPD-PROX/input_conn.h new file mode 100644 index 00000000..98e9af45 --- /dev/null +++ b/VNFs/DPPD-PROX/input_conn.h @@ -0,0 +1,27 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _INPUT_CONN_H_ +#define _INPUT_CONN_H_ + +/* Returns 0 on success, -1 otherwise. 
*/ +int reg_input_tcp(void); +int reg_input_uds(void); + +void unreg_input_tcp(void); +void unreg_input_uds(void); + +#endif /* _INPUT_CONN_H_ */ diff --git a/VNFs/DPPD-PROX/input_curses.c b/VNFs/DPPD-PROX/input_curses.c new file mode 100644 index 00000000..6f79869b --- /dev/null +++ b/VNFs/DPPD-PROX/input_curses.c @@ -0,0 +1,325 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <errno.h> +#include <stdlib.h> +#include <fcntl.h> +#include <unistd.h> + +#include "log.h" +#include "input.h" +#include "display.h" +#include "run.h" +#include "cmd_parser.h" +#include "input_curses.h" +#include "histedit.h" + +static EditLine *el; +static History *hist; + +static struct input input_curses; +static int tabbed; + +static void show_history(struct input *input) +{ + HistEvent event; + + history(hist, &event, H_LAST); + + do { + plog_info("%s", event.str); /* event.str contains newline */ + } while (history(hist, &event, H_PREV) != -1); +} + +static int complete(__attribute__((unused)) int ch) +{ + const LineInfo *li; + size_t len; + size_t n_match = 0; + char complete_cmd[128] = {0}; + int complete_cmd_partial = 0; + + li = el_line(el); + for (size_t i = 0; i < cmd_parser_n_cmd(); ++i) { + len = li->lastchar - li->buffer; + if (strncmp(cmd_parser_cmd(i), li->buffer, len) == 0) { + if (n_match) { + size_t cur_len = strlen(complete_cmd); + for (size_t j = 0; j < cur_len; ++j) { + if 
(complete_cmd[j] != cmd_parser_cmd(i)[j]) { + complete_cmd[j] = 0; + complete_cmd_partial = 1; + break; + } + } + } + else { + strcpy(complete_cmd, cmd_parser_cmd(i)); + } + + n_match++; + } + } + + /* Complete only if there are more characters known than + currently entered. */ + if (n_match && len < strlen(complete_cmd)) { + el_deletestr(el, li->cursor - li->buffer); + el_insertstr(el, complete_cmd); + if (!complete_cmd_partial) + el_insertstr(el, " "); + + return CC_REDISPLAY; + } + else if (tabbed) { + int printed = 0; + for (size_t i = 0; i < cmd_parser_n_cmd(); ++i) { + len = li->lastchar - li->buffer; + if (strncmp(cmd_parser_cmd(i), li->buffer, len) == 0) { + plog_info("%-23s", cmd_parser_cmd(i)); + printed++; + } + if (printed == 4) { + printed = 0; + plog_info("\n"); + } + } + if (printed) + plog_info("\n"); + } + else { + tabbed = 1; + } + + return CC_REDISPLAY; +} + +/* Returns non-zero if stdin is readable */ +static int peek_stdin(void) +{ + int tmp; + fd_set in_fd; + struct timeval tv; + + tv.tv_sec = 0; + tv.tv_usec = 10000; + + FD_ZERO(&in_fd); + FD_SET(fileno(stdin), &in_fd); + tmp = select(fileno(stdin) + 1, &in_fd, NULL, NULL, &tv); + return FD_ISSET(fileno(stdin), &in_fd); +} + +static int get_char(EditLine *e, char *c) +{ + *c = display_getch(); + + /* If no characters have been entered, number keys switch the + screen and '0' resets stats. This is provided as a + fall-back in case F-keys do not function. The keys are + intercepted before returning control to libedit. */ + if (*c >= '0' && *c <= '9') { + const LineInfo *li; + + li = el_line(e); + if (li->lastchar == li->buffer) { + if (*c >= '1') { + display_screen(*c - '0' - 1); + return 0; + } + else { + cmd_parser_parse("reset stats", &input_curses); + return 0; + } + } + } + if (*c == '=') { + toggle_display_screen(); + return 0; + } + + /* Escape by itself is the first character used for more + complex escape sequences like F-keys. 
libedit can't be used + to detect both ESC as a unitary key and more complex + sequences starting ESC at the same time. */ + if (*c == 27 && !peek_stdin()) { + quit(); + return 0; + } + else if (*c != 9) { + tabbed = 0; + } + + return 1; +} + +static void proc_keyboard(struct input *input) +{ + const char *line; + const LineInfo *li; + HistEvent hist_event; + int len; + + line = el_gets(el, &len); + li = el_line(el); + + if (len == 0 || line == NULL) { + display_cmd("", 0, 0); + return; + } else if (len > 0) { + if (len == 1 && line[0] == '\n') { + display_print_page(); + el_set(el, EL_UNBUFFERED, 0); + el_set(el, EL_UNBUFFERED, 1); + return; + } + if (line[len-1] == '\n') { + if (hist) { + history(hist, &hist_event, H_ENTER, line); + } + + char *line2 = strndup(line, len); + line2[len - 1] = 0; /* replace \n */ + cmd_parser_parse(line2, input); + free(line2); + + el_set(el, EL_UNBUFFERED, 0); + el_set(el, EL_UNBUFFERED, 1); + display_cmd("", 0, 0); + return; + } + if (line[len-1] == 4) { + return; /* should quit*/ + } + } + else { + if (errno) { + return; + } + display_cmd("", 0, 0); + return; + } + display_cmd(line, len, li->cursor - li->buffer); +} + +static int key_f1(__attribute__((unused)) int ch) {display_screen(0); return CC_REDISPLAY;} +static int key_f2(__attribute__((unused)) int ch) {display_screen(1); return CC_REDISPLAY;} +static int key_f3(__attribute__((unused)) int ch) {display_screen(2); return CC_REDISPLAY;} +static int key_f4(__attribute__((unused)) int ch) {display_screen(3); return CC_REDISPLAY;} +static int key_f5(__attribute__((unused)) int ch) {display_screen(4); return CC_REDISPLAY;} +static int key_f6(__attribute__((unused)) int ch) {display_screen(5); return CC_REDISPLAY;} +static int key_f7(__attribute__((unused)) int ch) {display_screen(6); return CC_REDISPLAY;} +static int key_f8(__attribute__((unused)) int ch) {display_screen(7); return CC_REDISPLAY;} +static int key_f9(__attribute__((unused)) int ch) {display_screen(8); return 
CC_REDISPLAY;} +static int key_f10(__attribute__((unused)) int ch) {display_screen(9); return CC_REDISPLAY;} +static int key_f11(__attribute__((unused)) int ch) {display_screen(10); return CC_REDISPLAY;} +static int key_f12(__attribute__((unused)) int ch) {display_screen(11); return CC_REDISPLAY;} + +static int key_page_up(__attribute__((unused)) int ch) {display_page_up(); return CC_REDISPLAY;} +static int key_page_down(__attribute__((unused)) int ch) {display_page_down(); return CC_REDISPLAY;} + +static void setup_el(void) +{ + int pty; + FILE *dev_pty; + HistEvent hist_event; + + /* Open a pseudo-terminal for use in libedit. This is required + since the library checks if it is using a tty. If the file + descriptor does not represent a tty, the library disables + editing. */ + + pty = posix_openpt(O_RDWR); + /* TODO: On error (posix_openpt() < 0), fall-back to + non-libedit implementation. */ + grantpt(pty); + unlockpt(pty); + dev_pty = fdopen(pty, "wr"); + + el = el_init("", dev_pty, dev_pty, dev_pty); + + el_set(el, EL_EDITOR, "emacs"); + + el_set(el, EL_ADDFN, "complete", "Command completion", complete); + + el_set(el, EL_ADDFN, "key_f1", "Switch to screen 1", key_f1); + el_set(el, EL_ADDFN, "key_f2", "Switch to screen 2", key_f2); + el_set(el, EL_ADDFN, "key_f3", "Switch to screen 3", key_f3); + el_set(el, EL_ADDFN, "key_f4", "Switch to screen 4", key_f4); + el_set(el, EL_ADDFN, "key_f5", "Switch to screen 5", key_f5); + el_set(el, EL_ADDFN, "key_f6", "Switch to screen 6", key_f6); + el_set(el, EL_ADDFN, "key_f7", "Switch to screen 7", key_f7); + el_set(el, EL_ADDFN, "key_f8", "Switch to screen 8", key_f8); + el_set(el, EL_ADDFN, "key_f9", "Switch to screen 9", key_f9); + el_set(el, EL_ADDFN, "key_f10", "Switch to screen 10", key_f10); + el_set(el, EL_ADDFN, "key_f11", "Switch to screen 11", key_f11); + el_set(el, EL_ADDFN, "key_f12", "Switch to screen 12", key_f12); + + el_set(el, EL_ADDFN, "key_page_up", "Page up", key_page_up); + el_set(el, EL_ADDFN, 
"key_page_down", "Page down", key_page_down); + + el_set(el, EL_BIND, "^I", "complete", NULL); + el_set(el, EL_BIND, "^r", "em-inc-search-prev", NULL); + + el_set(el, EL_BIND, "^[[11~", "key_f1", NULL); + el_set(el, EL_BIND, "^[[12~", "key_f2", NULL); + el_set(el, EL_BIND, "^[[13~", "key_f3", NULL); + el_set(el, EL_BIND, "^[[14~", "key_f4", NULL); + el_set(el, EL_BIND, "^[[15~", "key_f5", NULL); + el_set(el, EL_BIND, "^[[17~", "key_f6", NULL); + el_set(el, EL_BIND, "^[[18~", "key_f7", NULL); + el_set(el, EL_BIND, "^[[19~", "key_f8", NULL); + el_set(el, EL_BIND, "^[[20~", "key_f9", NULL); + el_set(el, EL_BIND, "^[[21~", "key_f10", NULL); + el_set(el, EL_BIND, "^[[23~", "key_f11", NULL); + el_set(el, EL_BIND, "^[[24~", "key_f12", NULL); + + el_set(el, EL_BIND, "^[OP", "key_f1", NULL); + el_set(el, EL_BIND, "^[OQ", "key_f2", NULL); + el_set(el, EL_BIND, "^[OR", "key_f3", NULL); + el_set(el, EL_BIND, "^[OS", "key_f4", NULL); + + el_set(el, EL_BIND, "^[[5~", "key_page_up", NULL); + el_set(el, EL_BIND, "^[[6~", "key_page_down", NULL); + + hist = history_init(); + if (hist) { + history(hist, &hist_event, H_SETSIZE, 1000); + el_set(el, EL_HIST, history, hist); + } + el_set(el, EL_UNBUFFERED, 1); + el_set(el, EL_GETCFN, get_char); +} + +void reg_input_curses(void) +{ + setup_el(); + + input_curses.fd = fileno(stdin); + input_curses.proc_input = proc_keyboard; + input_curses.history = show_history; + + reg_input(&input_curses); +} + +void unreg_input_curses(void) +{ + history_end(hist); + el_end(el); + + unreg_input(&input_curses); +} diff --git a/VNFs/DPPD-PROX/input_curses.h b/VNFs/DPPD-PROX/input_curses.h new file mode 100644 index 00000000..8b682646 --- /dev/null +++ b/VNFs/DPPD-PROX/input_curses.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _INPUT_CURSES_H_ +#define _INPUT_CURSES_H_ + +void reg_input_curses(void); +void unreg_input_curses(void); + +#endif /* _INPUT_CURSES_H_ */ diff --git a/VNFs/DPPD-PROX/ip6_addr.h b/VNFs/DPPD-PROX/ip6_addr.h new file mode 100644 index 00000000..f9b56c19 --- /dev/null +++ b/VNFs/DPPD-PROX/ip6_addr.h @@ -0,0 +1,26 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _IP6_ADDR_H_ +#define _IP6_ADDR_H_ + +#include <inttypes.h> + +struct ipv6_addr { + uint8_t bytes[16]; +}; + +#endif /* _IP6_ADDR_H_ */ diff --git a/VNFs/DPPD-PROX/ip_subnet.c b/VNFs/DPPD-PROX/ip_subnet.c new file mode 100644 index 00000000..dc6ab1ac --- /dev/null +++ b/VNFs/DPPD-PROX/ip_subnet.c @@ -0,0 +1,45 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "ip_subnet.h" +#include "prox_assert.h" + +uint32_t ip4_subet_get_n_hosts(const struct ip4_subnet *sn) +{ + PROX_ASSERT(sn->prefix <= 32 && sn->prefix >= 1); + return 1 << (32 - sn->prefix); +} + +int ip4_subnet_to_host(const struct ip4_subnet *sn, uint32_t host_index, uint32_t *ret_ip) +{ + PROX_ASSERT(ip4_subnet_is_valid(sn)); + + if (host_index >= ip4_subet_get_n_hosts(sn)) { + return -1; + } + + *ret_ip = sn->ip + host_index; + return 0; +} + +int ip4_subnet_is_valid(const struct ip4_subnet *sn) +{ + if (sn->prefix == 0) { + return sn->ip == 0; + } + + return (sn->ip & ~(((int)(1 << 31)) >> (sn->prefix - 1))) == 0; +} diff --git a/VNFs/DPPD-PROX/ip_subnet.h b/VNFs/DPPD-PROX/ip_subnet.h new file mode 100644 index 00000000..126efb18 --- /dev/null +++ b/VNFs/DPPD-PROX/ip_subnet.h @@ -0,0 +1,48 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _IP_SUBNET_H_ +#define _IP_SUBNET_H_ + +#include <inttypes.h> + +struct ip4_subnet { + uint32_t ip; + uint8_t prefix; /* always in range [1,32] inclusive */ +}; + +struct ip6_subnet { + uint8_t ip[16]; + uint8_t prefix; /* always in range [1,128] inclusive */ +}; + +/* Returns number of hosts (assuming that network address and + broadcast address are both hosts) within the subnet. */ +uint32_t ip4_subet_get_n_hosts(const struct ip4_subnet *sn); + +/* Allows to get a specific host within a subnet. Note that the + network address and broadcast address are both considered to + "hosts". Setting host_index to 0 returns the network address and + setting the host_index to the last host within the subnet returns + the broadcast. To get all addresses with the subnet, loop + host_index from 0 to ip_subnet_get_n_hosts(). */ +int ip4_subnet_to_host(const struct ip4_subnet* sn, uint32_t host_index, uint32_t* ret_ip); + +/* Check if IP address is a network address (i.e. all bits outside the + prefix are set to 0). */ +int ip4_subnet_is_valid(const struct ip4_subnet* sn); + +#endif /* _IP_SUBNET_H_ */ diff --git a/VNFs/DPPD-PROX/kv_store_expire.h b/VNFs/DPPD-PROX/kv_store_expire.h new file mode 100644 index 00000000..c930af55 --- /dev/null +++ b/VNFs/DPPD-PROX/kv_store_expire.h @@ -0,0 +1,198 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_hash_crc.h> +#include <stdint.h> + +#include "prox_malloc.h" + +#define KV_STORE_BUCKET_DEPTH 8 + +struct kv_store_expire_entry { + /* if set to 0, the entry is disabled */ + uint64_t timeout; + /* Memory contains the key, followed by the actual value. */ + uint8_t mem[0]; +}; + +struct kv_store_expire { + size_t key_size; + size_t entry_size; + size_t bucket_mask; + size_t bucket_size; + uint64_t timeout; + + void (*expire)(void *entry_value); + + uint8_t mem[0]; +}; + +static struct kv_store_expire *kv_store_expire_create(uint32_t n_entries, size_t key_size, size_t value_size, int socket, void (*expire)(void *entry_value), uint64_t timeout) +{ + struct kv_store_expire *ret; + size_t memsize = 0; + size_t bucket_size; + size_t entry_size; + + if (!rte_is_power_of_2(n_entries)) + n_entries = rte_align32pow2(n_entries); + entry_size = sizeof(struct kv_store_expire_entry) + key_size + value_size; + + memsize += sizeof(struct kv_store_expire); + memsize += entry_size * n_entries; + + ret = prox_zmalloc(memsize, socket); + if (ret == NULL) + return NULL; + + ret->bucket_mask = n_entries / KV_STORE_BUCKET_DEPTH - 1; + ret->bucket_size = entry_size * KV_STORE_BUCKET_DEPTH; + ret->entry_size = entry_size; + ret->key_size = key_size; + ret->expire = expire; + ret->timeout = timeout; + + return ret; +} + +static size_t kv_store_expire_size(struct kv_store_expire *kv_store) +{ + return (kv_store->bucket_mask + 1) * KV_STORE_BUCKET_DEPTH; +} + +static void entry_set_timeout(struct kv_store_expire_entry *entry, uint64_t timeout) +{ + entry->timeout = timeout; +} + +static struct kv_store_expire_entry *entry_next(struct kv_store_expire *kv_store, struct kv_store_expire_entry *entry) +{ + return (struct kv_store_expire_entry *)((uint8_t *)entry + kv_store->entry_size); +} + +static void *entry_key(__attribute__((unused)) struct kv_store_expire *kv_store, struct kv_store_expire_entry *entry) +{ + return (uint8_t *)entry->mem; +} + +static void 
*entry_value(struct kv_store_expire *kv_store, struct kv_store_expire_entry *entry) +{ + return (uint8_t *)entry->mem + kv_store->key_size; +} + +static struct kv_store_expire_entry *kv_store_expire_get_first(struct kv_store_expire *kv_store) +{ + return (struct kv_store_expire_entry *)&kv_store->mem[0]; +} + +static struct kv_store_expire_entry *kv_store_expire_get_first_in_bucket(struct kv_store_expire *kv_store, void *key) +{ + uint32_t key_hash = rte_hash_crc(key, kv_store->key_size, 0); + uint32_t bucket_idx = key_hash & kv_store->bucket_mask; + + return (struct kv_store_expire_entry *)&kv_store->mem[bucket_idx * kv_store->bucket_size]; +} + +static int entry_key_matches(struct kv_store_expire *kv_store, struct kv_store_expire_entry *entry, void *key) +{ + return !memcmp(entry_key(kv_store, entry), key, kv_store->key_size); +} + +static struct kv_store_expire_entry *kv_store_expire_get(struct kv_store_expire *kv_store, void *key, uint64_t now) +{ + struct kv_store_expire_entry *entry = kv_store_expire_get_first_in_bucket(kv_store, key); + + for (int i = 0; i < KV_STORE_BUCKET_DEPTH; ++i) { + if (entry->timeout && entry->timeout >= now) { + if (entry_key_matches(kv_store, entry, key)) { + entry->timeout = now + kv_store->timeout; + return entry; + } + } + entry = entry_next(kv_store, entry); + } + return NULL; +} + +static struct kv_store_expire_entry *kv_store_expire_put(struct kv_store_expire *kv_store, void *key, uint64_t now) +{ + struct kv_store_expire_entry *e = kv_store_expire_get_first_in_bucket(kv_store, key); + + for (int i = 0; i < KV_STORE_BUCKET_DEPTH; ++i) { + if (e->timeout && e->timeout >= now) { + e = entry_next(kv_store, e); + continue; + } + if (e->timeout) { + kv_store->expire(entry_value(kv_store, e)); + } + + rte_memcpy(entry_key(kv_store, e), key, kv_store->key_size); + e->timeout = now + kv_store->timeout; + return e; + } + + return NULL; +} + +/* If the entry is not found, a put operation is tried and if that + succeeds, that entry is 
returned. The bucket is full if NULL Is + returned. */ +static struct kv_store_expire_entry *kv_store_expire_get_or_put(struct kv_store_expire *kv_store, void *key, uint64_t now) +{ + struct kv_store_expire_entry *entry = kv_store_expire_get_first_in_bucket(kv_store, key); + struct kv_store_expire_entry *v = NULL; + + for (int i = 0; i < KV_STORE_BUCKET_DEPTH; ++i) { + if (entry->timeout && entry->timeout >= now) { + if (entry_key_matches(kv_store, entry, key)) { + entry->timeout = now + kv_store->timeout; + return entry; + } + } + else { + v = v? v : entry; + } + entry = entry_next(kv_store, entry); + } + + if (v) { + if (v->timeout) + kv_store->expire(entry_value(kv_store, v)); + rte_memcpy(entry_key(kv_store, v), key, kv_store->key_size); + v->timeout = now + kv_store->timeout; + return v; + } + + return NULL; +} + +static size_t kv_store_expire_expire_all(struct kv_store_expire *kv_store) +{ + struct kv_store_expire_entry *entry = kv_store_expire_get_first(kv_store); + size_t elems = kv_store_expire_size(kv_store); + size_t expired = 0; + + do { + if (entry->timeout) { + kv_store->expire(entry_value(kv_store, entry)); + entry->timeout = 0; + expired++; + } + entry = entry_next(kv_store, entry); + } while (--elems); + return expired; +} diff --git a/VNFs/DPPD-PROX/lconf.c b/VNFs/DPPD-PROX/lconf.c new file mode 100644 index 00000000..88d8f4f9 --- /dev/null +++ b/VNFs/DPPD-PROX/lconf.c @@ -0,0 +1,355 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "prox_malloc.h" +#include "lconf.h" +#include "rx_pkt.h" +#include "tx_pkt.h" +#include "log.h" +#include "quit.h" +#include "prox_cfg.h" + +struct lcore_cfg *lcore_cfg; +/* only used at initialization time */ +struct lcore_cfg lcore_cfg_init[RTE_MAX_LCORE]; + +static int core_targ_next_from(struct lcore_cfg **lconf, struct task_args **targ, struct lcore_cfg *lcore_cfg, const int with_master) +{ + uint32_t lcore_id, task_id; + + if (*lconf && *targ) { + lcore_id = *lconf - lcore_cfg; + task_id = *targ - lcore_cfg[lcore_id].targs; + + if (task_id + 1 < lcore_cfg[lcore_id].n_tasks_all) { + *targ = &lcore_cfg[lcore_id].targs[task_id + 1]; + return 0; + } else { + if (prox_core_next(&lcore_id, with_master)) + return -1; + *lconf = &lcore_cfg[lcore_id]; + *targ = &lcore_cfg[lcore_id].targs[0]; + return 0; + } + } else { + lcore_id = -1; + + if (prox_core_next(&lcore_id, with_master)) + return -1; + *lconf = &lcore_cfg[lcore_id]; + *targ = &lcore_cfg[lcore_id].targs[0]; + return 0; + } +} + +int core_targ_next(struct lcore_cfg **lconf, struct task_args **targ, const int with_master) +{ + return core_targ_next_from(lconf, targ, lcore_cfg, with_master); +} + +int core_targ_next_early(struct lcore_cfg **lconf, struct task_args **targ, const int with_master) +{ + return core_targ_next_from(lconf, targ, lcore_cfg_init, with_master); +} + +struct task_args *core_targ_get(uint32_t lcore_id, uint32_t task_id) +{ + return &lcore_cfg[lcore_id].targs[task_id]; +} + +void lcore_cfg_alloc_hp(void) +{ + size_t mem_size = RTE_MAX_LCORE * sizeof(struct lcore_cfg); + + lcore_cfg = prox_zmalloc(mem_size, rte_socket_id()); + PROX_PANIC(lcore_cfg == NULL, "Could not allocate memory for core control structures\n"); + rte_memcpy(lcore_cfg, lcore_cfg_init, mem_size); + + /* get thread ID for master core */ + lcore_cfg[rte_lcore_id()].thread_id = pthread_self(); +} + +int 
lconf_run(__attribute__((unused)) void *dummy) +{ + uint32_t lcore_id = rte_lcore_id(); + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + /* get thread ID, and set cancellation type to asynchronous */ + lconf->thread_id = pthread_self(); + int ret = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + if (ret != 0) + plog_warn("pthread_setcanceltype() failed on core %u: %i\n", lcore_id, ret); + + plog_info("Entering main loop on core %u\n", lcore_id); + return lconf->thread_x(lconf); +} + +static void msg_stop(struct lcore_cfg *lconf) +{ + int idx = -1; + struct task_base *t = NULL; + + if (lconf->msg.task_id == -1) { + for (int i = 0; i < lconf->n_tasks_all; ++i) { + if (lconf->task_is_running[i]) { + lconf->task_is_running[i] = 0; + t = lconf->tasks_all[i]; + if (t->aux->stop) + t->aux->stop(t); + } + } + lconf->n_tasks_run = 0; + + if (t && t->aux->stop_last) + t->aux->stop_last(t); + } + else { + for (int i = 0; i < lconf->n_tasks_run; ++i) { + if (lconf_get_task_id(lconf, lconf->tasks_run[i]) == lconf->msg.task_id) { + idx = i; + } + else if (idx != -1) { + lconf->tasks_run[idx] = lconf->tasks_run[i]; + + idx++; + } + } + lconf->task_is_running[lconf->msg.task_id] = 0; + + t = lconf->tasks_all[lconf->msg.task_id]; + if (t->aux->stop) + t->aux->stop(t); + lconf->n_tasks_run--; + if (lconf->n_tasks_run == 0 && t->aux->stop_last) + t->aux->stop_last(t); + } +} + +static void msg_start(struct lcore_cfg *lconf) +{ + int idx = 1; + struct task_base *t = NULL; + + if (lconf->msg.task_id == -1) { + for (int i = 0; i < lconf->n_tasks_all; ++i) { + t = lconf->tasks_run[i] = lconf->tasks_all[i]; + lconf->task_is_running[i] = 1; + if (lconf->n_tasks_run == 0 && t->aux->start_first) { + t->aux->start_first(t); + lconf->n_tasks_run = 1; + } + if (t->aux->start) + t->aux->start(t); + } + lconf->n_tasks_run = lconf->n_tasks_all; + } + else if (lconf->n_tasks_run == 0) { + t = lconf->tasks_run[0] = lconf->tasks_all[lconf->msg.task_id]; + lconf->n_tasks_run = 1; + 
lconf->task_is_running[lconf->msg.task_id] = 1; + + if (t->aux->start_first) + t->aux->start_first(t); + if (t->aux->start) + t->aux->start(t); + } + else { + for (int i = lconf->n_tasks_run - 1; i >= 0; --i) { + idx = lconf_get_task_id(lconf, lconf->tasks_run[i]); + if (idx == lconf->msg.task_id) { + break; + } + else if (idx > lconf->msg.task_id) { + lconf->tasks_run[i + 1] = lconf->tasks_run[i]; + if (i == 0) { + lconf->tasks_run[i] = lconf->tasks_all[lconf->msg.task_id]; + lconf->n_tasks_run++; + break; + } + } + else { + lconf->tasks_run[i + 1] = lconf->tasks_all[lconf->msg.task_id]; + lconf->n_tasks_run++; + break; + } + } + lconf->task_is_running[lconf->msg.task_id] = 1; + + if (lconf->tasks_all[lconf->msg.task_id]->aux->start) + lconf->tasks_all[lconf->msg.task_id]->aux->start(lconf->tasks_all[lconf->msg.task_id]); + } +} + +int lconf_do_flags(struct lcore_cfg *lconf) +{ + struct task_base *t; + int ret = 0; + + switch (lconf->msg.type) { + case LCONF_MSG_STOP: + msg_stop(lconf); + ret = -1; + break; + case LCONF_MSG_START: + msg_start(lconf); + ret = -1; + break; + case LCONF_MSG_DUMP_RX: + case LCONF_MSG_DUMP_TX: + case LCONF_MSG_DUMP: + t = lconf->tasks_all[lconf->msg.task_id]; + + if (lconf->msg.val) { + if (lconf->msg.type == LCONF_MSG_DUMP || + lconf->msg.type == LCONF_MSG_DUMP_RX) { + t->aux->task_rt_dump.n_print_rx = lconf->msg.val; + + task_base_add_rx_pkt_function(t, rx_pkt_dump); + } + + if (lconf->msg.type == LCONF_MSG_DUMP || + lconf->msg.type == LCONF_MSG_DUMP_TX) { + t->aux->task_rt_dump.n_print_tx = lconf->msg.val; + if (t->aux->tx_pkt_orig) + t->tx_pkt = t->aux->tx_pkt_orig; + t->aux->tx_pkt_orig = t->tx_pkt; + t->tx_pkt = tx_pkt_dump; + } + } + break; + case LCONF_MSG_TRACE: + t = lconf->tasks_all[lconf->msg.task_id]; + + if (lconf->msg.val) { + t->aux->task_rt_dump.n_trace = lconf->msg.val; + + if (task_base_get_original_rx_pkt_function(t) != rx_pkt_dummy) { + task_base_add_rx_pkt_function(t, rx_pkt_trace); + if (t->aux->tx_pkt_orig) + 
t->tx_pkt = t->aux->tx_pkt_orig; + t->aux->tx_pkt_orig = t->tx_pkt; + t->tx_pkt = tx_pkt_trace; + } else { + t->aux->task_rt_dump.n_print_tx = lconf->msg.val; + if (t->aux->tx_pkt_orig) + t->tx_pkt = t->aux->tx_pkt_orig; + t->aux->tx_pkt_orig = t->tx_pkt; + t->tx_pkt = tx_pkt_dump; + } + } + break; + case LCONF_MSG_RX_DISTR_START: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + task_base_add_rx_pkt_function(t, rx_pkt_distr); + memset(t->aux->rx_bucket, 0, sizeof(t->aux->rx_bucket)); + lconf->flags |= LCONF_FLAG_RX_DISTR_ACTIVE; + } + break; + case LCONF_MSG_TX_DISTR_START: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + + t->aux->tx_pkt_orig = t->tx_pkt; + t->tx_pkt = tx_pkt_distr; + memset(t->aux->tx_bucket, 0, sizeof(t->aux->tx_bucket)); + lconf->flags |= LCONF_FLAG_TX_DISTR_ACTIVE; + } + break; + case LCONF_MSG_RX_DISTR_STOP: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + task_base_del_rx_pkt_function(t, rx_pkt_distr); + lconf->flags &= ~LCONF_FLAG_RX_DISTR_ACTIVE; + } + break; + case LCONF_MSG_TX_DISTR_STOP: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + if (t->aux->tx_pkt_orig) { + t->tx_pkt = t->aux->tx_pkt_orig; + t->aux->tx_pkt_orig = NULL; + lconf->flags &= ~LCONF_FLAG_TX_DISTR_ACTIVE; + } + } + break; + case LCONF_MSG_RX_DISTR_RESET: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + + memset(t->aux->rx_bucket, 0, sizeof(t->aux->rx_bucket)); + } + break; + case LCONF_MSG_TX_DISTR_RESET: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + + memset(t->aux->tx_bucket, 0, sizeof(t->aux->tx_bucket)); + } + break; + case LCONF_MSG_RX_BW_START: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = 
lconf->tasks_all[task_id]; + task_base_add_rx_pkt_function(t, rx_pkt_bw); + lconf->flags |= LCONF_FLAG_RX_BW_ACTIVE; + } + break; + case LCONF_MSG_RX_BW_STOP: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + task_base_del_rx_pkt_function(t, rx_pkt_bw); + lconf->flags &= ~LCONF_FLAG_RX_BW_ACTIVE; + } + break; + case LCONF_MSG_TX_BW_START: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + + t->aux->tx_pkt_orig = t->tx_pkt; + t->tx_pkt = tx_pkt_bw; + lconf->flags |= LCONF_FLAG_TX_BW_ACTIVE; + } + break; + case LCONF_MSG_TX_BW_STOP: + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + t = lconf->tasks_all[task_id]; + if (t->aux->tx_pkt_orig) { + t->tx_pkt = t->aux->tx_pkt_orig; + t->aux->tx_pkt_orig = NULL; + lconf->flags &= ~LCONF_FLAG_TX_BW_ACTIVE; + } + } + break; + } + + lconf_unset_req(lconf); + return ret; +} + +int lconf_get_task_id(const struct lcore_cfg *lconf, const struct task_base *task) +{ + for (int i = 0; i < lconf->n_tasks_all; ++i) { + if (lconf->tasks_all[i] == task) + return i; + } + + return -1; +} + +int lconf_task_is_running(const struct lcore_cfg *lconf, uint8_t task_id) +{ + return lconf->task_is_running[task_id]; +} diff --git a/VNFs/DPPD-PROX/lconf.h b/VNFs/DPPD-PROX/lconf.h new file mode 100644 index 00000000..4bfa705d --- /dev/null +++ b/VNFs/DPPD-PROX/lconf.h @@ -0,0 +1,145 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _LCONF_H_ +#define _LCONF_H_ + +#include "task_init.h" +#include "stats.h" + +enum lconf_msg_type { + LCONF_MSG_STOP, + LCONF_MSG_START, + LCONF_MSG_DUMP, + LCONF_MSG_TRACE, + LCONF_MSG_DUMP_RX, + LCONF_MSG_DUMP_TX, + LCONF_MSG_RX_DISTR_START, + LCONF_MSG_RX_DISTR_STOP, + LCONF_MSG_RX_DISTR_RESET, + LCONF_MSG_TX_DISTR_START, + LCONF_MSG_TX_DISTR_STOP, + LCONF_MSG_TX_DISTR_RESET, + LCONF_MSG_RX_BW_START, + LCONF_MSG_RX_BW_STOP, + LCONF_MSG_TX_BW_START, + LCONF_MSG_TX_BW_STOP, +}; + +struct lconf_msg { + /* Set by master core (if not set), unset by worker after consumption. */ + uint32_t req; + enum lconf_msg_type type; + int task_id; + int val; +}; + +#define LCONF_FLAG_RX_DISTR_ACTIVE 0x00000001 +#define LCONF_FLAG_RUNNING 0x00000002 +#define LCONF_FLAG_TX_DISTR_ACTIVE 0x00000004 +#define LCONF_FLAG_RX_BW_ACTIVE 0x00000008 +#define LCONF_FLAG_TX_BW_ACTIVE 0x00000010 + +struct lcore_cfg { + /* All tasks running at the moment. This is empty when the core is stopped. 
 */ + struct task_base *tasks_run[MAX_TASKS_PER_CORE]; + uint8_t n_tasks_run; + + void (*flush_queues[MAX_TASKS_PER_CORE])(struct task_base *tbase); + + void (*period_func)(void *data); + void *period_data; + /* call period_func after period_timeout cycles */ + uint64_t period_timeout; + + uint64_t ctrl_timeout; + void (*ctrl_func_m[MAX_TASKS_PER_CORE])(struct task_base *tbase, void **data, uint16_t n_msgs); + struct rte_ring *ctrl_rings_m[MAX_TASKS_PER_CORE]; + + void (*ctrl_func_p[MAX_TASKS_PER_CORE])(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); + struct rte_ring *ctrl_rings_p[MAX_TASKS_PER_CORE]; + + struct lconf_msg msg __attribute__((aligned(4))); + struct task_base *tasks_all[MAX_TASKS_PER_CORE]; + int task_is_running[MAX_TASKS_PER_CORE]; + uint8_t n_tasks_all; + pthread_t thread_id; + + /* Following variables are not accessed in main loop */ + uint32_t flags; + uint8_t active_task; + uint8_t id; + char name[MAX_NAME_SIZE]; + struct task_args targs[MAX_TASKS_PER_CORE]; + int (*thread_x)(struct lcore_cfg *lconf); + uint32_t cache_set; +} __rte_cache_aligned; + +extern struct lcore_cfg *lcore_cfg; +extern struct lcore_cfg lcore_cfg_init[]; + +/* This function is only run on low load (when no bulk was sent within + last drain_timeout (16kpps if DRAIN_TIMEOUT = 2 ms) */ +static inline void lconf_flush_all_queues(struct lcore_cfg *lconf) +{ + struct task_base *task; + + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + task = lconf->tasks_all[task_id]; + if (!(task->flags & FLAG_TX_FLUSH) || (task->flags & FLAG_NEVER_FLUSH)) { + task->flags |= FLAG_TX_FLUSH; + continue; + } + lconf->flush_queues[task_id](task); + } +} + +static inline void lconf_set_req(struct lcore_cfg *lconf) +{ + (*(volatile uint32_t *)&lconf->msg.req) = 1; +} + +static inline void lconf_unset_req(struct lcore_cfg *lconf) +{ + (*(volatile uint32_t *)&lconf->msg.req) = 0; +} + +static inline int lconf_is_req(struct lcore_cfg *lconf) +{ + return 
(*(volatile uint32_t *)&lconf->msg.req); +} + +/* Returns non-zero when terminate has been requested */ +int lconf_do_flags(struct lcore_cfg *lconf); + +int lconf_get_task_id(const struct lcore_cfg *lconf, const struct task_base *task); +int lconf_task_is_running(const struct lcore_cfg *lconf, uint8_t task_id); + +int lconf_run(void *dummy); + +void lcore_cfg_alloc_hp(void); + +/* Returns the next active lconf/targ pair. If *lconf = NULL, the + first active lconf/targ pair is returned. If the last lconf/targ + pair is passed, the function returns non-zero. */ +int core_targ_next(struct lcore_cfg **lconf, struct task_args **targ, const int with_master); +/* Same as above, but uses non-huge page memory (used before + lcore_cfg_alloc_hp is called). */ +int core_targ_next_early(struct lcore_cfg **lconf, struct task_args **targ, const int with_master); + +struct task_args *core_targ_get(uint32_t lcore_id, uint32_t task_id); + +#endif /* _LCONF_H_ */ diff --git a/VNFs/DPPD-PROX/local_mbuf.h b/VNFs/DPPD-PROX/local_mbuf.h new file mode 100644 index 00000000..c65086cc --- /dev/null +++ b/VNFs/DPPD-PROX/local_mbuf.h @@ -0,0 +1,62 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _LOCAL_MBUF_H_ +#define _LOCAL_MBUF_H_ +#define LOCAL_MBUF_COUNT 64 + +struct local_mbuf { + struct rte_mempool *mempool; + uint32_t n_new_pkts; + struct rte_mbuf *new_pkts[LOCAL_MBUF_COUNT]; +}; + +static struct rte_mbuf **local_mbuf_take(struct local_mbuf *local_mbuf, uint32_t count) +{ + PROX_ASSERT(local_mbuf->n_new_pkts >= count); + + const uint32_t start_pos = local_mbuf->n_new_pkts - count; + struct rte_mbuf **ret = &local_mbuf->new_pkts[start_pos]; + + local_mbuf->n_new_pkts -= count; + return ret; +} + +static int local_mbuf_refill(struct local_mbuf *local_mbuf) +{ + const uint32_t fill = LOCAL_MBUF_COUNT - local_mbuf->n_new_pkts; + struct rte_mbuf **fill_mbuf = &local_mbuf->new_pkts[local_mbuf->n_new_pkts]; + + if (rte_mempool_get_bulk(local_mbuf->mempool, (void **)fill_mbuf, fill) < 0) + return -1; + local_mbuf->n_new_pkts += fill; + return 0; +} + +/* Ensures that count or more mbufs are available. Returns pointer to + count allocated mbufs or NULL if not enough mbufs are available. */ +static struct rte_mbuf **local_mbuf_refill_and_take(struct local_mbuf *local_mbuf, uint32_t count) +{ + PROX_ASSERT(count <= LOCAL_MBUF_COUNT); + if (local_mbuf->n_new_pkts >= count) + return local_mbuf_take(local_mbuf, count); + + if (local_mbuf_refill(local_mbuf) == 0) + return local_mbuf_take(local_mbuf, count); + return NULL; +} + +#endif /* _LOCAL_MBUF_H_ */ diff --git a/VNFs/DPPD-PROX/log.c b/VNFs/DPPD-PROX/log.c new file mode 100644 index 00000000..cd8ee002 --- /dev/null +++ b/VNFs/DPPD-PROX/log.c @@ -0,0 +1,398 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <sys/types.h> +#include <unistd.h> +#include <pthread.h> +#include <string.h> +#include <rte_cycles.h> +#include <rte_lcore.h> +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_mbuf.h> + +#include "log.h" +#include "display.h" +#include "etypes.h" +#include "prox_cfg.h" + +static pthread_mutex_t file_mtx = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; +int log_lvl = PROX_MAX_LOG_LVL; +static uint64_t tsc_off; +static FILE *fp; +static int n_warnings = 0; +char last_warn[5][1024]; +int get_n_warnings(void) +{ +#if PROX_MAX_LOG_LVL < PROX_LOG_WARN + return -1; +#endif + return n_warnings; +} + +const char *get_warning(int i) +{ +#if PROX_MAX_LOG_LVL < PROX_LOG_WARN + return NULL; +#endif + if (i > 0 || i < -4) + return NULL; + return last_warn[(n_warnings - 1 + i + 5) % 5]; +} + +static void store_warning(const char *warning) +{ + strncpy(last_warn[n_warnings % 5], warning, sizeof(last_warn[0])); + n_warnings++; +} + +void plog_init(const char *log_name, int log_name_pid) +{ + pid_t pid; + char buf[128]; + + if (*log_name == 0) { + if (log_name_pid) + snprintf(buf, sizeof(buf), "%s-%u.log", "prox", getpid()); + else + strncpy(buf, "prox.log", sizeof(buf)); + } + else { + strncpy(buf, log_name, sizeof(buf)); + } + + fp = fopen(buf, "w"); + + tsc_off = rte_rdtsc() + 2500000000; +} + +int plog_set_lvl(int lvl) +{ + if (lvl <= PROX_MAX_LOG_LVL) { + log_lvl = lvl; + return 0; + } + + return -1; +} + +static void file_lock(void) +{ + pthread_mutex_lock(&file_mtx); +} + +static void file_unlock(void) +{ + pthread_mutex_unlock(&file_mtx); 
+} + +void file_print(const char *str) +{ + file_lock(); + if (fp != NULL) { + fputs(str, fp); + fflush(fp); + } + file_unlock(); +} +static void plog_buf(const char* buf) +{ + if (prox_cfg.logbuf) { + file_lock(); + if (prox_cfg.logbuf_pos + strlen(buf) + 1 < prox_cfg.logbuf_size) { + memcpy(prox_cfg.logbuf + prox_cfg.logbuf_pos, buf, strlen(buf)); + prox_cfg.logbuf_pos += strlen(buf); + } + file_unlock(); + } else { + file_print(buf); +#ifdef PROX_STATS + display_print(buf); +#else + /* ncurses never initialized */ + fputs(buf, stdout); + fflush(stdout); +#endif + } +} + +static const char* lvl_to_str(int lvl, int always) +{ + switch (lvl) { + case PROX_LOG_ERR: return "error "; + case PROX_LOG_WARN: return "warn "; + case PROX_LOG_INFO: return always? "info " : ""; + case PROX_LOG_DBG: return "debug "; + default: return "?"; + } +} + +#define DUMP_PKT_LEN 128 +static int dump_pkt(char *dst, size_t dst_size, const struct rte_mbuf *mbuf) +{ + const struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, const struct ether_hdr *); + const struct ipv4_hdr *dpip = (const struct ipv4_hdr *)(peth + 1); + const uint8_t *pkt_bytes = (const uint8_t *)peth; + const uint16_t len = rte_pktmbuf_pkt_len(mbuf); + size_t str_len = 0; + + if (peth->ether_type == ETYPE_IPv4) + str_len = snprintf(dst, dst_size, "pkt_len=%u, Eth=%x, Proto=%#06x", + len, peth->ether_type, dpip->next_proto_id); + else + str_len = snprintf(dst, dst_size, "pkt_len=%u, Eth=%x", + len, peth->ether_type); + + for (uint16_t i = 0; i < len && i < DUMP_PKT_LEN && str_len < dst_size; ++i) { + if (i % 16 == 0) { + str_len += snprintf(dst + str_len, dst_size - str_len, "\n%04x ", i); + } + else if (i % 8 == 0) { + str_len += snprintf(dst + str_len, dst_size - str_len, " "); + } + str_len += snprintf(dst + str_len, dst_size - str_len, "%02x ", pkt_bytes[i]); + } + if (str_len < dst_size) + snprintf(dst + str_len, dst_size - str_len, "\n"); + return str_len + 1; +} + +static int vplog(int lvl, const char *format, va_list 
ap, const struct rte_mbuf *mbuf, int extended) +{ + char buf[32768]; + uint64_t hz, rtime_tsc, rtime_sec, rtime_usec; + int ret = 0; + + if (lvl > log_lvl) + return ret; + + if (format == NULL && mbuf == NULL) + return ret; + + *buf = 0; + if (extended) { + hz = rte_get_tsc_hz(); + rtime_tsc = rte_rdtsc() - tsc_off; + rtime_sec = rtime_tsc / hz; + rtime_usec = (rtime_tsc - rtime_sec * hz) / (hz / 1000000); + ret += snprintf(buf, sizeof(buf) - ret, "%2"PRIu64".%06"PRIu64" C%u %s%s", + rtime_sec, rtime_usec, rte_lcore_id(), lvl_to_str(lvl, 1), format? " " : ""); + } + else { + ret += snprintf(buf, sizeof(buf) - ret, "%s%s", lvl_to_str(lvl, 0), format? " " : ""); + } + + if (format) { + ret--; + ret += vsnprintf(buf + ret, sizeof(buf) - ret, format, ap); + } + + if (mbuf) { + ret--; + ret += dump_pkt(buf + ret, sizeof(buf) - ret, mbuf); + } + plog_buf(buf); + + if (lvl == PROX_LOG_WARN) { + store_warning(buf); + } + return ret; +} + +#if PROX_MAX_LOG_LVL >= PROX_LOG_INFO +int plog_info(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_INFO, fmt, ap, NULL, 0); + va_end(ap); + return ret; +} + +int plogx_info(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_INFO, fmt, ap, NULL, 1); + va_end(ap); + return ret; +} + +int plogd_info(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_INFO, fmt, ap, mbuf, 0); + va_end(ap); + return ret; +} + +int plogdx_info(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_INFO, fmt, ap, mbuf, 1); + va_end(ap); + return ret; +} +#endif + +#if PROX_MAX_LOG_LVL >= PROX_LOG_ERR +int plog_err(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_ERR, fmt, ap, NULL, 0); + va_end(ap); + return ret; +} + +int plogx_err(const char *fmt, ...) 
+{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_ERR, fmt, ap, NULL, 1); + va_end(ap); + return ret; +} + +int plogd_err(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_ERR, fmt, ap, mbuf, 1); + va_end(ap); + return ret; +} + +int plogdx_err(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_ERR, fmt, ap, mbuf, 1); + va_end(ap); + + return ret; +} +#endif + +#if PROX_MAX_LOG_LVL >= PROX_LOG_WARN +int plog_warn(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_WARN, fmt, ap, NULL, 0); + va_end(ap); + return ret; +} + +int plogx_warn(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_WARN, fmt, ap, NULL, 1); + va_end(ap); + return ret; +} + +int plogd_warn(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_WARN, fmt, ap, mbuf, 0); + va_end(ap); + return ret; +} + +int plogdx_warn(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_WARN, fmt, ap, mbuf, 1); + va_end(ap); + return ret; +} +#endif + +#if PROX_MAX_LOG_LVL >= PROX_LOG_DBG +int plog_dbg(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_DBG, fmt, ap, NULL, 0); + va_end(ap); + return ret; +} + +int plogx_dbg(const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_DBG, fmt, ap, NULL, 1); + va_end(ap); + return ret; +} + +int plogd_dbg(const struct rte_mbuf *mbuf, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_DBG, fmt, ap, mbuf, 0); + va_end(ap); + return ret; +} + +int plogdx_dbg(const struct rte_mbuf *mbuf, const char *fmt, ...) 
+{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = vplog(PROX_LOG_DBG, fmt, ap, mbuf, 1); + va_end(ap); + return ret; +} +#endif diff --git a/VNFs/DPPD-PROX/log.h b/VNFs/DPPD-PROX/log.h new file mode 100644 index 00000000..a5dcf47a --- /dev/null +++ b/VNFs/DPPD-PROX/log.h @@ -0,0 +1,88 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _LOG_H_ +#define _LOG_H_ + +#define PROX_LOG_ERR 0 +#define PROX_LOG_WARN 1 +#define PROX_LOG_INFO 2 +#define PROX_LOG_DBG 3 + +#if PROX_MAX_LOG_LVL > PROX_LOG_DBG +#error Highest supported log level is 3 +#endif + +int get_n_warnings(void); +/* Return previous warnings, only stores last 5 warnings and invalid i return NULL*/ +const char* get_warning(int i); + +struct rte_mbuf; + +#if PROX_MAX_LOG_LVL >= PROX_LOG_ERR +int plog_err(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogx_err(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogd_err(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +int plogdx_err(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +#else +__attribute__((format(printf, 1, 2))) static inline int plog_err(__attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 1, 2))) static inline int plogx_err(__attribute__((unused)) const char *fmt, ...) 
{return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogd_err(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogdx_err(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +#endif + +#if PROX_MAX_LOG_LVL >= PROX_LOG_WARN +int plog_warn(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogx_warn(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogd_warn(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +int plogdx_warn(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +#else +__attribute__((format(printf, 1, 2))) static inline int plog_warn(__attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 1, 2))) static inline int plogx_warn(__attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogd_warn(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogdx_warn(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +#endif + +#if PROX_MAX_LOG_LVL >= PROX_LOG_INFO +int plog_info(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogx_info(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogd_info(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +int plogdx_info(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +#else +__attribute__((format(printf, 1, 2))) static inline int plog_info(__attribute__((unused)) const char *fmt, ...) 
{return 0;} +__attribute__((format(printf, 1, 2))) static inline int plogx_info(__attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogd_info(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogdx_info(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +#endif + +#if PROX_MAX_LOG_LVL >= PROX_LOG_DBG +int plog_dbg(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogx_dbg(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold)); +int plogd_dbg(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +int plogdx_dbg(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold)); +#else +__attribute__((format(printf, 1, 2))) static inline int plog_dbg(__attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 1, 2))) static inline int plogx_dbg(__attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogd_dbg(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;} +__attribute__((format(printf, 2, 3))) static inline int plogdx_dbg(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) 
{return 0;} +#endif + +void plog_init(const char *log_name, int log_name_pid); +void file_print(const char *str); + +int plog_set_lvl(int lvl); + +#endif /* _LOG_H_ */ diff --git a/VNFs/DPPD-PROX/lua_compat.h b/VNFs/DPPD-PROX/lua_compat.h new file mode 100644 index 00000000..c8c21225 --- /dev/null +++ b/VNFs/DPPD-PROX/lua_compat.h @@ -0,0 +1,48 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _LUA_COMPAT_H_ +#define _LUA_COMPAT_H_ + +#include <lua.h> +#include <lauxlib.h> +#include <lualib.h> + +#if LUA_VERSION_NUM < 503 +#include <float.h> +static int lua_isinteger(lua_State *L, int idx) +{ + if (!lua_isnumber(L, idx)) { + return -1; + } + + double whole = lua_tonumber(L, idx); + whole -= lua_tointeger(L, idx); + return whole < DBL_EPSILON && whole >= -DBL_EPSILON ; +} +#endif + +#if LUA_VERSION_NUM < 502 +static int lua_len(lua_State *L, int idx) +{ + int len = lua_objlen(L, idx); + + lua_pushnumber(L, len); + return len; +} +#endif + +#endif /* _LUA_COMPAT_H_ */ diff --git a/VNFs/DPPD-PROX/main.c b/VNFs/DPPD-PROX/main.c new file mode 100644 index 00000000..28533c78 --- /dev/null +++ b/VNFs/DPPD-PROX/main.c @@ -0,0 +1,993 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <locale.h> +#include <unistd.h> +#include <signal.h> + +#include <rte_cycles.h> +#include <rte_atomic.h> +#include <rte_table_hash.h> +#include <rte_memzone.h> +#include <rte_errno.h> + +#include "prox_malloc.h" +#include "run.h" +#include "main.h" +#include "log.h" +#include "quit.h" +#include "clock.h" +#include "defines.h" +#include "version.h" +#include "prox_args.h" +#include "prox_assert.h" +#include "prox_cfg.h" +#include "prox_shared.h" +#include "prox_port_cfg.h" +#include "toeplitz.h" +#include "hash_utils.h" +#include "handle_lb_net.h" +#include "prox_cksum.h" +#include "thread_nop.h" +#include "thread_generic.h" +#include "thread_pipeline.h" +#include "cqm.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +uint8_t lb_nb_txrings = 0xff; +struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE]; + +static void __attribute__((noreturn)) prox_usage(const char *prgname) +{ + plog_info("\nUsage: %s [-f CONFIG_FILE] [-a|-e] [-m|-s|-i] [-w DEF] [-u] [-t]\n" + "\t-f CONFIG_FILE : configuration file to load, ./prox.cfg by default\n" + "\t-l LOG_FILE : log file name, ./prox.log by default\n" + "\t-p : include PID in log file name if default log file is used\n" + "\t-o DISPLAY: Set display to use, can be 'curses' (default), 'cli' or 'none'\n" + "\t-v verbosity : initial logging verbosity\n" + "\t-a : autostart all cores (by default)\n" + "\t-e : don't autostart\n" + "\t-n : Create NULL devices instead of using PCI devices, useful together with -i\n" + 
"\t-m : list supported task modes and exit\n" + "\t-s : check configuration file syntax and exit\n" + "\t-i : check initialization sequence and exit\n" + "\t-u : Listen on UDS /tmp/prox.sock\n" + "\t-t : Listen on TCP port 8474\n" + "\t-q : Pass argument to Lua interpreter, useful to define variables\n" + "\t-w : define variable using syntax varname=value\n" + "\t takes precedence over variables defined in CONFIG_FILE\n" + "\t-k : Log statistics to file \"stats_dump\" in current directory\n" + "\t-d : Run as daemon, the parent process will block until PROX is not initialized\n" + "\t-z : Ignore CPU topology, implies -i\n" + "\t-r : Change initial screen refresh rate. If set to a lower than 0.001 seconds,\n" + "\t screen refreshing will be disabled\n" + , prgname); + exit(EXIT_FAILURE); +} + +static void check_mixed_normal_pipeline(void) +{ + struct lcore_cfg *lconf = NULL; + uint32_t lcore_id = -1; + + while (prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + + int all_thread_nop = 1; + int generic = 0; + int pipeline = 0; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + all_thread_nop = all_thread_nop && + targ->task_init->thread_x == thread_nop; + + pipeline = pipeline || targ->task_init->thread_x == thread_pipeline; + generic = generic || targ->task_init->thread_x == thread_generic; + } + PROX_PANIC(generic && pipeline, "Can't run both pipeline and normal thread on same core\n"); + + if (all_thread_nop) + lconf->thread_x = thread_nop; + else { + lconf->thread_x = thread_generic; + } + } +} + +static void check_missing_rx(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + while (core_targ_next(&lconf, &targ, 0) == 0) { + PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task, + "Configuration Error - Core %u task %u Receiving from ring, but nobody xmitting to this ring\n", lconf->id, targ->id); + if 
(targ->nb_rxports == 0 && targ->nb_rxrings == 0) { + PROX_PANIC(!task_init_flag_set(targ->task_init, TASK_FEATURE_NO_RX), + "\tCore %u task %u: no rx_ports and no rx_rings configured while required by mode %s\n", lconf->id, targ->id, targ->task_init->mode_str); + } + } +} + +static void check_cfg_consistent(void) +{ + check_missing_rx(); + check_mixed_normal_pipeline(); +} + +static void plog_all_rings(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + while (core_targ_next(&lconf, &targ, 0) == 0) { + for (uint8_t ring_idx = 0; ring_idx < targ->nb_rxrings; ++ring_idx) { + plog_info("\tCore %u, task %u, rx_ring[%u] %p\n", lconf->id, targ->id, ring_idx, targ->rx_rings[ring_idx]); + } + } +} + +static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set) +{ + if (task_init_flag_set(targ->task_init, flag) == is_set) + return 1; + + int ret = 0; + + for (uint32_t i = 0; i < targ->n_prev_tasks; ++i) { + ret = chain_flag_state(targ->prev_tasks[i], flag, is_set); + if (ret) + return 1; + } + return 0; +} + +static void configure_if_tx_queues(struct task_args *targ, uint8_t socket) +{ + uint8_t if_port; + + for (uint8_t i = 0; i < targ->nb_txports; ++i) { + if_port = targ->tx_port_queue[i].port; + + PROX_PANIC(if_port == OUT_DISCARD, "port misconfigured, exiting\n"); + + PROX_PANIC(!prox_port_cfg[if_port].active, "\tPort %u not used, skipping...\n", if_port); + + int dsocket = prox_port_cfg[if_port].socket; + if (dsocket != -1 && dsocket != socket) { + plog_warn("TX core on socket %d while device on socket %d\n", socket, dsocket); + } + + if (prox_port_cfg[if_port].tx_ring[0] == '\0') { // Rings-backed port can use single queue + targ->tx_port_queue[i].queue = prox_port_cfg[if_port].n_txq; + prox_port_cfg[if_port].n_txq++; + } else { + prox_port_cfg[if_port].n_txq = 1; + targ->tx_port_queue[i].queue = 0; + } + /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of + the tasks up to the task transmitting to the port + does not use refcnt. 
*/ + if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) { + prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT; + plog_info("\t\tEnabling No refcnt on port %d\n", if_port); + } + else { + plog_info("\t\tRefcnt used on port %d\n", if_port); + } + + /* By default OFFLOAD is enabled, but if the whole + chain has NOOFFLOADS set all the way until the + first task that receives from a port, it will be + disabled for the destination port. */ + if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) { + prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS; + plog_info("\t\tDisabling TX offloads on port %d\n", if_port); + } else { + plog_info("\t\tEnabling TX offloads on port %d\n", if_port); + } + + /* By default NOMULTSEGS is disabled, as drivers/NIC might split packets on RX + It should only be enabled when we know for sure that the RX does not split packets. + Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task + transmitting to the port does not use multsegs. 
*/ + if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) { + prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; + plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port); + } + else { + plog_info("\t\tMultiSegs used on port %d\n", if_port); + } + } +} + +static void configure_if_rx_queues(struct task_args *targ, uint8_t socket) +{ + for (int i = 0; i < targ->nb_rxports; i++) { + uint8_t if_port = targ->rx_port_queue[i].port; + + if (if_port == OUT_DISCARD) { + return; + } + + PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port); + + if(prox_port_cfg[if_port].rx_ring[0] != '\0') { + prox_port_cfg[if_port].n_rxq = 0; + } + + targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq; + prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool; + prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1; + prox_port_cfg[if_port].n_rxq++; + + int dsocket = prox_port_cfg[if_port].socket; + if (dsocket != -1 && dsocket != socket) { + plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket); + } + } +} + +static void configure_if_queues(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + uint8_t socket; + + while (core_targ_next(&lconf, &targ, 0) == 0) { + socket = rte_lcore_to_socket_id(lconf->id); + + configure_if_tx_queues(targ, socket); + configure_if_rx_queues(targ, socket); + } +} + +static const char *gen_ring_name(void) +{ + static char retval[] = "XX"; + static const char* ring_names = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz" + "[\\]^_`!\"#$%&'()*+,-./:;<=" + ">?@{|}0123456789"; + static int idx2 = 0; + + int idx = idx2; + + retval[0] = ring_names[idx % strlen(ring_names)]; + idx /= strlen(ring_names); + retval[1] = idx ? 
ring_names[(idx - 1) % strlen(ring_names)] : 0; + + idx2++; + + return retval; +} + +static int task_is_master(struct task_args *targ) +{ + return !targ->lconf; +} + +struct ring_init_stats { + uint32_t n_pkt_rings; + uint32_t n_ctrl_rings; + uint32_t n_opt_rings; +}; + +static uint32_t ring_init_stats_total(const struct ring_init_stats *ris) +{ + return ris->n_pkt_rings + ris->n_ctrl_rings + ris->n_opt_rings; +} + +static uint32_t count_incoming_tasks(uint32_t lcore_worker, uint32_t dest_task) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + uint32_t ret = 0; + struct core_task ct; + + while (core_targ_next(&lconf, &targ, 0) == 0) { + for (uint8_t idxx = 0; idxx < MAX_PROTOCOLS; ++idxx) { + for (uint8_t ridx = 0; ridx < targ->core_task_set[idxx].n_elems; ++ridx) { + ct = targ->core_task_set[idxx].core_task[ridx]; + + if (dest_task == ct.task && lcore_worker == ct.core) + ret++; + } + } + } + return ret; +} + +static struct rte_ring *get_existing_ring(uint32_t lcore_id, uint32_t task_id) +{ + if (!prox_core_active(lcore_id, 0)) + return NULL; + + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (task_id >= lconf->n_tasks_all) + return NULL; + + if (lconf->targs[task_id].nb_rxrings == 0) + return NULL; + + return lconf->targs[task_id].rx_rings[0]; +} + +static void init_ring_between_tasks(struct lcore_cfg *lconf, struct task_args *starg, + const struct core_task ct, uint8_t ring_idx, int idx, + struct ring_init_stats *ris) +{ + uint8_t socket; + struct rte_ring *ring = NULL; + struct lcore_cfg *lworker; + struct task_args *dtarg; + + PROX_ASSERT(prox_core_active(ct.core, 0)); + lworker = &lcore_cfg[ct.core]; + + /* socket used is the one that the sending core resides on */ + socket = rte_lcore_to_socket_id(lconf->id); + + plog_info("\t\tCreating ring on socket %u with size %u\n" + "\t\t\tsource core, task and socket = %u, %u, %u\n" + "\t\t\tdestination core, task and socket = %u, %u, %u\n" + "\t\t\tdestination worker id = %u\n", + socket, 
starg->ring_size, + lconf->id, starg->id, socket, + ct.core, ct.task, rte_lcore_to_socket_id(ct.core), + ring_idx); + + if (ct.type) { + struct rte_ring **dring = NULL; + + if (ct.type == CTRL_TYPE_MSG) + dring = &lworker->ctrl_rings_m[ct.task]; + else if (ct.type == CTRL_TYPE_PKT) { + dring = &lworker->ctrl_rings_p[ct.task]; + starg->flags |= TASK_ARG_CTRL_RINGS_P; + } + + if (*dring == NULL) + ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ); + else + ring = *dring; + PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core); + + starg->tx_rings[starg->tot_n_txrings_inited] = ring; + starg->tot_n_txrings_inited++; + *dring = ring; + if (lconf->id == prox_cfg.master) { + ctrl_rings[ct.core*MAX_TASKS_PER_CORE + ct.task] = ring; + } + + plog_info("\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n", + lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT? + "pkt" : "msg", ring, ring->name); + ris->n_ctrl_rings++; + return; + } + + dtarg = &lworker->targs[ct.task]; + lworker->targs[ct.task].worker_thread_id = ring_idx; + PROX_ASSERT(dtarg->flags & TASK_ARG_RX_RING); + PROX_ASSERT(ct.task < lworker->n_tasks_all); + + /* If all the following conditions are met, the ring can be + optimized away. */ + if (!task_is_master(starg) && starg->lconf->id == dtarg->lconf->id && + starg->nb_txrings == 1 && idx == 0 && dtarg->task && + dtarg->tot_rxrings == 1 && starg->task == dtarg->task - 1) { + plog_info("\t\tOptimizing away ring on core %u from task %u to task %u\n", + dtarg->lconf->id, starg->task, dtarg->task); + /* No need to set up ws_mbuf. */ + starg->tx_opt_ring = 1; + /* During init of destination task, the buffer in the + source task will be initialized. 
*/ + dtarg->tx_opt_ring_task = starg; + ris->n_opt_rings++; + ++dtarg->nb_rxrings; + return; + } + + int ring_created = 1; + /* Only create multi-producer rings if configured to do so AND + there is only one task sending to the task */ + if ((prox_cfg.flags & DSF_MP_RINGS && count_incoming_tasks(ct.core, ct.task) > 1) + || (prox_cfg.flags & DSF_ENABLE_BYPASS)) { + ring = get_existing_ring(ct.core, ct.task); + + if (ring) { + plog_info("\t\tCore %u task %u creatign MP ring %p to core %u task %u\n", + lconf->id, starg->id, ring, ct.core, ct.task); + ring_created = 0; + } + else { + ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SC_DEQ); + plog_info("\t\tCore %u task %u using MP ring %p from core %u task %u\n", + lconf->id, starg->id, ring, ct.core, ct.task); + } + } + else + ring = rte_ring_create(gen_ring_name(), starg->ring_size, socket, RING_F_SP_ENQ | RING_F_SC_DEQ); + + PROX_PANIC(ring == NULL, "Cannot create ring to connect I/O core %u with worker core %u\n", lconf->id, ct.core); + + starg->tx_rings[starg->tot_n_txrings_inited] = ring; + starg->tot_n_txrings_inited++; + + if (ring_created) { + PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK); + dtarg->rx_rings[dtarg->nb_rxrings] = ring; + ++dtarg->nb_rxrings; + } + dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems; + dtarg->lb_friend_core = lconf->id; + dtarg->lb_friend_task = starg->id; + plog_info("\t\tWorker thread %d has core %d, task %d as a lb friend\n", ct.core, lconf->id, starg->id); + plog_info("\t\tCore %u task %u tx_ring[%u] -> core %u task %u rx_ring[%u] %p %s %u WT\n", + lconf->id, starg->id, ring_idx, ct.core, ct.task, dtarg->nb_rxrings, ring, ring->name, + dtarg->nb_slave_threads); + ++ris->n_pkt_rings; +} + +static void init_rings(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *starg; + struct ring_init_stats ris = {0}; + + while (core_targ_next(&lconf, &starg, 1) == 0) { + plog_info("\t*** Initializing rings on core %u, task %u ***\n", 
lconf->id, starg->id); + for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) { + for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) { + PROX_ASSERT(ring_idx < MAX_WT_PER_LB); + PROX_ASSERT(starg->tot_n_txrings_inited < MAX_RINGS_PER_TASK); + + struct core_task ct = starg->core_task_set[idx].core_task[ring_idx]; + init_ring_between_tasks(lconf, starg, ct, ring_idx, idx, &ris); + } + } + } + + plog_info("\tInitialized %d rings:\n" + "\t\tNumber of packet rings: %u\n" + "\t\tNumber of control rings: %u\n" + "\t\tNumber of optimized rings: %u\n", + ring_init_stats_total(&ris), + ris.n_pkt_rings, + ris.n_ctrl_rings, + ris.n_opt_rings); +} + +static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf) +{ + struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id()); + uint64_t got = 0; + + while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0) + ++got; + + while (got) { + int idx; + do { + idx = rand() % nb_mbuf - 1; + } while (pkts[idx] == 0); + + rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1); + pkts[idx] = 0; + --got; + }; + prox_free(pkts); +} + +static void setup_mempools_unique_per_socket(void) +{ + uint32_t flags = 0; + char name[64]; + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + struct rte_mempool *pool[MAX_SOCKETS]; + uint32_t mbuf_count[MAX_SOCKETS] = {0}; + uint32_t nb_cache_mbuf[MAX_SOCKETS] = {0}; + uint32_t mbuf_size[MAX_SOCKETS] = {0}; + + while (core_targ_next_early(&lconf, &targ, 0) == 0) { + PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id); + uint8_t socket = rte_lcore_to_socket_id(lconf->id); + PROX_ASSERT(socket < MAX_SOCKETS); + + if (targ->mbuf_size_set_explicitely) + flags = MEMPOOL_F_NO_SPREAD; + if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0)) { + targ->mbuf_size = targ->task_init->mbuf_size; + } + if (targ->rx_port_queue[0].port != OUT_DISCARD) 
{ + struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port]; + PROX_ASSERT(targ->nb_mbuf != 0); + mbuf_count[socket] += targ->nb_mbuf; + if (nb_cache_mbuf[socket] == 0) + nb_cache_mbuf[socket] = targ->nb_cache_mbuf; + else { + PROX_PANIC(nb_cache_mbuf[socket] != targ->nb_cache_mbuf, + "all mbuf_cache must have the same size if using a unique mempool per socket\n"); + } + if (mbuf_size[socket] == 0) + mbuf_size[socket] = targ->mbuf_size; + else { + PROX_PANIC(mbuf_size[socket] != targ->mbuf_size, + "all mbuf_size must have the same size if using a unique mempool per socket\n"); + } + if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) { + if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM) + mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM; + } + } + } + for (int i = 0 ; i < MAX_SOCKETS; i++) { + if (mbuf_count[i] != 0) { + sprintf(name, "socket_%u_pool", i); + pool[i] = rte_mempool_create(name, + mbuf_count[i] - 1, mbuf_size[i], + nb_cache_mbuf[i], + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + prox_pktmbuf_init, NULL, + i, flags); + PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i); + plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i], + mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i); + + if (prox_cfg.flags & DSF_SHUFFLE) { + shuffle_mempool(pool[i], mbuf_count[i]); + } + } + } + + lconf = NULL; + while (core_targ_next_early(&lconf, &targ, 0) == 0) { + uint8_t socket = rte_lcore_to_socket_id(lconf->id); + + if (targ->rx_port_queue[0].port != OUT_DISCARD) { + /* use this pool for the interface that the core is receiving from */ + /* If one core receives from multiple ports, all the ports use the same mempool */ + targ->pool = pool[socket]; + /* Set the number of mbuf to the number of the unique mempool, so that the used and free work */ + targ->nb_mbuf = mbuf_count[socket]; + plog_info("\t\tMempool %p size = %u 
* %u cache %u, socket %d\n", targ->pool, + targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket); + } + } +} + +static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args *targ) +{ + const uint8_t socket = rte_lcore_to_socket_id(lconf->id); + struct prox_port_cfg *port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port]; + const struct rte_memzone *mz; + struct rte_mempool *mp = NULL; + uint32_t flags = 0; + char memzone_name[64]; + char name[64]; + + /* mbuf size can be set + * - from config file (highest priority, overwriting any other config) - should only be used as workaround + * - through each 'mode', overwriting the default mbuf_size + * - defaulted to MBUF_SIZE i.e. 1518 Bytes + * Except is set expliciteky, ensure that size is big enough for vmxnet3 driver + */ + if (targ->mbuf_size_set_explicitely) { + flags = MEMPOOL_F_NO_SPREAD; + /* targ->mbuf_size already set */ + } + else if (targ->task_init->mbuf_size != 0) { + /* mbuf_size not set through config file but set through mode */ + targ->mbuf_size = targ->task_init->mbuf_size; + } + else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) { + if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM) + targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM; + } + + /* allocate memory pool for packets */ + PROX_ASSERT(targ->nb_mbuf != 0); + + if (targ->pool_name[0] == '\0') { + sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id); + } + + snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name); + mz = rte_memzone_lookup(memzone_name); + + if (mz != NULL) { + mp = (struct rte_mempool*)mz->addr; + + targ->nb_mbuf = mp->size; + targ->pool = mp; + } + +#ifdef RTE_LIBRTE_IVSHMEM_FALSE + if (mz != NULL && mp != NULL && mp->phys_addr != mz->ioremap_addr) { + /* Init mbufs with ioremap_addr for dma */ + mp->phys_addr = mz->ioremap_addr; + mp->elt_pa[0] = mp->phys_addr + (mp->elt_va_start - (uintptr_t)mp); + + struct prox_pktmbuf_reinit_args init_args; + init_args.mp = mp; 
+ init_args.lconf = lconf; + + uint32_t elt_sz = mp->elt_size + mp->header_size + mp->trailer_size; + rte_mempool_obj_iter((void*)mp->elt_va_start, mp->size, elt_sz, 1, + mp->elt_pa, mp->pg_num, mp->pg_shift, prox_pktmbuf_reinit, &init_args); + } +#endif + + /* Use this pool for the interface that the core is + receiving from if one core receives from multiple + ports, all the ports use the same mempool */ + if (targ->pool == NULL) { + plog_info("\t\tCreating mempool with name '%s'\n", name); + targ->pool = rte_mempool_create(name, + targ->nb_mbuf - 1, targ->mbuf_size, + targ->nb_cache_mbuf, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + prox_pktmbuf_init, lconf, + socket, flags); + } + + PROX_PANIC(targ->pool == NULL, + "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno)); + + plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool, + targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket); + if (prox_cfg.flags & DSF_SHUFFLE) { + shuffle_mempool(targ->pool, targ->nb_mbuf); + } +} + +static void setup_mempools_multiple_per_socket(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + while (core_targ_next_early(&lconf, &targ, 0) == 0) { + PROX_PANIC(targ->task_init == NULL, "task_init = NULL, is mode specified for core %d, task %d ?\n", lconf->id, targ->id); + if (targ->rx_port_queue[0].port == OUT_DISCARD) + continue; + setup_mempool_for_rx_task(lconf, targ); + } +} + +static void setup_mempools(void) +{ + if (prox_cfg.flags & UNIQUE_MEMPOOL_PER_SOCKET) + setup_mempools_unique_per_socket(); + else + setup_mempools_multiple_per_socket(); +} + +static void set_task_lconf(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id = -1; + + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + lconf->targs[task_id].lconf = lconf; + } + } +} + +static 
void set_dest_threads(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + while (core_targ_next(&lconf, &targ, 0) == 0) { + for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) { + for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) { + struct core_task ct = targ->core_task_set[idx].core_task[ring_idx]; + + struct task_args *dest_task = core_targ_get(ct.core, ct.task); + dest_task->prev_tasks[dest_task->n_prev_tasks++] = targ; + } + } + } +} + +static void setup_all_task_structs_early_init(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + plog_info("\t*** Calling early init on all tasks ***\n"); + while (core_targ_next(&lconf, &targ, 0) == 0) { + if (targ->task_init->early_init) { + targ->task_init->early_init(targ); + } + } +} + +static void setup_all_task_structs(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id = -1; + + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]); + } + } +} + +static void init_port_activate(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + uint8_t port_id = 0; + + while (core_targ_next_early(&lconf, &targ, 0) == 0) { + for (int i = 0; i < targ->nb_rxports; i++) { + port_id = targ->rx_port_queue[i].port; + prox_port_cfg[port_id].active = 1; + } + + for (int i = 0; i < targ->nb_txports; i++) { + port_id = targ->tx_port_queue[i].port; + prox_port_cfg[port_id].active = 1; + } + } +} + +/* Initialize cores and allocate mempools */ +static void init_lcores(void) +{ + struct lcore_cfg *lconf = 0; + uint32_t lcore_id = -1; + + while(prox_core_next(&lcore_id, 0) == 0) { + uint8_t socket = rte_lcore_to_socket_id(lcore_id); + PROX_PANIC(socket + 1 > MAX_SOCKETS, "Can't configure core %u (on socket %u). 
MAX_SOCKET is set to %d\n", lcore_id, socket, MAX_SOCKETS); + } + + /* need to allocate mempools as the first thing to use the lowest possible address range */ + plog_info("=== Initializing mempools ===\n"); + setup_mempools(); + + lcore_cfg_alloc_hp(); + + set_dest_threads(); + set_task_lconf(); + + plog_info("=== Initializing port addresses ===\n"); + init_port_addr(); + + plog_info("=== Initializing queue numbers on cores ===\n"); + configure_if_queues(); + + plog_info("=== Initializing rings on cores ===\n"); + init_rings(); + + plog_info("=== Checking configuration consistency ===\n"); + check_cfg_consistent(); + + plog_all_rings(); + + setup_all_task_structs_early_init(); + plog_info("=== Initializing tasks ===\n"); + setup_all_task_structs(); +} + +static int setup_prox(int argc, char **argv) +{ + if (prox_read_config_file() != 0 || + prox_setup_rte(argv[0]) != 0) { + return -1; + } + + if (prox_cfg.flags & DSF_CHECK_SYNTAX) { + plog_info("=== Configuration file syntax has been checked ===\n\n"); + exit(EXIT_SUCCESS); + } + + init_port_activate(); + plog_info("=== Initializing rte devices ===\n"); + if (!(prox_cfg.flags & DSF_USE_DUMMY_DEVICES)) + init_rte_ring_dev(); + init_rte_dev(prox_cfg.flags & DSF_USE_DUMMY_DEVICES); + plog_info("=== Calibrating TSC overhead ===\n"); + clock_init(); + plog_info("\tTSC running at %"PRIu64" Hz\n", rte_get_tsc_hz()); + + init_lcores(); + plog_info("=== Initializing ports ===\n"); + init_port_all(); + + if (prox_cfg.logbuf_size) { + prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id()); + PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size); + } + + if (prox_cfg.flags & DSF_CHECK_INIT) { + plog_info("=== Initialization sequence completed ===\n\n"); + exit(EXIT_SUCCESS); + } + + /* Current way that works to disable DPDK logging */ + FILE *f = fopen("/dev/null", "r"); + rte_openlog_stream(f); + plog_info("=== PROX started ===\n"); + return 0; +} + 
+static int success = 0; +static void siguser_handler(int signal) +{ + if (signal == SIGUSR1) + success = 1; + else + success = 0; +} + +static void sigabrt_handler(__attribute__((unused)) int signum) +{ + /* restore default disposition for SIGABRT and SIGPIPE */ + signal(SIGABRT, SIG_DFL); + signal(SIGPIPE, SIG_DFL); + + /* ignore further Ctrl-C */ + signal(SIGINT, SIG_IGN); + + /* more drastic exit on tedious termination signal */ + plog_info("Aborting...\n"); + if (lcore_cfg != NULL) { + uint32_t lcore_id; + pthread_t thread_id, tid0, tid = pthread_self(); + memset(&tid0, 0, sizeof(tid0)); + + /* cancel all threads except current one */ + lcore_id = -1; + while (prox_core_next(&lcore_id, 1) == 0) { + thread_id = lcore_cfg[lcore_id].thread_id; + if (pthread_equal(thread_id, tid0)) + continue; + if (pthread_equal(thread_id, tid)) + continue; + pthread_cancel(thread_id); + } + + /* wait for cancelled threads to terminate */ + lcore_id = -1; + while (prox_core_next(&lcore_id, 1) == 0) { + thread_id = lcore_cfg[lcore_id].thread_id; + if (pthread_equal(thread_id, tid0)) + continue; + if (pthread_equal(thread_id, tid)) + continue; + pthread_join(thread_id, NULL); + } + } + + /* close ncurses */ + display_end(); + + /* close ports on termination signal */ + close_ports_atexit(); + + /* terminate now */ + abort(); +} + +static void sigterm_handler(int signum) +{ + /* abort on second Ctrl-C */ + if (signum == SIGINT) + signal(SIGINT, sigabrt_handler); + + /* gracefully quit on harmless termination signal */ + /* ports will subsequently get closed at resulting exit */ + quit(); +} + +int main(int argc, char **argv) +{ + /* set en_US locale to print big numbers with ',' */ + setlocale(LC_NUMERIC, "en_US.utf-8"); + + if (prox_parse_args(argc, argv) != 0){ + prox_usage(argv[0]); + } + + plog_init(prox_cfg.log_name, prox_cfg.log_name_pid); + plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n"); + plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX)); + 
read_rdt_info(); + + if (prox_cfg.flags & DSF_LIST_TASK_MODES) { + /* list supported task modes and exit */ + tasks_list(); + return EXIT_SUCCESS; + } + + /* close ports at normal exit */ + atexit(close_ports_atexit); + /* gracefully quit on harmless termination signals */ + signal(SIGHUP, sigterm_handler); + signal(SIGINT, sigterm_handler); + signal(SIGQUIT, sigterm_handler); + signal(SIGTERM, sigterm_handler); + signal(SIGUSR1, sigterm_handler); + signal(SIGUSR2, sigterm_handler); + /* more drastic exit on tedious termination signals */ + signal(SIGABRT, sigabrt_handler); + signal(SIGPIPE, sigabrt_handler); + + if (prox_cfg.flags & DSF_DAEMON) { + signal(SIGUSR1, siguser_handler); + signal(SIGUSR2, siguser_handler); + plog_info("=== Running in Daemon mode ===\n"); + plog_info("\tForking child and waiting for setup completion\n"); + + pid_t ppid = getpid(); + pid_t pid = fork(); + if (pid < 0) { + plog_err("Failed to fork process to run in daemon mode\n"); + return EXIT_FAILURE; + } + + if (pid == 0) { + fclose(stdin); + fclose(stdout); + fclose(stderr); + if (setsid() < 0) { + kill(ppid, SIGUSR2); + return EXIT_FAILURE; + } + if (setup_prox(argc, argv) != 0) { + kill(ppid, SIGUSR2); + return EXIT_FAILURE; + } + else { + kill(ppid, SIGUSR1); + run(prox_cfg.flags); + return EXIT_SUCCESS; + } + } + else { + /* Before exiting the parent, wait until the + child process has finished setting up */ + pause(); + if (prox_cfg.logbuf) { + file_print(prox_cfg.logbuf); + } + return success? 
EXIT_SUCCESS : EXIT_FAILURE; + } + } + + if (setup_prox(argc, argv) != 0) + return EXIT_FAILURE; + run(prox_cfg.flags); + return EXIT_SUCCESS; +} diff --git a/VNFs/DPPD-PROX/main.h b/VNFs/DPPD-PROX/main.h new file mode 100644 index 00000000..5daef700 --- /dev/null +++ b/VNFs/DPPD-PROX/main.h @@ -0,0 +1,41 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#include <rte_version.h> +#include "hash_entry_types.h" +#ifdef RTE_EXEC_ENV_BAREMETAL +#error A linuxapp configuration target is required! +#endif + +#if RTE_VERSION < RTE_VERSION_NUM(1,7,0,0) +#error At least Intel(R) DPDK version 1.7.0 is required +#endif + +#ifndef __INTEL_COMPILER +#if __GNUC__ == 4 && __GNUC_MINOR__ < 7 +#error Only GCC versions 4.7 and above supported +#endif +#endif + +struct rte_ring; +// in main.c +extern uint8_t port_status[]; +extern struct rte_ring *ctrl_rings[]; + +#endif /* _MAIN_H_ */ diff --git a/VNFs/DPPD-PROX/mbuf_utils.h b/VNFs/DPPD-PROX/mbuf_utils.h new file mode 100644 index 00000000..22d57a39 --- /dev/null +++ b/VNFs/DPPD-PROX/mbuf_utils.h @@ -0,0 +1,57 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _MBUF_UTILS_H_ +#define _MBUF_UTILS_H_ + +#include <string.h> + +#include <rte_ip.h> +#include <rte_version.h> +#include <rte_ether.h> + +static void init_mbuf_seg(struct rte_mbuf *mbuf) +{ +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + mbuf->nb_segs = 1; +#else + mbuf->pkt.nb_segs = 1; +#endif + rte_mbuf_refcnt_set(mbuf, 1); +} + +static uint16_t pkt_len_to_wire_size(uint16_t pkt_len) +{ + return (pkt_len < 60? 60 : pkt_len) + ETHER_CRC_LEN + 20; +} + +static uint16_t mbuf_wire_size(const struct rte_mbuf *mbuf) +{ + uint16_t pkt_len = rte_pktmbuf_pkt_len(mbuf); + + return pkt_len_to_wire_size(pkt_len); +} + +static uint16_t mbuf_calc_padlen(const struct rte_mbuf *mbuf, void *pkt, struct ipv4_hdr *ipv4) +{ + uint16_t pkt_len = rte_pktmbuf_pkt_len(mbuf); + uint16_t ip_offset = (uint8_t *)ipv4 - (uint8_t*)pkt; + uint16_t ip_total_len = rte_be_to_cpu_16(ipv4->total_length); + + return pkt_len - ip_total_len - ip_offset; +} + +#endif /* _MBUF_UTILS_H_ */ diff --git a/VNFs/DPPD-PROX/mpls.h b/VNFs/DPPD-PROX/mpls.h new file mode 100644 index 00000000..93f3e3d5 --- /dev/null +++ b/VNFs/DPPD-PROX/mpls.h @@ -0,0 +1,33 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _MPLS_H_ +#define _MPLS_H_ + +struct mpls_hdr { + union { + struct { + uint16_t lbl_h; /* Label */ + uint8_t bos: 1; /* Bottom of Stack */ + uint8_t cos: 3; /* Class of Service */ + uint8_t lbl_l: 4; /* Label */ + uint8_t ttl; /* Time to Live, 64 */ + }; + uint32_t bytes; + }; +} __attribute__((__packed__)); + +#endif /* _MPLS_H_ */ diff --git a/VNFs/DPPD-PROX/msr.c b/VNFs/DPPD-PROX/msr.c new file mode 100644 index 00000000..194d4c75 --- /dev/null +++ b/VNFs/DPPD-PROX/msr.c @@ -0,0 +1,80 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <inttypes.h> +#include <unistd.h> +#include <string.h> +#include <stdio.h> +#include <fcntl.h> + +#include "msr.h" +#include "log.h" + +int msr_fd[RTE_MAX_LCORE]; +int n_msr_fd; +int msr_init(void) +{ + char msr_path[1024]; + + if (n_msr_fd) { + return 0; + } + + for (uint32_t i = 0; i < sizeof(msr_fd)/sizeof(*msr_fd); ++i, n_msr_fd = i) { + snprintf(msr_path, sizeof(msr_path), "/dev/cpu/%u/msr", i); + msr_fd[i] = open(msr_path, O_RDWR); + if (msr_fd[i] < 0) { + return i == 0? -1 : 0; + } + } + + return 0; +} + +void msr_cleanup(void) +{ + for (int i = 0; i < n_msr_fd; ++i) { + close(msr_fd[i]); + } + + n_msr_fd = 0; +} + +int msr_read(uint64_t *ret, int lcore_id, off_t offset) +{ + if (lcore_id > n_msr_fd) { + return -1; + } + + if (0 > pread(msr_fd[lcore_id], ret, sizeof(uint64_t), offset)) { + return -1; + } + + return 0; +} + +int msr_write(int lcore_id, uint64_t val, off_t offset) +{ + if (lcore_id > n_msr_fd) { + return -1; + } + + if (sizeof(uint64_t) != pwrite(msr_fd[lcore_id], &val, sizeof(uint64_t), offset)) { + return -1; + } + plog_dbg("\t\tmsr_write(core %d, offset %x, val %lx)\n", lcore_id, (int)offset, val); + return 0; +} diff --git a/VNFs/DPPD-PROX/msr.h b/VNFs/DPPD-PROX/msr.h new file mode 100644 index 00000000..a8a46c86 --- /dev/null +++ b/VNFs/DPPD-PROX/msr.h @@ -0,0 +1,24 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <inttypes.h> +#include <fcntl.h> + +int msr_init(void); +void msr_cleanup(void); + +int msr_read(uint64_t *ret, int lcore_id, off_t offset); +int msr_write(int lcore_id, uint64_t val, off_t offset); diff --git a/VNFs/DPPD-PROX/parse_utils.c b/VNFs/DPPD-PROX/parse_utils.c new file mode 100644 index 00000000..d258c591 --- /dev/null +++ b/VNFs/DPPD-PROX/parse_utils.c @@ -0,0 +1,1420 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <ctype.h> +#include <stdio.h> +#include <float.h> +#include <math.h> +#include <unistd.h> +#include <errno.h> +#include <stdarg.h> + +#include <rte_ether.h> +#include <rte_string_fns.h> + +#include "quit.h" +#include "cfgfile.h" +#include "ip6_addr.h" +#include "parse_utils.h" +#include "prox_globals.h" +#include "prox_cfg.h" +#include "log.h" +#include "prox_lua.h" +#include "prox_lua_types.h" + +#define MAX_NB_PORT_NAMES PROX_MAX_PORTS +#define MAX_LEN_PORT_NAME 24 +#define MAX_LEN_VAR_NAME 24 +#define MAX_LEN_VAL 512 +#define MAX_NB_VARS 32 + +#if MAX_WT_PER_LB > MAX_INDEX +#error MAX_WT_PER_LB > MAX_INDEX +#endif + +/* The CPU topology of the system is used to parse "socket + notation". This notation allows to refer to cores on specific + sockets and the hyper-thread of those cores. The CPU topology is + loaded only if the socket notation is used at least once. 
*/ + +struct cpu_topology { + int socket[MAX_SOCKETS][RTE_MAX_LCORE][2]; + uint32_t n_cores[MAX_SOCKETS]; + uint32_t n_sockets; +}; + +struct cpu_topology cpu_topo; + +struct port_name { + uint32_t id; + char name[MAX_LEN_PORT_NAME]; +}; + +static struct port_name port_names[MAX_NB_PORT_NAMES]; +static uint8_t nb_port_names; + +struct var { + uint8_t cli; + char name[MAX_LEN_VAR_NAME]; + char val[MAX_LEN_VAL]; +}; + +static struct var vars[MAX_NB_VARS]; +static uint8_t nb_vars; + +static char format_err_str[256]; +static const char *err_str = ""; + +const char *get_parse_err(void) +{ + return err_str; +} + +static int read_cpu_topology(void); + +static int parse_core(int *socket, int *core, int *ht, const char* str); + +static void set_errf(const char *format, ...) +{ + va_list ap; + va_start(ap, format); + vsnprintf(format_err_str, sizeof(format_err_str), format, ap); + va_end(ap); + err_str = format_err_str; +} + +static struct var *var_lookup(const char *name) +{ + for (uint8_t i = 0; i < nb_vars; ++i) { + if (!strcmp(name, vars[i].name)) { + return &vars[i]; + } + } + return NULL; +} + +static int parse_single_var(char *val, size_t len, const char *name) +{ + struct var *match; + + match = var_lookup(name); + if (match) { + if (strlen(match->val) + 1 > len) { + set_errf("Variables '%s' with value '%s' is too long\n", + match->name, match->val); + return -1; + } + strncpy(val, match->val, len); + return 0; + } + else { + /* name + 1 to skip leading '$' */ + if (lua_to_string(prox_lua(), GLOBAL, name + 1, val, len) >= 0) + return 0; + } + + set_errf("Variable '%s' not defined!", name); + return 1; +} + +/* Replace $... 
and each occurrence of ${...} with variable values */ +int parse_vars(char *val, size_t len, const char *name) +{ + static char result[MAX_CFG_STRING_LEN]; + static char cur_var[MAX_CFG_STRING_LEN]; + char parsed[2048]; + size_t name_len = strlen(name); + enum parse_vars_state {NO_VAR, WHOLE_VAR, INLINE_VAR} state = NO_VAR; + size_t result_len = 0; + size_t start_var = 0; + + memset(result, 0, sizeof(result)); + PROX_PANIC(name_len > sizeof(result), "\tUnable to parse var %s: too long\n", name); + + for (size_t i = 0; i < name_len; ++i) { + switch (state) { + case NO_VAR: + if (name[i] == '$') { + if (i != name_len - 1 && name[i + 1] == '{') { + start_var = i + 2; + state = INLINE_VAR; + i = i + 1; + } + else if (i == 0 && i != name_len - 1) { + state = WHOLE_VAR; + } + else { + set_errf("Invalid variable syntax"); + return -1; + } + } + else { + result[result_len++] = name[i]; + } + break; + case INLINE_VAR: + if (name[i] == '}') { + cur_var[0] = '$'; + size_t var_len = i - start_var; + if (var_len == 0) { + set_errf("Empty variable are not allowed"); + return -1; + } + + strncpy(&cur_var[1], &name[start_var], var_len); + cur_var[1 + var_len] = 0; + if (parse_single_var(parsed, sizeof(parsed), cur_var)) { + return -1; + } + strcpy(&result[result_len], parsed); + result_len += strlen(parsed); + state = NO_VAR; + } + else if (i == name_len - 1) { + set_errf("Invalid variable syntax, expected '}'."); + return -1; + } + break; + case WHOLE_VAR: + if (i == name_len - 1) { + return parse_single_var(val, len, name); + } + break; + } + } + strncpy(val, result, len); + + return 0; +} + +int parse_int_mask(uint32_t *val, uint32_t *mask, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + char *mask_str; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + mask_str = strchr(str, '&'); + + if (mask_str == NULL) { + set_errf("Missing '&' when parsing mask"); + return -2; + } + + *mask_str = 0; + + if (parse_int(val, str)) + return -1; + if (parse_int(mask, mask_str + 
1)) + return -1; + + return 0; +} + +int parse_range(uint32_t* lo, uint32_t* hi, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + char *dash; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + dash = strstr(str, "-"); + + if (dash == NULL) { + set_errf("Missing '-' when parsing mask"); + return -2; + } + + *dash = 0; + + if (parse_int(lo, str)) + return -1; + if (parse_int(hi, dash + 1)) + return -1; + + int64_t tmp = strtol(str, 0, 0); + if (tmp > UINT32_MAX) { + set_errf("Integer is bigger than %u", UINT32_MAX); + return -1; + } + if (tmp < 0) { + set_errf("Integer is negative"); + return -2; + } + + *lo = tmp; + + tmp = strtol(dash + 1, 0, 0); + if (tmp > UINT32_MAX) { + set_errf("Integer is bigger than %u", UINT32_MAX); + return -1; + } + if (tmp < 0) { + set_errf("Integer is negative"); + return -2; + } + + *hi = tmp; + + if (*lo > *hi) { + set_errf("Low boundary is above high boundary in range"); + return -2; + } + + return 0; +} + +int parse_ip(uint32_t *addr, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + char *ip_parts[5]; + + if (strlen(str) > MAX_STR_LEN_PROC) { + set_errf("String too long (max supported: %d)", MAX_STR_LEN_PROC); + return -2; + } + + if (4 != rte_strsplit(str, strlen(str), ip_parts, 5, '.')) { + set_errf("Expecting 4 octets in ip."); + return -1; + } + + uint32_t val; + for (uint8_t i = 0; i < 4; ++i) { + val = atoi(ip_parts[i]); + if (val > 255) { + set_errf("Maximum value for octet is 255 but octet %u is %u", i, val); + return -1; + } + *addr = *addr << 8 | val; + } + return 0; +} + +int parse_ip4_cidr(struct ip4_subnet *val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + char *slash; + int prefix; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + slash = strstr(str, "/"); + + if (slash == NULL) { + set_errf("Missing '/' when parsing CIDR notation"); + return -2; + } + + *slash = 0; + prefix = atoi(slash + 1); + val->prefix = prefix; + + if 
(prefix > 32) { + set_errf("Prefix %d is too big", prefix); + return -2; + } + if (prefix < 1) { + set_errf("Prefix %d is too small", prefix); + } + if (parse_ip(&val->ip, str)) + return -2; + + /* Apply mask making all bits outside the prefix zero */ + val->ip &= ((int)(1 << 31)) >> (prefix - 1); + + return 0; +} + +int parse_ip6_cidr(struct ip6_subnet *val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + char *slash; + int prefix; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + slash = strstr(str, "/"); + + if (slash == NULL) { + set_errf("Missing '/' when parsing CIDR notation"); + return -2; + } + + *slash = 0; + prefix = atoi(slash + 1); + val->prefix = prefix; + + parse_ip6((struct ipv6_addr *)&val->ip, str); + + /* Apply mask making all bits outside the prefix zero */ + + int p = 120; + int cnt = 0; + + while (p >= prefix) { + val->ip[15-cnt] = 0; + p -= 8; + cnt++; + } + + if (prefix % 8 != 0) { + val->ip[15-cnt] &= ((int8_t)(1 << 7)) >> ((prefix %8) - 1); + } + + return 0; +} + +int parse_ip6(struct ipv6_addr *addr, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + char *addr_parts[9]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + uint8_t ret = rte_strsplit(str, strlen(str), addr_parts, 9, ':'); + + if (ret == 9) { + set_errf("Invalid IPv6 address"); + return -1; + } + + uint8_t omitted = 0; + + for (uint8_t i = 0, j = 0; i < ret; ++i, ++j) { + if (*addr_parts[i] == 0) { + if (omitted == 0) { + set_errf("Can only omit zeros once"); + return -1; + } + omitted = 1; + j += 8 - ret; + } + else { + uint16_t w = strtoll(addr_parts[i], NULL, 16); + addr->bytes[j++] = (w >> 8) & 0xff; + addr->bytes[j] = w & 0xff; + } + } + return 0; +} + +int parse_mac(struct ether_addr *ether_addr, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + char *addr_parts[7]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + uint8_t ret = rte_strsplit(str, strlen(str), addr_parts, 7, ':'); + + if (ret != 6) { + set_errf("Invalid MAC 
address format"); + return -1; + } + + for (uint8_t i = 0; i < 6; ++i) { + if (2 != strlen(addr_parts[i])) { + set_errf("Invalid MAC address format"); + return -1; + } + ether_addr->addr_bytes[i] = strtol(addr_parts[i], NULL, 16); + } + + return 0; +} + +char* get_cfg_key(char *str) +{ + char *pkey = strchr(str, '='); + + if (pkey == NULL) { + return NULL; + } + *pkey++ = '\0'; + + /* remove leading spaces */ + while (isspace(*pkey)) { + pkey++; + } + if (*pkey == '\0') { /* an empty key */ + return NULL; + } + + return pkey; +} + +void strip_spaces(char *strings[], const uint32_t count) +{ + for (uint32_t i = 0; i < count; ++i) { + while (isspace(strings[i][0])) { + ++strings[i]; + } + size_t len = strlen(strings[i]); + + while (len && isspace(strings[i][len - 1])) { + strings[i][len - 1] = '\0'; + --len; + } + } +} + +int is_virtualized(void) +{ + char buf[1024]= "/proc/cpuinfo"; + int virtualized = 0; + FILE* fd = fopen(buf, "r"); + if (fd == NULL) { + set_errf("Could not open %s", buf); + return -1; + } + while (fgets(buf, sizeof(buf), fd) != NULL) { + if ((strstr(buf, "flags") != NULL) && (strstr(buf, "hypervisor") != NULL)) + virtualized = 1; + } + fclose(fd); + return virtualized; +} + +static int get_phys_core(uint32_t *dst, int lcore_id) +{ + uint32_t ret; + char buf[1024]; + snprintf(buf, sizeof(buf), "/sys/devices/system/cpu/cpu%u/topology/thread_siblings_list", lcore_id); + FILE* ht_fd = fopen(buf, "r"); + + if (ht_fd == NULL) { + set_errf("Could not open cpu topology %s", buf); + return -1; + } + + if (fgets(buf, sizeof(buf), ht_fd) == NULL) { + set_errf("Could not read cpu topology"); + return -1; + } + fclose(ht_fd); + + uint32_t list[2] = {-1,-1}; + parse_list_set(list, buf, 2); + + *dst = list[0]; + + return 0; +} + +static int get_socket(uint32_t core_id, uint32_t *socket) +{ + int ret = -1; + char buf[1024]; + snprintf(buf, sizeof(buf), "/sys/devices/system/cpu/cpu%u/topology/physical_package_id", core_id); + FILE* fd = fopen(buf, "r"); + + if 
(fd == NULL) { + set_errf("%s", buf); + return -1; + } + + if (fgets(buf, sizeof(buf), fd) != NULL) { + ret = atoi(buf); + } + fclose(fd); + + if (socket) + *socket = (ret == -1 ? 0 : ret); + + return 0; +} + +int lcore_to_socket_core_ht(uint32_t lcore_id, char *dst, size_t len) +{ + if (cpu_topo.n_sockets == 0) { + if (read_cpu_topology() == -1) { + return -1; + } + } + + for (uint32_t s = 0; s < cpu_topo.n_sockets; s++) { + for (uint32_t i = 0; i < cpu_topo.n_cores[s]; ++i) { + if ((uint32_t)cpu_topo.socket[s][i][0] == lcore_id) { + snprintf(dst, len, "%us%u", i, s); + return 0; + } else if ((uint32_t)cpu_topo.socket[s][i][1] == lcore_id) { + snprintf(dst, len, "%us%uh", i, s); + return 0; + } + } + } + + return -1; +} + +static int get_lcore_id(uint32_t socket_id, uint32_t core_id, int ht) +{ + if (cpu_topo.n_sockets == 0) { + if (read_cpu_topology() == -1) { + return -1; + } + } + + if (socket_id == UINT32_MAX) + socket_id = 0; + + if (socket_id >= MAX_SOCKETS) { + set_errf("Socket id %d too high (max allowed is %d)", MAX_SOCKETS); + return -1; + } + if (core_id >= RTE_MAX_LCORE) { + set_errf("Core id %d too high (max allowed is %d)", RTE_MAX_LCORE); + return -1; + } + if (socket_id >= cpu_topo.n_sockets) { + set_errf("Current CPU topology reported that there are %u CPU sockets, CPU topology = %u socket(s), %u physical cores per socket, %u thread(s) per physical core", + cpu_topo.n_sockets, cpu_topo.n_sockets, cpu_topo.n_cores[0], cpu_topo.socket[0][0][1] == -1? 1: 2); + return -1; + } + if (core_id >= cpu_topo.n_cores[socket_id]) { + set_errf("Core %u on socket %u does not exist, CPU topology = %u socket(s), %u physical cores per socket, %u thread(s) per physical core", + core_id, socket_id, cpu_topo.n_sockets, cpu_topo.n_cores[0], cpu_topo.socket[socket_id][0][1] == -1? 
1: 2); + return -1; + } + if (cpu_topo.socket[socket_id][core_id][!!ht] == -1) { + set_errf("Core %u %son socket %u has no hyper-thread, CPU topology = %u socket(s), %u physical cores per socket, %u thread(s) per physical core", + core_id, ht ? "(hyper-thread) " : "", socket_id, cpu_topo.n_sockets, cpu_topo.n_cores[0], cpu_topo.socket[socket_id][core_id][1] == -1? 1: 2); + + return -1; + } + return cpu_topo.socket[socket_id][core_id][!!ht]; +} + +/* Returns 0 on success, negative on error. Parses the syntax XsYh + where sYh is optional. If sY is specified, Y is stored in the + socket argument. If, in addition, h is specified, *ht is set to + 1. In case the input is only a number, socket and ht are set to + -1.*/ +static int parse_core(int *socket, int *core, int *ht, const char* str) +{ + *socket = -1; + *core = -1; + *ht = -1; + + char* end; + + *core = strtol(str, &end, 10); + + if (*end == 's') { + *socket = 0; + *ht = 0; + + if (cpu_topo.n_sockets == 0) { + if (read_cpu_topology() == -1) { + return -1; + } + } + + ++end; + *socket = strtol(end, &end, 10); + if (*socket >= MAX_SOCKETS) { + set_errf("Socket id %d too high (max allowed is %d)", *socket, MAX_SOCKETS - 1); + return -1; + } + + if (*end == 'h') { + ++end; + *ht = 1; + } + + return 0; + } + + if (*end == 'h') { + set_errf("Can't find hyper-thread since socket has not been specified"); + return -1; + } + + return 0; +} + +static int parse_task(const char *str, uint32_t *socket, uint32_t *core, uint32_t *task, uint32_t *ht, enum ctrl_type *type) +{ + const char *str_beg = str; + char *end; + + *core = strtol(str, &end, 10); + if (str == end) { + set_errf("Expected number to in core-task definition:\n" + "\t(i.e. 
5s1t0 for task 0 on core 5 on socket 1)\n" + "\tHave: '%s'.", end); + return -1; + } + + *task = 0; + *socket = -1; + *ht = -1; + *type = 0; + + str = end; + + if (*str == 's') { + str++; + *socket = 0; + *ht = 0; + + *socket = strtol(str, &end, 10); + str = end; + + if (*str == 'h') { + str++; + *ht = 1; + } + if (*str == 't') { + str++; + *task = strtol(str, &end, 10); + str = end; + if (*str == 'p') { + *type = CTRL_TYPE_PKT; + str += 1; + } + else if (*str == 'm') { + *type = CTRL_TYPE_MSG; + str += 1; + } + } + } else { + if (*str == 'h') { + set_errf("Can't find hyper-thread since socket has not been specified"); + return -1; + } + if (*str == 't') { + str++; + *task = strtol(str, &end, 10); + str = end; + if (*str == 'p') { + *type = CTRL_TYPE_PKT; + str += 1; + } + else if (*str == 'm') { + *type = CTRL_TYPE_MSG; + str += 1; + } + } + } + return str - str_beg; +} + +static int core_task_set_add(struct core_task_set *val, uint32_t core, uint32_t task, enum ctrl_type type) +{ + if (val->n_elems == sizeof(val->core_task)/sizeof(val->core_task[0])) + return -1; + + val->core_task[val->n_elems].core = core; + val->core_task[val->n_elems].task = task; + val->core_task[val->n_elems].type = type; + val->n_elems++; + + return 0; +} + +int parse_task_set(struct core_task_set *cts, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + cts->n_elems = 0; + + char *str3 = str; + int ret; + + uint32_t socket_beg, core_beg, task_beg, ht_beg, + socket_end, core_end, task_end, ht_end; + enum ctrl_type type_beg, type_end; + uint32_t task_group_start = -1; + + while (*str3 && *str3 != ' ') { + if (*str3 == '(') { + task_group_start = cts->n_elems; + str3 += 1; + continue; + } + if (*str3 == ')' && *(str3 + 1) == 't') { + str3 += 2; + char *end; + uint32_t t = strtol(str3, &end, 10); + enum ctrl_type type = 0; + str3 = end; + + if (*str3 == 'p') { + type = CTRL_TYPE_PKT; + str3 += 1; + } + else if (*str3 == 'm') { + type = 
CTRL_TYPE_MSG; + str3 += 1; + } + + for (uint32_t i = task_group_start; i < cts->n_elems; ++i) { + cts->core_task[i].task = t; + cts->core_task[i].type = type; + } + continue; + } + ret = parse_task(str3, &socket_beg, &core_beg, &task_beg, &ht_beg, &type_beg); + if (ret < 0) + return -1; + str3 += ret; + socket_end = socket_beg; + core_end = core_beg; + task_end = task_beg; + ht_end = ht_beg; + type_end = type_beg; + + if (*str3 == '-') { + str3 += 1; + ret = parse_task(str3, &socket_end, &core_end, &task_end, &ht_end, &type_end); + if (ret < 0) + return -1; + str3 += ret; + } + + if (*str3 == ',') + str3 += 1; + + if (socket_end != socket_beg) { + set_errf("Same socket must be used in range syntax."); + return -1; + } else if (ht_beg != ht_end) { + set_errf("If 'h' syntax is in range, it must be specified everywhere.\n"); + return -1; + } else if (task_end != task_beg && core_end != core_beg) { + set_errf("Same task must be used in range syntax when cores are different.\n"); + return -1; + } else if (task_end < task_beg) { + set_errf("Task for end of range must be higher than task for beginning of range.\n"); + return -1; + } else if (type_end != type_beg) { + set_errf("Task type for end of range must be the same as task type for beginning.\n"); + return -1; + } else if (core_end < core_beg) { + set_errf("Core for end of range must be higher than core for beginning of range.\n"); + return -1; + } + + for (uint32_t j = core_beg; j <= core_end; ++j) { + if (socket_beg != UINT32_MAX && ht_beg != UINT32_MAX) + ret = get_lcore_id(socket_beg, j, ht_beg); + else + ret = j; + if (ret < 0) + return -1; + for (uint32_t k = task_beg; k <= task_end; ++k) { + core_task_set_add(cts, ret, k, type_beg); + } + } + } + return 0; +} + +int parse_list_set(uint32_t *list, const char *str2, uint32_t max_list) +{ + char str[MAX_STR_LEN_PROC]; + char *parts[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + int n_parts = rte_strsplit(str, strlen(str), parts, 
MAX_STR_LEN_PROC, ','); + size_t list_count = 0; + + for (int i = 0; i < n_parts; ++i) { + char *cur_part = parts[i]; + char *sub_parts[3]; + int n_sub_parts = rte_strsplit(cur_part, strlen(cur_part), sub_parts, 3, '-'); + int socket1, socket2; + int ht1, ht2; + int core1, core2; + int ret = 0; + + if (n_sub_parts == 1) { + if (parse_core(&socket1, &core1, &ht1, sub_parts[0])) + return -1; + + socket2 = socket1; + core2 = core1; + ht2 = ht1; + } else if (n_sub_parts == 2) { + if (parse_core(&socket1, &core1, &ht1, sub_parts[0])) + return -1; + if (parse_core(&socket2, &core2, &ht2, sub_parts[1])) + return -1; + } else if (n_sub_parts >= 3) { + set_errf("Multiple '-' characters in range syntax found"); + return -1; + } else { + set_errf("Invalid list syntax"); + return -1; + } + + if (socket1 != socket2) { + set_errf("Same socket must be used in range syntax"); + return -1; + } + else if (ht1 != ht2) { + set_errf("If 'h' syntax is in range, it must be specified everywhere."); + return -1; + } + + for (int cur_core = core1; cur_core <= core2; ++cur_core) { + int effective_core; + + if (socket1 != -1) + effective_core = get_lcore_id(socket1, cur_core, ht1); + else + effective_core = cur_core; + + if (list_count >= max_list) { + set_errf("Too many elements in list\n"); + return -1; + } + list[list_count++] = effective_core; + } + } + + return list_count; +} + +int parse_kmg(uint32_t* val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + char c = str[strlen(str) - 1]; + *val = atoi(str); + + switch (c) { + case 'G': + if (*val >> 22) + return -2; + *val <<= 10; + case 'M': + if (*val >> 22) + return -2; + *val <<= 10; + case 'K': + if (*val >> 22) + return -2; + *val <<= 10; + break; + default: + /* only support optional KMG suffix */ + if (c < '0' || c > '9') { + set_errf("Unknown syntax for KMG suffix '%c' (expected K, M or G)", c); + return -1; + } + } + + return 0; +} + +int parse_bool(uint32_t* val, 
const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + if (!strcmp(str, "yes")) { + *val = 1; + return 0; + } + else if (!strcmp(str, "no")) { + *val = 0; + return 0; + } + set_errf("Unknown syntax for bool '%s' (expected yes or no)", str); + return -1; +} + +int parse_flag(uint32_t* val, uint32_t flag, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + uint32_t tmp; + if (parse_bool(&tmp, str)) + return -1; + + if (tmp) + *val |= flag; + else + *val &= ~flag; + + return 0; +} + +int parse_int(uint32_t* val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + int64_t tmp = strtol(str, 0, 0); + if (tmp > UINT32_MAX) { + set_errf("Integer is bigger than %u", UINT32_MAX); + return -1; + } + if (tmp < 0) { + set_errf("Integer is negative"); + return -2; + } + *val = tmp; + + return 0; +} + +int parse_float(float* val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + float tmp = strtof(str, 0); + if ((tmp >= HUGE_VALF) || (tmp <= -HUGE_VALF)) { + set_errf("Unable to parse float\n"); + return -1; + } + *val = tmp; + + return 0; +} + +int parse_u64(uint64_t* val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + errno = 0; + uint64_t tmp = strtoul(str, NULL, 0); + if (errno != 0) { + set_errf("Invalid u64 '%s' (%s)", str, strerror(errno)); + return -2; + } + *val = tmp; + + return 0; +} + +int parse_str(char* dst, const char *str2, size_t max_len) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + if (strlen(str) > max_len - 1) { + set_errf("String too long (%u > %u)", strlen(str), max_len - 1); + return -2; + } + + strncpy(dst, str, max_len); + return 0; +} + +int parse_path(char *dst, const char *str, size_t max_len) +{ + if 
(parse_str(dst, str, max_len)) + return -1; + if (access(dst, F_OK)) { + set_errf("Invalid file '%s' (%s)", dst, strerror(errno)); + return -1; + } + return 0; +} + +int parse_port_name(uint32_t *val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + for (uint8_t i = 0; i < nb_port_names; ++i) { + if (!strcmp(str, port_names[i].name)) { + *val = port_names[i].id; + return 0; + } + } + set_errf("Port with name %s not defined", str); + return 1; +} + +int parse_port_name_list(uint32_t *val, uint32_t* tot, uint8_t max_vals, const char *str2) +{ + char *elements[PROX_MAX_PORTS + 1]; + char str[MAX_STR_LEN_PROC]; + uint32_t cur; + int ret; + + if (parse_str(str, str2, sizeof(str))) + return -1; + + ret = rte_strsplit(str, strlen(str), elements, PROX_MAX_PORTS + 1, ','); + + if (ret == PROX_MAX_PORTS + 1 || ret > max_vals) { + set_errf("Too many ports in port list"); + return -1; + } + + strip_spaces(elements, ret); + for (uint8_t i = 0; i < ret; ++i) { + if (parse_port_name(&cur, elements[i])) { + return -1; + } + val[i] = cur; + } + if (tot) { + *tot = ret; + } + return 0; +} + +int parse_remap(uint8_t *mapping, const char *str) +{ + char *elements[PROX_MAX_PORTS + 1]; + char *elements2[PROX_MAX_PORTS + 1]; + char str_cpy[MAX_STR_LEN_PROC]; + uint32_t val; + int ret, ret2; + + if (strlen(str) > MAX_STR_LEN_PROC) { + set_errf("String too long (max supported: %d)", MAX_STR_LEN_PROC); + return -2; + } + strncpy(str_cpy, str, MAX_STR_LEN_PROC); + + ret = rte_strsplit(str_cpy, strlen(str_cpy), elements, PROX_MAX_PORTS + 1, ','); + if (ret <= 0) { + set_errf("Invalid remap syntax"); + return -1; + } + else if (ret > PROX_MAX_PORTS) { + set_errf("Too many remaps"); + return -2; + } + + strip_spaces(elements, ret); + for (uint8_t i = 0; i < ret; ++i) { + ret2 = rte_strsplit(elements[i], strlen(elements[i]), elements2, PROX_MAX_PORTS + 1, '|'); + strip_spaces(elements2, ret2); + if (ret2 > PROX_MAX_PORTS) { + 
set_errf("Too many remaps"); + return -2; + } + for (uint8_t j = 0; j < ret2; ++j) { + if (parse_port_name(&val, elements2[j])) { + return -1; + } + + /* This port will be mapped to the i'th + element specified before remap=. */ + mapping[val] = i; + } + } + + return ret; +} + +int add_port_name(uint32_t val, const char *str2) +{ + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + struct port_name* pn; + + if (nb_port_names == MAX_NB_PORT_NAMES) { + set_errf("Too many ports defined (can define %d)", MAX_NB_PORT_NAMES); + return -1; + } + + for (uint8_t i = 0; i < nb_port_names; ++i) { + /* each port has to have a unique name*/ + if (!strcmp(str, port_names[i].name)) { + set_errf("Port with name %s is already defined", str); + return -2; + } + } + + pn = &port_names[nb_port_names]; + strncpy(pn->name, str, sizeof(pn->name)); + pn->id = val; + + ++nb_port_names; + return 0; +} + +int set_self_var(const char *str) +{ + for (uint8_t i = 0; i < nb_vars; ++i) { + if (!strcmp("$self", vars[i].name)) { + sprintf(vars[i].val, "%s", str); + return 0; + } + } + + struct var *v = &vars[nb_vars]; + + strncpy(v->name, "$self", strlen("$self")); + sprintf(v->val, "%s", str); + nb_vars++; + + return 0; +} + +int add_var(const char* name, const char *str2, uint8_t cli) +{ + struct var* v; + + char str[MAX_STR_LEN_PROC]; + + if (parse_vars(str, sizeof(str), str2)) + return -1; + + if (strlen(name) == 0 || strlen(name) == 1) { + set_errf("Can't define variables with empty name"); + return -1; + } + + if (name[0] != '$') { + set_errf("Each variable should start with the $ character"); + return -1; + } + + if (nb_vars == MAX_NB_VARS) { + set_errf("Too many variables defined (can define %d)", MAX_NB_VARS); + return -2; + } + + for (uint8_t i = 0; i < nb_vars; ++i) { + if (!strcmp(name, vars[i].name)) { + + /* Variables defined through program arguments + take precedence. 
*/ + if (!cli && vars[i].cli) { + return 0; + } + + set_errf("Variable with name %s is already defined", name); + return -3; + } + } + + v = &vars[nb_vars]; + PROX_PANIC(strlen(name) > sizeof(v->name), "\tUnable to parse var %s: too long\n", name); + PROX_PANIC(strlen(str) > sizeof(v->val), "\tUnable to parse var %s=%s: too long\n", name,str); + strncpy(v->name, name, sizeof(v->name)); + strncpy(v->val, str, sizeof(v->val)); + v->cli = cli; + + ++nb_vars; + return 0; +} + +static int read_cores_present(uint32_t *cores, int max_cores, int *res) +{ + FILE* fd = fopen("/sys/devices/system/cpu/present", "r"); + char buf[1024]; + + if (fd == NULL) { + set_errf("Could not opening file /sys/devices/system/cpu/present"); + return -1; + } + + if (fgets(buf, sizeof(buf), fd) == NULL) { + set_errf("Could not read cores range"); + return -1; + } + + fclose(fd); + + int ret = parse_list_set(cores, buf, max_cores); + + if (ret < 0) + return -1; + + *res = ret; + return 0; +} + +static int set_dummy_topology(void) +{ + int core_count = 0; + + for (int s = 0; s < MAX_SOCKETS; s++) { + for (int i = 0; i < 32; ++i) { + cpu_topo.socket[s][i][0] = core_count++; + cpu_topo.socket[s][i][1] = core_count++; + cpu_topo.n_cores[s]++; + } + } + cpu_topo.n_sockets = MAX_SOCKETS; + return 0; +} + +static int read_cpu_topology(void) +{ + if (cpu_topo.n_sockets != 0) + return 0; + if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) + return set_dummy_topology(); + + uint32_t cores[RTE_MAX_LCORE]; + int n_cores = 0; + + if (read_cores_present(cores, sizeof(cores)/sizeof(cores[0]), &n_cores) != 0) + return -1; + + for (int s = 0; s < MAX_SOCKETS; s++) { + for (int i = 0; i < RTE_MAX_LCORE; ++i) { + cpu_topo.socket[s][i][0] = -1; + cpu_topo.socket[s][i][1] = -1; + } + } + + for (int i = 0; i < n_cores; ++i) { + uint32_t socket_id, lcore_id, phys; + + lcore_id = cores[i]; + if (get_socket(lcore_id, &socket_id) != 0) + return -1; + if (socket_id >= MAX_SOCKETS) { + set_errf("Can't read CPU topology due too 
high socket ID (max allowed is %d)", + MAX_SOCKETS); + return -1; + } + if (socket_id >= cpu_topo.n_sockets) { + cpu_topo.n_sockets = socket_id + 1; + } + if (get_phys_core(&phys, lcore_id) != 0) + return -1; + if (phys >= RTE_MAX_LCORE) { + set_errf("Core ID %u too high", phys); + return -1; + } + + if (cpu_topo.socket[socket_id][phys][0] == -1) { + cpu_topo.socket[socket_id][phys][0] = lcore_id; + cpu_topo.n_cores[socket_id]++; + } + else if (cpu_topo.socket[socket_id][phys][1] == -1) { + cpu_topo.socket[socket_id][phys][1] = lcore_id; + } + else { + set_errf("Too many core siblings"); + return -1; + } + } + + /* There can be holes in the cpu_topo description at this + point. An example for this is a CPU topology where the + lowest core ID of 2 hyper-threads is always an even + number. Before finished up this phase, compact all the + cores to make the numbers consecutive. */ + + for (uint32_t i = 0; i < cpu_topo.n_sockets; ++i) { + int spread = 0, compact = 0; + while (cpu_topo.socket[i][spread][0] == -1) + spread++; + + for (uint32_t c = 0; c < cpu_topo.n_cores[i]; ++c) { + cpu_topo.socket[i][compact][0] = cpu_topo.socket[i][spread][0]; + cpu_topo.socket[i][compact][1] = cpu_topo.socket[i][spread][1]; + compact++; + spread++; + /* Skip gaps */ + while (cpu_topo.socket[i][spread][0] == -1) + spread++; + } + } + + return 0; +} + +static int bit_len_valid(uint32_t len, const char *str) +{ + if (len > 32) { + set_errf("Maximum random length is 32, but length of '%s' is %zu\n", str, len); + return 0; + } + if (len % 8) { + plog_err("Random should be multiple of 8 long\n"); + return 0; + } + if (len == 0) { + plog_err("Random should be at least 1 byte long\n"); + return 0; + } + return -1; +} + +int parse_random_str(uint32_t *mask, uint32_t *fixed, uint32_t *len, const char *str) +{ + const size_t len_bits = strlen(str); + + if (!bit_len_valid(len_bits, str)) + return -1; + + *mask = 0; + *fixed = 0; + *len = len_bits / 8; + + for (uint32_t j = 0; j < len_bits; ++j) { 
+ /* Store in the lower bits the value of the rand string (note + that these are the higher bits in LE). */ + switch (str[j]) { + case 'X': + *mask |= 1 << (len_bits - 1 - j); + break; + case '1': + *fixed |= 1 << (len_bits - 1 - j); + break; + case '0': + break; + default: + set_errf("Unexpected %c\n", str[j]); + return -1; + } + } + return 0; +} diff --git a/VNFs/DPPD-PROX/parse_utils.h b/VNFs/DPPD-PROX/parse_utils.h new file mode 100644 index 00000000..14aee9eb --- /dev/null +++ b/VNFs/DPPD-PROX/parse_utils.h @@ -0,0 +1,121 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _PARSE_UTILS_H_ +#define _PARSE_UTILS_H_ + +#include <inttypes.h> +#include "ip_subnet.h" + +#define MAX_STR_LEN_PROC (3 * 1518 + 20) + +struct ipv6_addr; +struct ether_addr; + +enum ctrl_type {CTRL_TYPE_DP, CTRL_TYPE_MSG, CTRL_TYPE_PKT}; + +struct core_task { + uint32_t core; + uint32_t task; + enum ctrl_type type; +}; + +struct core_task_set { + struct core_task core_task[64]; + uint32_t n_elems; +}; + +int parse_vars(char *val, size_t len, const char *name); + +int parse_int_mask(uint32_t* val, uint32_t* mask, const char *saddr); + +int parse_range(uint32_t* lo, uint32_t* hi, const char *saddr); + +/* parses CIDR notation. Note that bits within the address that are + outside the subnet (as specified by the prefix) are set to 0. 
*/ +int parse_ip4_cidr(struct ip4_subnet *val, const char *saddr); +int parse_ip6_cidr(struct ip6_subnet *val, const char *saddr); + +int parse_ip(uint32_t *paddr, const char *saddr); + +int parse_ip6(struct ipv6_addr *addr, const char *saddr); + +int parse_mac(struct ether_addr *paddr, const char *saddr); + +/* return error on overflow or invalid suffix*/ +int parse_kmg(uint32_t* val, const char *str); + +int parse_bool(uint32_t* val, const char *str); + +int parse_flag(uint32_t* val, uint32_t flag, const char *str); + +int parse_list_set(uint32_t *list, const char *str, uint32_t max_limit); + +int parse_task_set(struct core_task_set *val, const char *str); + +int parse_int(uint32_t* val, const char *str); +int parse_float(float* val, const char *str); + +int parse_u64(uint64_t* val, const char *str); + +int parse_str(char* dst, const char *str, size_t max_len); + +int parse_path(char *dst, const char *str, size_t max_len); + +int parse_port_name(uint32_t *val, const char *str); + +/* The syntax for random fields is X0010101XXX... where X is a + randomized bit and 0, 1 are fixed bit. The resulting mask and fixed + arguments are in BE order. */ +int parse_random_str(uint32_t *mask, uint32_t *fixed, uint32_t *len, const char *str); + +int parse_port_name_list(uint32_t *val, uint32_t *tot, uint8_t max_vals, const char *str); + +/* Parses a comma separated list containing a remapping of ports + specified by their name. Hence, all port names referenced from the + list have to be added using add_port_name() before this function + can be used. The first elements in the list are mapped to 0, the + second to 1, etc. Multiple elements can be mapped to the same + index. If multiple elements are used, they are separated by + pipes. An example would be p0|p1,p2|p3. In this example, p0 and p1 + both map to 0 and p2 and p3 map both map to 1. The mapping should + contain at least enough entries as port ids. 
*/ +int parse_remap(uint8_t *mapping, const char *str); + +/* Convert an lcore_id to socket notation */ +int lcore_to_socket_core_ht(uint32_t lcore_id, char *dst, size_t len); + +int add_port_name(uint32_t val, const char *str); + +/* The $self variable is something that can change its value (i.e. its + value represents the core that is currently being parsed). */ +int set_self_var(const char *str); + +int add_var(const char* name, const char *val, uint8_t cli); + +/* Parses str and returns pointer to the key value */ +char *get_cfg_key(char *str); + +/* Changes strings in place. */ +void strip_spaces(char *strings[], const uint32_t count); + +/* Contains error string if any of the above returned an error. */ +const char* get_parse_err(void); + +/* Returns true if running from a virtual machine. */ +int is_virtualized(void); + +#endif /* _PARSE_UTILS_H_ */ diff --git a/VNFs/DPPD-PROX/pkt_parser.h b/VNFs/DPPD-PROX/pkt_parser.h new file mode 100644 index 00000000..285d42f9 --- /dev/null +++ b/VNFs/DPPD-PROX/pkt_parser.h @@ -0,0 +1,178 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PKT_PARSER_H_ +#define _PKT_PARSER_H_ + +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_byteorder.h> + +#include "log.h" +#include "etypes.h" + +struct pkt_tuple { + uint32_t src_addr; + uint32_t dst_addr; + uint8_t proto_id; + uint16_t src_port; + uint16_t dst_port; + uint16_t l2_types[4]; +} __attribute__((packed)); + +struct l4_meta { + uint8_t *l4_hdr; + uint8_t *payload; + uint16_t len; +}; + +static void pkt_tuple_debug2(const struct pkt_tuple *pt) +{ + plogx_info("src_ip : %#010x\n", pt->src_addr); + plogx_info("dst_ip : %#010x\n", pt->dst_addr); + plogx_info("dst_port : %#06x\n", pt->dst_port); + plogx_info("src_port : %#06x\n", pt->src_port); + plogx_info("proto_id : %#04x\n", pt->proto_id); + plogx_info("l2 types: \n"); + for (int i = 0; i < 4; ++i) + plogx_info(" - %#04x\n", pt->l2_types[i]); +} + +static void pkt_tuple_debug(const struct pkt_tuple *pt) +{ + plogx_dbg("src_ip : %#010x\n", pt->src_addr); + plogx_dbg("dst_ip : %#010x\n", pt->dst_addr); + plogx_dbg("dst_port : %#06x\n", pt->dst_port); + plogx_dbg("src_port : %#06x\n", pt->src_port); + plogx_dbg("proto_id : %#04x\n", pt->proto_id); + plogx_dbg("l2 types: \n"); + for (int i = 0; i < 4; ++i) + plogx_dbg(" - %#04x\n", pt->l2_types[i]); +} + +/* Return 0 on success, i.e. packets parsed without any error. 
*/
static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta *l4_meta)
{
	struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	size_t l2_types_count = 0;
	struct ipv4_hdr* pip = 0;

	/* L2: record each ether type encountered, outermost first */
	pt->l2_types[l2_types_count++] = peth->ether_type;

	switch (peth->ether_type) {
	case ETYPE_IPv4:
		pip = (struct ipv4_hdr *)(peth + 1);
		break;
	case ETYPE_VLAN: {
		struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1);
		pt->l2_types[l2_types_count++] = vlan->eth_proto;
		if (vlan->eth_proto == ETYPE_IPv4) {
			/* BUG FIX: the IP header follows the VLAN tag; the old code
			 * read it at (peth + 1), i.e. on top of the tag itself. */
			pip = (struct ipv4_hdr *)(vlan + 1);
		}
		else if (vlan->eth_proto == ETYPE_VLAN) {
			/* BUG FIX: the inner tag was also read at (peth + 1), which
			 * re-read the outer tag and could never match IPv4. */
			struct vlan_hdr *vlan2 = (struct vlan_hdr *)(vlan + 1);
			pt->l2_types[l2_types_count++] = vlan2->eth_proto;
			if (vlan2->eth_proto == ETYPE_IPv4) {
				pip = (struct ipv4_hdr *)(vlan2 + 1);
			}
			else if (vlan2->eth_proto == ETYPE_IPv6) {
				return 1;
			}
			else {
				/* TODO: handle BAD PACKET */
				return 1;
			}
		}
		else {
			/* BUG FIX: an unhandled ether type after a single VLAN tag
			 * used to fall through with pip == NULL and crash below. */
			return 1;
		}
	}
		break;
	case ETYPE_8021ad: {
		struct vlan_hdr *svlan = (struct vlan_hdr *)(peth + 1);
		pt->l2_types[l2_types_count++] = svlan->eth_proto;
		if (svlan->eth_proto == ETYPE_VLAN) {
			/* BUG FIX: same (peth + 1) offset defects as the VLAN case */
			struct vlan_hdr *cvlan = (struct vlan_hdr *)(svlan + 1);
			pt->l2_types[l2_types_count++] = cvlan->eth_proto;
			if (cvlan->eth_proto == ETYPE_IPv4) {
				pip = (struct ipv4_hdr *)(cvlan + 1);
			}
			else {
				return 1;
			}
		}
		else {
			return 1;
		}
	}
		break;
	case ETYPE_MPLSU:
		return -1;
	default:
		plogx_err("Parsing error: unknown packet ether type = %#06x\n", peth->ether_type);
		return -1;
	}

	/* L3: only plain IPv4 with a 20-byte header (IHL == 5) is handled;
	 * (pip + 1) below relies on that check. */
	if ((pip->version_ihl >> 4) == 4) {

		if ((pip->version_ihl & 0x0f) != 0x05) {
			/* TODO: optional fields */
			return 1;
		}

		pt->proto_id = pip->next_proto_id;
		pt->src_addr = pip->src_addr;
		pt->dst_addr = pip->dst_addr;
	}
	else {
		/* TODO: IPv6 and bad packets */
		return 1;
	}

	/* L4 parser */
	if (pt->proto_id == IPPROTO_UDP) {
		struct udp_hdr *udp = (struct udp_hdr*)(pip + 1);
l4_meta->l4_hdr = (uint8_t*)udp; + pt->src_port = udp->src_port; + pt->dst_port = udp->dst_port; + l4_meta->payload = ((uint8_t*)udp) + sizeof(struct udp_hdr); + l4_meta->len = rte_be_to_cpu_16(udp->dgram_len) - sizeof(struct udp_hdr); + } + else if (pt->proto_id == IPPROTO_TCP) { + struct tcp_hdr *tcp = (struct tcp_hdr*)(pip + 1); + l4_meta->l4_hdr = (uint8_t*)tcp; + pt->src_port = tcp->src_port; + pt->dst_port = tcp->dst_port; + + l4_meta->payload = ((uint8_t*)tcp) + ((tcp->data_off >> 4)*4); + l4_meta->len = rte_be_to_cpu_16(pip->total_length) - sizeof(struct ipv4_hdr) - ((tcp->data_off >> 4)*4); + } + else { + plog_err("unsupported protocol %d\n", pt->proto_id); + return 1; + } + + for (; l2_types_count < sizeof(pt->l2_types)/sizeof(pt->l2_types[0]); ++l2_types_count) + pt->l2_types[l2_types_count] = 0; + + return 0; +} + +#endif /* _PKT_PARSER_H_ */ diff --git a/VNFs/DPPD-PROX/pkt_prototypes.h b/VNFs/DPPD-PROX/pkt_prototypes.h new file mode 100644 index 00000000..5d55bacb --- /dev/null +++ b/VNFs/DPPD-PROX/pkt_prototypes.h @@ -0,0 +1,54 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PKT_PROTOTYPES_H_ +#define _PKT_PROTOTYPES_H_ + +#include <rte_ip.h> + +#include "gre.h" +#include "qinq.h" +#include "etypes.h" + +static const struct gre_hdr gre_hdr_proto = { + .type = ETYPE_IPv4, + .version = 0, + .flags = 0, + .recur = 0, + .bits = GRE_KEY_PRESENT +}; + +static const struct ipv4_hdr tunnel_ip_proto = { + .version_ihl = 0x45, + .type_of_service = 0, + .packet_id = 0, + .fragment_offset = 0x40, + /* no fragmentation */ + .time_to_live = 0x40, + /* gre protocol type */ + .next_proto_id = IPPROTO_GRE, + .hdr_checksum = 0 +}; + +static const struct qinq_hdr qinq_proto = { + .svlan.vlan_tci = 0, + .cvlan.vlan_tci = 0, + .svlan.eth_proto = ETYPE_8021ad, + .cvlan.eth_proto = ETYPE_VLAN, + .ether_type = ETYPE_IPv4 +}; + +#endif /* _PKT_PROTOTYPES_H_ */ diff --git a/VNFs/DPPD-PROX/prefetch.h b/VNFs/DPPD-PROX/prefetch.h new file mode 100644 index 00000000..a42afe4a --- /dev/null +++ b/VNFs/DPPD-PROX/prefetch.h @@ -0,0 +1,63 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PREFETCH_H_ +#define _PREFETCH_H_ + +#include <rte_mbuf.h> + +static inline void prefetch_nta(volatile void *p) +{ + asm volatile ("prefetchnta %[p]" : [p] "+m" (*(volatile char *)p)); +} + +#ifdef PROX_PREFETCH_OFFSET +#define PREFETCH0(p) rte_prefetch0(p) +#define PREFETCH_OFFSET PROX_PREFETCH_OFFSET +#else +#define PREFETCH0(p) do {} while (0) +#define PREFETCH_OFFSET 0 +#endif + +static inline void prefetch_pkts(__attribute__((unused)) struct rte_mbuf **mbufs, __attribute__((unused)) uint16_t n_pkts) +{ +#ifdef PROX_PREFETCH_OFFSET + for (uint16_t j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + for (uint16_t j = PROX_PREFETCH_OFFSET; j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + PREFETCH0(rte_pktmbuf_mtod(mbufs[j - PROX_PREFETCH_OFFSET], void*)); + } + for (uint16_t j = n_pkts - PROX_PREFETCH_OFFSET; j < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j], void*)); + } +#endif +} + +static inline void prefetch_first(__attribute__((unused)) struct rte_mbuf **mbufs, __attribute__((unused)) uint16_t n_pkts) +{ +#ifdef PROX_PREFETCH_OFFSET + for (uint16_t j = 0; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(mbufs[j]); + } + for (uint16_t j = 1; j < PROX_PREFETCH_OFFSET && j < n_pkts; ++j) { + PREFETCH0(rte_pktmbuf_mtod(mbufs[j - 1], void *)); + } +#endif +} + +#endif /* _PREFETCH_H_ */ diff --git a/VNFs/DPPD-PROX/prox_args.c b/VNFs/DPPD-PROX/prox_args.c new file mode 100644 index 00000000..df9a2ca4 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_args.c @@ -0,0 +1,1975 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <unistd.h> +#include <string.h> + +#include <rte_sched.h> +#include <rte_string_fns.h> +#include <rte_version.h> + +#include "prox_malloc.h" +#include "version.h" +#include "defines.h" +#include "prox_args.h" +#include "prox_assert.h" +#include "prox_cfg.h" +#include "cfgfile.h" +#include "quit.h" +#include "log.h" +#include "parse_utils.h" +#include "prox_port_cfg.h" +#include "defaults.h" +#include "prox_lua.h" +#include "cqm.h" + +#define MAX_RTE_ARGV 64 +#define MAX_ARG_LEN 64 + +struct cfg_depr { + const char *opt; + const char *info; +}; + +/* Helper macro */ +#define STR_EQ(s1, s2) (!strcmp((s1), (s2))) + +/* configuration files support */ +static int get_rte_cfg(unsigned sindex, char *str, void *data); +static int get_global_cfg(unsigned sindex, char *str, void *data); +static int get_port_cfg(unsigned sindex, char *str, void *data); +static int get_defaults_cfg(unsigned sindex, char *str, void *data); +static int get_cache_set_cfg(unsigned sindex, char *str, void *data); +static int get_var_cfg(unsigned sindex, char *str, void *data); +static int get_lua_cfg(unsigned sindex, char *str, void *data); +static int get_core_cfg(unsigned sindex, char *str, void *data); + +static const char *cfg_file = DEFAULT_CONFIG_FILE; +static struct rte_cfg rte_cfg; +struct prox_cache_set_cfg prox_cache_set_cfg[PROX_MAX_CACHE_SET]; + +static char format_err_str[1024]; +static const char *err_str = "Unknown error"; + +static struct cfg_section eal_default_cfg = { + .name = "eal options", + .parser = get_rte_cfg, + .data = &rte_cfg, + 
.indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static struct cfg_section port_cfg = { + .name = "port #", + .parser = get_port_cfg, + .data = &prox_port_cfg, + .indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static struct cfg_section var_cfg = { + .name = "variables", + .parser = get_var_cfg, + .data = 0, + .indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static struct cfg_section cache_set_cfg = { + .name = "cache set #", + .parser = get_cache_set_cfg, + .data = &prox_cache_set_cfg, + .indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static struct cfg_section defaults_cfg = { + .name = "defaults", + .parser = get_defaults_cfg, + .data = 0, + .indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static struct cfg_section settings_cfg = { + .name = "global", + .parser = get_global_cfg, + .data = &prox_cfg, + .indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static struct cfg_section lua_cfg = { + .name = "lua", + .parser = get_lua_cfg, + .raw_lines = 1, + .indexp[0] = 0, + .nbindex = 1, + .error = 0, +}; + +static struct cfg_section core_cfg = { + .name = "core #", + .parser = get_core_cfg, + .data = lcore_cfg_init, + .indexp[0] = 0, + .nbindex = 1, + .error = 0 +}; + +static void set_errf(const char *format, ...) 
+{ + va_list ap; + va_start(ap, format); + vsnprintf(format_err_str, sizeof(format_err_str), format, ap); + va_end(ap); + err_str = format_err_str; +} + +/* [eal options] parser */ +static int get_rte_cfg(__attribute__((unused))unsigned sindex, char *str, void *data) +{ + struct rte_cfg *pconfig = (struct rte_cfg *)data; + + if (str == NULL || pconfig == NULL) { + return -1; + } + + char *pkey = get_cfg_key(str); + if (pkey == NULL) { + set_errf("Missing key after option"); + return -1; + } + + if (STR_EQ(str, "-m")) { + return parse_int(&pconfig->memory, pkey); + } + if (STR_EQ(str, "-n")) { + if (parse_int(&pconfig->force_nchannel, pkey)) { + return -1; + } + if (pconfig->force_nchannel == 0) { + set_errf("Invalid number of memory channels"); + return -1; + } + return 0; + } + if (STR_EQ(str, "-r")) { + if (parse_int(&pconfig->force_nrank, pkey)) { + return -1; + } + if (pconfig->force_nrank == 0 || pconfig->force_nrank > 16) { + set_errf("Invalid number of memory ranks"); + return -1; + } + return 0; + } + /* debug options */ + if (STR_EQ(str, "no-pci")) { + return parse_bool(&pconfig->no_pci, pkey); + } + if (STR_EQ(str, "no-hpet")) { + return parse_bool(&pconfig->no_hpet, pkey); + } + if (STR_EQ(str, "no-shconf")) { + return parse_bool(&pconfig->no_shconf, pkey); + } + if (STR_EQ(str, "no-huge")) { + return parse_bool(&pconfig->no_hugetlbfs, pkey); + } + if (STR_EQ(str, "no-output")) { + return parse_bool(&pconfig->no_output, pkey); + } + + if (STR_EQ(str, "huge-dir")) { + if (pconfig->hugedir) { + free(pconfig->hugedir); + } + pconfig->hugedir = strdup(pkey); + return 0; + } + + if (STR_EQ(str, "eal")) { + char eal[MAX_STR_LEN_PROC]; + if (pconfig->eal) { + free(pconfig->eal); + pconfig->eal = NULL; + } + if (parse_str(eal, pkey, sizeof(eal))) + return -1; + pkey = eal; + strip_spaces(&pkey, 1); + if (*pkey) + pconfig->eal = strdup(pkey); + return 0; + } + + set_errf("Option '%s' is not known", str); + return -1; +} + +struct cfg_depr global_cfg_depr[] = { + 
{"virtualization", "This is now set automatically if needed"}, + {"qinq_tag", "This option is deprecated"}, + {"wait on quit", "This is now set automatically if needed"}, + {"version", ""} +}; + +const char *get_cfg_dir(void) +{ + static char dir[PATH_MAX]; + size_t end = strlen(cfg_file) - 1; + while (end > 0 && cfg_file[end] != '/') + end--; + + strncpy(dir, cfg_file, end); + return dir; +} + +static int get_lua_cfg(__attribute__((unused)) unsigned sindex, __attribute__((unused)) char *str, __attribute__((unused)) void *data) +{ + int status; + char cwd[1024]; + if (NULL == getcwd(cwd, sizeof(cwd))) { + set_errf("Failed to get current directory while loading Lua file\n"); + return -1; + } + status = chdir(get_cfg_dir()); + if (status) { + set_errf("Failed to change directory to '%s' while loading Lua file\n", get_cfg_dir()); + return -1; + } + + struct lua_State *l = prox_lua(); + + char str_cpy[1024]; + strncpy(str_cpy, str, sizeof(str_cpy)); + uint32_t len = strlen(str_cpy); + str_cpy[len++] = '\n'; + str_cpy[len++] = 0; + + status = luaL_loadstring(l, str_cpy); + if (status) { + set_errf("Lua error: '%s'\n", lua_tostring(l, -1)); + status = chdir(cwd); + return -1; + } + + status = lua_pcall(l, 0, LUA_MULTRET, 0); + if (status) { + set_errf("Lua error: '%s'\n", lua_tostring(l, -1)); + status = chdir(cwd); + return -1; + } + + status = chdir(cwd); + if (status) { + set_errf("Failed to restore current directory to '%s' while loading Lua file\n", cwd); + return -1; + } + + return 0; +} + +/* [global] parser */ +static int get_global_cfg(__attribute__((unused))unsigned sindex, char *str, void *data) +{ + struct prox_cfg *pset = (struct prox_cfg *)data; + + if (str == NULL || pset == NULL) { + return -1; + } + + char *pkey = get_cfg_key(str); + if (pkey == NULL) { + set_errf("Missing key after option"); + return -1; + } + + for (uint32_t i = 0; i < RTE_DIM(global_cfg_depr); ++i) { + if (STR_EQ(str, global_cfg_depr[i].opt)) { + set_errf("Option '%s' is 
deprecated%s%s", + global_cfg_depr[i].opt, strlen(global_cfg_depr[i].info)? ": ": "", global_cfg_depr[i].info); + return -1; + } + } + + if (STR_EQ(str, "name")) { + return parse_str(pset->name, pkey, sizeof(pset->name)); + } + + if (STR_EQ(str, "start time")) { + return parse_int(&pset->start_time, pkey); + } + + if (STR_EQ(str, "duration time")) { + return parse_int(&pset->duration_time, pkey); + } + + if (STR_EQ(str, "shuffle")) { + return parse_flag(&pset->flags, DSF_SHUFFLE, pkey); + } + if (STR_EQ(str, "disable cmt")) { + return parse_flag(&pset->flags, DSF_DISABLE_CMT, pkey); + } + if (STR_EQ(str, "mp rings")) { + return parse_flag(&pset->flags, DSF_MP_RINGS, pkey); + } + if (STR_EQ(str, "enable bypass")) { + return parse_flag(&pset->flags, DSF_ENABLE_BYPASS, pkey); + } + + if (STR_EQ(str, "cpe table map")) { + /* The config defined ports through 0, 1, 2 ... which + need to be associated with ports. This is done + through defining it using "cpe table map=" */ + return parse_port_name_list((uint32_t*)pset->cpe_table_ports, NULL, PROX_MAX_PORTS, pkey); + } + + if (STR_EQ(str, "pre cmd")) { + return system(pkey); + } + + if (STR_EQ(str, "unique mempool per socket")) { + return parse_flag(&pset->flags, UNIQUE_MEMPOOL_PER_SOCKET, pkey); + } + + if (STR_EQ(str, "log buffer size")) { + if (parse_kmg(&pset->logbuf_size, pkey)) { + return -1; + } + plog_info("Logging to buffer with size = %d\n", pset->logbuf_size); + return 0; + } + + set_errf("Option '%s' is not known", str); + return -1; +} + +/* [variable] parser */ +static int get_var_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data) +{ + return add_var(str, get_cfg_key(str), 0); +} + +/* [defaults] parser */ +static int get_defaults_cfg(__attribute__((unused)) unsigned sindex, char *str, __attribute__((unused)) void *data) +{ + uint32_t val; + char *pkey; + + pkey = get_cfg_key(str); + if (pkey == NULL) { + set_errf("Missing key after option"); + return -1; + } + + if 
(STR_EQ(str, "mempool size")) { + + if (parse_kmg(&val, pkey)) { + return -1; + } + + for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { + struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id]; + cur_lcore_cfg_init->id = lcore_id; + for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) { + struct task_args *targ = &cur_lcore_cfg_init->targs[task_id]; + targ->nb_mbuf = val; + targ->id = task_id; + } + } + return 0; + } + + if (STR_EQ(str, "qinq tag")) { + for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { + struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id]; + cur_lcore_cfg_init->id = lcore_id; + for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) { + struct task_args *targ = &cur_lcore_cfg_init->targs[task_id]; + parse_int(&targ->qinq_tag, pkey); + } + } + return 0; + } + if (STR_EQ(str, "memcache size")) { + + if (parse_kmg(&val, pkey)) { + return -1; + } + + for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { + struct lcore_cfg *cur_lcore_cfg_init = &lcore_cfg_init[lcore_id]; + cur_lcore_cfg_init->id = lcore_id; + for (uint8_t task_id = 0; task_id < MAX_TASKS_PER_CORE; ++task_id) { + struct task_args *targ = &cur_lcore_cfg_init->targs[task_id]; + targ->nb_cache_mbuf = val; + } + } + return 0; + } + + set_errf("Option '%s' is not known", str); + return -1; +} + +/* [cache set] parser */ +static int get_cache_set_cfg(unsigned sindex, char *str, void *data) +{ + struct prox_cache_set_cfg *cfg = (struct prox_cache_set_cfg *)data; + + uint8_t cur_if = sindex & ~CFG_INDEXED; + + if (cur_if >= PROX_MAX_CACHE_SET) { + set_errf("Cache set ID is too high (max allowed %d)", PROX_MAX_CACHE_SET - 1 ); + return -1; + } + + cfg = &prox_cache_set_cfg[cur_if]; + + if (str == NULL || data == NULL) { + return -1; + } + + char *pkey = get_cfg_key(str); + + if (pkey == NULL) { + set_errf("Missing key after option"); + return -1; + } + + if (STR_EQ(str, "mask")) { + uint32_t val; + int 
err = parse_int(&val, pkey); + if (err) { + return -1; + } + cfg->mask = val; + cfg->socket_id = -1; + plog_info("\tCache set %d has mask %x\n", cur_if, cfg->mask); + return 0; + } + return 0; +} + +/* [port] parser */ +static int get_port_cfg(unsigned sindex, char *str, void *data) +{ + struct prox_port_cfg *cfg = (struct prox_port_cfg *)data; + + uint8_t cur_if = sindex & ~CFG_INDEXED; + + if (cur_if >= PROX_MAX_PORTS) { + set_errf("Port ID is too high (max allowed %d)", PROX_MAX_PORTS - 1 ); + return -1; + } + + cfg = &prox_port_cfg[cur_if]; + + if (str == NULL || data == NULL) { + return -1; + } + + char *pkey = get_cfg_key(str); + + if (pkey == NULL) { + set_errf("Missing key after option"); + return -1; + } + + if (STR_EQ(str, "mac")) { + if (STR_EQ(pkey, "hardware")) { + cfg->type = PROX_PORT_MAC_HW; + } + else if (STR_EQ(pkey, "random")) { + cfg->type = PROX_PORT_MAC_RAND; + } + else { + cfg->type = PROX_PORT_MAC_SET; + if (parse_mac(&cfg->eth_addr, pkey)) { + return -1; + } + } + } + else if (STR_EQ(str, "name")) { + uint32_t val; + strncpy(cfg->name, pkey, MAX_NAME_SIZE); + PROX_ASSERT(cur_if < PROX_MAX_PORTS); + return add_port_name(cur_if, pkey); + } + else if (STR_EQ(str, "rx desc")) { + return parse_int(&cfg->n_rxd, pkey); + } + else if (STR_EQ(str, "tx desc")) { + return parse_int(&cfg->n_txd, pkey); + } + else if (STR_EQ(str, "promiscuous")) { + uint32_t val; + if (parse_bool(&val, pkey)) { + return -1; + } + cfg->promiscuous = val; + } + else if (STR_EQ(str, "lsc")) { + cfg->lsc_set_explicitely = 1; + uint32_t val; + if (parse_bool(&val, pkey)) { + return -1; + } + cfg->lsc_val = val; + } + else if (STR_EQ(str, "strip crc")) { + uint32_t val; + if (parse_bool(&val, pkey)) { + return -1; + } + cfg->port_conf.rxmode.hw_strip_crc = val; + } + else if (STR_EQ(str, "rss")) { + uint32_t val; + if (parse_bool(&val, pkey)) { + return -1; + } + if (val) { + cfg->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; + cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = 
ETH_RSS_IPV4; + } + } + else if (STR_EQ(str, "rx_ring")) { + parse_str(cfg->rx_ring, pkey, sizeof(cfg->rx_ring)); + } + else if (STR_EQ(str, "tx_ring")) { + parse_str(cfg->tx_ring, pkey, sizeof(cfg->tx_ring)); + } + + return 0; +} + +static enum police_action str_to_color(const char *str) +{ + if (STR_EQ(str, "green")) + return ACT_GREEN; + if (STR_EQ(str, "yellow")) + return ACT_YELLOW; + if (STR_EQ(str, "red")) + return ACT_RED; + if (STR_EQ(str, "drop")) + return ACT_DROP; + return ACT_INVALID; +} + +struct cfg_depr task_cfg_depr[] = { + {"sig", ""}, +}; + +struct cfg_depr core_cfg_depr[] = { + {"do sig", ""}, + {"lat", ""}, + {"network side", ""}, +}; + +/* [core] parser */ +static int get_core_cfg(unsigned sindex, char *str, void *data) +{ + char *pkey; + struct lcore_cfg *lconf = (struct lcore_cfg *)data; + + if (str == NULL || lconf == NULL || !(sindex & CFG_INDEXED)) { + return -1; + } + + pkey = get_cfg_key(str); + if (pkey == NULL) { + set_errf("Missing key after option"); + return -1; + } + + uint32_t ncore = sindex & ~CFG_INDEXED; + if (ncore >= RTE_MAX_LCORE) { + set_errf("Core index too high (max allowed %d)", RTE_MAX_LCORE - 1); + return -1; + } + + lconf = &lconf[ncore]; + + for (uint32_t i = 0; i < RTE_DIM(core_cfg_depr); ++i) { + if (STR_EQ(str, core_cfg_depr[i].opt)) { + set_errf("Option '%s' is deprecated%s%s", + core_cfg_depr[i].opt, strlen(core_cfg_depr[i].info)? 
": ": "", core_cfg_depr[i].info); + return -1; + } + } + + char buff[128]; + lcore_to_socket_core_ht(ncore, buff, sizeof(buff)); + set_self_var(buff); + if (STR_EQ(str, "task")) { + + uint32_t val; + if (parse_int(&val, pkey)) { + return -1; + } + if (val >= MAX_TASKS_PER_CORE) { + set_errf("Too many tasks for core (max allowed %d)", MAX_TASKS_PER_CORE - 1); + return -1; + } + if (val != lconf->n_tasks_all) { + set_errf("Task ID skipped or defined twice"); + return -1; + } + + lconf->active_task = val; + + lconf->targs[lconf->active_task].task = lconf->active_task; + + if (lconf->n_tasks_all < lconf->active_task + 1) { + lconf->n_tasks_all = lconf->active_task + 1; + } + return 0; + } + + struct task_args *targ = &lconf->targs[lconf->active_task]; + if (STR_EQ(str, "tx ports from routing table")) { + uint32_t vals[PROX_MAX_PORTS]; + uint32_t n_if; + if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) { + set_errf("tx port form route not supported mode %s", targ->task_init->mode_str); + return -1; + } + + if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) { + return -1; + } + + for (uint8_t i = 0; i < n_if; ++i) { + targ->tx_port_queue[i].port = vals[i]; + targ->nb_txports++; + } + targ->runtime_flags |= TASK_ROUTING; + return 0; + } + if (STR_EQ(str, "tx ports from cpe table")) { + uint32_t vals[PROX_MAX_PORTS]; + int n_remap = -1; + uint32_t ret; + uint32_t val; + char* mapping_str = strstr(pkey, " remap="); + + if (mapping_str != NULL) { + *mapping_str = 0; + mapping_str += strlen(" remap="); + n_remap = parse_remap(targ->mapping, mapping_str); + } + + if (parse_port_name_list(vals, &ret, PROX_MAX_PORTS, pkey)) { + return -1; + } + + if (n_remap != -1 && ret != (uint32_t)n_remap) { + set_errf("Expected %d remap elements but had %d", n_remap, ret); + return -1; + } + + for (uint8_t i = 0; i < ret; ++i) { + targ->tx_port_queue[i].port = vals[i]; + + /* default mapping this case is port0 -> port0 */ + if (n_remap == -1) { + targ->mapping[vals[i]] 
= i; + } + } + + targ->nb_txports = ret; + + return 0; + } + if (STR_EQ(str, "tx cores from routing table")) { + if (!(targ->task_init->flag_features & TASK_FEATURE_ROUTING)) { + set_errf("tx port form route not supported mode %s", targ->task_init->mode_str); + return -1; + } + + struct core_task_set *cts = &targ->core_task_set[0]; + + if (parse_task_set(cts, pkey)) + return -1; + + if (cts->n_elems > MAX_WT_PER_LB) { + set_errf("Maximum worker threads allowed is %u but have %u", MAX_WT_PER_LB, cts->n_elems); + return -1; + } + + targ->nb_worker_threads = cts->n_elems; + targ->nb_txrings = cts->n_elems; + + if (targ->nb_txrings > MAX_RINGS_PER_TASK) { + set_errf("Maximum allowed TX rings is %u but have %u", MAX_RINGS_PER_TASK, targ->nb_txrings); + return -1; + } + + targ->runtime_flags |= TASK_ROUTING; + return 0; + } + if (STR_EQ(str, "tx cores from cpe table")) { + struct core_task_set *core_task_set = &targ->core_task_set[0]; + int ret, ret2; + char *mapping_str; + + mapping_str = strstr(pkey, " remap="); + if (mapping_str == NULL) { + set_errf("There is no default mapping for tx cores from cpe table. 
Please specify it through remap="); + return -1; + } + *mapping_str = 0; + mapping_str += strlen(" remap="); + ret = parse_remap(targ->mapping, mapping_str); + if (ret <= 0) { + return -1; + } + + struct core_task_set *cts = &targ->core_task_set[0]; + + if (parse_task_set(cts, pkey)) + return -1; + if (cts->n_elems > MAX_RINGS_PER_TASK) { + set_errf("Maximum cores to route to is %u\n", MAX_RINGS_PER_TASK); + return -1; + } + + targ->nb_txrings = cts->n_elems; + + if (ret != targ->nb_txrings) { + set_errf("Expecting same number of remaps as cores\n", str); + return -1; + } + return 0; + } + + if (STR_EQ(str, "delay ms")) { + if (targ->delay_us) { + set_errf("delay ms and delay us are mutually exclusive\n", str); + return -1; + } + uint32_t delay_ms; + int rc = parse_int(&delay_ms, pkey); + targ->delay_us = delay_ms * 1000; + return rc; + } + if (STR_EQ(str, "delay us")) { + if (targ->delay_us) { + set_errf("delay ms and delay us are mutually exclusive\n", str); + return -1; + } + return parse_int(&targ->delay_us, pkey); + } + if (STR_EQ(str, "random delay us")) { + return parse_int(&targ->random_delay_us, pkey); + } + if (STR_EQ(str, "cpe table timeout ms")) { + return parse_int(&targ->cpe_table_timeout_ms, pkey); + } + if (STR_EQ(str, "ctrl path polling frequency")) { + int rc = parse_int(&targ->ctrl_freq, pkey); + if (rc == 0) { + if (targ->ctrl_freq == 0) { + set_errf("ctrl frequency must be non null."); + return -1; + } + } + return rc; + } + + if (STR_EQ(str, "handle arp")) { + return parse_flag(&targ->runtime_flags, TASK_CTRL_HANDLE_ARP, pkey); + } + if (STR_EQ(str, "fast path handle arp")) { + return parse_flag(&targ->runtime_flags, TASK_FP_HANDLE_ARP, pkey); + } + if (STR_EQ(str, "multiple arp")) { + return parse_flag(&targ->flags, TASK_MULTIPLE_MAC, pkey); + } + + /* Using tx port name, only a _single_ port can be assigned to a task. */ + if (STR_EQ(str, "tx port")) { + if (targ->nb_txports > 0) { + set_errf("Only one tx port can be defined per task. 
Use a LB task or routing instead."); + return -1; + } + + uint32_t n_if = 0; + uint32_t ports[PROX_MAX_PORTS]; + + if(parse_port_name_list(ports, &n_if, PROX_MAX_PORTS, pkey)) { + return -1; + } + + PROX_ASSERT(n_if-1 < PROX_MAX_PORTS); + + for (uint8_t i = 0; i < n_if; ++i) { + targ->tx_port_queue[i].port = ports[i]; + targ->nb_txports++; + } + + if (n_if > 1) { + targ->nb_worker_threads = targ->nb_txports; + } + + return 0; + } + if (STR_EQ(str, "rx ring")) { + uint32_t val; + int err = parse_bool(&val, pkey); + if (!err && val && targ->rx_port_queue[0].port != OUT_DISCARD) { + set_errf("Can't read both from internal ring and external port from the same task. Use multiple tasks instead."); + return -1; + } + + return parse_flag(&targ->flags, TASK_ARG_RX_RING, pkey); + } + if (STR_EQ(str, "private")) { + return parse_bool(&targ->use_src, pkey); + } + if (STR_EQ(str, "use src ip")) { + return parse_bool(&targ->use_src, pkey); + } + if (STR_EQ(str, "nat table")) { + return parse_str(targ->nat_table, pkey, sizeof(targ->nat_table)); + } + if (STR_EQ(str, "rules")) { + return parse_str(targ->rules, pkey, sizeof(targ->rules)); + } + if (STR_EQ(str, "route table")) { + return parse_str(targ->route_table, pkey, sizeof(targ->route_table)); + } + if (STR_EQ(str, "dscp")) { + return parse_str(targ->dscp, pkey, sizeof(targ->dscp)); + } + if (STR_EQ(str, "tun_bindings")) { + return parse_str(targ->tun_bindings, pkey, sizeof(targ->tun_bindings)); + } + if (STR_EQ(str, "cpe table")) { + return parse_str(targ->cpe_table_name, pkey, sizeof(targ->cpe_table_name)); + } + if (STR_EQ(str, "user table")) { + return parse_str(targ->user_table, pkey, sizeof(targ->user_table)); + } + if (STR_EQ(str, "streams")) { + return parse_str(targ->streams, pkey, sizeof(targ->streams)); + } + if (STR_EQ(str, "local lpm")) { + return parse_flag(&targ->flags, TASK_ARG_LOCAL_LPM, pkey); + } + if (STR_EQ(str, "drop")) { + return parse_flag(&targ->flags, TASK_ARG_DROP, pkey); + } + if (STR_EQ(str, 
"loop")) { + parse_flag(&targ->loop, 1, pkey); + return parse_flag(&targ->loop, 1, pkey); + } + if (STR_EQ(str, "qinq")) { + return parse_flag(&targ->flags, TASK_ARG_QINQ_ACL, pkey); + } + if (STR_EQ(str, "bps")) { + return parse_u64(&targ->rate_bps, pkey); + } + if (STR_EQ(str, "random")) { + return parse_str(targ->rand_str[targ->n_rand_str++], pkey, sizeof(targ->rand_str[0])); + } + if (STR_EQ(str, "rand_offset")) { + if (targ->n_rand_str == 0) { + set_errf("No random defined previously (use random=...)"); + return -1; + } + + return parse_int(&targ->rand_offset[targ->n_rand_str - 1], pkey); + } + if (STR_EQ(str, "keep src mac")) { + return parse_flag(&targ->flags, DSF_KEEP_SRC_MAC, pkey); + } + if (STR_EQ(str, "pcap file")) { + return parse_str(targ->pcap_file, pkey, sizeof(targ->pcap_file)); + } + if (STR_EQ(str, "pkt inline")) { + char pkey2[MAX_CFG_STRING_LEN]; + if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) { + set_errf("Error while parsing pkt line, too long\n"); + return -1; + } + + const size_t pkey_len = strlen(pkey2); + targ->pkt_size = 0; + + for (size_t i = 0; i < pkey_len; ++i) { + if (pkey2[i] == ' ') + continue; + + if (i + 1 == pkey_len) { + set_errf("Incomplete byte at character %z", i); + return -1; + } + + uint8_t byte = 0; + + if (pkey2[i] >= '0' && pkey2[i] <= '9') { + byte = (pkey2[i] - '0') << 4; + } + else if (pkey2[i] >= 'a' && pkey2[i] <= 'f') { + byte = (pkey2[i] - 'a' + 10) << 4; + } + else if (pkey2[i] >= 'A' && pkey2[i] <= 'F') { + byte = (pkey2[i] - 'A' + 10) << 4; + } + else { + set_errf("Invalid character in pkt inline at byte %d (%c)", i, pkey2[i]); + return -1; + } + + if (pkey2[i + 1] >= '0' && pkey2[i + 1] <= '9') { + byte |= (pkey2[i + 1] - '0'); + } + else if (pkey2[i + 1] >= 'a' && pkey2[i + 1] <= 'f') { + byte |= (pkey2[i + 1] - 'a' + 10); + } + else if (pkey2[i + 1] >= 'A' && pkey2[i + 1] <= 'F') { + byte |= (pkey2[i + 1] - 'A' + 10); + } + else { + set_errf("Invalid character in pkt inline at byte %d (%c)", i, pkey2[i 
+ 1]); + return -1; + } + if (targ->pkt_size == sizeof(targ->pkt_inline)) { + set_errf("Inline packet definition can't be longer than 1518"); + } + + targ->pkt_inline[targ->pkt_size++] = byte; + i += 1; + } + + return 0; + } + if (STR_EQ(str, "accuracy limit nsec")) { + return parse_int(&targ->accuracy_limit_nsec, pkey); + } + if (STR_EQ(str, "latency bucket size")) { + return parse_int(&targ->bucket_size, pkey); + } + if (STR_EQ(str, "latency buffer size")) { + return parse_int(&targ->latency_buffer_size, pkey); + } + if (STR_EQ(str, "accuracy pos")) { + return parse_int(&targ->accur_pos, pkey); + } + if (STR_EQ(str, "signature")) { + return parse_int(&targ->sig, pkey); + } + if (STR_EQ(str, "signature pos")) { + return parse_int(&targ->sig_pos, pkey); + } + if (STR_EQ(str, "lat pos")) { + targ->lat_enabled = 1; + return parse_int(&targ->lat_pos, pkey); + } + if (STR_EQ(str, "packet id pos")) { + return parse_int(&targ->packet_id_pos, pkey); + } + if (STR_EQ(str, "probability")) { + float probability; + int rc = parse_float(&probability, pkey); + if (probability == 0) { + set_errf("Probability must be != 0\n"); + return -1; + } else if (probability > 100.0) { + set_errf("Probability must be < 100\n"); + return -1; + } + targ->probability = probability * 10000; + return rc; + } + if (STR_EQ(str, "concur conn")) { + return parse_int(&targ->n_concur_conn, pkey); + } + if (STR_EQ(str, "max setup rate")) { + return parse_int(&targ->max_setup_rate, pkey); + } + if (STR_EQ(str, "pkt size")) { + return parse_int(&targ->pkt_size, pkey); + } + if (STR_EQ(str, "min bulk size")) { + return parse_int(&targ->min_bulk_size, pkey); + } + if (STR_EQ(str, "max bulk size")) { + return parse_int(&targ->max_bulk_size, pkey); + } + if (STR_EQ(str, "rx port")) { + if (targ->flags & TASK_ARG_RX_RING) { + set_errf("Can't read both from internal ring and external port from the same task. 
Use multiple tasks instead."); + return -1; + } + uint32_t vals[PROX_MAX_PORTS]; + uint32_t n_if; + + if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) { + return -1; + } + + for (uint8_t i = 0; i < n_if; ++i) { + PROX_ASSERT(vals[i] < PROX_MAX_PORTS); + targ->rx_port_queue[i].port = vals[i]; + targ->nb_rxports++; + } + return 0; + } + + if (STR_EQ(str, "mode")) { + /* Check deprecated task modes */ + char mode[255]; + int ret = parse_str(mode, pkey, sizeof(mode)); + if (ret) + return ret; + + for (uint32_t i = 0; i < RTE_DIM(task_cfg_depr); ++i) { + if (STR_EQ(mode, task_cfg_depr[i].opt)) { + set_errf("Task mode '%s' is deprecated%s%s", + task_cfg_depr[i].opt, strlen(task_cfg_depr[i].info)? ": ": "", task_cfg_depr[i].info); + return -1; + } + } + + /* master is a special mode that is always needed (cannot be turned off) */ + if (STR_EQ(mode, "master")) { + prox_cfg.master = ncore; + targ->mode = MASTER; + if (lconf->n_tasks_all > 1 || targ->task != 0) { + set_errf("Master core can only have one task\n"); + return -1; + } + return 0; + } + + struct task_init* task_init = to_task_init(mode, ""); + if (task_init) { + targ->mode = task_init->mode; + } + else { + set_errf("Task mode '%s' is invalid", mode); + tasks_list(); + return -1; + } + targ->task_init = task_init; + return 0; + } + if (STR_EQ(str, "users")) { + return parse_int(&targ->n_flows, pkey); + } + + if (STR_EQ(str, "mark")) { + return parse_flag(&targ->runtime_flags, TASK_MARK, pkey); + } + + if (STR_EQ(str, "mark green")) { + return parse_int(&targ->marking[0], pkey); + } + + if (STR_EQ(str, "mark yellow")) { + return parse_int(&targ->marking[1], pkey); + } + + if (STR_EQ(str, "mark red")) { + return parse_int(&targ->marking[2], pkey); + } + + if (STR_EQ(str, "tx cores")) { + uint8_t dest_task = 0; + /* if user did not specify, dest_port is left at default (first type) */ + uint8_t dest_proto = 0; + uint8_t ctrl = CTRL_TYPE_DP; + char *task_str = strstr(pkey, "proto="); + if (task_str) { + 
task_str += strlen("proto="); + + if (STR_EQ(task_str, "ipv4")) { + dest_proto = IPV4; + } + else if (STR_EQ(task_str, "arp")) { + dest_proto = ARP; + } + else if (STR_EQ(task_str, "ipv6")) { + dest_proto = IPV6; + } + else { + set_errf("proto needs to be either ipv4, arp or ipv6"); + return -1; + } + + } + + task_str = strstr(pkey, "task="); + + if (task_str) { + --task_str; + *task_str = 0; + task_str++; + task_str += strlen("task="); + char *task_str_end = strstr(task_str, " "); + if (task_str_end) { + *task_str_end = 0; + } + if (0 == strlen(task_str)) { + set_errf("Invalid task= syntax"); + return -1; + } + + switch (task_str[strlen(task_str) - 1]) { + case 'p': + ctrl = CTRL_TYPE_PKT; + break; + case 'm': + ctrl = CTRL_TYPE_MSG; + break; + case '\n': + case 0: + break; + default: + if (task_str[strlen(task_str) -1] < '0' || + task_str[strlen(task_str) -1] > '9') { + set_errf("Unknown ring type %c.\n", + task_str[strlen(task_str) - 1]); + return -1; + } + } + + dest_task = atoi(task_str); + if (dest_task >= MAX_TASKS_PER_CORE) { + set_errf("Destination task too high (max allowed %d)", MAX_TASKS_PER_CORE - 1); + return -1; + } + } + else { + dest_task = 0; + } + + struct core_task_set *cts = &targ->core_task_set[dest_proto]; + + if (parse_task_set(cts, pkey)) + return -1; + + if (cts->n_elems > MAX_WT_PER_LB) { + set_errf("Too many worker threads (max allowed %d)", MAX_WT_PER_LB - 1); + return -1; + } + + targ->nb_worker_threads = cts->n_elems; + targ->nb_txrings += cts->n_elems; + + return 0; + } + if (STR_EQ(str, "tx crc")) { + return parse_flag(&targ->runtime_flags, TASK_TX_CRC, pkey); + } + if (STR_EQ(str, "ring size")) { + return parse_int(&targ->ring_size, pkey); + } + if (STR_EQ(str, "mempool size")) { + return parse_kmg(&targ->nb_mbuf, pkey); + } + + else if (STR_EQ(str, "mbuf size")) { + targ->mbuf_size_set_explicitely = 1; + return parse_int(&targ->mbuf_size, pkey); + } + if (STR_EQ(str, "memcache size")) { + return parse_kmg(&targ->nb_cache_mbuf, 
pkey); + } + + if (STR_EQ(str, "byte offset")) { + return parse_int(&targ->byte_offset, pkey); + } + + if (STR_EQ(str, "name")) { + return parse_str(lconf->name, pkey, sizeof(lconf->name)); + } + /* MPLS configuration */ + if (STR_EQ(str, "untag mpls")) { + return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey); + } + + if (STR_EQ(str, "add mpls")) { + return parse_flag(&targ->runtime_flags, TASK_MPLS_TAGGING, pkey); + } + + if (STR_EQ(str, "ether type")) { + return parse_int(&targ->etype, pkey); + } + + if (STR_EQ(str, "cache set")) { + return parse_int(&lconf->cache_set, pkey); + } + + if (STR_EQ(str, "sub mode")) { + const char* mode_str = targ->task_init->mode_str; + const char *sub_mode_str = pkey; + + targ->task_init = to_task_init(mode_str, sub_mode_str); + if (!targ->task_init) { + set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str); + return -1; + } + return 0; + } + + if (STR_EQ(str, "mempool name")) { + return parse_str(targ->pool_name, pkey, sizeof(targ->pool_name)); + } + if (STR_EQ(str, "dpi engine")) { + return parse_str(targ->dpi_engine_path, pkey, sizeof(targ->dpi_engine_path)); + } + if (STR_EQ(str, "dpi engine arg")) { + return parse_str(targ->dpi_engine_args[targ->n_dpi_engine_args++], pkey, + sizeof(targ->dpi_engine_args[0])); + } + if (STR_EQ(str, "dst mac")) { /* destination MAC address to be used for packets */ + if (parse_mac(&targ->edaddr, pkey)) { + if (STR_EQ(pkey, "no")) { + targ->flags |= TASK_ARG_DO_NOT_SET_DST_MAC; + return 0; + } + if (STR_EQ(pkey, "packet") == 0) + return -1; + else + return 0; + } + targ->flags |= TASK_ARG_DST_MAC_SET; + return 0; + } + if (STR_EQ(str, "src mac")) { + if (parse_mac(&targ->esaddr, pkey)) { + if (STR_EQ(pkey, "no")) { + targ->flags |= TASK_ARG_DO_NOT_SET_SRC_MAC; + return 0; + } + else if (STR_EQ(pkey, "packet")) + return 0; + else if (STR_EQ(pkey, "packet")) { + targ->flags |= TASK_ARG_HW_SRC_MAC; + return 0; + } else { + return -1; + } + } + targ->flags |= 
TASK_ARG_SRC_MAC_SET; + return 0; + } + if (STR_EQ(str, "gateway ipv4")) { /* Gateway IP address used when generating */ + return parse_ip(&targ->gateway_ipv4, pkey); + } + if (STR_EQ(str, "number of ip")) { /* Gateway IP address used when generating */ + return parse_int(&targ->number_gen_ip, pkey); + } + if (STR_EQ(str, "local ipv4")) { /* source IP address to be used for packets */ + return parse_ip(&targ->local_ipv4, pkey); + } + if (STR_EQ(str, "local ipv6")) { /* source IPv6 address to be used for packets */ + return parse_ip6(&targ->local_ipv6, pkey); + } + if (STR_EQ(str, "number of packets")) + return parse_int(&targ->n_pkts, pkey); + if (STR_EQ(str, "pipes")) { + uint32_t val; + int err = parse_int(&val, pkey); + if (err) + return -1; + if (!val || !rte_is_power_of_2(val)) { + set_errf("Number of pipes has to be power of 2 and not zero"); + return -1; + } + + targ->qos_conf.port_params.n_pipes_per_subport = val; + return 0; + } + if (STR_EQ(str, "queue size")) { + uint32_t val; + int err = parse_int(&val, pkey); + if (err) { + return -1; + } + + targ->qos_conf.port_params.qsize[0] = val; + targ->qos_conf.port_params.qsize[1] = val; + targ->qos_conf.port_params.qsize[2] = val; + targ->qos_conf.port_params.qsize[3] = val; + return 0; + } + if (STR_EQ(str, "subport tb rate")) { + return parse_int(&targ->qos_conf.subport_params[0].tb_rate, pkey); + } + if (STR_EQ(str, "subport tb size")) { + return parse_int(&targ->qos_conf.subport_params[0].tb_size, pkey); + } + if (STR_EQ(str, "subport tc 0 rate")) { + return parse_int(&targ->qos_conf.subport_params[0].tc_rate[0], pkey); + } + if (STR_EQ(str, "subport tc 1 rate")) { + return parse_int(&targ->qos_conf.subport_params[0].tc_rate[1], pkey); + } + if (STR_EQ(str, "subport tc 2 rate")) { + return parse_int(&targ->qos_conf.subport_params[0].tc_rate[2], pkey); + } + if (STR_EQ(str, "subport tc 3 rate")) { + return parse_int(&targ->qos_conf.subport_params[0].tc_rate[3], pkey); + } + + if (STR_EQ(str, "subport tc 
rate")) { + uint32_t val; + int err = parse_int(&val, pkey); + if (err) { + return -1; + } + + targ->qos_conf.subport_params[0].tc_rate[0] = val; + targ->qos_conf.subport_params[0].tc_rate[1] = val; + targ->qos_conf.subport_params[0].tc_rate[2] = val; + targ->qos_conf.subport_params[0].tc_rate[3] = val; + + return 0; + } + if (STR_EQ(str, "subport tc period")) { + return parse_int(&targ->qos_conf.subport_params[0].tc_period, pkey); + } + if (STR_EQ(str, "pipe tb rate")) { + return parse_int(&targ->qos_conf.pipe_params[0].tb_rate, pkey); + } + if (STR_EQ(str, "pipe tb size")) { + return parse_int(&targ->qos_conf.pipe_params[0].tb_size, pkey); + } + if (STR_EQ(str, "pipe tc rate")) { + uint32_t val; + int err = parse_int(&val, pkey); + if (err) { + return -1; + } + + targ->qos_conf.pipe_params[0].tc_rate[0] = val; + targ->qos_conf.pipe_params[0].tc_rate[1] = val; + targ->qos_conf.pipe_params[0].tc_rate[2] = val; + targ->qos_conf.pipe_params[0].tc_rate[3] = val; + return 0; + } + if (STR_EQ(str, "pipe tc 0 rate")) { + return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey); + } + if (STR_EQ(str, "pipe tc 1 rate")) { + return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey); + } + if (STR_EQ(str, "pipe tc 2 rate")) { + return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey); + } + if (STR_EQ(str, "pipe tc 3 rate")) { + return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey); + } + if (STR_EQ(str, "pipe tc period")) { + return parse_int(&targ->qos_conf.pipe_params[0].tc_period, pkey); + } + if (STR_EQ(str, "police action")) { + char *in = strstr(pkey, " io="); + if (in == NULL) { + set_errf("Need to specify io colors using io=in_color,out_color\n"); + return -1; + } + *in = 0; + in += strlen(" io="); + + char *out = strstr(in, ","); + if (out == NULL) { + set_errf("Output color not specified\n"); + } + *out = 0; + out++; + + enum police_action in_color = str_to_color(in); + enum police_action out_color = str_to_color(out); + + if 
(in_color == ACT_INVALID) { + set_errf("Invalid input color %s. Expected green, yellow or red", in); + return -1; + } + if (out_color == ACT_INVALID) { + set_errf("Invalid output color %s. Expected green, yellow or red", out); + return -1; + } + enum police_action action = str_to_color(pkey); + if (action == ACT_INVALID) { + set_errf("Error action %s. Expected green, yellow, red or drop", pkey); + return -1; + } + targ->police_act[in_color][out_color] = action; + + return 0; + } + if (STR_EQ(str, "qinq tag")) { + return parse_int(&targ->qinq_tag, pkey); + } + if (STR_EQ(str, "cir")) { + return parse_int(&targ->cir, pkey); + } + if (STR_EQ(str, "cbs")) { + return parse_int(&targ->cbs, pkey); + } + if (STR_EQ(str, "pir")) { + return parse_int(&targ->pir, pkey); + } + if (STR_EQ(str, "pbs")) { + return parse_int(&targ->pbs, pkey); + } + if (STR_EQ(str, "ebs")) { + return parse_int(&targ->ebs, pkey); + } + uint32_t queue_id = 0; + if (sscanf(str, "queue %d weight", &queue_id) == 1) { + uint32_t val; + int err = parse_int(&val, pkey); + if (err) { + return -1; + } + targ->qos_conf.pipe_params[0].wrr_weights[queue_id] = val; + return 0; + } + if (STR_EQ(str, "classify")) { + if (!(targ->task_init->flag_features & TASK_FEATURE_CLASSIFY)) { + set_errf("Classify is not supported in '%s' mode", targ->task_init->mode_str); + return -1; + } + + return parse_flag(&targ->runtime_flags, TASK_CLASSIFY, pkey); + } + if (STR_EQ(str, "flow table size")) { + return parse_int(&targ->flow_table_size, pkey); + } +#ifdef GRE_TP + if (STR_EQ(str, "tbf rate")) { + return parse_int(&targ->tb_rate, pkey); + } + if (STR_EQ(str, "tbf size")) { + return parse_int(&targ->tb_size, pkey); + } +#endif + if (STR_EQ(str, "max rules")) { + return parse_int(&targ->n_max_rules, pkey); + } + + if (STR_EQ(str, "tunnel hop limit")) { + uint32_t val; + int err = parse_int(&val, pkey); + if (err) { + return -1; + } + targ->tunnel_hop_limit = val; + return 0; + } + + if (STR_EQ(str, "lookup port mask")) { + 
uint32_t val; + int err = parse_int(&val, pkey); + if (err) { + return -1; + } + targ->lookup_port_mask = val; + return 0; + } + + set_errf("Option '%s' is not known", str); + /* fail on unknown keys */ + return -1; +} + +static int str_is_number(const char *in) +{ + int dot_once = 0; + + for (size_t i = 0; i < strlen(in); ++i) { + if (!dot_once && in[i] == '.') { + dot_once = 1; + continue; + } + + if (in[i] < '0' || in[i] > '9') + return 0; + } + + return 1; +} + +/* command line parameters parsing procedure */ +int prox_parse_args(int argc, char **argv) +{ + int i, opt, ret; + char *tmp, *tmp2; + char tmp3[64]; + + /* Default settings */ + prox_cfg.flags |= DSF_AUTOSTART | DSF_WAIT_ON_QUIT; + prox_cfg.ui = PROX_UI_CURSES; + + plog_info("\tCommand line:"); + for (i = 0; i < argc; ++i) { + plog_info(" %s", argv[i]); + } + plog_info("\n"); + + while ((opt = getopt(argc, argv, "f:dnzpo:tkuar:emsiw:l:v:q:")) != EOF) { + switch (opt) { + case 'f': + /* path to config file */ + cfg_file = optarg; + size_t offset = 0; + for (size_t i = 0; i < strlen(cfg_file); ++i) { + if (cfg_file[i] == '/') { + offset = i + 1; + } + } + + strncpy(prox_cfg.name, cfg_file + offset, MAX_NAME_SIZE); + break; + case 'v': + plog_set_lvl(atoi(optarg)); + break; + case 'l': + prox_cfg.log_name_pid = 0; + strncpy(prox_cfg.log_name, optarg, MAX_NAME_SIZE); + break; + case 'p': + prox_cfg.log_name_pid = 1; + break; + case 'k': + prox_cfg.use_stats_logger = 1; + break; + case 'd': + prox_cfg.flags |= DSF_DAEMON; + prox_cfg.ui = PROX_UI_NONE; + break; + case 'z': + prox_cfg.flags |= DSF_USE_DUMMY_CPU_TOPO; + prox_cfg.flags |= DSF_CHECK_INIT; + break; + case 'n': + prox_cfg.flags |= DSF_USE_DUMMY_DEVICES; + break; + case 'r': + if (!str_is_number(optarg) || strlen(optarg) > 11) + return -1; + strncpy(prox_cfg.update_interval_str, optarg, sizeof(prox_cfg.update_interval_str)); + break; + case 'o': + if (prox_cfg.flags & DSF_DAEMON) + break; + + if (!strcmp(optarg, "curses")) { + prox_cfg.ui = 
PROX_UI_CURSES; + } + else if (!strcmp(optarg, "cli")) { + prox_cfg.ui = PROX_UI_CLI; + } + else if (!strcmp(optarg, "none")) { + prox_cfg.ui = PROX_UI_NONE; + } + else { + plog_err("Invalid local UI '%s', local UI can be 'curses', 'cli' or 'none'.", optarg); + return -1; + } + break; + case 'q': + if (luaL_loadstring(prox_lua(), optarg)) { + set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1)); + return -1; + } + + if (lua_pcall(prox_lua(), 0, LUA_MULTRET, 0)) { + set_errf("Lua error: '%s'\n", lua_tostring(prox_lua(), -1)); + return -1; + } + + break; + case 'a': + /* autostart all cores */ + prox_cfg.flags |= DSF_AUTOSTART; + break; + case 'e': + /* don't autostart */ + prox_cfg.flags &= ~DSF_AUTOSTART; + break; + case 't': + prox_cfg.flags |= DSF_LISTEN_TCP; + break; + case 'u': + prox_cfg.flags |= DSF_LISTEN_UDS; + break; + case 'm': + /* list supported task modes and exit */ + prox_cfg.flags |= DSF_LIST_TASK_MODES; + break; + case 's': + /* check configuration file syntax and exit */ + prox_cfg.flags |= DSF_CHECK_SYNTAX; + break; + case 'i': + /* check initialization sequence and exit */ + prox_cfg.flags |= DSF_CHECK_INIT; + break; + case 'w': + tmp = optarg; + tmp2 = 0; + if (strlen(tmp) >= 3 && + (tmp2 = strchr(tmp, '='))) { + *tmp2 = 0; + tmp3[0] = '$'; + strncpy(tmp3 + 1, tmp, 63); + plog_info("\tAdding variable: %s = %s\n", tmp3, tmp2 + 1); + ret = add_var(tmp3, tmp2 + 1, 1); + if (ret == -2) { + plog_err("\tFailed to add variable, too many variables defines\n"); + return -1; + } + else if(ret == -3) { + plog_err("\tFailed to add variable, already defined\n"); + return -1; + } + break; + } + /* fall-through */ + default: + plog_err("\tUnknown option\n"); + return -1; + } + } + + /* reset getopt lib for DPDK */ + optind = 0; + + return 0; +} + +static int check_cfg(void) +{ + /* Sanity check */ +#define RETURN_IF(cond, err) \ + if (cond) { \ + plog_err(err); \ + return -1; \ + }; + + RETURN_IF(rte_cfg.force_nchannel == 0, "\tError: number of memory 
channels not specified in [eal options] section\n"); + RETURN_IF(prox_cfg.master >= RTE_MAX_LCORE, "\tError: No master core specified (one core needs to have mode=master)\n"); + +#undef RETURN_IF + + return 0; +} + +static int calc_tot_rxrings(void) +{ + struct lcore_cfg *slconf, *dlconf; + struct task_args *starg, *dtarg; + uint32_t dlcore_id; + uint8_t dtask_id; + struct core_task ct; + + dlconf = NULL; + while (core_targ_next_early(&dlconf, &dtarg, 1) == 0) { + dtarg->tot_rxrings = 0; + } + + slconf = NULL; + while (core_targ_next_early(&slconf, &starg, 1) == 0) { + for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) { + for (uint8_t ring_idx = 0; ring_idx < starg->core_task_set[idx].n_elems; ++ring_idx) { + ct = starg->core_task_set[idx].core_task[ring_idx]; + if (!prox_core_active(ct.core, 0)) { + set_errf("Core %u is disabled but Core %u task %u is sending to it\n", + ct.core, slconf->id, starg->id); + return -1; + } + + dlconf = &lcore_cfg_init[ct.core]; + + if (ct.task >= dlconf->n_tasks_all) { + set_errf("Core %u task %u not enabled\n", ct.core, ct.task); + return -1; + } + + dtarg = &dlconf->targs[ct.task]; + + /* Control rings are not relevant at this point. 
*/ + if (ct.type) + continue; + + if (!(dtarg->flags & TASK_ARG_RX_RING)) { + set_errf("Core %u task %u is not expecting to receive through a ring\n", + ct.core, ct.task); + return -1; + } + + dtarg->tot_rxrings++; + if (dtarg->tot_rxrings > MAX_RINGS_PER_TASK) { + set_errf("Core %u task %u is receiving from too many tasks", + ct.core, ct.task); + return -1; + } + } + } + } + + return 0; +} + +static void prox_set_core_mask(void) +{ + struct lcore_cfg *lconf; + + prox_core_clr(); + for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { + lconf = &lcore_cfg_init[lcore_id]; + if (lconf->n_tasks_all > 0 && lconf->targs[0].mode != MASTER) { + prox_core_set_active(lcore_id); + } + } +} + +static int is_using_no_drop(void) +{ + uint32_t lcore_id; + struct lcore_cfg *lconf; + struct task_args *targs; + + lcore_id = -1; + while(prox_core_next(&lcore_id, 1) == 0) { + lconf = &lcore_cfg_init[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + targs = &lconf->targs[task_id]; + if (!(targs->flags & TASK_ARG_DROP)) + return 1; + } + } + return 0; +} + +int prox_read_config_file(void) +{ + set_global_defaults(&prox_cfg); + set_task_defaults(&prox_cfg, lcore_cfg_init); + set_port_defaults(); + plog_info("=== Parsing configuration file '%s' ===\n", cfg_file); + struct cfg_file *pcfg = cfg_open(cfg_file); + if (pcfg == NULL) { + return -1; + } + + struct cfg_section* config_sections[] = { + &lua_cfg , + &var_cfg , + &eal_default_cfg , + &cache_set_cfg , + &port_cfg , + &defaults_cfg , + &settings_cfg , + &core_cfg , + NULL + }; + + for (struct cfg_section** section = config_sections; *section != NULL; ++section) { + const char* name = (*section)->name; + size_t len = strlen(name); + plog_info("\t*** Reading [%s] section%s ***\n", name, name[len - 1] == '#'? 
"s": ""); + cfg_parse(pcfg, *section); + + if ((*section)->error) { + plog_err("At line %u, section [%s], entry %u: '%s'\n\t%s\n" + , pcfg->err_line, pcfg->err_section, pcfg->err_entry + 1, pcfg->cur_line, + strlen(get_parse_err())? get_parse_err() : err_str); + cfg_close(pcfg); /* cannot close before printing error, print uses internal buffer */ + return -1; + } + } + + cfg_close(pcfg); + + prox_set_core_mask(); + + if (is_using_no_drop()) { + prox_cfg.flags &= ~DSF_WAIT_ON_QUIT; + } + + if (calc_tot_rxrings()) { + plog_err("Error in configuration: %s\n", err_str); + return -1; + } + + return check_cfg(); +} + +static void failed_rte_eal_init(__attribute__((unused))const char *prog_name) +{ + plog_err("\tError in rte_eal_init()\n"); +} + +int prox_setup_rte(const char *prog_name) +{ + char *rte_argv[MAX_RTE_ARGV]; + char rte_arg[MAX_RTE_ARGV][MAX_ARG_LEN]; + char tmp[PROX_CM_STR_LEN]; + /* create mask of used cores */ + plog_info("=== Setting up RTE EAL ===\n"); + + if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) { + plog_info("Using dummy cpu topology\n"); + snprintf(tmp, sizeof(tmp), "0x1"); + } else { + prox_core_to_hex(tmp, sizeof(tmp), 0); + plog_info("\tWorker threads core mask is %s\n", tmp); + prox_core_to_hex(tmp, sizeof(tmp), 1); + plog_info("\tWith master core index %u, full core mask is %s\n", prox_cfg.master, tmp); + } + + /* fake command line parameters for rte_eal_init() */ + int argc = 0; + rte_argv[argc] = strdup(prog_name); + sprintf(rte_arg[++argc], "-c%s", tmp); + rte_argv[argc] = rte_arg[argc]; +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) + sprintf(rte_arg[++argc], "--master-lcore=%u", 0); + else + sprintf(rte_arg[++argc], "--master-lcore=%u", prox_cfg.master); + rte_argv[argc] = rte_arg[argc]; +#else + /* For old DPDK versions, the master core had to be the first + core. 
*/ + uint32_t first_core = -1; + + if (prox_core_next(&first_core, 1) == -1) { + plog_err("Can't core ID of first core in use\n"); + return -1; + } + if (first_core != prox_cfg.master) { + plog_err("The master core needs to be the first core (master core = %u, first core = %u).\n", first_core, prox_cfg.master); + return -1; + } +#endif + + if (rte_cfg.memory) { + sprintf(rte_arg[++argc], "-m%u", rte_cfg.memory); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.force_nchannel) { + sprintf(rte_arg[++argc], "-n%u", rte_cfg.force_nchannel); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.force_nrank) { + sprintf(rte_arg[++argc], "-r%u", rte_cfg.force_nrank); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.no_hugetlbfs) { + strcpy(rte_arg[++argc], "--no-huge"); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.no_pci) { + strcpy(rte_arg[++argc], "--no-pci"); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.no_hpet) { + strcpy(rte_arg[++argc], "--no-hpet"); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.no_shconf) { + strcpy(rte_arg[++argc], "--no-shconf"); + rte_argv[argc] = rte_arg[argc]; + } + + if (rte_cfg.eal != NULL) { + char *ptr = rte_cfg.eal; + char *ptr2; + while (ptr != NULL) { + while (isspace(*ptr)) + ptr++; + ptr2 = ptr; + ptr = strchr(ptr, ' '); + if (ptr) { + *ptr++ = '\0'; + } + strcpy(rte_arg[++argc], ptr2); + rte_argv[argc] = rte_arg[argc]; + } + } + + if (rte_cfg.hugedir != NULL) { + strcpy(rte_arg[++argc], "--huge-dir"); + rte_argv[argc] = rte_arg[argc]; + rte_argv[++argc] = rte_cfg.hugedir; + } + + if (rte_cfg.no_output) { + rte_set_log_level(0); + } + /* init EAL */ + plog_info("\tEAL command line:"); + if (argc >= MAX_RTE_ARGV) { + plog_err("too many arguments for EAL\n"); + return -1; + } + + for (int h = 0; h <= argc; ++h) { + plog_info(" %s", rte_argv[h]); + } + plog_info("\n"); + + rte_set_application_usage_hook(failed_rte_eal_init); + if (rte_eal_init(++argc, rte_argv) < 0) { + plog_err("\tError in 
rte_eal_init()\n"); + return -1; + } + plog_info("\tEAL Initialized\n"); + + if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO) + return 0; + + /* check if all active cores are in enabled in DPDK */ + for (uint32_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { + if (lcore_id == prox_cfg.master) { + if (!rte_lcore_is_enabled(lcore_id)) + return -1; + } + else if (rte_lcore_is_enabled(lcore_id) != prox_core_active(lcore_id, 0)) { + plog_err("\tFailed to enable lcore %u\n", lcore_id); + return -1; + } + else if (lcore_cfg_init[lcore_id].n_tasks_all != 0 && !rte_lcore_is_enabled(lcore_id)) { + plog_err("\tFailed to enable lcore %u\n", lcore_id); + return -1; + } + } + return 0; +} diff --git a/VNFs/DPPD-PROX/prox_args.h b/VNFs/DPPD-PROX/prox_args.h new file mode 100644 index 00000000..1c900054 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_args.h @@ -0,0 +1,41 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PROX_ARGS_H_ +#define _PROX_ARGS_H_ + +#include "lconf.h" + +struct rte_cfg { + /* DPDK standard options */ + uint32_t memory; /* amount of asked memory */ + uint32_t force_nchannel; /* force number of channels */ + uint32_t force_nrank; /* force number of ranks */ + uint32_t no_hugetlbfs; /* true to disable hugetlbfs */ + uint32_t no_pci; /* true to disable PCI */ + uint32_t no_hpet; /* true to disable HPET */ + uint32_t no_shconf; /* true if there is no shared config */ + char *hugedir; /* dir where hugetlbfs is mounted */ + char *eal; /* any additional eal option */ + uint32_t no_output; /* disable EAL debug output */ +}; + +int prox_parse_args(int argc, char **argv); +int prox_read_config_file(void); +int prox_setup_rte(const char *prog_name); +const char *get_cfg_dir(void); + +#endif /* _PROX_ARGS_H_ */ diff --git a/VNFs/DPPD-PROX/prox_assert.h b/VNFs/DPPD-PROX/prox_assert.h new file mode 100644 index 00000000..cc4f24e6 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_assert.h @@ -0,0 +1,39 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PROX_ASSERT_H_ +#define _PROX_ASSERT_H_ + +#include <assert.h> +#include "display.h" + +#if defined(__KLOCWORK__) || defined(ASSERT) + +#ifdef NDEBUG +#error When enabling asserts, NDEBUG must be undefined +#endif + +#define PROX_ASSERT(cond) do { \ + if (!(cond)) { \ + display_end(); \ + assert(cond); \ + } \ + } while (0) +#else +#define PROX_ASSERT(cond) do {} while(0) +#endif + +#endif /* _PROX_ASSERT_H_ */ diff --git a/VNFs/DPPD-PROX/prox_cfg.c b/VNFs/DPPD-PROX/prox_cfg.c new file mode 100644 index 00000000..a2cf7953 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_cfg.c @@ -0,0 +1,145 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/
+
+#include <string.h>
+#include <stdio.h>
+
+#include "prox_cfg.h"
+
+/* Bits per core-mask word and in the whole mask. */
+#define CM_N_BITS (sizeof(prox_cfg.core_mask[0]) * 8)
+#define CM_ALL_N_BITS (sizeof(prox_cfg.core_mask) * 8)
+
+struct prox_cfg prox_cfg = {
+	.update_interval_str = "1"
+};
+
+/* Returns 1 if the bit for lcore_id is set in the core mask, 0 if it is
+   not, and -1 when lcore_id is out of range. */
+static int prox_cm_isset(const uint32_t lcore_id)
+{
+	uint64_t cm;
+	uint32_t cm_idx;
+
+	/* Bug fix: was '>', letting lcore_id == CM_ALL_N_BITS underflow
+	   cm_idx below and read out of bounds. */
+	if (lcore_id >= CM_ALL_N_BITS)
+		return -1;
+
+	cm = __UINT64_C(1) << (lcore_id % CM_N_BITS);
+	cm_idx = PROX_CM_DIM - 1 - lcore_id / CM_N_BITS;
+	return !!(prox_cfg.core_mask[cm_idx] & cm);
+}
+
+int prox_core_active(const uint32_t lcore_id, const int with_master)
+{
+	int ret;
+
+	ret = prox_cm_isset(lcore_id);
+	if (ret < 0)
+		return 0;
+
+	if (with_master)
+		return ret || lcore_id == prox_cfg.master;
+	else
+		return ret && lcore_id != prox_cfg.master;
+}
+
+int prox_core_next(uint32_t* lcore_id, const int with_master)
+{
+	for (uint32_t i = *lcore_id + 1; i < CM_ALL_N_BITS; ++i) {
+		if (prox_core_active(i, with_master)) {
+			*lcore_id = i;
+			return 0;
+		}
+	}
+	return -1;
+}
+
+int prox_core_to_hex(char *dst, const size_t size, const int with_master)
+{
+	uint64_t cm;
+	uint32_t cm_first = 0;
+	uint32_t master = prox_cfg.master;
+
+	/* Minimum size of the string has to be big enough to hold the
+	   bitmask in hex (including the prefix "0x"). */
+	if (size < PROX_CM_STR_LEN)
+		return 0;
+
+	snprintf(dst, size, "0x");
+	/* Skip leading all-zero words; cm_first ends up at the first word
+	   that contains an active core (or the master when requested). */
+	for (uint32_t i = 0; i < PROX_CM_DIM; ++i, cm_first = i) {
+		/* Bug fix: the word index was compared against
+		   'i * CM_N_BITS'; the identical test below uses 'i'. */
+		if ((with_master && ((CM_ALL_N_BITS - 1 - master) / CM_N_BITS == i)) ||
+		    prox_cfg.core_mask[i]) {
+			break;
+		}
+	}
+
+	for (uint32_t i = cm_first; i < PROX_CM_DIM; ++i) {
+		cm = prox_cfg.core_mask[i];
+		if (with_master && ((CM_ALL_N_BITS - 1 - master) / CM_N_BITS == i)) {
+			cm |= (__UINT64_C(1) << (master % CM_N_BITS));
+		}
+
+		snprintf(dst + strlen(dst), size - strlen(dst), i == cm_first? 
"%lx" : "%016lx", cm); + } + + return 0; +} + +int prox_core_to_str(char *dst, const size_t size, const int with_master) +{ + uint32_t lcore_id = -1; + uint32_t first = 1; + + *dst = 0; + lcore_id - 1; + while (prox_core_next(&lcore_id, with_master) == 0) { + /* Stop printing to string if there is not engough + space left. Assume that adding 1 core to the string + will take at most 5 + 1 bytes implying that + lcore_id < 999. Check if ther is space for another + 6 bytes to add an elipsis */ + if (12 + strlen(dst) > size) { + if (6 + strlen(dst) > size) { + snprintf(dst + strlen(dst), size - strlen(dst), ", ..."); + return 0; + } + return -1; + } + + snprintf(dst + strlen(dst), size - strlen(dst), first? "%u" : ", %u", lcore_id); + first = 0; + } + + return 0; +} + +void prox_core_clr(void) +{ + memset(prox_cfg.core_mask, 0, sizeof(prox_cfg.core_mask)); +} + +int prox_core_set_active(const uint32_t lcore_id) +{ + uint32_t cm_idx; + uint64_t cm; + + if (lcore_id > CM_ALL_N_BITS) + return -1; + + cm = __UINT64_C(1) << (lcore_id % CM_N_BITS); + cm_idx = PROX_CM_DIM - 1 - lcore_id / CM_N_BITS; + prox_cfg.core_mask[cm_idx] |= cm; + + return 0; +} diff --git a/VNFs/DPPD-PROX/prox_cfg.h b/VNFs/DPPD-PROX/prox_cfg.h new file mode 100644 index 00000000..a7d0e7ea --- /dev/null +++ b/VNFs/DPPD-PROX/prox_cfg.h @@ -0,0 +1,87 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PROX_CFG_H +#define _PROX_CFG_H + +#include <inttypes.h> + +#include "prox_globals.h" + +#define PROX_CM_STR_LEN (2 + 2 * sizeof(prox_cfg.core_mask) + 1) +#define PROX_CM_DIM (RTE_MAX_LCORE/(sizeof(uint64_t) * 8)) + +#define DSF_AUTOSTART 0x00000001 /* start all cores automatically */ +#define DSF_CHECK_INIT 0x00000002 /* check initialization sequence and exit */ +#define DSF_CHECK_SYNTAX 0x00000004 /* check configuration file syntax and exit */ +#define DSF_SHUFFLE 0x00000008 /* shuffle memory addresses within memory pool */ +#define DSF_WAIT_ON_QUIT 0x00000010 /* wait for all cores to stop before exiting */ +#define DSF_LISTEN_TCP 0x00000020 /* Listen on TCP port 8474 for input */ +#define DSF_LISTEN_UDS 0x00000040 /* Listen on /tmp/prox.sock for input */ +#define DSF_DAEMON 0x00000080 /* Run process as Daemon */ +#define UNIQUE_MEMPOOL_PER_SOCKET 0x00000100 /* Use Only one mempool per socket, shared between all cores on that socket */ +#define DSF_KEEP_SRC_MAC 0x00000200 /* In gen mode, do not overwrite src_mac by mac of physical port */ +#define DSF_MP_RINGS 0x00000400 /* Use Multi Producer rings when possible */ +#define DSF_USE_DUMMY_DEVICES 0x00000800 /* Instead of relying on real PCI devices, create null devices instead */ +#define DSF_USE_DUMMY_CPU_TOPO 0x00001000 /* Instead of relying on the cpu topology, load a cpu toplogy that will work with all cfgs. */ +#define DSF_DISABLE_CMT 0x00002000 /* CMT disabled */ +#define DSF_LIST_TASK_MODES 0x00004000 /* list supported task modes and exit */ +#define DSF_ENABLE_BYPASS 0x00008000 /* Use Multi Producer rings to enable ring bypass */ + +#define MAX_PATH_LEN 1024 + +enum prox_ui { + PROX_UI_CURSES, + PROX_UI_CLI, + PROX_UI_NONE, +}; + +struct prox_cfg { + enum prox_ui ui; /* By default, curses is used as a UI. 
*/ + char update_interval_str[16]; + int use_stats_logger; + uint32_t flags; /* TGSF_* flags above */ + uint32_t master; /* master core to run user interface on */ + uint64_t core_mask[PROX_CM_DIM]; /* Active cores without master core */ + uint32_t start_time; /* if set (not 0), average pps will be calculated starting after start_time seconds */ + uint32_t duration_time; /* if set (not 0), prox will exit duration_time seconds after start_time */ + char name[MAX_NAME_SIZE]; + uint8_t log_name_pid; + char log_name[MAX_PATH_LEN]; + int32_t cpe_table_ports[PROX_MAX_PORTS]; + uint32_t logbuf_size; + uint32_t logbuf_pos; + char *logbuf; +}; + +extern struct prox_cfg prox_cfg; + +int prox_core_active(const uint32_t lcore_id, const int with_master); + +/* Returns non-zero if supplied lcore_id is the last active core. The + first core can be found by setting *lcore_id == -1. The function is + indented to be used as an interator. */ +int prox_core_next(uint32_t *lcore_id, const int with_master); + +int prox_core_to_hex(char *dst, const size_t size, const int with_master); + +int prox_core_to_str(char *dst, const size_t size, const int with_master); + +void prox_core_clr(void); + +int prox_core_set_active(const uint32_t lcore_id); + +#endif /* __PROX_CFG_H_ */ diff --git a/VNFs/DPPD-PROX/prox_cksum.c b/VNFs/DPPD-PROX/prox_cksum.c new file mode 100644 index 00000000..b69c06f6 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_cksum.c @@ -0,0 +1,148 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "prox_cksum.h" +#include "prox_port_cfg.h" +#include <rte_byteorder.h> +#include "log.h" + +/* compute IP 16 bit checksum */ +void prox_ip_cksum_sw(struct ipv4_hdr *buf) +{ + const uint16_t size = sizeof(struct ipv4_hdr); + uint32_t cksum = 0; + uint32_t nb_dwords; + uint32_t tail, mask; + uint32_t *pdwd = (uint32_t *)buf; + + /* compute 16 bit checksum using hi and low parts of 32 bit integers */ + for (nb_dwords = (size >> 2); nb_dwords > 0; --nb_dwords) { + cksum += (*pdwd >> 16); + cksum += (*pdwd & 0xFFFF); + ++pdwd; + } + + /* deal with the odd byte length */ + if (size & 0x03) { + tail = *pdwd; + /* calculate mask for valid parts */ + mask = 0xFFFFFFFF << ((size & 0x03) << 3); + /* clear unused bits */ + tail &= ~mask; + + cksum += (tail >> 16) + (tail & 0xFFFF); + } + + cksum = (cksum >> 16) + (cksum & 0xFFFF); + cksum = (cksum >> 16) + (cksum & 0xFFFF); + + buf->hdr_checksum = ~((uint16_t)cksum); +} + +static uint16_t calc_pseudo_checksum(uint8_t ipproto, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +{ + uint32_t csum = 0; + + csum += (src_ip_addr >> 16) + (src_ip_addr & 0xFFFF); + csum += (dst_ip_addr >> 16) + (dst_ip_addr & 0xFFFF); + csum += rte_bswap16(ipproto) + rte_bswap16(len); + csum = (csum >> 16) + (csum & 0xFFFF); + return csum; +} + +static void prox_write_udp_pseudo_hdr(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +{ + /* Note that the csum is not complemented, while the pseaudo + header checksum is calculated as "... the 16-bit one's + complement of the one's complement sum of a pseudo header + of information ...", the psuedoheader forms as a basis for + the actual checksum calculated later either in software or + hardware. 
*/ + udp->dgram_cksum = calc_pseudo_checksum(IPPROTO_UDP, len, src_ip_addr, dst_ip_addr); +} + +static void prox_write_tcp_pseudo_hdr(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +{ + tcp->cksum = calc_pseudo_checksum(IPPROTO_TCP, len, src_ip_addr, dst_ip_addr); +} + +void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload) +{ + prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & IPV4_CKSUM); + +#ifndef SOFT_CRC + if (cksum_offload & UDP_CKSUM) + mbuf->ol_flags |= PKT_TX_UDP_CKSUM; +#endif + + uint32_t l4_len = rte_bswap16(pip->total_length) - l3_len; + if (pip->next_proto_id == IPPROTO_UDP) { + struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t*)pip) + l3_len); +#ifndef SOFT_CRC + if (cksum_offload & UDP_CKSUM) + prox_write_udp_pseudo_hdr(udp, l4_len, pip->src_addr, pip->dst_addr); + else +#endif + prox_udp_cksum_sw(udp, l4_len, pip->src_addr, pip->dst_addr); + } else if (pip->next_proto_id == IPPROTO_TCP) { + struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t*)pip) + l3_len); +#ifndef SOFT_CRC + if (cksum_offload & UDP_CKSUM) + prox_write_tcp_pseudo_hdr(tcp, l4_len, pip->src_addr, pip->dst_addr); + else +#endif + prox_tcp_cksum_sw(tcp, l4_len, pip->src_addr, pip->dst_addr); + } +} + +static uint16_t checksum_byte_seq(uint16_t *buf, uint16_t len) +{ + uint32_t csum = 0; + + while (len > 1) { + csum += *buf; + while (csum >> 16) { + csum &= 0xffff; + csum +=1; + } + buf++; + len -= 2; + } + + if (len) { + csum += *(uint8_t*)buf; + while (csum >> 16) { + csum &= 0xffff; + csum +=1; + } + } + return ~csum; +} + +void prox_udp_cksum_sw(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +{ + prox_write_udp_pseudo_hdr(udp, len, src_ip_addr, dst_ip_addr); + uint16_t csum = checksum_byte_seq((uint16_t *)udp, len); + udp->dgram_cksum = csum; +} + +void prox_tcp_cksum_sw(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t 
dst_ip_addr) +{ + prox_write_tcp_pseudo_hdr(tcp, len, src_ip_addr, dst_ip_addr); + + uint16_t csum = checksum_byte_seq((uint16_t *)tcp, len); + tcp->cksum = csum; +} diff --git a/VNFs/DPPD-PROX/prox_cksum.h b/VNFs/DPPD-PROX/prox_cksum.h new file mode 100644 index 00000000..c11b17a5 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_cksum.h @@ -0,0 +1,68 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _PROX_CKSUM_H_ +#define _PROX_CKSUM_H_ + +#include <inttypes.h> +#include <string.h> +#include <stdio.h> +#include <rte_version.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_tcp.h> +#include <rte_mbuf.h> + +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) +#define CALC_TX_OL(l2_len, l3_len) ((uint64_t)(l2_len) | (uint64_t)(l3_len) << 7) +#else +#define CALC_TX_OL(l2_len, l3_len) (((uint64_t)(l2_len) << 9) | (uint64_t)(l3_len)) +#endif + +static void prox_ip_cksum_hw(struct rte_mbuf *mbuf, uint16_t l2_len, uint16_t l3_len) +{ +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) + mbuf->pkt.vlan_macip.data = CALC_TX_OL(l2_len, l3_len); +#else + mbuf->tx_offload = CALC_TX_OL(l2_len, l3_len); +#endif + mbuf->ol_flags |= PKT_TX_IP_CKSUM; +} + +void prox_ip_cksum_sw(struct ipv4_hdr *buf); + +static inline void prox_ip_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int offload) +{ + buf->hdr_checksum = 0; +#ifdef SOFT_CRC + prox_ip_cksum_sw(buf); +#else + if (offload) + 
prox_ip_cksum_hw(mbuf, l2_len, l3_len); + else { + prox_ip_cksum_sw(buf); + /* TODO: calculate UDP checksum */ + } +#endif +} + +void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int cksum_offload); + +/* src_ip_addr/dst_ip_addr are in network byte order */ +void prox_udp_cksum_sw(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr); +void prox_tcp_cksum_sw(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr); + +#endif /* _PROX_CKSUM_H_ */ diff --git a/VNFs/DPPD-PROX/prox_globals.h b/VNFs/DPPD-PROX/prox_globals.h new file mode 100644 index 00000000..b09f3a52 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_globals.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#define PROX_MAX_PORTS 16 +#define MAX_TASKS_PER_CORE 8 +#define MAX_SOCKETS 64 +#define MAX_NAME_SIZE 64 +#define MAX_PROTOCOLS 3 +#define MAX_RINGS_PER_TASK (MAX_WT_PER_LB*MAX_PROTOCOLS) +#define MAX_WT_PER_LB 64 diff --git a/VNFs/DPPD-PROX/prox_lua.c b/VNFs/DPPD-PROX/prox_lua.c new file mode 100644 index 00000000..b5c2fec9 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_lua.c @@ -0,0 +1,411 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <stdlib.h> + +#include "prox_lua.h" +#include "lua_compat.h" +#include "parse_utils.h" + +static struct lua_State *lua_instance; + +static int l_mask(lua_State *L) +{ + uint32_t val, mask; + + if (lua_gettop(L) != 2) { + return luaL_error(L, "Expecting 2 argument and got %d\n", lua_gettop(L)); + } + if (!lua_isnumber(L, -1) || !lua_isnumber(L, -2)) { + return luaL_error(L, "Expecting (integer, integer) as arguments\n"); + } + val = lua_tonumber(L, -1); + mask = lua_tonumber(L, -2); + + lua_pushinteger(L, val & mask); + + return 1; +} + +static int l_server_content(lua_State *L) +{ + uint32_t beg, len; + + if (lua_gettop(L) != 2) { + return luaL_error(L, "Expecting 2 argument and got %d\n", lua_gettop(L)); + } + if (!lua_isnumber(L, -1) || !lua_isnumber(L, -2)) { + return luaL_error(L, "Expecting (integer, integer) as arguments\n"); + } + len = lua_tonumber(L, -1); + beg = lua_tonumber(L, -2); + + lua_createtable(L, 0, 3); + + lua_pushinteger(L, beg); + lua_setfield(L, -2, "beg"); + lua_pushinteger(L, len); + lua_setfield(L, -2, "len"); + lua_pushinteger(L, 0); + lua_setfield(L, -2, "peer"); + + return 1; +} + +static int l_client_content(lua_State *L) +{ + uint32_t beg, len; + + if (lua_gettop(L) != 2) { + return luaL_error(L, "Expecting 2 argument and got %d\n", lua_gettop(L)); + } + if (!lua_isnumber(L, -1) || !lua_isnumber(L, -2)) { + return luaL_error(L, "Expecting (integer, integer) as arguments\n"); + } + len = lua_tonumber(L, -1); + beg = lua_tonumber(L, -2); + + lua_createtable(L, 0, 3); + + 
lua_pushinteger(L, beg); + lua_setfield(L, -2, "beg"); + lua_pushinteger(L, len); + lua_setfield(L, -2, "len"); + lua_pushinteger(L, 1); + lua_setfield(L, -2, "peer"); + + return 1; +} + +static int l_bin_read(lua_State *L) +{ + const char *file_name = lua_tostring(L, -1); + int beg = lua_tonumber(L, -2); + int len = lua_gettop(L) == 3? lua_tonumber(L, -3) : -1; + + if (lua_gettop(L) == 2) { + if (!lua_isnumber(L, -1) || !lua_isstring(L, -2)) { + return luaL_error(L, "Expecting (string, integer) as arguments\n"); + } + + file_name = lua_tostring(L, -2); + beg = lua_tonumber(L, -1); + len = -1; + } + else if (lua_gettop(L) == 3) { + if (!lua_isnumber(L, -1) || !lua_isnumber(L, -2) || !lua_isstring(L, 3)) { + return luaL_error(L, "Expecting (string, integer, integer) as arguments\n"); + } + + file_name = lua_tostring(L, -3); + beg = lua_tonumber(L, -2); + len = lua_tonumber(L, -1); + } + else + return luaL_error(L, "Expecting 2 or 3 arguments\n"); + + lua_createtable(L, 0, 3); + + lua_pushstring(L, file_name); + lua_setfield(L, -2, "file_name"); + lua_pushinteger(L, beg); + lua_setfield(L, -2, "beg"); + lua_pushinteger(L, len); + lua_setfield(L, -2, "len"); + + return 1; +} + +static int l_mac(lua_State *L) +{ + int mac[6]; + + if (lua_isstring(L, -1)) { + const char *arg = lua_tostring(L, -1); + char arg2[128]; + strncpy(arg2, arg, sizeof(arg2)); + + char *p = arg2; + int count = 0; + + while ((p = strchr(p, ':'))) { + count++; + p++; + } + p = arg2; + if (count != 5) + return luaL_error(L, "Invalid MAC format\n"); + + lua_createtable(L, 6, 0); + for (size_t i = 0; i < 6; ++i) { + char *n = strchr(p, ':'); + if (n) + *n = 0; + if (strlen(p) != 2) { + return luaL_error(L, "Invalid MAC format\n"); + } + + lua_pushinteger(L, strtol(p, NULL, 16)); + lua_rawseti(L, -2, i + 1); + p = n + 1; + } + return 1; + } + + return luaL_error(L, "Invalid argument\n"); +} + +static int l_ip(lua_State *L) +{ + int ip[4]; + if (lua_isnumber(L, -1)) { + uint32_t arg = lua_tointeger(L, 
-1); + + ip[0] = arg >> 24 & 0xff; + ip[1] = arg >> 16 & 0xff; + ip[2] = arg >> 8 & 0xff; + ip[3] = arg >> 0 & 0xff; + + lua_createtable(L, 4, 0); + for (size_t i = 0; i < 4; ++i) { + lua_pushinteger(L, ip[i]); + lua_rawseti(L, -2, i + 1); + } + + return 1; + } + if (lua_isstring(L, -1)) { + const char *arg = lua_tostring(L, -1); + + if (sscanf(arg, "%d.%d.%d.%d", &ip[0], &ip[1], &ip[2], &ip[3]) != 4) { + return luaL_error(L, "Invalid IP address format\n"); + } + + lua_createtable(L, 4, 0); + for (size_t i = 0; i < 4; ++i) { + lua_pushinteger(L, ip[i]); + lua_rawseti(L, -2, i + 1); + } + + return 1; + } + + return luaL_error(L, "Invalid argument\n"); +} + +static int l_ip6(lua_State *L) +{ + int ip[16]; + + if (!lua_isstring(L, -1)) { + return luaL_error(L, "Invalid argument type\n"); + } + + const char *arg = lua_tostring(L, -1); + char arg2[64]; + char *addr_parts[8]; + int n_parts = 0; + size_t str_len = strlen(arg); + int next_str = 1; + int ret; + + strncpy(arg2, arg, sizeof(arg2)); + + for (size_t i = 0; i < str_len; ++i) { + if (next_str) { + if (n_parts == 8) + return luaL_error(L, "IPv6 address can't be longer than 16 bytes\n"); + addr_parts[n_parts++] = &arg2[i]; + next_str = 0; + + } + if (arg2[i] == ':') { + arg2[i] = 0; + next_str = 1; + } + } + + int omitted = 0; + + for (int i = 0, j = 0; i < n_parts; ++i) { + if (*addr_parts[i] == 0) { + if (omitted == 0) { + return luaL_error(L, "Can omit zeros only once\n"); + } + omitted = 1; + j += 8 - n_parts; + } + else { + uint16_t w = strtoll(addr_parts[i], NULL, 16); + ip[j++] = (w >> 8) & 0xff; + ip[j++] = w & 0xff; + } + } + + lua_createtable(L, 16, 0); + for (size_t i = 0; i < 16; ++i) { + lua_pushinteger(L, ip[i]); + lua_rawseti(L, -2, i + 1); + } + + return 1; +} + +static int l_cidr(lua_State *L) +{ + const char *arg = lua_tostring(L, -1); + + char tmp[128]; + strncpy(tmp, arg, sizeof(tmp)); + + char *slash = strchr(tmp, '/'); + *slash = 0; + slash++; + + lua_createtable(L, 0, 2); + lua_pushstring(L, 
"ip"); + + lua_pushstring(L, tmp); + l_ip(L); + lua_remove(L, -2); + + lua_settable(L, -3); + + lua_pushstring(L, "depth"); + lua_pushinteger(L, atoi(slash)); + lua_settable(L, -3); + return 1; +} + +static int l_cidr6(lua_State *L) +{ + const char *arg = lua_tostring(L, -1); + + char tmp[128]; + strncpy(tmp, arg, sizeof(tmp)); + + char *slash = strchr(tmp, '/'); + *slash = 0; + slash++; + + lua_createtable(L, 0, 2); + lua_pushstring(L, "ip6"); + + lua_pushstring(L, tmp); + l_ip6(L); + lua_remove(L, -2); + + lua_settable(L, -3); + + lua_pushstring(L, "depth"); + lua_pushinteger(L, atoi(slash)); + lua_settable(L, -3); + return 1; +} + +static int l_val_mask(lua_State *L) +{ + if (!lua_isinteger(L, -2)) + return luaL_error(L, "Argument 1 is not an integer\n"); + if (!lua_isinteger(L, -1)) + return luaL_error(L, "Argument 2 is not an integer\n"); + + uint32_t val = lua_tointeger(L, -2); + uint32_t mask = lua_tointeger(L, -1); + + lua_createtable(L, 0, 2); + lua_pushstring(L, "val"); + lua_pushinteger(L, val); + lua_settable(L, -3); + + lua_pushstring(L, "mask"); + lua_pushinteger(L, mask); + lua_settable(L, -3); + + return 1; +} + +static int l_val_range(lua_State *L) +{ + if (!lua_isinteger(L, -2)) + return luaL_error(L, "Argument 1 is not an integer\n"); + if (!lua_isinteger(L, -1)) + return luaL_error(L, "Argument 2 is not an integer\n"); + + uint32_t beg = lua_tointeger(L, -2); + uint32_t end = lua_tointeger(L, -1); + + lua_createtable(L, 0, 2); + lua_pushstring(L, "beg"); + lua_pushinteger(L, beg); + lua_settable(L, -3); + + lua_pushstring(L, "end"); + lua_pushinteger(L, end); + lua_settable(L, -3); + + return 1; +} + +static int l_task_count(lua_State *L) +{ + struct core_task_set cts; + const char *str; + + if (!lua_isstring(L, -1)) + return luaL_error(L, "Argument 1 is not an string\n"); + str = lua_tostring(L, -1); + if (parse_task_set(&cts, str)) + return luaL_error(L, "Invalid core task set syntax\n"); + lua_pushinteger(L, cts.n_elems); + return 1; +} + 
+struct lua_State *prox_lua(void) +{ + if (!lua_instance) { + lua_instance = luaL_newstate(); + + luaL_openlibs(lua_instance); + + lua_pushcfunction(lua_instance, l_ip); + lua_setglobal(lua_instance, "ip"); + lua_pushcfunction(lua_instance, l_ip6); + lua_setglobal(lua_instance, "ip6"); + lua_pushcfunction(lua_instance, l_cidr); + lua_setglobal(lua_instance, "cidr"); + lua_pushcfunction(lua_instance, l_cidr6); + lua_setglobal(lua_instance, "cidr6"); + lua_pushcfunction(lua_instance, l_mac); + lua_setglobal(lua_instance, "mac"); + lua_pushcfunction(lua_instance, l_mask); + lua_setglobal(lua_instance, "mask"); + lua_pushcfunction(lua_instance, l_val_mask); + lua_setglobal(lua_instance, "val_mask"); + lua_pushcfunction(lua_instance, l_val_range); + lua_setglobal(lua_instance, "val_range"); + lua_pushcfunction(lua_instance, l_bin_read); + lua_setglobal(lua_instance, "bin_read"); + lua_pushcfunction(lua_instance, l_client_content); + lua_setglobal(lua_instance, "client_content"); + lua_pushcfunction(lua_instance, l_server_content); + lua_setglobal(lua_instance, "server_content"); + lua_pushcfunction(lua_instance, l_task_count); + lua_setglobal(lua_instance, "task_count"); + } + return lua_instance; +} diff --git a/VNFs/DPPD-PROX/prox_lua.h b/VNFs/DPPD-PROX/prox_lua.h new file mode 100644 index 00000000..8d29df69 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_lua.h @@ -0,0 +1,27 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _PROX_LUA_H_ +#define _PROX_LUA_H_ + +#include <lua.h> +#include <lauxlib.h> +#include <lualib.h> +#include "lua_compat.h" + +struct lua_State *prox_lua(void); + +#endif /* _PROX_LUA_H_ */ diff --git a/VNFs/DPPD-PROX/prox_lua_types.c b/VNFs/DPPD-PROX/prox_lua_types.c new file mode 100644 index 00000000..7a0b6e08 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_lua_types.c @@ -0,0 +1,1156 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <lua.h> +#include <lauxlib.h> +#include <lualib.h> + +#include <string.h> +#include <rte_ether.h> +#include <rte_lpm.h> +#include <rte_lpm6.h> +#include <rte_acl.h> +#include <rte_version.h> +#include <rte_hash_crc.h> + +#include "prox_malloc.h" +#include "etypes.h" +#include "prox_lua.h" +#include "log.h" +#include "quit.h" +#include "defines.h" +#include "prox_globals.h" +#include "prox_lua_types.h" +#include "ip_subnet.h" +#include "hash_entry_types.h" +#include "handle_qinq_encap4.h" +#include "toeplitz.h" +#include "handle_lb_5tuple.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +static char error_str[1024]; +static char *cur_pos; + +const char *get_lua_to_errors(void) +{ + return error_str; +} + +static void null_terminate_error(void) +{ + size_t diff = cur_pos - error_str; + + if (diff >= sizeof(error_str) && + error_str[sizeof(error_str) - 1] != 0) + error_str[sizeof(error_str) - 1] = 0; +} + +__attribute__((format(printf, 1, 2))) static void set_err(const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + + cur_pos = error_str; + cur_pos += vsnprintf(cur_pos, sizeof(error_str) - (cur_pos - error_str), fmt, ap); + null_terminate_error(); + + va_end(ap); +} + +__attribute__((format(printf, 1, 2))) static void concat_err(const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + + cur_pos += vsnprintf(cur_pos, sizeof(error_str) - (cur_pos - error_str), fmt, ap); + null_terminate_error(); + + va_end(ap); +} + +/* Make sure that an element is on the top of the stack (zero on success) */ +int lua_getfrom(struct lua_State *L, enum lua_place from, const char *name) +{ + switch (from) { + case STACK: + return lua_gettop(L) > 0? 
0 : -1; + case TABLE: + if (!lua_istable(L, -1)) { + set_err("Failed to get field '%s' from table (no table)\n", name); + return -1; + } + + lua_pushstring(L, name); + lua_gettable(L, -2); + if (lua_isnil(L, -1)) { + set_err("Field '%s' is missing from table\n", name); + lua_pop(L, 1); + return -1; + } + return 1; + case GLOBAL: + lua_getglobal(L, name); + if (lua_isnil(L, -1)) { + set_err("Couldn't find global data '%s'\n", name); + lua_pop(L, 1); + return -1; + } + return 1; + } + return -1; +} + +int lua_to_ip(struct lua_State *L, enum lua_place from, const char *name, uint32_t *ip) +{ + uint32_t n_entries; + uint32_t ip_array[4]; + ptrdiff_t v; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + lua_len(L, -1); + n_entries = lua_tointeger(L, -1); + lua_pop(L, 1); + + if (n_entries != 4) { + set_err("Invalid IPv4 format\n"); + return -1; + } + + *ip = 0; + for (int i = 0; i < 4; ++i) { + lua_pushinteger(L, i + 1); + lua_gettable(L, -2); + v = lua_tointeger(L, -1); + lua_pop(L, 1); + if (!(v >= 0 && v <= 255)) { + set_err("Invalid IPv4 format\n"); + return -1; + } + *ip |= v << (24 - i*8); + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_ip6(struct lua_State *L, enum lua_place from, const char *name, uint8_t *ip) +{ + uint32_t n_entries; + ptrdiff_t v; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + lua_len(L, -1); + n_entries = lua_tointeger(L, -1); + lua_pop(L, 1); + + if (n_entries != 16) { + set_err("Invalid IPv6 format\n"); + return -1; + } + + for (int i = 0; i < 16; ++i) { + lua_pushinteger(L, i + 1); + lua_gettable(L, -2); + v = lua_tointeger(L, -1); + lua_pop(L, 1); + ip[i] = v; + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, struct ether_addr *mac) +{ + uint32_t n_entries; + uint32_t mac_array[4]; + ptrdiff_t v; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + lua_len(L, -1); + n_entries = 
lua_tointeger(L, -1); + lua_pop(L, 1); + + if (n_entries != 6) { + set_err("Invalid MAC format\n"); + return -1; + } + + for (int i = 0; i < 6; ++i) { + lua_pushinteger(L, i + 1); + lua_gettable(L, -2); + v = lua_tointeger(L, -1); + lua_pop(L, 1); + if (!(v >= 0 && v <= 255)) { + set_err("Invalid MAC format\n"); + return -1; + } + mac->addr_bytes[i] = v; + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_cidr(struct lua_State *L, enum lua_place from, const char *name, struct ip4_subnet *cidr) +{ + uint32_t depth, ip; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("CIDR is not a table\n"); + return -1; + } + + if (lua_to_ip(L, TABLE, "ip", &ip) || + lua_to_int(L, TABLE, "depth", &depth)) { + return -1; + } + cidr->ip = ip; + cidr->prefix = depth; + + lua_pop(L, pop); + return 0; +} + +int lua_to_cidr6(struct lua_State *L, enum lua_place from, const char *name, struct ip6_subnet *cidr) +{ + uint32_t depth; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("CIDR6 is not a table\n"); + return -1; + } + + if (lua_to_ip6(L, TABLE, "ip6", cidr->ip) || + lua_to_int(L, TABLE, "depth", &depth)) { + return -1; + } + cidr->prefix = depth; + + lua_pop(L, pop); + return 0; +} + +int lua_to_val_mask(struct lua_State *L, enum lua_place from, const char *name, struct val_mask *val_mask) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("data entry is not a table\n"); + return -1; + } + + if (lua_to_int(L, TABLE, "val", &val_mask->val) || + lua_to_int(L, TABLE, "mask", &val_mask->mask)) + return -1; + + lua_pop(L, pop); + return 0; +} + +int lua_to_val_range(struct lua_State *L, enum lua_place from, const char *name, struct val_range *val_range) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("data entry is not a 
table\n"); + return -1; + } + + if (lua_to_int(L, TABLE, "beg", &val_range->beg) || + lua_to_int(L, TABLE, "end", &val_range->end)) + return -1; + + lua_pop(L, pop); + return 0; +} + +int lua_to_action(struct lua_State *L, enum lua_place from, const char *name, enum acl_action *action) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_isstring(L, -1)) { + set_err("data entry is not a table\n"); + return -1; + } + + const char *s = lua_tostring(L, -1); + + if (!strcmp(s, "drop")) + *action = ACL_DROP; + else if (!strcmp(s, "allow")) + *action = ACL_ALLOW; + else if (!strcmp(s, "rate_limit")) + *action = ACL_RATE_LIMIT; + else + return -1; + + lua_pop(L, pop); + return 0; +} + +int lua_to_string(struct lua_State *L, enum lua_place from, const char *name, char *dst, size_t size) +{ + const char *str; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_isstring(L, -1)) { + plog_err("data is not an integer\n"); + return -1; + } + str = lua_tostring(L, -1); + + strncpy(dst, str, size); + + lua_pop(L, pop); + return 0; +} + +int lua_to_port(struct lua_State *L, enum lua_place from, const char *name, uint16_t *port) +{ + double tmp = 0; + int ret; + + ret = lua_to_double(L, from, name, &tmp); + if (ret == 0) + *port = tmp; + return ret; +} + +int lua_to_int(struct lua_State *L, enum lua_place from, const char *name, uint32_t *val) +{ + double tmp = 0; + int ret; + + ret = lua_to_double(L, from, name, &tmp); + if (ret == 0) + *val = tmp; + return ret; +} + +int lua_to_double(struct lua_State *L, enum lua_place from, const char *name, double *val) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_isnumber(L, -1)) { + set_err("data is not a number\n"); + return -1; + } + *val = lua_tonumber(L, -1); + + lua_pop(L, pop); + return 0; +} + +int lua_to_routes4_entry(struct lua_State *L, enum lua_place from, const char *name, struct ip4_subnet *cidr, uint32_t *nh_idx) +{ + 
int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Can't read routes4 entry since data is not a table\n"); + return -1; + } + + if (lua_to_cidr(L, TABLE, "cidr", cidr) || + lua_to_int(L, TABLE, "next_hop_id", nh_idx)) { + return -1; + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_next_hop(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct next_hop **nh) +{ + struct next_hop *ret; + uint32_t next_hop_index; + uint32_t port_id; + uint32_t ip; + uint32_t mpls; + struct ether_addr mac; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Can't read next hop since data is not a table\n"); + return -1; + } + + ret = prox_zmalloc(sizeof(*ret) * MAX_HOP_INDEX, socket); + PROX_PANIC(ret == NULL, "Could not allocate memory for next hop\n"); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_int(L, TABLE, "id", &next_hop_index) || + lua_to_int(L, TABLE, "port_id", &port_id) || + lua_to_ip(L, TABLE, "ip", &ip) || + lua_to_mac(L, TABLE, "mac", &mac) || + lua_to_int(L, TABLE, "mpls", &mpls)) + return -1; + + PROX_PANIC(port_id >= PROX_MAX_PORTS, "Port id too high (only supporting %d ports)\n", PROX_MAX_PORTS); + PROX_PANIC(next_hop_index >= MAX_HOP_INDEX, "Next-hop to high (only supporting %d next hops)\n", MAX_HOP_INDEX); + + ret[next_hop_index].mac_port.out_idx = port_id; + ret[next_hop_index].ip_dst = ip; + + ret[next_hop_index].mac_port.mac = mac; + ret[next_hop_index].mpls = mpls; + + lua_pop(L, 1); + } + + *nh = ret; + lua_pop(L, pop); + return 0; +} + +int lua_to_next_hop6(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct next_hop6 **nh) +{ + struct next_hop6 *ret; + uint32_t next_hop_index, port_id, mpls; + struct ether_addr mac; + uint8_t ip[16]; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Data is not 
a table\n"); + return -1; + } + + ret = prox_zmalloc(sizeof(*ret) * MAX_HOP_INDEX, socket); + PROX_PANIC(ret == NULL, "Could not allocate memory for next hop\n"); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_int(L, TABLE, "id", &next_hop_index) || + lua_to_int(L, TABLE, "port_id", &port_id) || + lua_to_ip6(L, TABLE, "ip6", ip) || + lua_to_mac(L, TABLE, "mac", &mac) || + lua_to_int(L, TABLE, "mpls", &mpls)) + return -1; + + PROX_PANIC(port_id >= PROX_MAX_PORTS, "Port id too high (only supporting %d ports)\n", PROX_MAX_PORTS); + PROX_PANIC(next_hop_index >= MAX_HOP_INDEX, "Next-hop to high (only supporting %d next hops)\n", MAX_HOP_INDEX); + + ret[next_hop_index].mac_port.out_idx = port_id; + memcpy(ret[next_hop_index].ip_dst,ip, 16); + + ret[next_hop_index].mac_port.mac = mac; + ret[next_hop_index].mpls = mpls; + + lua_pop(L, 1); + } + + *nh = ret; + lua_pop(L, pop); + return 0; +} + +int lua_to_routes4(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm4 *lpm) +{ + struct ip4_subnet dst; + uint32_t next_hop_index; + uint32_t n_loaded_rules; + uint32_t n_tot_rules; + struct rte_lpm *new_lpm; + char lpm_name[64]; + int ret; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + snprintf(lpm_name, sizeof(lpm_name), "IPv4_lpm_s%u", socket); + + if (!lua_istable(L, -1)) { + set_err("Data is not a table\n"); + return -1; + } + + lua_len(L, -1); + n_tot_rules = lua_tointeger(L, -1); + n_loaded_rules = 0; + lua_pop(L, 1); +#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1) + struct rte_lpm_config conf; + conf.max_rules = 2 * n_tot_rules; + conf.number_tbl8s = 256; + conf.flags = 0; + new_lpm = rte_lpm_create(lpm_name, socket, &conf); +#else + new_lpm = rte_lpm_create(lpm_name, socket, 2 * n_tot_rules, 0); +#endif + PROX_PANIC(NULL == new_lpm, "Failed to allocate lpm\n"); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_routes4_entry(L, STACK, NULL, &dst, &next_hop_index)) { + set_err("Failed to 
read entry while setting up lpm\n"); + return -1; + } + ret = rte_lpm_add(new_lpm, dst.ip, dst.prefix, next_hop_index); + + if (ret != 0) { + set_err("Failed to add (%d) index %u ip %x/%u to lpm\n", + ret, next_hop_index, dst.ip, dst.prefix); + } + else if (++n_loaded_rules % 10000 == 0) { + plog_info("Route %d added\n", n_loaded_rules); + } + + lua_pop(L, 1); + } + + lpm->rte_lpm = new_lpm; + lpm->n_used_rules = n_loaded_rules; + lpm->n_free_rules = 2 * n_tot_rules - n_loaded_rules; + + lua_pop(L, pop); + return 0; +} + +int lua_to_lpm4(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm4 **lpm) +{ + struct lpm4 *ret; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + ret = prox_zmalloc(sizeof(struct lpm4), socket); + + if (!lua_istable(L, -1)) { + set_err("Can't read lpm4 since data is not a table\n"); + return -1; + } + + if (lua_to_routes4(L, TABLE, "routes", socket, ret) || + lua_to_next_hop(L, TABLE, "next_hops", socket, &ret->next_hops)) { + return -1; + } + + if (ret->rte_lpm) + plog_info("Loaded %d routes\n", ret->n_used_rules); + + *lpm = ret; + lua_pop(L, pop); + return 0; +} + +int lua_to_lpm6(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm6 **lpm) +{ + struct lpm6 *ret; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Lpm6 is not a table\n"); + return -1; + } + + ret = prox_zmalloc(sizeof(struct lpm6), socket); + + if (lua_to_routes6(L, TABLE, "routes6", socket, ret) || + lua_to_next_hop6(L, TABLE, "next_hops6", socket, &ret->next_hops)) + return -1; + + if (ret->rte_lpm6) + plog_info("Loaded %d routes\n", ret->n_used_rules); + + *lpm = ret; + + lua_pop(L, pop); + return 0; +} + +static int lua_to_lpm6_entry(struct lua_State *L, enum lua_place from, const char *name, struct ip6_subnet *cidr, uint32_t *nh_idx) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if 
(!lua_istable(L, -1)) { + set_err("lpm6 entry is not a table\n"); + return -1; + } + if (lua_to_cidr6(L, TABLE, "cidr6", cidr) || + lua_to_int(L, TABLE, "next_hop_id", nh_idx)) { + return -1; + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_routes6(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm6 *lpm) +{ + struct ip6_subnet dst; + uint32_t next_hop_index; + uint32_t n_loaded_rules; + struct rte_lpm6 *new_lpm; + struct rte_lpm6_config config; + uint32_t n_tot_rules; + char lpm_name[64]; + int ret; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + snprintf(lpm_name, sizeof(lpm_name), "IPv6_lpm_s%u", socket); + + if (!lua_istable(L, -1)) { + set_err("Data is not a table\n"); + return -1; + } + + lua_len(L, -1); + n_tot_rules = lua_tointeger(L, -1); + n_loaded_rules = 0; + lua_pop(L, 1); + + config.max_rules = n_tot_rules; + config.number_tbl8s = (1 << 16); + config.flags = 0; + + new_lpm = rte_lpm6_create(lpm_name, socket, &config); + PROX_PANIC(NULL == new_lpm, "Failed to allocate lpm\n"); + + lua_pushnil(L); + while (lua_next(L, -2)) { + + if (lua_to_lpm6_entry(L, STACK, NULL, &dst, &next_hop_index)) { + concat_err("Failed to read entry while setting up lpm\n"); + return -1; + } + + ret = rte_lpm6_add(new_lpm, dst.ip, dst.prefix, next_hop_index); + + if (ret != 0) { + plog_warn("Failed to add (%d) index %u, %d\n", + ret, next_hop_index, dst.prefix); + } + else if (++n_loaded_rules % 10000 == 0) { + plog_info("Route %d added\n", n_loaded_rules); + } + + lua_pop(L, 1); + } + + lpm->rte_lpm6 = new_lpm; + lpm->n_used_rules = n_loaded_rules; + lpm->n_free_rules = 2 * n_tot_rules - n_loaded_rules; + + lua_pop(L, pop); + return 0; +} + +int lua_to_dscp(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, uint8_t **dscp) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("DSCP is not a table\n"); + return -1; + } + + 
uint32_t dscp_bits, tc, queue; + int status; + *dscp = prox_zmalloc(64, socket); + PROX_PANIC(dscp == NULL, "Error creating dscp table"); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_int(L, TABLE, "dscp", &dscp_bits) || + lua_to_int(L, TABLE, "tc", &tc) || + lua_to_int(L, TABLE, "queue", &queue)) { + concat_err("Failed to read dscp config\n"); + return -1; + } + + lua_pop(L, 1); + + (*dscp)[dscp_bits] = tc << 2 | queue; + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_qinq_gre_map(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct qinq_gre_map **qinq_gre_map) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + if (from != STACK) + set_err("QinQ to gre map is not a table\n"); + else + set_err("QinQ to gre map %s is not a table\n", name); + return -1; + } + + struct qinq_gre_map *ret; + uint32_t svlan, cvlan; + uint16_t be_svlan, be_cvlan; + uint32_t user; + uint32_t gre_id; + + uint32_t n_entries; + uint32_t idx = 0; + + lua_len(L, -1); + n_entries = lua_tointeger(L, -1); + lua_pop(L, 1); + + size_t mem_size = 0; + mem_size += sizeof(struct qinq_gre_map); + mem_size += n_entries * sizeof(struct qinq_gre_entry); + + ret = prox_zmalloc(mem_size, socket); + PROX_PANIC(ret == NULL, "Error creating gre_qinq map"); + + ret->count = n_entries; + + lua_pushnil(L); + while (lua_next(L, -2)) { + + if (lua_to_int(L, TABLE, "svlan_id", &svlan) || + lua_to_int(L, TABLE, "cvlan_id", &cvlan) || + lua_to_int(L, TABLE, "gre_id", &gre_id) || + lua_to_int(L, TABLE, "user_id", &user)) { + concat_err("Failed to read user table config\n"); + return -1; + } + + be_svlan = rte_bswap16((uint16_t)svlan); + be_cvlan = rte_bswap16((uint16_t)cvlan); + + ret->entries[idx].user = user; + ret->entries[idx].svlan = be_svlan; + ret->entries[idx].cvlan = be_cvlan; + ret->entries[idx].gre_id = gre_id; + ret->entries[idx].rss = toeplitz_hash((uint8_t *)&be_cvlan, 4); + + plog_dbg("elem %u: 
be_svlan=%x, be_cvlan=%x, rss_input=%x, rss=%x, gre_id=%x\n", + idx, be_svlan, be_cvlan, be_cvlan, ret->entries[idx].rss, gre_id); + + idx++; + lua_pop(L, 1); + } + + *qinq_gre_map = ret; + + lua_pop(L, pop); + return 0; +} + +int lua_to_user_table(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, uint16_t **user_table) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Data is not a table\n"); + return -1; + } + + uint32_t svlan, cvlan; + uint16_t be_svlan, be_cvlan; + uint32_t user; + + *user_table = prox_zmalloc(0x1000000 * sizeof(uint16_t), socket); + PROX_PANIC(*user_table == NULL, "Error creating user table"); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_int(L, TABLE, "svlan_id", &svlan) || + lua_to_int(L, TABLE, "cvlan_id", &cvlan) || + lua_to_int(L, TABLE, "user_id", &user)) { + concat_err("Failed to read user table config\n"); + return -1; + } + + be_svlan = rte_bswap16((uint16_t)svlan); + be_cvlan = rte_bswap16((uint16_t)cvlan); + + (*user_table)[PKT_TO_LUTQINQ(be_svlan, be_cvlan)] = user; + + lua_pop(L, 1); + } + + lua_pop(L, pop); + return 0; +} + +int lua_to_ip6_tun_binding(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct ipv6_tun_binding_table **data) +{ + struct ipv6_tun_binding_table *ret; + uint32_t n_entries; + uint32_t idx = 0; + uint32_t port = 0; + size_t memsize = 0; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Can't read IPv6 tunnel bindings entry since ret is not a table\n"); + return -1; + } + + lua_len(L, -1); + n_entries = lua_tointeger(L, -1); + lua_pop(L, 1); + + memsize = sizeof(struct ipv6_tun_binding_table); + memsize += n_entries * sizeof(struct ipv6_tun_binding_entry); + + ret = prox_zmalloc(memsize, socket); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_ip6(L, TABLE, "ip6", 
ret->entry[idx].endpoint_addr.bytes) || + lua_to_mac(L, TABLE, "mac", &ret->entry[idx].next_hop_mac) || + lua_to_ip(L, TABLE, "ip", &ret->entry[idx].public_ipv4) || + lua_to_int(L, TABLE, "port", &port)) + return -1; + + ret->entry[idx].public_port = port; + idx++; + lua_pop(L, 1); + } + ret->num_binding_entries = idx; + + plog_info("\tRead %d IPv6 Tunnel Binding entries\n", idx); + + *data = ret; + + lua_pop(L, pop); + return 0; +} + +int lua_to_cpe_table_data(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct cpe_table_data **data) +{ + struct cpe_table_data *ret; + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Can't read IPv6 tunnel bindings entry since ret is not a table\n"); + return -1; + } + + /* Each entry in the input table expands to multiple entries + depending on the number of hosts within the subnet. For + this reason, go through the whole table and find out how + many entries will be added in total. 
*/ + struct ip4_subnet cidr; + uint32_t n_entries = 0; + uint32_t port_idx, gre_id, svlan, cvlan, user; + struct ether_addr mac; + uint32_t idx = 0; + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_cidr(L, TABLE, "cidr", &cidr)) + return -1; + n_entries += ip4_subet_get_n_hosts(&cidr); + lua_pop(L, 1); + } + + ret = prox_zmalloc(sizeof(*ret) + n_entries * sizeof(struct cpe_table_entry), 0); + + lua_pushnil(L); + while (lua_next(L, -2)) { + if (lua_to_int(L, TABLE, "dest_id", &port_idx) || + lua_to_int(L, TABLE, "gre_id", &gre_id) || + lua_to_int(L, TABLE, "svlan_id", &svlan) || + lua_to_int(L, TABLE, "cvlan_id", &cvlan) || + lua_to_cidr(L, TABLE, "cidr", &cidr) || + lua_to_mac(L, TABLE, "mac", &mac) || + lua_to_int(L, TABLE, "user_id", &user)) + return -1; + + uint32_t n_hosts = ip4_subet_get_n_hosts(&cidr); + + for (uint32_t i = 0; i < n_hosts; ++i) { + ret->entries[idx].port_idx = port_idx; + ret->entries[idx].gre_id = gre_id; + ret->entries[idx].svlan = rte_bswap16(svlan); + ret->entries[idx].cvlan = rte_bswap16(cvlan); + ret->entries[idx].eth_addr = mac; + ret->entries[idx].user = user; + + PROX_PANIC(ip4_subnet_to_host(&cidr, i, &ret->entries[idx].ip), "Invalid host in address\n"); + ret->entries[idx].ip = rte_bswap32(ret->entries[idx].ip); + idx++; + } + + lua_pop(L, 1); + } + + ret->n_entries = n_entries; + *data = ret; + + lua_pop(L, pop); + return 0; +} + +struct acl4_rule { + struct rte_acl_rule_data data; + struct rte_acl_field fields[9]; +}; + +int lua_to_rules(struct lua_State *L, enum lua_place from, const char *name, struct rte_acl_ctx *ctx, uint32_t* n_max_rules, int use_qinq, uint16_t qinq_tag) +{ + int pop; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + set_err("Can't read rules since data is not a table\n"); + return -1; + } + + struct val_mask svlan, cvlan, ip_proto; + struct ip4_subnet src_cidr, dst_cidr; + struct val_range sport, dport; + enum acl_action action; + uint32_t n_rules = 0; 
+ lua_pushnil(L); + while (lua_next(L, -2)) { + if (n_rules == *n_max_rules) { + set_err("Too many rules"); + return -1; + } + if (use_qinq) { + if (lua_to_val_mask(L, TABLE, "svlan_id", &svlan) || + lua_to_val_mask(L, TABLE, "cvlan_id", &cvlan)) + return -1; + } + + if (lua_to_val_mask(L, TABLE, "ip_proto", &ip_proto) || + lua_to_cidr(L, TABLE, "src_cidr", &src_cidr) || + lua_to_cidr(L, TABLE, "dst_cidr", &dst_cidr) || + lua_to_val_range(L, TABLE, "sport", &sport) || + lua_to_val_range(L, TABLE, "dport", &dport) || + lua_to_action(L, TABLE, "action", &action)) + return -1; + + struct acl4_rule rule; + + rule.data.userdata = action; /* allow, drop or rate_limit */ + rule.data.category_mask = 1; + rule.data.priority = n_rules++; + + /* Configuration for rules is done in little-endian so no bswap is needed here.. */ + + rule.fields[0].value.u8 = ip_proto.val; + rule.fields[0].mask_range.u8 = ip_proto.mask; + rule.fields[1].value.u32 = src_cidr.ip; + rule.fields[1].mask_range.u32 = src_cidr.prefix; + + rule.fields[2].value.u32 = dst_cidr.ip; + rule.fields[2].mask_range.u32 = dst_cidr.prefix; + + rule.fields[3].value.u16 = sport.beg; + rule.fields[3].mask_range.u16 = sport.end; + + rule.fields[4].value.u16 = dport.beg; + rule.fields[4].mask_range.u16 = dport.end; + + if (use_qinq) { + rule.fields[5].value.u16 = rte_bswap16(qinq_tag); + rule.fields[5].mask_range.u16 = 0xffff; + + /* To mask out the TCI and only keep the VID, the mask should be 0x0fff */ + rule.fields[6].value.u16 = svlan.val; + rule.fields[6].mask_range.u16 = svlan.mask; + + rule.fields[7].value.u16 = rte_bswap16(ETYPE_VLAN); + rule.fields[7].mask_range.u16 = 0xffff; + + rule.fields[8].value.u16 = cvlan.val; + rule.fields[8].mask_range.u16 = cvlan.mask; + } + else { + /* Reuse first ethertype from vlan to check if packet is IPv4 packet */ + rule.fields[5].value.u16 = rte_bswap16(ETYPE_IPv4); + rule.fields[5].mask_range.u16 = 0xffff; + + /* Other fields are ignored */ + rule.fields[6].value.u16 = 0; + 
rule.fields[6].mask_range.u16 = 0; + rule.fields[7].value.u16 = 0; + rule.fields[7].mask_range.u16 = 0; + rule.fields[8].value.u16 = 0; + rule.fields[8].mask_range.u16 = 0; + } + + rte_acl_add_rules(ctx, (struct rte_acl_rule*) &rule, 1); + lua_pop(L, 1); + } + + *n_max_rules -= n_rules; + lua_pop(L, pop); + return 0; +} + +static inline uint32_t ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val) +{ + const union ipv4_5tuple_host *k; + uint32_t t; + const uint32_t *p; + + k = data; + t = k->proto; + p = (const uint32_t *)&k->port_src; + + init_val = rte_hash_crc_4byte(t, init_val); + init_val = rte_hash_crc_4byte(k->ip_src, init_val); + init_val = rte_hash_crc_4byte(k->ip_dst, init_val); + init_val = rte_hash_crc_4byte(*p, init_val); + return (init_val); +} + +int lua_to_tuples(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct rte_hash **lookup_hash, uint8_t **out_if) +{ + int pop; + char s[64]; + + if ((pop = lua_getfrom(L, from, name)) < 0) + return -1; + + if (!lua_istable(L, -1)) { + plog_err("Can't read rules since data is not a table\n"); + return -1; + } + + lua_len(L, -1); + uint32_t n_tot_tuples = lua_tointeger(L, -1); + lua_pop(L, 1); + + struct rte_hash_parameters ipv4_l3fwd_hash_params = { + .name = NULL, + .entries = n_tot_tuples * 4, + .key_len = sizeof(union ipv4_5tuple_host), +#if RTE_VERSION < RTE_VERSION_NUM(2, 1, 0, 0) + .bucket_entries = 4, +#endif + .hash_func = ipv4_hash_crc, + .hash_func_init_val = 0, + }; + + /* create lb_5tuple hash - same hash is shared between cores on same socket */ + snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socket); + if ((*lookup_hash = rte_hash_find_existing(s)) == NULL) { + ipv4_l3fwd_hash_params.name = s; + ipv4_l3fwd_hash_params.socket_id = socket; + *lookup_hash = rte_hash_create(&ipv4_l3fwd_hash_params); + PROX_PANIC(*lookup_hash == NULL, "Unable to create the lb_5tuple hash\n"); + } + + lua_pushnil(L); + while (lua_next(L, -2)) { + uint32_t 
if_out, ip_src, ip_dst, port_src, port_dst, proto; + union ipv4_5tuple_host newkey; + + if (lua_to_int(L, TABLE, "if_out", &if_out) || + lua_to_int(L, TABLE, "ip_src", &ip_src) || + lua_to_int(L, TABLE, "ip_dst", &ip_dst) || + lua_to_int(L, TABLE, "port_src", &port_src) || + lua_to_int(L, TABLE, "port_dst", &port_dst) || + lua_to_int(L, TABLE, "proto", &proto)) { + plog_err("Failed to read user table config\n"); + return -1; + } + + newkey.ip_dst = rte_cpu_to_be_32(ip_dst); + newkey.ip_src = rte_cpu_to_be_32(ip_src); + newkey.port_dst = rte_cpu_to_be_16((uint16_t)port_dst); + newkey.port_src = rte_cpu_to_be_16((uint16_t)port_src); + newkey.proto = (uint8_t)proto; + newkey.pad0 = 0; + newkey.pad1 = 0; + + int32_t ret = rte_hash_add_key(*lookup_hash, (void *) &newkey); + PROX_PANIC(ret < 0, "Unable to add entry (err code %d)\n", ret); + (*out_if)[ret] = (uint8_t) if_out; + + lua_pop(L, 1); + } + lua_pop(L, pop); + return 0; +} diff --git a/VNFs/DPPD-PROX/prox_lua_types.h b/VNFs/DPPD-PROX/prox_lua_types.h new file mode 100644 index 00000000..182c9055 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_lua_types.h @@ -0,0 +1,142 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PROX_LUA_TYPES_H_ +#define _PROX_LUA_TYPES_H_ + +#include <inttypes.h> +#include <rte_ether.h> +#include <rte_hash.h> + +#include "ip6_addr.h" + +struct lua_State; +struct ether_addr; +struct ip4_subnet; +struct ip6_subnet; +struct next_hop; +struct rte_lpm; +struct rte_lpm6; +struct next_hop6; +struct rte_acl_ctx; +struct qinq_gre_map; + +#define MAX_HOP_INDEX 128 +enum l4gen_peer {PEER_SERVER, PEER_CLIENT}; + +static const char *l4gen_peer_to_str(enum l4gen_peer peer) +{ + return peer == PEER_SERVER? "server" : "client"; +} + +struct peer_data { + uint8_t *hdr; + uint32_t hdr_len; + uint8_t *content; +}; + +struct peer_action { + enum l4gen_peer peer; + uint32_t beg; + uint32_t len; +}; + +struct lpm4 { + uint32_t n_free_rules; + uint32_t n_used_rules; + struct next_hop *next_hops; + struct rte_lpm *rte_lpm; +}; + +struct lpm6 { + struct rte_lpm6 *rte_lpm6; + struct next_hop6 *next_hops; + uint32_t n_free_rules; + uint32_t n_used_rules; +}; + +struct ipv6_tun_binding_entry { + struct ipv6_addr endpoint_addr; // IPv6 local addr + struct ether_addr next_hop_mac; // mac addr of next hop towards lwB4 + uint32_t public_ipv4; // Public IPv4 address + uint16_t public_port; // Public base port (together with port mask, defines the Port Set) +} __attribute__((__packed__)); + +struct ipv6_tun_binding_table { + uint32_t num_binding_entries; + struct ipv6_tun_binding_entry entry[0]; +}; + +struct cpe_table_entry { + uint32_t port_idx; + uint32_t gre_id; + uint32_t svlan; + uint32_t cvlan; + uint32_t ip; + struct ether_addr eth_addr; + uint32_t user; +}; + +struct cpe_table_data { + uint32_t n_entries; + struct cpe_table_entry entries[0]; +}; + +struct val_mask { + uint32_t val; + uint32_t mask; +}; + +struct val_range { + uint32_t beg; + uint32_t end; +}; + +enum acl_action {ACL_NOT_SET, ACL_ALLOW, ACL_DROP, ACL_RATE_LIMIT}; + +const char *get_lua_to_errors(void); + +enum lua_place {STACK, TABLE, GLOBAL}; +int lua_getfrom(struct lua_State *L, enum lua_place 
from, const char *name); + +int lua_to_port(struct lua_State *L, enum lua_place from, const char *name, uint16_t *port); +int lua_to_ip(struct lua_State *L, enum lua_place from, const char *name, uint32_t *ip); +int lua_to_ip6(struct lua_State *L, enum lua_place from, const char *name, uint8_t *ip); +int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, struct ether_addr *mac); +int lua_to_cidr(struct lua_State *L, enum lua_place from, const char *name, struct ip4_subnet *cidr); +int lua_to_cidr6(struct lua_State *L, enum lua_place from, const char *name, struct ip6_subnet *cidr); +int lua_to_int(struct lua_State *L, enum lua_place from, const char *name, uint32_t *val); +int lua_to_double(struct lua_State *L, enum lua_place from, const char *name, double *val); +int lua_to_string(struct lua_State *L, enum lua_place from, const char *name, char *dst, size_t size); +int lua_to_val_mask(struct lua_State *L, enum lua_place from, const char *name, struct val_mask *val_mask); +int lua_to_val_range(struct lua_State *L, enum lua_place from, const char *name, struct val_range *val_range); +int lua_to_action(struct lua_State *L, enum lua_place from, const char *name, enum acl_action *action); +int lua_to_dscp(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, uint8_t **dscp); +int lua_to_user_table(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, uint16_t **user_table); +int lua_to_lpm4(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm4 **lpm); +int lua_to_routes4(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm4 *lpm); +int lua_to_next_hop(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct next_hop **nh); +int lua_to_lpm6(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm6 **lpm); +int lua_to_ip6_tun_binding(struct lua_State *L, enum lua_place from, const char 
*name, uint8_t socket, struct ipv6_tun_binding_table **data); +int lua_to_qinq_gre_map(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct qinq_gre_map **qinq_gre_map); +int lua_to_cpe_table_data(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct cpe_table_data **data); +int lua_to_rules(struct lua_State *L, enum lua_place from, const char *name, struct rte_acl_ctx *ctx, uint32_t* n_max_rules, int use_qinq, uint16_t qinq_tag); +int lua_to_routes4_entry(struct lua_State *L, enum lua_place from, const char *name, struct ip4_subnet *cidr, uint32_t *nh_idx); +int lua_to_next_hop6(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct next_hop6 **nh); +int lua_to_routes6(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm6 *lpm); +int lua_to_tuples(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct rte_hash **lookup_hash, uint8_t **out_if); + +#endif /* _PROX_LUA_TYPES_H_ */ diff --git a/VNFs/DPPD-PROX/prox_malloc.c b/VNFs/DPPD-PROX/prox_malloc.c new file mode 100644 index 00000000..cec80f3e --- /dev/null +++ b/VNFs/DPPD-PROX/prox_malloc.c @@ -0,0 +1,33 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_malloc.h> + +#include "prox_malloc.h" + +#ifndef RTE_CACHE_LINE_SIZE +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +void *prox_zmalloc(size_t size, int socket) +{ + return rte_zmalloc_socket(NULL, size, RTE_CACHE_LINE_SIZE, socket); +} + +void prox_free(void *ptr) +{ + rte_free(ptr); +} diff --git a/VNFs/DPPD-PROX/prox_malloc.h b/VNFs/DPPD-PROX/prox_malloc.h new file mode 100644 index 00000000..c75667de --- /dev/null +++ b/VNFs/DPPD-PROX/prox_malloc.h @@ -0,0 +1,25 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _PROX_MALLOC_H_ +#define _PROX_MALLOC_H_ + +#include <stddef.h> + +void *prox_zmalloc(size_t size, int socket); +void prox_free(void *ptr); + +#endif /* _PROX_MALLOC_H_ */ diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c new file mode 100644 index 00000000..831a8ff9 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_port_cfg.c @@ -0,0 +1,473 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <stdio.h> +#include <rte_version.h> +#include <rte_eth_ring.h> +#include <rte_mbuf.h> +#if (RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)) && (RTE_VERSION <= RTE_VERSION_NUM(17,5,0,1)) +#include <rte_eth_null.h> +#endif + +#include "prox_port_cfg.h" +#include "prox_globals.h" +#include "log.h" +#include "quit.h" +#include "defaults.h" +#include "toeplitz.h" +#include "defines.h" +#include "prox_cksum.h" + +struct prox_port_cfg prox_port_cfg[PROX_MAX_PORTS]; +rte_atomic32_t lsc; + +int prox_nb_active_ports(void) +{ + int ret = 0; + for (uint32_t i = 0; i < PROX_MAX_PORTS; ++i) { + ret += prox_port_cfg[i].active; + } + return ret; +} + +int prox_last_port_active(void) +{ + int ret = 0; + for (uint32_t i = 0; i < PROX_MAX_PORTS; ++i) { + if (prox_port_cfg[i].active) { + ret = i; + } + } + return ret; +} + +static void lsc_cb(__attribute__((unused)) uint8_t port_id, enum rte_eth_event_type type, __attribute__((unused)) void *param) +{ + struct rte_eth_link link; + + if (RTE_ETH_EVENT_INTR_LSC != type) { + return; + } + + rte_atomic32_inc(&lsc); +} + +struct prox_pktmbuf_reinit_args { + struct rte_mempool *mp; + struct lcore_cfg *lconf; +}; + +/* standard mbuf initialization procedure */ +void prox_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *_m, unsigned i) +{ + struct rte_mbuf *mbuf = _m; + +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + mbuf->tx_offload = CALC_TX_OL(sizeof(struct ether_hdr), sizeof(struct ipv4_hdr)); +#else + mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr); + 
mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr); +#endif + + rte_pktmbuf_init(mp, opaque_arg, mbuf, i); +} + +void prox_pktmbuf_reinit(void *arg, void *start, __attribute__((unused)) void *end, uint32_t idx) +{ + struct prox_pktmbuf_reinit_args *init_args = arg; + struct rte_mbuf *m; + char* obj = start; + + obj += init_args->mp->header_size; + m = (struct rte_mbuf*)obj; + + prox_pktmbuf_init(init_args->mp, init_args->lconf, obj, idx); +} + +/* initialize rte devices and check the number of available ports */ +void init_rte_dev(int use_dummy_devices) +{ + uint8_t nb_ports, port_id_max, port_id_last; + struct rte_eth_dev_info dev_info; + + nb_ports = rte_eth_dev_count(); + /* get available ports configuration */ + PROX_PANIC(use_dummy_devices && nb_ports, "Can't use dummy devices while there are also real ports\n"); + + if (use_dummy_devices) { +#if (RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)) && (RTE_VERSION <= RTE_VERSION_NUM(17,5,0,1)) + nb_ports = prox_last_port_active() + 1; + plog_info("Creating %u dummy devices\n", nb_ports); + + char port_name[32] = "0dummy_dev"; + for (uint32_t i = 0; i < nb_ports; ++i) { + eth_dev_null_create(port_name, 0, ETHER_MIN_LEN, 0); + port_name[0]++; + } +#else + PROX_PANIC(use_dummy_devices, "Can't use dummy devices\n"); +#endif + } + else { + PROX_PANIC(nb_ports == 0, "\tError: DPDK could not find any port\n"); + plog_info("\tDPDK has found %u ports\n", nb_ports); + } + + if (nb_ports > PROX_MAX_PORTS) { + plog_warn("\tWarning: I can deal with at most %u ports." 
+ " Please update PROX_MAX_PORTS and recompile.\n", PROX_MAX_PORTS); + + nb_ports = PROX_MAX_PORTS; + } + port_id_max = nb_ports - 1; + port_id_last = prox_last_port_active(); + PROX_PANIC(port_id_last > port_id_max, + "\tError: invalid port(s) specified, last port index active: %d (max index is %d)\n", + port_id_last, port_id_max); + + /* Assign ports to PROX interfaces & Read max RX/TX queues per port */ + for (uint8_t port_id = 0; port_id < nb_ports; ++port_id) { + /* skip ports that are not enabled */ + if (!prox_port_cfg[port_id].active) { + continue; + } + plog_info("\tGetting info for rte dev %u\n", port_id); + rte_eth_dev_info_get(port_id, &dev_info); + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + port_cfg->socket = -1; + + port_cfg->max_txq = dev_info.max_tx_queues; + port_cfg->max_rxq = dev_info.max_rx_queues; + + if (!dev_info.pci_dev) + continue; + + snprintf(port_cfg->pci_addr, sizeof(port_cfg->pci_addr), + "%04x:%02x:%02x.%1x", dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function); + strncpy(port_cfg->driver_name, dev_info.driver_name, sizeof(port_cfg->driver_name)); + plog_info("\tPort %u : driver='%s' tx_queues=%d rx_queues=%d\n", port_id, !strcmp(port_cfg->driver_name, "")? 
"null" : port_cfg->driver_name, port_cfg->max_txq, port_cfg->max_rxq); + + if (strncmp(port_cfg->driver_name, "rte_", 4) == 0) { + strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name)); + } else if (strncmp(port_cfg->driver_name, "net_", 4) == 0) { + strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name)); + } else { + strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name, sizeof(port_cfg->short_name)); + } + char *ptr; + if ((ptr = strstr(port_cfg->short_name, "_pmd")) != NULL) { + *ptr = '\x0'; + } + + /* Try to find the device's numa node */ + char buf[1024]; + snprintf(buf, sizeof(buf), "/sys/bus/pci/devices/%s/numa_node", port_cfg->pci_addr); + FILE* numa_node_fd = fopen(buf, "r"); + if (numa_node_fd) { + if (fgets(buf, sizeof(buf), numa_node_fd) == NULL) { + plog_warn("Failed to read numa_node for device %s\n", port_cfg->pci_addr); + } + port_cfg->socket = strtol(buf, 0, 0); + if (port_cfg->socket == -1) { + plog_warn("System did not report numa_node for device %s\n", port_cfg->pci_addr); + } + fclose(numa_node_fd); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { + port_cfg->capabilities.tx_offload_cksum |= IPV4_CKSUM; + } + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { + port_cfg->capabilities.tx_offload_cksum |= UDP_CKSUM; + } + } +} + +/* Create rte ring-backed devices */ +uint8_t init_rte_ring_dev(void) +{ + uint8_t nb_ring_dev = 0; + + for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) { + /* skip ports that are not enabled */ + if (!prox_port_cfg[port_id].active) { + continue; + } + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + if (port_cfg->rx_ring[0] != '\0') { + plog_info("\tRing-backed port %u: rx='%s' tx='%s'\n", port_id, port_cfg->rx_ring, port_cfg->tx_ring); + + struct rte_ring* rx_ring = rte_ring_lookup(port_cfg->rx_ring); + PROX_PANIC(rx_ring == NULL, "Ring %s not found for port %d!\n", 
port_cfg->rx_ring, port_id); + struct rte_ring* tx_ring = rte_ring_lookup(port_cfg->tx_ring); + PROX_PANIC(tx_ring == NULL, "Ring %s not found for port %d!\n", port_cfg->tx_ring, port_id); + + int ret = rte_eth_from_rings(port_cfg->name, &rx_ring, 1, &tx_ring, 1, rte_socket_id()); + PROX_PANIC(ret != 0, "Failed to create eth_dev from rings for port %d\n", port_id); + + port_cfg->port_conf.intr_conf.lsc = 0; /* Link state interrupt not supported for ring-backed ports */ + + nb_ring_dev++; + } + } + + return nb_ring_dev; +} + +static void init_port(struct prox_port_cfg *port_cfg) +{ + static char dummy_pool_name[] = "0_dummy"; + struct rte_eth_link link; + uint8_t port_id; + int ret; + + port_id = port_cfg - prox_port_cfg; + plog_info("\t*** Initializing port %u ***\n", port_id); + plog_info("\t\tPort name is set to %s\n", port_cfg->name); + plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq); + plog_info("\t\tPort driver is %s\n", port_cfg->driver_name); + + PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0, + "\t\t port %u is enabled but no RX or TX queues have been configured", port_id); + + if (port_cfg->n_rxq == 0) { + /* not receiving on this port */ + plog_info("\t\tPort %u had no RX queues, setting to 1\n", port_id); + port_cfg->n_rxq = 1; + uint32_t mbuf_size = MBUF_SIZE; + if (strcmp(port_cfg->short_name, "vmxnet3") == 0) { + mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM; + } + plog_info("\t\tAllocating dummy memory pool on socket %u with %u elements of size %u\n", + port_cfg->socket, port_cfg->n_rxd, mbuf_size); + port_cfg->pool[0] = rte_mempool_create(dummy_pool_name, port_cfg->n_rxd, mbuf_size, + 0, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + prox_pktmbuf_init, 0, + port_cfg->socket, 0); + PROX_PANIC(port_cfg->pool[0] == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n", + port_cfg->socket, port_cfg->n_rxd); + dummy_pool_name[0]++; + } else { + // Most pmd 
do not support setting mtu yet... + if (!strcmp(port_cfg->short_name, "ixgbe")) { + plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id); + ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu); + PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret); + } + + if (port_cfg->n_txq == 0) { + /* not sending on this port */ + plog_info("\t\tPort %u had no TX queues, setting to 1\n", port_id); + port_cfg->n_txq = 1; + } + } + + if (port_cfg->n_rxq > 1) { + // Enable RSS if multiple receive queues + port_cfg->port_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS; + port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key; + port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN; +#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0) + port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONFRAG_IPV4_UDP; +#else + port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP; +#endif + } + + plog_info("\t\tConfiguring port %u... 
with %u RX queues and %u TX queues\n", + port_id, port_cfg->n_rxq, port_cfg->n_txq); + + PROX_PANIC(port_cfg->n_rxq > port_cfg->max_rxq, "\t\t\tToo many RX queues (configuring %u, max is %u)\n", port_cfg->n_rxq, port_cfg->max_rxq); + PROX_PANIC(port_cfg->n_txq > port_cfg->max_txq, "\t\t\tToo many TX queues (configuring %u, max is %u)\n", port_cfg->n_txq, port_cfg->max_txq); + + if (!strcmp(port_cfg->short_name, "ixgbe_vf") || + !strcmp(port_cfg->short_name, "virtio") || +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) + !strcmp(port_cfg->short_name, "i40e") || +#endif + !strcmp(port_cfg->short_name, "i40e_vf") || + !strcmp(port_cfg->driver_name, "") || /* NULL device */ + !strcmp(port_cfg->short_name, "vmxnet3")) { + port_cfg->port_conf.intr_conf.lsc = 0; + plog_info("\t\tDisabling link state interrupt for vmxnet3/VF/virtio (unsupported)\n"); + } + + if (port_cfg->lsc_set_explicitely) { + port_cfg->port_conf.intr_conf.lsc = port_cfg->lsc_val; + plog_info("\t\tOverriding link state interrupt configuration to '%s'\n", port_cfg->lsc_val? 
"enabled" : "disabled"); + } + if (!strcmp(port_cfg->short_name, "vmxnet3")) { + if (port_cfg->n_txd < 512) { + // Vmxnet3 driver requires minimum 512 tx descriptors + plog_info("\t\tNumber of TX descriptors is set to 512 (minimum required for vmxnet3\n"); + port_cfg->n_txd = 512; + } + } + + ret = rte_eth_dev_configure(port_id, port_cfg->n_rxq, + port_cfg->n_txq, &port_cfg->port_conf); + PROX_PANIC(ret < 0, "\t\t\trte_eth_dev_configure() failed on port %u: %s (%d)\n", port_id, strerror(-ret), ret); + + if (port_cfg->port_conf.intr_conf.lsc) { + rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, lsc_cb, NULL); + } + + plog_info("\t\tMAC address set to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes)); + + /* initialize RX queues */ + for (uint16_t queue_id = 0; queue_id < port_cfg->n_rxq; ++queue_id) { + plog_info("\t\tSetting up RX queue %u on port %u on socket %u with %u desc (pool 0x%p)\n", + queue_id, port_id, port_cfg->socket, + port_cfg->n_rxd, port_cfg->pool[queue_id]); + + ret = rte_eth_rx_queue_setup(port_id, queue_id, + port_cfg->n_rxd, + port_cfg->socket, &port_cfg->rx_conf, + port_cfg->pool[queue_id]); + + PROX_PANIC(ret < 0, "\t\t\trte_eth_rx_queue_setup() failed on port %u: error %s (%d)\n", port_id, strerror(-ret), ret); + } + if (!strcmp(port_cfg->short_name, "virtio")) { + port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS; + plog_info("\t\tDisabling TX offloads (virtio does not support TX offloads)\n"); + } + + if (!strcmp(port_cfg->short_name, "vmxnet3")) { + port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS | ETH_TXQ_FLAGS_NOMULTSEGS; + plog_info("\t\tDisabling TX offloads and multsegs on port %d as vmxnet3 does not support them\n", port_id); + } + /* initialize one TX queue per logical core on each port */ + for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) { + plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n", + queue_id, port_cfg->socket, port_cfg->n_txd); + ret = 
rte_eth_tx_queue_setup(port_id, queue_id, port_cfg->n_txd, + port_cfg->socket, &port_cfg->tx_conf); + PROX_PANIC(ret < 0, "\t\t\trte_eth_tx_queue_setup() failed on port %u: error %d\n", port_id, ret); + } + + plog_info("\t\tStarting up port %u ...", port_id); + ret = rte_eth_dev_start(port_id); + + PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_start() failed on port %u: error %d\n", port_id, ret); + plog_info(" done: "); + + /* Getting link status can be done without waiting if Link + State Interrupt is enabled since in that case, if the link + is recognized as being down, an interrupt will notify that + it has gone up. */ + if (port_cfg->port_conf.intr_conf.lsc) + rte_eth_link_get_nowait(port_id, &link); + else + rte_eth_link_get(port_id, &link); + + port_cfg->link_up = link.link_status; + port_cfg->link_speed = link.link_speed; + if (link.link_status) { + plog_info("Link Up - speed %'u Mbps - %s\n", + link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + "full-duplex" : "half-duplex"); + } + else { + plog_info("Link Down\n"); + } + + if (port_cfg->promiscuous) { + rte_eth_promiscuous_enable(port_id); + plog_info("\t\tport %u in promiscuous mode\n", port_id); + } + + if (strcmp(port_cfg->short_name, "ixgbe_vf") && + strcmp(port_cfg->short_name, "i40e") && + strcmp(port_cfg->short_name, "i40e_vf") && + strcmp(port_cfg->short_name, "vmxnet3")) { + for (uint8_t i = 0; i < 16; ++i) { + ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i); + if (ret) { + plog_info("\t\trte_eth_dev_set_rx_queue_stats_mapping() failed: error %d\n", ret); + } + ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i); + if (ret) { + plog_info("\t\trte_eth_dev_set_tx_queue_stats_mapping() failed: error %d\n", ret); + } + } + } +} + +void init_port_all(void) +{ + uint8_t max_port_idx = prox_last_port_active() + 1; + + for (uint8_t portid = 0; portid < max_port_idx; ++portid) { + if (!prox_port_cfg[portid].active) { + continue; + } + init_port(&prox_port_cfg[portid]); + } +} 
+ +void close_ports_atexit(void) +{ + uint8_t max_port_idx = prox_last_port_active() + 1; + + for (uint8_t portid = 0; portid < max_port_idx; ++portid) { + if (!prox_port_cfg[portid].active) { + continue; + } + rte_eth_dev_close(portid); + } +} + +void init_port_addr(void) +{ + struct prox_port_cfg *port_cfg; + + for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) { + if (!prox_port_cfg[port_id].active) { + continue; + } + port_cfg = &prox_port_cfg[port_id]; + + switch (port_cfg->type) { + case PROX_PORT_MAC_HW: + rte_eth_macaddr_get(port_id, &port_cfg->eth_addr); + break; + case PROX_PORT_MAC_RAND: + eth_random_addr(port_cfg->eth_addr.addr_bytes); + break; + case PROX_PORT_MAC_SET: + break; + } + } +} + +int port_is_active(uint8_t port_id) +{ + if (port_id > PROX_MAX_PORTS) { + plog_info("requested port is higher than highest supported port ID (%u)\n", PROX_MAX_PORTS); + return 0; + } + + struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id]; + if (!port_cfg->active) { + plog_info("Port %u is not active\n", port_id); + return 0; + } + return 1; +} diff --git a/VNFs/DPPD-PROX/prox_port_cfg.h b/VNFs/DPPD-PROX/prox_port_cfg.h new file mode 100644 index 00000000..17616187 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_port_cfg.h @@ -0,0 +1,83 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PROX_PORT_CFG_H +#define _PROX_PORT_CFG_H + +#include <rte_ether.h> +#include <rte_ethdev.h> + +#include "prox_globals.h" + +enum addr_type {PROX_PORT_MAC_HW, PROX_PORT_MAC_SET, PROX_PORT_MAC_RAND}; + +#define IPV4_CKSUM 1 +#define UDP_CKSUM 2 + +struct prox_port_cfg { + struct rte_mempool *pool[32]; /* Rx/Tx mempool */ + size_t pool_size[32]; + uint8_t promiscuous; + uint8_t lsc_set_explicitely; /* Explicitly enable/disable lsc */ + uint8_t lsc_val; + uint8_t active; + int socket; + uint16_t max_rxq; /* max number of Tx queues */ + uint16_t max_txq; /* max number of Tx queues */ + uint16_t n_rxq; /* number of used Rx queues */ + uint16_t n_txq; /* number of used Tx queues */ + uint32_t n_rxd; + uint32_t n_txd; + uint8_t link_up; + uint32_t link_speed; + uint32_t mtu; + enum addr_type type; + struct ether_addr eth_addr; /* port MAC address */ + char name[MAX_NAME_SIZE]; + char short_name[MAX_NAME_SIZE]; + char driver_name[MAX_NAME_SIZE]; + char rx_ring[MAX_NAME_SIZE]; + char tx_ring[MAX_NAME_SIZE]; + char pci_addr[32]; + struct rte_eth_conf port_conf; + struct rte_eth_rxconf rx_conf; + struct rte_eth_txconf tx_conf; + struct { + int tx_offload_cksum; + } capabilities; +}; + +extern rte_atomic32_t lsc; + +int prox_nb_active_ports(void); +int prox_last_port_active(void); + +extern struct prox_port_cfg prox_port_cfg[]; + +void init_rte_dev(int use_dummy_devices); +uint8_t init_rte_ring_dev(void); +void init_port_addr(void); +void init_port_all(void); +void close_ports_atexit(void); + +struct rte_mempool; + +void prox_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *_m, unsigned i); +void prox_pktmbuf_reinit(void *arg, void *start, void *end, uint32_t idx); + +int port_is_active(uint8_t port_id); + +#endif /* __PROX_PORT_CFG_H_ */ diff --git a/VNFs/DPPD-PROX/prox_shared.c b/VNFs/DPPD-PROX/prox_shared.c new file mode 100644 index 00000000..890d564b --- /dev/null +++ b/VNFs/DPPD-PROX/prox_shared.c @@ -0,0 +1,174 @@ +/* +// Copyright (c) 
2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <stdio.h> +#include <rte_hash.h> +#include <rte_hash_crc.h> +#include <rte_version.h> + +#include "quit.h" +#include "log.h" +#include "prox_shared.h" +#include "prox_globals.h" + +#define INIT_HASH_TABLE_SIZE 8192 + +struct prox_shared { + struct rte_hash *hash; + size_t size; +}; + +struct prox_shared sh_system; +struct prox_shared sh_socket[MAX_SOCKETS]; +struct prox_shared sh_core[RTE_MAX_LCORE]; + +static char* get_sh_name(void) +{ + static char name[] = "prox_sh"; + + name[0]++; + return name; +} + +struct rte_hash_parameters param = { + .key_len = 256, + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = 0, +}; + +static void prox_sh_create_hash(struct prox_shared *ps, size_t size) +{ + param.entries = size; + param.name = get_sh_name(); + ps->hash = rte_hash_create(¶m); + PROX_PANIC(ps->hash == NULL, "Failed to create hash table for shared data"); + ps->size = size; + if (ps->size == INIT_HASH_TABLE_SIZE) + plog_info("Shared data tracking hash table created with size %zu\n", ps->size); + else + plog_info("Shared data tracking hash table grew to %zu\n", ps->size); +} + +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) +static int copy_hash(struct rte_hash *new_hash, struct rte_hash *old_hash) +{ + const void *next_key; + void *next_data; + uint32_t iter = 0; + + while (rte_hash_iterate(old_hash, &next_key, &next_data, &iter) >= 0) { + if 
(rte_hash_add_key_data(new_hash, next_key, next_data) < 0) + return -1; + } + + return 0; +} +#endif + +static int prox_sh_add(struct prox_shared *ps, const char *name, void *data) +{ + char key[256] = {0}; + int ret; + + strncpy(key, name, sizeof(key)); + if (ps->size == 0) { + prox_sh_create_hash(ps, INIT_HASH_TABLE_SIZE); + } + +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + do { + ret = rte_hash_add_key_data(ps->hash, key, data); + if (ret < 0) { + struct rte_hash *old = ps->hash; + int success; + do { + prox_sh_create_hash(ps, ps->size * 2); + success = !copy_hash(ps->hash, old); + if (success) + rte_hash_free(old); + else + rte_hash_free(ps->hash); + } while (!success); + } + } while (ret < 0); +#else + PROX_PANIC(1, "DPDK < 2.1 not fully supported"); +#endif + return 0; +} + +static void *prox_sh_find(struct prox_shared *sh, const char *name) +{ +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0) + char key[256] = {0}; + int ret; + void *data; + + if (!sh->hash) + return NULL; + + strncpy(key, name, sizeof(key)); + ret = rte_hash_lookup_data(sh->hash, key, &data); + if (ret >= 0) + return data; +#else + PROX_PANIC(1, "DPDK < 2.1 not fully supported"); +#endif + return NULL; +} + +int prox_sh_add_system(const char *name, void *data) +{ + return prox_sh_add(&sh_system, name, data); +} + +int prox_sh_add_socket(const int socket_id, const char *name, void *data) +{ + if (socket_id >= MAX_SOCKETS) + return -1; + + return prox_sh_add(&sh_socket[socket_id], name, data); +} + +int prox_sh_add_core(const int core_id, const char *name, void *data) +{ + if (core_id >= RTE_MAX_LCORE) + return -1; + + return prox_sh_add(&sh_core[core_id], name, data); +} + +void *prox_sh_find_system(const char *name) +{ + return prox_sh_find(&sh_system, name); +} + +void *prox_sh_find_socket(const int socket_id, const char *name) +{ + if (socket_id >= MAX_SOCKETS) + return NULL; + + return prox_sh_find(&sh_socket[socket_id], name); +} + +void *prox_sh_find_core(const int core_id, const char 
*name) +{ + if (core_id >= RTE_MAX_LCORE) + return NULL; + + return prox_sh_find(&sh_core[core_id], name); +} diff --git a/VNFs/DPPD-PROX/prox_shared.h b/VNFs/DPPD-PROX/prox_shared.h new file mode 100644 index 00000000..c98b1d64 --- /dev/null +++ b/VNFs/DPPD-PROX/prox_shared.h @@ -0,0 +1,32 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _PROX_SHARED_H_ +#define _PROX_SHARED_H_ + +#include <rte_ether.h> + +/* Data can be shared at different levels. The levels are core wide, + socket wide and system wide. */ +int prox_sh_add_system(const char *name, void *data); +int prox_sh_add_socket(const int socket_id, const char *name, void *data); +int prox_sh_add_core(const int core_id, const char *name, void *data); + +void *prox_sh_find_system(const char *name); +void *prox_sh_find_socket(const int socket_id, const char *name); +void *prox_sh_find_core(const int core_id, const char *name); + +#endif /* _PROX_SHARED_H_ */ diff --git a/VNFs/DPPD-PROX/qinq.h b/VNFs/DPPD-PROX/qinq.h new file mode 100644 index 00000000..14da9753 --- /dev/null +++ b/VNFs/DPPD-PROX/qinq.h @@ -0,0 +1,40 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _QINQ_H_ +#define _QINQ_H_ + +#include <rte_ether.h> + +struct my_vlan_hdr { + uint16_t eth_proto; + uint16_t vlan_tci; +} __attribute__((packed)); + +struct vlans { + struct my_vlan_hdr svlan; + struct my_vlan_hdr cvlan; +}; + +struct qinq_hdr { + struct ether_addr d_addr; + struct ether_addr s_addr; + struct my_vlan_hdr svlan; + struct my_vlan_hdr cvlan; + uint16_t ether_type; +} __attribute__((packed)); + +#endif /* _QINQ_H_ */ diff --git a/VNFs/DPPD-PROX/quit.h b/VNFs/DPPD-PROX/quit.h new file mode 100644 index 00000000..a01c0a02 --- /dev/null +++ b/VNFs/DPPD-PROX/quit.h @@ -0,0 +1,46 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _QUIT_H_ +#define _QUIT_H_ + +#include <signal.h> +#include <sys/types.h> +#include <unistd.h> + +#include <rte_debug.h> + +#include "display.h" +#include "prox_cfg.h" + +/* PROX_PANIC for checks that are possibly hit due to configuration or + when feature is not implemented. 
*/ +/* Restore tty and abort if there is a problem */ +#define PROX_PANIC(cond, ...) do { \ + if (cond) { \ + plog_info(__VA_ARGS__); \ + display_end(); \ + if (prox_cfg.flags & DSF_DAEMON) { \ + pid_t ppid = getppid(); \ + plog_info("sending SIGUSR2 to %d\n", ppid);\ + kill(ppid, SIGUSR2); \ + } \ + rte_panic("PANIC at %s:%u, callstack:\n", \ + __FILE__, __LINE__); \ + } \ + } while (0) + +#endif /* _QUIT_H_ */ diff --git a/VNFs/DPPD-PROX/random.h b/VNFs/DPPD-PROX/random.h new file mode 100644 index 00000000..85508126 --- /dev/null +++ b/VNFs/DPPD-PROX/random.h @@ -0,0 +1,58 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +/* + This pseudorandom number generator is based on ref_xorshift128plus, + as implemented by reference_xorshift.h, which has been obtained + from https://sourceforge.net/projects/xorshift-cpp/ + + The licensing terms for reference_xorshift.h are reproduced below. + + // Written in 2014 by Ivo Doko (ivo.doko@gmail.com) + // based on code written by Sebastiano Vigna (vigna@acm.org) + // To the extent possible under law, the author has dedicated + // all copyright and related and neighboring rights to this + // software to the public domain worldwide. This software is + // distributed without any warranty. + // See <http://creativecommons.org/publicdomain/zero/1.0/>. 
+*/ + +#ifndef _RANDOM_H_ +#define _RANDOM_H_ + +#include <rte_cycles.h> + +struct random { + uint64_t state[2]; +}; + +static void random_init_seed(struct random *random) +{ + random->state[0] = rte_rdtsc(); + random->state[1] = rte_rdtsc(); +} + +static uint64_t random_next(struct random *random) +{ + const uint64_t s0 = random->state[1]; + const uint64_t s1 = random->state[0] ^ (random->state[0] << 23); + + random->state[0] = random->state[1]; + random->state[1] = (s1 ^ (s1 >> 18) ^ s0 ^ (s0 >> 5)) + s0; + return random->state[1]; +} + +#endif /* _RANDOM_H_ */ diff --git a/VNFs/DPPD-PROX/run.c b/VNFs/DPPD-PROX/run.c new file mode 100644 index 00000000..971d7148 --- /dev/null +++ b/VNFs/DPPD-PROX/run.c @@ -0,0 +1,241 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <inttypes.h> +#include <string.h> + +#include <rte_launch.h> +#include <rte_cycles.h> +#include <rte_atomic.h> + +#include "run.h" +#include "prox_cfg.h" +#include "prox_port_cfg.h" +#include "quit.h" +#include "commands.h" +#include "main.h" +#include "log.h" +#include "display.h" +#include "stats.h" +#include "stats_cons.h" +#include "stats_cons_log.h" +#include "stats_cons_cli.h" + +#include "input.h" +#include "input_curses.h" +#include "input_conn.h" + +static int needs_refresh; +static uint64_t update_interval; +static int stop_prox = 0; /* set to 1 to stop prox */ + +void set_update_interval(uint32_t msec) +{ + update_interval = msec_to_tsc(msec); +} + +void req_refresh(void) +{ + needs_refresh = 1; +} + +void quit(void) +{ + static rte_atomic32_t already_leaving = RTE_ATOMIC32_INIT(0); + if (!rte_atomic32_test_and_set(&already_leaving)) + return; + + plog_info("Leaving...\n"); + if (lcore_cfg == NULL) + exit(EXIT_SUCCESS); + stop_core_all(-1); + stop_prox = 1; +} + +static void update_link_states(void) +{ + struct prox_port_cfg *port_cfg; + struct rte_eth_link link; + + for (uint8_t portid = 0; portid < PROX_MAX_PORTS; ++portid) { + if (!prox_port_cfg[portid].active) { + continue; + } + + port_cfg = &prox_port_cfg[portid]; + rte_eth_link_get_nowait(portid, &link); + port_cfg->link_up = link.link_status; + port_cfg->link_speed = link.link_speed; + } +} + +static struct stats_cons stats_cons[8]; +static size_t n_stats_cons = 0; +static uint16_t stats_cons_flags = 0; + +static void stats_cons_add(struct stats_cons *sc) +{ + if (n_stats_cons == sizeof(stats_cons)/sizeof(stats_cons[0])) + return; + + stats_cons[n_stats_cons++] = *sc; + sc->init(); + stats_cons_flags |= sc->flags; +} + +static void stats_cons_notify(void) +{ + for (size_t i = 0; i < n_stats_cons; ++i) { + stats_cons[i].notify(); + } +} + +static void stats_cons_refresh(void) +{ + for (size_t i = 0; i < n_stats_cons; ++i) { + if (stats_cons[i].refresh) + stats_cons[i].refresh(); + 
} +} + +static void stats_cons_finish(void) +{ + for (size_t i = 0; i < n_stats_cons; ++i) { + if (stats_cons[i].finish) + stats_cons[i].finish(); + } +} + +static void busy_wait_until(uint64_t deadline) +{ + while (rte_rdtsc() < deadline) + ; +} + +static void multiplexed_input_stats(uint64_t deadline) +{ + input_proc_until(deadline); + + if (needs_refresh) { + needs_refresh = 0; + stats_cons_refresh(); + } + + if (rte_atomic32_read(&lsc)) { + rte_atomic32_dec(&lsc); + update_link_states(); + stats_cons_refresh(); + } +} + +static void print_warnings(void) +{ + if (get_n_warnings() == -1) { + plog_info("Warnings disabled\n"); + } + else if (get_n_warnings() > 0) { + int n_print = get_n_warnings() < 5? get_n_warnings(): 5; + plog_info("Started with %d warnings, last %d warnings: \n", get_n_warnings(), n_print); + for (int i = -n_print + 1; i <= 0; ++i) { + plog_info("%s", get_warning(i)); + } + } + else { + plog_info("Started without warnings\n"); + } +} + +/* start main loop */ +void __attribute__((noreturn)) run(uint32_t flags) +{ + uint64_t cur_tsc; + uint64_t next_update; + uint64_t stop_tsc = 0; + const uint64_t update_interval_threshold = usec_to_tsc(1); + + if (flags & DSF_LISTEN_TCP) + PROX_PANIC(reg_input_tcp(), "Failed to start listening on TCP port 8474: %s\n", strerror(errno)); + if (flags & DSF_LISTEN_UDS) + PROX_PANIC(reg_input_uds(), "Failed to start listening on UDS /tmp/prox.sock: %s\n", strerror(errno)); + + if (prox_cfg.use_stats_logger) + stats_cons_add(stats_cons_log_get()); + + stats_init(prox_cfg.start_time, prox_cfg.duration_time); + stats_update(STATS_CONS_F_ALL); + + switch (prox_cfg.ui) { + case PROX_UI_CURSES: + reg_input_curses(); + stats_cons_add(&display); + break; + case PROX_UI_CLI: + stats_cons_add(stats_cons_cli_get()); + break; + case PROX_UI_NONE: + default: + break; + } + + if (flags & DSF_AUTOSTART) + start_core_all(-1); + else + stop_core_all(-1); + + cur_tsc = rte_rdtsc(); + if (prox_cfg.duration_time != 0) { + stop_tsc = 
cur_tsc + sec_to_tsc(prox_cfg.start_time + prox_cfg.duration_time); + } + + stats_cons_notify(); + stats_cons_refresh(); + + update_interval = str_to_tsc(prox_cfg.update_interval_str); + next_update = cur_tsc + update_interval; + + cmd_rx_tx_info(); + print_warnings(); + + while (stop_prox == 0) { + + if (update_interval < update_interval_threshold) + busy_wait_until(next_update); + else + multiplexed_input_stats(next_update); + + next_update += update_interval; + + stats_update(stats_cons_flags); + stats_cons_notify(); + + if (stop_tsc && rte_rdtsc() >= stop_tsc) { + stop_prox = 1; + } + } + + stats_cons_finish(); + + if (prox_cfg.flags & DSF_WAIT_ON_QUIT) { + stop_core_all(-1); + } + + if (prox_cfg.logbuf) { + file_print(prox_cfg.logbuf); + } + + display_end(); + exit(EXIT_SUCCESS); +} diff --git a/VNFs/DPPD-PROX/run.h b/VNFs/DPPD-PROX/run.h new file mode 100644 index 00000000..3c61aca6 --- /dev/null +++ b/VNFs/DPPD-PROX/run.h @@ -0,0 +1,25 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _RUN_H_ +#define _RUN_H_ + +void run(uint32_t flags); +void quit(void); +void req_refresh(void); +void set_update_interval(uint32_t msec); + +#endif /* _RUN_H_ */ diff --git a/VNFs/DPPD-PROX/rw_reg.c b/VNFs/DPPD-PROX/rw_reg.c new file mode 100644 index 00000000..a0e59085 --- /dev/null +++ b/VNFs/DPPD-PROX/rw_reg.c @@ -0,0 +1,36 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_ethdev.h> +#include "rw_reg.h" + +int read_reg(uint8_t port_id, uint32_t addr, uint32_t *reg) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct _dev_hw *hw = (struct _dev_hw *)dev->data->dev_private; + + *reg = PROX_READ_REG(hw, addr); + return 0; +} + +int write_reg(uint8_t port_id, uint32_t reg, uint32_t val) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct _dev_hw *hw = (struct _dev_hw *)dev->data->dev_private; + + PROX_WRITE_REG(hw, reg, val); + return 0; +} diff --git a/VNFs/DPPD-PROX/rw_reg.h b/VNFs/DPPD-PROX/rw_reg.h new file mode 100644 index 00000000..1e38c7d0 --- /dev/null +++ b/VNFs/DPPD-PROX/rw_reg.h @@ -0,0 +1,43 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef __RW_REG_H__ +#define __RW_REG_H__ + +/* Simplified, from DPDK 1.8 */ +struct _dev_hw { + uint8_t *hw_addr; +}; +/* Registers access */ + +#define PROX_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) +#define PROX_READ_REG(hw, reg) \ + prox_read_addr(PROX_PCI_REG_ADDR((hw), (reg))) +#define PROX_PCI_REG(reg) (*((volatile uint32_t *)(reg))) +#define PROX_PCI_REG_WRITE(reg_addr, value) \ + *((volatile uint32_t *) (reg_addr)) = (value) +#define PROX_WRITE_REG(hw,reg,value) \ + PROX_PCI_REG_WRITE(PROX_PCI_REG_ADDR((hw), (reg)), (value)) + +static inline uint32_t prox_read_addr(volatile void* addr) +{ + return rte_le_to_cpu_32(PROX_PCI_REG(addr)); +} + +int read_reg(uint8_t portid, uint32_t addr, uint32_t *reg); +int write_reg(uint8_t portid, uint32_t reg, uint32_t val); +#endif diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c new file mode 100644 index 00000000..a6c1fd10 --- /dev/null +++ b/VNFs/DPPD-PROX/rx_pkt.c @@ -0,0 +1,427 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_cycles.h> +#include <rte_ethdev.h> +#include <rte_version.h> + +#include "rx_pkt.h" +#include "task_base.h" +#include "clock.h" +#include "stats.h" +#include "log.h" +#include "mbuf_utils.h" +#include "input.h" /* Needed for callback on dump */ + +/* _param version of the rx_pkt_hw functions are used to create two + instances of very similar variations of these functions. The + variations are specified by the "multi" parameter which significies + that the rte_eth_rx_burst function should be called multiple times. + The reason for this is that with the vector PMD, the maximum number + of packets being returned is 32. If packets have been split in + multiple mbufs then rte_eth_rx_burst might even receive less than + 32 packets. + Some algorithms (like QoS) only work correctly if more than 32 + packets are received if the dequeue step involves finding 32 packets. +*/ + +#define MIN_PMD_RX 32 + +static uint16_t rx_pkt_hw_port_queue(struct port_queue *pq, struct rte_mbuf **mbufs, int multi) +{ + uint16_t nb_rx, n; + + nb_rx = rte_eth_rx_burst(pq->port, pq->queue, mbufs, MAX_PKT_BURST); + + if (multi) { + n = nb_rx; + while (n != 0 && MAX_PKT_BURST - nb_rx >= MIN_PMD_RX) { + n = rte_eth_rx_burst(pq->port, pq->queue, mbufs + nb_rx, MIN_PMD_RX); + nb_rx += n; + PROX_PANIC(nb_rx > 64, "Received %d packets while expecting maximum %d\n", n, MIN_PMD_RX); + } + } + return nb_rx; +} + +static void next_port(struct rx_params_hw *rx_params_hw) +{ + ++rx_params_hw->last_read_portid; + if (unlikely(rx_params_hw->last_read_portid == rx_params_hw->nb_rxports)) { + rx_params_hw->last_read_portid = 0; + } +} + +static void next_port_pow2(struct rx_params_hw *rx_params_hw) +{ + rx_params_hw->last_read_portid = (rx_params_hw->last_read_portid + 1) & rx_params_hw->rxport_mask; +} + +static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbufs, 
int multi, + void (*next)(struct rx_params_hw *rx_param_hw)) +{ + uint8_t last_read_portid; + uint16_t nb_rx; + + START_EMPTY_MEASSURE(); + *mbufs = tbase->ws_mbuf->mbuf[0] + + (RTE_ALIGN_CEIL(tbase->ws_mbuf->idx[0].prod, 2) & WS_MBUF_MASK); + + last_read_portid = tbase->rx_params_hw.last_read_portid; + struct port_queue *pq = &tbase->rx_params_hw.rx_pq[last_read_portid]; + + nb_rx = rx_pkt_hw_port_queue(pq, *mbufs, multi); + next(&tbase->rx_params_hw); + + if (likely(nb_rx > 0)) { + TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx); + return nb_rx; + } + TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc); + return 0; +} + +static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs, int multi) +{ + uint16_t nb_rx, n; + + START_EMPTY_MEASSURE(); + *mbufs = tbase->ws_mbuf->mbuf[0] + + (RTE_ALIGN_CEIL(tbase->ws_mbuf->idx[0].prod, 2) & WS_MBUF_MASK); + + nb_rx = rte_eth_rx_burst(tbase->rx_params_hw1.rx_pq.port, + tbase->rx_params_hw1.rx_pq.queue, + *mbufs, MAX_PKT_BURST); + + if (multi) { + n = nb_rx; + while ((n != 0) && (MAX_PKT_BURST - nb_rx >= MIN_PMD_RX)) { + n = rte_eth_rx_burst(tbase->rx_params_hw1.rx_pq.port, + tbase->rx_params_hw1.rx_pq.queue, + *mbufs + nb_rx, MIN_PMD_RX); + nb_rx += n; + PROX_PANIC(nb_rx > 64, "Received %d packets while expecting maximum %d\n", n, MIN_PMD_RX); + } + } + + if (likely(nb_rx > 0)) { + TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx); + return nb_rx; + } + TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc); + return 0; +} + +uint16_t rx_pkt_hw(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 0, next_port); +} + +uint16_t rx_pkt_hw_pow2(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2); +} + +uint16_t rx_pkt_hw1(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw1_param(tbase, mbufs, 0); +} + +uint16_t rx_pkt_hw_multi(struct task_base *tbase, struct rte_mbuf 
***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 1, next_port); +} + +uint16_t rx_pkt_hw_pow2_multi(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2); +} + +uint16_t rx_pkt_hw1_multi(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw1_param(tbase, mbufs, 1); +} + +/* The following functions implement ring access */ +static uint16_t ring_deq(struct rte_ring *r, struct rte_mbuf **mbufs) +{ + void **v_mbufs = (void **)mbufs; +#ifdef BRAS_RX_BULK +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + return rte_ring_sc_dequeue_bulk(r, v_mbufs, MAX_RING_BURST) < 0? 0 : MAX_RING_BURST; +#else + return rte_ring_sc_dequeue_bulk(r, v_mbufs, MAX_RING_BURST, NULL); +#endif +#else +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + return rte_ring_sc_dequeue_burst(r, v_mbufs, MAX_RING_BURST); +#else + return rte_ring_sc_dequeue_burst(r, v_mbufs, MAX_RING_BURST, NULL); +#endif +#endif +} + +uint16_t rx_pkt_sw(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + START_EMPTY_MEASSURE(); + *mbufs = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK); + uint8_t lr = tbase->rx_params_sw.last_read_ring; + uint16_t nb_rx; + + do { + nb_rx = ring_deq(tbase->rx_params_sw.rx_rings[lr], *mbufs); + lr = lr + 1 == tbase->rx_params_sw.nb_rxrings? 0 : lr + 1; + } while(!nb_rx && lr != tbase->rx_params_sw.last_read_ring); + + tbase->rx_params_sw.last_read_ring = lr; + + if (nb_rx != 0) { + TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx); + return nb_rx; + } + else { + TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc); + return 0; + } +} + +/* Same as rx_pkt_sw expect with a mask for the number of receive + rings (can only be used if nb_rxring is a power of 2). 
*/ +uint16_t rx_pkt_sw_pow2(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + START_EMPTY_MEASSURE(); + *mbufs = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK); + uint8_t lr = tbase->rx_params_sw.last_read_ring; + uint16_t nb_rx; + + do { + nb_rx = ring_deq(tbase->rx_params_sw.rx_rings[lr], *mbufs); + lr = (lr + 1) & tbase->rx_params_sw.rxrings_mask; + } while(!nb_rx && lr != tbase->rx_params_sw.last_read_ring); + + tbase->rx_params_sw.last_read_ring = lr; + + if (nb_rx != 0) { + TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx); + return nb_rx; + } + else { + TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc); + return 0; + } +} + +uint16_t rx_pkt_self(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + START_EMPTY_MEASSURE(); + uint16_t nb_rx = tbase->ws_mbuf->idx[0].nb_rx; + if (nb_rx) { + tbase->ws_mbuf->idx[0].nb_rx = 0; + *mbufs = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK); + TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx); + return nb_rx; + } + else { + TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc); + return 0; + } +} + +/* Used for tasks that do not receive packets (i.e. Packet +generation). Always returns 1 but never returns packets and does not +increment statistics. This function allows to use the same code path +as for tasks that actually receive packets. */ +uint16_t rx_pkt_dummy(__attribute__((unused)) struct task_base *tbase, + __attribute__((unused)) struct rte_mbuf ***mbufs) +{ + return 1; +} + +/* After the system has been configured, it is known if there is only + one RX ring. If this is the case, a more specialized version of the + function above can be used to save cycles. 
*/ +uint16_t rx_pkt_sw1(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + START_EMPTY_MEASSURE(); + *mbufs = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK); + uint16_t nb_rx = ring_deq(tbase->rx_params_sw1.rx_ring, *mbufs); + + if (nb_rx != 0) { + TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx); + return nb_rx; + } + else { + TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc); + return 0; + } +} + +static uint16_t call_prev_rx_pkt(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint16_t ret; + + if (tbase->aux->rx_prev_idx + 1 == tbase->aux->rx_prev_count) { + ret = tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_idx](tbase, mbufs); + } else { + tbase->aux->rx_prev_idx++; + ret = tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_idx](tbase, mbufs); + tbase->aux->rx_prev_idx--; + } + + return ret; +} + +/* Only used when there are packets to be dumped. This function is + meant as a debugging tool and is therefore not optimized. When the + number of packets to dump falls back to 0, the original (optimized) + rx function is restored. This allows to support dumping packets + without any performance impact if the feature is not used. */ +uint16_t rx_pkt_dump(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint16_t ret = call_prev_rx_pkt(tbase, mbufs); + + if (ret) { + uint32_t n_dump = tbase->aux->task_rt_dump.n_print_rx; + n_dump = ret < n_dump? ret : n_dump; + + if (tbase->aux->task_rt_dump.input->reply == NULL) { + for (uint32_t i = 0; i < n_dump; ++i) { + plogd_info((*mbufs)[i], "RX: "); + } + } + else { + struct input *input = tbase->aux->task_rt_dump.input; + + for (uint32_t i = 0; i < n_dump; ++i) { + /* TODO: Execute callback with full + data in a single call. 
*/ + char tmp[128]; + int strlen; + +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + int port_id = ((*mbufs)[i])->port; +#else + int port_id = ((*mbufs)[i])->pkt.in_port; +#endif + strlen = snprintf(tmp, sizeof(tmp), "pktdump,%d,%d\n", port_id, + rte_pktmbuf_pkt_len((*mbufs)[i])); + + input->reply(input, tmp, strlen); + input->reply(input, rte_pktmbuf_mtod((*mbufs)[i], char *), rte_pktmbuf_pkt_len((*mbufs)[i])); + input->reply(input, "\n", 1); + } + } + + tbase->aux->task_rt_dump.n_print_rx -= n_dump; + + if (0 == tbase->aux->task_rt_dump.n_print_rx) { + task_base_del_rx_pkt_function(tbase, rx_pkt_dump); + } + } + return ret; +} + +uint16_t rx_pkt_trace(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint16_t ret = call_prev_rx_pkt(tbase, mbufs); + + if (ret) { + uint32_t n_trace = tbase->aux->task_rt_dump.n_trace; + n_trace = ret < n_trace? ret : n_trace; + tbase->aux->task_rt_dump.cur_trace = n_trace; + + for (uint32_t i = 0; i < n_trace; ++i) { + uint8_t *pkt = rte_pktmbuf_mtod((*mbufs)[i], uint8_t *); + rte_memcpy(tbase->aux->task_rt_dump.pkt_cpy[i], pkt, sizeof(tbase->aux->task_rt_dump.pkt_cpy[i])); + tbase->aux->task_rt_dump.pkt_cpy_len[i] = rte_pktmbuf_pkt_len((*mbufs)[i]); + tbase->aux->task_rt_dump.pkt_mbuf_addr[i] = (*mbufs)[i]; + } + + tbase->aux->task_rt_dump.n_trace -= n_trace; + /* Unset by TX when n_trace = 0 */ + } + return ret; +} + +/* Gather the distribution of the number of packets that have been + received from one RX call. Since the value is only modified by the + task that receives the packet, no atomic operation is needed. 
*/ +uint16_t rx_pkt_distr(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint16_t ret = call_prev_rx_pkt(tbase, mbufs); + + tbase->aux->rx_bucket[ret]++; + return ret; +} + +uint16_t rx_pkt_bw(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint16_t ret = call_prev_rx_pkt(tbase, mbufs); + uint32_t tot_bytes = 0; + + for (uint16_t i = 0; i < ret; ++i) { + tot_bytes += mbuf_wire_size((*mbufs)[i]); + } + + TASK_STATS_ADD_RX_BYTES(&tbase->aux->stats, tot_bytes); + + return ret; +} + +uint16_t rx_pkt_tsc(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint64_t before = rte_rdtsc(); + uint16_t ret = call_prev_rx_pkt(tbase, mbufs); + uint64_t after = rte_rdtsc(); + + tbase->aux->tsc_rx.before = before; + tbase->aux->tsc_rx.after = after; + + return ret; +} + +uint16_t rx_pkt_all(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + uint16_t tot = 0; + uint16_t ret = 0; + struct rte_mbuf **new_mbufs; + struct rte_mbuf **dst = tbase->aux->all_mbufs; + + /* In case we receive less than MAX_PKT_BURST packets in one + iteration, do no perform any copying of mbuf pointers. Use + the buffer itself instead. 
*/ + ret = call_prev_rx_pkt(tbase, &new_mbufs); + if (ret < MAX_PKT_BURST/2) { + *mbufs = new_mbufs; + return ret; + } + + memcpy(dst + tot, new_mbufs, ret * sizeof(*dst)); + tot += ret; + *mbufs = dst; + + do { + ret = call_prev_rx_pkt(tbase, &new_mbufs); + memcpy(dst + tot, new_mbufs, ret * sizeof(*dst)); + tot += ret; + } while (ret == MAX_PKT_BURST/2 && tot < MAX_RX_PKT_ALL - MAX_PKT_BURST); + + if (tot >= MAX_RX_PKT_ALL - MAX_PKT_BURST) { + plog_err("Could not receive all packets - buffer full\n"); + } + + return tot; +} diff --git a/VNFs/DPPD-PROX/rx_pkt.h b/VNFs/DPPD-PROX/rx_pkt.h new file mode 100644 index 00000000..57b948e2 --- /dev/null +++ b/VNFs/DPPD-PROX/rx_pkt.h @@ -0,0 +1,49 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _RX_PKT_H_ +#define _RX_PKT_H_ + +#include <inttypes.h> + +struct rte_mbuf; +struct task_base; + +uint16_t rx_pkt_hw(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_hw_pow2(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_hw1(struct task_base *tbase, struct rte_mbuf ***mbufs); + +/* The _multi variation of the function is used to work-around the + problem with QoS, multi-seg mbufs and vector PMD. When vector + PMD returns more than 32 packets, the two variations of the + receive function can be merged back together. 
*/ +uint16_t rx_pkt_hw_multi(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_hw_pow2_multi(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_hw1_multi(struct task_base *tbase, struct rte_mbuf ***mbufs); + +uint16_t rx_pkt_sw(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_sw_pow2(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_sw1(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_self(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_dummy(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_dump(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_trace(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_distr(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_bw(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_tsc(struct task_base *tbase, struct rte_mbuf ***mbufs); +uint16_t rx_pkt_all(struct task_base *tbase, struct rte_mbuf ***mbufs); + +#endif /* _RX_PKT_H_ */ diff --git a/VNFs/DPPD-PROX/stats.c b/VNFs/DPPD-PROX/stats.c new file mode 100644 index 00000000..2418826f --- /dev/null +++ b/VNFs/DPPD-PROX/stats.c @@ -0,0 +1,100 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> + +#include "prox_malloc.h" +#include "prox_cfg.h" +#include "stats.h" +#include "stats_port.h" +#include "stats_mempool.h" +#include "stats_ring.h" +#include "stats_l4gen.h" +#include "stats_latency.h" +#include "stats_global.h" +#include "stats_core.h" +#include "stats_task.h" +#include "stats_prio_task.h" +#include "stats_latency.h" + +/* Stores all readed values from the cores, displaying is done afterwards because + displaying introduces overhead. If displaying was done right after the values + are read, inaccuracy is introduced for later cores */ +int last_stat; /* 0 or 1 to track latest 2 measurements */ + +void stats_reset(void) +{ + stats_task_reset(); + stats_prio_task_reset(); + stats_port_reset(); + stats_latency_reset(); + stats_global_reset(); +} + +void stats_init(unsigned avg_start, unsigned duration) +{ + stats_lcore_init(); + stats_task_init(); + stats_prio_task_init(); + stats_port_init(); + stats_mempool_init(); + stats_latency_init(); + stats_l4gen_init(); + stats_ring_init(); + stats_global_init(avg_start, duration); +} + +void stats_update(uint16_t flag_cons) +{ + /* Keep track of last 2 measurements. 
*/ + last_stat = !last_stat; + + if (flag_cons & STATS_CONS_F_TASKS) + stats_task_update(); + + if (flag_cons & STATS_CONS_F_PRIO_TASKS) + stats_prio_task_update(); + + if (flag_cons & STATS_CONS_F_LCORE) + stats_lcore_update(); + + if (flag_cons & STATS_CONS_F_PORTS) + stats_port_update(); + + if (flag_cons & STATS_CONS_F_MEMPOOLS) + stats_mempool_update(); + + if (flag_cons & STATS_CONS_F_LATENCY) + stats_latency_update(); + + if (flag_cons & STATS_CONS_F_L4GEN) + stats_l4gen_update(); + + if (flag_cons & STATS_CONS_F_RINGS) + stats_ring_update(); + + if (flag_cons & STATS_CONS_F_LCORE) + stats_lcore_post_proc(); + + if (flag_cons & STATS_CONS_F_TASKS) + stats_task_post_proc(); + + if (flag_cons & STATS_CONS_F_PRIO_TASKS) + stats_prio_task_post_proc(); + + if (flag_cons & STATS_CONS_F_GLOBAL) + stats_global_post_proc(); +} diff --git a/VNFs/DPPD-PROX/stats.h b/VNFs/DPPD-PROX/stats.h new file mode 100644 index 00000000..382fc4d0 --- /dev/null +++ b/VNFs/DPPD-PROX/stats.h @@ -0,0 +1,31 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_H_ +#define _STATS_H_ + +#include <rte_atomic.h> + +#include "stats_cons.h" +#include "clock.h" +#include "prox_globals.h" +#include "genl4_bundle.h" + +void stats_reset(void); +void stats_init(unsigned avg_start, unsigned duration); +void stats_update(uint16_t flag_cons); + +#endif /* _STATS_H_ */ diff --git a/VNFs/DPPD-PROX/stats_cons.h b/VNFs/DPPD-PROX/stats_cons.h new file mode 100644 index 00000000..ba51f49f --- /dev/null +++ b/VNFs/DPPD-PROX/stats_cons.h @@ -0,0 +1,39 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_CONS_H_ +#define _STATS_CONS_H_ + +#define STATS_CONS_F_TASKS 0x01 +#define STATS_CONS_F_LCORE 0x02 +#define STATS_CONS_F_PORTS 0x04 +#define STATS_CONS_F_MEMPOOLS 0x08 +#define STATS_CONS_F_RINGS 0x10 +#define STATS_CONS_F_LATENCY 0x20 +#define STATS_CONS_F_L4GEN 0x40 +#define STATS_CONS_F_GLOBAL 0x80 +#define STATS_CONS_F_PRIO_TASKS 0x100 +#define STATS_CONS_F_ALL 0x1ff + +struct stats_cons { + void (*init)(void); + void (*notify)(void); + void (*refresh)(void); /* Only called if not NULL, used to signal lsc or core stop/start */ + void (*finish)(void); + uint16_t flags; +}; + +#endif /* _STATS_CONS_H_ */ diff --git a/VNFs/DPPD-PROX/stats_cons_cli.c b/VNFs/DPPD-PROX/stats_cons_cli.c new file mode 100644 index 00000000..4f9235c6 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_cons_cli.c @@ -0,0 +1,48 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> + +#include "stats.h" +#include "stats_cons_cli.h" +#include "prox_cfg.h" +#include "prox_args.h" +#include "prox_assert.h" +#include "commands.h" + +static struct stats_cons stats_cons_cli = { + .init = stats_cons_cli_init, + .notify = stats_cons_cli_notify, + .finish = stats_cons_cli_finish, + .flags = STATS_CONS_F_ALL, +}; + +struct stats_cons *stats_cons_cli_get(void) +{ + return &stats_cons_cli; +} + +void stats_cons_cli_init(void) +{ +} + +void stats_cons_cli_notify(void) +{ +} + +void stats_cons_cli_finish(void) +{ +} diff --git a/VNFs/DPPD-PROX/stats_cons_cli.h b/VNFs/DPPD-PROX/stats_cons_cli.h new file mode 100644 index 00000000..b5856d6d --- /dev/null +++ b/VNFs/DPPD-PROX/stats_cons_cli.h @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_CONS_CLI_H_ +#define _STATS_CONS_CLI_H_ + +#include "stats_cons.h" + +void stats_cons_cli_init(void); +void stats_cons_cli_notify(void); +void stats_cons_cli_finish(void); + +struct stats_cons *stats_cons_cli_get(void); + +#endif /* _STATS_CONS_CLI_H_ */ diff --git a/VNFs/DPPD-PROX/stats_cons_log.c b/VNFs/DPPD-PROX/stats_cons_log.c new file mode 100644 index 00000000..7e966533 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_cons_log.c @@ -0,0 +1,269 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> + +#include "stats.h" +#include "stats_l4gen.h" +#include "stats_cons_log.h" +#include "prox_cfg.h" +#include "prox_args.h" +#include "prox_assert.h" +#include "commands.h" + +static struct stats_cons stats_cons_log = { + .init = stats_cons_log_init, + .notify = stats_cons_log_notify, + .finish = stats_cons_log_finish, +#ifndef DPI_STATS + .flags = STATS_CONS_F_ALL, +#else + .flags = STATS_CONS_F_PORTS|STATS_CONS_F_TASKS, +#endif +}; + +struct header { + uint64_t hz; + uint64_t now; + uint64_t n_entries; + uint64_t n_entry_fields; + uint8_t n_entry_field_size[64]; +}; + +static void header_init(struct header *hdr, uint64_t hz, uint64_t now, uint64_t n_entries) { + memset(hdr, 0, sizeof(*hdr)); + hdr->hz = hz; + hdr->now = now; + hdr->n_entries = n_entries; +} + +static void header_add_field(struct header *hdr, uint8_t size) { + hdr->n_entry_field_size[hdr->n_entry_fields++] = size; +} + +static void header_write(struct header *hdr, FILE *fp) { + size_t header_size_no_fields = sizeof(*hdr) - sizeof(hdr->n_entry_field_size); + size_t header_size_effective = header_size_no_fields + hdr->n_entry_fields; + + fwrite(hdr, header_size_effective, 1, fp); +} + +#define BUFFERED_RECORD_LEN 16384 + +#define STATS_DUMP_FILE_NAME "stats_dump" +static FILE *fp; + +struct entry { + uint32_t lcore_id; + uint32_t task_id; +#ifndef DPI_STATS + uint32_t l4_stats_id; +#endif +}; + +static struct entry entries[64]; +static uint64_t n_entries; + +#ifndef DPI_STATS +struct record { + uint32_t lcore_id; + uint32_t task_id; + uint64_t active_connections; + uint64_t bundles_created; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t tsc; +} __attribute__((packed)); +#else +struct record { + uint32_t lcore_id; + uint32_t task_id; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t drop_bytes; + uint64_t tsc; +} __attribute__((packed)); +#endif + +static struct record buf[BUFFERED_RECORD_LEN]; +static size_t buf_pos = 0; + +struct stats_cons 
*stats_cons_log_get(void) +{ + return &stats_cons_log; +} + +#ifndef DPI_STATS +void stats_cons_log_init(void) +{ + fp = fopen(STATS_DUMP_FILE_NAME, "w"); + if (!fp) + return; + + uint32_t lcore_id = -1; + + while(prox_core_next(&lcore_id, 0) == 0) { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (lconf->n_tasks_all && (strcmp(lconf->targs[0].task_init->mode_str, "genl4") || + strcmp(lconf->targs[0].task_init->sub_mode_str, ""))) + continue; + + for (uint32_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + entries[n_entries].lcore_id = lcore_id; + entries[n_entries].task_id = task_id; + entries[n_entries].l4_stats_id = n_entries; + n_entries++; + if (n_entries == sizeof(entries)/sizeof(entries[0])) + break; + } + cmd_rx_bw_start(lcore_id); + cmd_tx_bw_start(lcore_id); + if (n_entries == sizeof(entries)/sizeof(entries[0])) + break; + } + + struct header hdr; + + header_init(&hdr, rte_get_tsc_hz(), rte_rdtsc(), n_entries); + header_add_field(&hdr, sizeof(((struct record *)0)->lcore_id)); + header_add_field(&hdr, sizeof(((struct record *)0)->task_id)); + header_add_field(&hdr, sizeof(((struct record *)0)->active_connections)); + header_add_field(&hdr, sizeof(((struct record *)0)->bundles_created)); + header_add_field(&hdr, sizeof(((struct record *)0)->rx_bytes)); + header_add_field(&hdr, sizeof(((struct record *)0)->tx_bytes)); + header_add_field(&hdr, sizeof(((struct record *)0)->tsc)); + + header_write(&hdr, fp); +} + +void stats_cons_log_notify(void) +{ + const uint32_t n_l4gen = stats_get_n_l4gen(); + + if (buf_pos + n_entries > sizeof(buf)/sizeof(buf[0])) { + fwrite(buf, sizeof(buf[0]), buf_pos, fp); + buf_pos = 0; + } + PROX_ASSERT(buf_pos + n_entries <= sizeof(buf)/sizeof(buf[0])); + + for (uint32_t i = 0; i < n_entries; ++i) { + uint32_t c = entries[i].lcore_id; + uint32_t t = entries[i].task_id; + uint32_t j = entries[i].l4_stats_id; + struct l4_stats_sample *clast = stats_get_l4_stats_sample(j, 1); + struct task_stats *l = 
stats_get_task_stats(c, t); + struct task_stats_sample *last = stats_get_task_stats_sample(c, t, 1); + + buf[buf_pos].lcore_id = c; + buf[buf_pos].task_id = t; + + uint64_t tot_created = clast->stats.tcp_created + clast->stats.udp_created; + uint64_t tot_finished = clast->stats.tcp_finished_retransmit + clast->stats.tcp_finished_no_retransmit + + clast->stats.udp_finished + clast->stats.udp_expired + clast->stats.tcp_expired; + + buf[buf_pos].active_connections = tot_created - tot_finished; + buf[buf_pos].bundles_created = clast->stats.bundles_created; + buf[buf_pos].rx_bytes = last->rx_bytes; + buf[buf_pos].tx_bytes = last->tx_bytes; + buf[buf_pos].tsc = clast->tsc; + + buf_pos++; + } +} + +#else +void stats_cons_log_init(void) +{ + uint64_t el = rte_get_tsc_hz(); + uint64_t now = rte_rdtsc(); + + fp = fopen(STATS_DUMP_FILE_NAME, "w"); + if (!fp) + return; + + uint32_t lcore_id = -1; + + while(prox_core_next(&lcore_id, 0) == 0) { + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + if (!lconf->n_tasks_all) + continue; + + for (uint32_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + if (strcmp(lconf->targs[task_id].task_init->mode_str, "lbpos")) + continue; + + entries[n_entries].lcore_id = lcore_id; + entries[n_entries].task_id = task_id; + n_entries++; + if (n_entries == sizeof(entries)/sizeof(entries[0])) + break; + } + cmd_rx_bw_start(lcore_id); + cmd_tx_bw_start(lcore_id); + if (n_entries == sizeof(entries)/sizeof(entries[0])) + break; + } + + struct header hdr; + + header_init(&hdr, rte_get_tsc_hz(), rte_rdtsc(), n_entries); + header_add_field(&hdr, sizeof(((struct record *)0)->lcore_id)); + header_add_field(&hdr, sizeof(((struct record *)0)->task_id)); + header_add_field(&hdr, sizeof(((struct record *)0)->rx_bytes)); + header_add_field(&hdr, sizeof(((struct record *)0)->tx_bytes)); + header_add_field(&hdr, sizeof(((struct record *)0)->drop_bytes)); + header_add_field(&hdr, sizeof(((struct record *)0)->tsc)); + header_write(&hdr, fp); +} + +void 
stats_cons_log_notify(void) +{ + for (uint32_t i = 0; i < n_entries; ++i) { + uint32_t c = entries[i].lcore_id; + uint32_t t = entries[i].task_id; + struct task_stats *l = stats_get_task_stats(c, t); + struct task_stats_sample *last = stats_get_task_stats_sample(c, t, 1); + + buf[buf_pos].lcore_id = c; + buf[buf_pos].task_id = t; + buf[buf_pos].tx_bytes = last->tx_bytes; + buf[buf_pos].rx_bytes = last->rx_bytes; + buf[buf_pos].drop_bytes = last->drop_bytes; + /* buf[buf_pos].drop_tx_fail = l->tot_drop_tx_fail; */ + buf[buf_pos].tsc = last->tsc; + + buf_pos++; + + if (buf_pos == sizeof(buf)/sizeof(buf[0])) { + fwrite(buf, sizeof(buf), 1, fp); + buf_pos = 0; + } + } +} +#endif + +void stats_cons_log_finish(void) +{ + if (fp) { + if (buf_pos) { + fwrite(buf, sizeof(buf[0]), buf_pos, fp); + buf_pos = 0; + } + fclose(fp); + } +} diff --git a/VNFs/DPPD-PROX/stats_cons_log.h b/VNFs/DPPD-PROX/stats_cons_log.h new file mode 100644 index 00000000..92597de2 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_cons_log.h @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_CONS_LOG_H_ +#define _STATS_CONS_LOG_H_ + +#include "stats_cons.h" + +void stats_cons_log_init(void); +void stats_cons_log_notify(void); +void stats_cons_log_finish(void); + +struct stats_cons *stats_cons_log_get(void); + +#endif /* _STATS_CONS_LOG_H_ */ diff --git a/VNFs/DPPD-PROX/stats_core.c b/VNFs/DPPD-PROX/stats_core.c new file mode 100644 index 00000000..845399e3 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_core.c @@ -0,0 +1,293 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_lcore.h> + +#include "prox_malloc.h" +#include "stats_core.h" +#include "cqm.h" +#include "log.h" +#include "msr.h" +#include "parse_utils.h" +#include "prox_cfg.h" +#include "lconf.h" + +struct stats_core_manager { + struct rdt_features rdt_features; + int msr_support; + int max_core_id; + uint16_t n_lcore_stats; + int cache_size[RTE_MAX_LCORE]; + struct lcore_stats lcore_stats_set[0]; +}; + +static struct stats_core_manager *scm; +extern int last_stat; + +static int get_L3_size(void) +{ + char buf[1024]= "/proc/cpuinfo"; + FILE* fd = fopen(buf, "r"); + if (fd == NULL) { + plogx_err("Could not open %s", buf); + return -1; + } + int lcore = -1, val = 0, size = 0; + while (fgets(buf, sizeof(buf), fd) != NULL) { + if (sscanf(buf, "processor : %u", &val) == 1) { + lcore = val; + scm->max_core_id = lcore; + } + if (sscanf(buf, "cache size : %u", &val) == 1) { + size = val; + if ((lcore != -1) && (lcore < RTE_MAX_LCORE)) { + scm->cache_size[lcore] = size * 1024; + } + } + } + fclose(fd); + plog_info("\tMaximum core_id = %d\n", scm->max_core_id); + return 0; +} + +int stats_get_n_lcore_stats(void) +{ + return scm->n_lcore_stats; +} + +int stats_cpu_freq_enabled(void) +{ + return scm->msr_support; +} + +int stats_cmt_enabled(void) +{ + return cmt_is_supported(); +} + +int stats_cat_enabled(void) +{ + return cat_is_supported(); +} + +int stats_mbm_enabled(void) +{ + return mbm_is_supported(); +} + +uint32_t stats_lcore_find_stat_id(uint32_t lcore_id) +{ + for (int i = 0; i < scm->n_lcore_stats; ++i) + if (scm->lcore_stats_set[i].lcore_id == lcore_id) + return i; + return 0; +} + +struct lcore_stats_sample *stats_get_lcore_stats_sample(uint32_t stat_id, int l) +{ + return &scm->lcore_stats_set[stat_id].sample[l == last_stat]; +} + +struct lcore_stats *stats_get_lcore_stats(uint32_t stat_id) +{ + return &scm->lcore_stats_set[stat_id]; +} + +static struct stats_core_manager *alloc_stats_core_manager(void) +{ + const int socket_id = 
rte_lcore_to_socket_id(rte_lcore_id()); + uint32_t n_lcore_stats = 0; + uint32_t lcore_id; + size_t mem_size; + + lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) + n_lcore_stats++; + mem_size = sizeof(struct stats_core_manager) + sizeof(struct lcore_stats) * n_lcore_stats; + return prox_zmalloc(mem_size, socket_id); +} + +void stats_lcore_init(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id; + int j = 0; + + scm = alloc_stats_core_manager(); + + if (is_virtualized()) { + plog_info("Not initializing msr as running in a VM\n"); + scm->msr_support = 0; + } else if ((scm->msr_support = !msr_init()) == 0) { + plog_warn("Failed to open msr pseudo-file (missing msr kernel module?)\n"); + } + + scm->n_lcore_stats = 0; + lcore_id = -1; + get_L3_size(); + while (prox_core_next(&lcore_id, 0) == 0) { + scm->lcore_stats_set[scm->n_lcore_stats++].lcore_id = lcore_id; + } + if (!rdt_is_supported()) + return; + + if (!scm->msr_support) { + plog_warn("CPU supports RDT but msr module not loaded. 
Disabling RDT stats.\n"); + return; + } + + if (0 != rdt_get_features(&scm->rdt_features)) { + plog_warn("Failed to get RDT features\n"); + return; + } + else { + rdt_init_stat_core(rte_lcore_id()); + } + + /* Start using last rmid, to keep first rmid for technologies (like cat) where there are less rmid */ + uint32_t last_rmid = scm->rdt_features.cmt_max_rmid; + for (uint32_t i = 0; i < scm->n_lcore_stats; ++i) { + scm->lcore_stats_set[i].rmid = last_rmid; // cmt_max_rmid is used by non-monitored cores + last_rmid--; + } + + uint64_t cache_set; + for (uint32_t i = 0; i < scm->n_lcore_stats; ++i) { + plog_info("\tAssociating core %u to rmid %lu (associating each core used by prox to a different rmid)\n", scm->lcore_stats_set[i].lcore_id, scm->lcore_stats_set[i].rmid); + cqm_assoc(scm->lcore_stats_set[i].lcore_id, scm->lcore_stats_set[i].rmid); + uint32_t lcore_id = scm->lcore_stats_set[i].lcore_id; + lconf = &lcore_cfg[lcore_id]; + cache_set = lconf->cache_set; + if ((cache_set) && (cache_set < PROX_MAX_CACHE_SET)) { + scm->lcore_stats_set[i].class = cache_set; + scm->lcore_stats_set[i].cat_mask = prox_cache_set_cfg[cache_set].mask; + if (prox_cache_set_cfg[cache_set].socket_id == -1) { + prox_cache_set_cfg[cache_set].socket_id = scm->lcore_stats_set[i].socket_id; + prox_cache_set_cfg[cache_set].lcore_id = lcore_id; + } else if (prox_cache_set_cfg[cache_set].socket_id != (int32_t)scm->lcore_stats_set[i].socket_id) { + plog_err("Unsupported config: Using same cache set on two different socket\n"); + } + } else { + scm->lcore_stats_set[i].class = 0; + scm->lcore_stats_set[i].cat_mask = (1 << cat_get_num_ways()) -1; + } + } + cat_log_init(0); + last_rmid = scm->rdt_features.cat_max_rmid; + for (int i = 0; i < PROX_MAX_CACHE_SET; i++) { + if (prox_cache_set_cfg[i].mask) { + plog_info("\tSetting cache set %d to %x\n", i, prox_cache_set_cfg[i].mask); + cat_set_class_mask(prox_cache_set_cfg[i].lcore_id, i, prox_cache_set_cfg[i].mask); + } + } + for (uint32_t i = 0; i < 
scm->n_lcore_stats; ++i) { + uint32_t lcore_id = scm->lcore_stats_set[i].lcore_id; + lconf = &lcore_cfg[lcore_id]; + cache_set = lconf->cache_set; + if (cache_set) { + if (prox_cache_set_cfg[cache_set].mask) { + scm->lcore_stats_set[i].rmid = (scm->lcore_stats_set[i].rmid) | (cache_set << 32); + plog_info("\tCache set = %ld for core %d\n", cache_set, lcore_id); + cqm_assoc(lcore_id, scm->lcore_stats_set[i].rmid); + } else { + plog_err("\tUndefined Cache set = %ld for core %d\n", cache_set, lcore_id); + } + } else { + if (prox_cache_set_cfg[cache_set].mask) { + scm->lcore_stats_set[i].rmid = (scm->lcore_stats_set[i].rmid); + plog_info("\tUsing default cache set for core %d\n", lcore_id); + cqm_assoc(lcore_id, scm->lcore_stats_set[i].rmid); + } else { + plog_info("\tNo default cache set for core %d\n", lcore_id); + } + } + } +} + +static void stats_lcore_update_freq(void) +{ + for (uint8_t i = 0; i < scm->n_lcore_stats; ++i) { + struct lcore_stats *ls = &scm->lcore_stats_set[i]; + struct lcore_stats_sample *lss = &ls->sample[last_stat]; + + msr_read(&lss->afreq, ls->lcore_id, 0xe8); + msr_read(&lss->mfreq, ls->lcore_id, 0xe7); + } +} +void stats_update_cache_mask(uint32_t lcore_id, uint32_t mask) +{ + for (uint8_t i = 0; i < scm->n_lcore_stats; ++i) { + struct lcore_stats *ls = &scm->lcore_stats_set[i]; + if (ls->lcore_id == lcore_id) { + plog_info("Updating core %d stats %d to mask %x\n", lcore_id, i, mask); + scm->lcore_stats_set[i].cat_mask = mask; + } + } +} + +static void stats_lcore_update_rdt(void) +{ + for (uint8_t i = 0; i < scm->n_lcore_stats; ++i) { + struct lcore_stats *ls = &scm->lcore_stats_set[i]; + + if (ls->rmid) { + cmt_read_ctr(&ls->cmt_data, ls->rmid, ls->lcore_id); + mbm_read_tot_bdw(&ls->mbm_tot, ls->rmid, ls->lcore_id); + mbm_read_loc_bdw(&ls->mbm_loc, ls->rmid, ls->lcore_id); + } + } +} + +void stats_lcore_post_proc(void) +{ + /* update CQM stats (calculate fraction and bytes reported) */ + for (uint8_t i = 0; i < scm->n_lcore_stats; ++i) { + 
struct lcore_stats *ls = &scm->lcore_stats_set[i]; + struct lcore_stats_sample *lss = &ls->sample[last_stat]; + + if (ls->rmid) { + ls->cmt_bytes = ls->cmt_data * scm->rdt_features.upscaling_factor; + lss->mbm_tot_bytes = ls->mbm_tot * scm->rdt_features.upscaling_factor; + lss->mbm_loc_bytes = ls->mbm_loc * scm->rdt_features.upscaling_factor; + plogx_dbg("cache[core %d] = %ld\n", ls->lcore_id, ls->cmt_bytes); + } + } + for (uint8_t i = 0; i < scm->n_lcore_stats; ++i) { + struct lcore_stats *ls = &scm->lcore_stats_set[i]; + + if (ls->rmid && scm->cache_size[ls->lcore_id]) + ls->cmt_fraction = ls->cmt_bytes * 10000 / scm->cache_size[ls->lcore_id]; + else + ls->cmt_fraction = 0; + } +} + +void stats_lcore_update(void) +{ + if (scm->msr_support) + stats_lcore_update_freq(); + if (rdt_is_supported()) + stats_lcore_update_rdt(); +} + +void stats_lcore_assoc_rmid(void) +{ + for (uint32_t i = 0; i < scm->n_lcore_stats; ++i) { + uint32_t lcore_id = scm->lcore_stats_set[i].lcore_id; + scm->lcore_stats_set[i].rmid = scm->lcore_stats_set[i].rmid & 0xffffffff; + cqm_assoc(lcore_id, scm->lcore_stats_set[i].rmid); + } +} diff --git a/VNFs/DPPD-PROX/stats_core.h b/VNFs/DPPD-PROX/stats_core.h new file mode 100644 index 00000000..f9939def --- /dev/null +++ b/VNFs/DPPD-PROX/stats_core.h @@ -0,0 +1,59 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_CORE_H_ +#define _STATS_CORE_H_ + +#include <inttypes.h> + +struct lcore_stats_sample { + uint64_t afreq; + uint64_t mfreq; + uint64_t mbm_tot_bytes; + uint64_t mbm_loc_bytes; +}; + +struct lcore_stats { + uint32_t lcore_id; + uint32_t socket_id; + uint64_t rmid; + uint64_t cmt_data; + uint64_t cmt_bytes; + uint64_t mbm_tot_bytes; + uint64_t mbm_loc_bytes; + uint64_t cmt_fraction; + uint32_t cat_mask; + uint64_t mbm_tot; + uint64_t mbm_loc; + uint32_t class; + struct lcore_stats_sample sample[2]; +}; + +uint32_t stats_lcore_find_stat_id(uint32_t lcore_id); +int stats_get_n_lcore_stats(void); +struct lcore_stats *stats_get_lcore_stats(uint32_t stat_id); +struct lcore_stats_sample *stats_get_lcore_stats_sample(uint32_t stat_id, int last); +int stats_cpu_freq_enabled(void); +int stats_cmt_enabled(void); +int stats_cat_enabled(void); +int stats_mbm_enabled(void); +void stats_lcore_update(void); +void stats_lcore_init(void); +void stats_lcore_post_proc(void); +void stats_update_cache_mask(uint32_t lcore_id, uint32_t mask); +void stats_lcore_assoc_rmid(void); + +#endif /* _STATS_CORE_H_ */ diff --git a/VNFs/DPPD-PROX/stats_global.c b/VNFs/DPPD-PROX/stats_global.c new file mode 100644 index 00000000..d7529d34 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_global.c @@ -0,0 +1,110 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> +#include <rte_cycles.h> +#include <inttypes.h> + +#include "stats_global.h" +#include "stats_port.h" +#include "stats_task.h" + +struct global_stats { + struct global_stats_sample sample[2]; + struct global_stats_sample beg; + uint8_t started_avg; + uint64_t start_tsc; + uint64_t end_tsc; +}; + +extern int last_stat; +static struct global_stats global_stats; + +uint64_t stats_get_last_tsc(void) +{ + return global_stats.sample[last_stat].tsc; +} + +uint64_t stats_global_start_tsc(void) +{ + return global_stats.start_tsc; +} + +uint64_t stats_global_beg_tsc(void) +{ + return global_stats.beg.tsc; +} + +uint64_t stats_global_end_tsc(void) +{ + return global_stats.end_tsc; +} + +struct global_stats_sample *stats_get_global_stats(int last) +{ + return &global_stats.sample[last == last_stat]; +} + +struct global_stats_sample *stats_get_global_stats_beg(void) +{ + return (global_stats.beg.tsc < global_stats.sample[last_stat].tsc)? &global_stats.beg : NULL; +} + +void stats_global_reset(void) +{ + uint64_t now = rte_rdtsc(); + uint64_t last_tsc = global_stats.sample[last_stat].tsc; + uint64_t prev_tsc = global_stats.sample[!last_stat].tsc; + uint64_t end_tsc = global_stats.end_tsc; + + memset(&global_stats, 0, sizeof(struct global_stats)); + global_stats.sample[last_stat].tsc = last_tsc; + global_stats.sample[!last_stat].tsc = prev_tsc; + global_stats.start_tsc = now; + global_stats.beg.tsc = now; + global_stats.end_tsc = end_tsc; +} + +void stats_global_init(unsigned avg_start, unsigned duration) +{ + uint64_t now = rte_rdtsc(); + + global_stats.start_tsc = now; + /* + 1 for rounding */ + tsc_hz = rte_get_tsc_hz(); + if (duration) + global_stats.end_tsc = global_stats.start_tsc + (avg_start + duration + 1) * tsc_hz; + + global_stats.beg.tsc = global_stats.start_tsc + avg_start * tsc_hz; +} + +void stats_global_post_proc(void) +{ + uint64_t *rx = &global_stats.sample[last_stat].host_rx_packets; + uint64_t *tx = 
&global_stats.sample[last_stat].host_tx_packets; + uint64_t *tsc = &global_stats.sample[last_stat].tsc; + + stats_task_get_host_rx_tx_packets(rx, tx, tsc); + global_stats.sample[last_stat].nics_ierrors = stats_port_get_ierrors(); + global_stats.sample[last_stat].nics_imissed = stats_port_get_imissed(); + global_stats.sample[last_stat].nics_rx_packets = stats_port_get_rx_packets(); + global_stats.sample[last_stat].nics_tx_packets = stats_port_get_tx_packets(); + + if (global_stats.sample[last_stat].tsc > global_stats.beg.tsc && !global_stats.started_avg) { + global_stats.started_avg = 1; + global_stats.beg = global_stats.sample[last_stat]; + } +} diff --git a/VNFs/DPPD-PROX/stats_global.h b/VNFs/DPPD-PROX/stats_global.h new file mode 100644 index 00000000..8f53ab5c --- /dev/null +++ b/VNFs/DPPD-PROX/stats_global.h @@ -0,0 +1,42 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_GLOBAL_H_ +#define _STATS_GLOBAL_H_ + +uint64_t stats_get_last_tsc(void); + +struct global_stats_sample { + uint64_t tsc; + uint64_t host_rx_packets; + uint64_t host_tx_packets; + uint64_t nics_rx_packets; + uint64_t nics_tx_packets; + uint64_t nics_ierrors; + uint64_t nics_imissed; +}; + +void stats_global_reset(void); +void stats_global_init(unsigned avg_start, unsigned duration); +void stats_global_post_proc(void); + +struct global_stats_sample *stats_get_global_stats(int last); +struct global_stats_sample *stats_get_global_stats_beg(void); +uint64_t stats_global_start_tsc(void); +uint64_t stats_global_beg_tsc(void); +uint64_t stats_global_end_tsc(void); + +#endif /* _STATS_GLOBAL_H_ */ diff --git a/VNFs/DPPD-PROX/stats_l4gen.c b/VNFs/DPPD-PROX/stats_l4gen.c new file mode 100644 index 00000000..37af7ac4 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_l4gen.c @@ -0,0 +1,110 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> + +#include "prox_malloc.h" +#include "prox_cfg.h" +#include "stats_l4gen.h" +#include "task_init.h" + +struct task_l4gen_stats { + struct task_base base; + struct l4_stats l4_stats; +}; + +struct stats_l4gen_manager { + uint16_t n_l4gen; + struct task_l4_stats task_l4_stats[0]; +}; + +extern int last_stat; +static struct stats_l4gen_manager *sl4m; + +int stats_get_n_l4gen(void) +{ + return sl4m->n_l4gen; +} + +struct task_l4_stats *stats_get_l4_stats(uint32_t i) +{ + return &sl4m->task_l4_stats[i]; +} + +struct l4_stats_sample *stats_get_l4_stats_sample(uint32_t i, int l) +{ + return &sl4m->task_l4_stats[i].sample[l == last_stat]; +} + +static struct stats_l4gen_manager *alloc_stats_l4gen_manager(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id = -1; + size_t mem_size; + uint32_t n_l4gen = 0; + const int socket_id = rte_lcore_to_socket_id(rte_lcore_id()); + + lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + + if (!strcmp(targ->task_init->mode_str, "genl4")) + n_l4gen++; + } + } + + mem_size = sizeof(struct stats_l4gen_manager) + sizeof(struct task_l4_stats) * n_l4gen; + return prox_zmalloc(mem_size, socket_id); +} + +void stats_l4gen_init(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id = -1; + + sl4m = alloc_stats_l4gen_manager(); + + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + + if (!strcmp(targ->task_init->mode_str, "genl4")) { + sl4m->task_l4_stats[sl4m->n_l4gen].task = (struct task_l4gen_stats *)lconf->tasks_all[task_id]; + sl4m->task_l4_stats[sl4m->n_l4gen].lcore_id = lcore_id; + sl4m->task_l4_stats[sl4m->n_l4gen].task_id = task_id; + sl4m->n_l4gen++; + } + } + } +} + +void stats_l4gen_update(void) +{ 
+ uint64_t before, after; + + for (uint16_t i = 0; i < sl4m->n_l4gen; ++i) { + struct task_l4gen_stats *task_l4gen = sl4m->task_l4_stats[i].task; + + before = rte_rdtsc(); + sl4m->task_l4_stats[i].sample[last_stat].stats = task_l4gen->l4_stats; + after = rte_rdtsc(); + + sl4m->task_l4_stats[i].sample[last_stat].tsc = (before >> 1) + (after >> 1); + } +} diff --git a/VNFs/DPPD-PROX/stats_l4gen.h b/VNFs/DPPD-PROX/stats_l4gen.h new file mode 100644 index 00000000..dfc4e111 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_l4gen.h @@ -0,0 +1,44 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_L4GEN_H_ +#define _STATS_L4GEN_H_ + +#include <inttypes.h> + +#include "genl4_bundle.h" + +struct task_l4gen_stats; + +struct l4_stats_sample { + uint64_t tsc; + struct l4_stats stats; +}; + +struct task_l4_stats { + struct task_l4gen_stats *task; + struct l4_stats_sample sample[2]; + uint8_t lcore_id; + uint8_t task_id; +}; + +void stats_l4gen_init(void); +void stats_l4gen_update(void); +int stats_get_n_l4gen(void); +struct task_l4_stats *stats_get_l4_stats(uint32_t i); +struct l4_stats_sample *stats_get_l4_stats_sample(uint32_t i, int l); + +#endif /* _STATS_L4GEN_H_ */ diff --git a/VNFs/DPPD-PROX/stats_latency.c b/VNFs/DPPD-PROX/stats_latency.c new file mode 100644 index 00000000..52027892 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_latency.c @@ -0,0 +1,225 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "prox_malloc.h" +#include "stats_latency.h" +#include "handle_lat.h" +#include "prox_cfg.h" +#include "prox_args.h" + +struct stats_latency_manager_entry { + struct task_lat *task; + uint8_t lcore_id; + uint8_t task_id; + struct lat_test lat_test; + struct lat_test tot_lat_test; + struct stats_latency stats; + struct stats_latency tot; +}; + +struct stats_latency_manager { + uint16_t n_latency; + struct stats_latency_manager_entry entries[0]; /* copy of stats when running update stats. 
*/ +}; + +static struct stats_latency_manager *slm; + +void stats_latency_reset(void) +{ + for (uint16_t i = 0; i < slm->n_latency; ++i) + lat_test_reset(&slm->entries[i].tot_lat_test); +} + +int stats_get_n_latency(void) +{ + return slm->n_latency; +} + +uint32_t stats_latency_get_core_id(uint32_t i) +{ + return slm->entries[i].lcore_id; +} + +uint32_t stats_latency_get_task_id(uint32_t i) +{ + return slm->entries[i].task_id; +} + +struct stats_latency *stats_latency_get(uint32_t i) +{ + return &slm->entries[i].stats; +} + +struct stats_latency *stats_latency_tot_get(uint32_t i) +{ + return &slm->entries[i].tot; +} + +static struct stats_latency_manager_entry *stats_latency_entry_find(uint8_t lcore_id, uint8_t task_id) +{ + struct stats_latency_manager_entry *entry; + + for (uint16_t i = 0; i < stats_get_n_latency(); ++i) { + entry = &slm->entries[i]; + + if (entry->lcore_id == lcore_id && entry->task_id == task_id) { + return entry; + } + } + return NULL; +} + +struct stats_latency *stats_latency_tot_find(uint32_t lcore_id, uint32_t task_id) +{ + struct stats_latency_manager_entry *entry = stats_latency_entry_find(lcore_id, task_id); + + if (!entry) + return NULL; + else + return &entry->tot; +} + +struct stats_latency *stats_latency_find(uint32_t lcore_id, uint32_t task_id) +{ + struct stats_latency_manager_entry *entry = stats_latency_entry_find(lcore_id, task_id); + + if (!entry) + return NULL; + else + return &entry->stats; +} + +static int task_runs_observable_latency(struct task_args *targ) +{ + /* TODO: make this work with multiple ports and with + rings. Currently, only showing lat tasks which have 1 RX + port. 
*/ + return !strcmp(targ->task_init->mode_str, "lat") && + (targ->nb_rxports == 1 || targ->nb_rxrings == 1); +} + +static struct stats_latency_manager *alloc_stats_latency_manager(void) +{ + const uint32_t socket_id = rte_lcore_to_socket_id(rte_lcore_id()); + struct stats_latency_manager *ret; + struct lcore_cfg *lconf; + uint32_t n_latency = 0; + uint32_t lcore_id; + size_t mem_size; + + lcore_id = -1; + while (prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + if (task_runs_observable_latency(targ)) + ++n_latency; + } + } + mem_size = sizeof(*ret) + sizeof(ret->entries[0]) * n_latency; + + ret = prox_zmalloc(mem_size, socket_id); + return ret; +} + +static void stats_latency_add_task(struct lcore_cfg *lconf, struct task_args *targ) +{ + struct stats_latency_manager_entry *new_entry = &slm->entries[slm->n_latency]; + + new_entry->task = (struct task_lat *)targ->tbase; + new_entry->lcore_id = lconf->id; + new_entry->task_id = targ->id; + slm->n_latency++; +} + +void stats_latency_init(void) +{ + struct lcore_cfg *lconf = NULL; + struct task_args *targ; + + slm = alloc_stats_latency_manager(); + + while (core_targ_next(&lconf, &targ, 0) == 0) { + if (task_runs_observable_latency(targ)) + stats_latency_add_task(lconf, targ); + } +} + +#ifdef LATENCY_HISTOGRAM +void stats_core_lat_histogram(uint8_t lcore_id, uint8_t task_id, uint64_t **buckets) +{ + struct stats_latency_manager_entry *lat_stats; + uint64_t tsc; + + lat_stats = stats_latency_entry_find(lcore_id, task_id); + + if (lat_stats) + *buckets = lat_stats->lat_test.buckets; + else + *buckets = NULL; +} +#endif + +static void stats_latency_fetch_entry(struct stats_latency_manager_entry *entry) +{ + struct stats_latency *cur = &entry->stats; + struct lat_test *lat_test_local = &entry->lat_test; + struct lat_test *lat_test_remote = 
task_lat_get_latency_meassurement(entry->task); + + if (!lat_test_remote) + return; + + if (lat_test_remote->tot_all_pkts) { + lat_test_copy(&entry->lat_test, lat_test_remote); + lat_test_reset(lat_test_remote); + lat_test_combine(&entry->tot_lat_test, &entry->lat_test); + } + + task_lat_use_other_latency_meassurement(entry->task); +} + +static void stats_latency_from_lat_test(struct stats_latency *dst, struct lat_test *src) +{ + /* In case packets were received, but measurements were too + inaccurate */ + if (src->tot_pkts) { + dst->max = lat_test_get_max(src); + dst->min = lat_test_get_min(src); + dst->avg = lat_test_get_avg(src); + dst->stddev = lat_test_get_stddev(src); + } + dst->accuracy_limit = lat_test_get_accuracy_limit(src); + dst->tot_packets = src->tot_pkts; + dst->tot_all_packets = src->tot_all_pkts; + dst->lost_packets = src->lost_packets; +} + +static void stats_latency_update_entry(struct stats_latency_manager_entry *entry) +{ + if (!entry->lat_test.tot_all_pkts) + return; + + stats_latency_from_lat_test(&entry->stats, &entry->lat_test); + stats_latency_from_lat_test(&entry->tot, &entry->tot_lat_test); +} + +void stats_latency_update(void) +{ + for (uint16_t i = 0; i < slm->n_latency; ++i) + stats_latency_fetch_entry(&slm->entries[i]); + for (uint16_t i = 0; i < slm->n_latency; ++i) + stats_latency_update_entry(&slm->entries[i]); +} diff --git a/VNFs/DPPD-PROX/stats_latency.h b/VNFs/DPPD-PROX/stats_latency.h new file mode 100644 index 00000000..83cd4a18 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_latency.h @@ -0,0 +1,54 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _STATS_LATENCY_H_ +#define _STATS_LATENCY_H_ + +#include <inttypes.h> + +#include "handle_lat.h" + +struct stats_latency { + struct time_unit_err avg; + struct time_unit_err min; + struct time_unit_err max; + struct time_unit_err stddev; + + struct time_unit accuracy_limit; + uint64_t lost_packets; + uint64_t tot_packets; + uint64_t tot_all_packets; +}; + +uint32_t stats_latency_get_core_id(uint32_t i); +uint32_t stats_latency_get_task_id(uint32_t i); +struct stats_latency *stats_latency_get(uint32_t i); +struct stats_latency *stats_latency_find(uint32_t lcore_id, uint32_t task_id); + +struct stats_latency *stats_latency_tot_get(uint32_t i); +struct stats_latency *stats_latency_tot_find(uint32_t lcore_id, uint32_t task_id); + +void stats_latency_init(void); +void stats_latency_update(void); +void stats_latency_reset(void); + +int stats_get_n_latency(void); + +#ifdef LATENCY_HISTOGRAM +void stats_core_lat_histogram(uint8_t lcore_id, uint8_t task_id, uint64_t **buckets); +#endif + +#endif /* _STATS_LATENCY_H_ */ diff --git a/VNFs/DPPD-PROX/stats_mempool.c b/VNFs/DPPD-PROX/stats_mempool.c new file mode 100644 index 00000000..c5861eb5 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_mempool.c @@ -0,0 +1,96 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_mempool.h> +#include <rte_version.h> +#include <inttypes.h> + +#include "prox_malloc.h" +#include "prox_port_cfg.h" +#include "stats_mempool.h" + +struct stats_mempool_manager { + uint32_t n_mempools; + struct mempool_stats mempool_stats[0]; +}; + +static struct stats_mempool_manager *smm; + +struct mempool_stats *stats_get_mempool_stats(uint32_t i) +{ + return &smm->mempool_stats[i]; +} + +int stats_get_n_mempools(void) +{ + return smm->n_mempools; +} + +static struct stats_mempool_manager *alloc_stats_mempool_manager(void) +{ + const uint32_t socket_id = rte_lcore_to_socket_id(rte_lcore_id()); + uint32_t n_max_mempools = sizeof(prox_port_cfg[0].pool)/sizeof(prox_port_cfg[0].pool[0]); + uint32_t n_mempools = 0; + size_t mem_size = sizeof(struct stats_mempool_manager); + + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) { + if (!prox_port_cfg[i].active) + continue; + + for (uint8_t j = 0; j < n_max_mempools; ++j) { + if (prox_port_cfg[i].pool[j] && prox_port_cfg[i].pool_size[j]) { + mem_size += sizeof(struct mempool_stats); + } + } + } + + return prox_zmalloc(mem_size, socket_id); +} + +void stats_mempool_init(void) +{ + uint32_t n_max_mempools = sizeof(prox_port_cfg[0].pool)/sizeof(prox_port_cfg[0].pool[0]); + + smm = alloc_stats_mempool_manager(); + for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i) { + if (!prox_port_cfg[i].active) + continue; + + for (uint8_t j = 0; j < n_max_mempools; ++j) { + if (prox_port_cfg[i].pool[j] && prox_port_cfg[i].pool_size[j]) { + struct mempool_stats *ms = &smm->mempool_stats[smm->n_mempools]; + + 
ms->pool = prox_port_cfg[i].pool[j]; + ms->port = i; + ms->queue = j; + ms->size = prox_port_cfg[i].pool_size[j]; + smm->n_mempools++; + } + } + } +} + +void stats_mempool_update(void) +{ + for (uint8_t mp_id = 0; mp_id < smm->n_mempools; ++mp_id) { + /* Note: The function free_count returns the number of used entries. */ +#if RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0) + smm->mempool_stats[mp_id].free = rte_mempool_avail_count(smm->mempool_stats[mp_id].pool); +#else + smm->mempool_stats[mp_id].free = rte_mempool_count(smm->mempool_stats[mp_id].pool); +#endif + } +} diff --git a/VNFs/DPPD-PROX/stats_mempool.h b/VNFs/DPPD-PROX/stats_mempool.h new file mode 100644 index 00000000..b62e111f --- /dev/null +++ b/VNFs/DPPD-PROX/stats_mempool.h @@ -0,0 +1,36 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_MEMPOOL_H_ +#define _STATS_MEMPOOL_H_ + +#include <inttypes.h> +#include <stddef.h> + +struct mempool_stats { + struct rte_mempool *pool; + uint16_t port; + uint16_t queue; + size_t free; + size_t size; +}; + +void stats_mempool_init(void); +struct mempool_stats *stats_get_mempool_stats(uint32_t i); +int stats_get_n_mempools(void); +void stats_mempool_update(void); + +#endif /* _STATS_MEMPOOL_H_ */ diff --git a/VNFs/DPPD-PROX/stats_parser.c b/VNFs/DPPD-PROX/stats_parser.c new file mode 100644 index 00000000..aa9d6741 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_parser.c @@ -0,0 +1,897 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <string.h> +#include <stddef.h> + +#include "stats_parser.h" +#include "log.h" +#include "stats.h" +#include "parse_utils.h" +#include "handle_lat.h" +#include "prox_port_cfg.h" +#include "stats_port.h" +#include "stats_mempool.h" +#include "stats_ring.h" +#include "stats_l4gen.h" +#include "stats_latency.h" +#include "stats_global.h" +#include "stats_prio_task.h" + +struct stats_path_str { + const char *str; + uint64_t (*func)(int argc, const char *argv[]); +}; + +static int args_to_core_task(const char *core_str, const char *task_str, uint32_t *lcore_id, uint32_t *task_id) +{ + if (parse_list_set(lcore_id, core_str, 1) != 1) + return -1; + *task_id = atoi(task_str); + + return 0; +} + +static uint64_t sp_task_idle_cycles(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->tsc; +} + +static uint64_t sp_task_rx_packets(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->rx_pkt_count; +} + +static uint64_t sp_task_tx_packets(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->tx_pkt_count; +} + +static uint64_t sp_task_drop_tx_fail(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->drop_tx_fail; +} + +static uint64_t sp_task_drop_tx_fail_prio(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + if (stats_get_prio_task_stats_sample_by_core_task(c, t, 1)) + return 
stats_get_prio_task_stats_sample_by_core_task(c, t, 1)->drop_tx_fail_prio[atoi(argv[2])]; + else + return -1; +} + +static uint64_t sp_task_rx_prio(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_prio_task_stats_sample_by_core_task(c, t, 1)->rx_prio[atoi(argv[2])]; +} + +static uint64_t sp_task_drop_discard(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->drop_discard; +} + +static uint64_t sp_task_drop_handled(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->drop_handled; +} + +static uint64_t sp_task_rx_bytes(int argc, const char *argv[]) +{ + return -1; +} + +static uint64_t sp_task_tx_bytes(int argc, const char *argv[]) +{ + return -1; +} + +static uint64_t sp_task_tsc(int argc, const char *argv[]) +{ + struct task_stats_sample *last; + uint32_t c, t; + + if (args_to_core_task(argv[0], argv[1], &c, &t)) + return -1; + return stats_get_task_stats_sample(c, t, 1)->tsc; +} + +static uint64_t sp_l4gen_created(int argc, const char *argv[]) +{ + struct l4_stats_sample *clast = NULL; + + if (atoi(argv[0]) >= stats_get_n_l4gen()) + return -1; + clast = stats_get_l4_stats_sample(atoi(argv[0]), 1); + return clast->stats.tcp_created + clast->stats.udp_created; +} + +static uint64_t sp_l4gen_finished(int argc, const char *argv[]) +{ + struct l4_stats_sample *clast = NULL; + + if (atoi(argv[0]) >= stats_get_n_l4gen()) + return -1; + clast = stats_get_l4_stats_sample(atoi(argv[0]), 1); + return clast->stats.tcp_finished_retransmit + clast->stats.tcp_finished_no_retransmit + + clast->stats.udp_finished + clast->stats.udp_expired + clast->stats.tcp_expired; +} + 
+static uint64_t sp_l4gen_expire_tcp(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.tcp_expired;
+}
+
+static uint64_t sp_l4gen_expire_udp(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.udp_expired;
+}
+
+static uint64_t sp_l4gen_retx(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.tcp_retransmits;
+}
+
+static uint64_t sp_l4gen_tsc(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->tsc;
+}
+
+static uint64_t sp_l4gen_torndown_no_retx(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.tcp_finished_no_retransmit;
+}
+
+static uint64_t sp_l4gen_torndown_retx(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.tcp_finished_retransmit;
+}
+
+static uint64_t sp_l4gen_torndown_udp(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.udp_finished;
+}
+
+static uint64_t sp_l4gen_created_tcp(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.tcp_created;
+
+}
+
+static uint64_t sp_l4gen_created_udp(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.udp_created;
+}
+
+static uint64_t sp_l4gen_created_all(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.tcp_created + clast->stats.udp_created;
+}
+
+static uint64_t sp_l4gen_created_bundles(int argc, const char *argv[])
+{
+	struct l4_stats_sample *clast = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_l4gen())
+		return -1;
+	clast = stats_get_l4_stats_sample(atoi(argv[0]), 1);
+	return clast->stats.bundles_created;
+}
+
+static uint64_t sp_latency_min(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->min.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_mem_used(int argc, const char *argv[])
+{
+	struct mempool_stats *ms;
+
+	if (atoi(argv[0]) >= stats_get_n_mempools()) /* valid indices are 0..n-1 */
+		return -1;
+	ms = stats_get_mempool_stats(atoi(argv[0]));
+	return ms->size - ms->free;
+}
+
+static uint64_t sp_mem_free(int argc, const char *argv[])
+{
+	struct mempool_stats *ms;
+
+	if (atoi(argv[0]) >= stats_get_n_mempools())
+		return -1;
+	ms = stats_get_mempool_stats(atoi(argv[0]));
+	return ms->free;
+}
+
+static uint64_t sp_mem_size(int argc, const char *argv[])
+{
+	struct mempool_stats *ms;
+
+	if (atoi(argv[0]) >= stats_get_n_mempools())
+		return -1;
+	ms = stats_get_mempool_stats(atoi(argv[0]));
+	return ms->size;
+}
+
+static uint64_t sp_port_no_mbufs(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active) /* >=: prox_port_cfg has PROX_MAX_PORTS entries */
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->no_mbufs;
+}
+
+static uint64_t sp_port_ierrors(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->ierrors;
+}
+
+static uint64_t sp_port_imissed(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->imissed;
+}
+
+static uint64_t sp_port_oerrors(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->oerrors;
+}
+
+static uint64_t sp_port_rx_packets(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->rx_tot;
+}
+
+static uint64_t sp_port_tx_packets(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_tot;
+}
+
+static uint64_t sp_port_rx_bytes(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->rx_bytes;
+}
+
+static uint64_t sp_port_tx_bytes(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_bytes;
+}
+
+static uint64_t sp_port_tx_packets_64(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_64];
+}
+
+static uint64_t sp_port_tx_packets_65_127(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_65];
+}
+
+static uint64_t sp_port_tx_packets_128_255(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_128];
+}
+
+static uint64_t sp_port_tx_packets_256_511(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_256];
+}
+
+static uint64_t sp_port_tx_packets_512_1023(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_512];
+}
+
+static uint64_t sp_port_tx_packets_1024_1522(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_1024];
+}
+
+static uint64_t sp_port_tx_packets_1523_max(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tx_pkt_size[PKT_SIZE_1522];
+}
+
+static uint64_t sp_port_tsc(int argc, const char *argv[])
+{
+	uint32_t port_id = atoi(argv[0]);
+	struct port_stats_sample *ps;
+
+	if (port_id >= PROX_MAX_PORTS || !prox_port_cfg[port_id].active)
+		return -1;
+	ps = stats_get_port_stats_sample(port_id, 1);
+	return ps->tsc;
+}
+
+static uint64_t sp_latency_max(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->max.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_latency_avg(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->avg.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_latency_lost(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	return lat_test->lost_packets;
+}
+
+static uint64_t sp_latency_tot_lost(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_tot_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	return lat_test->lost_packets;
+}
+
+static uint64_t sp_latency_total(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_all_packets)
+		return -1;
+
+	return lat_test->tot_all_packets;
+}
+
+static uint64_t sp_latency_used(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_all_packets)
+		return -1;
+
+	return lat_test->tot_packets;
+}
+
+static uint64_t sp_latency_tot_total(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_tot_get(atoi(argv[0]));
+
+	if (!lat_test->tot_all_packets)
+		return -1;
+
+	return lat_test->tot_all_packets;
+}
+
+static uint64_t sp_latency_tot_used(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_tot_get(atoi(argv[0]));
+
+	if (!lat_test->tot_all_packets)
+		return -1;
+
+	return lat_test->tot_packets;
+}
+
+static uint64_t sp_latency_tot_min(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_tot_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->min.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_latency_tot_max(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_tot_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->max.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_latency_tot_avg(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_tot_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->avg.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_latency_stddev(int argc, const char *argv[])
+{
+	struct stats_latency *lat_test = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_latency())
+		return -1;
+	lat_test = stats_latency_get(atoi(argv[0]));
+
+	if (!lat_test->tot_packets)
+		return -1;
+
+	struct time_unit tu = lat_test->stddev.time;
+	return time_unit_to_usec(&tu);
+}
+
+static uint64_t sp_ring_used(int argc, const char *argv[])
+{
+	struct ring_stats *rs = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_rings())
+		return -1;
+	rs = stats_get_ring_stats(atoi(argv[0]));
+	return rs->size - rs->free;
+}
+
+static uint64_t sp_ring_free(int argc, const char *argv[])
+{
+	struct ring_stats *rs = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_rings())
+		return -1;
+	rs = stats_get_ring_stats(atoi(argv[0]));
+	return rs->free;
+}
+
+static uint64_t sp_ring_size(int argc, const char *argv[])
+{
+	struct ring_stats *rs = NULL;
+
+	if (atoi(argv[0]) >= stats_get_n_rings())
+		return -1;
+	rs = stats_get_ring_stats(atoi(argv[0]));
+	return rs->size;
+}
+
+static uint64_t sp_global_host_rx_packets(int argc, const char *argv[])
+{
+	return stats_get_global_stats(1)->host_rx_packets;
+}
+
+static uint64_t sp_global_host_tx_packets(int argc, const char *argv[])
+{
+	return stats_get_global_stats(1)->host_tx_packets;
+}
+
+static uint64_t sp_global_nics_rx_packets(int argc, const char *argv[])
+{
+	return stats_get_global_stats(1)->nics_rx_packets;
+}
+
+static uint64_t sp_global_nics_tx_packets(int argc, const char *argv[])
+{
+	return stats_get_global_stats(1)->nics_tx_packets;
+}
+
+static uint64_t sp_global_nics_ierrors(int argc, const char *argv[]) +{ + return stats_get_global_stats(1)->nics_ierrors; +} + +static uint64_t sp_global_nics_imissed(int argc, const char *argv[]) +{ + return stats_get_global_stats(1)->nics_imissed; +} + +static uint64_t sp_global_tsc(int argc, const char *argv[]) +{ + return stats_get_global_stats(1)->tsc; +} + +static uint64_t sp_hz(int argc, const char *argv[]) +{ + return rte_get_tsc_hz(); +} + +struct stats_path_str stats_paths[] = { + {"hz", sp_hz}, + + {"global.host.rx.packets", sp_global_host_rx_packets}, + {"global.host.tx.packets", sp_global_host_tx_packets}, + {"global.nics.rx.packets", sp_global_nics_rx_packets}, + {"global.nics.tx.packets", sp_global_nics_tx_packets}, + {"global.nics.ierrrors", sp_global_nics_ierrors}, + {"global.nics.imissed", sp_global_nics_imissed}, + {"global.tsc", sp_global_tsc}, + + {"task.core(#).task(#).idle_cycles", sp_task_idle_cycles}, + {"task.core(#).task(#).rx.packets", sp_task_rx_packets}, + {"task.core(#).task(#).tx.packets", sp_task_tx_packets}, + {"task.core(#).task(#).drop.tx_fail", sp_task_drop_tx_fail}, + {"task.core(#).task(#).drop.discard", sp_task_drop_discard}, + {"task.core(#).task(#).drop.handled", sp_task_drop_handled}, + {"task.core(#).task(#).rx.bytes", sp_task_rx_bytes}, + {"task.core(#).task(#).tx.bytes", sp_task_tx_bytes}, + {"task.core(#).task(#).tsc", sp_task_tsc}, + {"task.core(#).task(#).drop.tx_fail_prio(#)", sp_task_drop_tx_fail_prio}, + {"task.core(#).task(#).rx_prio(#)", sp_task_rx_prio}, + + {"port(#).no_mbufs", sp_port_no_mbufs}, + {"port(#).ierrors", sp_port_ierrors}, + {"port(#).imissed", sp_port_imissed}, + {"port(#).oerrors", sp_port_oerrors}, + {"port(#).rx.packets", sp_port_rx_packets}, + {"port(#).tx.packets", sp_port_tx_packets}, + {"port(#).rx.bytes", sp_port_rx_bytes}, + {"port(#).tx.bytes", sp_port_tx_bytes}, + {"port(#).tx.packets_64", sp_port_tx_packets_64}, + {"port(#).tx.packets_65_127", sp_port_tx_packets_65_127}, + 
{"port(#).tx.packets_128_255", sp_port_tx_packets_128_255}, + {"port(#).tx.packets_256_511", sp_port_tx_packets_256_511}, + {"port(#).tx.packets_512_1023", sp_port_tx_packets_512_1023}, + {"port(#).tx.packets_1024_1522", sp_port_tx_packets_1024_1522}, + {"port(#).tx.packets_1523_max", sp_port_tx_packets_1523_max}, + {"port(#).tsc", sp_port_tsc}, + + {"mem(#).used", sp_mem_used}, + {"mem(#).free", sp_mem_free}, + {"mem(#).size", sp_mem_size}, + + {"latency(#).min", sp_latency_min}, + {"latency(#).max", sp_latency_max}, + {"latency(#).avg", sp_latency_avg}, + {"latency(#).lost", sp_latency_lost}, + {"latency(#).used", sp_latency_used}, + {"latency(#).total", sp_latency_total}, + {"latency(#).tot.min", sp_latency_tot_min}, + {"latency(#).tot.max", sp_latency_tot_max}, + {"latency(#).tot.avg", sp_latency_tot_avg}, + {"latency(#).tot.lost", sp_latency_tot_lost}, + {"latency(#).tot.used", sp_latency_tot_used}, + {"latency(#).tot.total", sp_latency_tot_total}, + {"latency(#).stddev", sp_latency_stddev}, + + {"ring(#).used", sp_ring_used}, + {"ring(#).free", sp_ring_free}, + {"ring(#).size", sp_ring_size}, + + {"l4gen(#).created.tcp", sp_l4gen_created_tcp}, + {"l4gen(#).created.udp", sp_l4gen_created_udp}, + {"l4gen(#).created.all", sp_l4gen_created_all}, + {"l4gen(#).created.bundles", sp_l4gen_created_bundles}, + {"l4gen(#).torndown.no_retx", sp_l4gen_torndown_no_retx}, + {"l4gen(#).torndown.retx", sp_l4gen_torndown_retx}, + {"l4gen(#).torndown.udp", sp_l4gen_torndown_udp}, + {"l4gen(#).expired.tcp", sp_l4gen_expire_tcp}, + {"l4gen(#).expired.udp", sp_l4gen_expire_udp}, + {"l4gen(#).created", sp_l4gen_created}, + {"l4gen(#).finished", sp_l4gen_finished}, + {"l4gen(#).retx", sp_l4gen_retx}, + {"l4gen(#).tsc", sp_l4gen_tsc}, +}; + +static int stats_parser_extract_args(char *stats_path, size_t *argc, char **argv) +{ + size_t len = strlen(stats_path); + size_t j = 0; + size_t k = 0; + int state = 0; + + for (size_t i = 0; i < len; ++i) { + switch (state) { + case 0: + if 
(stats_path[i] == '(') { + state = 1; + k = 0; + } + else if (stats_path[i] == ')') + return -1; + stats_path[j] = stats_path[i]; + j++; + break; + case 1: + if (stats_path[i] == ')') { + state = 0; + stats_path[j] = '#'; + j++; + stats_path[j] = ')'; + j++; + (*argc)++; + } + else { + argv[*argc][k++] = stats_path[i]; + } + break; + } + } + if (state == 1) + return -1; + stats_path[j] = 0; + return 0; +} + +uint64_t stats_parser_get(const char *stats_path) +{ + size_t stats_path_len; + + char stats_path_cpy[128]; + + strncpy(stats_path_cpy, stats_path, sizeof(stats_path_cpy)); + stats_path_len = strlen(stats_path); + + size_t max_argc = 16; + size_t argc = 0; + char argv_data[16][16] = {{0}}; + char *argv[16]; + const char *argv_c[16]; + + for (size_t i = 0; i < 16; ++i) { + argv[i] = argv_data[i]; + argv_c[i] = argv_data[i]; + } + + if (stats_parser_extract_args(stats_path_cpy, &argc, argv)) + return -1; + + for (size_t i = 0; i < sizeof(stats_paths)/sizeof(stats_paths[0]); ++i) { + if (strcmp(stats_paths[i].str, stats_path_cpy) == 0) { + if (stats_paths[i].func == NULL) + return -1; + return stats_paths[i].func(argc, argv_c); + } + } + + return -1; +} diff --git a/VNFs/DPPD-PROX/stats_parser.h b/VNFs/DPPD-PROX/stats_parser.h new file mode 100644 index 00000000..0812dbfc --- /dev/null +++ b/VNFs/DPPD-PROX/stats_parser.h @@ -0,0 +1,24 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STATS_PARSER_H_ +#define _STATS_PARSER_H_ + +#include <inttypes.h> + +uint64_t stats_parser_get(const char *stats_path); + +#endif /* _STATS_PARSER_H_ */ diff --git a/VNFs/DPPD-PROX/stats_port.c b/VNFs/DPPD-PROX/stats_port.c new file mode 100644 index 00000000..b5e70dcc --- /dev/null +++ b/VNFs/DPPD-PROX/stats_port.c @@ -0,0 +1,407 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <stdio.h> + +#include <rte_version.h> +#include <rte_ethdev.h> +#include <rte_cycles.h> +#include <rte_byteorder.h> + +#include "prox_malloc.h" +#include "log.h" +#include "quit.h" +#include "stats_port.h" +#include "prox_port_cfg.h" +#include "rw_reg.h" + +#if defined(PROX_STATS) && defined(PROX_HW_DIRECT_STATS) + +/* Directly access hardware counters instead of going through DPDK. This allows getting + * specific counters that DPDK does not report or aggregates with other ones. 
+ */
+
+/* Definitions for IXGBE (taken from PMD) */
+#define PROX_IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define PROX_IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define PROX_IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define PROX_IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define PROX_IXGBE_GPTC 0x04080
+#define PROX_IXGBE_TPR 0x040D0
+#define PROX_IXGBE_TORL 0x040C0
+#define PROX_IXGBE_TORH 0x040C4
+#define PROX_IXGBE_GOTCL 0x04090
+#define PROX_IXGBE_GOTCH 0x04094
+
+#define IXGBE_QUEUE_STAT_COUNTERS 16
+
+static void ixgbe_read_stats(uint8_t port_id, struct port_stats_sample* stats, struct port_stats_sample *prev, int last_stat)
+{
+	uint64_t before, after;
+	unsigned i;
+
+	struct rte_eth_dev* dev = &rte_eth_devices[port_id];
+
+	/* WARNING: Assumes hardware address is first field of structure! This may change! */
+	struct _dev_hw* hw = (struct _dev_hw *)(dev->data->dev_private);
+
+	stats->no_mbufs = dev->data->rx_mbuf_alloc_failed;
+
+	/* Since we only read deltas from the NIC, we have to add to previous values
+	 * even though we actually subtract again later to find out the rates!
+	 */
+	stats->ierrors = prev->ierrors;
+	stats->imissed = prev->imissed;
+	stats->rx_bytes = prev->rx_bytes;
+	stats->rx_tot = prev->rx_tot;
+	stats->tx_bytes = prev->tx_bytes;
+	stats->tx_tot = prev->tx_tot;
+
+	/* WARNING: In this implementation, we count as imissed only the "no descriptor"
+	 * missed packets cases and not the actual receive errors.
+	 */
+	before = rte_rdtsc();
+	for (i = 0; i < 8; i++) {
+		stats->imissed += PROX_READ_REG(hw, PROX_IXGBE_MPC(i));
+	}
+
+	/* RX stats */
+#if 0
+	/* This version is equivalent to what ixgbe PMD does. It only accounts for packets
+	 * actually received on the host.
+ */ + for (i = 0; i < IXGBE_QUEUE_STAT_COUNTERS; i++) { + /* ipackets: */ + stats->rx_tot += PROX_READ_REG(hw, PROX_IXGBE_QPRC(i)); + /* ibytes: */ + stats->rx_bytes += PROX_READ_REG(hw, PROX_IXGBE_QBRC_L(i)); + stats->rx_bytes += ((uint64_t)PROX_READ_REG(hw, PROX_IXGBE_QBRC_H(i)) << 32); + } +#else + /* This version reports the packets received by the NIC, regardless of whether they + * reached the host or not, etc. (no need to add ierrors or imissedto this packet count) + */ + stats->rx_tot += PROX_READ_REG(hw, PROX_IXGBE_TPR); + stats->rx_bytes += PROX_READ_REG(hw, PROX_IXGBE_TORL); + stats->rx_bytes += ((uint64_t)PROX_READ_REG(hw, PROX_IXGBE_TORH) << 32); +#endif + + /* TX stats */ + /* opackets: */ + stats->tx_tot += PROX_READ_REG(hw, PROX_IXGBE_GPTC); + /* obytes: */ + stats->tx_bytes += PROX_READ_REG(hw, PROX_IXGBE_GOTCL); + stats->tx_bytes += ((uint64_t)PROX_READ_REG(hw, PROX_IXGBE_GOTCH) << 32); + after = rte_rdtsc(); + stats->tsc = (before >> 1) + (after >> 1); +} + +#endif + +extern int last_stat; +static struct port_stats port_stats[PROX_MAX_PORTS]; +static uint8_t nb_interface; +static uint8_t n_ports; +static int num_xstats[PROX_MAX_PORTS] = {0}; +static int num_ixgbe_xstats = 0; + +#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,1) +#define XSTATS_SUPPORT 1 +#else +#define XSTATS_SUPPORT 0 +#endif + +#if XSTATS_SUPPORT +#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0) +static struct rte_eth_xstat *eth_xstats[PROX_MAX_PORTS] = {0}; +static struct rte_eth_xstat_name *eth_xstat_names[PROX_MAX_PORTS] = {0}; +#else +static struct rte_eth_xstats *eth_xstats[PROX_MAX_PORTS] = {0}; +static struct rte_eth_xstats *eth_xstat_names[PROX_MAX_PORTS] = {0}; +#endif +static int xstat_tpr_offset[PROX_MAX_PORTS] ={0}, xstat_tor_offset[PROX_MAX_PORTS] = {0}; +static int tx_pkt_size_offset[PROX_MAX_PORTS][PKT_SIZE_COUNT]; +#endif + +#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0) +static int find_xstats_str(struct rte_eth_xstat_name *xstats, int n, const char *name) +#else +static 
int find_xstats_str(struct rte_eth_xstats *xstats, int n, const char *name) +#endif +{ + for (int i = 0; i < n; i++) { + if (strcmp(xstats[i].name, name) == 0) + return i; + } + + return -1; +} + +void stats_port_init(void) +{ + int potential_ixgbe_warn = 0; + for (int i = 0; i < PROX_MAX_PORTS; i++) { + xstat_tpr_offset[i] = -1; + xstat_tor_offset[i] = -1; + for (int j = 0; j < PKT_SIZE_COUNT; j++) { + tx_pkt_size_offset[i][j] = -1; + } + } +#if XSTATS_SUPPORT + nb_interface = prox_last_port_active() + 1; + n_ports = prox_nb_active_ports(); + + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) { +#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0) + num_xstats[port_id] = rte_eth_xstats_get_names(port_id, NULL, 0); + eth_xstat_names[port_id] = prox_zmalloc(num_xstats[port_id] * sizeof(struct rte_eth_xstat_name), prox_port_cfg[port_id].socket); + PROX_PANIC(eth_xstat_names[port_id] == NULL, "Error allocating memory for xstats"); + num_xstats[port_id] = rte_eth_xstats_get_names(port_id, eth_xstat_names[port_id], num_xstats[port_id]); + eth_xstats[port_id] = prox_zmalloc(num_xstats[port_id] * sizeof(struct rte_eth_xstat), prox_port_cfg[port_id].socket); + PROX_PANIC(eth_xstats[port_id] == NULL, "Error allocating memory for xstats"); +#else + num_xstats[port_id] = rte_eth_xstats_get(port_id, NULL, 0); + eth_xstats[port_id] = prox_zmalloc(num_xstats[port_id] * sizeof(struct rte_eth_xstats), prox_port_cfg[port_id].socket); + PROX_PANIC(eth_xstats[port_id] == NULL, "Error allocating memory for xstats"); + eth_xstat_names[port_id] = eth_xstats[port_id]; + num_xstats[port_id] = rte_eth_xstats_get(port_id, eth_xstats[port_id], num_xstats[port_id]); +#endif + if (!strcmp(prox_port_cfg[port_id].short_name, "ixgbe")) { + potential_ixgbe_warn = 1; + xstat_tor_offset[port_id] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "rx_total_bytes"); + xstat_tpr_offset[port_id] = find_xstats_str(eth_xstat_names[port_id], 
num_xstats[port_id], "rx_total_packets"); + } + tx_pkt_size_offset[port_id][PKT_SIZE_64] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_64_packets"); + tx_pkt_size_offset[port_id][PKT_SIZE_65] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_65_to_127_packets"); + tx_pkt_size_offset[port_id][PKT_SIZE_128] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_128_to_255_packets"); + tx_pkt_size_offset[port_id][PKT_SIZE_256] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_256_to_511_packets"); + tx_pkt_size_offset[port_id][PKT_SIZE_512] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_512_to_1023_packets"); + if (0 == strcmp(prox_port_cfg[port_id].short_name, "ixgbe")) { + tx_pkt_size_offset[port_id][PKT_SIZE_1024] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_1024_to_max_packets"); + } else { + tx_pkt_size_offset[port_id][PKT_SIZE_1024] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_1024_to_1522_packets"); + tx_pkt_size_offset[port_id][PKT_SIZE_1522] = find_xstats_str(eth_xstat_names[port_id], num_xstats[port_id], "tx_size_1523_to_max_packets"); + } + plog_info("offset = %d, %d, %d, %d, %d, %d %d\n", tx_pkt_size_offset[port_id][PKT_SIZE_64], tx_pkt_size_offset[port_id][PKT_SIZE_65], tx_pkt_size_offset[port_id][PKT_SIZE_128], tx_pkt_size_offset[port_id][PKT_SIZE_256], tx_pkt_size_offset[port_id][PKT_SIZE_512], tx_pkt_size_offset[port_id][PKT_SIZE_1024], tx_pkt_size_offset[port_id][PKT_SIZE_1522]); +#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0) + prox_free(eth_xstat_names[port_id]); +#endif + if (num_xstats[port_id] == 0 || eth_xstats[port_id] == NULL) { + plog_warn("Failed to initialize xstat for port %d, running without xstats\n", port_id); + num_xstats[port_id] = 0; + } + } + } + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if ((xstat_tor_offset[port_id] != -1) && 
(xstat_tpr_offset[port_id] != -1)) { + num_ixgbe_xstats = 2; // ixgbe PMD supports tor and tpr xstats + break; + } + } + if ((num_ixgbe_xstats == 0) && (potential_ixgbe_warn)) + plog_warn("Failed to initialize ixgbe xstat, running without ixgbe xstats\n"); +#endif +} + +static void nic_read_stats(uint8_t port_id) +{ + unsigned is_ixgbe = (0 == strcmp(prox_port_cfg[port_id].short_name, "ixgbe")); + + struct port_stats_sample *stats = &port_stats[port_id].sample[last_stat]; + +#if defined(PROX_STATS) && defined(PROX_HW_DIRECT_STATS) + if (is_ixgbe) { + struct port_stats_sample *prev = &port_stats[port_id].sample[!last_stat]; + ixgbe_read_stats(port_id, stats, prev, last_stat); + return; + } +#endif + uint64_t before, after; + + struct rte_eth_stats eth_stat; + + before = rte_rdtsc(); + rte_eth_stats_get(port_id, ð_stat); + after = rte_rdtsc(); + + stats->tsc = (before >> 1) + (after >> 1); + stats->no_mbufs = eth_stat.rx_nombuf; + stats->ierrors = eth_stat.ierrors; + stats->imissed = eth_stat.imissed; + stats->oerrors = eth_stat.oerrors; + stats->rx_bytes = eth_stat.ibytes; + + /* The goal would be to get the total number of bytes received + by the NIC (including overhead). Without the patch + (i.e. num_ixgbe_xstats == 0) we can't do this directly with + DPDK 2.1 API. So, we report the number of bytes (including + overhead) received by the host. 
*/ + +#if XSTATS_SUPPORT + if (num_xstats[port_id]) { + rte_eth_xstats_get(port_id, eth_xstats[port_id], num_xstats[port_id]); + for (size_t i = 0; i < sizeof(tx_pkt_size_offset[0])/sizeof(tx_pkt_size_offset[0][0]); ++i) { + if (tx_pkt_size_offset[port_id][i] != -1) + stats->tx_pkt_size[i] = (eth_xstats[port_id][tx_pkt_size_offset[port_id][i]]).value; + else + stats->tx_pkt_size[i] = -1; + } + } else { + for (size_t i = 0; i < sizeof(tx_pkt_size_offset[0])/sizeof(tx_pkt_size_offset[0][0]); ++i) { + stats->tx_pkt_size[i] = -1; + } + } +#endif + if (is_ixgbe) { +#if XSTATS_SUPPORT + if (num_ixgbe_xstats) { + stats->rx_tot = eth_xstats[port_id][xstat_tpr_offset[port_id]].value; + stats->rx_bytes = eth_xstats[port_id][xstat_tor_offset[port_id]].value; + } else +#endif + { + stats->rx_tot = eth_stat.ipackets + eth_stat.ierrors + eth_stat.imissed; + /* On ixgbe, the rx_bytes counts bytes + received by Host without overhead. The + rx_tot counts the number of packets + received by the NIC. If we only add 20 * + rx_tot to rx_bytes, the result will also + take into account 20 * "number of packets + dropped by the nic". Note that in case CRC + is stripped on ixgbe, the CRC bytes are not + counted. 
*/ + if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) + stats->rx_bytes = eth_stat.ibytes + + (24 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed)); + else + stats->rx_bytes = eth_stat.ibytes + + (20 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed)); + } + } else if (strcmp(prox_port_cfg[port_id].short_name, "i40e_vf") == 0) { + // For I40E VF, imissed already part of received packets + stats->rx_tot = eth_stat.ipackets; + } else { + stats->rx_tot = eth_stat.ipackets + eth_stat.imissed; + } + stats->tx_tot = eth_stat.opackets; + stats->tx_bytes = eth_stat.obytes; +} + +void stats_port_reset(void) +{ + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) { + rte_eth_stats_reset(port_id); + memset(&port_stats[port_id], 0, sizeof(struct port_stats)); + } + } +} + +void stats_port_update(void) +{ + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) { + nic_read_stats(port_id); + } + } +} + +uint64_t stats_port_get_ierrors(void) +{ + uint64_t ret = 0; + + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) + ret += port_stats[port_id].sample[last_stat].ierrors; + } + return ret; +} + +uint64_t stats_port_get_imissed(void) +{ + uint64_t ret = 0; + + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) + ret += port_stats[port_id].sample[last_stat].imissed; + } + return ret; +} + +uint64_t stats_port_get_rx_packets(void) +{ + uint64_t ret = 0; + + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) + ret += port_stats[port_id].sample[last_stat].rx_tot; + } + return ret; +} + +uint64_t stats_port_get_tx_packets(void) +{ + uint64_t ret = 0; + + for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) { + if (prox_port_cfg[port_id].active) + ret += 
port_stats[port_id].sample[last_stat].tx_tot;
+	}
+	return ret;
+}
+
+int stats_get_n_ports(void)
+{
+	return n_ports;
+}
+
+struct port_stats_sample *stats_get_port_stats_sample(uint32_t port_id, int l)
+{
+	return &port_stats[port_id].sample[l == last_stat];
+}
+
+/* Fill *gps with per-port deltas (between the two most recent samples)
+ * and totals. Returns 0 on success, -1 if the port is not active. */
+int stats_port(uint8_t port_id, struct get_port_stats *gps)
+{
+	if (!prox_port_cfg[port_id].active)
+		return -1;
+
+	struct port_stats_sample *last = &port_stats[port_id].sample[last_stat];
+	struct port_stats_sample *prev = &port_stats[port_id].sample[!last_stat];
+
+	gps->no_mbufs_diff = last->no_mbufs - prev->no_mbufs;
+	gps->ierrors_diff = last->ierrors - prev->ierrors;
+	gps->imissed_diff = last->imissed - prev->imissed;
+	gps->rx_bytes_diff = last->rx_bytes - prev->rx_bytes;
+	gps->tx_bytes_diff = last->tx_bytes - prev->tx_bytes;
+	gps->rx_pkts_diff = last->rx_tot - prev->rx_tot;
+	if (unlikely(prev->rx_tot > last->rx_tot))
+		gps->rx_pkts_diff = 0;
+	gps->tx_pkts_diff = last->tx_tot - prev->tx_tot;
+	/* Clamp the TX delta on counter wrap/reset (was zeroing rx_pkts_diff
+	 * here by copy-paste mistake). */
+	if (unlikely(prev->tx_tot > last->tx_tot))
+		gps->tx_pkts_diff = 0;
+	gps->rx_tot = last->rx_tot;
+	gps->tx_tot = last->tx_tot;
+	gps->no_mbufs_tot = last->no_mbufs;
+	gps->ierrors_tot = last->ierrors;
+	gps->imissed_tot = last->imissed;
+
+	gps->last_tsc = last->tsc;
+	gps->prev_tsc = prev->tsc;
+
+	return 0;
+}
diff --git a/VNFs/DPPD-PROX/stats_port.h b/VNFs/DPPD-PROX/stats_port.h
new file mode 100644
index 00000000..4e166e1b
--- /dev/null
+++ b/VNFs/DPPD-PROX/stats_port.h
@@ -0,0 +1,79 @@
+/*
+// Copyright (c) 2010-2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _STATS_PORT_H_ +#define _STATS_PORT_H_ + +#include <inttypes.h> + +enum PKT_SIZE_BIN { + PKT_SIZE_64, + PKT_SIZE_65, + PKT_SIZE_128, + PKT_SIZE_256, + PKT_SIZE_512, + PKT_SIZE_1024, + PKT_SIZE_1522, + PKT_SIZE_COUNT, +}; + +struct port_stats_sample { + uint64_t tsc; + uint64_t no_mbufs; + uint64_t ierrors; + uint64_t imissed; + uint64_t oerrors; + uint64_t rx_tot; + uint64_t tx_tot; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t tx_pkt_size[PKT_SIZE_COUNT]; +}; + +struct port_stats { + struct port_stats_sample sample[2]; +}; + +struct get_port_stats { + uint64_t no_mbufs_diff; + uint64_t ierrors_diff; + uint64_t imissed_diff; + uint64_t rx_bytes_diff; + uint64_t tx_bytes_diff; + uint64_t rx_pkts_diff; + uint64_t tx_pkts_diff; + uint64_t rx_tot; + uint64_t tx_tot; + uint64_t no_mbufs_tot; + uint64_t ierrors_tot; + uint64_t imissed_tot; + uint64_t last_tsc; + uint64_t prev_tsc; +}; + +int stats_port(uint8_t port_id, struct get_port_stats *ps); +void stats_port_init(void); +void stats_port_reset(void); +void stats_port_update(void); +uint64_t stats_port_get_ierrors(void); +uint64_t stats_port_get_imissed(void); +uint64_t stats_port_get_rx_packets(void); +uint64_t stats_port_get_tx_packets(void); + +int stats_get_n_ports(void); +struct port_stats_sample *stats_get_port_stats_sample(uint32_t port_id, int l); + +#endif /* _STATS_PORT_H_ */ diff --git a/VNFs/DPPD-PROX/stats_prio.c b/VNFs/DPPD-PROX/stats_prio.c new file mode 100644 index 00000000..3d39d580 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_prio.c @@ -0,0 +1,131 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stddef.h>
+
+#include "handle_aggregator.h"
+#include "stats_prio_task.h"
+#include "prox_cfg.h"
+#include "prox_globals.h"
+#include "lconf.h"
+
+struct lcore_task_stats {
+	struct task_stats task_stats[MAX_TASKS_PER_CORE];
+};
+
+struct lcore_prio_task_stats {
+	struct prio_task_stats prio_task_stats[MAX_TASKS_PER_CORE];
+};
+
+extern int last_stat;
+static struct prio_task_stats prio_task_stats_set[RTE_MAX_LCORE * MAX_TASKS_PER_CORE];
+static uint8_t nb_prio_tasks_tot;
+
+int stats_get_n_prio_tasks_tot(void)
+{
+	return nb_prio_tasks_tot;
+}
+
+struct prio_task_stats_sample *stats_get_prio_task_stats_sample(uint32_t prio_task_id, int l)
+{
+	return &prio_task_stats_set[prio_task_id].sample[l == last_stat];
+}
+
+/* Look up the prio-task stats entry registered for (lcore_id, prio_task_id)
+ * and return its requested sample, or NULL if no such entry exists.
+ * Fix: the loop previously compared the stored task_id against the loop
+ * index and indexed the set with the prio_task_id parameter, so it did not
+ * return the entry that actually matched. */
+struct prio_task_stats_sample *stats_get_prio_task_stats_sample_by_core_task(uint32_t lcore_id, uint32_t prio_task_id, int l)
+{
+	for (uint8_t i = 0; i < nb_prio_tasks_tot; ++i) {
+		if ((prio_task_stats_set[i].lcore_id == lcore_id) && (prio_task_stats_set[i].task_id == prio_task_id))
+			return &prio_task_stats_set[i].sample[l == last_stat];
+	}
+	return NULL;
+}
+
+void stats_prio_task_reset(void)
+{
+	struct prio_task_stats *cur_task_stats;
+
+	for (uint8_t task_id = 0; task_id < nb_prio_tasks_tot; ++task_id) {
+		cur_task_stats = &prio_task_stats_set[task_id];
+		for (int i = 0; i < 8; i++) {
+			cur_task_stats->tot_drop_tx_fail_prio[i] = 0;
+			cur_task_stats->tot_rx_prio[i] = 0;
+		}
+	}
+}
+
+uint64_t stats_core_task_tot_drop_tx_fail_prio(uint8_t prio_task_id, uint8_t prio)
+{
+	return 
prio_task_stats_set[prio_task_id].tot_drop_tx_fail_prio[prio]; +} + +uint64_t stats_core_task_tot_rx_prio(uint8_t prio_task_id, uint8_t prio) +{ + return prio_task_stats_set[prio_task_id].tot_rx_prio[prio]; +} + +void stats_prio_task_post_proc(void) +{ + for (uint8_t task_id = 0; task_id < nb_prio_tasks_tot; ++task_id) { + struct prio_task_stats *cur_task_stats = &prio_task_stats_set[task_id]; + const struct prio_task_stats_sample *last = &cur_task_stats->sample[last_stat]; + const struct prio_task_stats_sample *prev = &cur_task_stats->sample[!last_stat]; + + for (int i=0; i<8; i++) { + cur_task_stats->tot_rx_prio[i] += last->rx_prio[i] - prev->rx_prio[i]; + cur_task_stats->tot_drop_tx_fail_prio[i] += last->drop_tx_fail_prio[i] - prev->drop_tx_fail_prio[i]; + } + } +} + +void stats_prio_task_update(void) +{ + uint64_t before, after; + + for (uint8_t task_id = 0; task_id < nb_prio_tasks_tot; ++task_id) { + struct prio_task_stats *cur_task_stats = &prio_task_stats_set[task_id]; + struct prio_task_rt_stats *stats = cur_task_stats->stats; + struct prio_task_stats_sample *last = &cur_task_stats->sample[last_stat]; + + before = rte_rdtsc(); + for (int i=0; i<8; i++) { + last->drop_tx_fail_prio[i] = stats->drop_tx_fail_prio[i]; + last->rx_prio[i] = stats->rx_prio[i]; + } + after = rte_rdtsc(); + last->tsc = (before >> 1) + (after >> 1); + } +} + +void stats_prio_task_init(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id; + + /* add cores that are receiving from and sending to physical ports first */ + lcore_id = -1; + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + if (strcmp(targ->task_init->mode_str, "aggreg") == 0) { + struct prio_task_rt_stats *stats = &((struct task_aggregator *)(lconf->tasks_all[task_id]))->stats; + prio_task_stats_set[nb_prio_tasks_tot].stats = stats; + 
prio_task_stats_set[nb_prio_tasks_tot].lcore_id = lcore_id; + prio_task_stats_set[nb_prio_tasks_tot++].task_id = task_id; + } + } + } +} diff --git a/VNFs/DPPD-PROX/stats_prio_task.h b/VNFs/DPPD-PROX/stats_prio_task.h new file mode 100644 index 00000000..ce150591 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_prio_task.h @@ -0,0 +1,55 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _STATS_PRIO_TASK_H_ +#define _STATS_PRIO_TASK_H_ + +#include <inttypes.h> + +#include "clock.h" + +struct prio_task_stats_sample { + uint64_t tsc; + uint64_t drop_tx_fail_prio[8]; + uint64_t rx_prio[8]; +}; + +struct prio_task_rt_stats { + uint64_t drop_tx_fail_prio[8]; + uint64_t rx_prio[8]; +}; + +struct prio_task_stats { + uint64_t tot_drop_tx_fail_prio[8]; + uint64_t tot_rx_prio[8]; + uint8_t lcore_id; + uint8_t task_id; + struct prio_task_stats_sample sample[2]; + struct prio_task_rt_stats *stats; +}; + +int stats_get_n_prio_tasks_tot(void); +void stats_prio_task_reset(void); +void stats_prio_task_post_proc(void); +void stats_prio_task_update(void); +void stats_prio_task_init(void); + +struct prio_task_stats_sample *stats_get_prio_task_stats_sample(uint32_t task_id, int last); +struct prio_task_stats_sample *stats_get_prio_task_stats_sample_by_core_task(uint32_t lcore_id, uint32_t task_id, int last); +uint64_t stats_core_task_tot_drop_tx_fail_prio(uint8_t task_id, uint8_t prio); +uint64_t 
stats_core_task_tot_rx_prio(uint8_t task_id, uint8_t prio); + +#endif /* _STATS_PRIO_TASK_H_ */ diff --git a/VNFs/DPPD-PROX/stats_ring.c b/VNFs/DPPD-PROX/stats_ring.c new file mode 100644 index 00000000..d0792ac1 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_ring.c @@ -0,0 +1,160 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <inttypes.h> +#include <rte_ring.h> +#include <rte_version.h> + +#include "prox_malloc.h" +#include "stats_ring.h" +#include "prox_port_cfg.h" +#include "prox_cfg.h" +#include "lconf.h" +#include "log.h" +#include "quit.h" + +struct stats_ring_manager { + uint16_t n_rings; + struct ring_stats ring_stats[0]; +}; + +static struct stats_ring_manager *rsm; + +int stats_get_n_rings(void) +{ + return rsm->n_rings; +} + +struct ring_stats *stats_get_ring_stats(uint32_t i) +{ + return &rsm->ring_stats[i]; +} + +void stats_ring_update(void) +{ + for (uint16_t r_id = 0; r_id < rsm->n_rings; ++r_id) { + rsm->ring_stats[r_id].free = rte_ring_free_count(rsm->ring_stats[r_id].ring); + } +} + +static struct ring_stats *init_rings_add(struct stats_ring_manager *rsm, struct rte_ring *ring) +{ + for (uint16_t i = 0; i < rsm->n_rings; ++i) { + if (strcmp(ring->name, rsm->ring_stats[i].ring->name) == 0) + return &rsm->ring_stats[i]; + } + rsm->ring_stats[rsm->n_rings++].ring = ring; + return &rsm->ring_stats[rsm->n_rings - 1]; +} + +static struct stats_ring_manager 
*alloc_stats_ring_manager(void) +{ + const uint32_t socket_id = rte_lcore_to_socket_id(rte_lcore_id()); + struct lcore_cfg *lconf; + uint32_t lcore_id = -1; + uint32_t n_rings = 0; + struct task_args *targ; + + /* n_rings could be more than total number of rings since + rings could be referenced by multiple cores. */ + while(prox_core_next(&lcore_id, 1) == 0) { + lconf = &lcore_cfg[lcore_id]; + + for(uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + targ = &lconf->targs[task_id]; + + for(uint32_t rxring_id = 0; rxring_id < targ->nb_rxrings; ++rxring_id) { + if (!targ->tx_opt_ring_task) + n_rings++; + } + for (uint32_t txring_id = 0; txring_id < targ->nb_txrings; ++txring_id) { + if (!targ->tx_opt_ring) + n_rings++; + } + } + } + + for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) { + if (!prox_port_cfg[port_id].active) { + continue; + } + + if (prox_port_cfg[port_id].rx_ring[0] != '\0') + n_rings++; + + if (prox_port_cfg[port_id].tx_ring[0] != '\0') + n_rings++; + } + + size_t mem_size = sizeof(struct stats_ring_manager) + + n_rings * sizeof(struct ring_stats); + + return prox_zmalloc(mem_size, socket_id); +} + +void stats_ring_init(void) +{ + uint32_t lcore_id = -1; + struct lcore_cfg *lconf; + struct task_args *targ; + + rsm = alloc_stats_ring_manager(); + while(prox_core_next(&lcore_id, 1) == 0) { + lconf = &lcore_cfg[lcore_id]; + + for(uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + targ = &lconf->targs[task_id]; + + for(uint32_t rxring_id = 0; rxring_id < targ->nb_rxrings; ++rxring_id) { + if (!targ->tx_opt_ring_task) + init_rings_add(rsm, targ->rx_rings[rxring_id]); + } + + for (uint32_t txring_id = 0; txring_id < targ->nb_txrings; ++txring_id) { + if (!targ->tx_opt_ring) + init_rings_add(rsm, targ->tx_rings[txring_id]); + } + } + } + + struct ring_stats *stats = NULL; + + for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) { + if (!prox_port_cfg[port_id].active) { + continue; + } + + if 
(prox_port_cfg[port_id].rx_ring[0] != '\0') { + stats = init_rings_add(rsm, rte_ring_lookup(prox_port_cfg[port_id].rx_ring)); + stats->port[stats->nb_ports++] = &prox_port_cfg[port_id]; + } + + if (prox_port_cfg[port_id].tx_ring[0] != '\0') { + stats = init_rings_add(rsm, rte_ring_lookup(prox_port_cfg[port_id].tx_ring)); + stats->port[stats->nb_ports++] = &prox_port_cfg[port_id]; + } + } + + /* The actual usable space for a ring is size - 1. There is at + most one free entry in the ring to distinguish between + full/empty. */ + for (uint16_t ring_id = 0; ring_id < rsm->n_rings; ++ring_id) +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + rsm->ring_stats[ring_id].size = rsm->ring_stats[ring_id].ring->prod.size - 1; +#else + rsm->ring_stats[ring_id].size = rsm->ring_stats[ring_id].ring->size - 1; +#endif +} diff --git a/VNFs/DPPD-PROX/stats_ring.h b/VNFs/DPPD-PROX/stats_ring.h new file mode 100644 index 00000000..d9d4d63f --- /dev/null +++ b/VNFs/DPPD-PROX/stats_ring.h @@ -0,0 +1,34 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "prox_globals.h" + +struct rte_ring; +struct prox_port_cfg; + +struct ring_stats { + struct rte_ring *ring; + uint32_t nb_ports; + struct prox_port_cfg *port[PROX_MAX_PORTS]; + uint32_t free; + uint32_t size; +}; + +void stats_ring_update(void); +void stats_ring_init(void); + +int stats_get_n_rings(void); +struct ring_stats *stats_get_ring_stats(uint32_t i); diff --git a/VNFs/DPPD-PROX/stats_task.c b/VNFs/DPPD-PROX/stats_task.c new file mode 100644 index 00000000..6b4dc2dd --- /dev/null +++ b/VNFs/DPPD-PROX/stats_task.c @@ -0,0 +1,227 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <stddef.h> + +#include "stats_task.h" +#include "prox_cfg.h" +#include "prox_globals.h" +#include "lconf.h" + +struct lcore_task_stats { + struct task_stats task_stats[MAX_TASKS_PER_CORE]; +}; + +#define TASK_STATS_RX 0x01 +#define TASK_STATS_TX 0x02 + +extern int last_stat; +static struct lcore_task_stats lcore_task_stats_all[RTE_MAX_LCORE]; +static struct task_stats *task_stats_set[RTE_MAX_LCORE * MAX_TASKS_PER_CORE]; +static uint8_t nb_tasks_tot; +int stats_get_n_tasks_tot(void) +{ + return nb_tasks_tot; +} + +struct task_stats *stats_get_task_stats(uint32_t lcore_id, uint32_t task_id) +{ + return &lcore_task_stats_all[lcore_id].task_stats[task_id]; +} + +struct task_stats_sample *stats_get_task_stats_sample(uint32_t lcore_id, uint32_t task_id, int l) +{ + return &lcore_task_stats_all[lcore_id].task_stats[task_id].sample[l == last_stat]; +} + +void stats_task_reset(void) +{ + struct task_stats *cur_task_stats; + + for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) { + cur_task_stats = task_stats_set[task_id]; + cur_task_stats->tot_rx_pkt_count = 0; + cur_task_stats->tot_tx_pkt_count = 0; + cur_task_stats->tot_drop_tx_fail = 0; + cur_task_stats->tot_drop_discard = 0; + cur_task_stats->tot_drop_handled = 0; + } +} + +uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id) +{ + return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_pkt_count; +} + +uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id) +{ + return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_pkt_count; +} + +uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id) +{ + return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail + + lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_discard + + lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_handled; +} + +uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id) +{ + return 
lcore_task_stats_all[lcore_id].task_stats[task_id].sample[last_stat].tsc; +} + +static void init_core_port(struct task_stats *ts, struct task_rt_stats *stats, uint8_t flags) +{ + ts->stats = stats; + ts->flags |= flags; +} + +void stats_task_post_proc(void) +{ + for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) { + struct task_stats *cur_task_stats = task_stats_set[task_id]; + const struct task_stats_sample *last = &cur_task_stats->sample[last_stat]; + const struct task_stats_sample *prev = &cur_task_stats->sample[!last_stat]; + + /* no total stats for empty loops */ + cur_task_stats->tot_rx_pkt_count += last->rx_pkt_count - prev->rx_pkt_count; + cur_task_stats->tot_tx_pkt_count += last->tx_pkt_count - prev->tx_pkt_count; + cur_task_stats->tot_drop_tx_fail += last->drop_tx_fail - prev->drop_tx_fail; + cur_task_stats->tot_drop_discard += last->drop_discard - prev->drop_discard; + cur_task_stats->tot_drop_handled += last->drop_handled - prev->drop_handled; + } +} + +void stats_task_update(void) +{ + uint64_t before, after; + + for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) { + struct task_stats *cur_task_stats = task_stats_set[task_id]; + struct task_rt_stats *stats = cur_task_stats->stats; + struct task_stats_sample *last = &cur_task_stats->sample[last_stat]; + + /* Read TX first and RX second, in order to prevent displaying + a negative packet loss. Depending on the configuration + (when forwarding, for example), TX might be bigger than RX. 
*/ + before = rte_rdtsc(); + last->tx_pkt_count = stats->tx_pkt_count; + last->drop_tx_fail = stats->drop_tx_fail; + last->drop_discard = stats->drop_discard; + last->drop_handled = stats->drop_handled; + last->rx_pkt_count = stats->rx_pkt_count; + last->empty_cycles = stats->idle_cycles; + last->tx_bytes = stats->tx_bytes; + last->rx_bytes = stats->rx_bytes; + last->drop_bytes = stats->drop_bytes; + after = rte_rdtsc(); + last->tsc = (before >> 1) + (after >> 1); + } +} + +void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc) +{ + const struct task_stats *t; + + *rx = 0; + *tx = 0; + + for (uint8_t task_id = 0; task_id < nb_tasks_tot; ++task_id) { + t = task_stats_set[task_id]; + + if (t->flags & TASK_STATS_RX) + *rx += t->tot_rx_pkt_count; + + if (t->flags & TASK_STATS_TX) + *tx += t->tot_tx_pkt_count; + } + if (nb_tasks_tot) + *tsc = task_stats_set[nb_tasks_tot - 1]->sample[last_stat].tsc; +} + +/* Populate active_stats_set for stats reporting, the order of the + cores is important for gathering the most accurate statistics. TX + cores should be updated before RX cores (to prevent negative Loss + stats). The total number of tasks are saved in nb_tasks_tot. 
*/ +void stats_task_init(void) +{ + struct lcore_cfg *lconf; + uint32_t lcore_id; + + /* add cores that are receiving from and sending to physical ports first */ + lcore_id = -1; + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats; + if (targ->nb_rxrings == 0 && targ->nb_txrings == 0) { + struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id]; + + init_core_port(ts, stats, TASK_STATS_RX | TASK_STATS_TX); + task_stats_set[nb_tasks_tot++] = ts; + } + } + } + + /* add cores that are sending to physical ports second */ + lcore_id = -1; + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats; + if (targ->nb_rxrings != 0 && targ->nb_txrings == 0) { + struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id]; + + init_core_port(ts, stats, TASK_STATS_TX); + task_stats_set[nb_tasks_tot++] = ts; + } + } + } + + /* add cores that are receiving from physical ports third */ + lcore_id = -1; + while(prox_core_next(&lcore_id, 0) == 0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats; + if (targ->nb_rxrings == 0 && targ->nb_txrings != 0) { + struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id]; + + init_core_port(ts, stats, TASK_STATS_RX); + task_stats_set[nb_tasks_tot++] = ts; + } + } + } + + /* add cores that are working internally (no physical ports attached) */ + lcore_id = -1; + while(prox_core_next(&lcore_id, 0) == 
0) { + lconf = &lcore_cfg[lcore_id]; + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + struct task_rt_stats *stats = &lconf->tasks_all[task_id]->aux->stats; + if (targ->nb_rxrings != 0 && targ->nb_txrings != 0) { + struct task_stats *ts = &lcore_task_stats_all[lcore_id].task_stats[task_id]; + + init_core_port(ts, stats, 0); + task_stats_set[nb_tasks_tot++] = ts; + } + } + } +} diff --git a/VNFs/DPPD-PROX/stats_task.h b/VNFs/DPPD-PROX/stats_task.h new file mode 100644 index 00000000..156eb326 --- /dev/null +++ b/VNFs/DPPD-PROX/stats_task.h @@ -0,0 +1,145 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _STATS_TASK_H_ +#define _STATS_TASK_H_ + +#include <inttypes.h> + +#include "clock.h" + +/* The struct task_stats is read/write from the task itself and + read-only from the core that collects the stats. Since only the + task executing the actual work ever modifies the stats, no locking + is required. Both a read and a write are atomic (assuming the + correct alignment). From this, it followed that the statistics can + be incremented directly by the task itself. 
In cases where these + assumptions do not hold, a possible solution (although slightly + less accurate) would be to keep accumulate statistics temporarily + in a separate structure and periodically copying the statistics to + the statistics core through atomic primitives, for example through + rte_atomic32_set(). The accuracy would be determined by the + frequency at which the statistics are transferred to the statistics + core. */ + +struct task_rt_stats { + uint32_t rx_pkt_count; + uint32_t tx_pkt_count; + uint32_t drop_tx_fail; + uint32_t drop_discard; + uint32_t drop_handled; + uint32_t idle_cycles; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t drop_bytes; +} __attribute__((packed)) __rte_cache_aligned; + +#ifdef PROX_STATS +#define TASK_STATS_ADD_IDLE(stats, cycles) do { \ + (stats)->idle_cycles += (cycles) + rdtsc_overhead_stats; \ + } while(0) \ + +#define TASK_STATS_ADD_TX(stats, ntx) do { \ + (stats)->tx_pkt_count += ntx; \ + } while(0) \ + +#define TASK_STATS_ADD_DROP_TX_FAIL(stats, ntx) do { \ + (stats)->drop_tx_fail += ntx; \ + } while(0) \ + +#define TASK_STATS_ADD_DROP_HANDLED(stats, ntx) do { \ + (stats)->drop_handled += ntx; \ + } while(0) \ + +#define TASK_STATS_ADD_DROP_DISCARD(stats, ntx) do { \ + (stats)->drop_discard += ntx; \ + } while(0) \ + +#define TASK_STATS_ADD_RX(stats, ntx) do { \ + (stats)->rx_pkt_count += ntx; \ + } while (0) \ + +#define TASK_STATS_ADD_RX_BYTES(stats, bytes) do { \ + (stats)->rx_bytes += bytes; \ + } while (0) \ + +#define TASK_STATS_ADD_TX_BYTES(stats, bytes) do { \ + (stats)->tx_bytes += bytes; \ + } while (0) \ + +#define TASK_STATS_ADD_DROP_BYTES(stats, bytes) do { \ + (stats)->drop_bytes += bytes; \ + } while (0) \ + +#define START_EMPTY_MEASSURE() uint64_t cur_tsc = rte_rdtsc(); +#else +#define TASK_STATS_ADD_IDLE(stats, cycles) do {} while(0) +#define TASK_STATS_ADD_TX(stats, ntx) do {} while(0) +#define TASK_STATS_ADD_DROP_TX_FAIL(stats, ntx) do {} while(0) +#define TASK_STATS_ADD_DROP_HANDLED(stats, 
ntx) do {} while(0) +#define TASK_STATS_ADD_DROP_DISCARD(stats, ntx) do {} while(0) +#define TASK_STATS_ADD_RX(stats, ntx) do {} while(0) +#define TASK_STATS_ADD_RX_BYTES(stats, bytes) do {} while(0) +#define TASK_STATS_ADD_TX_BYTES(stats, bytes) do {} while(0) +#define TASK_STATS_ADD_DROP_BYTES(stats, bytes) do {} while(0) +#define START_EMPTY_MEASSURE() do {} while(0) +#endif + +struct task_stats_sample { + uint64_t tsc; + uint32_t tx_pkt_count; + uint32_t drop_tx_fail; + uint32_t drop_discard; + uint32_t drop_handled; + uint32_t rx_pkt_count; + uint32_t empty_cycles; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t drop_bytes; +}; + +struct task_stats { + uint64_t tot_tx_pkt_count; + uint64_t tot_drop_tx_fail; + uint64_t tot_drop_discard; + uint64_t tot_drop_handled; + uint64_t tot_rx_pkt_count; + + struct task_stats_sample sample[2]; + + struct task_rt_stats *stats; + /* flags set if total RX/TX values need to be reported set at + initialization time, only need to access stats values in port */ + uint8_t flags; +}; + +void stats_task_reset(void); +void stats_task_post_proc(void); +void stats_task_update(void); +void stats_task_init(void); + +int stats_get_n_tasks_tot(void); + +struct task_stats *stats_get_task_stats(uint32_t lcore_id, uint32_t task_id); +struct task_stats_sample *stats_get_task_stats_sample(uint32_t lcore_id, uint32_t task_id, int last); +void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc); + +uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id); +uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id); +uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id); +uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id); + +#endif /* _STATS_TASK_H_ */ diff --git a/VNFs/DPPD-PROX/task_base.h b/VNFs/DPPD-PROX/task_base.h new file mode 100644 index 00000000..b2fab2fc --- /dev/null +++ b/VNFs/DPPD-PROX/task_base.h @@ -0,0 +1,247 @@ +/* +// Copyright (c) 2010-2017 Intel 
   Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#ifndef _TASK_BASE_H_
#define _TASK_BASE_H_

#include <rte_common.h>
#ifndef __rte_cache_aligned
#include <rte_memory.h>
#endif

#include "defaults.h"
#include "prox_globals.h"
#include "stats_task.h"

// runtime_flags 8 bits only
#define TASK_MPLS_TAGGING              0x0001
#define TASK_ROUTING                   0x0002
#define TASK_CLASSIFY                  0x0004
#define TASK_CTRL_HANDLE_ARP           0x0008
#define TASK_MARK                      0x0020
#define TASK_FP_HANDLE_ARP             0x0040
#define TASK_TX_CRC                    0x0080

// flag_features 64 bits
#define TASK_FEATURE_ROUTING           0x0001
#define TASK_FEATURE_CLASSIFY          0x0002
#define TASK_FEATURE_MULTI_RX          0x0004
#define TASK_FEATURE_NEVER_DISCARDS    0x0008
#define TASK_FEATURE_NO_RX             0x0010
#define TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS 0x0020
#define TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS 0x0040
#define TASK_FEATURE_ZERO_RX           0x0080
#define TASK_FEATURE_TXQ_FLAGS_REFCOUNT 0x0100
#define TASK_FEATURE_TSC_RX            0x0200
#define TASK_FEATURE_THROUGHPUT_OPT    0x0400
#define TASK_FEATURE_GRE_ID            0x1000
#define TASK_FEATURE_LUT_QINQ_RSS      0x2000
#define TASK_FEATURE_LUT_QINQ_HASH     0x4000
#define TASK_FEATURE_RX_ALL            0x8000
/* NOTE(review): breaks the TASK_FEATURE_ prefix convention; name kept
   since other files may reference it. */
#define TASK_MULTIPLE_MAC              0x10000

#define FLAG_TX_FLUSH                  0x01
#define FLAG_NEVER_FLUSH               0x02
// Task specific flags
#define BASE_FLAG_LUT_QINQ_HASH        0x08
#define BASE_FLAG_LUT_QINQ_RSS         0x10

/* Special values for the per-packet "out" routing byte. */
#define OUT_DISCARD 0xFF
#define OUT_HANDLED 0xFE

#define WS_MBUF_MASK (2 * MAX_PKT_BURST - 1)

/* struct ws_mbuf stores the working set of mbufs. It starts with a
   prod/cons index to keep track of the number of elemenets. */
struct ws_mbuf {
	struct {
		uint16_t prod;
		uint16_t cons;
		uint16_t nb_rx;
		uint16_t pad; /* reserved */
	} idx[MAX_RINGS_PER_TASK];
	/* Flexible 2-D tail: one row of mbuf pointers per ring/port. */
	struct rte_mbuf *mbuf[][MAX_RING_BURST * 3] __rte_cache_aligned;
};

/* Identifies one hardware queue on one port. */
struct port_queue {
	uint8_t port;
	uint8_t queue;
} __attribute__((packed));

/* RX configuration when receiving from several physical ports. */
struct rx_params_hw {
	union {
		uint8_t nb_rxports;
		uint8_t rxport_mask; /* used when nb_rxports is a power of 2 */
	};
	uint8_t last_read_portid; /* round-robin position across ports */
	struct port_queue *rx_pq;
} __attribute__((packed));

/* RX configuration for the single-physical-port fast path. */
struct rx_params_hw1 {
	struct port_queue rx_pq;
} __attribute__((packed));

/* RX configuration when receiving from several software rings. */
struct rx_params_sw {
	union {
		uint8_t nb_rxrings;
		uint8_t rxrings_mask; /* Used if rte_is_power_of_2(nb_rxrings)*/
	};
	uint8_t last_read_ring; /* round-robin position across rings */
	struct rte_ring **rx_rings;
} __attribute__((packed));

/* If there is only one input ring, the pointer to it can be stored
   directly into the task_base instead of having to use a pointer to a
   set of rings which would require two dereferences. */
struct rx_params_sw1 {
	struct rte_ring *rx_ring;
} __attribute__((packed));

/* TX configuration when sending to physical port queues. */
struct tx_params_hw {
	uint16_t nb_txports;
	struct port_queue *tx_port_queue;
} __attribute__((packed));

/* TX configuration when sending to software rings. */
struct tx_params_sw {
	uint16_t nb_txrings;
	struct rte_ring **tx_rings;
} __attribute__((packed));

struct tx_params_hw_sw { /* Only one port supported in this mode */
	uint16_t nb_txrings;
	struct rte_ring **tx_rings;
	struct port_queue tx_port_queue;
} __attribute__((packed));

/* Runtime debugging state: packet printing/tracing requested via the CLI. */
struct task_rt_dump {
	uint32_t n_print_rx;
	uint32_t n_print_tx;
	struct input *input;
	uint32_t n_trace;
	uint32_t cur_trace;
	void *pkt_mbuf_addr[MAX_RING_BURST]; /* To track reordering */
	uint8_t pkt_cpy[MAX_RING_BURST][128];
	uint16_t pkt_cpy_len[MAX_RING_BURST];
};

struct task_base;

#define MAX_RX_PKT_ALL 16384

/* Max depth of the stacked rx_pkt wrapper chain (note: "FUCTIONS"
   misspelling kept — the identifier is referenced elsewhere). */
#define MAX_STACKED_RX_FUCTIONS 16

typedef uint16_t (*rx_pkt_func) (struct task_base *tbase, struct rte_mbuf ***mbufs);

/* Cold/auxiliary task state, kept out of struct task_base so that the
   hot path stays within one cache line. */
struct task_base_aux {
	/* Not used when PROX_STATS is not defined */
	struct task_rt_stats stats;
	struct task_rt_dump task_rt_dump;

	/* Used if TASK_TSC_RX is enabled*/
	struct {
		uint64_t before;
		uint64_t after;
	} tsc_rx;

	struct rte_mbuf **all_mbufs;

	/* Stack of previously installed rx_pkt functions (see
	   task_base_add/del_rx_pkt_function below). */
	int rx_prev_count;
	int rx_prev_idx;
	uint16_t (*rx_pkt_prev[MAX_STACKED_RX_FUCTIONS])(struct task_base *tbase, struct rte_mbuf ***mbufs);

	/* Histograms of burst sizes, indexed by packet count. */
	uint32_t rx_bucket[MAX_RING_BURST + 1];
	uint32_t tx_bucket[MAX_RING_BURST + 1];
	int (*tx_pkt_orig)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	int (*tx_pkt_hw)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
	uint16_t (*tx_pkt_try)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
	void (*stop)(struct task_base *tbase);
	void (*start)(struct task_base *tbase);
	void (*stop_last)(struct task_base *tbase);
	void (*start_first)(struct task_base *tbase);
};

/* The task_base is accessed for _all_ task
types. In case + no debugging is needed, it has been optimized to fit + into a single cache line to minimize cache pollution */ +struct task_base { + int (*handle_bulk)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts); + int (*tx_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); + uint16_t (*rx_pkt)(struct task_base *tbase, struct rte_mbuf ***mbufs); + + struct task_base_aux* aux; + /* The working set of mbufs contains mbufs that are currently + being handled. */ + struct ws_mbuf *ws_mbuf; + + uint16_t flags; + + union { + struct rx_params_hw rx_params_hw; + struct rx_params_hw1 rx_params_hw1; + struct rx_params_sw rx_params_sw; + struct rx_params_sw1 rx_params_sw1; + }; + + union { + struct tx_params_hw tx_params_hw; + struct tx_params_sw tx_params_sw; + struct tx_params_hw_sw tx_params_hw_sw; + }; +} __attribute__((packed)) __rte_cache_aligned; + +static void task_base_add_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_add) +{ + if (tbase->aux->rx_prev_count == MAX_STACKED_RX_FUCTIONS) { + return; + } + + for (int16_t i = tbase->aux->rx_prev_count; i >= 0; --i) { + tbase->aux->rx_pkt_prev[i + 1] = tbase->aux->rx_pkt_prev[i]; + } + tbase->aux->rx_pkt_prev[0] = tbase->rx_pkt; + tbase->rx_pkt = to_add; + tbase->aux->rx_prev_count++; +} + +static void task_base_del_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_del) +{ + int cur = 0; + int found = 0; + + if (tbase->aux->rx_prev_count == 1) { + tbase->rx_pkt = tbase->aux->rx_pkt_prev[0]; + found = 1; + } else { + for (int16_t i = 0; i < tbase->aux->rx_prev_count; ++i) { + if (found || tbase->aux->rx_pkt_prev[i] != to_del) + tbase->aux->rx_pkt_prev[cur++] = tbase->aux->rx_pkt_prev[i]; + else + found = 1; + } + } + if (found) + tbase->aux->rx_prev_count--; +} + +static rx_pkt_func task_base_get_original_rx_pkt_function(struct task_base *tbase) +{ + if (tbase->aux->rx_prev_count == 0) + return tbase->rx_pkt; + else + return 
tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_count - 1]; +} + +#endif /* _TASK_BASE_H_ */ diff --git a/VNFs/DPPD-PROX/task_init.c b/VNFs/DPPD-PROX/task_init.c new file mode 100644 index 00000000..6d9c7b3d --- /dev/null +++ b/VNFs/DPPD-PROX/task_init.c @@ -0,0 +1,401 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <string.h> +#include <stdio.h> +#include <rte_version.h> + +#include "prox_port_cfg.h" +#include "prox_malloc.h" +#include "task_init.h" +#include "rx_pkt.h" +#include "tx_pkt.h" +#include "log.h" +#include "quit.h" +#include "lconf.h" +#include "thread_generic.h" +#include "prox_assert.h" + +#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0) +#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE +#endif + +static unsigned first_task = 1; +LIST_HEAD(,task_init) head; + +void reg_task(struct task_init* t) +{ + PROX_PANIC(t->handle == NULL, "No handle function specified for task with name %d\n", t->mode); + + if (t->thread_x == NULL) + t->thread_x = thread_generic; + + if (first_task) { + first_task = 0; + LIST_INIT(&head); + } + + LIST_INSERT_HEAD(&head, t, entries); +} + +struct task_init *to_task_init(const char *mode_str, const char *sub_mode_str) +{ + struct task_init *cur_t; + + LIST_FOREACH(cur_t, &head, entries) { + if (!strcmp(mode_str, cur_t->mode_str) && + !strcmp(sub_mode_str, cur_t->sub_mode_str)) { + return cur_t; + } + } + + return NULL; +} + +static int compare_strcmp(const void *a, 
const void *b) +{ + return strcmp(*(const char * const *)a, *(const char * const *)b); +} + +void tasks_list(void) +{ + struct task_init *cur_t; + char buf[sizeof(cur_t->mode_str) + sizeof(cur_t->sub_mode_str) + 4]; + + int nb_modes = 1; /* master */ + LIST_FOREACH(cur_t, &head, entries) { + ++nb_modes; + } + + char **modes = calloc(nb_modes, sizeof(*modes)); + char **cur_m = modes; + *cur_m++ = strdup("master"); + LIST_FOREACH(cur_t, &head, entries) { + snprintf(buf, sizeof(buf), "%s%s%s", + cur_t->mode_str, + (cur_t->sub_mode_str[0] == 0) ? "" : " / ", + cur_t->sub_mode_str); + *cur_m++ = strdup(buf); + } + qsort(modes, nb_modes, sizeof(*modes), compare_strcmp); + + plog_info("=== List of supported task modes / sub modes ===\n"); + for (cur_m = modes; nb_modes; ++cur_m, --nb_modes) { + plog_info("\t%s\n", *cur_m); + free(*cur_m); + } + free(modes); +} + +static size_t calc_memsize(struct task_args *targ, size_t task_size) +{ + size_t memsize = task_size; + + memsize += sizeof(struct task_base_aux); + + if (targ->nb_rxports != 0) { + memsize += 2 * sizeof(uint8_t)*targ->nb_rxports; + } + if (targ->nb_rxrings != 0 || targ->tx_opt_ring_task) { + memsize += sizeof(struct rte_ring *)*targ->nb_rxrings; + } + if (targ->nb_txrings != 0) { + memsize += sizeof(struct rte_ring *) * targ->nb_txrings; + memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE); + memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txrings; + } + else if (targ->nb_txports != 0) { + memsize += sizeof(struct port_queue) * targ->nb_txports; + memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE); + memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * targ->nb_txports; + } + else { + memsize = RTE_ALIGN_CEIL(memsize, RTE_CACHE_LINE_SIZE); + memsize += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]); + } + + return memsize; +} + +static void *flush_function(struct task_args *targ) +{ + if (targ->flags & TASK_ARG_DROP) { + return 
targ->nb_txrings ? flush_queues_sw : flush_queues_hw; + } + else { + return targ->nb_txrings ? flush_queues_no_drop_sw : flush_queues_no_drop_hw; + } +} + +static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *tbase, size_t offset) +{ + if (targ->tx_opt_ring_task) { + tbase->rx_pkt = rx_pkt_self; + } + else if (targ->nb_rxrings != 0) { + + if (targ->nb_rxrings == 1) { + tbase->rx_pkt = rx_pkt_sw1; + tbase->rx_params_sw1.rx_ring = targ->rx_rings[0]; + } + else { + tbase->rx_pkt = rx_pkt_sw; + tbase->rx_params_sw.nb_rxrings = targ->nb_rxrings; + tbase->rx_params_sw.rx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset); + offset += sizeof(struct rte_ring *)*tbase->rx_params_sw.nb_rxrings; + + for (uint8_t i = 0; i < tbase->rx_params_sw.nb_rxrings; ++i) { + tbase->rx_params_sw.rx_rings[i] = targ->rx_rings[i]; + } + + if (rte_is_power_of_2(targ->nb_rxrings)) { + tbase->rx_pkt = rx_pkt_sw_pow2; + tbase->rx_params_sw.rxrings_mask = targ->nb_rxrings - 1; + } + } + } + else { + if (targ->nb_rxports == 1) { + tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi : rx_pkt_hw1; + tbase->rx_params_hw1.rx_pq.port = targ->rx_port_queue[0].port; + tbase->rx_params_hw1.rx_pq.queue = targ->rx_port_queue[0].queue; + } + else { + PROX_ASSERT((targ->nb_rxports != 0) || (targ->task_init->flag_features & TASK_FEATURE_NO_RX)); + tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? 
rx_pkt_hw_multi : rx_pkt_hw; + tbase->rx_params_hw.nb_rxports = targ->nb_rxports; + tbase->rx_params_hw.rx_pq = (struct port_queue *)(((uint8_t *)tbase) + offset); + offset += sizeof(struct port_queue) * tbase->rx_params_hw.nb_rxports; + for (int i = 0; i< targ->nb_rxports; i++) { + tbase->rx_params_hw.rx_pq[i].port = targ->rx_port_queue[i].port; + tbase->rx_params_hw.rx_pq[i].queue = targ->rx_port_queue[i].queue; + } + + if (rte_is_power_of_2(targ->nb_rxports)) { + tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi : rx_pkt_hw_pow2; + tbase->rx_params_hw.rxport_mask = targ->nb_rxports - 1; + } + } + } + + if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) { + // Transmitting to a ring in NO DROP. We need to make sure the receiving task in not running on the same core. + // Otherwise we might end up in a dead lock: trying in a loop to transmit to a task which cannot receive anymore + // (as npt being scheduled). 
+ struct core_task ct; + struct task_args *dtarg; + for (unsigned int j = 0; j < targ->nb_txrings; j++) { + ct = targ->core_task_set[0].core_task[j]; + PROX_PANIC(ct.core == targ->lconf->id, "Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on on same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task); + //plog_info("Core %d, task %d: NO_DROP task transmitting to another task (core %d, task %d) running on on same core => potential deadlock\n", targ->lconf->id, targ->id, ct.core, ct.task); + } + } + if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) { + /* Transmitting to multiple rings and one port */ + plog_info("Initializing with 1 port %d queue %d nb_rings=%d\n", targ->tx_port_queue[0].port, targ->tx_port_queue[0].queue, targ->nb_txrings); + tbase->tx_params_hw_sw.tx_port_queue.port = targ->tx_port_queue[0].port; + tbase->tx_params_hw_sw.tx_port_queue.queue = targ->tx_port_queue[0].queue; + if (!targ->tx_opt_ring) { + tbase->tx_params_hw_sw.nb_txrings = targ->nb_txrings; + tbase->tx_params_hw_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset); + offset += sizeof(struct rte_ring *)*tbase->tx_params_hw_sw.nb_txrings; + + for (uint8_t i = 0; i < tbase->tx_params_hw_sw.nb_txrings; ++i) { + tbase->tx_params_hw_sw.tx_rings[i] = targ->tx_rings[i]; + } + + offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE); + tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset); + offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw_sw.nb_txrings; + } + } + else if (!targ->tx_opt_ring) { + if (targ->nb_txrings != 0) { + tbase->tx_params_sw.nb_txrings = targ->nb_txrings; + tbase->tx_params_sw.tx_rings = (struct rte_ring **)(((uint8_t *)tbase) + offset); + offset += sizeof(struct rte_ring *)*tbase->tx_params_sw.nb_txrings; + + for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) { + tbase->tx_params_sw.tx_rings[i] = targ->tx_rings[i]; + } + + 
offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE); + tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset); + offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_sw.nb_txrings; + } + else if (targ->nb_txports != 0) { + tbase->tx_params_hw.nb_txports = targ->nb_txports; + tbase->tx_params_hw.tx_port_queue = (struct port_queue *)(((uint8_t *)tbase) + offset); + offset += sizeof(struct port_queue) * tbase->tx_params_hw.nb_txports; + for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) { + tbase->tx_params_hw.tx_port_queue[i].port = targ->tx_port_queue[i].port; + tbase->tx_params_hw.tx_port_queue[i].queue = targ->tx_port_queue[i].queue; + } + + offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE); + tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset); + offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]) * tbase->tx_params_hw.nb_txports; + } + else { + offset = RTE_ALIGN_CEIL(offset, RTE_CACHE_LINE_SIZE); + tbase->ws_mbuf = (struct ws_mbuf *)(((uint8_t *)tbase) + offset); + offset += sizeof(struct ws_mbuf) + sizeof(((struct ws_mbuf*)0)->mbuf[0]); + } + + struct ws_mbuf* w = tbase->ws_mbuf; + struct task_args *prev = targ->tx_opt_ring_task; + + while (prev) { + prev->tbase->ws_mbuf = w; + prev = prev->tx_opt_ring_task; + } + } + if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) { + if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) { + if (targ->tx_opt_ring) { + tbase->tx_pkt = tx_pkt_never_discard_self; + } + else if (targ->flags & TASK_ARG_DROP) { + if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_thrpt_opt; + else + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_never_discard_sw1 : tx_pkt_never_discard_hw1_lat_opt; + } + else { + if (targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) + tbase->tx_pkt = targ->nb_txrings ? 
tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_thrpt_opt; + else + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt; + } + if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0)) + tbase->flags |= FLAG_NEVER_FLUSH; + else + targ->lconf->flush_queues[targ->task] = flush_function(targ); + } + else { + if (targ->tx_opt_ring) { + tbase->tx_pkt = tx_pkt_self; + } + else if (targ->flags & TASK_ARG_DROP) { + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw1 : tx_pkt_hw1; + } + else { + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1; + } + tbase->flags |= FLAG_NEVER_FLUSH; + } + } + else { + if (targ->flags & TASK_ARG_DROP) { + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_sw : tx_pkt_hw; + } + else { + tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw : tx_pkt_no_drop_hw; + } + + targ->lconf->flush_queues[targ->task] = flush_function(targ); + } + + if (targ->task_init->flag_features & TASK_FEATURE_NO_RX) { + tbase->rx_pkt = rx_pkt_dummy; + } + + if (targ->nb_txrings == 0 && targ->nb_txports == 0) { + tbase->tx_pkt = tx_pkt_drop_all; + } + + return offset; +} + +struct task_base *init_task_struct(struct task_args *targ) +{ + struct task_init* t = targ->task_init; + size_t offset = 0; + size_t memsize = calc_memsize(targ, t->size); + uint8_t task_socket = rte_lcore_to_socket_id(targ->lconf->id); + struct task_base *tbase = prox_zmalloc(memsize, task_socket); + PROX_PANIC(tbase == NULL, "Failed to allocate memory for task (%zu bytes)", memsize); + offset += t->size; + + if (targ->nb_txrings == 0 && targ->nb_txports == 0) + tbase->flags |= FLAG_NEVER_FLUSH; + + offset = init_rx_tx_rings_ports(targ, tbase, offset); + tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset); + + if (targ->task_init->flag_features & TASK_FEATURE_RX_ALL) { + task_base_add_rx_pkt_function(tbase, rx_pkt_all); + tbase->aux->all_mbufs = 
prox_zmalloc(MAX_RX_PKT_ALL * sizeof(* tbase->aux->all_mbufs), task_socket); + } + if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) { + task_base_add_rx_pkt_function(tbase, rx_pkt_tsc); + } + + offset += sizeof(struct task_base_aux); + + tbase->handle_bulk = t->handle; + + targ->tbase = tbase; + if (t->init) { + t->init(tbase, targ); + } + tbase->aux->start = t->start; + tbase->aux->stop = t->stop; + tbase->aux->start_first = t->start_first; + tbase->aux->stop_last = t->stop_last; + if ((targ->nb_txrings != 0) && (targ->nb_txports == 1)) { + tbase->aux->tx_pkt_hw = tx_pkt_no_drop_never_discard_hw1_no_pointer; + } + if (targ->tx_opt_ring) { + tbase->aux->tx_pkt_try = tx_try_self; + } else if (targ->nb_txrings == 1) { + tbase->aux->tx_pkt_try = tx_try_sw1; + } else if (targ->nb_txports) { + tbase->aux->tx_pkt_try = tx_try_hw1; + } + + return tbase; +} + +struct task_args *find_reachable_task_sending_to_port(struct task_args *from) +{ + if (!from->nb_txrings) + return from; + + struct core_task ct; + struct task_args *dtarg, *ret; + + for (uint32_t i = 0; i < from->nb_txrings; ++i) { + ct = from->core_task_set[0].core_task[i]; + dtarg = core_targ_get(ct.core, ct.task); + ret = find_reachable_task_sending_to_port(dtarg); + if (ret) + return ret; + } + return NULL; +} + +struct prox_port_cfg *find_reachable_port(struct task_args *from) +{ + struct task_args *dst = find_reachable_task_sending_to_port(from); + + if (dst) { + int port_id = dst->tx_port_queue[0].port; + + return &prox_port_cfg[port_id]; + } + return NULL; +} diff --git a/VNFs/DPPD-PROX/task_init.h b/VNFs/DPPD-PROX/task_init.h new file mode 100644 index 00000000..beb4de02 --- /dev/null +++ b/VNFs/DPPD-PROX/task_init.h @@ -0,0 +1,239 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _TASK_INIT_H_ +#define _TASK_INIT_H_ + +#include <sys/queue.h> + +#include <rte_common.h> +#include <rte_sched.h> +#include <rte_ether.h> +#include "task_base.h" +#include "prox_globals.h" +#include "ip6_addr.h" +#include "flow_iter.h" +#include "parse_utils.h" + +struct rte_mbuf; +struct lcore_cfg; + +#if MAX_RINGS_PER_TASK < PROX_MAX_PORTS +#error MAX_RINGS_PER_TASK < PROX_MAX_PORTS +#endif + +#define TASK_ARG_DROP 0x01 +#define TASK_ARG_RX_RING 0x02 +#define TASK_ARG_RTE_TABLE 0x08 +#define TASK_ARG_LOCAL_LPM 0x10 +#define TASK_ARG_QINQ_ACL 0x20 +#define TASK_ARG_CTRL_RINGS_P 0x40 +#define TASK_ARG_DST_MAC_SET 0x80 +#define TASK_ARG_SRC_MAC_SET 0x100 +#define TASK_ARG_DO_NOT_SET_SRC_MAC 0x200 +#define TASK_ARG_DO_NOT_SET_DST_MAC 0x400 +#define TASK_ARG_HW_SRC_MAC 0x800 + +enum protocols {IPV4, ARP, IPV6}; + +struct qos_cfg { + struct rte_sched_port_params port_params; + struct rte_sched_subport_params subport_params[1]; + struct rte_sched_pipe_params pipe_params[1]; +}; + +enum task_mode {NOT_SET, MASTER, QINQ_DECAP4, QINQ_DECAP6, + QINQ_ENCAP4, QINQ_ENCAP6, GRE_DECAP, GRE_ENCAP,CGNAT, +}; + +struct task_args; + +struct task_init { + enum task_mode mode; + char mode_str[32]; + char sub_mode_str[32]; + void (*early_init)(struct task_args *targ); + void (*init)(struct task_base *tbase, struct task_args *targ); + int (*handle)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts); + void (*start)(struct task_base *tbase); + void (*stop)(struct task_base *tbase); + void (*start_first)(struct task_base *tbase); + 
void (*stop_last)(struct task_base *tbase); + int (*thread_x)(struct lcore_cfg* lconf); + struct flow_iter flow_iter; + size_t size; + uint16_t flag_req_data; /* flags from prox_shared.h */ + uint64_t flag_features; + uint32_t mbuf_size; + LIST_ENTRY(task_init) entries; +}; + +static int task_init_flag_set(struct task_init *task_init, uint64_t flag) +{ + return !!(task_init->flag_features & flag); +} + +enum police_action { + ACT_GREEN = e_RTE_METER_GREEN, + ACT_YELLOW = e_RTE_METER_YELLOW, + ACT_RED = e_RTE_METER_RED, + ACT_DROP = 3, + ACT_INVALID = 4 +}; + +/* Configuration for task that is only used during startup. */ +struct task_args { + struct task_base *tbase; + struct task_init* task_init; + struct rte_mempool *pool; + char pool_name[MAX_NAME_SIZE]; + struct lcore_cfg *lconf; + uint32_t nb_mbuf; + uint32_t mbuf_size; + uint8_t mbuf_size_set_explicitely; + uint32_t nb_cache_mbuf; + uint8_t nb_slave_threads; + uint8_t nb_worker_threads; + uint8_t worker_thread_id; + uint8_t task; + uint32_t id; + struct core_task_set core_task_set[MAX_PROTOCOLS]; + struct task_args *prev_tasks[MAX_RINGS_PER_TASK]; + uint32_t n_prev_tasks; + uint32_t ring_size; /* default is RX_RING_SIZE */ + struct qos_cfg qos_conf; + uint32_t flags; + uint32_t runtime_flags; + uint8_t nb_txports; + uint8_t nb_txrings; + uint8_t nb_rxrings; + uint8_t tot_rxrings; + uint8_t nb_rxports; + uint32_t byte_offset; + uint32_t gateway_ipv4; + uint32_t number_gen_ip; + uint32_t local_ipv4; + struct ipv6_addr local_ipv6; /* For IPv6 Tunnel, it's the local tunnel endpoint address */ + struct rte_ring *rx_rings[MAX_RINGS_PER_TASK]; + struct rte_ring *tx_rings[MAX_RINGS_PER_TASK]; + uint32_t tot_n_txrings_inited; + struct ether_addr edaddr; + struct ether_addr esaddr; + struct port_queue tx_port_queue[PROX_MAX_PORTS]; + struct port_queue rx_port_queue[PROX_MAX_PORTS]; + /* Used to set up actual task at initialization time. 
*/ + enum task_mode mode; + /* Destination output position in hw or sw when using mac learned dest port. */ + uint8_t mapping[PROX_MAX_PORTS]; + struct rte_table_hash *cpe_table; + struct rte_table_hash *qinq_gre_table; + struct rte_hash *cpe_gre_hash; + struct rte_hash *qinq_gre_hash; + struct cpe_data *cpe_data; + struct cpe_gre_data *cpe_gre_data; + struct qinq_gre_data *qinq_gre_data; + uint8_t tx_opt_ring; + struct task_args *tx_opt_ring_task; + uint32_t qinq_tag; + +#ifdef ENABLE_EXTRA_USER_STATISTICS + uint32_t n_users; // Number of users in user table. +#endif + uint32_t n_flows; // Number of flows used in policing + uint32_t cir; + uint32_t cbs; + uint32_t ebs; + uint32_t pir; + uint32_t pbs; + uint32_t overhead; + enum police_action police_act[3][3]; + uint32_t marking[4]; + uint32_t n_max_rules; + uint32_t random_delay_us; + uint32_t delay_us; + uint32_t cpe_table_timeout_ms; + uint32_t etype; +#ifdef GRE_TP + uint32_t tb_rate; /**< Pipe token bucket rate (measured in bytes per second) */ + uint32_t tb_size; /**< Pipe token bucket size (measured in credits) */ +#endif + uint8_t tunnel_hop_limit; /* IPv6 Tunnel - Hop limit */ + uint16_t lookup_port_mask; /* Ipv6 Tunnel - Mask applied to UDP/TCP port before lookup */ + uint32_t ctrl_freq; + uint8_t lb_friend_core; + uint8_t lb_friend_task; + /* gen related*/ + uint64_t rate_bps; + uint32_t n_rand_str; + char rand_str[64][64]; + uint32_t rand_offset[64]; + char pcap_file[256]; + uint32_t accur_pos; + uint32_t sig_pos; + uint32_t sig; + uint32_t lat_pos; + uint32_t packet_id_pos; + uint32_t latency_buffer_size; + uint32_t bucket_size; + uint32_t lat_enabled; + uint32_t pkt_size; + uint8_t pkt_inline[ETHER_MAX_LEN]; + uint32_t probability; + char nat_table[256]; + uint32_t use_src; + char route_table[256]; + char rules[256]; + char dscp[256]; + char tun_bindings[256]; + char cpe_table_name[256]; + char user_table[256]; + uint32_t n_concur_conn; + char streams[256]; + uint32_t min_bulk_size; + uint32_t 
max_bulk_size; + uint32_t max_setup_rate; + uint32_t n_pkts; + uint32_t loop; + uint32_t flow_table_size; + char dpi_engine_path[256]; + char dpi_engine_args[16][256]; + uint32_t n_dpi_engine_args; + uint32_t generator_id; + uint32_t accuracy_limit_nsec; + /* cgnat related */ + uint32_t public_ip_count; + struct public_ip_config_info *public_ip_config_info; + struct public_entry *public_entries; + struct private_flow_entry *private_flow_entries; + struct rte_hash *public_ip_port_hash; + struct rte_hash *private_ip_port_hash; + struct rte_hash *private_ip_hash; + struct private_ip_info *private_ip_info; +}; + +/* Return the first port that is reachable through the task. If the + task itself does not send directly to a port, the function will + search reachable tasks through each outgoing ring */ +struct task_args *find_reachable_task_sending_to_port(struct task_args *from); +struct prox_port_cfg *find_reachable_port(struct task_args *from); + +struct task_base *init_task_struct(struct task_args *targ); +struct task_init *to_task_init(const char *mode_str, const char *sub_mode_str); +void tasks_list(void); + +void reg_task(struct task_init* t); + +#endif /* _TASK_INIT_H_ */ diff --git a/VNFs/DPPD-PROX/thread_generic.c b/VNFs/DPPD-PROX/thread_generic.c new file mode 100644 index 00000000..f596bf25 --- /dev/null +++ b/VNFs/DPPD-PROX/thread_generic.c @@ -0,0 +1,196 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <rte_cycles.h> +#include <rte_table_hash.h> + +#include "log.h" +#include "thread_generic.h" +#include "stats.h" +#include "tx_pkt.h" +#include "lconf.h" +#include "hash_entry_types.h" +#include "defines.h" +#include "hash_utils.h" + +struct tsc_task { + uint64_t tsc; + uint64_t (* tsc_task)(struct lcore_cfg *lconf); +}; + +static uint64_t tsc_drain(struct lcore_cfg *lconf) +{ + lconf_flush_all_queues(lconf); + return DRAIN_TIMEOUT; +} + +static uint64_t tsc_term(struct lcore_cfg *lconf) +{ + if (lconf_is_req(lconf) && lconf_do_flags(lconf)) { + lconf_flush_all_queues(lconf); + return -2; + } + return TERM_TIMEOUT; +} + +static uint64_t tsc_period(struct lcore_cfg *lconf) +{ + lconf->period_func(lconf->period_data); + return lconf->period_timeout; +} + +static uint64_t tsc_ctrl(struct lcore_cfg *lconf) +{ + const uint8_t n_tasks_all = lconf->n_tasks_all; + void *msgs[MAX_RING_BURST]; + uint16_t n_msgs; + + for (uint8_t task_id = 0; task_id < n_tasks_all; ++task_id) { + if (lconf->ctrl_rings_m[task_id] && lconf->ctrl_func_m[task_id]) { +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[task_id], msgs, MAX_RING_BURST); +#else + n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_m[task_id], msgs, MAX_RING_BURST, NULL); +#endif + if (n_msgs) { + lconf->ctrl_func_m[task_id](lconf->tasks_all[task_id], msgs, n_msgs); + } + } + if (lconf->ctrl_rings_p[task_id] && lconf->ctrl_func_p[task_id]) { +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[task_id], msgs, MAX_RING_BURST); +#else + n_msgs = rte_ring_sc_dequeue_burst(lconf->ctrl_rings_p[task_id], msgs, MAX_RING_BURST, NULL); +#endif + if (n_msgs) { + lconf->ctrl_func_p[task_id](lconf->tasks_all[task_id], (struct rte_mbuf **)msgs, n_msgs); + } + } + } + return lconf->ctrl_timeout; +} + +int 
thread_generic(struct lcore_cfg *lconf) +{ + struct task_base *tasks[MAX_TASKS_PER_CORE]; + int next[MAX_TASKS_PER_CORE] = {0}; + struct rte_mbuf **mbufs; + uint64_t cur_tsc = rte_rdtsc(); + uint8_t zero_rx[MAX_TASKS_PER_CORE] = {0}; + struct tsc_task tsc_tasks[] = { + {.tsc = cur_tsc, .tsc_task = tsc_term}, + {.tsc = cur_tsc + DRAIN_TIMEOUT, .tsc_task = tsc_drain}, + {.tsc = -1}, + {.tsc = -1}, + {.tsc = -1}, + }; + uint8_t n_tasks_run = lconf->n_tasks_run; + + if (lconf->period_func) { + tsc_tasks[2].tsc = cur_tsc + lconf->period_timeout; + tsc_tasks[2].tsc_task = tsc_period; + } + + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + if (lconf->ctrl_func_m[task_id]) { + tsc_tasks[3].tsc = cur_tsc + lconf->ctrl_timeout; + tsc_tasks[3].tsc_task = tsc_ctrl; + break; + } + if (lconf->ctrl_func_p[task_id]) { + tsc_tasks[3].tsc = cur_tsc + lconf->ctrl_timeout; + tsc_tasks[3].tsc_task = tsc_ctrl; + break; + } + } + + /* sort tsc tasks */ + for (size_t i = 0; i < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++i) { + for (size_t j = i + 1; j < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++j) { + if (tsc_tasks[i].tsc > tsc_tasks[j].tsc) { + struct tsc_task tmp = tsc_tasks[i]; + tsc_tasks[i] = tsc_tasks[j]; + tsc_tasks[j] = tmp; + } + } + } + struct tsc_task next_tsc = tsc_tasks[0]; + + for (;;) { + cur_tsc = rte_rdtsc(); + /* Sort scheduled tsc_tasks starting from earliest + first. A linear search is performed moving + tsc_tasks that are scheduled earlier to the front + of the list. There is a high frequency tsc_task in + most cases. As a consequence, the currently + scheduled tsc_task will be rescheduled to be + executed as the first again. If many tsc_tasks are + to be used, the algorithm should be replaced with a + priority-queue (heap). 
*/ + if (unlikely(cur_tsc >= next_tsc.tsc)) { + uint64_t resched_diff = tsc_tasks[0].tsc_task(lconf); + + if (resched_diff == (uint64_t)-2) { + n_tasks_run = lconf->n_tasks_run; + if (!n_tasks_run) + return 0; + for (int i = 0; i < lconf->n_tasks_run; ++i) { + tasks[i] = lconf->tasks_run[i]; + + uint8_t task_id = lconf_get_task_id(lconf, tasks[i]); + if (lconf->targs[task_id].task_init->flag_features & TASK_FEATURE_ZERO_RX) + zero_rx[i] = 1; + } + } + + uint64_t new_tsc = tsc_tasks[0].tsc + resched_diff; + tsc_tasks[0].tsc = new_tsc; + next_tsc.tsc = new_tsc; + + for (size_t i = 1; i < sizeof(tsc_tasks)/sizeof(tsc_tasks[0]); ++i) { + if (new_tsc < tsc_tasks[i].tsc) { + if (i > 1) { + tsc_tasks[i - 1] = next_tsc; + next_tsc = tsc_tasks[0]; + } + break; + } + else + tsc_tasks[i - 1] = tsc_tasks[i]; + } + } + + uint16_t nb_rx; + for (uint8_t task_id = 0; task_id < n_tasks_run; ++task_id) { + struct task_base *t = tasks[task_id]; + struct task_args *targ = &lconf->targs[task_id]; + // Do not skip a task receiving packets from an optimized ring + // as the transmitting task expects such a receiving task to always run and consume + // the transmitted packets. + if (unlikely(next[task_id] && (targ->tx_opt_ring_task == NULL))) { + // plogx_info("task %d is too busy\n", task_id); + next[task_id] = 0; + } else { + nb_rx = t->rx_pkt(t, &mbufs); + if (likely(nb_rx || zero_rx[task_id])) { + next[task_id] = t->handle_bulk(t, mbufs, nb_rx); + } + } + + } + } + return 0; +} diff --git a/VNFs/DPPD-PROX/thread_generic.h b/VNFs/DPPD-PROX/thread_generic.h new file mode 100644 index 00000000..a5b45a18 --- /dev/null +++ b/VNFs/DPPD-PROX/thread_generic.h @@ -0,0 +1,30 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _THREAD_GENERIC_H_ +#define _THREAD_GENERIC_H_ + +struct lcore_cfg; + +/* The generic thread can do everything needed for each of the tasks. + It is not optimized for any specific case and suggested use is only + for testing purpose and for tasks that require to run a function + periodically (i.e. ARP management). More specific "thread_XXX" + functions should be used to only do the steps only necessary for + the task. */ +int thread_generic(struct lcore_cfg *lconf); + +#endif /* _THREAD_GENERIC_H_ */ diff --git a/VNFs/DPPD-PROX/thread_nop.c b/VNFs/DPPD-PROX/thread_nop.c new file mode 100644 index 00000000..ba30dc61 --- /dev/null +++ b/VNFs/DPPD-PROX/thread_nop.c @@ -0,0 +1,66 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> + +#include "log.h" +#include "lconf.h" +#include "thread_nop.h" +#include "handle_nop.h" +#include "stats.h" +#include "lconf.h" +#include "defines.h" + +int thread_nop(struct lcore_cfg *lconf) +{ + struct task_base *tasks[MAX_TASKS_PER_CORE]; + struct rte_mbuf **mbufs; + uint64_t cur_tsc = rte_rdtsc(); + uint64_t term_tsc = cur_tsc; + uint64_t drain_tsc = cur_tsc; + uint8_t n_tasks_run = 0; + + for (;;) { + cur_tsc = rte_rdtsc(); + if (cur_tsc > term_tsc) { + term_tsc = cur_tsc + TERM_TIMEOUT; + if (lconf_is_req(lconf) && lconf_do_flags(lconf)) { + n_tasks_run = lconf->n_tasks_run; + + if (!n_tasks_run) + return 0; + for (int i = 0; i < lconf->n_tasks_run; ++i) { + tasks[i] = lconf->tasks_run[i]; + } + } + } + if (cur_tsc > drain_tsc) { + drain_tsc = cur_tsc + DRAIN_TIMEOUT; + lconf_flush_all_queues(lconf); + } + + for (uint8_t task_id = 0; task_id < n_tasks_run; ++task_id) { + struct task_base *t = tasks[task_id]; + uint16_t nb_rx = t->rx_pkt(t, &mbufs); + + if (likely(nb_rx)) { + handle_nop_bulk(t, mbufs, nb_rx); + } + } + } + + return 0; +} diff --git a/VNFs/DPPD-PROX/thread_nop.h b/VNFs/DPPD-PROX/thread_nop.h new file mode 100644 index 00000000..6bc4465a --- /dev/null +++ b/VNFs/DPPD-PROX/thread_nop.h @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _THREAD_NOP_H_ +#define _THREAD_NOP_H_ + +struct lcore_cfg; + +/* A separate threading function specifically with minimal features is + supplied to allow testing with minimal overhead. This thread + function is only used when all tasks on the core use have the + .thread_x field set to thread_nop. */ +int thread_nop(struct lcore_cfg *lconf); + +#endif /* _THREAD_NOP_H_ */ diff --git a/VNFs/DPPD-PROX/thread_pipeline.c b/VNFs/DPPD-PROX/thread_pipeline.c new file mode 100644 index 00000000..242b137b --- /dev/null +++ b/VNFs/DPPD-PROX/thread_pipeline.c @@ -0,0 +1,295 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_cycles.h> +#include <rte_port_ethdev.h> +#include <rte_port_ring.h> +#include <rte_version.h> + +#include "log.h" +#include "quit.h" +#include "thread_pipeline.h" +#include "lconf.h" +#include "defines.h" + +/* Helper function: create pipeline, input ports and output ports */ +void init_pipe_create_in_out(struct task_pipe *tpipe, struct task_args *targ) +{ + struct task_base *tbase = (struct task_base *)tpipe; + const char *name = targ->lconf->name; + const char *mode = targ->task_init->mode_str; + uint8_t lcore_id = targ->lconf->id; + uint8_t task_id = targ->task; + int err; + + /* create pipeline */ + struct rte_pipeline_params pipeline_params = { + .name = name, + .socket_id = rte_lcore_to_socket_id(lcore_id), + }; + tpipe->p = rte_pipeline_create(&pipeline_params); + PROX_PANIC(tpipe->p == NULL, + "Failed to create %s pipeline on core %u task %u\n", + mode, lcore_id, task_id); + + /* create pipeline input ports */ + if (targ->nb_rxrings != 0) { + for (uint8_t i = 0; i < tbase->rx_params_sw.nb_rxrings; ++i) { + struct rte_port_ring_reader_params port_ring_params = { + .ring = tbase->rx_params_sw.rx_rings[i], + }; + struct rte_pipeline_port_in_params port_params = { + .ops = &rte_port_ring_reader_ops, + .arg_create = &port_ring_params, + .f_action = NULL, //TODO: fill metadata + .arg_ah = NULL, + .burst_size = MAX_RING_BURST, + }; + err = rte_pipeline_port_in_create(tpipe->p, + &port_params, &tpipe->port_in_id[i]); + PROX_PANIC(err != 0, "Failed to create SW input port %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, mode, lcore_id, task_id, err); + } + tpipe->n_ports_in = tbase->rx_params_sw.nb_rxrings; + } + else { + for (uint8_t i = 0; i < tbase->rx_params_hw.nb_rxports; ++i) { + struct rte_port_ethdev_reader_params port_ethdev_params = { + .port_id = tbase->rx_params_hw.rx_pq[i].port, + .queue_id = tbase->rx_params_hw.rx_pq[i].queue, + }; + struct rte_pipeline_port_in_params port_params = { + .ops = 
&rte_port_ethdev_reader_ops, + .arg_create = &port_ethdev_params, + .f_action = NULL, //TODO: fill metadata + .arg_ah = NULL, + .burst_size = MAX_PKT_BURST, + }; + err = rte_pipeline_port_in_create(tpipe->p, + &port_params, &tpipe->port_in_id[0]); + PROX_PANIC(err != 0, "Failed to create HW input port " + "for %s pipeline on core %u task %u: " + "err = %d\n", + mode, lcore_id, task_id, err); + } + tpipe->n_ports_in = tbase->rx_params_hw.nb_rxports; + } + PROX_PANIC(tpipe->n_ports_in < 1, "No input port created " + "for %s pipeline on core %u task %u\n", + mode, lcore_id, task_id); + + /* create pipeline output ports */ + if (targ->nb_txrings != 0) { + for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) { + struct rte_port_ring_writer_params port_ring_params = { + .ring = tbase->tx_params_sw.tx_rings[i], + .tx_burst_sz = MAX_RING_BURST, + }; + struct rte_pipeline_port_out_params port_params = { + .ops = &rte_port_ring_writer_ops, + .arg_create = &port_ring_params, + .f_action = NULL, //TODO +#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0) + .f_action_bulk = NULL, //TODO +#endif + .arg_ah = NULL, + }; + err = rte_pipeline_port_out_create(tpipe->p, + &port_params, &tpipe->port_out_id[i]); + PROX_PANIC(err != 0, "Failed to create SW output port %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, mode, lcore_id, task_id, err); + } + tpipe->n_ports_out = tbase->tx_params_sw.nb_txrings; + } + else { + for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) { + struct rte_port_ethdev_writer_params port_ethdev_params = { + .port_id = tbase->tx_params_hw.tx_port_queue[i].port, + .queue_id = tbase->tx_params_hw.tx_port_queue[i].queue, + .tx_burst_sz = MAX_PKT_BURST, + }; + struct rte_pipeline_port_out_params port_params = { + .ops = &rte_port_ethdev_writer_ops, + .arg_create = &port_ethdev_params, + .f_action = NULL, //TODO +#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0) + .f_action_bulk = NULL, //TODO +#endif + .arg_ah = NULL, + }; + err = 
rte_pipeline_port_out_create(tpipe->p, + &port_params, &tpipe->port_out_id[i]); + PROX_PANIC(err != 0, "Failed to create HW output port %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, mode, lcore_id, task_id, err); + } + tpipe->n_ports_out = tbase->tx_params_hw.nb_txports; + } + PROX_PANIC(tpipe->n_ports_out < 1, "No output port created " + "for %s pipeline on core %u task %u\n", + mode, lcore_id, task_id); +} + +/* Helper function: connect pipeline input ports to one pipeline table */ +void init_pipe_connect_one(struct task_pipe *tpipe, struct task_args *targ, + uint32_t table_id) +{ + const char *mode = targ->task_init->mode_str; + uint8_t lcore_id = targ->lconf->id; + uint8_t task_id = targ->task; + int err; + + for (uint8_t i = 0; i < tpipe->n_ports_in; ++i) { + err = rte_pipeline_port_in_connect_to_table(tpipe->p, + tpipe->port_in_id[i], table_id); + PROX_PANIC(err != 0, "Failed to connect input port %u to table id %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, table_id, mode, lcore_id, task_id, err); + } +} + +/* Helper function: connect pipeline input ports to all pipeline tables */ +void init_pipe_connect_all(struct task_pipe *tpipe, struct task_args *targ) +{ + const char *mode = targ->task_init->mode_str; + uint8_t lcore_id = targ->lconf->id; + uint8_t task_id = targ->task; + int err; + + PROX_PANIC(tpipe->n_tables < tpipe->n_ports_in, + "Not enough tables (%u) to connect %u input ports " + "for %s pipeline on core %u task %u\n", + tpipe->n_tables, tpipe->n_ports_in, + mode, lcore_id, task_id); + + for (uint8_t i = 0; i < tpipe->n_ports_in; ++i) { + err = rte_pipeline_port_in_connect_to_table(tpipe->p, + tpipe->port_in_id[i], tpipe->table_id[i]); + PROX_PANIC(err != 0, "Failed to connect input port %u to table id %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, tpipe->table_id[i], mode, lcore_id, task_id, err); + } +} + +/* Helper function: enable pipeline input ports */ +void 
init_pipe_enable(struct task_pipe *tpipe, struct task_args *targ) +{ + const char *mode = targ->task_init->mode_str; + uint8_t lcore_id = targ->lconf->id; + uint8_t task_id = targ->task; + int err; + + for (uint8_t i = 0; i < tpipe->n_ports_in; ++i) { + err = rte_pipeline_port_in_enable(tpipe->p, tpipe->port_in_id[i]); + PROX_PANIC(err != 0, "Failed to enable input port %u " + "for %s pipeline on core %u task %u: " + "err = %d\n", + i, mode, lcore_id, task_id, err); + } +} + +/* Helper function: check pipeline consistency */ +void init_pipe_check(struct task_pipe *tpipe, struct task_args *targ) +{ + const char *mode = targ->task_init->mode_str; + uint8_t lcore_id = targ->lconf->id; + uint8_t task_id = targ->task; + int err; + + err = rte_pipeline_check(tpipe->p); + PROX_PANIC(err != 0, "Failed consistency check " + "for %s pipeline on core %u task %u: " + "err = %d\n", + mode, lcore_id, task_id, err); +} + +/* This function will panic on purpose: tasks based on Packet Framework + pipelines should not be invoked via the usual task_base.handle_bulk method */ +int handle_pipe(struct task_base *tbase, + __attribute__((unused)) struct rte_mbuf **mbufs, + __attribute__((unused)) uint16_t n_pkts) +{ + uint32_t lcore_id = rte_lcore_id(); + struct lcore_cfg *lconf = &lcore_cfg[lcore_id]; + + for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + struct task_args *targ = &lconf->targs[task_id]; + if (lconf->tasks_all[task_id] == tbase) { + PROX_PANIC(1, "Error on core %u task %u: cannot run " + "%s pipeline and other non-PF tasks\n", + lcore_id, task_id, targ->task_init->mode_str); + } + } + PROX_PANIC(1, "Error: cannot find task on core %u\n", lcore_id); + return 0; +} + +int thread_pipeline(struct lcore_cfg *lconf) +{ + struct task_pipe *pipes[MAX_TASKS_PER_CORE]; + uint64_t cur_tsc = rte_rdtsc(); + uint64_t term_tsc = cur_tsc + TERM_TIMEOUT; + uint64_t drain_tsc = cur_tsc + DRAIN_TIMEOUT; + const uint8_t nb_tasks = lconf->n_tasks_all; + + for (uint8_t 
task_id = 0; task_id < lconf->n_tasks_all; ++task_id) { + //TODO: solve other mutually exclusive thread/tasks + struct task_args *targ = &lconf->targs[task_id]; + PROX_PANIC(targ->task_init->thread_x != thread_pipeline, + "Invalid task %u '%s' on core %u: %s() can only " + "run tasks based on Packet Framework pipelines\n", + targ->task, targ->task_init->mode_str, + targ->lconf->id, __func__); + + pipes[task_id] = (struct task_pipe *)lconf->tasks_all[task_id]; + } + + lconf->flags |= LCONF_FLAG_RUNNING; + for (;;) { + cur_tsc = rte_rdtsc(); + if (cur_tsc > drain_tsc) { + drain_tsc = cur_tsc + DRAIN_TIMEOUT; + + if (cur_tsc > term_tsc) { + term_tsc = cur_tsc + TERM_TIMEOUT; + if (lconf->msg.req && lconf->msg.type == LCONF_MSG_STOP) { + lconf->flags &= ~LCONF_FLAG_RUNNING; + break; + } + if (!lconf_is_req(lconf)) { + lconf_unset_req(lconf); + plog_warn("Command ignored (lconf functions not supported in Packet Framework pipelines)\n"); + } + } + + for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) { + rte_pipeline_flush(pipes[task_id]->p); + } + } + + for (uint8_t task_id = 0; task_id < nb_tasks; ++task_id) { + rte_pipeline_run(pipes[task_id]->p); + } + } + return 0; +} diff --git a/VNFs/DPPD-PROX/thread_pipeline.h b/VNFs/DPPD-PROX/thread_pipeline.h new file mode 100644 index 00000000..35cb64c7 --- /dev/null +++ b/VNFs/DPPD-PROX/thread_pipeline.h @@ -0,0 +1,60 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _THREAD_PIPELINE_H_ +#define _THREAD_PIPELINE_H_ + +#include <rte_pipeline.h> + +#include "lconf.h" +#include "task_base.h" + +/* Tasks based on Packet Framework pipelines */ +struct task_pipe { + struct task_base base; + + struct rte_pipeline *p; + uint32_t port_in_id[MAX_RINGS_PER_TASK]; + uint32_t port_out_id[MAX_RINGS_PER_TASK]; + uint32_t table_id[MAX_RINGS_PER_TASK]; + uint8_t n_ports_in; + uint8_t n_ports_out; + uint8_t n_tables; +}; + +/* Helper function: create pipeline, input ports and output ports */ +void init_pipe_create_in_out(struct task_pipe *tpipe, struct task_args *targ); + +/* Helper function: connect pipeline input ports to one pipeline table */ +void init_pipe_connect_one(struct task_pipe *tpipe, struct task_args *targ, uint32_t table_id); + +/* Helper function: connect pipeline input ports to all pipeline tables */ +void init_pipe_connect_all(struct task_pipe *tpipe, struct task_args *targ); + +/* Helper function: enable pipeline input ports */ +void init_pipe_enable(struct task_pipe *tpipe, struct task_args *targ); + +/* Helper function: check pipeline consistency */ +void init_pipe_check(struct task_pipe *tpipe, struct task_args *targ); + +/* This function will panic on purpose: tasks based on Packet Framework + pipelines should not be invoked via the usual task_base.handle_bulk method */ +int handle_pipe(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); + +/* The pipeline thread can only run tasks based on Packet Framework pipelines */ +int thread_pipeline(struct lcore_cfg *lconf); + +#endif /* _THREAD_PIPELINE_H_ */ diff --git a/VNFs/DPPD-PROX/toeplitz.c b/VNFs/DPPD-PROX/toeplitz.c new file mode 100644 index 00000000..62424579 --- /dev/null +++ b/VNFs/DPPD-PROX/toeplitz.c @@ -0,0 +1,60 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <stdio.h> +#include <stdint.h> +#include "toeplitz.h" + +/* From XL710 Datasheet, 7.1.10 */ + +uint8_t toeplitz_init_key[TOEPLITZ_KEY_LEN] = + {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x8f, 0xb0, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00 +}; + +uint32_t toeplitz_hash(uint8_t *buf_p, int buflen) +{ + uint32_t result = 0; + uint8_t *key_p = toeplitz_init_key; + uint8_t byte, *byte4_p = key_p+4; + int i, pos = 0; + int bit = 0; + uint32_t key_word = __builtin_bswap32(*(uint32_t *)key_p); + + for (i = 0; i < buflen; ++i) { + byte = buf_p[i]; + for (bit = 0; bit <= 7; ++bit) { + if (byte & (1 << (7 - bit))) { + result ^= key_word; + } + key_word = (key_word << 1) | ((*byte4_p >> (7 - bit)) & 1); + } + if (pos >= TOEPLITZ_KEY_LEN - 4) { + pos = 0; + byte4_p = key_p; + } + else { + pos++; + byte4_p++; + } + } + return result; +} diff --git a/VNFs/DPPD-PROX/toeplitz.h b/VNFs/DPPD-PROX/toeplitz.h new file mode 100644 index 00000000..f24ae766 --- /dev/null +++ b/VNFs/DPPD-PROX/toeplitz.h @@ -0,0 +1,23 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _TOEPLITZ_H_ +#define _TOEPLITZ_H_ + +#define TOEPLITZ_KEY_LEN 52 +extern uint8_t toeplitz_init_key[TOEPLITZ_KEY_LEN]; +uint32_t toeplitz_hash(uint8_t *buf_p, int buflen); +#endif diff --git a/VNFs/DPPD-PROX/token_time.h b/VNFs/DPPD-PROX/token_time.h new file mode 100644 index 00000000..e59647ad --- /dev/null +++ b/VNFs/DPPD-PROX/token_time.h @@ -0,0 +1,165 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _TOKEN_TIME_H_ +#define _TOKEN_TIME_H_ + +#include <rte_cycles.h> +#include <math.h> + +#include "prox_assert.h" + +struct token_time_cfg { + uint64_t bpp; + uint64_t period; + uint64_t bytes_max; +}; + +struct token_time { + uint64_t tsc_last; + uint64_t tsc_last_bytes; + uint64_t bytes_now; + struct token_time_cfg cfg; +}; + +/* Convert a given fractional bytes per period into bpp with as + minimal loss of accuracy. 
*/ +static struct token_time_cfg token_time_cfg_create(double frac, uint64_t period, uint64_t bytes_max) +{ + struct token_time_cfg ret; + + /* Since period is expressed in units of cycles and it is in + most cases set to 1 second (which means its value is <= + 3*10^9) and 2^64/10^9 > 6148914691 > 2^32). This means that + at most, period and frac will be doubled 32 times by the + following algorithm. Hence, the total error introduced by + the chosen values for bpp and period will be between 0 and + 1/2^33. Note that since there are more operations that + can't overflow, the actual accuracy will probably be + lower. */ + + /* The reason to limit period by UINT64_MAX/(uint64_t)frac is + that at run-time, the token_time_update function will + multiply a number that is <= period with bpp. In addition, + the token_time_tsc_until function will multiply at most + bytes_max with period so make sure that can't overflow. */ + + while (period < UINT64_MAX/2 && frac != floor(frac) && + (frac < 2.0f || period < UINT64_MAX/4/(uint64_t)frac) && + (bytes_max == UINT64_MAX || period < UINT64_MAX/2/bytes_max)) { + period *= 2; + frac *= 2; + } + + ret.bpp = floor(frac + 0.5); + ret.period = period; + ret.bytes_max = bytes_max; + + return ret; +} + +static void token_time_update(struct token_time *tt, uint64_t tsc) +{ + uint64_t new_bytes; + uint64_t t_diff = tsc - tt->tsc_last; + + /* Since the rate is expressed in tt->bpp, i.e. bytes per + period, counters can only be incremented/decremented + accurately every period cycles. */ + + /* If the last update was more than a period ago, the update + can be performed accurately. */ + if (t_diff > tt->cfg.period) { + /* First add remaining tokens in the last period that + was added partially. 
*/ + new_bytes = tt->cfg.bpp - tt->tsc_last_bytes; + tt->tsc_last_bytes = 0; + tt->bytes_now += new_bytes; + t_diff -= tt->cfg.period; + tt->tsc_last += tt->cfg.period; + + /* If now it turns out that more periods have elapsed, + add the bytes for those periods directly. */ + if (t_diff > tt->cfg.period) { + uint64_t periods = t_diff/tt->cfg.period; + + tt->bytes_now += periods * tt->cfg.bpp; + t_diff -= tt->cfg.period * periods; + tt->tsc_last += tt->cfg.period * periods; + } + } + + /* At this point, t_diff will be guaranteed to be less + than tt->cfg.period. */ + new_bytes = t_diff * tt->cfg.bpp/tt->cfg.period - tt->tsc_last_bytes; + tt->tsc_last_bytes += new_bytes; + tt->bytes_now += new_bytes; + if (tt->bytes_now > tt->cfg.bytes_max) + tt->bytes_now = tt->cfg.bytes_max; +} + +static void token_time_set_bpp(struct token_time *tt, uint64_t bpp) +{ + tt->cfg.bpp = bpp; +} + +static void token_time_init(struct token_time *tt, const struct token_time_cfg *cfg) +{ + tt->cfg = *cfg; +} + +static void token_time_reset(struct token_time *tt, uint64_t tsc, uint64_t bytes_now) +{ + tt->tsc_last = tsc; + tt->bytes_now = bytes_now; + tt->tsc_last_bytes = 0; +} + +static void token_time_reset_full(struct token_time *tt, uint64_t tsc) +{ + token_time_reset(tt, tsc, tt->cfg.bytes_max); +} + +static int token_time_take(struct token_time *tt, uint64_t bytes) +{ + if (bytes > tt->bytes_now) + return -1; + tt->bytes_now -= bytes; + return 0; +} + +static void token_time_take_clamp(struct token_time *tt, uint64_t bytes) +{ + if (bytes > tt->bytes_now) + tt->bytes_now = 0; + else + tt->bytes_now -= bytes; +} + +static uint64_t token_time_tsc_until(const struct token_time *tt, uint64_t bytes) +{ + if (tt->bytes_now >= bytes) + return 0; + + return (bytes - tt->bytes_now) * tt->cfg.period / tt->cfg.bpp; +} + +static uint64_t token_time_tsc_until_full(const struct token_time *tt) +{ + return token_time_tsc_until(tt, tt->cfg.bytes_max); +} + +#endif /* _TOKEN_TIME_H_ */ diff --git 
a/VNFs/DPPD-PROX/tools/flow_extract/Makefile b/VNFs/DPPD-PROX/tools/flow_extract/Makefile new file mode 100644 index 00000000..a772b8c5 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/Makefile @@ -0,0 +1,59 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +SOURCES = main.cpp +SOURCES += streamextract.cpp +SOURCES += pcapreader.cpp +SOURCES += pcapwriter.cpp +SOURCES += timestamp.cpp +SOURCES += pcappkt.cpp +SOURCES += netsocket.cpp +SOURCES += stream3.cpp +SOURCES += stream2.cpp +SOURCES += stream.cpp +SOURCES += path.cpp +SOURCES += allocator.cpp +SOURCES += halfstream.cpp +SOURCES += bundle.cpp +SOURCES += progress.cpp +SOURCES += mappedfile.cpp +SOURCES += streamsorter.cpp +SOURCES += memreader.cpp +SOURCES += programconfig.cpp + +BUILD_DIR = build +OBJECTS = $(SOURCES:%.cpp=$(BUILD_DIR)/%.o) +PROG = flowextract + +CXXFLAGS += -D__STDC_LIMIT_MACROS -g -O2 -Wall -ansi -pedantic -Wno-unused -msse4.2 +LDFLAGS = -lpcap + +$(BUILD_DIR)/$(PROG): $(OBJECTS) + @echo -e "LD\t$<" + @$(CXX) $(CXXFLAGS) $(LDFLAGS) $(OBJECTS) -o $@ + +-include $(SOURCES:%.cpp=$(BUILD_DIR)/%.d) + +$(BUILD_DIR)/%.o: %.cpp + @mkdir -p $(BUILD_DIR) + @echo -e "CXX\t $<" + @$(CXX) -c $(CXXFLAGS) $*.cpp -o $@ + @$(CXX) -MM $(CXXFLAGS) $*.cpp -MT $(BUILD_DIR)/$*.o > $(BUILD_DIR)/$*.d + @cp -f $(BUILD_DIR)/$*.d $(BUILD_DIR)/$*.d.tmp + @sed -e 's/.*://' -e 's/\\$$//' < $(BUILD_DIR)/$*.d.tmp | fmt -1 | sed -e 's/^ *//' -e 
's/$$/:/' >> $(BUILD_DIR)/$*.d + @rm -f $(BUILD_DIR)/$*.d.tmp +clean: + @rm -f $(BUILD_DIR)/$(PROG) $(BUILD_DIR)/*.o $(BUILD_DIR)/*.d diff --git a/VNFs/DPPD-PROX/tools/flow_extract/README b/VNFs/DPPD-PROX/tools/flow_extract/README new file mode 100644 index 00000000..fb8754b3 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/README @@ -0,0 +1,20 @@ +## +## Copyright (c) 2010-2017 Intel Corporation +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## + +The flow extract tool is meant a to be run as a first pass on a pcap +file. The output is a lua config file describing the relations between +flows together with a binary file that contains all the packet headers +and payload. diff --git a/VNFs/DPPD-PROX/tools/flow_extract/allocator.cpp b/VNFs/DPPD-PROX/tools/flow_extract/allocator.cpp new file mode 100644 index 00000000..c861ebfe --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/allocator.cpp @@ -0,0 +1,84 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <iostream> +#include <sys/mman.h> +#include <unistd.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <cerrno> +#include <cstdio> +#include <cstdlib> +#include <cstring> + +#define USEHP + +using namespace std; + +#include "allocator.hpp" + +Allocator::Allocator(size_t size, size_t threshold) + : m_size(size), m_threshold(threshold), m_alloc_offset(0) +{ +#ifdef USEHP + int fd = open("/mnt/huge/hp", O_CREAT | O_RDWR, 0755); + if (fd < 0) { + cerr << "Allocator failed to open huge page file descriptor: " << strerror(errno) << endl; + exit(EXIT_FAILURE); + } + m_mem = (uint8_t *)mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + if (m_mem == MAP_FAILED) { + perror("mmap"); + unlink("/mnt/huge"); + cerr << "Allocator mmap failed: " << strerror(errno) << endl; + exit (EXIT_FAILURE); + } +#else + m_mem = new uint8_t[size]; +#endif +} + +Allocator::~Allocator() +{ +#ifdef USEHP + munmap((void *)m_mem, m_size); +#else + delete[] m_mem; +#endif +} + +void *Allocator::alloc(size_t size) +{ + void *ret = &m_mem[m_alloc_offset]; + + m_alloc_offset += size; + return ret; +} + +void Allocator::reset() +{ + m_alloc_offset = 0; +} + +size_t Allocator::getFreeSize() const +{ + return m_size - m_alloc_offset; +} + +bool Allocator::lowThresholdReached() const +{ + return (m_size - m_alloc_offset) < m_threshold; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/allocator.hpp b/VNFs/DPPD-PROX/tools/flow_extract/allocator.hpp new file mode 100644 index 00000000..d3f1537e --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/allocator.hpp @@ -0,0 +1,38 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _ALLOCATOR_H_ +#define _ALLOCATOR_H_ + +#include <cstddef> +#include <inttypes.h> + +class Allocator { +public: + Allocator(size_t size, size_t lowThreshold); + ~Allocator(); + bool lowThresholdReached() const; + void *alloc(size_t size); + void reset(); + size_t getFreeSize() const; +private: + size_t m_size; + size_t m_threshold; + size_t m_alloc_offset; + uint8_t *m_mem; +}; + +#endif /* _ALLOCATOR_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/bundle.cpp b/VNFs/DPPD-PROX/tools/flow_extract/bundle.cpp new file mode 100644 index 00000000..abeaf14e --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/bundle.cpp @@ -0,0 +1,28 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "bundle.hpp" + +void Bundle::toLua(ofstream *f, const string& streamTableName, uint32_t idx) const +{ + (*f) << "bundles[" << idx << "] = {"; + + for(vector<uint32_t>::const_iterator i = streams.begin(); i != streams.end(); ++i) { + (*f) << streamTableName << "[" << (*i) << "]," ; + } + + (*f) << "}" << endl; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/bundle.hpp b/VNFs/DPPD-PROX/tools/flow_extract/bundle.hpp new file mode 100644 index 00000000..cb5d81b6 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/bundle.hpp @@ -0,0 +1,38 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _BUNDLE_H_ +#define _BUNDLE_H_ + +#include <vector> +#include <inttypes.h> +#include <fstream> + +using namespace std; + +class Bundle +{ +public: + void addStream(uint32_t streamId, uint32_t port) {streams.push_back(streamId); ports.push_back(port);} + const vector<uint32_t>& getStream() const {return streams;} + const vector<uint32_t>& getPorts() const {return ports;} + void toLua(ofstream *f, const string& streamTableName, uint32_t idx) const; +private: + vector<uint32_t> streams; + vector<uint32_t> ports; +}; + +#endif /* _BUNDLE_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/crc.hpp b/VNFs/DPPD-PROX/tools/flow_extract/crc.hpp new file mode 100644 index 00000000..713b4abd --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/crc.hpp @@ -0,0 +1,51 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _CRC_H_ +#define _CRC_H_ + +static uint32_t crc32(const uint8_t *buf, size_t len, int init) +{ + uint32_t ret = init; + + while (len/8) { + ret = __builtin_ia32_crc32di(ret, *((uint64_t*)buf)); + len -= 8; + buf += 8; + } + + while (len/4) { + ret = __builtin_ia32_crc32si(ret, *((uint32_t*)buf)); + len -= 4; + buf += 4; + } + + while (len/2) { + ret = __builtin_ia32_crc32hi(ret, *((uint16_t*)buf)); + len -= 2; + buf += 2; + } + + while (len) { + ret = __builtin_ia32_crc32qi(ret, *((uint8_t*)buf)); + len -= 1; + buf += 1; + } + + return ret; +} + +#endif /* _CRC_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/csvfilereader.cpp b/VNFs/DPPD-PROX/tools/flow_extract/csvfilereader.cpp new file mode 100644 index 00000000..909fc94d --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/csvfilereader.cpp @@ -0,0 +1,67 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <iostream> +#include <cstdlib> +#include <cstring> +#include <stdint.h> + +#include "csvfilereader.hpp" + +int CsvFileReader::open(const string& str) +{ + char *resolved_path = new char[1024]; + + memset(resolved_path, 0, 1024); + realpath(str.c_str(), resolved_path); + file.open(resolved_path); + + delete []resolved_path; + return file.is_open(); +} + +vector<string> CsvFileReader::read() +{ + vector<string> ret; + size_t prev = 0, cur = 0; + string line; + + if (file.eof()) + return vector<string>(); + + std::getline(file, line); + if (line.empty()) + return vector<string>(); + + while (true) { + cur = line.find_first_of(',', prev); + + if (cur != SIZE_MAX) { + ret.push_back(line.substr(prev, cur - prev)); + prev = cur + 1; + } + else { + ret.push_back(line.substr(prev, line.size() - prev)); + break; + } + } + return ret; +} + +void CsvFileReader::close() +{ + file.close(); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/csvfilereader.hpp b/VNFs/DPPD-PROX/tools/flow_extract/csvfilereader.hpp new file mode 100644 index 00000000..21f397a7 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/csvfilereader.hpp @@ -0,0 +1,35 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _CSVFILE_H_ +#define _CSVFILE_H_ + +#include <fstream> +#include <string> +#include <vector> + +using namespace std; + +class CsvFileReader { +public: + int open(const string& str); + vector<string> read(); + void close(); +private: + ifstream file; +}; + +#endif /* _CSVFILE_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/flowtable.hpp b/VNFs/DPPD-PROX/tools/flow_extract/flowtable.hpp new file mode 100644 index 00000000..ebb4d927 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/flowtable.hpp @@ -0,0 +1,174 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _FLOWTABLE_H_ +#define _FLOWTABLE_H_ + +#include <inttypes.h> +#include <sys/time.h> +#include <stdio.h> +#include <cstring> + +#include <vector> +#include <list> +#include <cstddef> +#include <utility> + +#include "crc.hpp" +#include "timestamp.hpp" + +using namespace std; + +template <typename K, typename T> +class FlowTable { +public: + struct entry { + entry(K key, T value, const struct timeval& tv, list<struct entry> *parent) : + key(key), value(value), tv(tv), parent(parent) {} + bool expired(const Timestamp &now, const Timestamp &maxDiff) const + { + return now - Timestamp(tv) > maxDiff; + } + K key; + T value; + struct timeval tv; /* List time entry has been hit */ + list<struct entry> *parent; + }; + class Iterator { + friend class FlowTable; + public: + bool operator!=(const Iterator& other) { + return m_v != other.m_v || + m_vec_pos != other.m_vec_pos || + m_a != other.m_a; + + } + Iterator& operator++() { + m_a++; + while (m_vec_pos != m_v->size() - 1 && m_a == (*m_v)[m_vec_pos].end()) { + m_vec_pos++; + m_a = (*m_v)[m_vec_pos].begin(); + } + + return *this; + } + struct entry &operator*() { + return *m_a; + } + private: + Iterator(uint32_t vec_pos, vector<list<struct entry> > *v) + : m_vec_pos(vec_pos), m_v(v) + { + m_a = (*m_v)[vec_pos].begin(); + while (m_vec_pos != m_v->size() - 1 && m_a == (*m_v)[m_vec_pos].end()) { + m_vec_pos++; + m_a = (*m_v)[m_vec_pos].begin(); + } + } + Iterator(uint32_t vec_pos, vector<list<struct entry> > *v, const typename list< struct entry>::iterator& a) + : m_vec_pos(vec_pos), m_v(v), m_a(a) + { } + uint32_t m_vec_pos; + vector<list<struct entry> > *m_v; + typename list<struct entry>::iterator m_a; + }; + uint32_t getEntryCount() const {return m_entryCount;} + FlowTable(uint32_t size); + void expire(const struct timeval& tv); + struct entry* lookup(const K& key); + void remove(struct FlowTable<K,T>::entry* entry); + struct entry* insert(const K& key, const T& value, const struct timeval& tv); + Iterator 
begin() {return Iterator(0, &m_elems);} + Iterator end() {return Iterator(m_elems.size() - 1, &m_elems, m_elems.back().end());} + void clear(); +private: + void clearBucket(list<struct entry> *l); + vector<list<struct entry> > m_elems; + uint32_t m_entryCount; +}; + +template <typename K, typename T> +FlowTable<K, T>::FlowTable(uint32_t size) + : m_elems(), m_entryCount(0) + +{ + m_elems.resize(size); +} + +template <typename K, typename T> +struct FlowTable<K, T>::entry* FlowTable<K, T>::lookup(const K& key) +{ + uint32_t ret = crc32((uint8_t*)&key, sizeof(K), 0); + + list<struct entry> &l = m_elems[ret % m_elems.size()]; + + if (l.empty()) + return NULL; + + for (typename list<struct entry>::iterator it = l.begin(); it != l.end(); ++it) { + if (memcmp(&((*it).key), &key, sizeof(key)) == 0) + return &(*it); + } + return NULL; +} + +template <typename K, typename T> +struct FlowTable<K, T>::entry *FlowTable<K, T>::insert(const K& key, const T& value, const struct timeval& tv) +{ + uint32_t ret = crc32((uint8_t*)&key, sizeof(K), 0); + list<struct entry> &l = m_elems[ret % m_elems.size()]; + + l.push_back(entry(key, value, tv, &l)); + + struct entry &n = l.back(); + m_entryCount++; + n.key = key; + n.value = value; + return &n; +} + +template <typename K, typename T> +void FlowTable<K, T>::remove(struct FlowTable<K,T>::entry* entry) +{ + list<struct entry> &l = *entry->parent; + + for (typename list<struct entry>::iterator it = l.begin(); it != l.end(); ++it) { + if (memcmp(&((*it).key), &entry->key, sizeof(entry->key)) == 0) { + l.erase(it); + m_entryCount--; + return ; + } + } +} + +template <typename K, typename T> +void FlowTable<K, T>::clearBucket(list<struct entry> *l) +{ + while (!l->empty()) { + m_entryCount--; + l->erase(l->begin()); + } +} + +template <typename K, typename T> +void FlowTable<K, T>::clear() +{ + for (size_t i = 0; i < m_elems.size(); ++i) { + clearBucket(&m_elems[i]); + } +} + +#endif /* _FLOWTABLE_H_ */ diff --git 
a/VNFs/DPPD-PROX/tools/flow_extract/halfstream.cpp b/VNFs/DPPD-PROX/tools/flow_extract/halfstream.cpp new file mode 100644 index 00000000..7d8f1fe2 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/halfstream.cpp @@ -0,0 +1,101 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <fstream> +#include <arpa/inet.h> + +#include "halfstream.hpp" + +HalfStream::Action::Part HalfStream::addPkt(const PcapPkt &pkt) +{ + const uint32_t pktId = pkts.size(); + const uint8_t *l5; + uint32_t l5Len; + uint16_t tmpHdrLen; + + const struct PcapPkt::tcp_hdr *tcp; + + struct pkt_tuple pt = pkt.parsePkt((const uint8_t **)&tcp, &tmpHdrLen, &l5, &l5Len); + + if (pt.proto_id == IPPROTO_TCP) { + if (tcp->tcp_flags & 0x02) + tcpOpen = true; + if (tcp->tcp_flags & 0x01) + tcpClose = true; + } + + if (pkts.empty()) { + first = pkt.ts(); + hdrLen = tmpHdrLen; + memcpy(hdr, pkt.payload(), hdrLen); + } + last = pkt.ts(); + totLen += pkt.len(); + contentLen += l5Len; + + pkts.push_back(pkt); + + return Action::Part(pktId, l5 - pkt.payload(), l5Len); +} + +double HalfStream::getRate() const +{ + if (pkts.empty()) + return 0; + if (first == last) + return 1250000000; + + return totLen / (last - first); +} + +HalfStream::Action::Action(HalfStream* stream, const Part &p, bool isClient) + : halfStream(stream), m_isClient(isClient) +{ + addPart(p); +} + +void HalfStream::Action::addPart(const Part &p) +{ + parts.push_back(p); 
+} + +uint32_t HalfStream::Action::totLen() const +{ + uint32_t ret = 0; + + for (list<Part>::const_iterator i = parts.begin(); i != parts.end(); ++i) { + ret += (*i).len; + } + + return ret; +} + +void HalfStream::Action::toFile(ofstream *f) const +{ + for (list<Part>::const_iterator i = parts.begin(); i != parts.end(); ++i) { + const PcapPkt &pkt = halfStream->pkts[i->pktId]; + const uint8_t *payload = &pkt.payload()[i->offset]; + const uint16_t len = i->len; + + f->write((const char *)payload, len); + } +} + +HalfStream::HalfStream() + : first(0, 0), last(0, 0), totLen(0), hdrLen(0), contentLen(0), tcpOpen(false), tcpClose(false) +{ + +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/halfstream.hpp b/VNFs/DPPD-PROX/tools/flow_extract/halfstream.hpp new file mode 100644 index 00000000..6216979d --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/halfstream.hpp @@ -0,0 +1,63 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <inttypes.h> +#include <list> +#include <vector> + +#include "timestamp.hpp" +#include "pcappkt.hpp" + +struct HalfStream { + struct Action { + public: + struct Part { + Part(uint32_t pktId, uint32_t offset, uint32_t len) + : pktId(pktId), offset(offset), len(len) {} + uint32_t pktId; + uint32_t offset; + uint32_t len; + }; + + Action(HalfStream* stream, const Part &p, bool isClient); + void addPart(const Part& p); + bool isClient() const {return m_isClient;} + /* An action can consist of multiple + packets. The data is not stored in the + action. Instead, a packet id together with + an offset into the packet and a length is + kept to save space */ + void toFile(ofstream* f) const; + uint32_t totLen() const; + private: + HalfStream *halfStream; + bool m_isClient; + list<Part> parts; + }; + + HalfStream(); + Timestamp first; + Timestamp last; + uint64_t totLen; + uint64_t hdrLen; + uint8_t hdr[64]; + vector<PcapPkt> pkts; + uint64_t contentLen; + bool tcpOpen; + bool tcpClose; + Action::Part addPkt(const PcapPkt &pkt); + double getRate() const; +}; diff --git a/VNFs/DPPD-PROX/tools/flow_extract/main.cpp b/VNFs/DPPD-PROX/tools/flow_extract/main.cpp new file mode 100644 index 00000000..d1476c5f --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/main.cpp @@ -0,0 +1,37 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <inttypes.h> +#include <cstdlib> + +#include "streamextract.hpp" + +using namespace std; + +int main(int argc, char *argv[]) +{ + ProgramConfig programConfig; + + if (programConfig.parseOptions(argc, argv)) { + cerr << programConfig.getError() << endl; + cerr << programConfig.getUsage() << endl; + return EXIT_FAILURE; + } + + StreamExtract se(programConfig); + + return se.run(); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/mappedfile.cpp b/VNFs/DPPD-PROX/tools/flow_extract/mappedfile.cpp new file mode 100644 index 00000000..b2d1a9da --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/mappedfile.cpp @@ -0,0 +1,109 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <cstdlib> +#include <cstdio> +#include <unistd.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <iostream> +#include <cerrno> +#include <sys/mman.h> +#include <cstring> + +#include "mappedfile.hpp" + +static void zeroOutFile(int fd, size_t size) +{ + void *empty = calloc(1, 4096); + + while (size > 4096) { + write(fd, empty, 4096); + size -= 4096; + } + write(fd, empty, size); + free(empty); +} + +int MappedFile::open(const string& filePath, size_t size) +{ + mappedFileSize = size; + + fd = ::open(filePath.c_str(), O_RDWR | O_TRUNC | O_CREAT, S_IRUSR | S_IWUSR); + if (fd < 0) { + cerr << "Failed to open file " << filePath << ":" << strerror(errno) << endl; + return -1; + } + + zeroOutFile(fd, size); + data = mmap(NULL, mappedFileSize, PROT_WRITE | PROT_READ, MAP_SHARED, fd, 0); + + if (data == MAP_FAILED) { + cerr << "Failed to map file: " << strerror(errno) << endl; + return -1; + } + return 0; +} + +static size_t getFileSize(const string& filePath) +{ + struct stat s; + if (stat(filePath.c_str(), &s)) + return -1; + + return s.st_size; +} + +int MappedFile::open(const string& filePath) +{ + mappedFileSize = getFileSize(filePath); + + fd = ::open(filePath.c_str(), O_RDONLY); + if (fd < 0) { + cerr << "Failed to open file " << filePath << ":" << strerror(errno) << endl; + return -1; + } + + data = mmap(NULL, mappedFileSize, PROT_READ, MAP_SHARED, fd, 0); + + if (data == MAP_FAILED) { + cerr << "Failed to map file: " << strerror(errno) << endl; + return -1; + } + return 0; +} + +int MappedFile::sync() +{ + if (msync(data, mappedFileSize, MS_SYNC) == -1) { + cerr << "Failed to sync: " << strerror(errno) << endl; + return -1; + } + return 0; +} + + +void MappedFile::close() +{ + sync(); + munmap(data, mappedFileSize); + ::close(fd); +} + +size_t MappedFile::size() const +{ + return mappedFileSize; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/mappedfile.hpp b/VNFs/DPPD-PROX/tools/flow_extract/mappedfile.hpp new file mode 100644 index 
00000000..7bf79df5 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/mappedfile.hpp @@ -0,0 +1,40 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _MAPPEDFILE_H_ +#define _MAPPEDFILE_H_ + +#include <inttypes.h> +#include <string> + +using namespace std; + +class MappedFile { +public: + int open(const string& filePath, size_t size); + int open(const string& filePath); + void close(); + int sync(); + uint8_t* getMapBeg() {return (uint8_t *)data;} + uint8_t* getMapEnd() {return (uint8_t *)data + mappedFileSize;} + size_t size() const; +private: + int fd; + size_t mappedFileSize; + void *data; +}; + +#endif /* _MAPPEDFILE_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/memreader.cpp b/VNFs/DPPD-PROX/tools/flow_extract/memreader.cpp new file mode 100644 index 00000000..df77631c --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/memreader.cpp @@ -0,0 +1,106 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <cstdlib> + +#include "memreader.hpp" +#include "mappedfile.hpp" +#include "stream3.hpp" + +MemReader::MemReader(MappedFile *file, const vector<size_t> &offsets) +{ + initRanges(file->getMapBeg(), file->getMapEnd(), offsets); +} + +bool MemReader::read(Stream3 *stream) +{ + if (ranges.empty()) + return false; + + readStream(stream, getLowestID()); + removeEmptyRanges(); + return true; +} + +uint32_t MemReader::getLowestID() const +{ + uint32_t lowestID = UINT32_MAX; + uint32_t rangeID; + + for (size_t i = 0; i < ranges.size(); ++i) { + rangeID = Stream3::getIDFromMem(ranges[i].first); + if (rangeID < lowestID) + lowestID = rangeID; + } + return lowestID; +} + +void MemReader::readStream(Stream3 *stream, uint32_t id) +{ + stream->removeAllPackets(); + stream->setID(id); + + size_t len = 0; + for (size_t i = 0; i < ranges.size(); ++i) { + if (Stream3::getIDFromMem(ranges[i].first) == id) { + stream->addFromMemory(ranges[i].first, &len); + ranges[i].first += len; + } + } +} + +void MemReader::removeEmptyRanges() +{ + vector<pair <uint8_t *, uint8_t *> > original = ranges; + size_t destinationIdx = 0; + + for (size_t i = 0; i < original.size(); ++i) { + if (original[i].first < original[i].second) + ranges[destinationIdx++] = original[i]; + } + ranges.resize(destinationIdx); +} + +void MemReader::initRanges(uint8_t *begin, uint8_t *end, const vector<size_t> &offsets) +{ + ranges.resize(offsets.size()); + + totalLength = 0; + for (size_t i = 0; i < offsets.size(); ++i) { + ranges[i].first = begin + offsets[i]; + if (i != offsets.size() - 1) + ranges[i].second = begin + offsets[i + 1]; + else + ranges[i].second = end; + totalLength += ranges[i].second - ranges[i].first; + } + removeEmptyRanges(); +} + +size_t MemReader::getRangeLengths() const +{ + size_t total = 0; + + for (size_t i = 0; i < ranges.size(); ++i) { + total += ranges[i].second - 
ranges[i].first; + } + return total; +} + +size_t MemReader::consumed() const +{ + return totalLength - getRangeLengths(); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/memreader.hpp b/VNFs/DPPD-PROX/tools/flow_extract/memreader.hpp new file mode 100644 index 00000000..31be4c31 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/memreader.hpp @@ -0,0 +1,45 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _MEMREADER_H_ +#define _MEMREADER_H_ + +#include <vector> +#include <inttypes.h> + +using namespace std; + +class Stream3; +class MappedFile; + +class MemReader { +public: + MemReader(MappedFile *file, const vector<size_t> &offsets); + bool read(Stream3 *stream); + size_t getTotalLength() const {return totalLength;} + size_t consumed() const; +private: + size_t getRangeLengths() const; + uint32_t getLowestID() const; + void removeEmptyRanges(); + void readStream(Stream3 *stream, uint32_t id); + void initRanges(uint8_t *begin, uint8_t *end, const vector<size_t> &offsets); + + size_t totalLength; + vector<pair <uint8_t *, uint8_t *> > ranges; +}; + +#endif /* _MEMREADER_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/netsocket.cpp b/VNFs/DPPD-PROX/tools/flow_extract/netsocket.cpp new file mode 100644 index 00000000..8c61ba7d --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/netsocket.cpp @@ -0,0 +1,33 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under 
the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include "netsocket.hpp" + +NetSocket::NetSocket(uint32_t host, uint16_t port) + : host(host), port(port) +{ + +} + +bool NetSocket::operator>(const NetSocket& other) const +{ + return host > other.host || (host == other.host && port > other.port); +} + +bool NetSocket::operator<(const NetSocket& other) const +{ + return host < other.host || (host == other.host && port < other.port); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/netsocket.hpp b/VNFs/DPPD-PROX/tools/flow_extract/netsocket.hpp new file mode 100644 index 00000000..bfd6bec9 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/netsocket.hpp @@ -0,0 +1,31 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _NETSOCKET_H_ +#define _NETSOCKET_H_ + +#include <inttypes.h> + +struct NetSocket { + NetSocket() {} + NetSocket(uint32_t host, uint16_t port); + bool operator>(const NetSocket& other) const; + bool operator<(const NetSocket& other) const; + uint32_t host; + uint16_t port; +}; + +#endif /* _NETSOCKET_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/path.cpp b/VNFs/DPPD-PROX/tools/flow_extract/path.cpp new file mode 100644 index 00000000..7d94aae6 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/path.cpp @@ -0,0 +1,97 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/
+
+#include <iostream>
+#include <iomanip>
+#include <sys/stat.h>
+#include <sstream>
+#include <fstream>
+
+#include "path.hpp"
+
+bool Path::isDir() const
+{
+	struct stat s = { 0 };
+
+	if (stat(path.c_str(), &s)) {
+		return false;
+	}
+
+	return s.st_mode & S_IFDIR;
+}
+
+bool Path::isFile() const
+{
+	struct stat s = { 0 };
+
+	if (stat(path.c_str(), &s)) {
+		return false;
+	}
+
+	return s.st_mode & S_IFREG;
+}
+
+Path Path::add(const string& str) const
+{
+	stringstream ss;
+
+	ss << path << str;
+
+	return Path(ss.str());
+}
+
+Path Path::add(int number) const
+{
+	stringstream ss;
+
+	ss << path << number;
+
+	return Path(ss.str());
+}
+
+Path &Path::concat(const string &add)
+{
+	stringstream ss;
+
+	ss << path << add;
+	path = ss.str();
+
+	return *this;
+}
+
+int Path::mkdir() const
+{
+	if (!isDir())
+		return ::mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+	return 0;
+}
+
+std::ostream& operator<<(std::ofstream &stream, const Path &p)
+{
+	stream << p.path.c_str();
+
+	return stream;
+}
+
+string Path::getFileName() const
+{
+	for (size_t i = path.size(); i-- > 0; ) { /* size_t is unsigned: "i >= 0" never ends; test before decrement */
+		if (path[i] == '/') {
+			return path.substr(i + 1);
+		}
+	}
+	return path;
+}
diff --git a/VNFs/DPPD-PROX/tools/flow_extract/path.hpp b/VNFs/DPPD-PROX/tools/flow_extract/path.hpp
new file mode 100644
index 00000000..e56c9050
--- /dev/null
+++ b/VNFs/DPPD-PROX/tools/flow_extract/path.hpp
@@ -0,0 +1,42 @@
+/*
+// Copyright (c) 2010-2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _PATH_H_ +#define _PATH_H_ + +#include <string> + +using namespace std; + +class Path { +public: + Path(); + Path(const Path& other) : path(other.path) {} + Path(const string& str) : path(str) {} + Path add(const string& str) const; + Path add(int number) const; + Path &concat(const string &str); + const string& str() const {return path;} + bool isDir() const; + bool isFile() const; + string getFileName() const; + int mkdir() const; + friend std::ostream& operator<<(std::ofstream &stream, const Path &path); +private: + string path; +}; + +#endif /* _PATH_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcappkt.cpp b/VNFs/DPPD-PROX/tools/flow_extract/pcappkt.cpp new file mode 100644 index 00000000..91708bb1 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcappkt.cpp @@ -0,0 +1,266 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <pcap.h> +#include <inttypes.h> +#include <cstring> +#include <arpa/inet.h> +#include <iostream> +#include <fstream> +#include <cstdlib> +#include "allocator.hpp" +#include "pcappkt.hpp" + +Allocator *PcapPkt::allocator = NULL; + +void* PcapPkt::operator new(size_t size) +{ + if (allocator) + return allocator->alloc(size); + else + return ::operator new(size); +} + +void PcapPkt::operator delete(void *pointer) +{ + if (!allocator) + :: operator delete(pointer); +} + +PcapPkt::PcapPkt(uint8_t *mem) +{ + header = *(struct pcap_pkthdr *)mem; + mem += sizeof(header); + buf = new uint8_t[header.len]; + memcpy(buf, mem, header.len); +} + +PcapPkt::PcapPkt() +{ + buf = new uint8_t[1514]; + memset(&header, 0, sizeof(header)); +} + +PcapPkt::PcapPkt(const PcapPkt& other) +{ + if (!allocator) { + buf = new uint8_t[other.len()]; + } + else { + buf = (uint8_t *)allocator->alloc(other.len()); + } + + memcpy(buf, other.buf, other.len()); + header = other.header; +} + +PcapPkt::~PcapPkt() +{ + if (!allocator) + delete[] buf; +} + +#define ETYPE_IPv4 0x0008 /* IPv4 in little endian */ +#define ETYPE_IPv6 0xDD86 /* IPv6 in little endian */ +#define ETYPE_ARP 0x0608 /* ARP in little endian */ +#define ETYPE_VLAN 0x0081 /* 802-1aq - VLAN */ +#define ETYPE_MPLSU 0x4788 /* MPLS unicast */ +#define ETYPE_MPLSM 0x4888 /* MPLS multicast */ +#define ETYPE_8021ad 0xA888 /* Q-in-Q */ +#define ETYPE_LLDP 0xCC88 /* Link Layer Discovery Protocol (LLDP) */ +#define ETYPE_EoGRE 0x5865 /* EoGRE in little endian */ + +struct ipv4_hdr { + uint8_t version_ihl; /**< version and header length */ + uint8_t type_of_service; /**< type of service */ + uint16_t total_length; /**< length of packet */ + uint16_t packet_id; /**< packet ID */ + uint16_t fragment_offset; /**< fragmentation offset */ + uint8_t time_to_live; /**< time to live */ + uint8_t next_proto_id; /**< protocol ID */ + uint16_t hdr_checksum; /**< header checksum */ + uint32_t src_addr; /**< source address */ + uint32_t 
dst_addr;          /**< destination address */
+} __attribute__((__packed__));
+
+struct ether_addr {
+	uint8_t addr_bytes[6]; /**< Address bytes in transmission order */
+} __attribute__((__packed__));
+
+struct ether_hdr {
+	struct ether_addr d_addr; /**< Destination address. */
+	struct ether_addr s_addr; /**< Source address. */
+	uint16_t ether_type;      /**< Frame type. */
+} __attribute__((__packed__));
+
+struct vlan_hdr {
+	uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */
+	uint16_t eth_proto;/**< Ethernet type of encapsulated frame. */
+} __attribute__((__packed__));
+
+struct udp_hdr {
+	uint16_t src_port;    /**< UDP source port. */
+	uint16_t dst_port;    /**< UDP destination port. */
+	uint16_t dgram_len;   /**< UDP datagram length */
+	uint16_t dgram_cksum; /**< UDP datagram checksum */
+} __attribute__((__packed__));
+
+struct pkt_tuple PcapPkt::parsePkt(const uint8_t **l4_hdr, uint16_t *hdr_len, const uint8_t **l5, uint32_t *l5_len) const
+{
+	struct pkt_tuple pt = {0};
+
+	const struct ether_hdr *peth = (struct ether_hdr *)buf;
+	int l2_types_count = 0;
+	const struct ipv4_hdr* pip = 0;
+
+	switch (peth->ether_type) {
+	case ETYPE_IPv4:
+		pip = (const struct ipv4_hdr *)(peth + 1);
+		break;
+	case ETYPE_VLAN: {
+		const struct vlan_hdr *vlan = (const struct vlan_hdr *)(peth + 1);
+		if (vlan->eth_proto == ETYPE_IPv4) {
+			pip = (const struct ipv4_hdr *)(vlan + 1); /* was (peth + 1): must skip the 4-byte VLAN tag */
+		}
+		else if (vlan->eth_proto == ETYPE_VLAN) {
+			const struct vlan_hdr *vlan2 = (const struct vlan_hdr *)(vlan + 1); /* inner tag follows the outer */
+			if (vlan2->eth_proto == ETYPE_IPv4) {
+				pip = (const struct ipv4_hdr *)(vlan2 + 1); /* IPv4 after both tags */
+			}
+			else if (vlan2->eth_proto == ETYPE_IPv6) {
+				throw 0;
+			}
+			else {
+				/* TODO: handle BAD PACKET */
+				throw 0;
+			}
+		}
+	}
+		break;
+	case ETYPE_8021ad: {
+		const struct vlan_hdr *vlan = (const struct vlan_hdr *)(peth + 1);
+		if (vlan->eth_proto == ETYPE_VLAN) {
+			const struct vlan_hdr *vlan2 = (const struct vlan_hdr *)(vlan + 1); /* S-tag then C-tag */
+			if (vlan2->eth_proto == ETYPE_IPv4) {
+				pip = (const struct ipv4_hdr *)(vlan2 + 1); /* was (peth + 1): skipped both tags */
+			}
+			else {
+				throw 0;
+			}
+		}
+		else {
+			throw 0;
+		}
+	}
+		break;
+	case ETYPE_MPLSU:
+		break;
+	default:
+		break;
+	}
+
+	/* L3: pip is still NULL for unhandled L2 types (e.g. MPLS) — guard before dereference */
+	if (pip && (pip->version_ihl >> 4) == 4) {
+
+		if ((pip->version_ihl & 0x0f) != 0x05) {
+			/* TODO: optional fields */
+			throw 0;
+		}
+
+		pt.proto_id = pip->next_proto_id;
+		pt.src_addr = pip->src_addr;
+		pt.dst_addr = pip->dst_addr;
+	}
+	else {
+		/* TODO: IPv6 and bad packets */
+		throw 0;
+	}
+
+	/* L4 parser */
+	if (pt.proto_id == IPPROTO_UDP) {
+		const struct udp_hdr *udp = (const struct udp_hdr*)(pip + 1);
+		if (l4_hdr)
+			*l4_hdr = (const uint8_t*)udp;
+		if (hdr_len)
+			*hdr_len = (const uint8_t*)udp - buf;
+		pt.src_port = udp->src_port;
+		pt.dst_port = udp->dst_port;
+		if (l5)
+			*l5 = ((const uint8_t*)udp) + sizeof(struct udp_hdr);
+		if (l5_len)
+			*l5_len = ntohs(udp->dgram_len) - sizeof(struct udp_hdr);
+	}
+	else if (pt.proto_id == IPPROTO_TCP) {
+		const struct tcp_hdr *tcp = (const struct tcp_hdr *)(pip + 1);
+		if (l4_hdr)
+			*l4_hdr = (const uint8_t*)tcp;
+		if (hdr_len)
+			*hdr_len = (const uint8_t*)tcp - buf;
+		pt.src_port = tcp->src_port;
+		pt.dst_port = tcp->dst_port;
+
+		if (l5)
+			*l5 = ((const uint8_t*)tcp) + ((tcp->data_off >> 4)*4);
+		if (l5_len)
+			*l5_len = ntohs(pip->total_length) - sizeof(struct ipv4_hdr) - ((tcp->data_off >> 4)*4);
+	}
+	else {
+		fprintf(stderr, "unsupported protocol %d\n", pt.proto_id);
+		throw 0;
+	}
+
+	return pt;
+}
+
+void PcapPkt::toMem(uint8_t *mem) const
+{
+	memcpy(mem, &header, sizeof(header));
+	mem += sizeof(header);
+	memcpy(mem, buf, header.len);
+}
+
+void PcapPkt::fromMem(uint8_t *mem)
+{
+	memcpy(&header, mem, sizeof(header));
+	mem += sizeof(header);
+	memcpy(buf, mem, header.len);
+}
+
+void PcapPkt::toFile(ofstream *file) const
+{
+	file->write(reinterpret_cast<const char *>(&header), sizeof(header));
+	file->write(reinterpret_cast<const char *>(buf), header.len);
+}
+size_t PcapPkt::memSize() const
+{
+	return sizeof(header) + header.len;
+}
+
+PcapPkt::L4Proto PcapPkt::getProto() const +{ + struct pkt_tuple pt = parsePkt(); + return pt.proto_id == IPPROTO_TCP? PROTO_TCP : PROTO_UDP; +} + +ostream& operator<<(ostream& stream, const pkt_tuple &other) +{ + stream << other.src_addr << "," + << other.dst_addr << "," + << (int)other.proto_id << "," + << other.src_port << "," + << other.dst_port; + return stream; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcappkt.hpp b/VNFs/DPPD-PROX/tools/flow_extract/pcappkt.hpp new file mode 100644 index 00000000..e437c790 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcappkt.hpp @@ -0,0 +1,104 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PCAPPKT_H_ +#define _PCAPPKT_H_ + +#include <inttypes.h> +#include <pcap.h> +#include <string> +#include <cstring> + +using namespace std; + +struct pkt_tuple { + uint32_t src_addr; + uint32_t dst_addr; + uint8_t proto_id; + uint16_t src_port; + uint16_t dst_port; + bool operator!=(const pkt_tuple& other) const + { + return src_addr != other.src_addr || + dst_addr != other.dst_addr || + proto_id != other.proto_id || + src_port != other.src_port || + dst_port != other.dst_port; + } + bool operator==(const pkt_tuple& other) const + { + return src_addr == other.src_addr && + dst_addr == other.dst_addr && + proto_id == other.proto_id && + src_port == other.src_port && + dst_port == other.dst_port; + } + friend ostream& operator<<(ostream& stream, const pkt_tuple &other); + struct pkt_tuple flip() const + { + struct pkt_tuple ret; + + ret = *this; + ret.src_addr = dst_addr; + ret.src_port = dst_port; + ret.dst_addr = src_addr; + ret.dst_port = src_port; + return ret; + } + +} __attribute__((packed)); + +class Allocator; + +class PcapPkt { + friend class PcapReader; +public: + struct tcp_hdr { + uint16_t src_port; /**< TCP source port. */ + uint16_t dst_port; /**< TCP destination port. */ + uint32_t sent_seq; /**< TX data sequence number. */ + uint32_t recv_ack; /**< RX data acknowledgement sequence number. */ + uint8_t data_off; /**< Data offset. */ + uint8_t tcp_flags; /**< TCP flags */ + uint16_t rx_win; /**< RX flow control window. */ + uint16_t cksum; /**< TCP checksum. */ + uint16_t tcp_urp; /**< TCP urgent pointer, if any. 
*/ + } __attribute__((__packed__)); + + static Allocator *allocator; + enum L4Proto {PROTO_TCP, PROTO_UDP}; + PcapPkt(); + void* operator new(size_t size); + static void operator delete(void *pointer); + PcapPkt(const PcapPkt& other); + PcapPkt(uint8_t *mem); + void toMem(uint8_t *mem) const; + void fromMem(uint8_t *mem); + void toFile(ofstream *file) const; + size_t memSize() const; + const struct timeval &ts() const {return header.ts;} + const uint16_t len() const {return header.len;} + pkt_tuple parsePkt(const uint8_t **l4_hdr = NULL, uint16_t *hdr_len = NULL, const uint8_t **l5 = NULL, uint32_t *l5_len = NULL) const; + const struct pcap_pkthdr &hdr() const {return header;} + const uint8_t *payload() const {return buf;} + enum L4Proto getProto() const; + ~PcapPkt(); +private: + struct pcap_pkthdr header; + uint8_t *buf; +}; + +#endif /* _PCAPPKT_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcappktref.cpp b/VNFs/DPPD-PROX/tools/flow_extract/pcappktref.cpp new file mode 100644 index 00000000..2a0f2f05 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcappktref.cpp @@ -0,0 +1,32 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "pcappktref.hpp" + +PcapPktRef::PcapPktRef(const PcapPktRef &other) + : pos(other.pos), pr(other.pr) +{ +} + +PcapPkt PcapPktRef::getPcapPkt() +{ + PcapPkt ret; + + if (!pr->readOnce(&ret, pos)) { + cerr << "failed to read pcap from pcap pkt ref" << endl; + } + return ret; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcappktref.hpp b/VNFs/DPPD-PROX/tools/flow_extract/pcappktref.hpp new file mode 100644 index 00000000..1afaf2a5 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcappktref.hpp @@ -0,0 +1,40 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PCAPPKTREF_H_ +#define _PCAPPKTREF_H_ + +#include <iostream> + +#include "pcapreader.hpp" +#include "pcappkt.hpp" + +using namespace std; + +class PcapPktRef +{ +public: + PcapPktRef(uint64_t pos, PcapReader *pr) : pos(pos), pr(pr) {} + PcapPktRef(const PcapPktRef &other); + PcapPktRef() : pos(0), pr(0) {} + bool isValid() const {return pos != 0;} + PcapPkt getPcapPkt(); +private: + uint64_t pos; + PcapReader *pr; +}; + +#endif /* _PCAPPKTREF_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcapreader.cpp b/VNFs/DPPD-PROX/tools/flow_extract/pcapreader.cpp new file mode 100644 index 00000000..6b5a6734 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcapreader.cpp @@ -0,0 +1,76 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/
+
+#include <pcap.h>
+#include <cstring>
+#include <linux/in.h>
+
+#include "pcapreader.hpp"
+
+int PcapReader::open(const string& file_path)
+{
+	char err_str[PCAP_ERRBUF_SIZE];
+
+	if (m_handle) {
+		m_error = "Pcap file already open";
+		return -1;
+	}
+
+	m_handle = pcap_open_offline_with_tstamp_precision(file_path.c_str(),
+							   PCAP_TSTAMP_PRECISION_NANO,
+							   err_str);
+
+	if (!m_handle) {
+		m_error = "Failed to open pcap file";
+		return -1;
+	}
+
+	m_file_beg = ftell(pcap_file(m_handle));
+	fseek(pcap_file(m_handle), 0, SEEK_END);
+	m_file_end = ftell(pcap_file(m_handle));
+	fseek(pcap_file(m_handle), m_file_beg, SEEK_SET);
+
+	return 0;
+}
+
+int PcapReader::readOnce(PcapPkt *pkt, uint64_t pos)
+{
+	return -1;
+}
+
+int PcapReader::read(PcapPkt *pkt)
+{
+	if (!m_handle) {
+		m_error = "No pcap file opened"; return 0; /* never pass a NULL handle to pcap_next() */
+	}
+
+	const uint8_t *buf = pcap_next(m_handle, &pkt->header);
+
+	if (buf) {
+		memcpy(pkt->buf, buf, pkt->header.len);
+		pktReadCount++;
+	}
+
+	return !!buf;
+}
+
+void PcapReader::close()
+{
+	if (m_handle)
+		pcap_close(m_handle);
+
+	m_handle = NULL;
+}
diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcapreader.hpp b/VNFs/DPPD-PROX/tools/flow_extract/pcapreader.hpp
new file mode 100644
index 00000000..3766c67b
--- /dev/null
+++ b/VNFs/DPPD-PROX/tools/flow_extract/pcapreader.hpp
@@ -0,0 +1,48 @@
+/*
+// Copyright (c) 2010-2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/ + +#ifndef _PCAPREADER_H_ +#define _PCAPREADER_H_ + +#include <inttypes.h> +#include <string> + +#include <pcap.h> + +#include "pcappkt.hpp" + +using namespace std; + +class PcapReader { +public: + PcapReader() : m_handle(NULL), pktReadCount(0) {} + int open(const string& file_path); + size_t pos() {return ftell(pcap_file(m_handle)) - m_file_beg;} + size_t end() {return m_file_end;} + int read(PcapPkt *pkt); + int readOnce(PcapPkt *pkt, uint64_t pos); + size_t getPktReadCount() const {return pktReadCount;} + void close(); + const string &getError() const {return m_error;} +private: + pcap_t *m_handle; + size_t m_file_beg; + size_t m_file_end; + size_t pktReadCount; + string m_error; +}; + +#endif /* _PCAPREADER_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcapwriter.cpp b/VNFs/DPPD-PROX/tools/flow_extract/pcapwriter.cpp new file mode 100644 index 00000000..4c7c4cea --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcapwriter.cpp @@ -0,0 +1,46 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include "pcapwriter.hpp" + +int PcapWriter::open(const string& file_path) +{ + m_handle = pcap_open_dead_with_tstamp_precision(DLT_EN10MB, 65536, PCAP_TSTAMP_PRECISION_NANO); + if (m_handle == NULL) + return -1; + + m_pcap_dumper = pcap_dump_open(m_handle, file_path.c_str()); + if (m_pcap_dumper == NULL) { + pcap_close(m_handle); + return -1; + } + + return 0; +} + +int PcapWriter::write(const PcapPkt& pkt) +{ + pcap_dump((unsigned char *)m_pcap_dumper, &pkt.hdr(), pkt.payload()); + return 0; +} + +void PcapWriter::close() +{ + if (m_pcap_dumper) + pcap_dump_close(m_pcap_dumper); + if (m_handle) + pcap_close(m_handle); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/pcapwriter.hpp b/VNFs/DPPD-PROX/tools/flow_extract/pcapwriter.hpp new file mode 100644 index 00000000..32f79369 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/pcapwriter.hpp @@ -0,0 +1,33 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PCAPWRITER_H_ +#define _PCAPWRITER_H_ + +#include "pcappkt.hpp" + +class PcapWriter { +public: + PcapWriter() {} + int open(const string& file_path); + int write(const PcapPkt& pkt); + void close(); +private: + pcap_t *m_handle; + pcap_dumper_t *m_pcap_dumper; +}; + +#endif /* _PCAPWRITER_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/programconfig.cpp b/VNFs/DPPD-PROX/tools/flow_extract/programconfig.cpp new file mode 100644 index 00000000..7b1e18e1 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/programconfig.cpp @@ -0,0 +1,119 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#include <sstream> +#include <getopt.h> +#include <iostream> +#include <cstdlib> +#include "programconfig.hpp" + +ProgramConfig::ProgramConfig() + : path_file_in_pcap(""), path_dir_out("output"), + path_file_dest_lua("lua"), max_pkts(UINT32_MAX), + max_streams(UINT32_MAX), sampleCount(20000), flowTableSize(8*1024*1024), + run_first_step(true), write_pcaps(false) +{ +} + +string ProgramConfig::getUsage() const +{ + stringstream ret; + + ret << "Usage example: "<< m_programName << " -i in.pcap\n\n" + << "Flow Extract 2.0 analyzes and extracts a traffic profile\n" + << "configuration from a pcap file. 
The output is a lua\n" + << "configuration file and a binary file containing all the\n" + << "headers and payloads for each stream.\n\n" + + << "The program supports analyzing large pcap file (> 300 GB).\n" + << "For this, it uses a multi-pass approach. The output of \n" + << "intermediary steps is stored in the working directory. The\n" + << "algorithm can be described by the following steps:\n\n" + << " 1. The pcap file in read chunks of 16 GB. The packets in\n" + << " each chunk are associated with streams. The streams are\n" + << " ordered through a global ID. Each stream is stored as a" + << " sequence of packets that belong to that stream. The\n" + << " resulting file at 'DIR/tmp' where DIR is specified\n" + << " through -o options as shown below.\n" + << " Each chunk in tmp is merged and the result is written\n" + << " to file1. Reading the stream with a given ID from all chunks\n" + << " gets all the packets for the stream from the whole pcap in\n" + << " memory. This first step forms is implemented by an\n" + << " external sorting algorithm.\n" + << " 2. File2 is read and the source IP for each stream is used to\n" + << " associate each stream with a bundle. SAMPLE_COUNT samples\n" + << " are taken from the set of bundles. The set of streams that\n" + << " are still referenced by the sampled bundles extracted from\n" + << " file2 and written to the final binary file. This binary file\n" + << " is referenced from the lua configuration. The lua config file\n" + << " is written out as part of this step.\n" + << "Arguments:\n" + << "-i FILE Input pcap to process\n" + << "-o DIR output directory and working directory\n" + << "-s SAMPLE_COUNT Number of samples to take (default is 20K)\n" + << "-k Skip the first step as described above. 
Useful to\n" + << " adjust the number of samples without having to\n" + << " repeat the whole process\n"; + + + return ret.str(); +} + +int ProgramConfig::checkConfig() +{ + if (path_file_in_pcap.empty()) { + m_error = "Missing input pcap file\n"; + return -1; + } + return 0; +} + +int ProgramConfig::parseOptions(int argc, char *argv[]) +{ + char c; + + m_programName = argv[0]; + while ((c = getopt(argc, argv, "hki:o:s:p")) != -1) { + switch (c) { + case 'h': + return -1; + break; + case 'k': + run_first_step = false; + break; + case 'i': + path_file_in_pcap = optarg; + break; + case 'o': + path_dir_out = optarg; + break; + case 's': + sampleCount = atoi(optarg); + break; + case 'p': + write_pcaps = true; + break; + case '?': + cerr << getUsage() << endl; + return 0; + default: + m_error = "Invalid parameter\n"; + return -1; + } + } + + return checkConfig(); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/programconfig.hpp b/VNFs/DPPD-PROX/tools/flow_extract/programconfig.hpp new file mode 100644 index 00000000..59b7104d --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/programconfig.hpp @@ -0,0 +1,47 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _PROGRAMCONFIG_H_ +#define _PROGRAMCONFIG_H_ + +#include <string> +#include <inttypes.h> + +using namespace std; + +class ProgramConfig { +public: + ProgramConfig(); + string getUsage() const; + int parseOptions(int argc, char *argv[]); + const string& getError() const {return m_error;} + + string path_file_in_pcap; + string path_dir_out; + string path_file_dest_lua; + uint32_t max_pkts; + uint32_t max_streams; + uint32_t sampleCount; + uint32_t flowTableSize; + bool run_first_step; + bool write_pcaps; +private: + int checkConfig(); + string m_error; + string m_programName; +}; + +#endif /* _PROGRAMCONFIG_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/progress.cpp b/VNFs/DPPD-PROX/tools/flow_extract/progress.cpp new file mode 100644 index 00000000..2c65960f --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/progress.cpp @@ -0,0 +1,96 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#include <sys/time.h>
#include <iostream>
#include <cstdio>
#include <sstream>

#include "progress.hpp"

/* Current wall-clock time in whole seconds. */
static uint64_t getSec()
{
	struct timeval tv;

	gettimeofday(&tv, NULL);
	return tv.tv_sec;
}

Progress::Progress(size_t maxProgress, bool inPlace, bool showElapsedTime)
	: maxProgress(maxProgress), curProgress(0), inPlace(inPlace), showElapsedTime(showElapsedTime), prevLength(0), title("Progress")
{
	lastRefresh = -1;
	firstRefresh = getSec();
}

void Progress::setProgress(size_t curProgress)
{
	this->curProgress = curProgress;
}

/* Marks the task as complete (progress == max). */
void Progress::setProgress()
{
	this->curProgress = maxProgress;
}

/* Registers an extra "name: value" field appended to the progress line;
   returns its index for later setDetail() calls. */
uint32_t Progress::addDetail(const string& detail)
{
	details.push_back(make_pair(detail, 0));
	return details.size() - 1;
}

void Progress::setDetail(uint32_t idx, uint32_t val)
{
	details[idx].second = val;
}

/* True at most once per second, throttling console updates. */
bool Progress::couldRefresh()
{
	uint32_t cur = getSec();

	return (lastRefresh != cur);
}

void Progress::refresh(bool withNewLine)
{
	lastRefresh = getSec();
	uint64_t elapsed = lastRefresh - firstRefresh;
	size_t progress = curProgress * 100 / maxProgress;
	/* Linear ETA: elapsed * (max - cur) / cur; 0 before any progress. */
	size_t remainingTime = curProgress? (elapsed * maxProgress - elapsed * curProgress) / curProgress : 0;

	stringstream ss;

	if (inPlace)
		ss << "\r";
	ss << title << ": " << progress << "%";
	ss << ", remaining: " << remainingTime;
	if (showElapsedTime)
		ss << ", elapsed: " << elapsed;
	for (size_t i = 0; i < details.size(); ++i)
		ss << ", " << details[i].first << ": " << details[i].second;

	/* Pad with spaces so a shorter line fully overwrites the previous
	   one when redrawing in place with "\r". */
	size_t prevLength2 = ss.str().size();

	while (ss.str().size() < prevLength)
		ss << " ";
	prevLength = prevLength2;

	if (!inPlace || withNewLine)
		ss << "\n";

	cout << ss.str();
	cout.flush();
}
#ifndef _PROGRESS_H_
#define _PROGRESS_H_

#include <inttypes.h>
#include <vector>
#include <utility>
#include <string>

using namespace std;

/* Console progress reporter: prints "<title>: N%, remaining: ..." with
   optional elapsed time and user-supplied detail counters, redrawing
   in place (with "\r") or line by line. */
class Progress {
public:
	Progress(size_t maxProgress, bool inPlace = true, bool showElapsedTime = true);
	/* Bug fix: the original body was "this->title = title;" — a
	   self-assignment that silently ignored the caller's argument. */
	void setTitle(const string &prefix) {this->title = prefix;}
	void setProgress(size_t curProgress);
	/* Marks the task complete (progress == max). */
	void setProgress();
	/* Registers an extra "name: value" field; returns its index. */
	uint32_t addDetail(const string& detail);
	void clearDetails() {details.clear();}
	void setDetail(uint32_t idx, uint32_t val);
	/* True at most once per second, throttling console updates. */
	bool couldRefresh();
	void refresh(bool withNewLine = false);
private:
	uint64_t firstRefresh;
	uint64_t lastRefresh;
	size_t maxProgress;
	size_t curProgress;
	bool inPlace;
	bool showElapsedTime;
	size_t prevLength;	// length of last printed line, for padding
	string title;
	vector<pair<string, uint32_t> > details;
};

#endif /* _PROGRESS_H_ */
+*/ + +#include <iostream> +#include <iomanip> +#include <arpa/inet.h> + +#include "pcapwriter.hpp" +#include "stream.hpp" + +Stream::Stream(uint32_t id, uint32_t sizeHint) + : m_id(id), m_prevPktIsClient(false) +{ + m_client.pkts.reserve(sizeHint / 2); + m_server.pkts.reserve(sizeHint / 2); + m_pkts.reserve(sizeHint); +} + +bool Stream::isClient(const PcapPkt &pkt) const +{ + return m_pt == pkt.parsePkt(); +} + +size_t Stream::pktCount() const +{ + return m_client.pkts.size() + m_server.pkts.size(); +} + +void Stream::setTupleFromPkt(const PcapPkt &pkt) +{ + m_pt = pkt.parsePkt(); +} + +void Stream::addPkt(const PcapPkt &pkt) +{ + if (!pktCount()) + setTupleFromPkt(pkt); + + bool isClientPkt = isClient(pkt); + HalfStream *half; + + if (isClientPkt) + half = &m_client; + else + half = &m_server; + + HalfStream::Action::Part p = half->addPkt(pkt); + + if (p.len) { + addAction(half, p, isClientPkt); + } + + m_pkts.push_back(pkt); +} + +void Stream::addAction(HalfStream *half, HalfStream::Action::Part p, bool isClientPkt) +{ + if (m_actions.empty() || m_prevPktIsClient != isClientPkt || m_pt.proto_id == IPPROTO_UDP) + m_actions.push_back(HalfStream::Action(half, p, isClientPkt)); + else + m_actions.back().addPart(p); + m_prevPktIsClient = isClientPkt; +} + +Stream::Header Stream::getHeader() const +{ + Header h; + + h.streamId = m_id; + h.clientHdrLen = m_client.hdrLen; + h.clientContentLen = m_client.contentLen; + h.serverHdrLen = m_server.hdrLen; + h.serverContentLen = m_server.contentLen; + h.actionCount = m_actions.size(); + h.clientIP = m_pt.src_addr; + h.clientPort = m_pt.src_port; + h.serverIP = m_pt.dst_addr; + h.serverPort = m_pt.dst_port; + h.upRate = m_client.getRate(); + h.dnRate = m_server.getRate(); + h.protocol = m_pt.proto_id; + h.completedTCP = (m_client.tcpOpen && m_client.tcpClose && m_server.tcpOpen && m_server.tcpClose) || + (!m_client.tcpOpen && !m_client.tcpClose && !m_server.tcpOpen && !m_server.tcpClose); + + return h; +} + +void 
Stream::Header::toFile(ofstream *f) const +{ + f->write((const char *)this, sizeof(*this)); +} + +int Stream::Header::fromFile(ifstream *f) +{ + const size_t readSize = sizeof(*this); + + f->read((char *)this, readSize); + return f->gcount() == readSize? 0 : -1; +} + +size_t Stream::Header::getStreamLen() const +{ + return actionCount * sizeof(ActionEntry) + + clientHdrLen + clientContentLen + + serverHdrLen + serverContentLen; +} + +void Stream::actionsToFile(ofstream *f) const +{ + ActionEntry actionEntry; + uint32_t runningTotalLen[2] = {0}; + + for (size_t i = 0; i < m_actions.size(); ++i) { + actionEntry.peer = m_actions[i].isClient()? 0 : 1; + actionEntry.beg = runningTotalLen[actionEntry.peer]; + actionEntry.len = m_actions[i].totLen(); + + runningTotalLen[actionEntry.peer] += actionEntry.len; + f->write((const char *)&actionEntry, sizeof(actionEntry)); + } +} + +void Stream::clientHdrToFile(ofstream *f) const +{ + f->write((const char *)m_client.hdr, m_client.hdrLen); +} + +void Stream::serverHdrToFile(ofstream *f) const +{ + f->write((const char *)m_server.hdr, m_server.hdrLen); +} + +void Stream::contentsToFile(ofstream *f, bool isClient) const +{ + for (size_t i = 0; i < m_actions.size(); ++i) + if (m_actions[i].isClient() == isClient) + m_actions[i].toFile(f); +} + +void Stream::toFile(ofstream *f) +{ + getHeader().toFile(f); + actionsToFile(f); + clientHdrToFile(f); + serverHdrToFile(f); + contentsToFile(f, true); + contentsToFile(f, false); +} + +void Stream::toPcap(const string& outFile) +{ + PcapWriter pw; + + pw.open(outFile); + for (size_t i = 0; i < m_pkts.size(); ++i) + pw.write(m_pkts[i]); + pw.close(); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/stream.hpp b/VNFs/DPPD-PROX/tools/flow_extract/stream.hpp new file mode 100644 index 00000000..28547d18 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/stream.hpp @@ -0,0 +1,94 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the 
#ifndef _STREAM_H_
#define _STREAM_H_

#include <list>
#include <string>
#include <fstream>
#include <cstring>
#include <vector>
#include <cstdlib>
#include <sys/time.h>

#include "pcappktref.hpp"
#include "pcappkt.hpp"
#include "netsocket.hpp"
#include "timestamp.hpp"
#include "halfstream.hpp"

using namespace std;

class PcapReader;

/* A bidirectional flow reconstructed from a pcap: packets are split
   into a client and a server HalfStream, and payload is grouped into
   alternating "actions" (request/reply units) for replay. */
class Stream {
public:
	/* Written to disk verbatim (memcpy of the struct), so the binary
	   file format is this struct's in-memory layout, padding included.
	   NOTE(review): layout is compiler/ABI dependent — files are only
	   portable between builds with identical layout; confirm. */
	struct Header {
		uint32_t streamId;
		uint16_t clientHdrLen;
		uint32_t clientContentLen;
		uint16_t serverHdrLen;
		uint32_t serverContentLen;
		uint32_t actionCount;
		uint32_t clientIP;
		uint16_t clientPort;
		uint32_t serverIP;
		uint16_t serverPort;
		double upRate;
		double dnRate;
		uint8_t protocol;	// IP protocol (6 = TCP, 17 = UDP)
		uint8_t completedTCP;	// 1 when TCP opened+closed both ways (or non-TCP)
		void toFile(ofstream *f) const;
		/* Returns 0 on success, -1 on short read (EOF). */
		int fromFile(ifstream *f);
		/* Byte count of the serialized body following the header. */
		size_t getStreamLen() const;
	};
	/* One entry of the on-disk action table; packed because it is
	   written/read as raw bytes. */
	struct ActionEntry {
		uint8_t peer;	// 0 = client, 1 = server
		uint32_t beg;	// byte offset within that peer's content
		uint32_t len;
	} __attribute__((packed));

	Stream(uint32_t id = -1, uint32_t sizeHint = 0);
	void addPkt(const PcapPkt &pkt);
	void toFile(ofstream *f);
	void toPcap(const string& outFile);
	double getRate() const;
	size_t actionCount() const {return m_actions.size();}

private:
	Header getHeader() const;
	void actionsToFile(ofstream *f) const;
	void clientHdrToFile(ofstream *f) const;
	void serverHdrToFile(ofstream *f) const;
	void contentsToFile(ofstream *f, bool isClient) const;
	/* True when the packet's tuple matches the first packet's tuple. */
	bool isClient(const PcapPkt &pkt) const;
	size_t pktCount() const;
	struct pkt_tuple m_pt;	// tuple of the first (client) packet
	void setTupleFromPkt(const PcapPkt &pkt);
	void addToClient(const PcapPkt &pkt);
	void addToServer(const PcapPkt &pkt);
	void addAction(HalfStream *half, HalfStream::Action::Part p, bool isClientPkt);

	/* NOTE(review): m_id is int but the constructor takes uint32_t and
	   Header::streamId is uint32_t — ids above INT_MAX would wrap. */
	int m_id;
	vector<PcapPkt> m_pkts;	// all packets, kept for toPcap()
	vector<HalfStream::Action> m_actions;
	HalfStream m_client;
	HalfStream m_server;
	bool m_prevPktIsClient;
};

#endif /* _STREAM_H_ */
+*/ + +#include <iomanip> +#include <arpa/inet.h> +#include <sstream> + +#include "stream.hpp" +#include "stream2.hpp" + +int Stream2::fromFile(ifstream *f) +{ + m_actions.clear(); + if (streamHdr.fromFile(f)) + return -1; + if (actionsFromFile(f, streamHdr.actionCount)) + return -1; + if (setReferences(f)) + return -1; + + return 0; +} + +int Stream2::actionsFromFile(ifstream *f, size_t actionCount) +{ + m_actions.resize(actionCount); + for (size_t i = 0; i < actionCount; ++i) + f->read((char *)&m_actions[i], sizeof(Stream::ActionEntry)); + + return 0; +} + +int Stream2::setReferences(ifstream *f) +{ + size_t toRead = streamHdr.clientHdrLen + + streamHdr.serverHdrLen + + streamHdr.clientContentLen + + streamHdr.serverContentLen; + + delete [] clientServerHdrContent; + clientServerHdrContent = new uint8_t[toRead]; + f->read((char *)clientServerHdrContent, toRead); + return 0; +} + +void Stream2::calcOffsets(ofstream *out) +{ + size_t curPos = out->tellp(); + + clientHdrBeg = curPos; + serverHdrBeg = clientHdrBeg + streamHdr.clientHdrLen; + clientContentBeg = serverHdrBeg + streamHdr.serverHdrLen; + serverContentBeg = clientContentBeg + streamHdr.clientContentLen; +} + +void Stream2::toFile(ofstream *out) const +{ + size_t len = streamHdr.clientHdrLen + + streamHdr.serverHdrLen + + streamHdr.clientContentLen + + streamHdr.serverContentLen; + + out->write((const char *)clientServerHdrContent, len); +} + +static string ipToString(const uint32_t ip) +{ + uint32_t ip_ne = htonl(ip); + stringstream ss; + + ss << ((ip_ne >> 24) & 0xff) << "." + << ((ip_ne >> 16) & 0xff) << "." + << ((ip_ne >> 8) & 0xff) << "." 
+ << (ip_ne & 0xff); + + return ss.str(); +} + +static string spaces(uint32_t count) +{ + stringstream ss; + + while (count--) + ss << " "; + return ss.str(); +} + +NetSocket Stream2::getServerNetSocket() const +{ + return NetSocket(streamHdr.serverIP, ntohs(streamHdr.serverPort)); +} + +NetSocket Stream2::getClientNetSocket() const +{ + return NetSocket(streamHdr.clientIP, ntohs(streamHdr.clientPort)); +} +void Stream2::setServerNetSocket(const NetSocket& netSocket) +{ + streamHdr.serverPort = htons(netSocket.port); + streamHdr.serverIP = netSocket.host; +} + +void Stream2::setClientNetSocket(const NetSocket& netSocket) +{ + streamHdr.clientPort = htons(netSocket.port); + streamHdr.clientIP = netSocket.host; +} +void Stream2::toLua(ofstream *f, const string& binFileName, const string& streamTableName) const + +{ + (*f) << std::fixed; + + (*f) << streamTableName << "[" << streamHdr.streamId << "] = {" << endl + << spaces(3) << "client_data = {" << endl + << spaces(6) << "header = bin_read(" << binFileName << "," << clientHdrBeg << "," << streamHdr.clientHdrLen << "), " << endl + << spaces(6) << "content = bin_read(" << binFileName << "," << clientContentBeg << "," << streamHdr.clientContentLen << "), " << endl + << spaces(3) << "}," << endl + << spaces(3) << "server_data = {" << endl + << spaces(6) << "header = bin_read(" << binFileName << "," << serverHdrBeg << "," << streamHdr.serverHdrLen << "), " << endl + << spaces(6) << "content = bin_read(" << binFileName << "," << serverContentBeg << "," << streamHdr.serverContentLen << "), " << endl + << spaces(3) << "}," << endl + << spaces(3) << "actions = {" << endl; + + for (size_t i = 0; i < m_actions.size(); ++i) { + const char *peer_str = m_actions[i].peer == 0? 
"client" : "server"; + + (*f) << spaces(6) << peer_str << "_content(" << m_actions[i].beg << "," << m_actions[i].len << ")," << endl; + } + + (*f) << spaces(3) << "}," << endl + << spaces(3) << "clients = {ip = ip(\"" << ipToString(streamHdr.clientIP) << "\"), port = " << ntohs(streamHdr.clientPort) << "}," << endl + << spaces(3) << "servers = {ip = ip(\"" << ipToString(streamHdr.serverIP) << "\"), port = " << ntohs(streamHdr.serverPort) << "}," << endl + << spaces(3) << "l4_proto = \"" << (streamHdr.protocol == 0x06? "tcp" : "udp") << "\"," << endl + << spaces(3) << "up_bps = " << setprecision(4) << streamHdr.upRate << "," << endl + << spaces(3) << "dn_bps = " << setprecision(4) << streamHdr.dnRate << "," << endl; + + (*f) << "}" << endl; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/stream2.hpp b/VNFs/DPPD-PROX/tools/flow_extract/stream2.hpp new file mode 100644 index 00000000..fd9d9c8c --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/stream2.hpp @@ -0,0 +1,54 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STREAM2_H_ +#define _STREAM2_H_ + +#include <inttypes.h> +#include <fstream> + +#include "netsocket.hpp" + +using namespace std; + +class Stream2 { +public: + Stream2() : clientServerHdrContent(NULL) {} + ~Stream2() {delete [] clientServerHdrContent;} + int fromFile(ifstream *f); + void calcOffsets(ofstream *out); + void toFile(ofstream *out) const; + void toLua(ofstream *f, const string& binFileName, const string& streamTableName) const; + NetSocket getServerNetSocket() const; + NetSocket getClientNetSocket() const; + void setServerNetSocket(const NetSocket& netSocket); + void setClientNetSocket(const NetSocket& netSocket); + Stream::Header streamHdr; +private: + int actionsFromFile(ifstream *f, size_t actionCount); + int setReferences(ifstream *f); + + uint8_t *clientServerHdrContent; + + uint32_t clientHdrBeg; + uint32_t serverHdrBeg; + uint32_t clientContentBeg; + uint32_t serverContentBeg; + + vector<Stream::ActionEntry> m_actions; +}; + +#endif /* _STREAM2_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/stream3.cpp b/VNFs/DPPD-PROX/tools/flow_extract/stream3.cpp new file mode 100644 index 00000000..30c166ae --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/stream3.cpp @@ -0,0 +1,95 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <iostream> +#include <fstream> + +using namespace std; + +#include "stream3.hpp" + +Stream3::Stream3(uint32_t id, PcapPkt::L4Proto proto) + : m_id(id), m_proto(proto), m_pktCount(0), m_flushCount(0) +{ +} + +void Stream3::writeHeader(ofstream *outputFile) const +{ + outputFile->write(reinterpret_cast<const char *>(&m_id), sizeof(m_id)); + outputFile->write(reinterpret_cast<const char *>(&m_flushCount), sizeof(m_flushCount)); +} + +void Stream3::writePackets(ofstream *outputFile) const +{ + for (size_t i = 0; i < m_pkts.size(); ++i) + m_pkts[i]->toFile(outputFile); +} + +void Stream3::clearPackets() +{ + for (size_t i = 0; i < m_pkts.size(); ++i) + delete m_pkts[i]; + m_pkts.clear(); + m_flushCount = 0; +} + +void Stream3::flush(ofstream *outputFile) +{ + writeHeader(outputFile); + writePackets(outputFile); + clearPackets(); +} + +void Stream3::addPkt(const PcapPkt& pkt) +{ + m_pkts.push_back(new PcapPkt(pkt)); + m_pktCount++; + m_flushCount++; +} + +Timestamp Stream3::getTimeout() const +{ + uint32_t timeoutMinutes = m_proto == PcapPkt::PROTO_UDP? 
10 : 5; + + return Timestamp(timeoutMinutes * 60, 0); +} + +uint32_t Stream3::getIDFromMem(uint8_t *mem) +{ + return *reinterpret_cast<uint32_t *>(mem); +} + +void Stream3::addFromMemory(uint8_t *mem, size_t *len) +{ + uint32_t n_pkts; + + mem += sizeof(m_id); + n_pkts = *reinterpret_cast<uint32_t *>(mem); + mem += sizeof(n_pkts); + + *len = sizeof(m_id) + sizeof(n_pkts); + for (uint32_t i = 0; i < n_pkts; ++i) { + addPkt(PcapPkt(mem)); + mem += m_pkts.back()->memSize(); + *len += m_pkts.back()->memSize(); + } +} + +void Stream3::removeAllPackets() +{ + clearPackets(); + m_pktCount = 0; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/stream3.hpp b/VNFs/DPPD-PROX/tools/flow_extract/stream3.hpp new file mode 100644 index 00000000..7e94814e --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/stream3.hpp @@ -0,0 +1,55 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
#ifndef _STREAM3_H_
#define _STREAM3_H_

#include <inttypes.h>
#include <vector>

#include "pcappkt.hpp"
#include "timestamp.hpp"

using namespace std;
class Allocator;

/* A flow buffered during the sorting phase: accumulates copies of its
   packets and periodically flushes them (id + count + packets) to the
   intermediate binary file. */
class Stream3 {
public:
	PcapPkt::L4Proto getProto(void) const {return m_proto;}
	Stream3(uint32_t id, PcapPkt::L4Proto proto);
	Stream3() : m_id(UINT32_MAX), m_proto(PcapPkt::PROTO_UDP), m_pktCount(0), m_flushCount(0) {}
	/* Buffers a heap copy of pkt (owned by this object). */
	void addPkt(const PcapPkt& pkt);
	/* Writes header + buffered packets, then frees the buffer. */
	void flush(ofstream *outputFile);
	/* Rehydrates a flushed record; *len gets the bytes consumed. */
	void addFromMemory(uint8_t *mem, size_t *len);
	static uint32_t getIDFromMem(uint8_t *mem);
	bool hasFlushablePackets() const {return !!m_flushCount;}
	/* Idle timeout: 10 min for UDP, 5 min otherwise. */
	Timestamp getTimeout() const;
	uint32_t getID() const {return m_id;}
	void removeAllPackets();
	void setID(const uint32_t id) {m_id = id;}
private:
	void writeHeader(ofstream *outputFile) const;
	void writePackets(ofstream *outputFile) const;
	void clearPackets();

	uint32_t m_id;
	PcapPkt::L4Proto m_proto;
	/* NOTE(review): m_pkts holds owned heap pointers but there is no
	   destructor and copies are not disabled — destroying an instance
	   without flush()/removeAllPackets() leaks, and a default copy
	   would alias the pointers.  Confirm usage in FlowTable before
	   adding ownership semantics. */
	vector<PcapPkt *> m_pkts;
	uint32_t m_pktCount;	// total packets ever added
	uint32_t m_flushCount;	// packets buffered since last flush
};

#endif /* _STREAM3_H_ */
+*/ + +#include <inttypes.h> +#include <string> +#include <cstdio> +#include <iostream> +#include <sys/stat.h> +#include <sys/types.h> +#include <sstream> +#include <set> +#include <arpa/inet.h> +#include <fcntl.h> +#include <unistd.h> +#include <sys/mman.h> +#include <cerrno> +#include <cstdlib> +#include <map> + +#include "path.hpp" +#include "bundle.hpp" +#include "stream.hpp" +#include "stream2.hpp" +#include "allocator.hpp" +#include "timestamp.hpp" +#include "streamextract.hpp" +#include "pcapreader.hpp" +#include "pcapwriter.hpp" +#include "flowtable.hpp" +#include "stream3.hpp" +#include "netsocket.hpp" +#include "pcappktref.hpp" +#include "progress.hpp" +#include "mappedfile.hpp" +#include "streamsorter.hpp" + +using namespace std; + +static bool is_dir(const string& path_dir_out) +{ + struct stat s = { 0 }; + + if (stat(path_dir_out.c_str(), &s)) { + return false; + } + + return s.st_mode & S_IFDIR; +} + +StreamExtract::StreamExtract(const ProgramConfig &cfg) + : ft2(cfg.flowTableSize), + streamSorter(cfg.flowTableSize, cfg.path_dir_out, 1024UL*1024*1024*8), + cfg(cfg) +{ +} + +vector<Bundle> StreamExtract::createBundles(const string& streamPath) +{ + map<uint32_t, Bundle>::iterator iterBundle; + map<uint32_t, Bundle> bundles; + set<uint32_t> servers; + + Stream2 s; + ifstream binIn; + + binIn.open(streamPath.c_str()); + binIn.seekg(0, binIn.end); + Progress progress(binIn.tellg()); + binIn.seekg(0, binIn.beg); + + while (!s.fromFile(&binIn)) { + if (progress.couldRefresh()) { + progress.setProgress(binIn.tellg()); + progress.refresh(); + } + if (!s.streamHdr.completedTCP) + continue; + if (!s.streamHdr.serverHdrLen) + continue; + /* The current implementation does not support clients + that are also servers. 
*/ + servers.insert(s.streamHdr.serverIP); + if (servers.find(s.streamHdr.clientIP) != servers.end()) + continue; + + /* Since each application is represented as a path + graph (there is only one reply for a given request + and only one request after a given reply), each + application must run on a unique server. For this + reason, check if the socket on the server already + is occupied and if so, keep incrementing the socket + until the collision is resolved. */ + iterBundle = bundles.find(s.streamHdr.clientIP); + + if (iterBundle == bundles.end()) { + bundles.insert(make_pair(s.streamHdr.clientIP, Bundle())); + iterBundle = bundles.find(s.streamHdr.clientIP); + } + + (*iterBundle).second.addStream(s.streamHdr.streamId, s.getServerNetSocket().port); + } + + progress.setProgress(); + progress.refresh(true); + + binIn.close(); + + vector<Bundle> ret; + + ret.reserve(bundles.size()); + + for (map<uint32_t, Bundle>::const_iterator i = bundles.begin(); i != bundles.end(); ++i) + ret.push_back(i->second); + + return ret; +} + +set<uint32_t> StreamExtract::getBundleStreamIDs(const vector<Bundle*>& bundleSamples) +{ + set<uint32_t> streamIDs; + + for (size_t i = 0; i < bundleSamples.size(); ++i) { + const vector<uint32_t> &bundleStreamIDs = bundleSamples[i]->getStream(); + + for (vector<uint32_t>::const_iterator j = bundleStreamIDs.begin(); j != bundleStreamIDs.end(); ++j) { + streamIDs.insert(*j); + } + } + + return streamIDs; +} + +static size_t getRandom(size_t limit) +{ + size_t r = rand(); + size_t rand_limit = (RAND_MAX/limit)*limit; + + while (r > rand_limit) + r = rand(); + + return r % limit; +} + +static void removeFill(vector<Bundle*> *from, size_t idx) +{ + Bundle *last = from->back(); + from->pop_back(); + + if (idx != from->size()) + (*from)[idx] = last; +} + +static vector<Bundle*> takeSamples(vector<Bundle>& bundles, size_t sampleCount) +{ + vector<Bundle*> bundleSamples; + + bundleSamples.reserve(bundles.size()); + + cout << "Sampling " << sampleCount << 
" bundles out of " << bundles.size() << endl; + for (size_t i = 0; i < bundles.size(); ++i) + bundleSamples.push_back(&bundles[i]); + + srand(1000); + while (bundleSamples.size() > sampleCount) { + size_t r = getRandom(bundleSamples.size()); + removeFill(&bundleSamples, r); + } + return bundleSamples; +} + +static size_t replaceWithRunningTotals(vector<size_t> *streamLength) +{ + size_t runningTotal = 0; + for (size_t i = 0; i < streamLength->size(); ++i) { + size_t len = (*streamLength)[i] + sizeof(uint32_t); + (*streamLength)[i] = runningTotal; + runningTotal += len; + } + return runningTotal; +} + +static void printPorts(const vector<Bundle> &bundles) +{ + set<uint32_t> streamIDs; + + for (size_t i = 0; i < bundles.size(); ++i) { + const vector<uint32_t> &ports = bundles[i].getPorts(); + + for (size_t j = 0; j < ports.size(); ++j) { + if (j + 1 == ports.size()) + cout << ports[j] << ",END" << endl; + else + cout << ports[j] << "," << ports[j +1] << endl; + } + } +} + +string StreamExtract::createStreamPcapFileName(int id) +{ + stringstream ss; + + ss << cfg.path_dir_out << "/s" << id << ".pcap"; + + return ss.str(); +} + +int StreamExtract::writeToPcaps(const string &sourceFilePath, const set<uint32_t> &streamIDs) +{ + set<uint32_t>::const_iterator i = streamIDs.begin(); + + MappedFile mappedFile; + if (mappedFile.open(sourceFilePath)) { + cerr << "Failed to open file " << sourceFilePath << ":" << strerror(errno) << endl; + return -1; + } + + PcapPkt::allocator = NULL; + + Progress progress((uint64_t)mappedFile.getMapEnd() - (uint64_t)mappedFile.getMapBeg()); + cout << "Writing " << streamIDs.size() << " streams to pcaps" << endl; + uint8_t *data2 = mappedFile.getMapBeg(); + while (data2 < mappedFile.getMapEnd()) { + uint32_t id = *reinterpret_cast<uint32_t *>(data2); + + data2 += sizeof(id); + uint32_t pktCount = *reinterpret_cast<uint32_t *>(data2); + data2 += sizeof(pktCount); + Stream s(id, pktCount); + while (pktCount--) { + PcapPkt p(data2); + + data2 += 
p.memSize(); + s.addPkt(p); + } + + while (i != streamIDs.end() && (*i) < id) + i++; + if (i == streamIDs.end()) + break; + if (*i > id) + continue; + + const string pcapPath = createStreamPcapFileName(id); + + s.toPcap(pcapPath); + if (progress.couldRefresh()) { + progress.setProgress((uint64_t)data2 - (uint64_t)mappedFile.getMapBeg()); + progress.refresh(); + mappedFile.sync(); + } + } + + progress.setProgress(data2 - mappedFile.getMapBeg()); + progress.refresh(true); + + mappedFile.close(); + return 0; +} + +int StreamExtract::writeToLua(const string& binFilePath, const Path &smallFinalBin, const string& luaFilePath, const string &orderedTemp) +{ + vector<Bundle> bundles = createBundles(binFilePath); + vector<Bundle*> bundleSamples = takeSamples(bundles, cfg.sampleCount); + set<uint32_t> streamIDs = getBundleStreamIDs(bundleSamples); + + if (cfg.write_pcaps) + writeToPcaps(orderedTemp, streamIDs); + + ofstream outLua; + ofstream outSmallBin; + outLua.open(luaFilePath.c_str()); + outLua << "bf = \""<< smallFinalBin.getFileName() << "\"" << endl; + outLua << "s = {}\n"; + set<uint32_t>::iterator i = streamIDs.begin(); + + set<NetSocket> serverSockets; + ifstream binIn; + Stream2 s; + + outSmallBin.open(smallFinalBin.str().c_str()); + binIn.open(binFilePath.c_str()); + while (!s.fromFile(&binIn)) { + while (i != streamIDs.end() && (*i) < s.streamHdr.streamId) + i++; + if (i == streamIDs.end()) + break; + if (*i > s.streamHdr.streamId) + continue; + s.calcOffsets(&outSmallBin); + s.toFile(&outSmallBin); + while (serverSockets.find(s.getServerNetSocket()) != serverSockets.end()) { + NetSocket ns = s.getServerNetSocket(); + + ns.port++; + s.setServerNetSocket(ns); + } + serverSockets.insert(s.getServerNetSocket()); + + s.toLua(&outLua, "bf", "s"); + } + binIn.close(); + + uint32_t bundleCount = 0; + + outLua << "bundles = {}" << endl; + for (size_t i = 0; i < bundleSamples.size(); ++i) { + bundleSamples[i]->toLua(&outLua, "s", ++bundleCount); + } + outLua << "return 
bundles" << endl; + outLua.close(); + return 0; +} + +int StreamExtract::writeFinalBin(const string& sourceFilePath, const string& destFilePath) +{ + MappedFile mappedFile; + if (mappedFile.open(sourceFilePath)) { + cerr << "Failed to open file " << sourceFilePath << ":" << strerror(errno) << endl; + return -1; + } + ofstream binOut; + + binOut.open(destFilePath.c_str()); + PcapPkt::allocator = NULL; + + Progress progress((uint64_t)mappedFile.getMapEnd() - (uint64_t)mappedFile.getMapBeg()); + + int streamCount = 0; + uint8_t *data2 = mappedFile.getMapBeg(); + while (data2 < mappedFile.getMapEnd()) { + uint32_t id = *reinterpret_cast<uint32_t *>(data2); + + data2 += sizeof(id); + uint32_t pktCount = *reinterpret_cast<uint32_t *>(data2); + data2 += sizeof(pktCount); + Stream s(id, pktCount); + while (pktCount--) { + PcapPkt p(data2); + + data2 += p.memSize(); + s.addPkt(p); + } + s.toFile(&binOut); + streamCount++; + if (progress.couldRefresh()) { + progress.setProgress((uint64_t)data2 - (uint64_t)mappedFile.getMapBeg()); + progress.refresh(); + mappedFile.sync(); + } + } + + progress.setProgress(data2 - mappedFile.getMapBeg()); + progress.refresh(true); + + binOut.close(); + mappedFile.close(); + return 0; +} + +int StreamExtract::run() +{ + Path p(cfg.path_dir_out); + p.mkdir(); + + string orderedTemp = p.add("/a").str(); + + string finalBin = p.add("/b").str(); + Path smallfinalBin = p.add("/data.bin").str(); + string luaFile = p.add("/cfg.lua").str(); + + cout << "Writing to directory '" << p.str() << "'" << endl; + cout << "Ordered streams '" << orderedTemp << "'" << endl; + cout << "Final binary output '" << finalBin << "'" << endl; + cout << "lua file '" << luaFile << "' will contain " << cfg.sampleCount << " bundles" << endl; + + if (cfg.run_first_step) { + cout << "starting sorting" << endl; + streamSorter.sort(cfg.path_file_in_pcap, orderedTemp); + cout << "writing final binary file (converting format)" << endl; + if (writeFinalBin(orderedTemp, finalBin)) + 
return -1; + } else { + cout << "Skipping first step" << endl; + if (!Path(finalBin).isFile()) { + cerr << "File is missing:" << finalBin << endl; + return -1; + } + } + cout << "writing Lua '" << luaFile << "'" << endl; + if (writeToLua(finalBin, smallfinalBin, luaFile, orderedTemp)) + return -1; + return 0; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/streamextract.hpp b/VNFs/DPPD-PROX/tools/flow_extract/streamextract.hpp new file mode 100644 index 00000000..d5dbdb05 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/streamextract.hpp @@ -0,0 +1,55 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _STREAMEXTRACT_H_ +#define _STREAMEXTRACT_H_ + +#include <string> +#include <list> +#include <map> +#include <set> + +#include "programconfig.hpp" +#include "bundle.hpp" +#include "pcapreader.hpp" +#include "flowtable.hpp" +#include "pcappkt.hpp" +#include "stream3.hpp" +#include "streamsorter.hpp" +#include "path.hpp" + +using namespace std; + +class StreamExtract { +public: + /* The size of the flow table determines the number of flows + that can be active at a given time. When a flow expires, it + is written out to a file and the memory is freed. 
*/ + StreamExtract(const ProgramConfig &cfg); + int run(); +private: + int writeToPcaps(const string &sourceFilePath, const set<uint32_t> &streamIDs); + int writeToLua(const string& binFilePath, const Path &smallFinalBin, const string& luaFilePath, const string& orderedTemp); + int writeFinalBin(const string& sourceFilePath, const string& destFilePath); + string createStreamPcapFileName(int id); + vector<Bundle> createBundles(const string& streamPath); + set<uint32_t> getBundleStreamIDs(const vector<Bundle*>& bundleSamples); + FlowTable<pkt_tuple, Stream3> ft2; + StreamSorter streamSorter; + ProgramConfig cfg; +}; + +#endif /* _STREAMEXTRACT_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/streamsorter.cpp b/VNFs/DPPD-PROX/tools/flow_extract/streamsorter.cpp new file mode 100644 index 00000000..65c645e1 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/streamsorter.cpp @@ -0,0 +1,203 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <iostream> +#include <fstream> +#include <cstdlib> + +#include "mappedfile.hpp" +#include "memreader.hpp" +#include "streamsorter.hpp" +#include "path.hpp" +#include "allocator.hpp" +#include "pcapreader.hpp" +#include "progress.hpp" + +StreamSorter::StreamSorter(size_t flowTableSize, const string& workingDirectory, size_t memoryLimit) + : flowTableSize(flowTableSize), + workingDirectory(workingDirectory), + allocator(memoryLimit, 1024*10), + streamID(0) +{ +} + +void StreamSorter::sort(const string &inputPcapFilePath, const string &outputBinFilePath) +{ + setTempFileName(); + sortChunks(inputPcapFilePath); + mergeChunks(outputBinFilePath); +} + +void StreamSorter::sortChunks(const string &inputPcapFilePath) +{ + ofstream outputTempFile; + + outputTempFile.open(tempFilePath.c_str()); + + if (!outputTempFile.is_open()) + return ; + + PcapReader pr; + PcapPkt pkt; + + if (pr.open(inputPcapFilePath)) { + pr.getError(); + return; + } + PcapPkt::allocator = &allocator; + + Progress progress(pr.end()); + uint32_t packetDetail = progress.addDetail("packet count"); + + ft = new FlowTable<pkt_tuple, uint32_t>(flowTableSize); + resetStreams(); + + while (pr.read(&pkt)) { + processPkt(pkt); + if (progress.couldRefresh()) { + progress.setProgress(pr.pos()); + progress.setDetail(packetDetail, pr.getPktReadCount()); + progress.refresh(); + } + if (allocator.lowThresholdReached()) { + flushStreams(&outputTempFile); + } + } + progress.setProgress(); + progress.setDetail(packetDetail, pr.getPktReadCount()); + progress.refresh(true); + + pr.close(); + flushStreams(&outputTempFile); + PcapPkt::allocator = NULL; + outputTempFile.close(); + delete ft; +} + +void StreamSorter::resetStreams() +{ + streams.clear(); +} + +void StreamSorter::flushStreams(ofstream *outputTempFile) +{ + size_t flushCount = 0; + size_t offset = outputTempFile->tellp(); + + Progress progress(streams.size()); + + cout << endl; + progress.setTitle("flush "); + for (size_t i = 0; i < 
streams.size(); ++i) { + if (streams[i].hasFlushablePackets()) { + streams[i].flush(outputTempFile); + flushCount++; + } + + if (progress.couldRefresh()) { + progress.setProgress(i); + progress.refresh(); + } + } + progress.setProgress(); + progress.refresh(true); + + if (flushCount) + flushOffsets.push_back(offset); + allocator.reset(); +} + +Stream3 *StreamSorter::addNewStream(PcapPkt::L4Proto proto) +{ + streams.push_back(Stream3(streamID++, proto)); + return &streams.back(); +} + +FlowTable<pkt_tuple, uint32_t>::entry* StreamSorter::getFlowEntry(const PcapPkt &pkt) +{ + FlowTable<pkt_tuple, uint32_t>::entry *a; + struct pkt_tuple pt = pkt.parsePkt(); + Stream3 *stream = NULL; + + a = ft->lookup(pt.flip()); + if (!a) { + a = ft->lookup(pt); + if (!a) { + stream = addNewStream(pkt.getProto()); + + a = ft->insert(pt, stream->getID(), pkt.ts()); + } + } + + if (a->expired(pkt.ts(), streams[a->value].getTimeout())) { + ft->remove(a); + + stream = addNewStream(pkt.getProto()); + + a = ft->insert(pt, stream->getID(), pkt.ts()); + } + return a; +} + +void StreamSorter::processPkt(const PcapPkt &pkt) +{ + FlowTable<pkt_tuple, uint32_t>::entry *a; + + a = getFlowEntry(pkt); + a->tv = pkt.ts(); + streams[a->value].addPkt(pkt); +} + +void StreamSorter::mergeChunks(const string &outputBinFile) +{ + cout << "merging chunks: " << tempFilePath << " to " << outputBinFile << endl; + cout << "have " << flushOffsets.size() << " parts to merge" << endl; + MappedFile tempFile; + + if (tempFile.open(tempFilePath)) { + cerr << "failed to open temp file" << endl; + return; + } + ofstream file; + + file.open(outputBinFile.c_str()); + + if (!file.is_open()) { + cerr << "failed to open file '" << outputBinFile << "'" << endl; + return; + } + MemReader memReader(&tempFile, flushOffsets); + Stream3 stream; + + Progress progress(memReader.getTotalLength()); + + while (memReader.read(&stream)) { + stream.flush(&file); + if (progress.couldRefresh()) { + 
progress.setProgress(memReader.consumed()); + progress.refresh(); + } + } + + progress.setProgress(); + progress.refresh(true); + tempFile.close(); +} + +void StreamSorter::setTempFileName() +{ + tempFilePath = Path(workingDirectory).add("/tmp").str(); +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/streamsorter.hpp b/VNFs/DPPD-PROX/tools/flow_extract/streamsorter.hpp new file mode 100644 index 00000000..a6d3d6cd --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/streamsorter.hpp @@ -0,0 +1,47 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _STREAMSORTER_H_ +#define _STREAMSORTER_H_ + +#include "stream3.hpp" +#include "flowtable.hpp" +#include "allocator.hpp" + +class StreamSorter { +public: + StreamSorter(size_t flowTableSize, const string& workingDirectory, size_t memoryLimit); + void sort(const string &inputPcapFile, const string &outputBinFile); +private: + void sortChunks(const string &inputPcapFilePath); + void mergeChunks(const string &outputBinFilePath); + void setTempFileName(); + void processPkt(const PcapPkt &pkt); + void resetStreams(); + FlowTable<pkt_tuple, uint32_t>::entry* getFlowEntry(const PcapPkt &pkt); + void flushStreams(ofstream *outputTempFile); + Stream3 *addNewStream(PcapPkt::L4Proto proto); + size_t flowTableSize; + FlowTable<pkt_tuple, uint32_t> *ft; + vector<size_t> flushOffsets; + vector<Stream3> streams; + string tempFilePath; + const string workingDirectory; + Allocator allocator; + uint32_t streamID; +}; + +#endif /* _STREAMSORTER_H_ */ diff --git a/VNFs/DPPD-PROX/tools/flow_extract/timestamp.cpp b/VNFs/DPPD-PROX/tools/flow_extract/timestamp.cpp new file mode 100644 index 00000000..9e91173d --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/timestamp.cpp @@ -0,0 +1,65 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <cstdio> +#include <iostream> +#include <iomanip> + +#include "timestamp.hpp" + +Timestamp Timestamp::operator-(const Timestamp& other) const +{ + uint64_t sec; + uint64_t nsec; + + if (other.m_nsec <= m_nsec) { + nsec = m_nsec - other.m_nsec; + sec = m_sec - other.m_sec; + } else { + nsec = (1000000000 + m_nsec) - other.m_nsec; + sec = m_sec - 1 - other.m_sec; + } + + return Timestamp(sec, nsec); +} + +bool Timestamp::operator>(const Timestamp& other) +{ + return m_sec > other.m_sec || + (m_sec == other.m_sec && m_nsec > other.m_nsec); +} + +bool Timestamp::operator<(const Timestamp& other) +{ + return m_sec < other.m_sec || + (m_sec == other.m_sec && m_nsec < other.m_nsec); +} + +ostream& operator<<(ostream& stream, const Timestamp& ts) +{ + stream << ts.m_sec << "." << setw(9) << setfill('0') << ts.m_nsec; + return stream; +} + +double operator/(double d, const Timestamp &denominator) +{ + return d * 1000000000 / (denominator.m_sec * 1000000000 + denominator.m_nsec); +} + +bool Timestamp::operator==(const Timestamp &other) const +{ + return m_sec == other.m_sec && m_nsec == other.m_nsec; +} diff --git a/VNFs/DPPD-PROX/tools/flow_extract/timestamp.hpp b/VNFs/DPPD-PROX/tools/flow_extract/timestamp.hpp new file mode 100644 index 00000000..cf8ec5d4 --- /dev/null +++ b/VNFs/DPPD-PROX/tools/flow_extract/timestamp.hpp @@ -0,0 +1,45 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _TIMESTAMP_H_ +#define _TIMESTAMP_H_ + +#include <iostream> + +#include <sys/time.h> +#include <inttypes.h> + +using namespace std; + +class Timestamp { +public: + Timestamp(const uint64_t sec, const uint64_t nsec) : m_sec(sec), m_nsec(nsec) {} + Timestamp() {} + Timestamp(const struct timeval& tv) : m_sec(tv.tv_sec), m_nsec(tv.tv_usec) {} + Timestamp operator-(const Timestamp& other) const; + bool operator==(const Timestamp &other) const; + friend double operator/(double d, const Timestamp &denominator); + bool operator>(const Timestamp& other); + bool operator<(const Timestamp& other); + uint64_t sec() const {return m_sec;} + uint64_t nsec() const {return m_nsec;} + friend ostream& operator<<(ostream& stream, const Timestamp& ts); +private: + uint64_t m_sec; + uint64_t m_nsec; +}; + +#endif /* _TIMESTAMP_H_ */ diff --git a/VNFs/DPPD-PROX/tx_pkt.c b/VNFs/DPPD-PROX/tx_pkt.c new file mode 100644 index 00000000..c6f6010c --- /dev/null +++ b/VNFs/DPPD-PROX/tx_pkt.c @@ -0,0 +1,665 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#include <rte_ethdev.h> +#include <rte_version.h> + +#include "rx_pkt.h" +#include "tx_pkt.h" +#include "task_base.h" +#include "stats.h" +#include "prefetch.h" +#include "prox_assert.h" +#include "log.h" +#include "mbuf_utils.h" + +static void buf_pkt_single(struct task_base *tbase, struct rte_mbuf *mbuf, const uint8_t out) +{ + const uint16_t prod = tbase->ws_mbuf->idx[out].prod++; + tbase->ws_mbuf->mbuf[out][prod & WS_MBUF_MASK] = mbuf; +} + +static inline void buf_pkt_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + for (uint16_t j = 0; j < n_pkts; ++j) { + if (unlikely(out[j] >= OUT_HANDLED)) { + rte_pktmbuf_free(mbufs[j]); + if (out[j] == OUT_HANDLED) + TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, 1); + else + TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1); + } + else { + buf_pkt_single(tbase, mbufs[j], out[j]); + } + } +} +#define MAX_PMD_TX 32 + +/* The following help functions also report stats. Therefore we need + to pass the task_base struct. 
*/ +static inline int txhw_drop(const struct port_queue *port_queue, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase) +{ + uint16_t ntx; + int ret; + + /* TX vector mode can't transmit more than 32 packets */ + if (n_pkts > MAX_PMD_TX) { + ntx = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, MAX_PMD_TX); + ntx += rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs + ntx, n_pkts - ntx); + } else { + ntx = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, n_pkts); + } + + TASK_STATS_ADD_TX(&tbase->aux->stats, ntx); + ret = n_pkts - ntx; + if (ntx < n_pkts) { + TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, n_pkts - ntx); + if (tbase->tx_pkt == tx_pkt_bw) { + uint32_t drop_bytes = 0; + do { + drop_bytes += mbuf_wire_size(mbufs[ntx]); + rte_pktmbuf_free(mbufs[ntx++]); + } while (ntx < n_pkts); + TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes); + } + else { + do { + rte_pktmbuf_free(mbufs[ntx++]); + } while (ntx < n_pkts); + } + } + return ret; +} + +static inline int txhw_no_drop(const struct port_queue *port_queue, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase) +{ + uint16_t ret; + uint16_t n = n_pkts; + + TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts); + + do { + ret = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, n_pkts); + mbufs += ret; + n_pkts -= ret; + } + while (n_pkts); + return (n != ret); +} + +static inline int ring_enq_drop(struct rte_ring *ring, struct rte_mbuf *const *mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase) +{ + int ret = 0; + /* return 0 on success, -ENOBUFS on failure */ + // Rings can be single or multiproducer (ctrl rings are multi producer) +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + if (unlikely(rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts))) { +#else + if (unlikely(rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts, NULL) == 0)) { +#endif + ret = n_pkts; 
+ if (tbase->tx_pkt == tx_pkt_bw) { + uint32_t drop_bytes = 0; + for (uint16_t i = 0; i < n_pkts; ++i) { + drop_bytes += mbuf_wire_size(mbufs[i]); + rte_pktmbuf_free(mbufs[i]); + } + TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes); + TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, n_pkts); + } + else { + for (uint16_t i = 0; i < n_pkts; ++i) + rte_pktmbuf_free(mbufs[i]); + TASK_STATS_ADD_DROP_TX_FAIL(&tbase->aux->stats, n_pkts); + } + } + else { + TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts); + } + return ret; +} + +static inline int ring_enq_no_drop(struct rte_ring *ring, struct rte_mbuf *const *mbufs, uint16_t n_pkts, __attribute__((unused)) struct task_base *tbase) +{ + int i = 0; +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + while (rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts)) { +#else + while (rte_ring_enqueue_bulk(ring, (void *const *)mbufs, n_pkts, NULL) == 0) { +#endif + i++; + }; + TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts); + return (i != 0); +} + +void flush_queues_hw(struct task_base *tbase) +{ + uint16_t prod, cons; + + for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (prod != cons) { + tbase->ws_mbuf->idx[i].prod = 0; + tbase->ws_mbuf->idx[i].cons = 0; + txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase); + } + } + + tbase->flags &= ~FLAG_TX_FLUSH; +} + +void flush_queues_sw(struct task_base *tbase) +{ + uint16_t prod, cons; + + for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (prod != cons) { + tbase->ws_mbuf->idx[i].prod = 0; + tbase->ws_mbuf->idx[i].cons = 0; + ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase); + } + } + tbase->flags &= ~FLAG_TX_FLUSH; +} + +void flush_queues_no_drop_hw(struct 
task_base *tbase) +{ + uint16_t prod, cons; + + for (uint8_t i = 0; i < tbase->tx_params_hw.nb_txports; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (prod != cons) { + tbase->ws_mbuf->idx[i].prod = 0; + tbase->ws_mbuf->idx[i].cons = 0; + txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase); + } + } + + tbase->flags &= ~FLAG_TX_FLUSH; +} + +void flush_queues_no_drop_sw(struct task_base *tbase) +{ + uint16_t prod, cons; + + for (uint8_t i = 0; i < tbase->tx_params_sw.nb_txrings; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (prod != cons) { + tbase->ws_mbuf->idx[i].prod = 0; + tbase->ws_mbuf->idx[i].cons = 0; + ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase); + } + } + tbase->flags &= ~FLAG_TX_FLUSH; +} + +/* "try" functions try to send packets to sw/hw w/o failing or blocking; + They return if ring/queue is full and are used by aggregators. + "try" functions do not have drop/no drop flavors + They are only implemented in never_discard mode (as by default they + use only one outgoing ring. 
*/ +uint16_t tx_try_self(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + if (n_pkts < 64) { + tx_pkt_never_discard_self(tbase, mbufs, n_pkts, NULL); + return n_pkts; + } else { + tx_pkt_never_discard_self(tbase, mbufs, 64, NULL); + return 64; + } +} + +uint16_t tx_try_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + const int bulk_size = 64; + uint16_t ret = bulk_size, sent = 0, n_bulks; + n_bulks = n_pkts >> __builtin_ctz(bulk_size); + + for (int i = 0; i < n_bulks; i++) { +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, bulk_size); +#else + ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, bulk_size, NULL); +#endif + mbufs += ret; + sent += ret; + if (ret != bulk_size) + break; + } + if ((ret == bulk_size) && (n_pkts & (bulk_size - 1))) { +#if RTE_VERSION < RTE_VERSION_NUM(17,5,0,1) + ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, (n_pkts & (bulk_size - 1))); +#else + ret = rte_ring_enqueue_burst(tbase->tx_params_sw.tx_rings[0], (void *const *)mbufs, (n_pkts & (bulk_size - 1)), NULL); +#endif + mbufs += ret; + sent += ret; + } + TASK_STATS_ADD_TX(&tbase->aux->stats, sent); + return sent; +} + +uint16_t tx_try_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) +{ + const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0]; + const int bulk_size = 64; + uint16_t ret = bulk_size, n_bulks, sent = 0; + n_bulks = n_pkts >> __builtin_ctz(bulk_size); + + for (int i = 0; i < n_bulks; i++) { + ret = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, bulk_size); + mbufs += ret; + sent += ret; + if (ret != bulk_size) + break; + } + if ((ret == bulk_size) && (n_pkts & (bulk_size - 1))) { + ret = rte_eth_tx_burst(port_queue->port, port_queue->queue, mbufs, (n_pkts & (bulk_size - 1))); + mbufs += ret; + sent += ret; + } + 
TASK_STATS_ADD_TX(&tbase->aux->stats, sent); + return sent; +} + +int tx_pkt_no_drop_never_discard_hw1_lat_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + return txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase); +} + +int tx_pkt_no_drop_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + static uint8_t fake_out[MAX_PKT_BURST] = {0}; + int ret = 0; + if (n_pkts == MAX_PKT_BURST) { + // First xmit what was queued + uint16_t prod, cons; + + prod = tbase->ws_mbuf->idx[0].prod; + cons = tbase->ws_mbuf->idx[0].cons; + + if ((uint16_t)(prod - cons)){ + tbase->flags &= ~FLAG_TX_FLUSH; + tbase->ws_mbuf->idx[0].prod = 0; + tbase->ws_mbuf->idx[0].cons = 0; + ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase); + } + ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase); + } else { + ret+= tx_pkt_no_drop_hw(tbase, mbufs, n_pkts, fake_out); + } + return ret; +} + +int tx_pkt_never_discard_hw1_lat_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + return txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase); +} + +int tx_pkt_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + static uint8_t fake_out[MAX_PKT_BURST] = {0}; + int ret = 0; + if (n_pkts == MAX_PKT_BURST) { + // First xmit what was queued + uint16_t prod, cons; + + prod = tbase->ws_mbuf->idx[0].prod; + cons = tbase->ws_mbuf->idx[0].cons; + + if ((uint16_t)(prod - cons)){ + tbase->flags &= ~FLAG_TX_FLUSH; + tbase->ws_mbuf->idx[0].prod = 0; + tbase->ws_mbuf->idx[0].cons = 0; + ret+= txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], 
tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase); + } + ret+= txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase); + } else { + ret+= tx_pkt_hw(tbase, mbufs, n_pkts, fake_out); + } + return ret; +} + +/* Transmit to hw using tx_params_hw_sw structure + This function is used to transmit to hw when tx_params_hw_sw should be used + i.e. when the task needs to transmit both to hw and sw */ +int tx_pkt_no_drop_never_discard_hw1_no_pointer(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + txhw_no_drop(&tbase->tx_params_hw_sw.tx_port_queue, mbufs, n_pkts, tbase); + return 0; +} + +int tx_pkt_no_drop_never_discard_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + return ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase); +} + +int tx_pkt_never_discard_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + return ring_enq_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase); +} + +static uint16_t tx_pkt_free_dropped(__attribute__((unused)) struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out) +{ + uint64_t v = 0; + uint16_t i; + /* The most probable and most important optimized case is when + no packets should be dropped. */ + for (i = 0; i + 8 < n_pkts; i += 8) { + v |= *((uint64_t*)(&out[i])); + } + for (; i < n_pkts; ++i) { + v |= out[i]; + } + + if (unlikely(v)) { + /* At least some packets need to be dropped, so the + mbufs array needs to be updated. 
*/ + uint16_t n_kept = 0; + uint16_t n_discard = 0; + for (uint16_t i = 0; i < n_pkts; ++i) { + if (unlikely(out[i] >= OUT_HANDLED)) { + rte_pktmbuf_free(mbufs[i]); + n_discard += out[i] == OUT_DISCARD; + continue; + } + mbufs[n_kept++] = mbufs[i]; + } + TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, n_discard); + TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, n_pkts - n_kept - n_discard); + return n_kept; + } + return n_pkts; +} + +int tx_pkt_no_drop_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out) +{ + const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out); + int ret = 0; + + if (likely(n_kept)) + ret = txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_kept, tbase); + return ret; +} + +int tx_pkt_no_drop_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out) +{ + const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out); + int ret = 0; + + if (likely(n_kept)) + ret = ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_kept, tbase); + return ret; +} + +int tx_pkt_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out) +{ + const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out); + + if (likely(n_kept)) + return txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_kept, tbase); + return n_pkts; +} + +int tx_pkt_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out) +{ + const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out); + + if (likely(n_kept)) + return ring_enq_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_kept, tbase); + return 0; +} + +int tx_pkt_self(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out) +{ + const uint16_t n_kept = tx_pkt_free_dropped(tbase, mbufs, n_pkts, out); + + TASK_STATS_ADD_TX(&tbase->aux->stats, n_kept); + tbase->ws_mbuf->idx[0].nb_rx = n_kept; + struct rte_mbuf 
**tx_mbuf = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK); + for (uint16_t i = 0; i < n_kept; ++i) { + tx_mbuf[i] = mbufs[i]; + } + return 0; +} + +int tx_pkt_never_discard_self(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out) +{ + TASK_STATS_ADD_TX(&tbase->aux->stats, n_pkts); + tbase->ws_mbuf->idx[0].nb_rx = n_pkts; + struct rte_mbuf **tx_mbuf = tbase->ws_mbuf->mbuf[0] + (tbase->ws_mbuf->idx[0].prod & WS_MBUF_MASK); + for (uint16_t i = 0; i < n_pkts; ++i) { + tx_mbuf[i] = mbufs[i]; + } + return 0; +} + +int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + int ret = 0; + buf_pkt_all(tbase, mbufs, n_pkts, out); + + const uint8_t nb_bufs = tbase->tx_params_hw.nb_txports; + uint16_t prod, cons; + + for (uint8_t i = 0; i < nb_bufs; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) { + tbase->flags &= ~FLAG_TX_FLUSH; + tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST; + ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase); + } + } + return ret; +} + +int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + int ret = 0; + buf_pkt_all(tbase, mbufs, n_pkts, out); + + const uint8_t nb_bufs = tbase->tx_params_sw.nb_txrings; + uint16_t prod, cons; + + for (uint8_t i = 0; i < nb_bufs; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) { + tbase->flags &= ~FLAG_TX_FLUSH; + tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST; + ret += ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase); + } + } + return ret; +} + +int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, 
uint16_t n_pkts, uint8_t *out) +{ + int ret = 0; + buf_pkt_all(tbase, mbufs, n_pkts, out); + + const uint8_t nb_bufs = tbase->tx_params_hw.nb_txports; + uint16_t prod, cons; + + for (uint8_t i = 0; i < nb_bufs; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) { + tbase->flags &= ~FLAG_TX_FLUSH; + tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST; + ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase); + } + } + return ret; +} + +int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + int ret = 0; + buf_pkt_all(tbase, mbufs, n_pkts, out); + + const uint8_t nb_bufs = tbase->tx_params_sw.nb_txrings; + uint16_t prod, cons; + for (uint8_t i = 0; i < nb_bufs; ++i) { + prod = tbase->ws_mbuf->idx[i].prod; + cons = tbase->ws_mbuf->idx[i].cons; + + if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) { + tbase->flags &= ~FLAG_TX_FLUSH; + tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST; + ret+= ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase); + } + } + return ret; +} + +int tx_pkt_trace(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + int ret = 0; + if (tbase->aux->task_rt_dump.cur_trace == 0) { + // No packet received since dumping... 
+ // So the transmitted packets should not be linked to received packets + tbase->aux->task_rt_dump.n_print_tx = tbase->aux->task_rt_dump.n_trace; + tbase->aux->task_rt_dump.n_trace = 0; + task_base_del_rx_pkt_function(tbase, rx_pkt_trace); + return tx_pkt_dump(tbase, mbufs, n_pkts, out); + } + plog_info("Tracing %d pkts\n", tbase->aux->task_rt_dump.cur_trace); + + for (uint32_t i = 0; i < tbase->aux->task_rt_dump.cur_trace; ++i) { + struct rte_mbuf tmp; + /* For each packet being transmitted, find which + buffer represent the packet as it was before + processing. */ + uint32_t j = 0; + uint32_t len = sizeof(tbase->aux->task_rt_dump.pkt_mbuf_addr)/sizeof(tbase->aux->task_rt_dump.pkt_mbuf_addr[0]); + for (;j < len; ++j) { + if (tbase->aux->task_rt_dump.pkt_mbuf_addr[j] == mbufs[i]) + break; + } + if (j == len) { + plog_info("Trace RX: missing!\n"); + } + else { +#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) + tmp.data_off = 0; +#endif + rte_pktmbuf_data_len(&tmp) = tbase->aux->task_rt_dump.pkt_cpy_len[j]; + rte_pktmbuf_pkt_len(&tmp) = tbase->aux->task_rt_dump.pkt_cpy_len[j]; + tmp.buf_addr = tbase->aux->task_rt_dump.pkt_cpy[j]; + plogd_info(&tmp, "Trace RX: "); + } + + if (out) { + if (out[i] != 0xFF) + plogd_info(mbufs[i], "Trace TX[%d]: ", out[i]); + else + plogd_info(mbufs[i], "Trace Dropped: "); + } else + plogd_info(mbufs[i], "Trace TX: "); + } + ret = tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out); + + /* Unset by TX when n_trace = 0 */ + if (0 == tbase->aux->task_rt_dump.n_trace) { + tbase->tx_pkt = tbase->aux->tx_pkt_orig; + tbase->aux->tx_pkt_orig = NULL; + task_base_del_rx_pkt_function(tbase, rx_pkt_trace); + } + return ret; +} + +int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + uint32_t n_dump = tbase->aux->task_rt_dump.n_print_tx; + int ret = 0; + + n_dump = n_pkts < n_dump? 
n_pkts : n_dump; + for (uint32_t i = 0; i < n_dump; ++i) { + if (out) + plogd_info(mbufs[i], "TX[%d]: ", out[i]); + else + plogd_info(mbufs[i], "TX: "); + } + tbase->aux->task_rt_dump.n_print_tx -= n_dump; + + ret = tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out); + + if (0 == tbase->aux->task_rt_dump.n_print_tx) { + tbase->tx_pkt = tbase->aux->tx_pkt_orig; + tbase->aux->tx_pkt_orig = NULL; + } + return ret; +} + +/* Gather the distribution of the number of packets that have been + xmitted from one TX call. Since the value is only modified by the + task that xmits the packet, no atomic operation is needed. */ +int tx_pkt_distr(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + tbase->aux->tx_bucket[n_pkts]++; + return tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out); +} + +int tx_pkt_bw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + uint32_t tx_bytes = 0; + uint32_t drop_bytes = 0; + + for (uint16_t i = 0; i < n_pkts; ++i) { + if (!out || out[i] < OUT_HANDLED) + tx_bytes += mbuf_wire_size(mbufs[i]); + else + drop_bytes += mbuf_wire_size(mbufs[i]); + } + + TASK_STATS_ADD_TX_BYTES(&tbase->aux->stats, tx_bytes); + TASK_STATS_ADD_DROP_BYTES(&tbase->aux->stats, drop_bytes); + return tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out); +} + +int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) +{ + for (uint16_t j = 0; j < n_pkts; ++j) { + rte_pktmbuf_free(mbufs[j]); + } + if (out == NULL) + TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, n_pkts); + else { + for (uint16_t j = 0; j < n_pkts; ++j) { + if (out[j] == OUT_HANDLED) + TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, 1); + else + TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1); + } + } + return n_pkts; +} diff --git a/VNFs/DPPD-PROX/tx_pkt.h b/VNFs/DPPD-PROX/tx_pkt.h new file mode 100644 index 00000000..798797ab --- /dev/null +++ b/VNFs/DPPD-PROX/tx_pkt.h @@ -0,0 +1,82 @@ +/* +// 
Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _TX_PKT_H_ +#define _TX_PKT_H_ + +#include <inttypes.h> + +struct task_base; +struct rte_mbuf; + +void flush_queues_hw(struct task_base *tbase); +void flush_queues_sw(struct task_base *tbase); + +void flush_queues_no_drop_hw(struct task_base *tbase); +void flush_queues_no_drop_sw(struct task_base *tbase); + +/* The following four transmit functions always send packets to the + single output unless the packet should be dropped. These functions + are used if (1) the task is only sending to one destination and + (2), packets can potentially be dropped (as specified by the out + parameter, which is either NO_PORT_AVAIL or 0). */ +int tx_pkt_no_drop_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_no_drop_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); + +/* The following four transmit functions are used if (1) the task is + only sending to one destination and (2), packets are never dropped + by the task (the out parameter is ignored). 
*/ +int tx_pkt_no_drop_never_discard_hw1_lat_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_no_drop_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_no_drop_never_discard_hw1_no_pointer(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_no_drop_never_discard_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_never_discard_hw1_lat_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_never_discard_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); + +/* The two "self" transmit functions are used if the task is + transmitting to another task running on the same core and the + destination task ID is one higher than the current task. The never_discard + version of the function ignores the out parameter and should + therefor only be used if the task never discards packets.*/ +int tx_pkt_self(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); +int tx_pkt_never_discard_self(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out); + +/* The following four tarnsmit functions are the most general. They + are used if (1) packets can be dropped and (2) there are multiple + outputs in the task. 
*/ +int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); + +int tx_pkt_trace(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +int tx_pkt_distr(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); +int tx_pkt_bw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); + +uint16_t tx_try_sw1(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +uint16_t tx_try_hw1(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); +uint16_t tx_try_self(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts); + +/* When there are no output ports, this function is configured as the + tx function. This tx function can be used to make each task a + sink. */ +int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out); + +#endif /* _TX_PKT_H_ */ diff --git a/VNFs/DPPD-PROX/version.h b/VNFs/DPPD-PROX/version.h new file mode 100644 index 00000000..b906b14b --- /dev/null +++ b/VNFs/DPPD-PROX/version.h @@ -0,0 +1,34 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +*/ + +#ifndef _VERSION_H_ +#define _VERSION_H_ + +#define STRINGIFY(s) #s +#define SSTR(s) STRINGIFY(s) + +/* PROGRAM_NAME defined through Makefile */ +#define VERSION_MAJOR 0 +#define VERSION_MINOR 39 +#define VERSION_REV 0 + +#if VERSION_REV > 0 +#define VERSION_STR "v" SSTR(VERSION_MAJOR) "." SSTR(VERSION_MINOR) "." SSTR(VERSION_REV) +#else +#define VERSION_STR "v" SSTR(VERSION_MAJOR) "." SSTR(VERSION_MINOR) +#endif + +#endif /* _VERSION_H_ */ diff --git a/VNFs/DPPD-PROX/vxlangpe_nsh.h b/VNFs/DPPD-PROX/vxlangpe_nsh.h new file mode 100644 index 00000000..2e7cfc76 --- /dev/null +++ b/VNFs/DPPD-PROX/vxlangpe_nsh.h @@ -0,0 +1,44 @@ +/* +// Copyright (c) 2010-2017 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+*/ + +#ifndef _VXLANGPE_NSH_H_ +#define _VXLANGPE_NSH_H_ + +struct nsh_hdr { + uint16_t version :2; + uint16_t oa_flag :1; + uint16_t cm_flag :1; + uint16_t reserved :6; + uint16_t length :6; + uint8_t md_type; + uint8_t next_proto; + uint32_t sfp_index :24; + uint32_t sf_index :8; + uint32_t ctx_1; + uint32_t ctx_2; + uint32_t ctx_3; + uint32_t ctx_4; +} __attribute__((__packed__)); + +struct vxlan_gpe_hdr { + uint8_t flag_0; + uint8_t flag_1; + uint8_t reserved; + uint8_t next_proto; + uint32_t vni_res; +} __attribute__((__packed__)); + +#endif /* _VXLANGPE_NSH_H_ */ diff --git a/VNFs/UDP_Replay/Makefile b/VNFs/UDP_Replay/Makefile index 6e24f0ac..e2375779 100644 --- a/VNFs/UDP_Replay/Makefile +++ b/VNFs/UDP_Replay/Makefile @@ -1,4 +1,4 @@ -# Copyright (c) 2016-2017 Intel Corporation +# Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,14 +18,72 @@ endif # Default target, can be overriden by command line or environment RTE_TARGET ?= x86_64-native-linuxapp-gcc +DIRS-y += pipeline include $(RTE_SDK)/mk/rte.vars.mk # binary name APP = UDP_Replay +VPATH += $(VNF_CORE)/common/vnf_common +VPATH += $(VNF_CORE)/common/VIL/pipeline_arpicmp +VPATH += $(VNF_CORE)/common/VIL/conntrack +VPATH += $(VNF_CORE)/common/VIL/pipeline_common +VPATH += $(VNF_CORE)/common/VIL/pipeline_loadb +VPATH += $(VNF_CORE)/common/VIL/pipeline_master +VPATH += $(VNF_CORE)/common/VIL/pipeline_passthrough +VPATH += $(SRCDIR)/pipeline +VPATH += $(VNF_CORE)/common/VIL/pipeline_txrx +VPATH += $(VNF_CORE)/common/VIL/l2l3_stack + +INC += $(wildcard *.h) +INC += $(wildcard pipeline/*.h) +INC += $(wildcard $(VNF_CORE)/common/vnf_common/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_arpicmp/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/conntrack/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_loadb/*.h) +INC += $(wildcard 
$(VNF_CORE)/common/VIL/pipeline_common/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_master/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_passthrough/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_txrx/*.h) +INC += $(wildcard $(VNF_CORE)/common/VIL/l2l3_stack/*.h) + +CFLAGS += -I$(SRCDIR) -mrtm -mhle -I$(SRCDIR)/pipeline -I$(VNF_CORE)/common/vnf_common +CFLAGS += -I$(VNF_CORE)/common/VIL/conntrack -I$(VNF_CORE)/common/VIL/l2l3_stack +CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_common -I$(VNF_CORE)/common/VIL/pipeline_loadb +CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_master -I$(VNF_CORE)/common/VIL/pipeline_passthrough +CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_txrx +CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_arpicmp + # all source are stored in SRCS-y -SRCS-y := main.c parse_obj_list.c +SRCS-y := main.c +SRCS-y += parse_obj_list.c +SRCS-y += config_parse.c +SRCS-y += config_parse_tm.c +SRCS-y += config_check.c + +SRCS-y += lib_arp.c +SRCS-y += lib_icmpv6.c +SRCS-y += interface.c +SRCS-y += hle.c +SRCS-y += tsx.c +SRCS-y += l2_proto.c +SRCS-y += l3fwd_main.c +SRCS-y += l3fwd_lpm4.c +SRCS-y += l3fwd_lpm6.c +SRCS-y += bond.c + +SRCS-y += pipeline_common_be.c +SRCS-y += pipeline_common_fe.c +SRCS-y += pipeline_master_be.c +SRCS-y += pipeline_master.c +SRCS-y += pipeline_passthrough_be.c +SRCS-y += pipeline_passthrough.c +SRCS-y += pipeline_arpicmp.c +SRCS-y += pipeline_loadb.c +SRCS-y += pipeline_loadb_be.c +SRCS-y += vnf_common.c +SRCS-y += pipeline_arpicmp_be.c CFLAGS += -O3 $(USER_FLAGS) CFLAGS += $(WERROR_FLAGS) diff --git a/VNFs/UDP_Replay/main.c b/VNFs/UDP_Replay/main.c index 5242adba..1b37c181 100644 --- a/VNFs/UDP_Replay/main.c +++ b/VNFs/UDP_Replay/main.c @@ -80,9 +80,22 @@ performance of the solution should be sufficient for testing the UDP NAT perform #include <cmdline_parse_num.h> #include <cmdline_parse_string.h> #include <cmdline_parse_ipaddr.h> +#include <rte_errno.h> +#include <rte_cfgfile.h> #include "parse_obj_list.h" 
+#include <lib_arp.h> +#include "l2_proto.h" +#include "interface.h" +#include "l3fwd_common.h" +#include "l3fwd_lpm4.h" +#include "l3fwd_lpm6.h" +#include "lib_icmpv6.h" +#include "app.h" +#include "vnf_common.h" +#define IN6ADDRSZ 16 +#define INADDRSZ 4 #define APP_LOOKUP_EXACT_MATCH 0 #define APP_LOOKUP_LPM 1 #define DO_RFC_1812_CHECKS @@ -171,38 +184,55 @@ performance of the solution should be sufficient for testing the UDP NAT perform */ #define RTE_TEST_RX_DESC_DEFAULT 128 #define RTE_TEST_TX_DESC_DEFAULT 512 -static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; -static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; static uint64_t rcv_pkt_count[32] = {0}; static uint64_t tx_pkt_count[32] = {0}; +static uint32_t arp_support; +unsigned num_ports; +struct sockaddr_in ipaddr1, ipaddr2; /* ethernet addresses of ports */ static uint64_t dest_eth_addr[RTE_MAX_ETHPORTS]; -static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; - static __m128i val_eth[RTE_MAX_ETHPORTS]; cmdline_parse_ctx_t main_ctx[]; +uint32_t timer_lcore; +uint32_t exit_loop = 1; +port_config_t *port_config; +#define MEMPOOL_SIZE 32 * 1024 +#define BUFFER_SIZE 2048 +#define CACHE_SIZE 256 /* replace first 12B of the ethernet header. */ #define MASK_ETH 0x3f +#define IP_TYPE_IPV4 0 +#define IP_TYPE_IPV6 1 +#define MAX_IP 32 +const char* ipv4[MAX_IP]; +uint8_t link_ipv6[MAX_IP][16]; +uint32_t type, numports; /* mask of enabled ports */ static uint32_t enabled_port_mask = 0; static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */ static int numa_on = 1; /**< NUMA is enabled by default. */ static int csum_on = 1; /**< NUMA is enabled by default. */ +struct pipeline_params def_pipeline_params = { + .n_ports_in = 0, + .n_ports_out = 0, + .n_msgq = 0, + .socket_id = 0, + .n_args = 0, + .log_level = 0, +}; #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) static int ipv6 = 0; /**< ipv6 is false by default. 
*/ #endif +void convert_ipstr_to_numeric(void); -static void -print_ethaddr(const char *name, const struct ether_addr *eth_addr); - -int print_stats(void); +int print_l4stats(void); int clear_stats(void); struct mbuf_table { @@ -265,7 +295,29 @@ static struct rte_eth_conf port_conf = { }, }; -static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; +/* empty vmdq configuration structure. Filled in programatically */ +static struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = 8, + .hthresh = 8, + .wthresh = 4, + }, + .rx_free_thresh = 64, + .rx_drop_en = 0, + .rx_deferred_start = 0, +}; +static struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = 36, + .hthresh = 0, + .wthresh = 0, + }, + .tx_rs_thresh = 0, + .tx_free_thresh = 0, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + .tx_deferred_start = 0, +}; #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) @@ -362,8 +414,6 @@ static struct ipv6_udp_replay_route ipv6_udp_replay_route_array[] = { }; typedef struct rte_hash lookup_struct_t; -static lookup_struct_t *ipv4_udp_replay_lookup_struct[NB_SOCKETS]; -static lookup_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS]; #ifdef RTE_ARCH_X86_64 /* default to 4 million hash entries (approx) */ @@ -375,6 +425,185 @@ static lookup_struct_t *ipv6_udp_replay_lookup_struct[NB_SOCKETS]; #define HASH_ENTRY_NUMBER_DEFAULT 4 static uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT; +void +app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp) +{ + cp->state = 1; +} +void +app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp) +{ + cp->state = 0; +} + +/* int + * inet_pton_ipv4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. 
+ */ +static int inet_pton_ipv4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + if ((pch = strchr(digits, ch)) != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton_ipv6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. 
+ */ +static int inet_pton_ipv6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned dbloct_count = 0; + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + while ((ch = *src++) != '\0') { + const char *pch; + if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton_ipv4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). 
*/ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + if (dbloct_count == 8) + return 0; + const int n = tp - colonp; + int i; + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} +static int my_inet_pton_ipv6(int af, const char *src, void *dst) +{ + switch (af) { + case AF_INET: + return inet_pton_ipv4(src, dst); + case AF_INET6: + return inet_pton_ipv6(src, dst); + default: + errno = EAFNOSUPPORT; + return -1; + } +} +void convert_ipstr_to_numeric(void) +{ + uint32_t i; + for (i = 0; i < numports; i++) + { + if (type == IP_TYPE_IPV4) { + memset(&ipaddr1, '\0', sizeof(struct sockaddr_in)); + ipaddr1.sin_addr.s_addr = inet_addr(ipv4[i]); + ifm_add_ipv4_port(i, ipaddr1.sin_addr.s_addr, 24); + } else if (type == IP_TYPE_IPV6) { + ifm_add_ipv6_port(i, &link_ipv6[i][0], 128); + } + } +} static inline uint32_t ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len, @@ -401,6 +630,43 @@ ipv4_hash_crc(const void *data, __rte_unused uint32_t data_len, #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */ return (init_val); } +static int arp_pkts; +static inline int check_arpicmp(struct rte_mbuf *pkt) +{ + uint8_t in_port_id = pkt->port; + uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12; + uint16_t *eth_proto = + RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset); + uint8_t *protocol; + uint32_t prot_offset = + MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST; + protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset); + if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) || + ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4) + && (*protocol == IP_PROTOCOL_ICMP))) { + process_arpicmp_pkt(pkt, ifm_get_port(in_port_id)); + arp_pkts++; + return 0; + } + return 1; +} +static inline int 
check_arpicmpv6(struct rte_mbuf *pkt) +{ + struct ether_hdr *eth_h; + struct ipv6_hdr *ipv6_h; + uint8_t in_port_id = pkt->port; + uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12; + uint16_t *eth_proto = + RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset); + eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr)); + if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6) + && (ipv6_h->proto == ICMPV6_PROTOCOL_ID)) { + process_icmpv6_pkt(pkt, ifm_get_port(in_port_id)); + return 0; + } + return 1; +} static inline uint32_t ipv6_hash_crc(const void *data, __rte_unused uint32_t data_len, uint32_t init_val) @@ -650,7 +916,7 @@ send_packetsx4(struct lcore_conf *qconf, uint8_t port, #ifdef DO_RFC_1812_CHECKS static inline int -is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len) +is_valid_pkt_ipv4(struct ipv4_hdr *pkt, uint32_t link_len) { /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */ /* @@ -777,6 +1043,22 @@ simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_con struct ether_hdr tmp; struct ipv4_hdr *ipv4_hdr[8]; struct udp_hdr *udp_hdr[8]; + int i; + l2_phy_interface_t *port = ifm_get_port(portid); + if (port == NULL) { + printf("port may be un initialized\n"); + return; + } + if (unlikely(arp_support)) { + check_arpicmp(m[0]); + check_arpicmp(m[1]); + check_arpicmp(m[2]); + check_arpicmp(m[3]); + check_arpicmp(m[4]); + check_arpicmp(m[5]); + check_arpicmp(m[6]); + check_arpicmp(m[7]); + } eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *); eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *); @@ -790,7 +1072,6 @@ simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_con memset(&tmp,0,sizeof (struct ether_hdr)); - int i; for(i=0;i<8;i++) { @@ -817,12 +1098,30 @@ simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_con ipv4_hdr[7] = rte_pktmbuf_mtod_offset(m[7], struct ipv4_hdr *, sizeof(struct 
ether_hdr)); struct ipv4_hdr temp_ipv4; - for(i=0;i<8;i++) - { - temp_ipv4.dst_addr = ipv4_hdr[i]->dst_addr; - ipv4_hdr[i]->dst_addr = ipv4_hdr[i]->src_addr; - ipv4_hdr[i]->src_addr = temp_ipv4.dst_addr; - } + temp_ipv4.dst_addr = ipv4_hdr[0]->dst_addr; + ipv4_hdr[0]->dst_addr = ipv4_hdr[0]->src_addr; + ipv4_hdr[0]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[1]->dst_addr; + ipv4_hdr[1]->dst_addr = ipv4_hdr[1]->src_addr; + ipv4_hdr[1]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[2]->dst_addr; + ipv4_hdr[2]->dst_addr = ipv4_hdr[2]->src_addr; + ipv4_hdr[2]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[3]->dst_addr; + ipv4_hdr[3]->dst_addr = ipv4_hdr[3]->src_addr; + ipv4_hdr[3]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[4]->dst_addr; + ipv4_hdr[4]->dst_addr = ipv4_hdr[4]->src_addr; + ipv4_hdr[4]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[5]->dst_addr; + ipv4_hdr[5]->dst_addr = ipv4_hdr[5]->src_addr; + ipv4_hdr[5]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[6]->dst_addr; + ipv4_hdr[6]->dst_addr = ipv4_hdr[6]->src_addr; + ipv4_hdr[6]->src_addr = temp_ipv4.dst_addr; + temp_ipv4.dst_addr = ipv4_hdr[7]->dst_addr; + ipv4_hdr[7]->dst_addr = ipv4_hdr[7]->src_addr; + ipv4_hdr[7]->src_addr = temp_ipv4.dst_addr; /* Handle UDP headers.*/ udp_hdr[0] = rte_pktmbuf_mtod_offset(m[0], struct udp_hdr *, @@ -845,44 +1144,62 @@ simple_ipv4_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_con /*1) memcpy or assignment.*/ struct udp_hdr temp_udp; - for(i=0;i<8;i++) - { - temp_udp.dst_port = udp_hdr[i]->dst_port; - udp_hdr[i]->dst_port = udp_hdr[i]->src_port; - udp_hdr[i]->src_port = temp_udp.dst_port; - } + temp_udp.dst_port = udp_hdr[0]->dst_port; + udp_hdr[0]->dst_port = udp_hdr[0]->src_port; + udp_hdr[0]->src_port = temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[1]->dst_port; + udp_hdr[1]->dst_port = udp_hdr[1]->src_port; + udp_hdr[1]->src_port = 
temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[2]->dst_port; + udp_hdr[2]->dst_port = udp_hdr[2]->src_port; + udp_hdr[2]->src_port = temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[3]->dst_port; + udp_hdr[3]->dst_port = udp_hdr[3]->src_port; + udp_hdr[3]->src_port = temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[4]->dst_port; + udp_hdr[4]->dst_port = udp_hdr[4]->src_port; + udp_hdr[4]->src_port = temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[5]->dst_port; + udp_hdr[5]->dst_port = udp_hdr[5]->src_port; + udp_hdr[5]->src_port = temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[6]->dst_port; + udp_hdr[6]->dst_port = udp_hdr[6]->src_port; + udp_hdr[6]->src_port = temp_udp.dst_port; + temp_udp.dst_port = udp_hdr[7]->dst_port; + udp_hdr[7]->dst_port = udp_hdr[7]->src_port; + udp_hdr[7]->src_port = temp_udp.dst_port; #ifdef DO_RFC_1812_CHECKS /* Check to make sure the packet is valid (RFC1812) */ uint8_t valid_mask = MASK_ALL_PKTS; - if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[0], m[0]->pkt_len) < 0) { rte_pktmbuf_free(m[0]); valid_mask &= EXCLUDE_1ST_PKT; } - if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[1], m[1]->pkt_len) < 0) { rte_pktmbuf_free(m[1]); valid_mask &= EXCLUDE_2ND_PKT; } - if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[2], m[2]->pkt_len) < 0) { rte_pktmbuf_free(m[2]); valid_mask &= EXCLUDE_3RD_PKT; } - if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[3], m[3]->pkt_len) < 0) { rte_pktmbuf_free(m[3]); valid_mask &= EXCLUDE_4TH_PKT; } - if (is_valid_ipv4_pkt(ipv4_hdr[4], m[4]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[4], m[4]->pkt_len) < 0) { rte_pktmbuf_free(m[4]); valid_mask &= EXCLUDE_5TH_PKT; } - if (is_valid_ipv4_pkt(ipv4_hdr[5], m[5]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[5], m[5]->pkt_len) < 0) { rte_pktmbuf_free(m[5]); valid_mask &= EXCLUDE_6TH_PKT; } - 
if (is_valid_ipv4_pkt(ipv4_hdr[6], m[6]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[6], m[6]->pkt_len) < 0) { rte_pktmbuf_free(m[6]); valid_mask &= EXCLUDE_7TH_PKT; } - if (is_valid_ipv4_pkt(ipv4_hdr[7], m[7]->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr[7], m[7]->pkt_len) < 0) { rte_pktmbuf_free(m[7]); valid_mask &= EXCLUDE_8TH_PKT; } @@ -948,10 +1265,28 @@ static inline void simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_conf *qconf) { struct ether_hdr *eth_hdr[8],tmp; + int i; __attribute__((unused)) struct ipv6_hdr *ipv6_hdr[8], temp_ipv6; int32_t ret[8]; union ipv6_5tuple_host key[8]; struct udp_hdr *udp_hdr[8]; + l2_phy_interface_t *port = ifm_get_port(portid); + if (port == NULL) { + printf("port may be un initialized\n"); + return; + } + + if (unlikely(arp_support)) { + check_arpicmpv6(m[0]); + check_arpicmpv6(m[1]); + check_arpicmpv6(m[2]); + check_arpicmpv6(m[3]); + check_arpicmpv6(m[4]); + check_arpicmpv6(m[5]); + check_arpicmpv6(m[6]); + check_arpicmpv6(m[7]); + } + eth_hdr[0] = rte_pktmbuf_mtod(m[0], struct ether_hdr *); eth_hdr[1] = rte_pktmbuf_mtod(m[1], struct ether_hdr *); @@ -964,7 +1299,6 @@ simple_ipv6_replay_8pkts(struct rte_mbuf *m[8], uint8_t portid, struct lcore_con memset(&tmp,0,sizeof (struct ether_hdr)); - int i; for(i=0;i<8;i++) { ether_addr_copy(ð_hdr[i]->s_addr, &tmp.s_addr); @@ -1046,9 +1380,23 @@ udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf * { struct ether_hdr *eth_hdr,tmp; struct ipv4_hdr *ipv4_hdr,temp_ipv4; - uint8_t dst_port; struct udp_hdr *udp_hdr,temp_udp; + l2_phy_interface_t *port = ifm_get_port(portid); + if (port == NULL) { + printf("port may be un initialized\n"); + return; + } + if (m == NULL) { + printf("Null packet received\n"); + return; + } + if (unlikely(arp_support)) { + if (!check_arpicmp(m)) + return; + } + if (qconf == NULL) + printf("qconf configuration is NULL\n"); eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); 
ether_addr_copy(ð_hdr->s_addr, &tmp.s_addr); ether_addr_copy(ð_hdr->d_addr, ð_hdr->s_addr); @@ -1064,15 +1412,12 @@ udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf * ipv4_hdr->src_addr = temp_ipv4.dst_addr; #ifdef DO_RFC_1812_CHECKS /* Check to make sure the packet is valid (RFC1812) */ - if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) { + if (is_valid_pkt_ipv4(ipv4_hdr, m->pkt_len) < 0) { rte_pktmbuf_free(m); return; } #endif - dst_port = get_ipv4_dst_port(ipv4_hdr, portid, - qconf->ipv4_lookup_struct); - dst_port = portid; #ifdef DO_RFC_1812_CHECKS /* Update time to live and header checksum */ @@ -1088,7 +1433,7 @@ udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf * udp_hdr->dst_port = udp_hdr->src_port; udp_hdr->src_port = temp_udp.dst_port; - send_single_packet(m, dst_port); + send_single_packet(m, portid); } else if ((rte_cpu_to_be_16(eth_h->ether_type)) == ETHER_TYPE_IPv6) { /* Handle IPv6 headers.*/ struct ipv6_hdr *ipv6_hdr,temp_ipv6; @@ -1101,8 +1446,6 @@ udp_replay_simple_replay(struct rte_mbuf *m, uint8_t portid, struct lcore_conf * memcpy(ipv6_hdr->dst_addr,ipv6_hdr->src_addr,16); memcpy(ipv6_hdr->src_addr,temp_ipv6.dst_addr,16); - - dst_port = get_ipv6_dst_port(ipv6_hdr, portid, qconf->ipv6_lookup_struct); /* Handle UDP headers.*/ udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *, (sizeof(struct ether_hdr)+sizeof(struct ipv6_hdr))); @@ -1513,6 +1856,7 @@ main_loop(__attribute__((unused)) void *dummy) int i, j, nb_rx; uint8_t portid, queueid; struct lcore_conf *qconf; + l2_phy_interface_t *port; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; @@ -1547,7 +1891,7 @@ main_loop(__attribute__((unused)) void *dummy) portid, queueid); } - while (1) { + while (exit_loop) { cur_tsc = rte_rdtsc(); @@ -1579,8 +1923,15 @@ main_loop(__attribute__((unused)) void *dummy) for (i = 0; i < qconf->n_rx_queue; ++i) { portid = qconf->rx_queue_list[i].port_id; 
queueid = qconf->rx_queue_list[i].queue_id; - nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, - MAX_PKT_BURST); + port = ifm_get_port(portid); + if (port != NULL) { + nb_rx = port->retrieve_bulk_pkts(portid, + queueid, pkts_burst); + port->n_rxpkts += nb_rx; + } else { + printf("port may be un initialized\n"); + return 0; + } if(nb_rx) rcv_pkt_count[portid] += nb_rx; if (nb_rx == 0) @@ -1595,20 +1946,37 @@ main_loop(__attribute__((unused)) void *dummy) */ int32_t n = RTE_ALIGN_FLOOR(nb_rx, 8); for (j = 0; j < n; j += 8) { - uint32_t pkt_type = - pkts_burst[j]->packet_type & - pkts_burst[j+1]->packet_type & - pkts_burst[j+2]->packet_type & - pkts_burst[j+3]->packet_type & - pkts_burst[j+4]->packet_type & - pkts_burst[j+5]->packet_type & - pkts_burst[j+6]->packet_type & - pkts_burst[j+7]->packet_type; - if (pkt_type & RTE_PTYPE_L3_IPV4) { + struct ether_hdr *eth_h0 = + rte_pktmbuf_mtod(pkts_burst[j], struct ether_hdr *); + struct ether_hdr *eth_h1 = + rte_pktmbuf_mtod(pkts_burst[j+1], struct ether_hdr *); + struct ether_hdr *eth_h2 = + rte_pktmbuf_mtod(pkts_burst[j+2], struct ether_hdr *); + struct ether_hdr *eth_h3 = + rte_pktmbuf_mtod(pkts_burst[j+3], struct ether_hdr *); + struct ether_hdr *eth_h4 = + rte_pktmbuf_mtod(pkts_burst[j+4], struct ether_hdr *); + struct ether_hdr *eth_h5 = + rte_pktmbuf_mtod(pkts_burst[j+5], struct ether_hdr *); + struct ether_hdr *eth_h6 = + rte_pktmbuf_mtod(pkts_burst[j+6], struct ether_hdr *); + struct ether_hdr *eth_h7 = + rte_pktmbuf_mtod(pkts_burst[j+7], struct ether_hdr *); + + uint16_t ether_type; + ether_type = (rte_cpu_to_be_16(eth_h0->ether_type) & + rte_cpu_to_be_16(eth_h1->ether_type) & + rte_cpu_to_be_16(eth_h2->ether_type) & + rte_cpu_to_be_16(eth_h3->ether_type) & + rte_cpu_to_be_16(eth_h4->ether_type) & + rte_cpu_to_be_16(eth_h5->ether_type) & + rte_cpu_to_be_16(eth_h6->ether_type) & + rte_cpu_to_be_16(eth_h7->ether_type)); + + if (ether_type == ETHER_TYPE_IPv4) { simple_ipv4_replay_8pkts( &pkts_burst[j], portid, 
qconf); - } else if (pkt_type & - RTE_PTYPE_L3_IPV6) { + } else if (ether_type == ETHER_TYPE_IPv6) { simple_ipv6_replay_8pkts(&pkts_burst[j], portid, qconf); } else { @@ -1630,6 +1998,7 @@ main_loop(__attribute__((unused)) void *dummy) portid, qconf); } } + for (; j < nb_rx ; j++) { udp_replay_simple_replay(pkts_burst[j], portid, qconf); @@ -1780,17 +2149,17 @@ main_loop(__attribute__((unused)) void *dummy) /* display usage */ int -print_stats(void) +print_l4stats(void) { unsigned portid; - uint16_t i; + uint16_t i, j=0; printf ("\n"); printf ("UDP_Replay stats:\n"); printf ("--------------\n"); - printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop \n"); + printf (" Port Rx Packet Tx Packet Rx Pkt Drop Tx Pkt Drop arp_pkts\n"); for (i = 0; i < nb_lcore_params; ++i) { portid = lcore_params[i].port_id; - printf (" %u %lu %lu 0 0", portid, rcv_pkt_count[(uint64_t)portid], tx_pkt_count[(uint64_t)portid]); + printf ("%5u%15lu%15lu%17d%17d%14u",portid, rcv_pkt_count[portid], tx_pkt_count[portid],j,j, arp_pkts); printf ("\n"); } printf ("\n"); @@ -1806,7 +2175,7 @@ clear_stats(void) rcv_pkt_count[i] = 0; tx_pkt_count[i] = 0; } - print_stats(); + print_l4stats(); return 0; } @@ -1931,6 +2300,32 @@ static int parse_max_pkt_len(const char *pktlen) } static int +parse_link_ip(const char *file_name) +{ + uint32_t i, type; + struct rte_cfgfile *file; + const char *entry; + char buf[256]; + file = rte_cfgfile_load(file_name, 0); + entry = rte_cfgfile_get_entry(file, "linkip", "num_ports"); + numports = (uint32_t)atoi(entry); + if (numports <= 0 || numports > 32) + rte_panic("numports is not valid\n"); + entry = rte_cfgfile_get_entry(file, "linkip", "ip_type"); + type = (uint32_t)atoi(entry); + for (i = 0;i < numports; i++) { + sprintf(buf, "port%d", i); + entry = rte_cfgfile_get_entry(file, "linkip", buf); + if (entry == NULL) + continue; + if (!type) + ipv4[i] = strdup(entry); + else if (type) + my_inet_pton_ipv6(AF_INET6, entry, &link_ipv6[i][0]); + } + return 0; +} 
+static int parse_portmask(const char *portmask) { char *end = NULL; @@ -2073,10 +2468,14 @@ parse_args(int argc, char **argv) argvopt = argv; - while ((opt = getopt_long(argc, argvopt, "p:P", + while ((opt = getopt_long(argc, argvopt, "s:p:P", lgopts, &option_index)) != EOF) { switch (opt) { + case 's': + parse_link_ip(optarg); + arp_support = 1; + break; /* portmask */ case 'p': enabled_port_mask = parse_portmask(optarg); @@ -2118,6 +2517,7 @@ parse_args(int argc, char **argv) sizeof(CMD_LINE_OPT_NO_HW_CSUM))) { printf("numa is hw ip checksum \n"); port_conf.rxmode.hw_ip_checksum = 0; + rx_conf.rx_free_thresh = 30; csum_on = 0; } @@ -2178,14 +2578,6 @@ parse_args(int argc, char **argv) return ret; } -static void -print_ethaddr(const char *name, const struct ether_addr *eth_addr) -{ - char buf[ETHER_ADDR_FMT_SIZE]; - ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr); - printf("%s%s", name, buf); -} - #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) static void convert_ipv4_5tuple(struct ipv4_5tuple* key1, @@ -2349,69 +2741,6 @@ populate_ipv6_many_flow_into_table(const struct rte_hash* h, printf("Hash: Adding 0x%x keys\n", nr_flow); } -static void -setup_hash(int socketid) -{ - struct rte_hash_parameters ipv4_udp_replay_hash_params = { - .name = NULL, - .entries = UDP_Replay_HASH_ENTRIES, - .key_len = sizeof(union ipv4_5tuple_host), - .hash_func = ipv4_hash_crc, - .hash_func_init_val = 0, - }; - - struct rte_hash_parameters ipv6_udp_replay_hash_params = { - .name = NULL, - .entries = UDP_Replay_HASH_ENTRIES, - .key_len = sizeof(union ipv6_5tuple_host), - .hash_func = ipv6_hash_crc, - .hash_func_init_val = 0, - }; - - char s[64]; - - /* create ipv4 hash */ - snprintf(s, sizeof(s), "ipv4_udp_replay_hash_%d", socketid); - ipv4_udp_replay_hash_params.name = s; - ipv4_udp_replay_hash_params.socket_id = socketid; - ipv4_udp_replay_lookup_struct[socketid] = rte_hash_create(&ipv4_udp_replay_hash_params); - if (ipv4_udp_replay_lookup_struct[socketid] == NULL) - 
rte_exit(EXIT_FAILURE, "Unable to create the udp_replay hash on " - "socket %d\n", socketid); - - /* create ipv6 hash */ - snprintf(s, sizeof(s), "ipv6_udp_replay_hash_%d", socketid); - ipv6_udp_replay_hash_params.name = s; - ipv6_udp_replay_hash_params.socket_id = socketid; - ipv6_udp_replay_lookup_struct[socketid] = rte_hash_create(&ipv6_udp_replay_hash_params); - if (ipv6_udp_replay_lookup_struct[socketid] == NULL) - rte_exit(EXIT_FAILURE, "Unable to create the udp_replay hash on " - "socket %d\n", socketid); - - if (hash_entry_number != HASH_ENTRY_NUMBER_DEFAULT) { - /* For testing hash matching with a large number of flows we - * generate millions of IP 5-tuples with an incremented dst - * address to initialize the hash table. */ - if (ipv6 == 0) { - /* populate the ipv4 hash */ - populate_ipv4_many_flow_into_table( - ipv4_udp_replay_lookup_struct[socketid], hash_entry_number); - } else { - /* populate the ipv6 hash */ - populate_ipv6_many_flow_into_table( - ipv6_udp_replay_lookup_struct[socketid], hash_entry_number); - } - } else { - /* Use data in ipv4/ipv6 udp_replay lookup table directly to initialize the hash table */ - if (ipv6 == 0) { - /* populate the ipv4 hash */ - populate_ipv4_few_flow_into_table(ipv4_udp_replay_lookup_struct[socketid]); - } else { - /* populate the ipv6 hash */ - populate_ipv6_few_flow_into_table(ipv6_udp_replay_lookup_struct[socketid]); - } - } -} #endif #if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) @@ -2495,51 +2824,10 @@ setup_lpm(int socketid) } #endif -static int -init_mem(unsigned nb_mbuf) -{ - struct lcore_conf *qconf; - int socketid; - unsigned lcore_id; - char s[64]; - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { - if (rte_lcore_is_enabled(lcore_id) == 0) - continue; - if (numa_on) - socketid = rte_lcore_to_socket_id(lcore_id); - else - socketid = 0; - if (socketid >= NB_SOCKETS) { - rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n", - socketid, lcore_id, NB_SOCKETS); - } - if 
(pktmbuf_pool[socketid] == NULL) { - snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); - pktmbuf_pool[socketid] = - rte_pktmbuf_pool_create(s, nb_mbuf, - MEMPOOL_CACHE_SIZE, 0, - RTE_MBUF_DEFAULT_BUF_SIZE, socketid); - if (pktmbuf_pool[socketid] == NULL) - rte_exit(EXIT_FAILURE, - "Cannot init mbuf pool on socket %d\n", socketid); - else - printf("Allocated mbuf pool on socket %d\n", socketid); -#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) - setup_lpm(socketid); -#else - setup_hash(socketid); -#endif - } - qconf = &lcore_conf[lcore_id]; - qconf->ipv4_lookup_struct = ipv4_udp_replay_lookup_struct[socketid]; - qconf->ipv6_lookup_struct = ipv6_udp_replay_lookup_struct[socketid]; - } - return 0; -} /* Check the link status of all ports in up to 9s, and print them finally */ static void @@ -2599,16 +2887,14 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask) int main(int argc, char **argv) { - struct lcore_conf *qconf; - struct rte_eth_dev_info dev_info; - struct rte_eth_txconf *txconf; int ret; unsigned nb_ports; - uint16_t queueid; unsigned lcore_id; - uint32_t n_tx_queue, nb_lcores; - uint8_t portid, nb_rx_queue, queue, socketid; + uint32_t n_tx_queue; + uint8_t portid, nb_rx_queue; struct cmdline *cl; + uint32_t size; + struct pipeline_params *params; /* init EAL */ ret = rte_eal_init(argc, argv); @@ -2616,6 +2902,7 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); argc -= ret; argv += ret; + timer_lcore = rte_lcore_id(); /* parse application arguments (after the EAL ones) */ ret = parse_args(argc, argv); if (ret < 0) @@ -2628,20 +2915,31 @@ main(int argc, char **argv) if (ret < 0) rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); + params = rte_malloc(NULL, sizeof(*params), RTE_CACHE_LINE_SIZE); + memcpy(params, &def_pipeline_params, sizeof(def_pipeline_params)); + lib_arp_init(params, NULL); + ifm_init(); nb_ports = rte_eth_dev_count(); + num_ports = nb_ports; if (nb_ports > RTE_MAX_ETHPORTS) nb_ports = 
RTE_MAX_ETHPORTS; if (check_port_config(nb_ports) < 0) rte_exit(EXIT_FAILURE, "check_port_config failed\n"); - nb_lcores = rte_lcore_count(); - + /* + *Configuring port_config_t structure for interface manager initialization + */ + size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t)); + port_config = rte_zmalloc(NULL, (RTE_MAX_ETHPORTS * size), RTE_CACHE_LINE_SIZE); + if (port_config == NULL) + rte_panic("port_config is NULL: Memory Allocation failure\n"); /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { printf("\nSkipping disabled port %d\n", portid); + num_ports--; continue; } @@ -2650,116 +2948,38 @@ main(int argc, char **argv) fflush(stdout); nb_rx_queue = get_port_n_rx_queues(portid); - n_tx_queue = nb_rx_queue; + n_tx_queue = nb_rx_queue; if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) n_tx_queue = MAX_TX_QUEUE_PER_PORT; - printf("Creating queues: nb_rxq=%d nb_txq=%u... ", - nb_rx_queue, (unsigned)n_tx_queue ); - ret = rte_eth_dev_configure(portid, nb_rx_queue, - (uint16_t)n_tx_queue, &port_conf); - if (ret < 0) { - printf("Port configuration failed : port: %d... 
Try with hw-ip-checksum disabled\n", portid); - port_conf.rxmode.hw_ip_checksum = 0; - ret = rte_eth_dev_configure(portid, nb_rx_queue, - (uint16_t)n_tx_queue, &port_conf); - if (ret < 0) - rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", - ret, portid); - } - /*Since its just swapping of MAC we dont have to fill our own src mac*/ - rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); - print_ethaddr(" Address:", &ports_eth_addr[portid]); - - /* init memory */ - ret = init_mem(NB_MBUF); - if (ret < 0) - rte_exit(EXIT_FAILURE, "init_mem failed\n"); - - /* init one TX queue per couple (lcore,port) */ - queueid = 0; - for (lcore_id = 0; lcore_id < n_tx_queue; lcore_id++) { - if (rte_lcore_is_enabled(lcore_id) == 0) - continue; - - if (numa_on) - socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); - else - socketid = 0; - printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); - fflush(stdout); - - rte_eth_dev_info_get(portid, &dev_info); - txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; - ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, - socketid, txconf); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " - "port=%d\n", ret, portid); - - qconf = &lcore_conf[lcore_id]; - qconf->tx_queue_id[portid] = queueid; - queueid++; - } - printf("\n"); - } - - for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { - if (rte_lcore_is_enabled(lcore_id) == 0) - continue; - qconf = &lcore_conf[lcore_id]; - printf("\nInitializing rx queues on lcore %u ... 
", lcore_id ); - fflush(stdout); - /* init RX queues */ - for(queue = 0; queue < qconf->n_rx_queue; ++queue) { - portid = qconf->rx_queue_list[queue].port_id; - queueid = qconf->rx_queue_list[queue].queue_id; - - if (numa_on) - socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); - else - socketid = 0; - - printf("rxq=%d,%d,%d ", portid, queueid, socketid); - fflush(stdout); - - ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, - socketid, - NULL, - pktmbuf_pool[socketid]); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," - "port=%d\n", ret, portid); - } - } - - printf("\n"); - - /* start ports */ - for (portid = 0; portid < nb_ports; portid++) { - if ((enabled_port_mask & (1 << portid)) == 0) { - continue; - } - /* Start device */ - ret = rte_eth_dev_start(portid); - if (ret < 0) - rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n", - ret, portid); - - /* - * If enabled, put device in promiscuous mode. - * This allows IO forwarding mode to forward packets - * to itself through 2 cross-connected ports of the - * target machine. 
- */ - if (promiscuous_on) - rte_eth_promiscuous_enable(portid); + port_config[portid].port_id = portid; + port_config[portid].nrx_queue = nb_rx_queue; + port_config[portid].ntx_queue = n_tx_queue; + port_config[portid].state = 1; + port_config[portid].promisc = promiscuous_on; + port_config[portid].mempool.pool_size = MEMPOOL_SIZE; + port_config[portid].mempool.buffer_size = BUFFER_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM; + port_config[portid].mempool.cache_size = CACHE_SIZE; + port_config[portid].mempool.cpu_socket_id = rte_socket_id(); + memcpy (&port_config[portid].port_conf, &port_conf, sizeof(struct rte_eth_conf)); + memcpy (&port_config[portid].rx_conf, &rx_conf, sizeof(struct rte_eth_rxconf)); + memcpy (&port_config[portid].tx_conf, &tx_conf, sizeof(struct rte_eth_txconf)); + + /* Enable TCP and UDP HW Checksum , when required */ + //port_config[portid].tx_conf.txq_flags &= + // ~(ETH_TXQ_FLAGS_NOXSUMTCP|ETH_TXQ_FLAGS_NOXSUMUDP); + + if (ifm_port_setup (portid, &port_config[portid])) + rte_panic ("Port Setup Failed: %"PRIu32"\n", portid); } check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask); + l3fwd_init(); + create_arp_table(); + create_nd_table(); + populate_lpm_routes(); + convert_ipstr_to_numeric(); /* launch per-lcore init on every lcore */ rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); cl = cmdline_stdin_new(main_ctx, "Replay>"); @@ -2767,6 +2987,7 @@ main(int argc, char **argv) rte_panic("Cannot create cmdline instance\n"); cmdline_interact(cl); cmdline_stdin_exit(cl); + exit_loop = 0; rte_exit(0, "Bye!\n"); RTE_LCORE_FOREACH_SLAVE(lcore_id) { if (rte_eal_wait_lcore(lcore_id) < 0) @@ -2821,7 +3042,7 @@ static void cmd_udp_replay_stats_parsed( __rte_unused struct cmdline *cl, __attribute__((unused)) void *data) { - print_stats(); + print_l4stats(); } cmdline_parse_token_string_t cmd_udp_replay_stats_udp_replay_string = diff --git a/VNFs/vACL/pipeline/pipeline_acl_be.c b/VNFs/vACL/pipeline/pipeline_acl_be.c index 
039d6d59..77f1784c 100644 --- a/VNFs/vACL/pipeline/pipeline_acl_be.c +++ b/VNFs/vACL/pipeline/pipeline_acl_be.c @@ -3177,7 +3177,7 @@ static void *pipeline_acl_init(struct pipeline_params *params, * p_acl->links_map[0] = 0xff; * p_acl->links_map[1] = 0xff;] */ - p_acl->traffic_type = MIX; + p_acl->traffic_type = IPv4_HDR_VERSION; for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) { p_acl->links_map[i] = 0xff; p_acl->port_out_id[i] = 0xff; diff --git a/VNFs/vACL/pipeline/pipeline_acl_be.h b/VNFs/vACL/pipeline/pipeline_acl_be.h index 8c85d888..bbb20e82 100644 --- a/VNFs/vACL/pipeline/pipeline_acl_be.h +++ b/VNFs/vACL/pipeline/pipeline_acl_be.h @@ -44,7 +44,6 @@ enum pipeline_acl_key_type { #define IP_HDR_DST_ADR_OFST 16 #define IP_VERSION_4 4 #define IP_VERSION_6 6 -#define MIX 10 /* IPv6 */ #define IP_HDR_SIZE_IPV6 40 diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c index 2da8b5e4..f9b0eb16 100644 --- a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c +++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c @@ -8450,7 +8450,7 @@ static void *pipeline_cgnapt_init(struct pipeline_params *params, void *arg) p_nat->hw_checksum_reqd = 0; p_nat->pub_ip_port_set = NULL; p_nat->pub_ip_count = 0; - p_nat->traffic_type = TRAFFIC_TYPE_MIX; + p_nat->traffic_type = TRAFFIC_TYPE_IPV4; p_nat->vnf_set = 0xff; /* For every init it should be reset */ diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h index 34031192..e5106bfb 100644 --- a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h +++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h @@ -325,7 +325,6 @@ f_ah( \ #define DST_ADR_OFST_IP4t6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + \ IPV6_HDR_DST_ADR_OFST - 20) -#define TRAFFIC_TYPE_MIX 0 #define TRAFFIC_TYPE_IPV4 4 #define TRAFFIC_TYPE_IPV6 6 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_1Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_1Thread.cfg index a274506b..067ffc35 100644 --- 
a/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_1Thread.cfg @@ -45,7 +45,6 @@ pktq_out = TXQ0.1 TXQ1.1 TXQ2.1 TXQ3.1 SWQ0 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_4Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_4Thread.cfg index 7d543c83..0454a6e3 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV4_MultiPortPair_4Thread.cfg @@ -45,7 +45,6 @@ pktq_out = TXQ0.1 TXQ1.1 TXQ2.1 TXQ3.1 SWQ0 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -66,7 +65,6 @@ pktq_out = TXQ0.2 TXQ1.2 TXQ2.2 TXQ3.2 SWQ1 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -87,7 +85,6 @@ pktq_out = TXQ0.3 TXQ1.3 TXQ2.3 TXQ3.3 SWQ2 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -107,7 +104,6 @@ pktq_out = TXQ0.4 TXQ1.4 TXQ2.4 TXQ3.4 SWQ3 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_1Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_1Thread.cfg index b90fad14..315c0dad 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_1Thread.cfg @@ -47,7 +47,6 @@ pktq_out = TXQ0.1 TXQ1.1 SWQ0 ;n_flows gets round up to power of 2 n_flows = 4000000 -pkt_type = ipv4 traffic_type = 4 ; 
tcp_time_wait controls timeout for closed connection, normally 120 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_4Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_4Thread.cfg index dee9d97d..49436ddf 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV4_SinglePortPair_4Thread.cfg @@ -47,7 +47,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -67,7 +66,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -87,7 +85,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -108,7 +105,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_1Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_1Thread.cfg index ddf746e2..bb350839 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_1Thread.cfg @@ -47,7 +47,6 @@ pktq_out = TXQ0.1 TXQ1.1 TXQ2.1 TXQ3.1 SWQ0 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_4Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_4Thread.cfg index 5b1a1cd7..7af5a450 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV6_MultiPortPair_4Thread.cfg @@ -47,7 +47,6 @@ pktq_out = TXQ0.1 TXQ1.1 TXQ2.1 TXQ3.1 SWQ0 ;n_flows 
gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -67,7 +66,6 @@ pktq_out = TXQ0.2 TXQ1.2 TXQ2.2 TXQ3.2 SWQ1 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -87,7 +85,6 @@ pktq_out = TXQ0.3 TXQ1.3 TXQ2.3 TXQ3.3 SWQ2 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -107,7 +104,6 @@ pktq_out = TXQ0.4 TXQ1.4 TXQ2.4 TXQ3.4 SWQ3 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_1Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_1Thread.cfg index ed2e8422..0fc3ec2c 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_1Thread.cfg @@ -47,7 +47,6 @@ pktq_out = TXQ0.1 TXQ1.1 SWQ0;TXQ2.1 TXQ3.1 SWQ0 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 diff --git a/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_4Thread.cfg b/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_4Thread.cfg index fd9da8f2..960af412 100644 --- a/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_HWLB_IPV6_SinglePortPair_4Thread.cfg @@ -47,7 +47,6 @@ pktq_out = TXQ0.1 TXQ1.1 SWQ0;TXQ2.1 TXQ3.1 SWQ0 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 @@ -68,7 +67,6 @@ pktq_out = TXQ0.2 TXQ1.2 SWQ1;TXQ2.2 TXQ3.2 SWQ1 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; 
tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -89,7 +87,6 @@ pktq_out = TXQ0.3 TXQ1.3 SWQ2;TXQ2.3 TXQ3.3 SWQ2 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -109,7 +106,6 @@ pktq_out = TXQ0.4 TXQ1.4 SWQ3;TXQ2.3 TXQ3.3 SWQ2 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_1Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_1Thread.cfg index 8e26c286..69527492 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_1Thread.cfg @@ -64,7 +64,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -105,7 +104,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_4Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_4Thread.cfg index adcce34e..b1a0d2f9 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV4_MultiPortPair_4Thread.cfg @@ -66,7 +66,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -84,7 +83,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -123,7 
+121,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -141,7 +138,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_1Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_1Thread.cfg index 5499ea8c..51a01019 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_1Thread.cfg @@ -63,7 +63,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 4096000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_4Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_4Thread.cfg index abb4735b..4e375299 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV4_SinglePortPair_4Thread.cfg @@ -62,7 +62,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -80,7 +79,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -98,7 +96,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -116,7 +113,6 @@ n_rules = 4096 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv4 traffic_type = 4 ; tcp_time_wait controls timeout for closed 
connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_1Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_1Thread.cfg index aefb37aa..f5bd020d 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_1Thread.cfg @@ -65,7 +65,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -106,7 +105,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_4Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_4Thread.cfg index bc30db8d..29bd4780 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV6_MultiPortPair_4Thread.cfg @@ -64,7 +64,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -82,7 +81,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -120,7 +118,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -138,7 +135,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_1Thread.cfg 
b/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_1Thread.cfg index 4fb0fad1..da263c45 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_1Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_1Thread.cfg @@ -63,7 +63,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_4Thread.cfg b/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_4Thread.cfg index 4c372f75..2ab3e506 100644 --- a/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_4Thread.cfg +++ b/VNFs/vFW/config/VFW_SWLB_IPV6_SinglePortPair_4Thread.cfg @@ -63,7 +63,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -81,7 +80,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -99,7 +97,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 @@ -117,7 +114,6 @@ n_rules = 10000 ;n_flows gets round up to power of 2 n_flows = 1000000 -pkt_type = ipv6 traffic_type = 6 ; tcp_time_wait controls timeout for closed connection, normally 120 tcp_time_wait = 10 diff --git a/VNFs/vFW/pipeline/pipeline_vfw_be.c b/VNFs/vFW/pipeline/pipeline_vfw_be.c index 97508a77..b659ee58 100644 --- a/VNFs/vFW/pipeline/pipeline_vfw_be.c +++ b/VNFs/vFW/pipeline/pipeline_vfw_be.c @@ -142,7 +142,7 @@ struct mbuf_tcp_meta_data { #define IP_VERSION_4 4 #define IP_VERSION_6 6 -#define MIX 10 + /* IPv6 */ #define IP_HDR_SIZE_IPV6 40 #define IP_HDR_DSCP_OFST_IPV6 0 @@ -2555,7 +2555,7 @@ static void 
strncpy(pipe->name, params->name, sizeof(pipe->name)); pipe->log_level = params->log_level; pipe_vfw->n_flows = 4096; /* small default value */ - pipe_vfw->traffic_type = MIX; + pipe_vfw->traffic_type = IP_VERSION_4; pipe_vfw->pipeline_num = 0xff; for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) { pipe_vfw->links_map[i] = 0xff; diff --git a/tools/vnf_build.sh b/tools/vnf_build.sh index d404cf82..5ac79a47 100755 --- a/tools/vnf_build.sh +++ b/tools/vnf_build.sh @@ -290,20 +290,15 @@ build_vnfs() #--- Add non intractive option to build vnfs if [[ "$1" = "--silient" ]];then + DPDK_VER=("" "16.04" "16.11" "17.02" "17.05") + member="$2" + for item in "${DPDK_VER[@]}"; do + if [[ "$member" == "$item" ]]; then + DPDK_RTE_VER="$member" + fi + done pushd $VNF_CORE - echo "Setup proxy if needed..." - http_proxy=$2 - https_proxy=$3 - if [[ "$http_proxy" != "" ]]; then - export http_proxy=$http_proxy - export https_proxy=$http_proxy - fi - - if [[ "$https_proxy" != "" ]]; then - export https_proxy=$https_proxy - fi - echo "Install required libraries..." touch .agree install_libs |