author     Deepak S <deepak.s@linux.intel.com>   2017-04-17 23:03:43 -0700
committer  Deepak S <deepak.s@linux.intel.com>   2017-04-19 03:13:12 -0700
commit     f0bfb2b0c8467154990b49beafb991b7515e37e3 (patch)
tree       f713c75bca8048cae1d73ddfc2ce874bac3cbb4f
parent     421bd97023e853a9e87d16e100b23bf3c60a9188 (diff)
vCGNAPT VNF initial check-in
JIRA: SAMPLEVNF-5

The vCGNAPT implementation contains the following features:
• Static and dynamic network address translation (NAT)
• Static and dynamic network address and port translation (NAPT)
• ARP (request, response, gratuitous)
• ICMP (terminal echo, echo response, pass-through)
• ICMPv6 and ND
• UDP, TCP and ICMP protocol pass-through
• Multithread support and multiple physical port support
• Limiting max ports per client
• Limiting max clients per public IP address
• Live session tracking of NAT flows
• NAT64: connectivity from an IPv6 access network to an IPv4 data network
• PCP: Port Control Protocol
• SIP functionality
• FTP functionality

Change-Id: I5ebb44ae60e32dd6da5e793efd91a6831a4d30a7
Signed-off-by: Deepak S <deepak.s@linux.intel.com>
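The features above map onto what this commit adds: PCP, SIP, FTP and connection tracking are enabled through the -DPCP_ENABLE, -DSIP_ALG, -DFTP_ALG and -DCT_CGNAT CFLAGS in VNFs/vCGNAPT/Makefile, and each NAPT worker is declared as a CGNAPT pipeline section in the config files. The sketch below is condensed from config/arp_hwlb-2P-1T.cfg further down in this diff; the queue names, core number and public IP range are the example values used there, and the inline ";" comments are explanatory annotations added here, not part of the file:

[PIPELINE3]
type = CGNAPT                               ; one NAPT worker thread
core = 3                                    ; CPU core running this pipeline
pktq_in = RXQ0.0 RXQ1.0                     ; RX queues from ports 0 and 1
pktq_out = TXQ0.1 TXQ1.1 SWQ0               ; TX queues, plus SWQ0 feeding the ARPICMP pipeline
n_flows = 1048576                           ; NAT flow table size
max_clients_per_ip = 65535                  ; limit on clients per public IP
max_port_per_client = 10                    ; limit on ports per client
public_ip_port_range = 98103214:(1, 65535)  ; public IP (hex) and its port pool
pkt_type = ipv4
prv_que_handler = (0,)                      ; input queue 0 is the private side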
-rw-r--r--  Makefile | 33
-rw-r--r--  VNFs/vCGNAPT/Makefile | 119
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-2P-1T-ipv6.cfg | 44
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-2P-1T.cfg | 44
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-2P-2T-ipv6.cfg | 61
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-2P-2T.cfg | 61
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-2P-3T-ipv6.cfg | 76
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-2P-3T.cfg | 76
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-4P-1T-ipv6.cfg | 67
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-4P-1T.cfg | 67
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-4P-2T-ipv6.cfg | 101
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-4P-2T.cfg | 101
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-4P-3T-ipv6.cfg | 135
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb-4P-3T.cfg | 135
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb_scriptfile_2P.cfg | 32
-rw-r--r--  VNFs/vCGNAPT/config/arp_hwlb_scriptfile_4P.cfg | 50
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T-ipv6.cfg | 69
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T.cfg | 69
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T-ipv6.cfg | 86
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T.cfg | 86
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T-ipv6.cfg | 103
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T.cfg | 103
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T-ipv6.cfg | 117
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T.cfg | 117
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T-ipv6.cfg | 86
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T.cfg | 86
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T-ipv6.cfg | 185
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T.cfg | 185
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx_ScriptFile_2P.cfg | 20
-rw-r--r--  VNFs/vCGNAPT/config/arp_txrx_ScriptFile_4P.cfg | 30
-rw-r--r--  VNFs/vCGNAPT/init.c | 1809
-rw-r--r--  VNFs/vCGNAPT/main.c | 50
-rw-r--r--  VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.c | 825
-rw-r--r--  VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.h | 356
-rw-r--r--  VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.c | 174
-rw-r--r--  VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.h | 35
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_cgnapt.c | 1518
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h | 138
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c | 10963
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h | 808
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_cgnapt_common.h | 271
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_timer.c | 37
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_timer.h | 24
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_timer_be.c | 507
-rw-r--r--  VNFs/vCGNAPT/pipeline/pipeline_timer_be.h | 55
-rw-r--r--  docs/vCGNAPT/INSTALL.rst | 185
-rw-r--r--  docs/vCGNAPT/README.rst | 189
-rw-r--r--  docs/vCGNAPT/RELEASE_NOTES.rst | 80
48 files changed, 20568 insertions, 0 deletions
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..68feab0d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,33 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+TARGETS := all clean
+VNF_DIR := VNFs
+CGNAPT := $(VNF_DIR)/vCGNAPT
+
+subdirs := $(CGNAPT)
+
+.PHONY: $(TARGETS) $(subdirs)
+
+$(TARGETS): $(subdirs)
+
+$(subdirs):
+ $(MAKE) -C $@ $(MAKECMDGOALS)
diff --git a/VNFs/vCGNAPT/Makefile b/VNFs/vCGNAPT/Makefile
new file mode 100644
index 00000000..e2025a13
--- /dev/null
+++ b/VNFs/vCGNAPT/Makefile
@@ -0,0 +1,119 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+ifeq ($(VNF_CORE),)
+$(error "Please define VNF_CORE environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = vCGNAPT
+
+VPATH += $(VNF_CORE)/common/vnf_common
+VPATH += $(VNF_CORE)/common/VIL/pipeline_arpicmp
+VPATH += $(VNF_CORE)/common/VIL/conntrack
+VPATH += $(VNF_CORE)/common/VIL/pipeline_common
+VPATH += $(VNF_CORE)/common/VIL/pipeline_loadb
+VPATH += $(VNF_CORE)/common/VIL/pipeline_master
+VPATH += $(VNF_CORE)/common/VIL/pipeline_passthrough
+VPATH += $(SRCDIR)/pipeline
+VPATH += $(VNF_CORE)/common/VIL/pipeline_txrx
+VPATH += $(VNF_CORE)/common/VIL/alg
+VPATH += $(VNF_CORE)/common/VIL/l2l3_stack
+
+INC += $(wildcard *.h)
+INC += $(wildcard pipeline/*.h)
+INC += $(wildcard $(VNF_CORE)/common/vnf_common/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/l2l3_stack/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/conntrack/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_loadb/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_common/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_master/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_passthrough/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/pipeline_txrx/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/conntrack/*.h)
+INC += $(wildcard $(VNF_CORE)/common/VIL/alg/*.h)
+
+CFLAGS += -I$(SRCDIR) -mrtm -mhle -I$(SRCDIR)/pipeline -I$(VNF_CORE)/common/vnf_common
+CFLAGS += -I$(VNF_CORE)/common/VIL/l2l3_stack -I$(VNF_CORE)/common/VIL/conntrack
+CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_common -I$(VNF_CORE)/common/VIL/pipeline_loadb
+CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_master -I$(VNF_CORE)/common/VIL/pipeline_passthrough
+CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_txrx -I$(VNF_CORE)/common/VIL/alg
+CFLAGS += -I$(VNF_CORE)/common/VIL/pipeline_arpicmp
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_check.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cpu_core_map.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib_arp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib_icmpv6.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += interface.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += hle.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += tsx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += l2_proto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += l3fwd_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += l3fwd_lpm4.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += l3fwd_lpm6.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += bond.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_cgnapt.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_cgnapt_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_loadb.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_loadb_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_timer.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_timer_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += vnf_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_arpicmp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_arpicmp_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_ct_tcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_cnxn_tracking.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_ct_udp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_ct_synproxy.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_txrx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_txrx_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cgnapt_pcp_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cgnapt_pcp_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib_sip_alg.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib_ftp_alg.c
+
+CFLAGS += -O3
+CFLAGS += -DIPV6
+CFLAGS += -DPCP_ENABLE
+CFLAGS += -DHW_CHECKSUM_REQ
+CFLAGS += -DSIP_ALG
+CFLAGS += -DALG
+CFLAGS += -DCT_CGNAT
+CFLAGS += -DFTP_ALG
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-2P-1T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_hwlb-2P-1T-ipv6.cfg
new file mode 100644
index 00000000..34691d4f
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-2P-1T-ipv6.cfg
@@ -0,0 +1,44 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-2P-1T.cfg b/VNFs/vCGNAPT/config/arp_hwlb-2P-1T.cfg
new file mode 100644
index 00000000..f0127584
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-2P-1T.cfg
@@ -0,0 +1,44 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-2P-2T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_hwlb-2P-2T-ipv6.cfg
new file mode 100644
index 00000000..f9f9935f
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-2P-2T-ipv6.cfg
@@ -0,0 +1,61 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-2P-2T.cfg b/VNFs/vCGNAPT/config/arp_hwlb-2P-2T.cfg
new file mode 100644
index 00000000..6dd7576b
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-2P-2T.cfg
@@ -0,0 +1,61 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-2P-3T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_hwlb-2P-3T-ipv6.cfg
new file mode 100644
index 00000000..d082d345
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-2P-3T-ipv6.cfg
@@ -0,0 +1,76 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1 SWQ2
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = RXQ0.2 RXQ1.2
+pktq_out = TXQ0.3 TXQ1.3 SWQ2
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-2P-3T.cfg b/VNFs/vCGNAPT/config/arp_hwlb-2P-3T.cfg
new file mode 100644
index 00000000..17623dfb
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-2P-3T.cfg
@@ -0,0 +1,76 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1 SWQ2
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = RXQ0.2 RXQ1.2
+pktq_out = TXQ0.3 TXQ1.3 SWQ2
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-4P-1T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_hwlb-4P-1T-ipv6.cfg
new file mode 100644
index 00000000..685ec442
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-4P-1T-ipv6.cfg
@@ -0,0 +1,67 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = TXQ2.1 TXQ3.1 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-4P-1T.cfg b/VNFs/vCGNAPT/config/arp_hwlb-4P-1T.cfg
new file mode 100644
index 00000000..3687eb44
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-4P-1T.cfg
@@ -0,0 +1,67 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = TXQ2.1 TXQ3.1 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-4P-2T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_hwlb-4P-2T-ipv6.cfg
new file mode 100644
index 00000000..7dcf92e3
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-4P-2T-ipv6.cfg
@@ -0,0 +1,101 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = TXQ2.1 TXQ3.1 SWQ2
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = RXQ2.1 RXQ3.1
+pktq_out = TXQ2.2 TXQ3.2 SWQ3
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-4P-2T.cfg b/VNFs/vCGNAPT/config/arp_hwlb-4P-2T.cfg
new file mode 100644
index 00000000..a7fa7e45
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-4P-2T.cfg
@@ -0,0 +1,101 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = TXQ2.1 TXQ3.1 SWQ2
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = RXQ2.1 RXQ3.1
+pktq_out = TXQ2.2 TXQ3.2 SWQ3
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-4P-3T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_hwlb-4P-3T-ipv6.cfg
new file mode 100644
index 00000000..42ae5035
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-4P-3T-ipv6.cfg
@@ -0,0 +1,135 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3 SWQ4 SWQ5
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = RXQ0.2 RXQ1.2
+pktq_out = TXQ0.3 TXQ1.3 SWQ2
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = TXQ2.1 TXQ3.1 SWQ3
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = CGNAPT
+core = 7
+pktq_in = RXQ2.1 RXQ3.1
+pktq_out = TXQ2.2 TXQ3.2 SWQ4
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE8]
+type = CGNAPT
+core = 8
+pktq_in = RXQ2.2 RXQ3.2
+pktq_out = TXQ2.3 TXQ3.3 SWQ5
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb-4P-3T.cfg b/VNFs/vCGNAPT/config/arp_hwlb-4P-3T.cfg
new file mode 100644
index 00000000..4317dcef
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb-4P-3T.cfg
@@ -0,0 +1,135 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3 SWQ4 SWQ5
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = CGNAPT
+core = 3
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = TXQ0.1 TXQ1.1 SWQ0
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE4]
+type = CGNAPT
+core = 4
+pktq_in = RXQ0.1 RXQ1.1
+pktq_out = TXQ0.2 TXQ1.2 SWQ1
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = RXQ0.2 RXQ1.2
+pktq_out = TXQ0.3 TXQ1.3 SWQ2
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = TXQ2.1 TXQ3.1 SWQ3
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = CGNAPT
+core = 7
+pktq_in = RXQ2.1 RXQ3.1
+pktq_out = TXQ2.2 TXQ3.2 SWQ4
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE8]
+type = CGNAPT
+core = 8
+pktq_in = RXQ2.2 RXQ3.2
+pktq_out = TXQ2.3 TXQ3.3 SWQ5
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
diff --git a/VNFs/vCGNAPT/config/arp_hwlb_scriptfile_2P.cfg b/VNFs/vCGNAPT/config/arp_hwlb_scriptfile_2P.cfg
new file mode 100644
index 00000000..c560ceb7
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb_scriptfile_2P.cfg
@@ -0,0 +1,32 @@
+
+link 0 down
+link 0 config 152.16.100.21 24
+link 0 up
+link 1 down
+link 1 config 152.16.40.21 24
+link 1 up
+
+
+; uncomment to enable static NAPT
+;p <pipeline id> entry addm <prv_ipv4/6> <prv_port> <pub_ip> <pub_port> <phy_port> <ttl> <no_of_entries> <end_prv_port> <end_pub_port>
+;p 3 entry addm 152.16.100.20 1234 152.16.40.10 1 0 500 65535 1234 65535
+
+; uncomment below lines to enable static arp
+;p 1 arpadd 0 152.16.100.20 00:00:00:00:00:01
+;p 1 arpadd 1 152.16.40.20 00:00:00:00:00:02
+
+
+; uncomment below lines to enable static arp
+;p 1 arpadd 0 0064:ff9b:0:0:0:0:9810:6414 00:00:00:00:00:01
+;p 1 arpadd 1 0064:ff9b:0:0:0:0:9810:2814 00:00:00:00:00:02
+
+
+set fwd rxonly
+set_sym_hash_ena_per_port 0 enable
+set_hash_global_config 0 simple_xor ipv4-udp enable
+set_sym_hash_ena_per_port 1 enable
+set_hash_global_config 1 simple_xor ipv4-udp enable
+set_hash_input_set 0 ipv4-udp src-ipv4 udp-src-port add
+set_hash_input_set 1 ipv4-udp dst-ipv4 udp-dst-port add
+set_hash_input_set 0 ipv4-udp src-ipv6 udp-src-port add
+set_hash_input_set 1 ipv4-udp dst-ipv6 udp-dst-port add
diff --git a/VNFs/vCGNAPT/config/arp_hwlb_scriptfile_4P.cfg b/VNFs/vCGNAPT/config/arp_hwlb_scriptfile_4P.cfg
new file mode 100644
index 00000000..0dc60e1d
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_hwlb_scriptfile_4P.cfg
@@ -0,0 +1,50 @@
+
+link 0 down
+link 0 config 152.16.100.21 24
+link 0 up
+link 1 down
+link 1 config 152.16.40.21 24
+link 1 up
+link 2 down
+link 2 config 192.16.100.25 24
+link 2 up
+link 3 down
+link 3 config 192.16.40.25 24
+link 3 up
+
+; uncomment to enable static NAPT
+;p <pipeline id> entry addm <prv_ipv4/6> <prv_port> <pub_ip> <pub_port> <phy_port> <ttl> <no_of_entries> <end_prv_port> <end_pub_port>
+;p 3 entry addm 152.16.100.20 1234 152.16.40.10 1 0 500 65535 1234 65535
+
+; uncomment below lines to enable static arp
+;p 1 arpadd 0 152.16.100.20 00:00:00:00:00:01
+;p 1 arpadd 1 152.16.40.20 00:00:00:00:00:02
+;p 1 arpadd 2 192.16.100.20 00:00:00:00:00:03
+;p 1 arpadd 3 192.16.40.20 00:00:00:00:00:04
+
+
+; uncomment below lines to enable static arp
+;p 1 arpadd 0 0064:ff9b:0:0:0:0:9810:6414 00:00:00:00:00:01
+;p 1 arpadd 1 0064:ff9b:0:0:0:0:9810:2814 00:00:00:00:00:02
+;p 1 arpadd 2 0064:ff9b:0:0:0:0:c010:6414 00:00:00:00:00:03
+;p 1 arpadd 3 0064:ff9b:0:0:0:0:c010:2814 00:00:00:00:00:04
+
+
+set fwd rxonly
+set_sym_hash_ena_per_port 0 enable
+set_hash_global_config 0 simple_xor ipv4-udp enable
+set_sym_hash_ena_per_port 1 enable
+set_hash_global_config 1 simple_xor ipv4-udp enable
+set_hash_input_set 0 ipv4-udp src-ipv4 udp-src-port add
+set_hash_input_set 1 ipv4-udp dst-ipv4 udp-dst-port add
+set_hash_input_set 0 ipv4-udp src-ipv6 udp-src-port add
+set_hash_input_set 1 ipv4-udp dst-ipv6 udp-dst-port add
+
+set_sym_hash_ena_per_port 2 enable
+set_hash_global_config 2 simple_xor ipv4-udp enable
+set_sym_hash_ena_per_port 3 enable
+set_hash_global_config 3 simple_xor ipv4-udp enable
+set_hash_input_set 2 ipv4-udp src-ipv4 udp-src-port add
+set_hash_input_set 3 ipv4-udp dst-ipv4 udp-dst-port add
+set_hash_input_set 2 ipv4-udp src-ipv6 udp-src-port add
+set_hash_input_set 3 ipv4-udp dst-ipv6 udp-dst-port add
diff --git a/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T-ipv6.cfg
new file mode 100644
index 00000000..a81d9d43
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T-ipv6.cfg
@@ -0,0 +1,69 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4
+outport_offset = 136; 8
+n_vnf_threads = 1
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ5 SWQ6
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = TXRX
+core = 6
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ5 SWQ6
+pktq_out = TXQ0.1 TXQ1.1
diff --git a/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T.cfg b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T.cfg
new file mode 100644
index 00000000..785240d1
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-1T.cfg
@@ -0,0 +1,69 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4
+outport_offset = 136; 8
+n_vnf_threads = 1
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ5 SWQ6
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = TXRX
+core = 6
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ5 SWQ6
+pktq_out = TXQ0.1 TXQ1.1
diff --git a/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T-ipv6.cfg
new file mode 100644
index 00000000..48df0e62
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T-ipv6.cfg
@@ -0,0 +1,86 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4 SWQ5 SWQ6
+outport_offset = 136; 8
+n_vnf_threads = 2
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ7 SWQ8
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ5 SWQ6
+pktq_out = SWQ9 SWQ10
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = TXRX
+core = 7
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ7 SWQ8 SWQ9 SWQ10
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2
diff --git a/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T.cfg b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T.cfg
new file mode 100644
index 00000000..35eaa3bf
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-2T.cfg
@@ -0,0 +1,86 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4 SWQ5 SWQ6
+outport_offset = 136; 8
+n_vnf_threads = 2
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ7 SWQ8
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ5 SWQ6
+pktq_out = SWQ9 SWQ10
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = TXRX
+core = 7
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ7 SWQ8 SWQ9 SWQ10
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2
diff --git a/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T-ipv6.cfg
new file mode 100644
index 00000000..47747f60
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T-ipv6.cfg
@@ -0,0 +1,103 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4 SWQ5 SWQ6 SWQ7 SWQ8
+outport_offset = 136; 8
+n_vnf_threads = 3
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ9 SWQ10
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ5 SWQ6
+pktq_out = SWQ11 SWQ12
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = CGNAPT
+core = 7
+pktq_in = SWQ7 SWQ8
+pktq_out = SWQ13 SWQ14
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE8]
+type = TXRX
+core = 8
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ9 SWQ10 SWQ11 SWQ12 SWQ13 SWQ14
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2 TXQ0.3 TXQ1.3
diff --git a/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T.cfg b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T.cfg
new file mode 100644
index 00000000..413e1d8f
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-1LB-2P-3T.cfg
@@ -0,0 +1,103 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4 SWQ5 SWQ6 SWQ7 SWQ8
+outport_offset = 136; 8
+n_vnf_threads = 3
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ9 SWQ10
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ5 SWQ6
+pktq_out = SWQ11 SWQ12
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = CGNAPT
+core = 7
+pktq_in = SWQ7 SWQ8
+pktq_out = SWQ13 SWQ14
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE8]
+type = TXRX
+core = 8
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ9 SWQ10 SWQ11 SWQ12 SWQ13 SWQ14
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2 TXQ0.3 TXQ1.3
diff --git a/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T-ipv6.cfg
new file mode 100644
index 00000000..f67d7461
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T-ipv6.cfg
@@ -0,0 +1,117 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ2 SWQ3 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5
+outport_offset = 136; 8
+n_vnf_threads = 1
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ4 SWQ5
+pktq_out = SWQ6 SWQ7
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = TXRX
+core = 6
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ6 SWQ7
+pktq_out = TXQ0.1 TXQ1.1
+
+[PIPELINE7]
+type = TXRX
+core = 7
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = SWQ8 SWQ9 SWQ1
+
+[PIPELINE8]
+type = LOADB
+core = 8
+pktq_in = SWQ8 SWQ9
+pktq_out = SWQ10 SWQ11
+outport_offset = 136; 8
+n_vnf_threads = 1
+prv_que_handler = (0,)
+
+[PIPELINE9]
+type = CGNAPT
+core = 9
+pktq_in = SWQ10 SWQ11
+pktq_out = SWQ12 SWQ13
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE10]
+type = TXRX
+core = 10
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ12 SWQ13
+pktq_out = TXQ2.1 TXQ3.1
diff --git a/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T.cfg b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T.cfg
new file mode 100644
index 00000000..2e14aee4
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-1T.cfg
@@ -0,0 +1,117 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ2 SWQ3 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5
+outport_offset = 136; 8
+n_vnf_threads = 1
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ4 SWQ5
+pktq_out = SWQ6 SWQ7
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = TXRX
+core = 6
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ6 SWQ7
+pktq_out = TXQ0.1 TXQ1.1
+
+[PIPELINE7]
+type = TXRX
+core = 7
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = SWQ8 SWQ9 SWQ1
+
+[PIPELINE8]
+type = LOADB
+core = 8
+pktq_in = SWQ8 SWQ9
+pktq_out = SWQ10 SWQ11
+outport_offset = 136; 8
+n_vnf_threads = 1
+prv_que_handler = (0,)
+
+[PIPELINE9]
+type = CGNAPT
+core = 9
+pktq_in = SWQ10 SWQ11
+pktq_out = SWQ12 SWQ13
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE10]
+type = TXRX
+core = 10
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ12 SWQ13
+pktq_out = TXQ2.1 TXQ3.1
diff --git a/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T-ipv6.cfg
new file mode 100644
index 00000000..48df0e62
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T-ipv6.cfg
@@ -0,0 +1,86 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4 SWQ5 SWQ6
+outport_offset = 136; 8
+n_vnf_threads = 2
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ7 SWQ8
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ5 SWQ6
+pktq_out = SWQ9 SWQ10
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = TXRX
+core = 7
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ7 SWQ8 SWQ9 SWQ10
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2
diff --git a/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T.cfg b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T.cfg
new file mode 100644
index 00000000..35eaa3bf
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-2T.cfg
@@ -0,0 +1,86 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0
+pktq_out = TXQ0.0 TXQ1.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+
+
+pktq_in_prv = RXQ0.0
+prv_to_pub_map = (0, 1)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ1 SWQ2 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ1 SWQ2
+pktq_out = SWQ3 SWQ4 SWQ5 SWQ6
+outport_offset = 136; 8
+n_vnf_threads = 2
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ3 SWQ4
+pktq_out = SWQ7 SWQ8
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ5 SWQ6
+pktq_out = SWQ9 SWQ10
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = TXRX
+core = 7
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ7 SWQ8 SWQ9 SWQ10
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2
diff --git a/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T-ipv6.cfg b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T-ipv6.cfg
new file mode 100644
index 00000000..c98278f8
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T-ipv6.cfg
@@ -0,0 +1,185 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ2 SWQ3 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5 SWQ6 SWQ7 SWQ8 SWQ9
+outport_offset = 136; 8
+n_vnf_threads = 3
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ4 SWQ5
+pktq_out = SWQ10 SWQ11
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ6 SWQ7
+pktq_out = SWQ12 SWQ13
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = CGNAPT
+core = 7
+pktq_in = SWQ8 SWQ9
+pktq_out = SWQ14 SWQ15
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE8]
+type = TXRX
+core = 8
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ10 SWQ11 SWQ12 SWQ13 SWQ14 SWQ15
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2 TXQ0.3 TXQ1.3
+
+[PIPELINE9]
+type = TXRX
+core = 9
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = SWQ16 SWQ17 SWQ1
+
+[PIPELINE10]
+type = LOADB
+core = 10
+pktq_in = SWQ16 SWQ17
+pktq_out = SWQ18 SWQ19 SWQ20 SWQ21 SWQ22 SWQ23
+outport_offset = 136; 8
+n_vnf_threads = 3
+prv_que_handler = (0,)
+
+[PIPELINE11]
+type = CGNAPT
+core = 11
+pktq_in = SWQ18 SWQ19
+pktq_out = SWQ24 SWQ25
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE12]
+type = CGNAPT
+core = 12
+pktq_in = SWQ20 SWQ21
+pktq_out = SWQ26 SWQ27
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE13]
+type = CGNAPT
+core = 13
+pktq_in = SWQ22 SWQ23
+pktq_out = SWQ28 SWQ29
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv6
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE14]
+type = TXRX
+core = 14
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ24 SWQ25 SWQ26 SWQ27 SWQ28 SWQ29
+pktq_out = TXQ2.1 TXQ3.1 TXQ2.2 TXQ3.2 TXQ2.3 TXQ3.3
diff --git a/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T.cfg b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T.cfg
new file mode 100644
index 00000000..19384731
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx-2LB-4P-3T.cfg
@@ -0,0 +1,185 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ARPICMP
+core = 1
+pktq_in = SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+arp_route_tbl = (98106414,FFFFFF00,0,98106414)
+arp_route_tbl = (98102814,FFFFFF00,1,98102814)
+arp_route_tbl = (C0106414,FFFFFF00,2,C0106414)
+arp_route_tbl = (C0102814,FFFFFF00,3,C0102814)
+
+
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:6414,120,0,0064:ff9b:0:0:0:0:9810:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:9810:2814,120,1,0064:ff9b:0:0:0:0:9810:2814)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:6414,120,2,0064:ff9b:0:0:0:0:c010:6414)
+nd_route_tbl = (0064:ff9b:0:0:0:0:c010:2814,120,3,0064:ff9b:0:0:0:0:c010:2814)
+
+
+pktq_in_prv = RXQ0.0 RXQ2.0
+prv_to_pub_map = (0, 1)(2, 3)
+ports_mac_list = 00:00:00:00:00:06 00:00:00:00:00:07 00:00:00:00:00:08 00:00:00:00:00:09
+
+[PIPELINE2]
+type = TIMER
+core = 2
+n_flows = 1048576
+
+[PIPELINE3]
+type = TXRX
+core = 3
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ0.0 RXQ1.0
+pktq_out = SWQ2 SWQ3 SWQ0
+
+[PIPELINE4]
+type = LOADB
+core = 4
+pktq_in = SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5 SWQ6 SWQ7 SWQ8 SWQ9
+outport_offset = 136; 8
+n_vnf_threads = 3
+prv_que_handler = (0,)
+
+[PIPELINE5]
+type = CGNAPT
+core = 5
+pktq_in = SWQ4 SWQ5
+pktq_out = SWQ10 SWQ11
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103214:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE6]
+type = CGNAPT
+core = 6
+pktq_in = SWQ6 SWQ7
+pktq_out = SWQ12 SWQ13
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE7]
+type = CGNAPT
+core = 7
+pktq_in = SWQ8 SWQ9
+pktq_out = SWQ14 SWQ15
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE8]
+type = TXRX
+core = 8
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ10 SWQ11 SWQ12 SWQ13 SWQ14 SWQ15
+pktq_out = TXQ0.1 TXQ1.1 TXQ0.2 TXQ1.2 TXQ0.3 TXQ1.3
+
+[PIPELINE9]
+type = TXRX
+core = 9
+pipeline_txrx_type = RXRX
+dest_if_offset = 176
+pktq_in = RXQ2.0 RXQ3.0
+pktq_out = SWQ16 SWQ17 SWQ1
+
+[PIPELINE10]
+type = LOADB
+core = 10
+pktq_in = SWQ16 SWQ17
+pktq_out = SWQ18 SWQ19 SWQ20 SWQ21 SWQ22 SWQ23
+outport_offset = 136; 8
+n_vnf_threads = 3
+prv_que_handler = (0,)
+
+[PIPELINE11]
+type = CGNAPT
+core = 11
+pktq_in = SWQ18 SWQ19
+pktq_out = SWQ24 SWQ25
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+public_ip_port_range = 98103215:(1, 65535)
+vnf_set = (3,4,5)
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE12]
+type = CGNAPT
+core = 12
+pktq_in = SWQ20 SWQ21
+pktq_out = SWQ26 SWQ27
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE13]
+type = CGNAPT
+core = 13
+pktq_in = SWQ22 SWQ23
+pktq_out = SWQ28 SWQ29
+phyport_offset = 204
+n_flows = 1048576
+key_offset = 192;64
+key_size = 8
+hash_offset = 200;72
+timer_period = 100
+max_clients_per_ip = 65535
+max_port_per_client = 10
+pkt_type = ipv4
+cgnapt_meta_offset = 128
+prv_que_handler = (0,)
+
+[PIPELINE14]
+type = TXRX
+core = 14
+pipeline_txrx_type = TXTX
+dest_if_offset = 176
+pktq_in = SWQ24 SWQ25 SWQ26 SWQ27 SWQ28 SWQ29
+pktq_out = TXQ2.1 TXQ3.1 TXQ2.2 TXQ3.2 TXQ2.3 TXQ3.3
diff --git a/VNFs/vCGNAPT/config/arp_txrx_ScriptFile_2P.cfg b/VNFs/vCGNAPT/config/arp_txrx_ScriptFile_2P.cfg
new file mode 100644
index 00000000..b04505c3
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx_ScriptFile_2P.cfg
@@ -0,0 +1,20 @@
+
+link 0 down
+link 0 config 152.16.100.21 24
+link 0 up
+link 1 down
+link 1 config 152.16.40.21 24
+link 1 up
+
+; uncomment to enable static NAPT
+;p <pipeline id> entry addm <prv_ipv4/6> <prv_port> <pub_ip> <pub_port> <phy_port> <ttl> <no_of_entries> <end_prv_port> <end_pub_port>
+;p 3 entry addm 152.16.100.20 1234 152.16.40.10 1 0 500 65535 1234 65535
+
+; uncomment below lines to enable static arp
+;p 1 arpadd 0 152.16.100.20 00:00:00:00:00:01
+;p 1 arpadd 1 152.16.40.20 00:00:00:00:00:02
+
+
+; uncomment below lines to enable static ARP entries for IPv6 (ND)
+;p 1 arpadd 0 0064:ff9b:0:0:0:0:9810:6414 00:00:00:00:00:01
+;p 1 arpadd 1 0064:ff9b:0:0:0:0:9810:2814 00:00:00:00:00:02
diff --git a/VNFs/vCGNAPT/config/arp_txrx_ScriptFile_4P.cfg b/VNFs/vCGNAPT/config/arp_txrx_ScriptFile_4P.cfg
new file mode 100644
index 00000000..ad4ee606
--- /dev/null
+++ b/VNFs/vCGNAPT/config/arp_txrx_ScriptFile_4P.cfg
@@ -0,0 +1,30 @@
+
+link 0 down
+link 0 config 152.16.100.21 24
+link 0 up
+link 1 down
+link 1 config 152.16.40.21 24
+link 1 up
+link 2 down
+link 2 config 192.16.100.25 24
+link 2 up
+link 3 down
+link 3 config 192.16.40.25 24
+link 3 up
+
+; uncomment to enable static NAPT
+;p <pipeline id> entry addm <prv_ipv4/6> <prv_port> <pub_ip> <pub_port> <phy_port> <ttl> <no_of_entries> <end_prv_port> <end_pub_port>
+;p 3 entry addm 152.16.100.20 1234 152.16.40.10 1 0 500 65535 1234 65535
+
+; uncomment below lines to enable static arp
+;p 1 arpadd 0 152.16.100.20 00:00:00:00:00:01
+;p 1 arpadd 1 152.16.40.20 00:00:00:00:00:02
+;p 1 arpadd 2 192.16.100.20 00:00:00:00:00:03
+;p 1 arpadd 3 192.16.40.20 00:00:00:00:00:04
+
+
+; uncomment below lines to enable static ARP entries for IPv6 (ND)
+;p 1 arpadd 0 0064:ff9b:0:0:0:0:9810:6414 00:00:00:00:00:01
+;p 1 arpadd 1 0064:ff9b:0:0:0:0:9810:2814 00:00:00:00:00:02
+;p 1 arpadd 2 0064:ff9b:0:0:0:0:c010:6414 00:00:00:00:00:03
+;p 1 arpadd 3 0064:ff9b:0:0:0:0:c010:2814 00:00:00:00:00:04
diff --git a/VNFs/vCGNAPT/init.c b/VNFs/vCGNAPT/init.c
new file mode 100644
index 00000000..eff9e30e
--- /dev/null
+++ b/VNFs/vCGNAPT/init.c
@@ -0,0 +1,1809 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+
+#include "app.h"
+#include "pipeline.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_master.h"
+#include "thread_fe.h"
+#include "pipeline_cgnapt.h"
+#include "pipeline_loadb.h"
+#include "pipeline_timer.h"
+#include "pipeline_txrx.h"
+#include "pipeline_arpicmp.h"
+#include "interface.h"
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "lib_arp.h"
+
+#define APP_NAME_SIZE 32
+port_config_t *port_config;
+
+static void
+app_init_core_map(struct app_params *app)
+{
+ APP_LOG(app, HIGH, "Initializing CPU core map ...");
+ app->core_map = cpu_core_map_init(4, 32, 4, 0);
+
+ if (app->core_map == NULL)
+ rte_panic("Cannot create CPU core map\n");
+
+ if (app->log_level >= APP_LOG_LEVEL_LOW)
+ cpu_core_map_print(app->core_map);
+}
+
+/* Core Mask String in Hex Representation */
+#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
+
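+/*
+ * Build the application core mask by resolving each pipeline's
+ * (socket, core, hyper-thread) placement to an lcore id.
+ */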
+static void
+app_init_core_mask(struct app_params *app)
+{
+ char core_mask_str[APP_CORE_MASK_STRING_SIZE];
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ p->socket_id,
+ p->core_id,
+ p->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("Cannot create CPU core mask\n");
+
+ app_core_enable_in_core_mask(app, lcore_id);
+ }
+
+ app_core_build_core_mask_string(app, core_mask_str);
+ APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
+
+}
+
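+/*
+ * Translate the parsed EAL parameters into an argv-style argument list
+ * (core mask, channels, memory, PCI white/black lists, vdevs, etc.) and
+ * call rte_eal_init().
+ */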
+static void
+app_init_eal(struct app_params *app)
+{
+ char buffer[256];
+ char core_mask_str[APP_CORE_MASK_STRING_SIZE];
+ struct app_eal_params *p = &app->eal_params;
+ uint8_t n_args = 0;
+ uint32_t i;
+ int status;
+
+ app->eal_argv[n_args++] = strdup(app->app_name);
+
+ app_core_build_core_mask_string(app, core_mask_str);
+ snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ if (p->coremap) {
+ snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->master_lcore_present) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--master-lcore=%" PRIu32,
+ p->master_lcore);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ if (p->memory_present) {
+ snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->ranks_present) {
+ snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-blacklist=%s",
+ p->pci_blacklist[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (app->port_mask != 0)
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-whitelist=%s",
+ p->pci_whitelist[i]);
+ if (n_args < 255)
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+ else
+ for (i = 0; i < app->n_links; i++) {
+ char *pci_bdf = app->link_params[i].pci_bdf;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-whitelist=%s",
+ pci_bdf);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--vdev=%s",
+ p->vdev[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
+ snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->proc_type) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--proc-type=%s",
+ p->proc_type);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->syslog) {
+ snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->log_level_present) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--log-level=%" PRIu32,
+ p->log_level);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->version_present) && p->version) {
+ snprintf(buffer, sizeof(buffer), "-v");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->help_present) && p->help) {
+ snprintf(buffer, sizeof(buffer), "--help");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_huge_present) && p->no_huge) {
+ snprintf(buffer, sizeof(buffer), "--no-huge");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_pci_present) && p->no_pci) {
+ snprintf(buffer, sizeof(buffer), "--no-pci");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_hpet_present) && p->no_hpet) {
+ snprintf(buffer, sizeof(buffer), "--no-hpet");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_shconf_present) && p->no_shconf) {
+ snprintf(buffer, sizeof(buffer), "--no-shconf");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->add_driver) {
+ snprintf(buffer, sizeof(buffer), "-d=%s", p->add_driver);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->socket_mem) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--socket-mem=%s",
+ p->socket_mem);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->huge_dir) {
+ snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->file_prefix) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--file-prefix=%s",
+ p->file_prefix);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->base_virtaddr) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--base-virtaddr=%s",
+ p->base_virtaddr);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->create_uio_dev_present) && p->create_uio_dev) {
+ snprintf(buffer, sizeof(buffer), "--create-uio-dev");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->vfio_intr) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--vfio-intr=%s",
+ p->vfio_intr);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->xen_dom0_present) && (p->xen_dom0)) {
+ snprintf(buffer, sizeof(buffer), "--xen-dom0");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ snprintf(buffer, sizeof(buffer), "--");
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ app->eal_argc = n_args;
+
+ APP_LOG(app, HIGH, "Initializing EAL ...");
+ if (app->log_level >= APP_LOG_LEVEL_LOW) {
+ int i;
+
+ fprintf(stdout, "[APP] EAL arguments: \"");
+ for (i = 1; i < app->eal_argc; i++)
+ fprintf(stdout, "%s ", app->eal_argv[i]);
+ fprintf(stdout, "\"\n");
+ }
+
+ status = rte_eal_init(app->eal_argc, app->eal_argv);
+ if (status < 0)
+ rte_panic("EAL init error\n");
+}
+#if 0
+static void
+app_init_mempool(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_mempools; i++) {
+ struct app_mempool_params *p = &app->mempool_params[i];
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p->name);
+ app->mempool[i] = rte_mempool_create(
+ p->name,
+ p->pool_size,
+ p->buffer_size,
+ p->cache_size,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL,
+ p->cpu_socket_id,
+ 0);
+
+ if (app->mempool[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+#endif
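+
+/*
+ * The helpers below program NIC ethertype, SYN and 5-tuple (ntuple) filters
+ * to steer ARP, TCP SYN, IP, TCP, UDP and SCTP traffic for a link's local
+ * IP address into dedicated receive queues.
+ */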
+static inline int
+app_link_filter_arp_add(struct app_link_params *link)
+{
+ struct rte_eth_ethertype_filter filter = {
+ .ether_type = ETHER_TYPE_ARP,
+ .flags = 0,
+ .queue = link->arp_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(link->pmd_id,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_syn_add(struct app_link_params *link)
+{
+ struct rte_eth_syn_filter filter = {
+ .hig_pri = 1,
+ .queue = link->tcp_syn_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(link->pmd_id,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = 0,
+ .proto_mask = 0, /* Disable */
+ .tcp_flags = 0,
+ .priority = 1, /* Lowest */
+ .queue = l1->ip_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = 0,
+ .proto_mask = 0, /* Disable */
+ .tcp_flags = 0,
+ .priority = 1, /* Lowest */
+ .queue = l1->ip_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_TCP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->tcp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_TCP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->tcp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_UDP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->udp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_UDP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->udp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_SCTP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->sctp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_SCTP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->sctp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+#if 0
+static void
+app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
+{
+ if (cp->arp_q != 0) {
+ int status = app_link_filter_arp_add(cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding ARP filter (queue = %" PRIu32 ")",
+ cp->name, cp->pmd_id, cp->arp_q);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding ARP filter "
+ "(queue = %" PRIu32 ") (%" PRId32 ")\n",
+ cp->name, cp->pmd_id, cp->arp_q, status);
+ }
+}
+
+static void
+app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
+{
+ if (cp->tcp_syn_q != 0) {
+ int status = app_link_filter_tcp_syn_add(cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding TCP SYN filter (queue = %" PRIu32 ")",
+ cp->name, cp->pmd_id, cp->tcp_syn_q);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding TCP SYN filter "
+ "(queue = %" PRIu32 ") (%" PRId32 ")\n",
+ cp->name, cp->pmd_id, cp->tcp_syn_q,
+ status);
+ }
+}
+
+static int
+app_link_is_virtual(__rte_unused struct app_link_params *p)
+{
+ uint32_t pmd_id = p->pmd_id;
+ struct rte_eth_dev *dev = &rte_eth_devices[pmd_id];
+ if (dev->dev_type == RTE_ETH_DEV_VIRTUAL)
+ return 1;
+ return 0;
+}
+#endif
+
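+/*
+ * Bring a link up. The legacy DPDK filter/link handling is compiled out
+ * (#if 0); the state change is delegated to the interface manager (ifm).
+ */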
+void
+app_link_up_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
+{
+#if 0
+ uint32_t i;
+ int status;
+ struct rte_eth_link link;
+
+ if (app_link_is_virtual(cp)) {
+ cp->state = 1;
+ return;
+ }
+
+
+ /* For each link, add filters for IP of current link */
+ if (cp->ip != 0) {
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+
+ /* IP */
+ if (p->ip_local_q != 0) {
+ int status = app_link_filter_ip_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding IP filter (queue= %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->ip_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding IP "
+ "filter (queue= %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->ip_local_q, cp->ip, status);
+ }
+
+ /* TCP */
+ if (p->tcp_local_q != 0) {
+ int status = app_link_filter_tcp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->tcp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding TCP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->tcp_local_q, cp->ip, status);
+ }
+
+ /* UDP */
+ if (p->udp_local_q != 0) {
+ int status = app_link_filter_udp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding UDP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->udp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding UDP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->udp_local_q, cp->ip, status);
+ }
+
+ /* SCTP */
+ if (p->sctp_local_q != 0) {
+ int status = app_link_filter_sctp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Adding SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->sctp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding SCTP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->sctp_local_q, cp->ip,
+ status);
+ }
+ }
+ }
+
+ rte_eth_link_get(cp->pmd_id, &link);
+ if (!link.link_status) {
+ /* PMD link up */
+ status = rte_eth_dev_set_link_up(cp->pmd_id);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): PMD set link up error %"
+ PRId32 "\n", cp->name, cp->pmd_id, status);
+ }
+#endif
+ ifm_update_linkstatus(cp->pmd_id, IFM_ETH_LINK_UP);
+
+ /* Mark link as UP */
+ cp->state = 1;
+}
+
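+/*
+ * Bring a link down through the interface manager and mark it DOWN.
+ */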
+void
+app_link_down_internal(__rte_unused struct app_params *app, struct app_link_params *cp)
+{
+#if 0
+ uint32_t i;
+ int status;
+ struct rte_eth_link link;
+
+ if (app_link_is_virtual(cp)) {
+ cp->state = 0;
+ return;
+ }
+ rte_eth_link_get(cp->pmd_id, &link);
+ if (link.link_status) {
+ /* PMD link down */
+ status = rte_eth_dev_set_link_down(cp->pmd_id);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): PMD set link down error %"
+ PRId32 "\n", cp->name, cp->pmd_id, status);
+ }
+#endif
+ ifm_update_linkstatus(cp->pmd_id, IFM_ETH_LINK_DOWN);
+ /* Mark link as DOWN */
+ cp->state = 0;
+
+ /* Return if current link IP is not valid */
+ if (cp->ip == 0)
+ return;
+#if 0
+ /* For each link, remove filters for IP of current link */
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+
+ /* IP */
+ if (p->ip_local_q != 0) {
+ int status = app_link_filter_ip_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting IP filter "
+ "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->ip_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting IP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->ip_local_q,
+ cp->ip, status);
+ }
+
+ /* TCP */
+ if (p->tcp_local_q != 0) {
+ int status = app_link_filter_tcp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->tcp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->tcp_local_q,
+ cp->ip, status);
+ }
+
+ /* UDP */
+ if (p->udp_local_q != 0) {
+ int status = app_link_filter_udp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting UDP filter "
+ "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->udp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting UDP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->udp_local_q,
+ cp->ip, status);
+ }
+
+ /* SCTP */
+ if (p->sctp_local_q != 0) {
+ int status = app_link_filter_sctp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->sctp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->sctp_local_q,
+ cp->ip, status);
+ }
+ }
+#endif
+}
+
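+/* Verify that all configured links report UP; panic if any link is DOWN. */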
+static void
+app_check_link(struct app_params *app)
+{
+ uint32_t all_links_up, i;
+
+ all_links_up = 1;
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+ struct rte_eth_link link_params;
+
+ memset(&link_params, 0, sizeof(link_params));
+ rte_eth_link_get(p->pmd_id, &link_params);
+
+ APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
+ p->name,
+ p->pmd_id,
+ link_params.link_speed / 1000,
+ link_params.link_status ? "UP" : "DOWN");
+
+ if (link_params.link_status == ETH_LINK_DOWN)
+ all_links_up = 0;
+ }
+
+ if (all_links_up == 0)
+ rte_panic("Some links are DOWN\n");
+}
+
+static uint32_t
+is_any_swq_frag_or_ras(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+
+ if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
+ (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+app_init_link_frag_ras(struct app_params *app)
+{
+ uint32_t i;
+
+ if (is_any_swq_frag_or_ras(app)) {
+ for (i = 0; i < app->n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p_txq =
+ &app->hwq_out_params[i];
+
+ p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
+ }
+ }
+}
+
+static inline int
+app_get_cpu_socket_id(uint32_t pmd_id)
+{
+ int status = rte_eth_dev_socket_id(pmd_id);
+
+ return (status != SOCKET_ID_ANY) ? status : 0;
+}
+
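+/*
+ * Default RX mode and flow director settings applied per port by
+ * app_init_link() when hardware load balancing (RSS) is enabled.
+ */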
+struct rte_eth_rxmode rx_mode = {
+ .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled. */
+ .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
+ .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
+ .hw_vlan_strip = 1, /**< VLAN strip enabled. */
+ .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
+ .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
+ .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
+};
+struct rte_fdir_conf fdir_conf = {
+ .mode = RTE_FDIR_MODE_NONE,
+ .pballoc = RTE_FDIR_PBALLOC_64K,
+ .status = RTE_FDIR_REPORT_STATUS,
+ .mask = {
+ .vlan_tci_mask = 0x0,
+ .ipv4_mask = {
+ .src_ip = 0xFFFFFFFF,
+ .dst_ip = 0xFFFFFFFF,
+ },
+ .ipv6_mask = {
+ .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
+ },
+ .src_port_mask = 0xFFFF,
+ .dst_port_mask = 0xFFFF,
+ .mac_addr_byte_mask = 0xFF,
+ .tunnel_type_mask = 1,
+ .tunnel_id_mask = 0xFFFFFFFF,
+ },
+ .drop_queue = 127,
+};
+
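+/*
+ * Initialize every physical link through the interface manager: build a
+ * port_config_t per link (queue counts, mempool and RX/TX configuration),
+ * select RSS or flow director mode, set up the port and bring the link up.
+ */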
+static void
+app_init_link(struct app_params *app)
+{
+ uint32_t i, size;
+
+ app_init_link_frag_ras(app);
+
+	/*
+	 * Configure the port_config_t structures for interface manager
+	 * initialization.
+	 */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(port_config_t));
+ port_config = rte_zmalloc(NULL, (app->n_links * size), RTE_CACHE_LINE_SIZE);
+ if (port_config == NULL)
+ rte_panic("port_config is NULL: Memory Allocation failure\n");
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p_link = &app->link_params[i];
+ uint32_t link_id, n_hwq_in, n_hwq_out;
+ int status;
+
+ status = sscanf(p_link->name, "LINK%" PRIu32, &link_id);
+ if (status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_link->name, link_id, status);
+
+ n_hwq_in = app_link_get_n_rxq(app, p_link);
+ n_hwq_out = app_link_get_n_txq(app, p_link);
+
+ printf("\n\nn_hwq_in %d\n", n_hwq_in);
+		struct rte_eth_conf *My_local_conf = &p_link->conf;
+
+		if (enable_hwlb) {
+			/* Hardware load balancing: enable RSS on IP/UDP/TCP */
+			My_local_conf->rxmode = rx_mode;
+			My_local_conf->fdir_conf = fdir_conf;
+			My_local_conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
+			My_local_conf->rx_adv_conf.rss_conf.rss_key = NULL;
+			My_local_conf->rx_adv_conf.rss_conf.rss_hf =
+				ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
+		} else {
+			/* Disable RSS; use the flow director in perfect filter mode */
+			My_local_conf->rx_adv_conf.rss_conf.rss_hf = 0;
+			My_local_conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
+		}
+
+		/* Enable hardware CRC stripping to avoid double stripping of the FCS in a VM */
+		p_link->conf.rxmode.hw_strip_crc = 1;
+
+ APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
+ "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
+ p_link->name,
+ p_link->pmd_id,
+ n_hwq_in,
+ n_hwq_out);
+
+ port_config[i].port_id = p_link->pmd_id;
+ port_config[i].nrx_queue = n_hwq_in;
+ port_config[i].ntx_queue = n_hwq_out;
+ port_config[i].state = 1;
+ port_config[i].promisc = p_link->promisc;
+ port_config[i].mempool.pool_size = app->mempool_params[0].pool_size;
+ port_config[i].mempool.buffer_size = app->mempool_params[0].buffer_size;
+ port_config[i].mempool.cache_size = app->mempool_params[0].cache_size;
+ port_config[i].mempool.cpu_socket_id = app->mempool_params[0].cpu_socket_id;
+ memcpy (&port_config[i].port_conf, &p_link->conf, sizeof(struct rte_eth_conf));
+ memcpy (&port_config[i].rx_conf, &app->hwq_in_params[0].conf, sizeof(struct rte_eth_rxconf));
+ memcpy (&port_config[i].tx_conf, &app->hwq_out_params[0].conf, sizeof(struct rte_eth_txconf));
+
+		if (app->header_csum_req) {
+			/* Enable TCP and UDP HW Checksum */
+			port_config[i].tx_conf.txq_flags &=
+				~(ETH_TXQ_FLAGS_NOXSUMTCP | ETH_TXQ_FLAGS_NOXSUMUDP);
+		}
+
+		if (ifm_port_setup(p_link->pmd_id, &port_config[i]))
+			rte_panic("Port Setup Failed: %s - %" PRIu32 "\n",
+				p_link->name, p_link->pmd_id);
+
+#if 0
+ /* LINK */
+ status = rte_eth_dev_configure(
+ p_link->pmd_id,
+ n_hwq_in,
+ n_hwq_out,
+ &p_link->conf);
+ if (status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_link->name, p_link->pmd_id, status);
+
+ rte_eth_macaddr_get(p_link->pmd_id,
+ (struct ether_addr *) &p_link->mac_addr);
+
+ if (p_link->promisc)
+ rte_eth_promiscuous_enable(p_link->pmd_id);
+
+ /* RXQ */
+ for (j = 0; j < app->n_pktq_hwq_in; j++) {
+ struct app_pktq_hwq_in_params *p_rxq =
+ &app->hwq_in_params[j];
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ status =
+ sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
+ &rxq_link_id, &rxq_queue_id);
+ if (status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_rxq->name, rxq_queue_id, status);
+
+ if (rxq_link_id != link_id)
+ continue;
+
+ status = rte_eth_rx_queue_setup(
+ p_link->pmd_id,
+ rxq_queue_id,
+ p_rxq->size,
+ app_get_cpu_socket_id(p_link->pmd_id),
+ &p_rxq->conf,
+ app->mempool[p_rxq->mempool_id]);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s init error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_rxq->name,
+ status);
+ }
+
+ /* TXQ */
+ for (j = 0; j < app->n_pktq_hwq_out; j++) {
+ struct app_pktq_hwq_out_params *p_txq =
+ &app->hwq_out_params[j];
+ uint32_t txq_link_id, txq_queue_id;
+
+ status =
+ sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
+ &txq_link_id, &txq_queue_id);
+
+ if (status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_txq->name, txq_link_id, status);
+
+ if (txq_link_id != link_id)
+ continue;
+
+ if (app->header_csum_req) {
+ /* Enable TCP and UDP HW Checksum */
+ p_txq->conf.txq_flags &=
+ ~(ETH_TXQ_FLAGS_NOXSUMTCP|
+ ETH_TXQ_FLAGS_NOXSUMUDP);
+ }
+
+ status = rte_eth_tx_queue_setup(
+ p_link->pmd_id,
+ txq_queue_id,
+ p_txq->size,
+ app_get_cpu_socket_id(p_link->pmd_id),
+ &p_txq->conf);
+
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s init error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_txq->name,
+ status);
+ }
+
+ /* LINK START */
+ status = rte_eth_dev_start(p_link->pmd_id);
+ if (status < 0)
+ rte_panic("Cannot start %s (error %" PRId32 ")\n",
+ p_link->name, status);
+
+ /* LINK UP */
+ app_link_set_arp_filter(app, p_link);
+ app_link_set_tcp_syn_filter(app, p_link);
+#endif
+ app_link_up_internal(app, p_link);
+ }
+
+ app_check_link(app);
+}
+
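+/*
+ * Create one rte_ring per software queue. Single-producer/single-consumer
+ * flags are set when the ring has exactly one writer/reader.
+ */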
+static void
+app_init_swq(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+ unsigned int flags = 0;
+
+ if (app_swq_get_readers(app, p) == 1)
+ flags |= RING_F_SC_DEQ;
+ if (app_swq_get_writers(app, p) == 1)
+ flags |= RING_F_SP_ENQ;
+
+ APP_LOG(app, HIGH, "Initializing %s...", p->name);
+ app->swq[i] = rte_ring_create(
+ p->name,
+ p->size,
+ p->cpu_socket_id,
+ flags);
+
+ if (app->swq[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
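+/*
+ * Configure the traffic manager (rte_sched) port, subports and pipes for
+ * every TM packet queue defined in the configuration.
+ */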
+static void
+app_init_tm(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tm; i++) {
+ struct app_pktq_tm_params *p_tm = &app->tm_params[i];
+ struct app_link_params *p_link;
+ struct rte_eth_link link_eth_params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+ int status;
+
+ p_link = app_get_link_for_tm(app, p_tm);
+ /* LINK */
+ rte_eth_link_get(p_link->pmd_id, &link_eth_params);
+
+ /* TM */
+ p_tm->sched_port_params.name = p_tm->name;
+ p_tm->sched_port_params.socket =
+ app_get_cpu_socket_id(p_link->pmd_id);
+ p_tm->sched_port_params.rate =
+ (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
+ sched = rte_sched_port_config(&p_tm->sched_port_params);
+ if (sched == NULL)
+ rte_panic("%s init error\n", p_tm->name);
+ app->tm[i] = sched;
+
+ /* Subport */
+ n_subports = p_tm->sched_port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport, pipe_id;
+
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &p_tm->sched_subport_params[subport_id]);
+ if (status)
+ rte_panic("%s subport %" PRIu32
+ " init error (%" PRId32 ")\n",
+ p_tm->name, subport_id, status);
+
+ /* Pipe */
+ n_pipes_per_subport =
+ p_tm->sched_port_params.n_pipes_per_subport;
+ for (pipe_id = 0;
+ pipe_id < n_pipes_per_subport;
+ pipe_id++) {
+ int profile_id = p_tm->sched_pipe_to_profile[
+ subport_id * APP_MAX_SCHED_PIPES +
+ pipe_id];
+
+ if (profile_id == -1)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status)
+ rte_panic("%s subport %" PRIu32
+ " pipe %" PRIu32
+ " (profile %" PRId32 ") "
+ "init error (% " PRId32 ")\n",
+ p_tm->name, subport_id, pipe_id,
+ profile_id, status);
+ }
+ }
+ }
+}
+
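+/* Create the message queues (SP/SC rings) used by the pipeline threads. */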
+static void
+app_init_msgq(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_msgq; i++) {
+ struct app_msgq_params *p = &app->msgq_params[i];
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p->name);
+ app->msgq[i] = rte_ring_create(
+ p->name,
+ p->size,
+ p->cpu_socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (app->msgq[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
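+/*
+ * Convert application-level pipeline parameters (RXQ/TXQ, SWQ, TM, SOURCE
+ * and SINK references, message queues and arguments) into the
+ * pipeline_params structure consumed by the pipeline back end.
+ */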
+static void app_pipeline_params_get(struct app_params *app,
+ struct app_pipeline_params *p_in,
+ struct pipeline_params *p_out)
+{
+ uint32_t i;
+ uint32_t mempool_id;
+
+ snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
+
+ p_out->socket_id = (int) p_in->socket_id;
+
+ p_out->log_level = app->log_level;
+
+ /* pktq_in */
+ p_out->n_ports_in = p_in->n_pktq_in;
+ for (i = 0; i < p_in->n_pktq_in; i++) {
+ struct app_pktq_in_params *in = &p_in->pktq_in[i];
+ struct pipeline_port_in_params *out = &p_out->port_in[i];
+
+ switch (in->type) {
+ case APP_PKTQ_IN_HWQ:
+ {
+ struct app_pktq_hwq_in_params *p_hwq_in =
+ &app->hwq_in_params[in->id];
+ struct app_link_params *p_link =
+ app_get_link_for_rxq(app, p_hwq_in);
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ int status =
+ sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id,
+ &rxq_queue_id);
+ if(status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_hwq_in->name, rxq_link_id, status);
+
+ out->type = PIPELINE_PORT_IN_ETHDEV_READER;
+ out->params.ethdev.port_id = p_link->pmd_id;
+ out->params.ethdev.queue_id = rxq_queue_id;
+ out->burst_size = p_hwq_in->burst;
+ break;
+ }
+ case APP_PKTQ_IN_SWQ:
+ {
+ struct app_pktq_swq_params *swq_params =
+ &app->swq_params[in->id];
+
+ if ((swq_params->ipv4_frag == 0) &&
+ (swq_params->ipv6_frag == 0)) {
+ if (app_swq_get_readers(app,
+ swq_params) == 1) {
+ out->type =
+ PIPELINE_PORT_IN_RING_READER;
+ out->params.ring.ring =
+ app->swq[in->id];
+ out->burst_size =
+ app->swq_params[in->id].
+ burst_read;
+ } else {
+ out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
+ out->params.ring_multi.ring = app->swq[in->id];
+ out->burst_size = swq_params->burst_read;
+ }
+ } else {
+ if (swq_params->ipv4_frag == 1) {
+ struct rte_port_ring_reader_ipv4_frag_params
+ *params =
+ &out->params.ring_ipv4_frag;
+
+ out->type =
+ PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
+ params->ring = app->swq[in->id];
+ params->mtu = swq_params->mtu;
+ params->metadata_size =
+ swq_params->metadata_size;
+ params->pool_direct =
+ app->mempool
+ [swq_params->mempool_direct_id];
+ params->pool_indirect =
+ app->mempool
+ [swq_params->mempool_indirect_id];
+ out->burst_size = swq_params->burst_read;
+ } else {
+ struct rte_port_ring_reader_ipv6_frag_params
+ *params =
+ &out->params.ring_ipv6_frag;
+
+ out->type =
+ PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
+ params->ring = app->swq[in->id];
+ params->mtu = swq_params->mtu;
+ params->metadata_size =
+ swq_params->metadata_size;
+ params->pool_direct =
+ app->mempool
+ [swq_params->mempool_direct_id];
+ params->pool_indirect =
+ app->mempool
+ [swq_params->mempool_indirect_id];
+ out->burst_size = swq_params->burst_read;
+ }
+ }
+ break;
+ }
+ case APP_PKTQ_IN_TM:
+ out->type = PIPELINE_PORT_IN_SCHED_READER;
+ out->params.sched.sched = app->tm[in->id];
+ out->burst_size = app->tm_params[in->id].burst_read;
+ break;
+ case APP_PKTQ_IN_SOURCE:
+ mempool_id = app->source_params[in->id].mempool_id;
+ out->type = PIPELINE_PORT_IN_SOURCE;
+ out->params.source.mempool = app->mempool[mempool_id];
+ out->burst_size = app->source_params[in->id].burst;
+
+#ifdef RTE_NEXT_ABI
+ if (app->source_params[in->id].file_name
+ != NULL) {
+ out->params.source.file_name = strdup(
+ app->source_params[in->id].
+ file_name);
+ if (out->params.source.file_name == NULL) {
+ out->params.source.
+ n_bytes_per_pkt = 0;
+ break;
+ }
+ out->params.source.n_bytes_per_pkt =
+ app->source_params[in->id].
+ n_bytes_per_pkt;
+ }
+#endif
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* pktq_out */
+ p_out->n_ports_out = p_in->n_pktq_out;
+ for (i = 0; i < p_in->n_pktq_out; i++) {
+ struct app_pktq_out_params *in = &p_in->pktq_out[i];
+ struct pipeline_port_out_params *out = &p_out->port_out[i];
+
+ switch (in->type) {
+ case APP_PKTQ_OUT_HWQ:
+ {
+ struct app_pktq_hwq_out_params *p_hwq_out =
+ &app->hwq_out_params[in->id];
+ struct app_link_params *p_link =
+ app_get_link_for_txq(app, p_hwq_out);
+ uint32_t txq_link_id, txq_queue_id;
+
+ int status =
+ sscanf(p_hwq_out->name,
+ "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id,
+ &txq_queue_id);
+ if(status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_hwq_out->name, txq_link_id, status);
+
+ if (p_hwq_out->dropless == 0) {
+ struct rte_port_ethdev_writer_params *params =
+ &out->params.ethdev;
+
+ out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
+ params->port_id = p_link->pmd_id;
+ params->queue_id = txq_queue_id;
+ params->tx_burst_sz =
+ app->hwq_out_params[in->id].burst;
+ } else {
+ struct rte_port_ethdev_writer_nodrop_params
+ *params = &out->params.ethdev_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
+ params->port_id = p_link->pmd_id;
+ params->queue_id = txq_queue_id;
+ params->tx_burst_sz = p_hwq_out->burst;
+ params->n_retries = p_hwq_out->n_retries;
+ }
+ break;
+ }
+ case APP_PKTQ_OUT_SWQ:
+ {
+ struct app_pktq_swq_params *swq_params =
+ &app->swq_params[in->id];
+
+ if ((swq_params->ipv4_ras == 0) &&
+ (swq_params->ipv6_ras == 0)) {
+ if (app_swq_get_writers(app, swq_params) == 1) {
+ if (app->swq_params[in->id].dropless == 0) {
+ struct rte_port_ring_writer_params *params =
+ &out->params.ring;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz =
+ app->swq_params[in->id].burst_write;
+ } else {
+ struct rte_port_ring_writer_nodrop_params
+ *params = &out->params.ring_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_WRITER_NODROP;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz =
+ app->swq_params[in->id].burst_write;
+ params->n_retries =
+ app->swq_params[in->id].n_retries;
+ }
+ } else {
+ if (swq_params->dropless == 0) {
+ struct rte_port_ring_multi_writer_params
+ *params =
+ &out->params.ring_multi;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ } else {
+ struct rte_port_ring_multi_writer_nodrop_params
+ *params =
+ &out->params.ring_multi_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
+
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ params->n_retries = swq_params->n_retries;
+ }
+ }
+ } else {
+ if (swq_params->ipv4_ras == 1) {
+ struct rte_port_ring_writer_ipv4_ras_params
+ *params =
+ &out->params.ring_ipv4_ras;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ } else {
+ struct rte_port_ring_writer_ipv6_ras_params
+ *params =
+ &out->params.ring_ipv6_ras;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ }
+ }
+ break;
+ }
+ case APP_PKTQ_OUT_TM: {
+ struct rte_port_sched_writer_params *params =
+ &out->params.sched;
+
+ out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
+ params->sched = app->tm[in->id];
+ params->tx_burst_sz =
+ app->tm_params[in->id].burst_write;
+ break;
+ }
+ case APP_PKTQ_OUT_SINK:
+ out->type = PIPELINE_PORT_OUT_SINK;
+ if (app->sink_params[in->id].file_name != NULL) {
+ out->params.sink.file_name = strdup(
+ app->sink_params[in->id].
+ file_name);
+ if (out->params.sink.file_name == NULL) {
+ out->params.sink.max_n_pkts = 0;
+ break;
+ }
+ out->params.sink.max_n_pkts =
+ app->sink_params[in->id].
+ n_pkts_to_dump;
+ } else {
+ out->params.sink.file_name = NULL;
+ out->params.sink.max_n_pkts = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* msgq */
+ p_out->n_msgq = p_in->n_msgq_in;
+
+ for (i = 0; i < p_in->n_msgq_in; i++)
+ p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
+
+ for (i = 0; i < p_in->n_msgq_out; i++)
+ p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
+
+ /* args */
+ p_out->n_args = p_in->n_args;
+ for (i = 0; i < p_in->n_args; i++) {
+ p_out->args_name[i] = p_in->args_name[i];
+ p_out->args_value[i] = p_in->args_value[i];
+ }
+}
+
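+/* Instantiate the back end and (optional) front end of each pipeline. */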
+static void
+app_init_pipelines(struct app_params *app)
+{
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ struct pipeline_type *ptype;
+ struct pipeline_params pp;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", params->name);
+
+ ptype = app_pipeline_type_find(app, params->type);
+ if (ptype == NULL)
+ rte_panic("Init error: Unknown pipeline type \"%s\"\n",
+ params->type);
+
+ app_pipeline_params_get(app, params, &pp);
+
+ /* Back-end */
+ data->be = NULL;
+ if (ptype->be_ops->f_init) {
+ data->be = ptype->be_ops->f_init(&pp, (void *) app);
+
+ if (data->be == NULL)
+ rte_panic("Pipeline instance \"%s\" back-end "
+ "init error\n", params->name);
+ }
+
+ /* Front-end */
+ data->fe = NULL;
+ if (ptype->fe_ops->f_init) {
+ data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
+
+ if (data->fe == NULL)
+ rte_panic("Pipeline instance \"%s\" front-end "
+ "init error\n", params->name);
+ }
+
+ data->ptype = ptype;
+
+ data->timer_period = (rte_get_tsc_hz() *
+ params->timer_period) / 100;
+ }
+}
+
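+/*
+ * Attach each pipeline to the thread running on its configured lcore and
+ * initialize the thread's message queues, timer period and deadlines.
+ */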
+static void
+app_init_threads(struct app_params *app)
+{
+ uint64_t time = rte_get_tsc_cycles();
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ struct pipeline_type *ptype;
+ struct app_thread_data *t;
+ struct app_thread_pipeline_data *p;
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
+ params->socket_id,
+ params->core_id,
+ (params->hyper_th_id) ? "h" : "");
+
+ t = &app->thread_data[lcore_id];
+
+ t->timer_period = (rte_get_tsc_hz() *
+ APP_THREAD_TIMER_PERIOD) / 1000;
+ t->thread_req_deadline = time + t->timer_period;
+
+ t->headroom_cycles = 0;
+ t->headroom_time = rte_get_tsc_cycles();
+ t->headroom_ratio = 0.0;
+
+ t->msgq_in = app_thread_msgq_in_get(app,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+ if (t->msgq_in == NULL)
+ rte_panic("Init error: Cannot find MSGQ_IN "
+ "for thread %" PRId32, lcore_id);
+
+ t->msgq_out = app_thread_msgq_out_get(app,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+ if (t->msgq_out == NULL)
+ rte_panic("Init error: Cannot find MSGQ_OUT "
+ "for thread %" PRId32, lcore_id);
+
+ ptype = app_pipeline_type_find(app, params->type);
+ if (ptype == NULL)
+ rte_panic("Init error: Unknown pipeline "
+ "type \"%s\"\n", params->type);
+
+ p = (ptype->be_ops->f_run == NULL) ?
+ &t->regular[t->n_regular] :
+ &t->custom[t->n_custom];
+
+ p->pipeline_id = p_id;
+ p->be = data->be;
+ p->f_run = ptype->be_ops->f_run;
+ p->f_timer = ptype->be_ops->f_timer;
+ p->timer_period = data->timer_period;
+ p->deadline = time + data->timer_period;
+
+ data->enabled = 1;
+
+ if (ptype->be_ops->f_run == NULL)
+ t->n_regular++;
+ else
+ t->n_custom++;
+ }
+}
+
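+/*
+ * Application init sequence: core map/mask, EAL, interface manager, links,
+ * software queues, traffic managers, message queues, pipeline type
+ * registration, pipelines, threads, and finally the L3 forwarding, ARP and
+ * ND tables.
+ */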
+int app_init(struct app_params *app)
+{
+ app_init_core_map(app);
+ app_init_core_mask(app);
+
+ app_init_eal(app);
+ ifm_init();
+ //app_init_mempool(app);
+ app_init_link(app);
+ app_init_swq(app);
+ app_init_tm(app);
+ app_init_msgq(app);
+
+ app_pipeline_common_cmd_push(app);
+ app_pipeline_thread_cmd_push(app);
+ app_pipeline_type_register(app, &pipeline_master);
+ app_pipeline_type_register(app, &pipeline_cgnapt);
+ app_pipeline_type_register(app, &pipeline_loadb);
+ app_pipeline_type_register(app, &pipeline_timer);
+ app_pipeline_type_register(app, &pipeline_txrx);
+ app_pipeline_type_register(app, &pipeline_arpicmp);
+
+ app_init_pipelines(app);
+ app_init_threads(app);
+
+ l3fwd_init();
+ create_arp_table();
+ create_nd_table();
+ populate_lpm_routes();
+ print_interface_details();
+
+ return 0;
+}
+
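+/* Append a pipeline type's CLI commands to the application command table. */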
+static int
+app_pipeline_type_cmd_push(struct app_params *app,
+ struct pipeline_type *ptype)
+{
+ cmdline_parse_ctx_t *cmds;
+ uint32_t n_cmds, i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (ptype == NULL))
+ return -EINVAL;
+
+ n_cmds = pipeline_type_cmds_count(ptype);
+ if (n_cmds == 0)
+ return 0;
+
+ cmds = ptype->fe_ops->cmds;
+
+ /* Check for available slots in the application commands array */
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push pipeline commands into the application */
+ memcpy(&app->cmds[app->n_cmds],
+ cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
+
+int
+app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
+{
+ uint32_t n_cmds, i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (ptype == NULL) ||
+ (ptype->name == NULL) ||
+ (strlen(ptype->name) == 0) ||
+ (ptype->be_ops->f_init == NULL) ||
+ (ptype->be_ops->f_timer == NULL))
+ return -EINVAL;
+
+ /* Check for duplicate entry */
+ for (i = 0; i < app->n_pipeline_types; i++)
+ if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
+ return -EEXIST;
+
+ /* Check for resource availability */
+ n_cmds = pipeline_type_cmds_count(ptype);
+ if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
+ (n_cmds > APP_MAX_CMDS - app->n_cmds))
+ return -ENOMEM;
+
+ /* Copy pipeline type */
+ memcpy(&app->pipeline_type[app->n_pipeline_types++],
+ ptype,
+ sizeof(struct pipeline_type));
+
+ /* Copy CLI commands */
+ if (n_cmds)
+ app_pipeline_type_cmd_push(app, ptype);
+
+ return 0;
+}
+
+struct pipeline_type *
+app_pipeline_type_find(struct app_params *app, char *name)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipeline_types; i++)
+ if (strcmp(app->pipeline_type[i].name, name) == 0)
+ return &app->pipeline_type[i];
+
+ return NULL;
+}
diff --git a/VNFs/vCGNAPT/main.c b/VNFs/vCGNAPT/main.c
new file mode 100644
index 00000000..9ebf6fc3
--- /dev/null
+++ b/VNFs/vCGNAPT/main.c
@@ -0,0 +1,50 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "app.h"
+
+static struct app_params app;
+
+int
+main(int argc, char **argv)
+{
+ rte_openlog_stream(stderr);
+
+ /* Config */
+ app_config_init(&app);
+
+ app_config_args(&app, argc, argv);
+
+ app_config_preproc(&app);
+
+ app_config_parse(&app, app.parser_file);
+
+ app_config_check(&app);
+
+ /* Timer subsystem init*/
+ rte_timer_subsystem_init();
+
+ /* Init */
+ app_init(&app);
+
+ /* Run-time */
+ rte_eal_mp_remote_launch(
+ app_thread,
+ (void *) &app,
+ CALL_MASTER);
+
+ return 0;
+}
diff --git a/VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.c b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.c
new file mode 100644
index 00000000..e91fd75b
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.c
@@ -0,0 +1,825 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <rte_mbuf.h>
+#include "cgnapt_pcp_be.h"
+#include "pipeline_cgnapt_be.h"
+#include "pipeline_cgnapt_common.h"
+
+/**
+ * @file
+ * Pipeline CG-NAPT PCP BE Implementation.
+ *
+ * Implementation of Pipeline CG-NAPT PCP Back End (BE).
+ * Handles PCP requests for both IPv4 and IPv6.
+ * Constructs PCP responses for both IPv4 and IPv6.
+ * Provides backend CLI support.
+ * Runs on the CGNAPT pipeline core.
+ */
+
+#ifdef PCP_ENABLE
+
+uint32_t pcp_lifetime = 60;
+uint8_t pcp_ipv4_format[12] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff };
+/**
+ * Function to initialize PCP resources (creates the PCP mbuf pool)
+ *
+ */
+enum PCP_RET pcp_init(void)
+{
+ /* Init of PCP mempool */
+ if (!pcp_pool_init) {
+ pcp_pool_init = 1;
+ pcp_mbuf_pool = rte_pktmbuf_pool_create(
+ "pcp_mbuf_pool", 64, 32, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+
+ if (pcp_mbuf_pool == NULL) {
+ printf("PCP mbuf pool creation failed\n");
+ return PCP_INIT_UNSUCCESS;
+ }
+ }
+ printf("In pcp_init: success\n");
+ return PCP_INIT_SUCCESS;
+}
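+
+/*
+ * Illustrative usage (the exact call site lives elsewhere in the CGNAPT
+ * pipeline code): pcp_init() is expected to run once during pipeline
+ * bring-up, before any PCP packet reaches handle_pcp_req(). The
+ * pcp_pool_init guard above makes repeated calls harmless, e.g.:
+ *
+ *	if (pcp_init() != PCP_INIT_SUCCESS)
+ *		printf("PCP mempool creation failed, PCP stays disabled\n");
+ */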
+
+/**
+ * Function to handle PCP CLI commands
+ *
+ * @param p
+ *  Pipeline struct associated with each pipeline (unused)
+ * @param msg
+ * CLI message enqueued by master thread
+ */
+
+void *pipeline_cgnapt_msg_req_pcp_handler(
+ __rte_unused struct pipeline *p,
+ void *msg)
+{
+
+ struct pipeline_cgnapt_pcp_msg_rsp *rsp = msg;
+ struct pipeline_cgnapt_pcp_msg_req *req = msg;
+
+	rsp->status = 0;
+ if (req->cmd == CGNAPT_PCP_CMD_STATS) {
+ printf("pcp_success_count:%d\n", pcp_success_count);
+ printf("pcp_error_count:%d\n", pcp_error_count);
+ printf("pcp_entry_count:%d\n", pcp_entry_count);
+
+ return rsp;
+ }
+ if (req->cmd == CGNAPT_PCP_CMD_PCP_ENABLE) {
+ if (req->lifetime) {
+ pcp_enable = 1;
+ printf("PCP option is enabled\n");
+ } else{
+ pcp_enable = 0;
+ printf("PCP option is disabled\n");
+ }
+ return rsp;
+ }
+ if (req->cmd == CGNAPT_PCP_CMD_SET_LIFETIME) {
+ pcp_lifetime = req->lifetime;
+ printf("pcp_lifetime:%" PRIu32 "\n", pcp_lifetime);
+ return rsp;
+ }
+ if (req->cmd == CGNAPT_PCP_CMD_GET_LIFETIME) {
+ printf("pcp_lifetime:%" PRIu32 "\n", pcp_lifetime);
+ return rsp;
+ }
+
+ printf("CG-NAPT PCP handler called with wrong args %x %x\n",
+ req->cmd, req->lifetime);
+ printf("\n");
+ return rsp;
+}
+
+void clone_data(
+ struct rte_mbuf *rx_pkt,
+ struct rte_mbuf *tx_pkt);
+
+/**
+ * Function to copy Rx pkt data to Tx pkt data
+ *
+ * @param rx_pkt
+ * Received PCP pkt
+ * @param tx_pkt
+ * Transmitting PCP pkt
+ */
+
+void clone_data(
+ struct rte_mbuf *rx_pkt,
+ struct rte_mbuf *tx_pkt)
+{
+ char *buf1;
+ char *buf2;
+
+ buf1 = rte_pktmbuf_mtod(rx_pkt, char *);
+ buf2 = rte_pktmbuf_append(tx_pkt, rx_pkt->data_len);
+
+ rte_memcpy(buf2, buf1, rx_pkt->data_len);
+}
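+
+/*
+ * Note: rte_pktmbuf_append() returns NULL when tx_pkt does not have
+ * rx_pkt->data_len bytes of tailroom, in which case the rte_memcpy()
+ * above would dereference a NULL pointer. A defensive variant of the
+ * copy (a sketch, not required by the callers in this file) would be:
+ *
+ *	buf2 = rte_pktmbuf_append(tx_pkt, rx_pkt->data_len);
+ *	if (buf2 != NULL)
+ *		rte_memcpy(buf2, buf1, rx_pkt->data_len);
+ */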
+
+/**
+ * Function to construct the L2/L3/L4 headers of the PCP response pkt and send it out
+ *
+ * @param rx_pkt
+ * Received PCP pkt
+ * @param tx_pkt
+ * Transmitting PCP pkt
+ * @param ver
+ * Version of pkt : IPv4 or IPv6
+ * @param rte_p
+ * A pointer to struct rte_pipeline
+ */
+
+void construct_pcp_resp(
+ struct rte_mbuf *rx_pkt,
+ struct rte_mbuf *tx_pkt,
+ uint8_t ver, struct rte_pipeline *rte_p)
+{
+ struct ether_hdr *eth_tx, *eth_rx;
+ struct ipv4_hdr *ipv4_tx, *ipv4_rx;
+ struct ipv6_hdr *ipv6_tx, *ipv6_rx;
+ struct udp_hdr *udp_tx, *udp_rx;
+ struct pcp_resp_hdr *pcp_resp;
+ struct pcp_req_hdr *pcp_req;
+
+ tx_pkt->port = rx_pkt->port;
+
+ if (ver == 4) {
+ pcp_req = (struct pcp_req_hdr *)
+ ((uint8_t *) rx_pkt + IPV4_PCP_OFST);
+ pcp_resp = (struct pcp_resp_hdr *)
+ ((uint8_t *) tx_pkt + IPV4_PCP_OFST);
+ } else {
+ pcp_req = (struct pcp_req_hdr *)
+ ((uint8_t *) rx_pkt + IPV6_PCP_OFST);
+ pcp_resp = (struct pcp_resp_hdr *)
+ ((uint8_t *) tx_pkt + IPV6_PCP_OFST);
+ }
+
+ if (pcp_resp->result_code == PCP_SUCCESS) {
+ memset(pcp_resp->reserve, 0, 12);
+ pcp_success_count++;
+ } else {
+ memcpy(pcp_resp->reserve, &pcp_req->cli_ip[1], 12);
+ pcp_error_count++;
+ }
+
+ pcp_resp->req_resp = PCP_RESP;
+ pcp_resp->res_unuse = 0x00;
+ /* Epoch time */
+ pcp_resp->epoch_time = rte_bswap32(time(NULL));
+
+ /* swap L2 identities */
+ eth_rx = rte_pktmbuf_mtod(rx_pkt, struct ether_hdr *);
+ eth_tx = rte_pktmbuf_mtod(tx_pkt, struct ether_hdr *);
+
+ memcpy(&eth_tx->s_addr, &eth_rx->d_addr, sizeof(struct ether_addr));
+ memcpy(&eth_tx->d_addr, &eth_rx->s_addr, sizeof(struct ether_addr));
+
+ /* swap L3 identities */
+
+ if (ver == 4) {
+ ipv4_rx = (struct ipv4_hdr *)((uint8_t *) rx_pkt + IP_OFFSET);
+ udp_rx = (struct udp_hdr *)((uint8_t *) rx_pkt + IPV4_UDP_OFST);
+
+ ipv4_tx = (struct ipv4_hdr *)((uint8_t *) tx_pkt + IP_OFFSET);
+ udp_tx = (struct udp_hdr *)((uint8_t *) tx_pkt + IPV4_UDP_OFST);
+
+ ipv4_tx->src_addr = ipv4_rx->dst_addr;
+ ipv4_tx->dst_addr = ipv4_rx->src_addr;
+
+ /* swap L4 identities */
+
+ udp_tx->src_port = udp_rx->dst_port;
+ udp_tx->dst_port = udp_rx->src_port;
+ udp_tx->dgram_cksum = 0;
+ udp_tx->dgram_cksum =
+ rte_ipv4_udptcp_cksum(ipv4_tx, (void *)udp_tx);
+
+ ipv4_tx->total_length =
+ rte_cpu_to_be_16(pcp_resp->result_code ==
+ PCP_MAP ? IPV4_PCP_MAP_PL_LEN :
+ IPV4_PCP_PEER_PL_LEN);
+
+ ipv4_tx->packet_id = 0xaabb;
+ ipv4_tx->fragment_offset = 0x0000;
+ ipv4_tx->time_to_live = 64;
+ ipv4_tx->next_proto_id = IP_PROTOCOL_UDP;
+ ipv4_tx->hdr_checksum = 0;
+ ipv4_tx->hdr_checksum = rte_ipv4_cksum(ipv4_tx);
+
+ } else {
+ ipv6_rx = (struct ipv6_hdr *)((uint8_t *) rx_pkt + IP_OFFSET);
+ udp_rx = (struct udp_hdr *)((uint8_t *) rx_pkt + IPV6_UDP_OFST);
+
+ ipv6_tx = (struct ipv6_hdr *)((uint8_t *) tx_pkt + IP_OFFSET);
+ udp_tx = (struct udp_hdr *)((uint8_t *) tx_pkt + IPV6_UDP_OFST);
+
+ memcpy((uint8_t *)&ipv6_tx->src_addr[0],
+ (uint8_t *)&ipv6_rx->dst_addr[0], 16);
+ memcpy((uint8_t *)&ipv6_tx->dst_addr[0],
+ (uint8_t *)&ipv6_rx->src_addr[0], 16);
+
+ /* swap L4 identities */
+
+ udp_tx->src_port = udp_rx->dst_port;
+ udp_tx->dst_port = udp_rx->src_port;
+
+ udp_tx->dgram_cksum = 0;
+ udp_tx->dgram_cksum =
+ rte_ipv6_udptcp_cksum(ipv6_tx, (void *)udp_tx);
+ ipv6_tx->payload_len =
+ rte_cpu_to_be_16(pcp_resp->result_code ==
+ PCP_MAP ? IPV6_PCP_MAP_PL_LEN :
+ IPV6_PCP_PEER_PL_LEN);
+
+ ipv6_tx->proto = IP_PROTOCOL_UDP;
+ ipv6_tx->hop_limits = 64;
+ }
+
+ #ifdef PCP_DEBUG
+ rte_hexdump(stdout, "Transferring PCP Pkt", tx_pkt, 400);
+ #endif
+
+ rte_pipeline_port_out_packet_insert(rte_p, tx_pkt->port, tx_pkt);
+}
+
+/**
+ * Function to handle PCP requests
+ *
+ * @param rx_pkt
+ * Received PCP pkt
+ * @param ver
+ * Version of pkt : IPv4 or IPv6
+ * @param pipeline_cgnapt_ptr
+ * A pointer to struct pipeline_cgnapt
+ */
+
+void handle_pcp_req(struct rte_mbuf *rx_pkt,
+ uint8_t ver,
+ void *pipeline_cgnapt_ptr)
+{
+ struct ipv4_hdr *ipv4 = NULL;
+ struct ipv6_hdr *ipv6 = NULL;
+ struct udp_hdr *udp_rx = NULL;
+ struct pcp_req_hdr *pcp_req = NULL;
+ struct pcp_resp_hdr *pcp_resp = NULL;
+ struct rte_mbuf *tx_pkt = NULL;
+ struct pipeline_cgnapt *p_nat = pipeline_cgnapt_ptr;
+
+	if (pcp_mbuf_pool == NULL) {
+		printf("handle PCP: PCP pool is NULL\n");
+		return;
+	}
+ tx_pkt = rte_pktmbuf_alloc(pcp_mbuf_pool);
+ if (tx_pkt == NULL) {
+ printf("unable to allocate mem from PCP pool\n");
+ return;
+ }
+ /* clone the pkt */
+
+ clone_data(rx_pkt, tx_pkt);
+
+ #ifdef PCP_DEBUG
+ rte_hexdump(stdout, "cloned PCP Pkt", tx_pkt, 400);
+ #endif
+
+ if (ver == 4) {
+ pcp_req = (struct pcp_req_hdr *)
+ ((uint8_t *) rx_pkt + IPV4_PCP_OFST);
+ pcp_resp = (struct pcp_resp_hdr *)
+ ((uint8_t *) tx_pkt + IPV4_PCP_OFST);
+ udp_rx = (struct udp_hdr *)
+ ((uint8_t *) rx_pkt + IPV4_UDP_OFST);
+ } else {
+ pcp_req = (struct pcp_req_hdr *)
+ ((uint8_t *) rx_pkt + IPV6_PCP_OFST);
+ pcp_resp = (struct pcp_resp_hdr *)
+ ((uint8_t *) tx_pkt + IPV6_PCP_OFST);
+ udp_rx = (struct udp_hdr *)
+ ((uint8_t *) rx_pkt + IPV6_UDP_OFST);
+ }
+
+ /* Check for all conditions to drop the packet */
+
+ /* Check the PCP version */
+
+ if (pcp_req->ver != 2) {
+ #ifdef PCP_DEBUG
+ printf("PCP version mismatch\n");
+ #endif
+ pcp_resp->result_code = PCP_UNSUPP_VERSION;
+ pcp_resp->life_time = rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+
+	/* Drop request messages whose UDP length exceeds 1100 octets */
+
+	if (rte_bswap16(udp_rx->dgram_len) > 1100) {
+	#ifdef PCP_DEBUG
+		printf("PCP len > 1100\n");
+ #endif
+ pcp_resp->result_code = PCP_MALFORMED_REQUEST;
+ pcp_resp->life_time = rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+
+ /* Silently drop the response pkt */
+ if (pcp_req->req_resp == PCP_RESP) {
+ #ifdef PCP_DEBUG
+ printf("Its PCP Resp\n");
+ #endif
+ return;
+ }
+
+ /* Check for supported PCP opcode */
+
+ if ((pcp_req->opcode != PCP_MAP) && (pcp_req->opcode != PCP_PEER)) {
+ #ifdef PCP_DEBUG
+ printf("Neither PCP_MAP not PCP_PEER\n");
+ #endif
+ pcp_resp->result_code = PCP_UNSUPP_OPCODE;
+ printf("result code:%d\n", PCP_UNSUPP_OPCODE);
+ pcp_resp->life_time = rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+
+	/* Check whether PCP options are present (options are not supported) */
+
+ {
+ uint8_t *option =
+ (uint8_t *) ((uint8_t *) udp_rx + PCP_REQ_RESP_HDR_SZ +
+ PCP_MAP_REQ_RESP_SZ);
+ if (*option) {
+ #ifdef PCP_DEBUG
+ printf("No PCP option support\n");
+ #endif
+ pcp_resp->result_code = PCP_UNSUPP_OPTION;
+ pcp_resp->life_time = rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+ }
+
+ if (ver == 4) {
+ ipv4 = (struct ipv4_hdr *)((uint8_t *) rx_pkt + IP_OFFSET);
+ /* Check whether 3rd party host is requesting */
+ if (ipv4->src_addr != pcp_req->cli_ip[3]) {
+
+ #ifdef PCP_DEBUG
+ printf("PCP client IP & req IP mismatch\n");
+ #endif
+
+ printf("src addr:%x req addr:%x\n", ipv4->src_addr,
+ pcp_req->cli_ip[3]);
+
+ pcp_resp->result_code = PCP_ADDRESS_MISMATCH;
+ pcp_resp->life_time = rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+
+ } else {
+ ipv6 = (struct ipv6_hdr *)((uint8_t *) rx_pkt + IP_OFFSET);
+ /* 5. Check whether 3rd party host is requesting */
+ if (memcmp(ipv6->src_addr, pcp_req->cli_ip, IPV6_SZ) != 0) {
+ #ifdef PCP_DEBUG
+ printf("PCP client IP & req IP mismatch\n");
+ #endif
+
+ pcp_resp->result_code = PCP_ADDRESS_MISMATCH;
+ pcp_resp->life_time = rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+ }
+
+ struct pipeline_cgnapt_entry_key key;
+ memset(&key, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ int pos = 0;
+
+ switch (pcp_req->opcode) {
+
+ case PCP_MAP:
+ {
+ struct pcp_map_req *map_req;
+ struct pcp_map_resp *map_resp;
+
+		/* Drop if the MAP opcode payload is shorter than 36 octets */
+
+ if ((rte_be_to_cpu_16(udp_rx->dgram_len) -
+ sizeof(struct pcp_req_hdr)) <= 35)
+ return;
+
+ if (ver == 4) {
+ map_req = (struct pcp_map_req *)
+ ((uint8_t *) rx_pkt +
+ IPV4_PCP_MAP_OFST);
+ map_resp = (struct pcp_map_resp *)
+ ((uint8_t *) tx_pkt +
+ IPV4_PCP_MAP_OFST);
+ } else {
+ map_req = (struct pcp_map_req *)
+ ((uint8_t *) rx_pkt +
+ IPV6_PCP_MAP_OFST);
+ map_resp = (struct pcp_map_resp *)
+ ((uint8_t *) tx_pkt +
+ IPV6_PCP_MAP_OFST);
+ }
+
+ /* 4. Check for supported protocol */
+
+ if (map_req->protocol != IP_PROTOCOL_TCP &&
+ map_req->protocol != IP_PROTOCOL_UDP) {
+ #ifdef PCP_DEBUG
+ printf("PCP Req is neither TCP nor "
+ "UDP protocol\n");
+ #endif
+
+ pcp_resp->result_code = PCP_UNSUPP_PROTOCOL;
+ pcp_resp->life_time =
+ rte_bswap32(PCP_LONG_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt,
+ ver, p_nat->p.p);
+ return;
+ }
+
+ /* Preparing key to search the entry */
+
+ key.pid = rx_pkt->port;
+ key.ip = rte_bswap32(pcp_req->cli_ip[3]);
+ key.port = rte_bswap16(map_req->int_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ #ifdef PCP_DEBUG
+ rte_hexdump(stdout, "key", &key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ #endif
+
+ pos = rte_hash_lookup(napt_common_table, &key);
+
+ /* PCP request for deleting the CGNAPT entry */
+ if (rte_bswap32(pcp_req->life_time) == 0) {
+
+ if (pos != -ENOENT) {
+
+ long long int time_out;
+ time_out =
+ napt_hash_tbl_entries[pos].
+ data.timeout;
+
+ /* Check for PCP entry first */
+ if (time_out > 0) {
+ rte_hash_del_key
+ (napt_common_table, &key);
+ pcp_resp->life_time = 0;
+ pcp_resp->result_code =
+ PCP_SUCCESS;
+ memset(pcp_resp->reserve, 0, 12);
+ #ifdef PCP_DEBUG
+ printf("PCP SUCCESS : PCP MAP req for "
+ "deleting entry\n");
+ #endif
+
+ construct_pcp_resp(rx_pkt, tx_pkt,
+ ver, p_nat->p.p);
+
+ return;
+
+ }
+
+ if (time_out == STATIC_CGNAPT_TIMEOUT)
+ pcp_resp->life_time = 0xffffffff;
+ else if (time_out == DYNAMIC_CGNAPT_TIMEOUT)
+ pcp_resp->life_time =
+ rte_bswap32(PCP_LONG_LTIME);
+
+ pcp_resp->result_code = PCP_NOT_AUTHORIZED;
+
+ #ifdef PCP_DEBUG
+ printf("PCP Failed : Not a PCP request "
+ "created entry\n");
+ #endif
+
+ construct_pcp_resp(rx_pkt, tx_pkt,
+ ver, p_nat->p.p);
+ return;
+
+ } else {
+ pcp_resp->life_time = 0;
+ pcp_resp->result_code = PCP_SUCCESS;
+ memset(pcp_resp->reserve, 0, 12);
+
+ #ifdef PCP_DEBUG
+ printf("PCP SUCCESS : MAP req entry not "
+ "found for deletion\n");
+ #endif
+
+ construct_pcp_resp(rx_pkt, tx_pkt,
+ ver, p_nat->p.p);
+ return;
+ }
+ }
+
+ /* PCP request for adding the CGNAPT entry */
+ struct cgnapt_table_entry *entry = NULL;
+
+ if ((pos == -ENOENT)) {
+ uint8_t err = 0;
+ entry = add_dynamic_cgnapt_entry(&p_nat->p,
+ &key,
+ rte_bswap32(pcp_req->life_time) <=
+ pcp_lifetime?
+ rte_bswap32(pcp_req->life_time):
+ pcp_lifetime,
+ ver == 4 ?
+ CGNAPT_ENTRY_IPV4 :
+ CGNAPT_ENTRY_IPV6,
+ ipv6->src_addr, &err);
+ /* Ignore klocwork issue in above calling */
+
+ /* MAP Err : unable to allocate
+ * requested resources
+ */
+ if (!entry) {
+
+ #ifdef PCP_DEBUG
+ printf("PCP Failure : unable to "
+ "create PCP req entry\n");
+ #endif
+
+ pcp_resp->result_code =
+ PCP_NO_RESOURCES;
+ pcp_resp->life_time =
+ rte_bswap32(PCP_SHORT_LTIME);
+ construct_pcp_resp(rx_pkt, tx_pkt,
+ ver, p_nat->p.p);
+ return;
+ }
+ #ifdef PCP_DEBUG
+ printf("PCP dynamic entry created "
+ "successfully\n");
+ #endif
+
+ pcp_entry_count++;
+ } else {
+ /* Check whether PCP request created
+ * entry or not
+ */
+ if (napt_hash_tbl_entries[pos].data.
+ timeout > 0) {
+
+ napt_hash_tbl_entries[pos].
+ data.timeout = pcp_lifetime;
+
+ struct cgnapt_table_entry *p_entry, *s_entry;
+ struct pipeline_cgnapt_entry_key s_key;
+
+ p_entry = &napt_hash_tbl_entries[pos];
+ entry = &napt_hash_tbl_entries[pos];
+ s_key.port = napt_hash_tbl_entries[pos].
+ data.pub_port;
+ s_key.ip = napt_hash_tbl_entries[pos].
+ data.pub_ip;
+ s_key.pid = napt_hash_tbl_entries[pos].
+ data.pub_phy_port;
+
+ /* Getting ingress or second entry
+ * from the table
+ */
+
+ pos = rte_hash_lookup(napt_common_table,
+ &s_key);
+ s_entry = &napt_hash_tbl_entries[pos];
+
+ /* Enqueue the info to
+ * restart the timer
+ */
+ timer_thread_enqueue(&key, &s_key,
+ p_entry, s_entry,
+ (struct pipeline *)p_nat);
+
+ } else {
+ // if dynamic
+ if (!napt_hash_tbl_entries[pos].
+ data.timeout)
+ pcp_resp->life_time =
+ rte_bswap32(PCP_LONG_LTIME);
+ else // if static entry
+ pcp_resp->life_time =
+ 0xffffffff;
+
+ pcp_resp->result_code =
+ PCP_NOT_AUTHORIZED;
+
+ #ifdef PCP_DEBUG
+ printf("PCP Failure : Not authorized "
+ "to delete entry\n");
+ printf("Not a PCP request "
+ "created entry\n");
+ #endif
+ construct_pcp_resp(rx_pkt, tx_pkt,
+ ver, p_nat->p.p);
+ return;
+ }
+
+ }
+
+ /* Fill PCP Resp fields */
+ pcp_resp->result_code = PCP_SUCCESS;
+
+		if (rte_bswap32(pcp_req->life_time) < pcp_lifetime)
+			pcp_resp->life_time = pcp_req->life_time;
+		else
+			pcp_resp->life_time = rte_bswap32(pcp_lifetime);
+
+ /* Fill PCP MAP Resp fields */
+ memcpy(map_resp->nonce, map_req->nonce, 12);
+ map_resp->protocol = map_req->protocol;
+ map_resp->res_unuse1 = 0;
+ map_resp->int_port = map_req->int_port;
+
+ /* Ignore klockwork issue for below stmt */
+ map_resp->ext_port =
+ rte_be_to_cpu_16(entry->data.pub_port);
+ memcpy(map_resp->ext_ip, pcp_ipv4_format, 12);
+ map_resp->ext_ip[3] = rte_bswap32(entry->data.pub_ip);
+
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+ break;
+
+ case PCP_PEER:
+ {
+
+		/* Drop if the PEER opcode payload is shorter than 56 octets */
+
+ if ((rte_be_to_cpu_16(udp_rx->dgram_len) -
+ sizeof(struct pcp_req_hdr)) <= 55)
+ return;
+
+ struct cgnapt_table_entry *p_entry, *s_entry;
+ struct pipeline_cgnapt_entry_key s_key;
+
+ struct pcp_peer_req *peer_req;
+ struct pcp_peer_resp *peer_resp;
+
+		if (ver == 4) {
+			peer_req = (struct pcp_peer_req *)
+				((uint8_t *) rx_pkt + IPV4_PCP_PEER_OFST);
+			peer_resp = (struct pcp_peer_resp *)
+				((uint8_t *) tx_pkt + IPV4_PCP_PEER_OFST);
+		} else {
+			peer_req = (struct pcp_peer_req *)
+				((uint8_t *) rx_pkt + IPV6_PCP_PEER_OFST);
+			peer_resp = (struct pcp_peer_resp *)
+				((uint8_t *) tx_pkt + IPV6_PCP_PEER_OFST);
+		}
+
+		/* PEER Err : creation not supported */
+ if (pcp_req->life_time == 0) {
+ pcp_resp->life_time = 0;
+ pcp_resp->result_code = PCP_MALFORMED_REQUEST;
+
+ #ifdef PCP_DEBUG
+ printf("PCP Failure : PEER creation not "
+ "supported\n");
+ #endif
+
+ construct_pcp_resp(rx_pkt, tx_pkt, ver,
+ p_nat->p.p);
+ return;
+ }
+
+ /* Preparing key to search the entry */
+ key.pid = rx_pkt->port;
+ /* For both IPv4 & IPv6, key is last 32 bits
+ * due to NAT64
+ */
+ key.ip = rte_bswap32(pcp_req->cli_ip[3]);
+ key.port = rte_bswap16(peer_req->int_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ /* PEER Err : If no requested entry is found */
+ pos = rte_hash_lookup(napt_common_table, &key);
+ if (pos == -ENOENT) {
+ pcp_resp->life_time =
+ rte_bswap32(PCP_LONG_LTIME);
+ pcp_resp->result_code = PCP_MALFORMED_REQUEST;
+
+ #ifdef PCP_DEBUG
+ printf("PCP Failure : unable to find entry\n");
+ #endif
+
+ construct_pcp_resp(rx_pkt, tx_pkt, ver,
+ p_nat->p.p);
+ return;
+ }
+ /* If requested created entry */
+
+ if (napt_hash_tbl_entries[pos].data.
+ timeout > 0) {
+
+ napt_hash_tbl_entries[pos].
+ data.timeout = pcp_lifetime;
+
+ p_entry = &napt_hash_tbl_entries[pos];
+
+ s_key.port = napt_hash_tbl_entries[pos].
+ data.pub_port;
+ s_key.ip = napt_hash_tbl_entries[pos].
+ data.pub_ip;
+ s_key.pid = napt_hash_tbl_entries[pos].
+ data.pub_phy_port;
+
+ /* Getting ingress or second entry
+ * from the table
+ */
+
+ pos = rte_hash_lookup(napt_common_table,
+ &s_key);
+ s_entry = &napt_hash_tbl_entries[pos];
+
+ /* Enqueue the info to restart the timer */
+ timer_thread_enqueue(&key, &s_key,
+ p_entry, s_entry,
+ (struct pipeline *)p_nat);
+
+ } else{
+ // dynamic entry
+ if (!napt_hash_tbl_entries[pos].data.timeout)
+ pcp_resp->life_time =
+ rte_bswap32(PCP_LONG_LTIME);
+ else // if static entry
+ pcp_resp->life_time = 0xffffffff;
+
+ pcp_resp->result_code =
+ PCP_NOT_AUTHORIZED;
+ #ifdef PCP_DEBUG
+ printf("PCP Failure : Not a PCP request "
+ "created entry\n");
+ #endif
+ construct_pcp_resp(rx_pkt, tx_pkt, ver,
+ p_nat->p.p);
+
+ return;
+ }
+
+ /* PEER Success */
+ /* Fill PCP Response */
+		if (rte_bswap32(pcp_req->life_time) < pcp_lifetime)
+			pcp_resp->life_time = pcp_req->life_time;
+		else
+			pcp_resp->life_time = rte_bswap32(pcp_lifetime);
+
+ pcp_resp->result_code = PCP_SUCCESS;
+		/* Fill PCP PEER Response */
+ memcpy(peer_resp->nonce, peer_req->nonce, 12);
+ peer_resp->protocol = peer_req->protocol;
+ peer_resp->res_unuse1 = 0;
+
+ peer_resp->int_port =
+ rte_be_to_cpu_16(peer_req->int_port);
+ peer_resp->ext_port =
+ rte_be_to_cpu_16(peer_req->ext_port);
+ memcpy(peer_resp->ext_ip, peer_req->ext_ip, 16);
+ memcpy(peer_resp->ext_ip, pcp_ipv4_format, 12);
+ peer_resp->ext_ip[3] =
+ rte_bswap32(p_entry->data.pub_ip);
+ peer_resp->rpeer_port =
+ rte_be_to_cpu_16(peer_req->rpeer_port);
+ peer_resp->res_unuse2 = 0x0000;
+ memcpy(peer_resp->rpeer_ip, peer_req->rpeer_ip, 16);
+ construct_pcp_resp(rx_pkt, tx_pkt, ver, p_nat->p.p);
+ return;
+ }
+ default:
+ printf("This never hits\n");
+ }
+
+}
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.h b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.h
new file mode 100644
index 00000000..5c26f5c3
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_be.h
@@ -0,0 +1,356 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef _CGNAPT_PCP_H_
+#define _CGNAPT_PCP_H_
+/**
+ * @file
+ *
+ * PCP-related defines
+ */
+
+#include <stdint.h>
+#include <rte_ether.h>
+#include <rte_udp.h>
+#include <rte_pipeline.h>
+#include <rte_ip.h>
+#include "pipeline_cgnapt_common.h"
+
+void handle_pcp_req(struct rte_mbuf *rx_pkt,
+ uint8_t ver,
+ void *pipeline_cgnapt_ptr);
+
+void construct_pcp_resp(struct rte_mbuf *rx_pkt,
+ struct rte_mbuf *tx_pkt,
+ uint8_t ver,
+ struct rte_pipeline *rte_p);
+
+void *pipeline_cgnapt_msg_req_pcp_handler(
+ __rte_unused struct pipeline *p,
+ void *msg);
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/************************* Constants used in PCP ****************************/
+#define PCP_SERVER_PORT 5351
+
+/* PCP Req or Resp */
+enum{
+ PCP_REQ,
+ PCP_RESP,
+};
+/* PCP life time in seconds */
+enum{
+ PCP_LONG_LTIME = 30 * 60,
+ PCP_SHORT_LTIME = 30,
+ MAX_PCP_LIFE_TIME = 120 * 60,
+};
+/* PCP opcodes */
+enum{
+ PCP_ANNOUNCE,
+ PCP_MAP,
+ PCP_PEER,
+};
+
+/* PCP result codes */
+enum{
+ PCP_SUCCESS,
+ PCP_UNSUPP_VERSION,
+ PCP_NOT_AUTHORIZED,
+ PCP_MALFORMED_REQUEST,
+ PCP_UNSUPP_OPCODE,
+ PCP_UNSUPP_OPTION,
+ PCP_MALFORMED_OPTION,
+ PCP_NETWORK_FAILURE,
+ PCP_NO_RESOURCES,
+ PCP_UNSUPP_PROTOCOL,
+ PCP_USER_EX_QUOTA,
+ PCP_CANNOT_PROVIDE_EXTERNAL,
+ PCP_ADDRESS_MISMATCH,
+ PCP_EXCESSIVE_REMOTE_PEERS
+};
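+
+/*
+ * Note: the enumerators above follow the PCP result codes of RFC 6887
+ * (Port Control Protocol), i.e. PCP_SUCCESS = 0 through
+ * PCP_EXCESSIVE_REMOTE_PEERS = 13.
+ */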
+
+/*
+ * @struct
+ *
+ * PCP request header format
+ */
+struct pcp_req_hdr {
+ uint8_t ver;
+ uint8_t opcode:7; //First LSB
+ uint8_t req_resp:1;// MSB
+ uint16_t res_unuse;
+ uint32_t life_time;
+ uint32_t cli_ip[4];
+} __attribute__((__packed__));
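+
+/*
+ * Byte-layout example (assuming the little-endian bitfield layout implied
+ * by the comments above, with opcode in the 7 low bits and req_resp in the
+ * MSB of the second octet): a MAP request carries 0x01 in that octet
+ * (opcode = PCP_MAP = 1, req_resp = PCP_REQ = 0), while a MAP response
+ * carries 0x81 (req_resp = PCP_RESP = 1).
+ */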
+
+/*
+ * @struct
+ *
+ * PCP response header format
+ */
+struct pcp_resp_hdr {
+ uint8_t ver;
+ uint8_t opcode:7; //First LSB
+ uint8_t req_resp:1;// MSB
+ uint8_t res_unuse;
+ uint8_t result_code;
+ uint32_t life_time;
+ uint32_t epoch_time;
+ uint32_t reserve[3];
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * PCP MAP request header format
+ */
+struct pcp_map_req {
+ uint32_t nonce[3];
+ uint8_t protocol;
+ uint32_t res_unuse1:24;
+ uint16_t int_port;
+ uint16_t ext_port;
+ uint32_t ext_ip[4];
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * PCP MAP response header format
+ */
+struct pcp_map_resp {
+ uint32_t nonce[3];
+ uint8_t protocol;
+ uint32_t res_unuse1:24;
+ uint16_t int_port;
+ uint16_t ext_port;
+ uint32_t ext_ip[4];
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * PCP PEER request header format
+ */
+struct pcp_peer_req {
+ uint32_t nonce[3];
+ uint8_t protocol;
+ uint32_t res_unuse1:24;
+ uint16_t int_port;
+ uint16_t ext_port;
+ uint32_t ext_ip[4];
+ uint16_t rpeer_port;
+ uint16_t res_unuse2;
+ uint32_t rpeer_ip[4];
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * PCP PEER response header format
+ */
+struct pcp_peer_resp {
+ uint32_t nonce[3];
+ uint8_t protocol;
+ uint32_t res_unuse1:24;
+ uint16_t int_port;
+ uint16_t ext_port;
+ uint32_t ext_ip[4];
+ uint16_t rpeer_port;
+ uint16_t res_unuse2;
+ uint32_t rpeer_ip[4];
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * Customized IPv4 header of struct ipv4_hdr
+ */
+struct ipv4 {
+ uint8_t version_ihl; /**< version and header length */
+ uint8_t type_of_service; /**< type of service */
+ uint16_t total_length; /**< length of packet */
+ uint16_t packet_id; /**< packet ID */
+ uint16_t fragment_offset; /**< fragmentation offset */
+ uint8_t time_to_live; /**< time to live */
+ uint8_t next_proto_id; /**< protocol ID */
+ uint16_t hdr_checksum; /**< header checksum */
+ uint32_t src_addr; /**< source address */
+ uint32_t dst_addr; /**< destination address */
+ uint16_t src_port;
+ uint16_t dst_port;
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * Customized IPv6 header of struct ipv6_hdr
+ */
+struct ipv6 {
+ uint32_t vtc_flow; /**< IP version, traffic class & flow label. */
+ uint16_t payload_len; /**< IP packet length -
+ * includes sizeof(ip_header).
+ */
+ uint8_t proto; /**< Protocol, next header. */
+ uint8_t hop_limits; /**< Hop limits. */
+ uint8_t src_addr[16]; /**< IP address of source host. */
+ uint8_t dst_addr[16]; /**< IP address of destination host(s). */
+ uint16_t src_port;
+ uint16_t dst_port;
+
+} __attribute__((__packed__));
+
+/*
+ * @struct
+ *
+ * To represent the entire pkt data in one structure
+ */
+struct pcp_pkt {
+ struct ether_hdr eth;
+ union{
+ struct ipv4 ipv4;
+ struct ipv6 ipv6;
+ };
+} __attribute__((__packed__));
+
+/**
+ * A structure defining the PCP msg request
+ */
+struct pipeline_cgnapt_pcp_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* data */
+ uint8_t cmd;
+ uint32_t lifetime;
+};
+
+/**
+ * A structure defining the PCP cmd response message.
+ */
+struct pipeline_cgnapt_pcp_msg_rsp {
+ int status;
+};
+
+
+/* All required offsets */
+enum{
+ MBUF_HEAD_ROOM = 256,
+ ETH_HDR_SZ = 14,
+ IPV4_HDR_SZ = 20,
+ IPV6_HDR_SZ = 40,
+ IPV4_SZ = 4,
+ IPV6_SZ = 6,
+ TCP_HDR_SZ = 20,
+ UDP_HDR_SZ = 8,
+ PCP_REQ_RESP_HDR_SZ = 24,
+ PCP_MAP_REQ_RESP_SZ = 36,
+ PCP_PEER_REQ_RESP_SZ = 56,
+};
+
+enum{
+ ETH_DST_MAC = MBUF_HEAD_ROOM,
+ ETH_SRC_MAC = MBUF_HEAD_ROOM + 6,
+ PKT_TYPE = MBUF_HEAD_ROOM + 12,
+ IP_OFFSET = MBUF_HEAD_ROOM + ETH_HDR_SZ,
+
+/* IPV4 Offsets */
+
+ IPV4_PROTOCOL = MBUF_HEAD_ROOM + ETH_HDR_SZ + 9,
+ IPV4_SRC_ADD_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + 12,
+ IPV4_DST_ADD_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + 12 + IPV4_SZ,
+
+ IPV4_TCP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ,
+ IPV4_TCP_SRC_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ,
+ IPV4_TCP_DST_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ + 2,
+
+ IPV4_UDP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ,
+ IPV4_UDP_SRC_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ,
+ IPV4_UDP_DST_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ + 2,
+
+ IPV4_PCP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ +
+ IPV4_HDR_SZ + UDP_HDR_SZ,
+ IPV4_PCP_MAP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ +
+ UDP_HDR_SZ + PCP_REQ_RESP_HDR_SZ,
+ IPV4_PCP_PEER_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV4_HDR_SZ +
+ UDP_HDR_SZ + PCP_REQ_RESP_HDR_SZ,
+
+ IPV4_PCP_MAP_PL_LEN = IPV4_HDR_SZ + UDP_HDR_SZ + PCP_REQ_RESP_HDR_SZ +
+ PCP_MAP_REQ_RESP_SZ,
+ IPV4_PCP_PEER_PL_LEN = IPV4_HDR_SZ + UDP_HDR_SZ + PCP_REQ_RESP_HDR_SZ +
+ PCP_PEER_REQ_RESP_SZ,
+/* IPV6 Offsets */
+
+ IPV6_PROTOCOL = MBUF_HEAD_ROOM + ETH_HDR_SZ + 6,
+ IPV6_SRC_ADD_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + 8,
+ IPV6_DST_ADD_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + 8 + IPV6_SZ,
+
+ IPV6_TCP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ,
+ IPV6_TCP_SRC_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ,
+ IPV6_TCP_DST_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ + 2,
+
+ IPV6_UDP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ,
+ IPV6_UCP_SRC_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ,
+ IPV6_UCP_DST_PORT_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ + 2,
+
+ IPV6_PCP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ +
+ IPV6_HDR_SZ + UDP_HDR_SZ,
+ IPV6_PCP_MAP_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ +
+ UDP_HDR_SZ + PCP_REQ_RESP_HDR_SZ,
+ IPV6_PCP_PEER_OFST = MBUF_HEAD_ROOM + ETH_HDR_SZ + IPV6_HDR_SZ +
+ UDP_HDR_SZ + PCP_REQ_RESP_HDR_SZ,
+
+ IPV6_PCP_MAP_PL_LEN = IPV6_HDR_SZ + UDP_HDR_SZ +
+ PCP_REQ_RESP_HDR_SZ + PCP_MAP_REQ_RESP_SZ,
+ IPV6_PCP_PEER_PL_LEN = IPV6_HDR_SZ + UDP_HDR_SZ +
+ PCP_REQ_RESP_HDR_SZ + PCP_PEER_REQ_RESP_SZ,
+};
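+
+/*
+ * Worked examples for the offsets above (all values in bytes):
+ *	IPV4_PCP_OFST        = 256 + 14 + 20 + 8 = 298
+ *	IPV6_PCP_OFST        = 256 + 14 + 40 + 8 = 318
+ *	IPV4_PCP_MAP_PL_LEN  = 20 + 8 + 24 + 36  = 88
+ *	IPV6_PCP_PEER_PL_LEN = 40 + 8 + 24 + 56  = 128
+ * i.e. the PCP header starts after the mbuf/headroom offset plus the
+ * Ethernet, IP and UDP headers, and the *_PL_LEN values are what
+ * construct_pcp_resp() writes into total_length / payload_len.
+ */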
+
+enum{
+	STATIC_CGNAPT_TIMEOUT = -1,
+	DYNAMIC_CGNAPT_TIMEOUT = 0,
+};
+
+enum PCP_RET {
+	PCP_INIT_SUCCESS,
+	PCP_INIT_UNSUCCESS,
+	PCP_PCP_PKT,
+	//PCP_PCP_PKT_SUCCESS,
+	PCP_NOT_PCP_PKT,
+	PCP_PKT_CORRUPT,
+};
+
+
+uint8_t _PCP_DEBUG;
+uint32_t pcp_success_count;
+uint32_t pcp_error_count;
+uint32_t pcp_entry_count;
+uint32_t pcp_enable;
+
+uint8_t pcp_pool_init;
+struct rte_mempool *pcp_mbuf_pool;
+
+enum PCP_RET pcp_init(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CGNAPT_PCP_H_ */
diff --git a/VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.c b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.c
new file mode 100644
index 00000000..51b94b0b
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.c
@@ -0,0 +1,174 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_cgnapt.h"
+#include "pipeline_cgnapt_common.h"
+#include "cgnapt_pcp_fe.h"
+#include "cgnapt_pcp_be.h"
+
+#ifdef PCP_ENABLE
+
+/**
+ * @file
+ * Pipeline CG-NAPT PCP FE Implementation.
+ *
+ * Implementation of Pipeline CG-NAPT PCP Front End (FE).
+ * Provides CLI support.
+ * Runs on master core.
+ *
+ */
+
+void cmd_pcp_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data);
+/**
+ * A structure defining PCP cmd parse arguments.
+ */
+struct cmd_pcp_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t pcp_string;
+ uint8_t cmd;
+ uint32_t lifetime;
+};
+
+static cmdline_parse_token_string_t cmd_pcp_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_pcp_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_pcp_p =
+TOKEN_NUM_INITIALIZER(struct cmd_pcp_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_pcp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_pcp_result,
+ pcp_string, "pcp");
+
+static cmdline_parse_token_num_t cmd_pcp_cmd =
+TOKEN_NUM_INITIALIZER(struct cmd_pcp_result, cmd, UINT8);
+
+static cmdline_parse_token_num_t cmd_pcp_lifetime =
+TOKEN_NUM_INITIALIZER(struct cmd_pcp_result, lifetime, UINT32);
+
+cmdline_parse_inst_t cmd_pcp = {
+ .f = cmd_pcp_parsed,
+ .data = NULL,
+ .help_str = "NAPT PCP cmd",
+ .tokens = {
+ (void *) &cmd_pcp_p_string,
+ (void *) &cmd_pcp_p,
+ (void *) &cmd_pcp_string,
+ (void *) &cmd_pcp_cmd,
+ (void *) &cmd_pcp_lifetime,
+ NULL,
+ },
+};
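+
+/*
+ * CLI syntax implied by the token list above:
+ *
+ *	p <pipeline_id> pcp <cmd> <lifetime>
+ *
+ * where <cmd> selects one of the operations handled in
+ * pipeline_cgnapt_msg_req_pcp_handler() (show stats, enable/disable PCP,
+ * set or get the PCP lifetime); the numeric CGNAPT_PCP_CMD_* values are
+ * defined elsewhere in the CGNAPT code, so no concrete numbers are
+ * assumed here.
+ */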
+
+/**
+ * Function to send a PCP cmd message to BE
+ *
+ * @param app
+ *  A pointer to pipeline app
+ * @param pipeline_id
+ *  Pipeline id
+ * @param cmd
+ *  PCP-specific command: show stats, enable/disable PCP, or set/get the lifetime
+ * @param lifetime
+ *  PCP entry lifetime
+ * @return
+ *  0 on success, negative on error.
+ */
+static int
+app_pipeline_cgnapt_pcp(struct app_params *app,
+ uint32_t pipeline_id, uint8_t cmd, uint32_t lifetime){
+
+ struct pipeline_cgnapt *p;
+ struct pipeline_cgnapt_pcp_msg_req *req;
+ struct pipeline_cgnapt_pcp_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_PCP;
+ req->cmd = cmd;
+ req->lifetime = lifetime;
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/**
+ * Helper function for the PCP command
+ *
+ * @param parsed_result
+ *  A pointer to the parsed PCP command arguments
+ * @param cl
+ * unused pointer to struct cmdline
+ * @param data
+ * void pointer data
+ */
+void
+cmd_pcp_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pcp_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_cgnapt_pcp(app, params->p, params->cmd,
+ params->lifetime);
+
+ if (status != 0) {
+ printf("PCP Command failed\n");
+ return;
+ }
+}
+
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.h b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.h
new file mode 100644
index 00000000..caf63e8a
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/cgnapt_pcp_fe.h
@@ -0,0 +1,35 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_CGNAPT_PCP_FE_H__
+#define __INCLUDE_CGNAPT_PCP_FE_H__
+
+/**
+ * @file
+ * Pipeline CG-NAPT PCP FE Implementation.
+ *
+ * Implementation of Pipeline CG-NAPT PCP Front End (FE).
+ * Provides CLI support.
+ * Runs on master core.
+ *
+ */
+
+#include "pipeline_cgnapt_common.h"
+#include "cgnapt_pcp_be.h"
+
+extern cmdline_parse_inst_t cmd_pcp;
+
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.c b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.c
new file mode 100644
index 00000000..239d34f9
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.c
@@ -0,0 +1,1518 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+ * @file
+ * Pipeline CG-NAPT FE Implementation.
+ *
+ * Implementation of Pipeline CG-NAPT Front End (FE).
+ * Provides CLI support.
+ * Runs on master core.
+ *
+ */
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_cgnapt.h"
+#include "pipeline_cgnapt_common.h"
+#include "pipeline_common_be.h"
+#include "pipeline_cgnapt_be.h"
+#ifdef PCP_ENABLE
+#include "cgnapt_pcp_fe.h"
+#endif
+
+/**
+ * A structure defining the CG-NAPT entry that is stored on
+ * front end.
+ */
+struct app_pipeline_cgnapt_entry {
+ struct pipeline_cgnapt_entry_key key;
+ struct app_pipeline_cgnapt_entry_params params;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_cgnapt_entry) node;
+};
+
+/**
+ * A structure defining the FE representation of a CG-NAPT pipeline
+ */
+struct pipeline_cgnapt_t {
+ /* Parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+
+ /* entries */
+ TAILQ_HEAD(, app_pipeline_cgnapt_entry) entries;
+ uint32_t n_entries;
+
+};
+
+/**
+ * Init function for CG-NAPT FE.
+ *
+ * @param params
+ * A pointer to the pipeline params.
+ *
+ */
+static void *pipeline_cgnapt_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline_cgnapt_t *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) || (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_cgnapt_t));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ TAILQ_INIT(&p->entries);
+ p->n_entries = 0;
+
+ return p;
+}
+
+/**
+ * Function for CG-NAPT FE cleanup.
+ *
+ * @param pipeline
+ * A pointer to the pipeline.
+ *
+ */
+static int app_pipeline_cgnapt_free(void *pipeline)
+{
+ struct pipeline_cgnapt_t *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ while (!TAILQ_EMPTY(&p->entries)) {
+ struct app_pipeline_cgnapt_entry *entry;
+
+ entry = TAILQ_FIRST(&p->entries);
+ TAILQ_REMOVE(&p->entries, entry, node);
+ rte_free(entry);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
+/**
+ * Function to print an IPv6 address
+ *
+ * @param ipv6_addr
+ * A uint8_t array containing an IPv6 address
+ */
+static void print_ipv6_address_u8(uint8_t ipv6_addr[16])
+{
+ printf("Ipv6Address-%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x\n",
+ ipv6_addr[0], ipv6_addr[1], ipv6_addr[2], ipv6_addr[3],
+ ipv6_addr[4], ipv6_addr[5], ipv6_addr[6], ipv6_addr[7],
+ ipv6_addr[8], ipv6_addr[9], ipv6_addr[10], ipv6_addr[11],
+ ipv6_addr[12], ipv6_addr[13], ipv6_addr[14], ipv6_addr[15]);
+}
+
+/**
+ * Function to print an IPv6 address
+ *
+ * @param ipv6_addr
+ * A uint16_t array containing an IPv6 address
+ */
+static void print_ipv6_address_u16(uint16_t ipv6_addr[8])
+{
+ printf("Ipv6Address-%x:%x:%x:%x:%x:%x:%x:%x\n", ipv6_addr[0],
+ ipv6_addr[1], ipv6_addr[2], ipv6_addr[3], ipv6_addr[4],
+ ipv6_addr[5], ipv6_addr[6], ipv6_addr[7]);
+}
+
+/**
+ * Function to print an IPv6 address
+ *
+ * @param ipv6_addr
+ * A uint32_t array containing an IPv6 address
+ */
+static void print_ipv6_address_u32(uint32_t ipv6_addr[4])
+{
+ printf("Ipv6Address: %x:%x:%x:%x\n", ipv6_addr[0], ipv6_addr[1],
+ ipv6_addr[2], ipv6_addr[3]);
+}
+
+/**
+ * Function to print a NAPT entry
+ *
+ * @param entry
+ * A pointer to a NAPT entry
+ */
+static void print_entry(const struct app_pipeline_cgnapt_entry *entry)
+{
+ const struct pipeline_cgnapt_entry_key *key = &entry->key;
+
+ if (entry->params.type == CGNAPT_ENTRY_IPV4) {
+ printf("CGNAPT Entry: Key = %" PRIu32 ".%" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ":%" PRIu32 ":%" PRIu16 " => Prv = %" PRIu32
+ ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ":%" PRIu32
+ " => Pub = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ ":%" PRIu32 " => ttl = %" PRIu32 "\n",
+ (key->ip >> 24) & 0xFF, (key->ip >> 16) & 0xFF,
+ (key->ip >> 8) & 0xFF, key->ip & 0xFF, key->port,
+ key->pid, (entry->params.u.prv_ip >> 24) & 0xFF,
+ (entry->params.u.prv_ip >> 16) & 0xFF,
+ (entry->params.u.prv_ip >> 8) & 0xFF,
+ entry->params.u.prv_ip & 0xFF, entry->params.prv_port,
+ (entry->params.pub_ip >> 24) & 0xFF,
+ (entry->params.pub_ip >> 16) & 0xFF,
+ (entry->params.pub_ip >> 8) & 0xFF,
+ entry->params.pub_ip & 0xFF, entry->params.pub_port,
+ entry->params.ttl);
+ } else {
+ printf("CGNAPT Entry: Key = %" PRIu32 ".%" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ":%" PRIu32 ":%" PRIu16 " => Prv = %" PRIu32
+ "%" PRIu32 ":%" PRIu32 "%" PRIu32 ":%" PRIu32 "%" PRIu32
+ ":%" PRIu32 "%" PRIu32 ":%" PRIu32 "%" PRIu32 ":%" PRIu32
+ "%" PRIu32 ":%" PRIu32 "%" PRIu32 ":%" PRIu32 "%" PRIu32
+ ":%" PRIu32 " => Pub = %" PRIu32 ".%" PRIu32 ".%"
+ PRIu32 ".%" PRIu32 ":%" PRIu32 " => ttl = %" PRIu32
+ "\n", (key->ip >> 24) & 0xFF, (key->ip >> 16) & 0xFF,
+ (key->ip >> 8) & 0xFF, key->ip & 0xFF, key->port,
+ key->pid, entry->params.u.prv_ipv6[0],
+ entry->params.u.prv_ipv6[1], entry->params.u.prv_ipv6[2],
+ entry->params.u.prv_ipv6[3], entry->params.u.prv_ipv6[4],
+ entry->params.u.prv_ipv6[5], entry->params.u.prv_ipv6[6],
+ entry->params.u.prv_ipv6[7], entry->params.u.prv_ipv6[8],
+ entry->params.u.prv_ipv6[9],
+ entry->params.u.prv_ipv6[10],
+ entry->params.u.prv_ipv6[11],
+ entry->params.u.prv_ipv6[12],
+ entry->params.u.prv_ipv6[13],
+ entry->params.u.prv_ipv6[14],
+ entry->params.u.prv_ipv6[15], entry->params.prv_port,
+ (entry->params.pub_ip >> 24) & 0xFF,
+ (entry->params.pub_ip >> 16) & 0xFF,
+ (entry->params.pub_ip >> 8) & 0xFF,
+ entry->params.pub_ip & 0xFF, entry->params.pub_port,
+ entry->params.ttl);
+
+ }
+}
+
+/**
+ * Function to list NAPT entries from FE storage
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+static int
+app_pipeline_cgnapt_entry_ls(struct app_params *app, uint32_t pipeline_id)
+{
+ struct pipeline_cgnapt_t *p;
+ struct app_pipeline_cgnapt_entry *it;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(it, &p->entries, node)
+ print_entry(it);
+ print_static_cgnapt_entries();
+ printf(" - end of napt fe entry list -\n");
+ return 0;
+}
+
+/**
+ * Function to send a debug message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param msg
+ * debug message contents
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+static int
+app_pipeline_cgnapt_entry_dbg(struct app_params *app,
+ uint32_t pipeline_id, uint8_t *msg)
+{
+ struct pipeline_cgnapt_t *p;
+
+ struct pipeline_cgnapt_entry_dbg_msg_req *req;
+ struct pipeline_cgnapt_entry_dbg_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_ENTRY_DBG;
+ req->data[0] = msg[0];
+ req->data[1] = msg[1];
+ req->data[2] = msg[2];
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/**
+ * Function to send a NAPT entry add message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param key
+ * A pointer to NAPT entry key
+ * @param entry_params
+ * A pointer to NAPT entry params
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int app_pipeline_cgnapt_add_entry(
+ struct app_params *app,
+ uint32_t pipeline_id,
+ struct app_pipeline_cgnapt_entry_params *entry_params)
+{
+ struct pipeline_cgnapt_t *p;
+
+ struct pipeline_cgnapt_entry_add_msg_req *req;
+ struct pipeline_cgnapt_entry_add_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) || (entry_params == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -2;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -4;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_ENTRY_ADD;
+ memcpy(&req->data, entry_params, sizeof(*entry_params));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -5;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+ return 0;
+}
+
+/**
+ * Function to send a multiple NAPT entry add message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param key
+ * A pointer to NAPT entry key
+ * @param entry_params
+ * A pointer to multiple NAPT entry params
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int app_pipeline_cgnapt_addm_entry(
+ struct app_params *app,
+ uint32_t pipeline_id,
+ struct app_pipeline_cgnapt_mentry_params *entry_params)
+{
+ struct pipeline_cgnapt_t *p;
+
+ struct pipeline_cgnapt_entry_addm_msg_req *req;
+ struct pipeline_cgnapt_entry_addm_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) || (entry_params == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -2;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -4;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_ENTRY_ADDM;
+ memcpy(&req->data, entry_params, sizeof(*entry_params));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -5;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+ return 0;
+}
+
+/**
+ * Function to send a NAPT entry delete message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param key
+ * A pointer to NAPT entry key
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int
+app_pipeline_cgnapt_delete_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_entry_key *key)
+{
+ struct pipeline_cgnapt_t *p;
+
+ struct pipeline_cgnapt_entry_delete_msg_req *req;
+ struct pipeline_cgnapt_entry_delete_msg_rsp *rsp;
+
+ if (CGNAPT_DEBUG) {
+ uint8_t *KeyP = (uint8_t *) key;
+ int i = 0;
+
+ printf("app_pipeline_cgnapt_delete_entry - Key: ");
+ for (i = 0; i < (int)sizeof(*key); i++)
+ printf(" %02x", KeyP[i]);
+ printf(" ,KeySize %u\n", (int)sizeof(*key));
+ }
+ /* Check input arguments */
+ if ((app == NULL) || (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_ENTRY_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+	if (rsp->status || !rsp->key_found) {
+		app_msg_free(app, rsp);
+		printf("CG-NAPT entry not found or delete failed\n");
+		return 0;
+	}
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/**
+ * A structure defining the entry add parse arguments.
+ */
+struct cmd_entry_add_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t prv_ip;
+ uint16_t prv_port;
+ cmdline_ipaddr_t pub_ip;
+ uint16_t pub_port;
+ uint16_t pid;
+ uint32_t ttl;
+};
+/**
+ * Helper function for the entry add command
+ *
+ * @param parsed_result
+ *  A pointer to the parsed add arguments
+ * @param cl
+ *  unused pointer to struct cmdline
+ * @param data
+ *  void pointer to the app data
+ *
+ */
+static void
+cmd_entry_add_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_add_result *params = parsed_result;
+ struct app_params *app = data;
+ struct app_pipeline_cgnapt_entry_params ent_params;
+ int status;
+
+ if (params->prv_ip.family == AF_INET) {
+ ent_params.type = CGNAPT_ENTRY_IPV4;
+ ent_params.u.prv_ip =
+ rte_bswap32((uint32_t) params->prv_ip.addr.ipv4.s_addr);
+ } else {
+ print_ipv6_address_u8(params->prv_ip.addr.ipv6.s6_addr);
+ print_ipv6_address_u16(params->prv_ip.addr.ipv6.s6_addr16);
+ print_ipv6_address_u32(params->prv_ip.addr.ipv6.s6_addr32);
+ ent_params.type = CGNAPT_ENTRY_IPV6;
+ memcpy(ent_params.u.prv_ipv6, params->prv_ip.addr.ipv6.s6_addr,
+ 16);
+ }
+
+ ent_params.prv_port = params->prv_port;
+ ent_params.pub_ip =
+ rte_bswap32((uint32_t) params->pub_ip.addr.ipv4.s_addr);
+ ent_params.pub_port = params->pub_port;
+ ent_params.prv_phy_port = params->pid;
+ ent_params.ttl = params->ttl;
+
+ status = app_pipeline_cgnapt_add_entry(app, params->p, &ent_params);
+
+ if (status != 0) {
+ printf("CG-NAPT add multiple entry command failed, %d\n",
+ status);
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_entry_add_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_add_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_entry_add_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_add_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_entry_add_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_add_result, entry_string,
+ "entry");
+
+static cmdline_parse_token_string_t cmd_entry_add_add_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_add_result, add_string,
+ "add");
+
+static cmdline_parse_token_ipaddr_t cmd_entry_add_prv_ip =
+TOKEN_IPADDR_INITIALIZER(struct cmd_entry_add_result, prv_ip);
+
+static cmdline_parse_token_num_t cmd_entry_add_prv_port =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_add_result, prv_port, UINT16);
+
+static cmdline_parse_token_ipaddr_t cmd_entry_add_pub_ip =
+TOKEN_IPV4_INITIALIZER(struct cmd_entry_add_result, pub_ip);
+
+static cmdline_parse_token_num_t cmd_entry_add_pub_port =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_add_result, pub_port, UINT16);
+
+static cmdline_parse_token_num_t cmd_entry_add_pid =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_add_result, pid, UINT16);
+
+static cmdline_parse_token_num_t cmd_entry_add_ttl =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_add_result, ttl, UINT32);
+
+static cmdline_parse_inst_t cmd_entry_add = {
+ .f = cmd_entry_add_parsed,
+ .data = NULL,
+ .help_str = "NAPT entry add",
+ .tokens = {
+ (void *)&cmd_entry_add_p_string,
+ (void *)&cmd_entry_add_p,
+ (void *)&cmd_entry_add_entry_string,
+ (void *)&cmd_entry_add_add_string,
+ (void *)&cmd_entry_add_prv_ip,
+ (void *)&cmd_entry_add_prv_port,
+ (void *)&cmd_entry_add_pub_ip,
+ (void *)&cmd_entry_add_pub_port,
+ (void *)&cmd_entry_add_pid,
+ (void *)&cmd_entry_add_ttl,
+ NULL,
+ },
+};
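+
+/*
+ * CLI syntax implied by the token list above (values illustrative only):
+ *
+ *	p <pipeline_id> entry add <prv_ip> <prv_port> <pub_ip> <pub_port>
+ *		<pid> <ttl>
+ *	e.g. p 2 entry add 192.168.0.10 1234 100.0.0.10 4000 0 500
+ */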
+
+/**
+ * A structure defining the multiple entry add parse arguments.
+ */
+struct cmd_entry_addm_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t addm_string;
+ cmdline_ipaddr_t prv_ip;
+ uint16_t prv_port;
+ cmdline_ipaddr_t pub_ip;
+ uint16_t pub_port;
+ uint16_t pid;
+ uint32_t ttl;
+ uint32_t num_ue;
+ uint16_t prv_port_max;
+ uint16_t pub_port_max;
+};
+
+/**
+ * Helper function for the multiple entry add (addm) command
+ *
+ * @param parsed_result
+ *  A pointer to the parsed addm arguments
+ * @param cl
+ *  unused pointer to struct cmdline
+ * @param data
+ *  void pointer to the app data
+ */
+static void
+cmd_entry_addm_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_addm_result *params = parsed_result;
+ struct app_params *app = data;
+ struct app_pipeline_cgnapt_mentry_params ent_params;
+ int status;
+
+ if (params->prv_ip.family == AF_INET) {
+ ent_params.type = CGNAPT_ENTRY_IPV4;
+ ent_params.u.prv_ip =
+ rte_bswap32((uint32_t) params->prv_ip.addr.ipv4.s_addr);
+ } else {
+ print_ipv6_address_u8(params->prv_ip.addr.ipv6.s6_addr);
+ print_ipv6_address_u16(params->prv_ip.addr.ipv6.s6_addr16);
+ print_ipv6_address_u32(params->prv_ip.addr.ipv6.s6_addr32);
+ ent_params.type = CGNAPT_ENTRY_IPV6;
+ memcpy(ent_params.u.prv_ipv6, params->prv_ip.addr.ipv6.s6_addr,
+ 16);
+ }
+
+ ent_params.prv_port = params->prv_port;
+ ent_params.pub_ip =
+ rte_bswap32((uint32_t) params->pub_ip.addr.ipv4.s_addr);
+ ent_params.pub_port = params->pub_port;
+ ent_params.prv_phy_port = params->pid;
+ ent_params.ttl = params->ttl;
+ ent_params.num_ue = params->num_ue;
+ ent_params.prv_port_max = params->prv_port_max;
+ ent_params.pub_port_max = params->pub_port_max;
+
+ status = app_pipeline_cgnapt_addm_entry(app, params->p, &ent_params);
+
+ if (status != 0) {
+ printf("CG-NAPT add multiple entry command failed, %d\n",
+ status);
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_entry_add_addm_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_addm_result, addm_string,
+ "addm");
+
+static cmdline_parse_token_num_t cmd_entry_addm_prv_port =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_addm_result, prv_port_max, UINT16);
+
+static cmdline_parse_token_num_t cmd_entry_addm_pub_port =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_addm_result, pub_port_max, UINT16);
+
+static cmdline_parse_token_num_t cmd_entry_addm_max_ue =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_addm_result, num_ue, UINT32);
+
+static cmdline_parse_inst_t cmd_entry_addm = {
+ .f = cmd_entry_addm_parsed,
+ .data = NULL,
+ .help_str = "NAPT entry add multiple",
+ .tokens = {
+ (void *)&cmd_entry_add_p_string,
+ (void *)&cmd_entry_add_p,
+ (void *)&cmd_entry_add_entry_string,
+ (void *)&cmd_entry_add_addm_string,
+ (void *)&cmd_entry_add_prv_ip,
+ (void *)&cmd_entry_add_prv_port,
+ (void *)&cmd_entry_add_pub_ip,
+ (void *)&cmd_entry_add_pub_port,
+ (void *)&cmd_entry_add_pid,
+ (void *)&cmd_entry_add_ttl,
+ (void *)&cmd_entry_addm_max_ue,
+ (void *)&cmd_entry_addm_prv_port,
+ (void *)&cmd_entry_addm_pub_port,
+ NULL,
+ },
+};
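+
+/*
+ * CLI syntax implied by the token list above:
+ *
+ *	p <pipeline_id> entry addm <prv_ip> <prv_port> <pub_ip> <pub_port>
+ *		<pid> <ttl> <num_ue> <prv_port_max> <pub_port_max>
+ *
+ * i.e. the single-entry "entry add" arguments followed by the number of
+ * UEs and the private/public port range limits.
+ */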
+
+/**
+ * A structure defining the entry delete parse arguments.
+ */
+struct cmd_entry_del_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_ipaddr_t ip;
+ uint16_t port;
+ uint16_t pid;
+};
+
+/**
+ * Helper function for the entry delete command
+ *
+ * @param parsed_result
+ *  A pointer to the parsed delete arguments
+ * @param cl
+ *  unused pointer to struct cmdline
+ * @param data
+ *  void pointer to the app data
+ */
+static void
+cmd_entry_del_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_del_result *params = parsed_result;
+ struct app_params *app = data;
+ struct pipeline_cgnapt_entry_key key;
+
+ int status;
+
+ /* Create entry */
+ if (params->ip.family == AF_INET)
+ key.ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ else
+ key.ip =
+ rte_bswap32((uint32_t) params->ip.addr.ipv6.s6_addr32[3]);
+ key.port = params->port;
+ key.pid = params->pid;
+
+ status = app_pipeline_cgnapt_delete_entry(app, params->p, &key);
+
+ if (status != 0) {
+ printf("CG-NAPT entry del command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_entry_del_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_del_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_entry_del_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_del_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_entry_del_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_del_result, entry_string,
+ "entry");
+
+static cmdline_parse_token_string_t cmd_entry_del_del_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_del_result, del_string,
+ "del");
+
+static cmdline_parse_token_ipaddr_t cmd_entry_del_ip =
+TOKEN_IPADDR_INITIALIZER(struct cmd_entry_del_result, ip);
+
+static cmdline_parse_token_num_t cmd_entry_del_port =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_del_result, port, UINT16);
+
+static cmdline_parse_token_num_t cmd_entry_del_pid =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_del_result, pid, UINT16);
+
+static cmdline_parse_inst_t cmd_entry_del = {
+ .f = cmd_entry_del_parsed,
+ .data = NULL,
+ .help_str = "Entry delete",
+ .tokens = {
+ (void *)&cmd_entry_del_p_string,
+ (void *)&cmd_entry_del_p,
+ (void *)&cmd_entry_del_entry_string,
+ (void *)&cmd_entry_del_del_string,
+ (void *)&cmd_entry_del_ip,
+ (void *)&cmd_entry_del_port,
+ (void *)&cmd_entry_del_pid,
+ NULL,
+ },
+};
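+
+/*
+ * Example CLI usage of the del command registered above:
+ *   p <pipeline_id> entry del <ip> <port> <pid>
+ */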
+
+/**
+ * A structure defining the list entry parse arguments.
+ */
+struct cmd_entry_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+/**
+ * Helper function for the list entry command
+ *
+ * @param parsed_result
+ * A pointer to the parsed list arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Void pointer to the application data
+ */
+static void
+cmd_entry_ls_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_ls_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_cgnapt_entry_ls(app, params->p);
+
+ if (status != 0) {
+ printf("Ls command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_entry_ls_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_ls_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_entry_ls_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_ls_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_entry_ls_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_ls_result,
+ entry_string, "entry");
+
+static cmdline_parse_token_string_t cmd_entry_ls_ls_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_ls_result, ls_string,
+ "ls");
+
+static cmdline_parse_inst_t cmd_entry_ls = {
+ .f = cmd_entry_ls_parsed,
+ .data = NULL,
+ .help_str = "Entry list",
+ .tokens = {
+ (void *)&cmd_entry_ls_p_string,
+ (void *)&cmd_entry_ls_p,
+ (void *)&cmd_entry_ls_entry_string,
+ (void *)&cmd_entry_ls_ls_string,
+ NULL,
+ },
+};
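+
+/*
+ * Example CLI usage of the ls command registered above:
+ *   p <pipeline_id> entry ls
+ */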
+
+/**
+ * A structure defining the dbg cmd parse arguments.
+ */
+struct cmd_entry_dbg_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t dbg_string;
+ uint8_t cmd;
+ uint8_t d1;
+ uint8_t d2;
+};
+
+/**
+ * Helper function for the dbg command
+ *
+ * @param parsed_result
+ * A pointer to the parsed dbg arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Void pointer to the application data
+ */
+static void
+cmd_entry_dbg_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_dbg_result *params = parsed_result;
+ struct app_params *app = data;
+ uint8_t msg[4];
+ int status;
+
+ msg[0] = params->cmd;
+ msg[1] = params->d1;
+ msg[2] = params->d2;
+ status = app_pipeline_cgnapt_entry_dbg(app, params->p, msg);
+
+ if (status != 0) {
+ printf("Dbg Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_entry_dbg_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_entry_dbg_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_entry_dbg_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result,
+ entry_string, "entry");
+
+static cmdline_parse_token_string_t cmd_entry_dbg_dbg_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, dbg_string,
+ "dbg");
+
+static cmdline_parse_token_num_t cmd_entry_dbg_cmd =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, cmd, UINT8);
+
+static cmdline_parse_token_num_t cmd_entry_dbg_d1 =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, d1, UINT8);
+
+static cmdline_parse_token_num_t cmd_entry_dbg_d2 =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, d2, UINT8);
+
+static cmdline_parse_inst_t cmd_entry_dbg = {
+ .f = cmd_entry_dbg_parsed,
+ .data = NULL,
+ .help_str = "NAPT dbg cmd",
+ .tokens = {
+ (void *)&cmd_entry_dbg_p_string,
+ (void *)&cmd_entry_dbg_p,
+ (void *)&cmd_entry_dbg_entry_string,
+ (void *)&cmd_entry_dbg_dbg_string,
+ (void *)&cmd_entry_dbg_cmd,
+ (void *)&cmd_entry_dbg_d1,
+ (void *)&cmd_entry_dbg_d2,
+ NULL,
+ },
+};
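+
+/*
+ * Example CLI usage of the dbg command registered above:
+ *   p <pipeline_id> entry dbg <cmd> <d1> <d2>
+ */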
+
+/**
+ * A structure defining num ip clients cmd parse arguments.
+ */
+struct cmd_numipcli_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t numipcli_string;
+};
+
+/**
+ * Helper function for printing the number of IP clients
+ *
+ * @param parsed_result
+ * Unused pointer to the parsed arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Unused void pointer data
+ */
+static void
+cmd_numipcli_parsed(__rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ print_num_ip_clients();
+}
+
+static cmdline_parse_token_string_t cmd_numipcli_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_numipcli_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_numipcli_p =
+TOKEN_NUM_INITIALIZER(struct cmd_numipcli_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_numipcli_string =
+TOKEN_STRING_INITIALIZER(struct cmd_numipcli_result,
+ numipcli_string, "numipcli");
+
+static cmdline_parse_inst_t cmd_numipcli = {
+ .f = cmd_numipcli_parsed,
+ .data = NULL,
+ .help_str = "Num IP Clients command",
+ .tokens = {
+ (void *)&cmd_numipcli_p_string,
+ (void *)&cmd_numipcli_p,
+ (void *)&cmd_numipcli_string,
+ NULL,
+ },
+};
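+
+/*
+ * Example CLI usage of the numipcli command registered above:
+ *   p <pipeline_id> numipcli
+ */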
+
+/**
+ * Function to send a ver cmd message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param msg
+ * debug message contents
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+static int
+app_pipeline_cgnapt_ver(struct app_params *app,
+ uint32_t pipeline_id, uint8_t *msg)
+{
+
+ struct pipeline_cgnapt_t *p;
+ struct pipeline_cgnapt_entry_dbg_msg_req *req;
+ struct pipeline_cgnapt_entry_dbg_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_VER;
+ req->data[0] = msg[0];
+ req->data[1] = msg[1];
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/**
+ * A structure defining ver cmd parse arguments.
+ */
+struct cmd_ver_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t ver_string;
+ uint8_t cmd;
+ uint8_t d1;
+};
+
+/**
+ * Helper function for the ver command
+ *
+ * @param parsed_result
+ * A pointer to the parsed ver arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Void pointer to the application data
+ */
+static void
+cmd_ver_parsed(void *parsed_result, __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_ver_result *params = parsed_result;
+ struct app_params *app = data;
+ uint8_t msg[4];
+ int status;
+
+ msg[0] = params->cmd;
+ msg[1] = params->d1;
+ status = app_pipeline_cgnapt_ver(app, params->p, msg);
+
+ if (status != 0) {
+ printf("Version Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_ver_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_ver_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_ver_p =
+TOKEN_NUM_INITIALIZER(struct cmd_ver_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_ver_string =
+TOKEN_STRING_INITIALIZER(struct cmd_ver_result,
+ ver_string, "ver");
+
+static cmdline_parse_token_num_t cmd_ver_cmd =
+TOKEN_NUM_INITIALIZER(struct cmd_ver_result, cmd, UINT8);
+
+static cmdline_parse_token_num_t cmd_ver_d1 =
+TOKEN_NUM_INITIALIZER(struct cmd_ver_result, d1, UINT8);
+
+static cmdline_parse_inst_t cmd_ver = {
+ .f = cmd_ver_parsed,
+ .data = NULL,
+ .help_str = "NAPT ver cmd",
+ .tokens = {
+ (void *)&cmd_ver_p_string,
+ (void *)&cmd_ver_p,
+ (void *)&cmd_ver_string,
+ (void *)&cmd_ver_cmd,
+ (void *)&cmd_ver_d1,
+ NULL,
+ },
+};
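+
+/*
+ * Example CLI usage of the ver command registered above:
+ *   p <pipeline_id> ver <cmd> <d1>
+ */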
+
+/**
+ * Function to send a nsp add cmd message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param nsp
+ * A pointer to struct pipeline_cgnapt_nsp_t
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+static int
+app_pipeline_cgnapt_add_nsp(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_nsp_t *nsp)
+{
+
+ struct pipeline_cgnapt_t *p;
+ struct pipeline_cgnapt_nsp_add_msg_req *req;
+ struct pipeline_cgnapt_nsp_add_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ printf("1st if condition\n");
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -1;
+
+	printf("2nd if condition\n");
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+	printf("3rd if condition\n");
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_NSP_ADD;
+ memcpy(&req->nsp, nsp, sizeof(struct pipeline_cgnapt_nsp_t));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+	printf("4th if condition\n");
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/**
+ * A structure defining nsp add cmd parse arguments.
+ */
+struct cmd_nsp_add_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t nsp_string;
+ cmdline_fixed_string_t add_string;
+ cmdline_ipaddr_t ip;
+};
+
+/**
+ * Helper function for the nsp add command
+ *
+ * @param parsed_result
+ * A pointer to the parsed nsp add arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Void pointer to the application data
+ */
+static void
+cmd_nsp_add_parsed(void *parsed_result, __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_nsp_add_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ struct pipeline_cgnapt_nsp_t nsp;
+
+ memcpy(&nsp.prefix, &params->ip.addr.ipv6.s6_addr, 16);
+ nsp.depth = params->ip.prefixlen;
+ status = app_pipeline_cgnapt_add_nsp(app, params->p, &nsp);
+ if (status != 0) {
+ printf("NSP ADD Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_add_nsp_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_nsp_add_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_add_nsp_p =
+TOKEN_NUM_INITIALIZER(struct cmd_nsp_add_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_add_nsp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_nsp_add_result,
+ nsp_string, "nsp");
+
+static cmdline_parse_token_string_t cmd_add_nsp_add_string =
+TOKEN_STRING_INITIALIZER(struct cmd_nsp_add_result,
+ add_string, "add");
+
+static cmdline_parse_token_ipaddr_t cmd_add_nsp_ip =
+TOKEN_IPNET_INITIALIZER(struct cmd_nsp_add_result, ip);
+
+static cmdline_parse_inst_t cmd_nsp_add = {
+ .f = cmd_nsp_add_parsed,
+ .data = NULL,
+ .help_str = "NAPT NSP ADD cmd",
+ .tokens = {
+ (void *)&cmd_add_nsp_p_string,
+ (void *)&cmd_add_nsp_p,
+ (void *)&cmd_add_nsp_string,
+ (void *)&cmd_add_nsp_add_string,
+ (void *)&cmd_add_nsp_ip,
+ NULL,
+ },
+};
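+
+/*
+ * Example CLI usage of the nsp add command registered above
+ * (the IPv6 prefix token takes prefix/length notation):
+ *   p <pipeline_id> nsp add <ipv6_prefix>/<depth>
+ */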
+
+/**
+ * Function to send a nsp del cmd message to BE
+ *
+ * @param app
+ * A pointer to pipeline app
+ * @param pipeline_id
+ * Pipeline id
+ * @param nsp
+ * A pointer to struct pipeline_cgnapt_nsp_t
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+static int
+app_pipeline_cgnapt_del_nsp(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_nsp_t *nsp)
+{
+
+ struct pipeline_cgnapt_t *p;
+ struct pipeline_cgnapt_nsp_del_msg_req *req;
+ struct pipeline_cgnapt_nsp_del_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ (struct pipeline_type *)&pipeline_cgnapt);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_CGNAPT_MSG_REQ_NSP_DEL;
+ memcpy(&req->nsp, nsp, sizeof(struct pipeline_cgnapt_nsp_t));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/**
+ * A structure defining nsp del cmd parse arguments.
+ */
+struct cmd_nsp_del_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t nsp_string;
+ cmdline_fixed_string_t del_string;
+ cmdline_ipaddr_t ip;
+};
+
+/**
+ * Helper function for the nsp del command
+ *
+ * @param parsed_result
+ * A pointer to the parsed nsp del arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Void pointer to the application data
+ */
+static void
+cmd_nsp_del_parsed(void *parsed_result, __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_nsp_del_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ struct pipeline_cgnapt_nsp_t nsp;
+
+ memcpy(&nsp.prefix, &params->ip.addr.ipv6.s6_addr, 16);
+ nsp.depth = params->ip.prefixlen;
+ status = app_pipeline_cgnapt_del_nsp(app, params->p, &nsp);
+
+ if (status != 0) {
+ printf("NSP DEL Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t cmd_del_nsp_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_nsp_del_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_del_nsp_p =
+TOKEN_NUM_INITIALIZER(struct cmd_nsp_del_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_del_nsp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_nsp_del_result,
+ nsp_string, "nsp");
+
+static cmdline_parse_token_string_t cmd_del_nsp_del_string =
+TOKEN_STRING_INITIALIZER(struct cmd_nsp_del_result,
+ del_string, "del");
+
+static cmdline_parse_token_ipaddr_t cmd_del_nsp_ip =
+TOKEN_IPNET_INITIALIZER(struct cmd_nsp_del_result, ip);
+
+static cmdline_parse_inst_t cmd_nsp_del = {
+ .f = cmd_nsp_del_parsed,
+ .data = NULL,
+ .help_str = "NAPT NSP DEL cmd",
+ .tokens = {
+ (void *)&cmd_del_nsp_p_string,
+ (void *)&cmd_del_nsp_p,
+ (void *)&cmd_del_nsp_string,
+ (void *)&cmd_del_nsp_del_string,
+ (void *)&cmd_del_nsp_ip,
+ NULL,
+ },
+};
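+
+/*
+ * Example CLI usage of the nsp del command registered above:
+ *   p <pipeline_id> nsp del <ipv6_prefix>/<depth>
+ */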
+
+/**
+ * A structure defining the cgnapt stats cmd parse arguments.
+ */
+struct cmd_cgnapt_stats_result {
+ cmdline_fixed_string_t p_string;
+ cmdline_fixed_string_t cgnapt_string;
+ cmdline_fixed_string_t stats_string;
+};
+
+/**
+ * Helper function for the cgnapt stats command
+ *
+ * @param parsed_result
+ * Unused pointer to the parsed arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Unused void pointer data
+ */
+static void
+cmd_cgnapt_stats_parsed(
+ __rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ all_cgnapt_stats();
+}
+
+static cmdline_parse_token_string_t cmd_cgnapt_stats_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_stats_result, p_string, "p");
+
+static cmdline_parse_token_string_t cmd_cgnapt_stats_cgnapt_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_stats_result,
+ cgnapt_string, "cgnapt");
+
+static cmdline_parse_token_string_t cmd_cgnapt_stats_stats_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_stats_result, stats_string,
+ "stats");
+
+static cmdline_parse_inst_t cmd_stats = {
+ .f = cmd_cgnapt_stats_parsed,
+ .data = NULL,
+ .help_str = "CGNAPT stats cmd",
+ .tokens = {
+ (void *)&cmd_cgnapt_stats_p_string,
+ (void *)&cmd_cgnapt_stats_cgnapt_string,
+ (void *)&cmd_cgnapt_stats_stats_string,
+ NULL,
+ },
+};
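+
+/*
+ * Example CLI usage of the stats command registered above:
+ *   p cgnapt stats
+ */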
+
+/**
+ * A structure defining the cgnapt clear stats cmd parse arguments.
+ */
+struct cmd_cgnapt_clear_stats_result {
+ cmdline_fixed_string_t p_string;
+ cmdline_fixed_string_t cgnapt_string;
+ cmdline_fixed_string_t clear_string;
+ cmdline_fixed_string_t stats_string;
+};
+
+/**
+ * Helper function for the cgnapt clear stats command
+ *
+ * @param parsed_result
+ * Unused pointer to the parsed arguments
+ * @param cl
+ * Unused pointer to struct cmdline
+ * @param data
+ * Unused void pointer data
+ */
+static void
+cmd_cgnapt_clear_stats_parsed(
+ __rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ all_cgnapt_clear_stats();
+}
+
+static cmdline_parse_token_string_t cmd_cgnapt_clear_stats_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_clear_stats_result, p_string, "p");
+
+static cmdline_parse_token_string_t cmd_cgnapt_clear_stats_cgnapt_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_clear_stats_result,
+ cgnapt_string, "cgnapt");
+
+static cmdline_parse_token_string_t cmd_cgnapt_clear_stats_clear_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_clear_stats_result,
+ clear_string, "clear");
+
+static cmdline_parse_token_string_t cmd_cgnapt_clear_stats_stats_string =
+TOKEN_STRING_INITIALIZER(struct cmd_cgnapt_clear_stats_result, stats_string,
+ "stats");
+
+static cmdline_parse_inst_t cmd_clear_stats = {
+ .f = cmd_cgnapt_clear_stats_parsed,
+ .data = NULL,
+ .help_str = "CGNAPT clear stats cmd",
+ .tokens = {
+ (void *)&cmd_cgnapt_clear_stats_p_string,
+ (void *)&cmd_cgnapt_clear_stats_cgnapt_string,
+ (void *)&cmd_cgnapt_clear_stats_clear_string,
+ (void *)&cmd_cgnapt_clear_stats_stats_string,
+ NULL,
+ },
+};
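+
+/*
+ * Example CLI usage of the clear stats command registered above:
+ *   p cgnapt clear stats
+ */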
+
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_entry_add,
+ (cmdline_parse_inst_t *) &cmd_entry_del,
+ (cmdline_parse_inst_t *) &cmd_entry_ls,
+ (cmdline_parse_inst_t *) &cmd_entry_dbg,
+ (cmdline_parse_inst_t *) &cmd_entry_addm,
+ (cmdline_parse_inst_t *) &cmd_ver,
+ (cmdline_parse_inst_t *) &cmd_nsp_add,
+ (cmdline_parse_inst_t *) &cmd_nsp_del,
+ (cmdline_parse_inst_t *) &cmd_numipcli,
+ #ifdef PCP_ENABLE
+ (cmdline_parse_inst_t *) &cmd_pcp,
+ #endif
+ (cmdline_parse_inst_t *) &cmd_stats,
+ (cmdline_parse_inst_t *) &cmd_clear_stats,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_cgnapt_fe_ops = {
+ .f_init = pipeline_cgnapt_init,
+ .f_free = app_pipeline_cgnapt_free,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_cgnapt = {
+ .name = "CGNAPT",
+ .be_ops = &pipeline_cgnapt_be_ops,
+ .fe_ops = &pipeline_cgnapt_fe_ops,
+};
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h
new file mode 100644
index 00000000..5491648a
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h
@@ -0,0 +1,138 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_CGNAPT_H__
+#define __INCLUDE_PIPELINE_CGNAPT_H__
+
+/**
+ * @file
+ * Pipeline CG-NAPT FE.
+ *
+ * Pipeline CG-NAPT Front End (FE).
+ * Runs on the Master pipeline, responsible for CLI commands.
+ *
+ */
+
+#include "pipeline.h"
+#include "pipeline_cgnapt_common.h"
+
+/**
+ * Add NAPT rule to the NAPT rule table.
+ * Both IPv4 and IPv6 rules can be added.
+ *
+ * @param app
+ * A pointer to the pipeline app parameters.
+ * @param pipeline_id
+ * Pipeline id
+ * @param key
+ * A pointer to the NAPT key corresponding to the entry being added.
+ * @param entry_params
+ * A pointer to the NAPT entry being added.
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+#if 0
+int
+app_pipeline_cgnapt_add_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_entry_key *key,
+ struct app_pipeline_cgnapt_entry_params
+ *entry_params);
+#endif
+int app_pipeline_cgnapt_add_entry(
+ struct app_params *app,
+ uint32_t pipeline_id,
+ struct app_pipeline_cgnapt_entry_params *entry_params);
+/**
+ * Delete a NAPT rule from the NAPT rule table.
+ * Both IPv4 and IPv6 rules can be deleted.
+ *
+ * @param app
+ * A pointer to the pipeline app parameters.
+ * @param pipeline_id
+ * Pipeline id
+ * @param key
+ * A pointer to the NAPT key corresponding to the entry being deleted.
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int
+app_pipeline_cgnapt_delete_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_entry_key *key);
+
+/**
+ * Add multiple NAPT rules to the NAPT rule table.
+ * Both IPv4 and IPv6 rules can be added.
+ *
+ * @param app
+ * A pointer to the pipeline app parameters.
+ * @param pipeline_id
+ * Pipeline id
+ * @param entry_params
+ * A pointer to the multiple NAPT entry params being added.
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int app_pipeline_cgnapt_addm_entry(struct app_params *app, uint32_t pipeline_id,
+ struct app_pipeline_cgnapt_mentry_params
+ *entry_params);
+
+/**
+ * Add Network Specific Prefix for NAT64.
+ *
+ * @param app
+ * A pointer to the pipeline app parameters.
+ * @param pipeline_id
+ * Pipeline id
+ * @param nsp
+ * A pointer to NSP being added.
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int
+app_pipeline_cgnapt_nsp_add_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_nsp_t *nsp);
+
+/**
+ * Delete a Network Specific Prefix for NAT64.
+ *
+ * @param app
+ * A pointer to the pipeline app parameters.
+ * @param pipeline_id
+ * Pipeline id
+ * @param nsp
+ * A pointer to NSP being deleted.
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int
+app_pipeline_cgnapt_nsp_del_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_cgnapt_nsp_t *nsp);
+
+/*
+ * Pipeline type
+ */
+extern struct pipeline_type pipeline_cgnapt;
+
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c
new file mode 100644
index 00000000..9a05a4da
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c
@@ -0,0 +1,10963 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+ * @file
+ * Pipeline CG-NAPT BE Implementation.
+ *
+ * Implementation of Pipeline CG-NAPT Back End (BE).
+ * Provides NAPT service on dataplane packets.
+ * Runs on a core as defined in the config file.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_table_stub.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_pipeline.h>
+#include <rte_timer.h>
+#include <rte_config.h>
+#include <rte_prefetch.h>
+#include <rte_hexdump.h>
+
+#include "pipeline_cgnapt_be.h"
+#include "pipeline_cgnapt_common.h"
+#include "pipeline_actions_common.h"
+#include "hash_func.h"
+#include "pipeline_arpicmp_be.h"
+#include "vnf_common.h"
+#include "app.h"
+#include "pipeline_common_be.h"
+#include "vnf_common.h"
+#include "lib_sip_alg.h"
+#include "lib_icmpv6.h"
+
+#include "pipeline_common_fe.h"
+#ifdef CT_CGNAT
+#include "rte_ct_tcp.h"
+#include "rte_cnxn_tracking.h"
+#endif
+#ifdef FTP_ALG
+#include "lib_ftp_alg.h"
+#endif
+#ifdef PCP_ENABLE
+#include "cgnapt_pcp_be.h"
+#endif
+
+/* To maintain all cgnapt pipeline pointers used for all stats */
+struct pipeline_cgnapt *all_pipeline_cgnapt[128];
+uint8_t n_cgnapt_pipeline;
+
+/* To know egress or ingress port */
+static uint8_t cgnapt_in_port_egress_prv[PIPELINE_MAX_PORT_IN];
+static uint8_t cgnapt_prv_que_port_index[PIPELINE_MAX_PORT_IN];
+
+/* Max port per client declarations */
+
+struct rte_hash_parameters max_port_per_client_hash_params = {
+ .name = "MAX_PORT_PER_CLIENT",
+ .entries = MAX_DYN_ENTRY,
+ .key_len = sizeof(struct max_port_per_client_key),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+#ifdef CT_CGNAT
+struct rte_ct_cnxn_tracker *cgnat_cnxn_tracker;
+#endif
+
+/***** Common Port Allocation declarations *****/
+
+struct rte_ring *port_alloc_ring[MAX_CGNAPT_SETS] = { NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL };
+const char *napt_port_alloc_ring_name[MAX_CGNAPT_SETS] = {
+ "NAPT_PORT_ALLOC_0 ",
+ "NAPT_PORT_ALLOC_1 ",
+ "NAPT_PORT_ALLOC_2 ",
+ "NAPT_PORT_ALLOC_3 ",
+ "NAPT_PORT_ALLOC_4 ",
+ "NAPT_PORT_ALLOC_5 ",
+ "NAPT_PORT_ALLOC_6 ",
+ "NAPT_PORT_ALLOC_7 "
+};
+
+int vnf_set_count = -1;
+
+struct app_params *myApp;
+
+/***** Common Port Allocation declarations *****/
+int napt_port_alloc_elem_count;
+
+/***** Common Table declarations *****/
+struct rte_hash_parameters napt_common_table_hash_params = {
+ .name = "NAPT_COM_TBL",
+ .entries = MAX_NAPT_ENTRIES,
+ .key_len = sizeof(struct pipeline_cgnapt_entry_key),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ .extra_flag = 1,
+};
+
+/***** ARP local cache *****/
+uint8_t link_hw_laddr_valid[MAX_NUM_LOCAL_MAC_ADDRESS] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct ether_addr link_hw_laddr[MAX_NUM_LOCAL_MAC_ADDRESS] = {
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} },
+ {.addr_bytes = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }
+};
+
+/****** NAT64 declarations *****/
+
+uint8_t well_known_prefix[16] = {
+ 0x00, 0x64, 0xff, 0x9b,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static uint32_t local_get_nh_ipv4(
+ uint32_t ip,
+ uint32_t *port,
+ uint32_t *nhip,
+ struct pipeline_cgnapt *p_nat);
+
+static void do_local_nh_ipv4_cache(
+ uint32_t dest_if,
+ struct pipeline_cgnapt *p_nat);
+
+static uint32_t local_get_nh_ipv6(
+ uint8_t *ip,
+ uint32_t *port,
+ uint8_t nhip[],
+ struct pipeline_cgnapt *p_nat);
+
+static void do_local_nh_ipv6_cache(
+ uint32_t dest_if,
+ struct pipeline_cgnapt *p_nat);
+
+static uint8_t check_arp_icmp(
+ struct rte_mbuf *pkt,
+ uint64_t pkt_mask,
+ struct pipeline_cgnapt *p_nat);
+
+/**
+ * Finds the next power of two for n. If n itself
+ * is a power of two then n is returned.
+ *
+ * @param n
+ * Input value, typically a 32-bit value
+ *
+ * @return
+ * Value rounded up to the next power of 2
+ */
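+/* For example, nextPowerOf2(33) returns 64 and nextPowerOf2(64) returns 64. */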
+uint64_t nextPowerOf2(uint64_t n)
+{
+ n--;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ n |= n >> 32;
+ n++;
+ return n;
+}
+
+/**
+ * Function to get the MAC addr of the local link
+ *
+ * @param out_port
+ * Physical port number
+ *
+ * @return
+ * Outport link MAC addr
+ */
+
+struct ether_addr *get_local_link_hw_addr(uint8_t out_port)
+{
+ return &link_hw_laddr[out_port];
+}
+
+/**
+ * Function to check whether the local MAC addr for a port is cached
+ * in the array (instead of a hash table lookup)
+ *
+ * @param out_port
+ * Physical port number
+ *
+ * @return
+ * Non-zero if the outport link MAC addr is valid, 0 otherwise
+ */
+
+uint8_t local_dest_mac_present(uint8_t out_port)
+{
+ return link_hw_laddr_valid[out_port];
+}
+
+/**
+ * Function to get the IPv4 next hop from the thread-local array
+ *
+ * @param ip
+ * IPv4 address
+ * @param port
+ * NH port number
+ * @param nhip
+ * NH IP of IPv4 type
+ * @param p_nat
+ * CGNAPT pipeline ptr
+ *
+ * @return
+ * 1 on success, 0 on failure
+ */
+
+static uint32_t local_get_nh_ipv4(
+ uint32_t ip,
+ uint32_t *port,
+ uint32_t *nhip,
+ struct pipeline_cgnapt *p_nat)
+{
+ int i;
+ for (i = 0; i < p_nat->local_lib_arp_route_ent_cnt; i++) {
+ if (((p_nat->local_lib_arp_route_table[i].ip &
+ p_nat->local_lib_arp_route_table[i].mask) ==
+ (ip & p_nat->local_lib_arp_route_table[i].mask))) {
+ *port = p_nat->local_lib_arp_route_table[i].port;
+
+ *nhip = p_nat->local_lib_arp_route_table[i].nh;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Function to make a local copy of the NH entry of type IPv4
+ *
+ * @param dest_if
+ * Physical port number
+ * @param p_nat
+ * CGNAPT pipeline ptr
+ *
+ */
+
+static void do_local_nh_ipv4_cache(
+ uint32_t dest_if,
+ struct pipeline_cgnapt *p_nat)
+{
+
+ /* Search for the entry and do local copy */
+ int i;
+
+ for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
+ if (lib_arp_route_table[i].port == dest_if) {
+
+ struct lib_arp_route_table_entry *lentry =
+ &p_nat->local_lib_arp_route_table
+ [p_nat->local_lib_arp_route_ent_cnt];
+
+ lentry->ip = lib_arp_route_table[i].ip;
+ lentry->mask = lib_arp_route_table[i].mask;
+ lentry->port = lib_arp_route_table[i].port;
+ lentry->nh = lib_arp_route_table[i].nh;
+
+ p_nat->local_lib_arp_route_ent_cnt++;
+ break;
+ }
+ }
+}
+
+
+/**
+ * Function to get the IPv6 next hop from the thread-local array
+ *
+ * @param ip
+ * Pointer to the starting addr of the IPv6 address
+ * @param port
+ * NH port number
+ * @param nhip
+ * NH IP of IPv6 type
+ * @param p_nat
+ * CGNAPT pipeline ptr
+ *
+ * @return
+ * 1 on success, 0 on failure
+ */
+
+static uint32_t local_get_nh_ipv6(
+ uint8_t *ip,
+ uint32_t *port,
+ uint8_t nhip[],
+ struct pipeline_cgnapt *p_nat)
+{
+ int i = 0;
+ uint8_t netmask_ipv6[16];
+ uint8_t k = 0, l = 0, depthflags = 0, depthflags1 = 0;
+
+ for (i = 0; i < p_nat->local_lib_nd_route_ent_cnt; i++) {
+
+ convert_prefixlen_to_netmask_ipv6(
+ p_nat->local_lib_nd_route_table[i].depth,
+ netmask_ipv6);
+
+ for (k = 0; k < 16; k++)
+ if (p_nat->local_lib_nd_route_table[i].ipv6[k] &
+ netmask_ipv6[k])
+ depthflags++;
+
+ for (l = 0; l < 16; l++)
+ if (ip[l] & netmask_ipv6[l])
+ depthflags1++;
+
+ int j = 0;
+ if (depthflags == depthflags1) {
+ *port = p_nat->local_lib_nd_route_table[i].port;
+
+ for (j = 0; j < 16; j++)
+ nhip[j] = p_nat->local_lib_nd_route_table[i].
+ nhipv6[j];
+ return 1;
+ }
+
+ depthflags = 0;
+ depthflags1 = 0;
+ }
+ return 0;
+}
+
+
+/**
+ * Function to make a local copy of the NH entry of type IPv6
+ *
+ * @param dest_if
+ * Physical port number
+ * @param p_nat
+ * CGNAPT pipeline ptr
+ *
+ */
+
+static void do_local_nh_ipv6_cache(
+ uint32_t dest_if,
+ struct pipeline_cgnapt *p_nat)
+{
+ /* Search for the entry and do local copy */
+ int i, l;
+ for (i = 0; i < MAX_ND_RT_ENTRY; i++) {
+
+ if (lib_nd_route_table[i].port == dest_if) {
+
+ struct lib_nd_route_table_entry *lentry =
+ &p_nat->local_lib_nd_route_table
+ [p_nat->local_lib_nd_route_ent_cnt];
+
+ for (l = 0; l < 16; l++) {
+ lentry->ipv6[l] =
+ lib_nd_route_table[i].ipv6[l];
+ lentry->nhipv6[l] =
+ lib_nd_route_table[i].nhipv6[l];
+ }
+ lentry->depth = lib_nd_route_table[i].depth;
+ lentry->port = lib_nd_route_table[i].port;
+
+ p_nat->local_lib_nd_route_ent_cnt++;
+ break;
+ } //if
+ } //for
+}
+
+#ifdef SIP_ALG
+/* Commented code may be required for future use; please keep it */
+#if 0
+static int retrieve_cgnapt_entry_alg(
+ struct pipeline_cgnapt_entry_key *key,
+ struct cgnapt_table_entry **entry_ptr1,
+ struct cgnapt_table_entry **entry_ptr2)
+{
+ #ifdef CGNAPT_DBG_PRNT
+ printf("retrieve_cgnapt_entry key detail Entry:"
+ "0x%x, %d, %d\n", key->ip, key->port,
+ key->pid);
+ #endif
+
+ int position = rte_hash_lookup(napt_common_table, key);
+ if (position < 0) {
+ printf("Invalid cgnapt entry position(first_key): %d\n",
+ position);
+ return 0;
+ }
+
+ *entry_ptr1 = &napt_hash_tbl_entries[position];
+
+ uint32_t prv_ip = (*entry_ptr1)->data.prv_ip;
+ uint32_t prv_port = (*entry_ptr1)->data.prv_port;
+ uint32_t prv_phy_port = (*entry_ptr1)->data.prv_phy_port;
+
+ struct pipeline_cgnapt_entry_key second_key;
+ second_key.ip = prv_ip;
+ second_key.port = prv_port;
+ second_key.pid = prv_phy_port;
+
+ position = rte_hash_lookup(napt_common_table, &second_key);
+ if (position < 0) {
+ printf("Invalid cgnapt entry position(second_key): %d\n",
+ position);
+ return 0;
+ }
+
+ *entry_ptr2 = &napt_hash_tbl_entries[position];
+
+ return 1;
+}
+#endif
+
+int add_dynamic_cgnapt_entry_alg(
+ struct pipeline *p,
+ struct pipeline_cgnapt_entry_key *key,
+ struct cgnapt_table_entry **entry_ptr1,
+ struct cgnapt_table_entry **entry_ptr2)
+{
+ int port_num = 0, ret;
+
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 1) {
+ printf("Th%d add_dynamic_cgnapt_entry key detail Entry:"
+ "0x%x, %d, %d\n", p_nat->pipeline_num, key->ip, key->port,
+ key->pid);
+ }
+ #endif
+
+ int32_t position = rte_hash_lookup(napt_common_table, key);
+ if (position >= 0) {
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 1) {
+ printf("%s: cgnapt entry exists in "
+ "position(first_key): %d\n", __func__, position);
+ }
+ #endif
+ *entry_ptr1 = &napt_hash_tbl_entries[position];
+ /* not required, as it is not used in the caller */
+ *entry_ptr2 = NULL;
+ return 1;
+ }
+
+
+ ret = increment_max_port_counter(key->ip, key->pid, p_nat);
+ if (ret == MAX_PORT_INC_ERROR) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("add_dynamic_cgnapt_entry:"
+ "increment_max_port_counter-1 failed\n");
+ #endif
+
+ return 0;
+ }
+
+ if (ret == MAX_PORT_INC_REACHED) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("add_dynamic_cgnapt_entry:"
+ "increment_max_port_counter-2 failed\n");
+ #endif
+
+ return 0;
+ }
+
+ uint32_t public_ip;
+ port_num = get_free_iport(p_nat, &public_ip);
+
+ if (port_num == -1) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("add_dynamic_cgnapt_entry: %d\n", port_num);
+ printf("add_dynamic_cgnapt_entry key detail:0x%x, "
+ "%d, %d\n", key->ip, key->port, key->pid);
+ }
+ #endif
+
+ return 0;
+ }
+
+ /* check for max_clients_per_ip */
+ if (rte_atomic16_read
+ (&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) % 16].count) ==
+ p_nat->max_clients_per_ip) {
+		/* For now just bail out.
+		 * In the future we could consider
+		 * retrying to get a new iport.
+		 */
+ release_iport(port_num, public_ip, p_nat);
+
+ return 0;
+ }
+
+ rte_atomic16_inc(&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) %
+ 16].count);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if ((rte_jhash(&public_ip, 4, 0) % 16) == 8)
+		printf("pub ip:%x count:%d\n", public_ip,
+ rte_atomic16_read(&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) % 16].count));
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 0) {
+ printf("add_dynamic_cgnapt_entry: %d\n",
+ port_num);
+ printf("add_dynamic_cgnapt_entry key detail: "
+ "0x%x, %d, %d\n", key->ip, key->port, key->pid);
+ }
+ #endif
+
+ struct cgnapt_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ /* made it configurable below */
+ {.port_id = p->port_out_id[0]},
+ },
+
+ .data = {
+ .prv_port = key->port,
+ .pub_ip = public_ip,
+ .pub_port = port_num,
+ .prv_phy_port = key->pid,
+ .pub_phy_port = get_pub_to_prv_port(
+ &public_ip,
+ IP_VERSION_4),
+ .ttl = 0,
+ /* if(timeout == -1) : static entry
+ * if(timeout == 0 ) : dynamic entry
+ * if(timeout > 0 ) : PCP requested entry
+ */
+ .timeout = 0,
+ #ifdef PCP_ENABLE
+ .timer = NULL,
+ #endif
+ }
+ };
+
+ entry.data.u.prv_ip = key->ip;
+ entry.data.type = CGNAPT_ENTRY_IPV4;
+
+ entry.head.port_id = entry.data.pub_phy_port; /* outgoing port info */
+
+ struct pipeline_cgnapt_entry_key second_key;
+ /* Need to add a second ingress entry */
+ second_key.ip = public_ip;
+ second_key.port = port_num;
+ second_key.pid = 0xffff;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("add_dynamic_cgnapt_entry second key detail:"
+ "0x%x, %d, %d\n", second_key.ip, second_key.port,
+ second_key.pid);
+ #endif
+
+ int32_t position1 = rte_hash_add_key(napt_common_table, (void *)key);
+
+ if (position1 < 0) {
+ printf("CG-NAPT entry add failed ...returning "
+ "without adding ... %d\n", position1);
+ return 0;
+ }
+
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG) {
+ printf("add_dynamic_cgnapt_entry:");
+ print_key(key);
+ print_cgnapt_entry(&entry);
+ }
+ #endif
+
+ memcpy(&napt_hash_tbl_entries[position1], &entry,
+ sizeof(struct cgnapt_table_entry));
+
+ /* this pointer is returned to pkt miss function */
+ *entry_ptr1 = &napt_hash_tbl_entries[position1];
+
+ p_nat->n_cgnapt_entry_added++;
+ p_nat->dynCgnaptCount++;
+
+ /* Now modify the forward port for reverse entry */
+
+ /* outgoing port info */
+ entry.head.port_id = entry.data.prv_phy_port;
+
+ int32_t position2 = rte_hash_add_key(napt_common_table, &second_key);
+
+ if (position2 < 0) {
+ printf("CG-NAPT entry reverse bulk add failed ..."
+ "returning with fwd add ...%d\n",
+ position2);
+ return 0;
+ }
+
+ memcpy(&napt_hash_tbl_entries[position2], &entry,
+ sizeof(struct cgnapt_table_entry));
+
+ *entry_ptr2 = &napt_hash_tbl_entries[position2];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 1) {
+ printf("add_dynamic_cgnapt_entry position: %d, %d\n",
+ position1, position2);
+ printf("add_dynamic_cgnapt_entry: entry_ptr1: %p, "
+ "entry_ptr2: %p\n", *entry_ptr1, *entry_ptr2);
+ }
+ #endif
+
+ timer_thread_enqueue(key, &second_key, *entry_ptr1,
+ *entry_ptr2, (struct pipeline *)p_nat);
+
+ p_nat->n_cgnapt_entry_added++;
+ p_nat->dynCgnaptCount++;
+
+ return 1;
+}
+
+#endif
+
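+/**
+ * Compute L3/L4 checksums for the given packet using NIC offload flags.
+ * TCP/UDP checksums are offloaded to hardware (only the pseudo-header
+ * checksum is precomputed here); the ICMPv4 checksum is computed in software.
+ *
+ * @param pkt
+ *  A pointer to the packet mbuf
+ * @param ver
+ *  Packet type / translation direction (enum PKT_TYPE)
+ */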
+void hw_checksum(struct rte_mbuf *pkt, enum PKT_TYPE ver)
+{
+ struct tcp_hdr *tcp = NULL;
+ struct udp_hdr *udp = NULL;
+ struct icmp_hdr *icmp = NULL;
+ uint8_t *protocol;
+ void *ip_header = NULL;
+ uint16_t prot_offset = 0;
+ uint32_t pkt_type_is_ipv4 = 1;
+ int temp = 0;
+ pkt->ol_flags |= PKT_TX_IP_CKSUM;
+ pkt->l2_len = ETH_HDR_SIZE;
+
+
+
+ switch (ver) {
+ case PKT_TYPE_IPV4to6:
+ temp = -20;
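+		/* fall through: processed as IPv6 with the header offset shifted by temp */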
+ case PKT_TYPE_IPV6:
+
+ ip_header = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + temp);
+
+ pkt_type_is_ipv4 = 0;
+ pkt->ol_flags |= PKT_TX_IPV6;
+ pkt->l3_len =
+ sizeof(struct ipv6_hdr);
+ tcp = (struct tcp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv6_hdr));
+ udp = (struct udp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv6_hdr));
+ icmp = (struct icmp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv6_hdr));
+
+ prot_offset = PROT_OFST_IP6 + temp;
+ break;
+ case PKT_TYPE_IPV6to4:
+ temp = 20;
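+		/* fall through: processed as IPv4 with the header offset shifted by temp */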
+ case PKT_TYPE_IPV4:
+
+ ip_header = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + temp);
+
+ pkt->ol_flags |= PKT_TX_IPV4;
+ pkt->l3_len =
+ sizeof(struct ipv4_hdr);
+ tcp = (struct tcp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv4_hdr));
+ udp = (struct udp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv4_hdr));
+ icmp = (struct icmp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv4_hdr));
+ struct ipv4_hdr *ip_hdr =
+ (struct ipv4_hdr *)ip_header;
+ ip_hdr->hdr_checksum = 0;
+
+ prot_offset = PROT_OFST_IP4 + temp;
+ break;
+ default:
+ printf("hw_checksum: pkt version is invalid\n");
+ }
+ protocol = (uint8_t *) RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ prot_offset);
+
+ switch (*protocol) {
+ case IP_PROTOCOL_TCP: /* 6 */
+ tcp->cksum = 0;
+ pkt->ol_flags |= PKT_TX_TCP_CKSUM;
+ if (pkt_type_is_ipv4) {
+ tcp->cksum = rte_ipv4_phdr_cksum(
+ (struct ipv4_hdr *)ip_header,
+ pkt->ol_flags);
+ } else {
+ tcp->cksum = rte_ipv6_phdr_cksum(
+ (struct ipv6_hdr *)ip_header,
+ pkt->ol_flags);
+ }
+ break;
+ case IP_PROTOCOL_UDP: /* 17 */
+ udp->dgram_cksum = 0;
+ pkt->ol_flags |= PKT_TX_UDP_CKSUM;
+ if (pkt_type_is_ipv4) {
+ udp->dgram_cksum =
+ rte_ipv4_phdr_cksum(
+ (struct ipv4_hdr *)ip_header,
+ pkt->ol_flags);
+ } else {
+ udp->dgram_cksum =
+ rte_ipv6_phdr_cksum(
+ (struct ipv6_hdr *)ip_header,
+ pkt->ol_flags);
+ }
+ break;
+ case IP_PROTOCOL_ICMP: /* 1 */
+ if (pkt_type_is_ipv4) {
+ /* ICMP checksum code */
+ struct ipv4_hdr *ip_hdr =
+ (struct ipv4_hdr *)ip_header;
+ int size = rte_bswap16(ip_hdr->total_length) - 20;
+ icmp->icmp_cksum = 0;
+ icmp->icmp_cksum =
+ ~rte_raw_cksum(icmp,
+ size);
+ }
+ break;
+
+ default:
+		printf("hw_checksum(): Neither TCP, UDP nor ICMP pkt\n");
+ break;
+ }
+}
+
+
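+/**
+ * Compute L3/L4 checksums for the given packet entirely in software.
+ *
+ * @param pkt
+ *  A pointer to the packet mbuf
+ * @param ver
+ *  Packet type / translation direction (enum PKT_TYPE)
+ */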
+void sw_checksum(struct rte_mbuf *pkt, enum PKT_TYPE ver)
+{
+ struct tcp_hdr *tcp = NULL;
+ struct udp_hdr *udp = NULL;
+ struct icmp_hdr *icmp = NULL;
+ uint8_t *protocol;
+ void *ip_header = NULL;
+ uint16_t prot_offset = 0;
+ uint32_t pkt_type_is_ipv4 = 1;
+ int temp = 0;
+
+ switch (ver) {
+ case PKT_TYPE_IPV4to6:
+ temp = -20;
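+		/* fall through: processed as IPv6 with the header offset shifted by temp */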
+ case PKT_TYPE_IPV6:
+
+ ip_header = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + temp);
+
+ pkt_type_is_ipv4 = 0;
+ tcp = (struct tcp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv6_hdr));
+ udp = (struct udp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv6_hdr));
+ icmp = (struct icmp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv6_hdr));
+
+ prot_offset = PROT_OFST_IP6 + temp;
+ break;
+ case PKT_TYPE_IPV6to4:
+ temp = 20;
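+		/* fall through: processed as IPv4 with the header offset shifted by temp */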
+ case PKT_TYPE_IPV4:
+
+ ip_header = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + temp);
+
+ tcp = (struct tcp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv4_hdr));
+ udp = (struct udp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv4_hdr));
+ icmp = (struct icmp_hdr *)
+ ((unsigned char *)ip_header +
+ sizeof(struct ipv4_hdr));
+
+ prot_offset = PROT_OFST_IP4 + temp;
+ break;
+ default:
+ printf("sw_checksum: pkt version is invalid\n");
+ }
+ protocol = (uint8_t *) RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ prot_offset);
+
+ switch (*protocol) {
+ case IP_PROTOCOL_TCP: /* 6 */
+ tcp->cksum = 0;
+ if (pkt_type_is_ipv4) {
+ struct ipv4_hdr *ip_hdr =
+ (struct ipv4_hdr *)ip_header;
+ tcp->cksum = rte_ipv4_udptcp_cksum(ip_hdr,
+ (void *)tcp);
+ ip_hdr->hdr_checksum = 0;
+ ip_hdr->hdr_checksum = rte_ipv4_cksum(
+ (struct ipv4_hdr *)ip_hdr);
+ } else {
+ tcp->cksum = rte_ipv6_udptcp_cksum(
+ (struct ipv6_hdr *)
+ ip_header, (void *)tcp);
+ }
+ break;
+ case IP_PROTOCOL_UDP: /* 17 */
+ udp->dgram_cksum = 0;
+ if (pkt_type_is_ipv4) {
+ struct ipv4_hdr *ip_hdr =
+ (struct ipv4_hdr *)ip_header;
+ udp->dgram_cksum = rte_ipv4_udptcp_cksum(
+ ip_hdr, (void *)udp);
+ ip_hdr->hdr_checksum = 0;
+ ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
+ } else {
+ udp->dgram_cksum = rte_ipv6_udptcp_cksum(
+ (struct ipv6_hdr *)
+ ip_header, (void *)udp);
+ }
+ break;
+ case IP_PROTOCOL_ICMP: /* 1 */
+ if (pkt_type_is_ipv4) {
+ /* ICMP checksum code */
+ struct ipv4_hdr *ip_hdr =
+ (struct ipv4_hdr *)ip_header;
+ int size = rte_bswap16(ip_hdr->total_length) - 20;
+ icmp->icmp_cksum = 0;
+ icmp->icmp_cksum =
+ ~rte_raw_cksum(icmp,
+ size);
+ ip_hdr->hdr_checksum = 0;
+ ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
+ }
+ break;
+
+ default:
+		printf("sw_checksum(): Neither TCP, UDP nor ICMP pkt\n");
+ break;
+ }
+}
+
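+/**
+ * Check for ARP and ICMP/ICMPv6 packets destined to the VNF itself and
+ * divert them to the ARP/ICMP handler port instead of NAPT processing.
+ *
+ * @param pkt
+ *  A pointer to the packet mbuf
+ * @param pkt_mask
+ *  Packet mask of the current packet
+ * @param p_nat
+ *  CGNAPT pipeline ptr
+ *
+ * @return
+ *  0 if the packet was diverted or dropped, 1 if NAPT processing continues
+ */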
+static uint8_t check_arp_icmp(
+ struct rte_mbuf *pkt,
+ uint64_t pkt_mask,
+ struct pipeline_cgnapt *p_nat)
+{
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+ uint16_t *eth_proto = RTE_MBUF_METADATA_UINT16_PTR(
+ pkt, eth_proto_offset);
+ struct app_link_params *link;
+ uint8_t solicited_node_multicast_addr[16] = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
+
+ /* ARP outport number */
+ uint16_t out_port = p_nat->p.n_ports_out - 1;
+
+ uint8_t *protocol;
+ uint32_t prot_offset;
+
+ link = &myApp->link_params[pkt->port];
+
+
+ switch (rte_be_to_cpu_16(*eth_proto)) {
+
+ case ETH_TYPE_ARP:
+
+ rte_pipeline_port_out_packet_insert(
+ p_nat->p.p,
+ out_port,
+ pkt);
+
+		/*
+		 * The pkt mask should be changed here,
+		 * not the drop mask
+		 */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->arpicmpPktCount++;
+
+ return 0;
+ break;
+ case ETH_TYPE_IPV4: {
+		/* header room + eth hdr size +
+		 * dst_addr offset in ip header
+		 */
+ uint32_t dst_addr_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ dst_addr_offset);
+ prot_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_PROTOCOL_OFST;
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ prot_offset);
+ if ((*protocol == IP_PROTOCOL_ICMP) &&
+ link->ip == rte_be_to_cpu_32(*dst_addr)) {
+
+ if (is_phy_port_privte(pkt->port)) {
+
+ rte_pipeline_port_out_packet_insert(
+ p_nat->p.p, out_port, pkt);
+
+				/*
+				 * The pkt mask should be changed here,
+				 * not the drop mask
+				 */
+
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->arpicmpPktCount++;
+
+ return 0;
+ }
+ }
+ return 1;
+ }
+ break;
+
+ #ifdef IPV6
+ case ETH_TYPE_IPV6:
+ if (dual_stack_enable) {
+
+			/* Commented code may be required for future use;
+			 * please keep it
+			 */
+ //uint32_t dst_addr_offset = MBUF_HDR_ROOM +
+ // ETH_HDR_SIZE + IPV6_HDR_DST_ADR_OFST;
+ //uint32_t *dst_addr =
+ // RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ // dst_addr_offset);
+ uint32_t prot_offset_ipv6 = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
+ struct ipv6_hdr *ipv6_h;
+
+			ipv6_h = (struct ipv6_hdr *)
+				RTE_MBUF_METADATA_UINT8_PTR(pkt,
+					MBUF_HDR_ROOM + ETH_HDR_SIZE);
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ prot_offset_ipv6);
+
+ if (ipv6_h->proto == ICMPV6_PROTOCOL_ID) {
+ if (!memcmp(ipv6_h->dst_addr, link->ipv6, 16)
+ || !memcmp(ipv6_h->dst_addr,
+ solicited_node_multicast_addr, 13)) {
+ rte_pipeline_port_out_packet_insert(
+ p_nat->p.p, out_port, pkt);
+					/*
+					 * The pkt mask should be changed here,
+					 * not the drop mask
+					 */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->arpicmpPktCount++;
+ } else {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount1++;
+ #endif
+ }
+ return 0;
+ }
+ }
+ break;
+ #endif
+ default:
+ return 1;
+ }
+ return 1;
+}
+
+/**
+ * Function to create the common NAPT table.
+ * Called during pipeline initialization.
+ * Creates the common NAPT table if it is not already created
+ * and stores its pointer in the global napt_common_table pointer.
+ *
+ * @param nFlows
+ * Max number of NAPT flows. This parameter is configurable via config file.
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int create_napt_common_table(uint32_t nFlows)
+{
+ if (napt_common_table != NULL) {
+ printf("napt_common_table already exists.\n");
+ return -1;
+ }
+
+ napt_common_table = rte_hash_create(&napt_common_table_hash_params);
+
+ if (napt_common_table == NULL) {
+ printf("napt_common_table creation failed.\n");
+ return -2;
+ }
+
+ uint32_t number_of_entries = nFlows;
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct cgnapt_table_entry) *
+ number_of_entries);
+ napt_hash_tbl_entries = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ if (napt_hash_tbl_entries == NULL) {
+ printf("napt_hash_tbl_entries creation failed. %d, %d\n",
+ nFlows, (int)sizeof(struct cgnapt_table_entry));
+ return -3;
+ }
+
+ return 0;
+}
+
+/**
+ * Function to initialize bulk port allocation data structures
+ * Called during pipeline initialization.
+ *
+ * Creates the port alloc ring for the VNF_set this pipeline belongs to
+ *
+ * Creates global port allocation buffer pool
+ *
+ * Initializes the port alloc ring according to config data
+ *
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 on success, negative on error.
+ */
+int napt_port_alloc_init(struct pipeline_cgnapt *p_nat)
+{
+ p_nat->allocated_ports = NULL;
+ p_nat->free_ports = NULL;
+
+ uint32_t vnf_set_num = p_nat->vnf_set;
+ /*uint32_t vnf_set_num = get_vnf_set_num(p_nat->pipeline_num); */
+
+ printf("VNF set number for CGNAPT %d is %d.\n", p_nat->pipeline_num,
+ vnf_set_num);
+ if (vnf_set_num == 0xFF) {
+ printf("VNF set number for CGNAPT %d is invalid %d.\n",
+ p_nat->pipeline_num, vnf_set_num);
+ return -1;
+ }
+
+ p_nat->port_alloc_ring = port_alloc_ring[vnf_set_num];
+ if (p_nat->port_alloc_ring != NULL) {
+ printf("CGNAPT%d port_alloc_ring already exists.\n",
+ p_nat->pipeline_num);
+ return 1;
+ }
+
+ printf("napt_port_alloc_elem_count :%d\n",
+ napt_port_alloc_elem_count);
+ napt_port_alloc_elem_count += 1;
+ napt_port_alloc_elem_count =
+ nextPowerOf2(napt_port_alloc_elem_count);
+ printf("Next power of napt_port_alloc_elem_count: %d\n",
+ napt_port_alloc_elem_count);
+
+ port_alloc_ring[vnf_set_num] =
+ rte_ring_create(napt_port_alloc_ring_name[vnf_set_num],
+ napt_port_alloc_elem_count, rte_socket_id(), 0);
+ p_nat->port_alloc_ring = port_alloc_ring[vnf_set_num];
+ if (p_nat->port_alloc_ring == NULL) {
+ printf("CGNAPT%d - Failed to create port_alloc_ring\n",
+ p_nat->pipeline_num);
+ return -1;
+ }
+
+ /* Create port alloc buffer */
+ /* Only one pool is enough for all vnf sets */
+ if (napt_port_pool == NULL) {
+
+ napt_port_pool = rte_mempool_create(
+ "napt_port_pool",
+ napt_port_alloc_elem_count,
+ sizeof(struct napt_port_alloc_elem),
+ 0, 0, NULL, NULL, NULL,
+ NULL, rte_socket_id(), 0);
+ }
+
+ if (napt_port_pool == NULL) {
+ printf("CGNAPT - Create port pool failed\n");
+ return -1;
+ }
+
+ /* Add all available public IP addresses and ports to the ring */
+ uint32_t i, j = 0;
+
+#ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag) {
+ printf("******* pub_ip_range_count:%d ***********\n",
+ p_nat->pub_ip_range_count);
+ /* Initialize all public IP's addresses */
+ int if_addrs;
+ uint32_t max_ips_remain;
+
+ for (if_addrs = 0; if_addrs < p_nat->pub_ip_range_count;
+ if_addrs++) {
+ /* Add all available addresses to the ring */
+
+ for (i = p_nat->pub_ip_range[if_addrs].start_ip;
+ i <= p_nat->pub_ip_range[if_addrs].end_ip;) {
+ /* 1. Get a port alloc buffer from napt_port_pool */
+ void *portsBuf;
+
+ if (j == 0) {
+ /* get new napt_port_alloc_elem from pool */
+ if (rte_mempool_get(napt_port_pool,
+ &portsBuf) < 0) {
+ printf("CGNAPT - Error in getting port "
+ "alloc buffer\n");
+ return -1;
+ }
+ }
+
+ /* 2. Populate it with available ports and ip addr */
+ struct napt_port_alloc_elem *pb =
+ (struct napt_port_alloc_elem *)portsBuf;
+
+ int temp;
+ temp = p_nat->pub_ip_range[if_addrs].end_ip -
+ i + 1;
+
+				/* Check if the remaining IP count is greater
+				 * than or equal to the bulk count; if not,
+				 * give the remaining count instead of the
+				 * bulk count
+				 */
+ if (temp < NUM_NAPT_PORT_BULK_ALLOC)
+ max_ips_remain = temp;
+ else
+ max_ips_remain =
+ NUM_NAPT_PORT_BULK_ALLOC;
+
+ for (j = 0; j < max_ips_remain; j++) {
+ pb->count = j + 1;
+ pb->ip_addr[j] = i + j;
+ pb->ports[j] = 0;
+ if ((i + j) ==
+ p_nat->pub_ip_range[if_addrs].
+ end_ip)
+ break;
+ }
+
+ /* 3. add the port alloc buffer to ring */
+ if (rte_ring_enqueue(p_nat->port_alloc_ring,
+ portsBuf) != 0) {
+ printf("CGNAPT%d - Enqueue error - i %d,",
+ p_nat->pipeline_num, i);
+ printf("j %d, if_addrs %d, pb %p\n",
+ j, if_addrs, pb);
+ rte_ring_dump(stdout,
+ p_nat->port_alloc_ring);
+ rte_mempool_put(napt_port_pool,
+ portsBuf);
+ return -1;
+ }
+
+ /* reset j and advance i */
+ j = 0;
+ i += max_ips_remain;
+ }
+ }
+
+ return 1;
+ }
+#endif
+
+ printf("******* p_nat->pub_ip_count:%d ***********\n",
+ p_nat->pub_ip_count);
+ /* Initialize all public IP's ports */
+ int if_ports;
+ uint32_t max_ports_remain;
+
+ for (if_ports = 0; if_ports < p_nat->pub_ip_count; if_ports++) {
+ /* Add all available ports to the ring */
+
+ for (i = p_nat->pub_ip_port_set[if_ports].start_port;
+ i <= p_nat->pub_ip_port_set[if_ports].end_port;) {
+ /* 1. Get a port alloc buffer from napt_port_pool */
+ void *portsBuf;
+
+ if (j == 0) {
+ /* get new napt_port_alloc_elem from pool */
+ if (rte_mempool_get(napt_port_pool, &portsBuf) <
+ 0) {
+ printf("CGNAPT - Error in getting "
+ "port alloc buffer\n");
+ return -1;
+ }
+ }
+
+ /* 2. Populate it with available ports and ip addr */
+ struct napt_port_alloc_elem *pb =
+ (struct napt_port_alloc_elem *)portsBuf;
+
+ int temp;
+ temp = p_nat->pub_ip_port_set[if_ports].end_port -
+ i + 1;
+			/* Check if the remaining port count is greater
+			 * than or equal to the bulk count; if not, give
+			 * the remaining count of ports instead of the
+			 * bulk count
+			 */
+ if (temp < NUM_NAPT_PORT_BULK_ALLOC)
+ max_ports_remain = temp;
+ else
+ max_ports_remain =
+ NUM_NAPT_PORT_BULK_ALLOC;
+
+ for (j = 0; j < max_ports_remain; j++) {
+ pb->count = j + 1;
+ pb->ip_addr[j] =
+ p_nat->pub_ip_port_set[if_ports].ip;
+ pb->ports[j] = i + j;
+ if ((i + j) == p_nat->pub_ip_port_set
+ [if_ports].end_port)
+ break;
+ }
+
+ /* 3. add the port alloc buffer to ring */
+ if (rte_ring_enqueue(p_nat->port_alloc_ring,
+ portsBuf) != 0) {
+ printf("CGNAPT%d - Enqueue error - i %d, j %d, "
+ " if_ports %d, pb %p\n", p_nat->pipeline_num,
+ i, j, if_ports, pb);
+
+ rte_ring_dump(stdout, p_nat->port_alloc_ring);
+ rte_mempool_put(napt_port_pool, portsBuf);
+ return -1;
+ }
+
+ /* reset j and advance i */
+ j = 0;
+ i += max_ports_remain;
+ }
+ }
+
+ return 1;
+}
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] = pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_cgnapt_msg_req_custom_handler,
+};
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_CGNAPT_MSG_REQ_ENTRY_ADD] =
+ pipeline_cgnapt_msg_req_entry_add_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_ENTRY_DEL] =
+ pipeline_cgnapt_msg_req_entry_del_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_ENTRY_SYNC] =
+ pipeline_cgnapt_msg_req_entry_sync_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_ENTRY_DBG] =
+ pipeline_cgnapt_msg_req_entry_dbg_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_ENTRY_ADDM] =
+ pipeline_cgnapt_msg_req_entry_addm_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_VER] =
+ pipeline_cgnapt_msg_req_ver_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_NSP_ADD] =
+ pipeline_cgnapt_msg_req_nsp_add_handler,
+ [PIPELINE_CGNAPT_MSG_REQ_NSP_DEL] =
+ pipeline_cgnapt_msg_req_nsp_del_handler,
+
+ #ifdef PCP_ENABLE
+ [PIPELINE_CGNAPT_MSG_REQ_PCP] =
+ pipeline_cgnapt_msg_req_pcp_handler,
+ #endif
+};
+
+/**
+ * Function to convert an IPv6 packet to IPv4 packet
+ *
+ * @param pkt
+ * A pointer to packet mbuf
+ * @param in_ipv6_hdr
+ * A pointer to IPv6 header in the given pkt
+ *
+ */
+static void
+convert_ipv6_to_ipv4(struct rte_mbuf *pkt, struct ipv6_hdr *in_ipv6_hdr)
+{
+ uint32_t ip_hdr_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE;
+
+ uint8_t *eth_hdr_p = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *ipv6_hdr_p = RTE_MBUF_METADATA_UINT8_PTR(pkt, ip_hdr_offset);
+
+ struct ether_hdr eth_hdr;
+ struct ipv4_hdr *ipv4_hdr_p;
+ uint16_t frag_off = 0x4000;
+ struct cgnapt_nsp_node *ll = nsp_ll;
+ uint8_t ipv4_dest[4];
+ int nsp = 0;
+
+ memcpy(&eth_hdr, eth_hdr_p, sizeof(struct ether_hdr));
+ memcpy(in_ipv6_hdr, ipv6_hdr_p, sizeof(struct ipv6_hdr));
+
+ eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+
+ char *data_area_p = rte_pktmbuf_adj(pkt, 20);
+ if (data_area_p == NULL) {
+ printf("convert_ipv6_to_ipv4:data_area_p is NULL\n");
+ return;
+ }
+ ipv4_hdr_p = (struct ipv4_hdr *)(data_area_p + ETH_HDR_SIZE);
+ memset(ipv4_hdr_p, 0, sizeof(struct ipv4_hdr));
+
+ memcpy(data_area_p, &eth_hdr, sizeof(struct ether_hdr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG == 1)
+ printf("convert_ipv6_to_ipv4: eth_hdr_p(%p), data_area_p(%p), "
+ "ipv4_hdr_p(%p)\n", eth_hdr_p, data_area_p, ipv4_hdr_p);
+ #endif
+
+ ipv4_hdr_p->version_ihl = 0x4 << 4 | 0x5;
+	/* Traffic Class is bits 20-27 of vtc_flow: mask, then shift down */
+	ipv4_hdr_p->type_of_service =
+		(rte_be_to_cpu_32(in_ipv6_hdr->vtc_flow) & 0x0ff00000) >> 20;
+ ipv4_hdr_p->total_length =
+ rte_cpu_to_be_16(rte_be_to_cpu_16(
+ in_ipv6_hdr->payload_len) + 20);
+ ipv4_hdr_p->packet_id = 0;
+ ipv4_hdr_p->fragment_offset = rte_cpu_to_be_16(frag_off);
+ ipv4_hdr_p->time_to_live = in_ipv6_hdr->hop_limits;
+ ipv4_hdr_p->next_proto_id = in_ipv6_hdr->proto;
+ ipv4_hdr_p->hdr_checksum = 0;
+ ipv4_hdr_p->src_addr = 0;
+
+ while (ll != NULL) {
+ if (!memcmp
+ (&in_ipv6_hdr->dst_addr[0], &ll->nsp.prefix[0],
+ ll->nsp.depth / 8)) {
+ if (ll->nsp.depth == 32)
+ memcpy(&ipv4_dest[0], &in_ipv6_hdr->dst_addr[4],
+ 4);
+ else if (ll->nsp.depth == 40) {
+ ipv4_dest[0] = in_ipv6_hdr->dst_addr[5];
+ ipv4_dest[1] = in_ipv6_hdr->dst_addr[6];
+ ipv4_dest[2] = in_ipv6_hdr->dst_addr[7];
+ ipv4_dest[3] = in_ipv6_hdr->dst_addr[9];
+ } else if (ll->nsp.depth == 48) {
+ ipv4_dest[0] = in_ipv6_hdr->dst_addr[6];
+ ipv4_dest[1] = in_ipv6_hdr->dst_addr[7];
+ ipv4_dest[2] = in_ipv6_hdr->dst_addr[9];
+ ipv4_dest[3] = in_ipv6_hdr->dst_addr[10];
+ } else if (ll->nsp.depth == 56) {
+ ipv4_dest[0] = in_ipv6_hdr->dst_addr[7];
+ ipv4_dest[1] = in_ipv6_hdr->dst_addr[9];
+ ipv4_dest[2] = in_ipv6_hdr->dst_addr[10];
+ ipv4_dest[3] = in_ipv6_hdr->dst_addr[11];
+ } else if (ll->nsp.depth == 64) {
+ ipv4_dest[0] = in_ipv6_hdr->dst_addr[9];
+ ipv4_dest[1] = in_ipv6_hdr->dst_addr[10];
+ ipv4_dest[2] = in_ipv6_hdr->dst_addr[11];
+ ipv4_dest[3] = in_ipv6_hdr->dst_addr[12];
+ } else if (ll->nsp.depth == 96) {
+ ipv4_dest[0] = in_ipv6_hdr->dst_addr[12];
+ ipv4_dest[1] = in_ipv6_hdr->dst_addr[13];
+ ipv4_dest[2] = in_ipv6_hdr->dst_addr[14];
+ ipv4_dest[3] = in_ipv6_hdr->dst_addr[15];
+ }
+
+ nsp = 1;
+ break;
+ }
+
+ ll = ll->next;
+ }
+
+ if (nsp)
+ memcpy(&ipv4_hdr_p->dst_addr, &ipv4_dest[0], 4);
+ else
+ memcpy(&ipv4_hdr_p->dst_addr, &in_ipv6_hdr->dst_addr[12], 4);
+
+}
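+
+/*
+ * Example (illustrative): with the NAT64 well-known prefix 64:ff9b::/96
+ * (RFC 6052) the IPv4 address is embedded in the last four bytes, so a
+ * destination of 64:ff9b::c633:6401 yields ipv4_dest = 198.51.100.1
+ * (0xc6336401). For the shorter prefix lengths the embedded address skips
+ * byte 8 (the reserved 'u' octet from RFC 6052), which is why the indices
+ * above jump from 7 to 9.
+ */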
+
+/**
+ * Function to convert an IPv4 packet to IPv6 packet
+ *
+ * @param pkt
+ * A pointer to packet mbuf
+ * @param in_ipv4_hdr
+ * A pointer to IPv4 header in the given pkt
+ *
+ */
+static void
+convert_ipv4_to_ipv6(struct rte_mbuf *pkt, struct ipv4_hdr *in_ipv4_hdr)
+{
+ uint32_t ip_hdr_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE;
+
+ uint8_t *eth_hdr_p = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *ipv4_hdr_p = RTE_MBUF_METADATA_UINT8_PTR(pkt, ip_hdr_offset);
+
+ struct ether_hdr eth_hdr;
+ struct ipv6_hdr *ipv6_hdr_p;
+
+ memcpy(&eth_hdr, eth_hdr_p, sizeof(struct ether_hdr));
+ memcpy(in_ipv4_hdr, ipv4_hdr_p, sizeof(struct ipv4_hdr));
+
+ eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+
+ char *data_area_p = rte_pktmbuf_prepend(pkt, 20);
+ if (data_area_p == NULL) {
+ printf("convert_ipv4_to_ipv6:data_area_p is NULL\n");
+ return;
+ }
+ ipv6_hdr_p = (struct ipv6_hdr *)(data_area_p + ETH_HDR_SIZE);
+ memset(ipv6_hdr_p, 0, sizeof(struct ipv6_hdr));
+
+ memcpy(data_area_p, &eth_hdr, sizeof(struct ether_hdr));
+
+ ipv6_hdr_p->vtc_flow =
+ rte_cpu_to_be_32((0x6 << 28) |
+ (in_ipv4_hdr->type_of_service << 20));
+ ipv6_hdr_p->payload_len =
+ rte_cpu_to_be_16(rte_be_to_cpu_16(
+ in_ipv4_hdr->total_length) - 20);
+ ipv6_hdr_p->proto = in_ipv4_hdr->next_proto_id;
+ ipv6_hdr_p->hop_limits = in_ipv4_hdr->time_to_live;
+
+ ipv6_hdr_p->src_addr[0] = 0x00;
+ ipv6_hdr_p->src_addr[1] = 0x64;
+ ipv6_hdr_p->src_addr[2] = 0xff;
+ ipv6_hdr_p->src_addr[3] = 0x9b;
+ ipv6_hdr_p->src_addr[4] = 0x00;
+ ipv6_hdr_p->src_addr[5] = 0x00;
+ ipv6_hdr_p->src_addr[6] = 0x00;
+ ipv6_hdr_p->src_addr[7] = 0x00;
+ ipv6_hdr_p->src_addr[8] = 0x00;
+ ipv6_hdr_p->src_addr[9] = 0x00;
+ ipv6_hdr_p->src_addr[10] = 0x00;
+ ipv6_hdr_p->src_addr[11] = 0x00;
+ memcpy(&ipv6_hdr_p->src_addr[12], &in_ipv4_hdr->src_addr, 4);
+
+ memset(&ipv6_hdr_p->dst_addr, 0, 16);
+
+ return;
+
+}
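+
+/*
+ * Example (illustrative): an IPv4 source of 203.0.113.5 becomes the IPv6
+ * source 64:ff9b::cb00:7105, i.e. the NAT64 well-known prefix written byte
+ * by byte above followed by the original IPv4 address. The destination is
+ * left zeroed here; the caller (e.g. the ingress path of
+ * cgnapt_in_port_ah_mix()) overwrites it with the private IPv6 address
+ * stored in the NAPT entry.
+ */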
+
+/**
+ * Output port handler
+ *
+ * @param pkt
+ * A pointer to packet mbuf
+ * @param arg
+ * Unused void pointer
+ *
+ */
+#ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+static void
+pkt_work_cgnapt_out(__rte_unused struct rte_mbuf *pkt, __rte_unused void *arg)
+{
+#ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+ if ((cgnapt_num_func_to_inst == 5)
+ && (cgnapt_inst_index < INST_ARRAY_SIZE)) {
+ if (cgnapt_inst5_flag == 0) {
+ uint8_t *inst5_sig =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ CGNAPT_INST5_OFST);
+ if (*inst5_sig == CGNAPT_INST5_SIG) {
+ cgnapt_inst5_flag = 1;
+ inst_end_time[cgnapt_inst_index] =
+ rte_get_tsc_cycles();
+ cgnapt_inst_index++;
+ }
+ }
+ }
+#endif
+
+ /* cgnapt_pkt_out_count++; */
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG)
+ print_pkt(pkt);
+ #endif
+}
+#endif
+
+/**
+ * Output port handler that processes 4 packets at a time
+ *
+ * @param pkt
+ *  A pointer to an array of packet mbufs
+ * @param arg
+ *  Handler argument pointer (unused)
+ *
+ */
+#ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+static void pkt4_work_cgnapt_out(struct rte_mbuf **pkt, void *arg)
+{
+ (void)pkt;
+ (void)arg;
+/* TO BE IMPLEMENTED IF REQUIRED */
+}
+#endif
+
+#ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+PIPELINE_CGNAPT_PORT_OUT_AH(port_out_ah_cgnapt,
+ pkt_work_cgnapt_out, pkt4_work_cgnapt_out);
+
+PIPELINE_CGNAPT_PORT_OUT_BAH(port_out_ah_cgnapt_bulk,
+ pkt_work_cgnapt_out, pkt4_work_cgnapt_out);
+#endif
+
+/**
+ * Function to validate the packet and return version
+ *
+ * @param pkt
+ * A pointer to packet mbuf
+ *
+ * @return
+ * IP version of the valid pkt, -1 if invalid pkt
+ */
+int rte_get_pkt_ver(struct rte_mbuf *pkt)
+{
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+ uint16_t *eth_proto =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
+
+ if (*eth_proto == rte_be_to_cpu_16(ETHER_TYPE_IPv4))
+ return IP_VERSION_4;
+
+ if (dual_stack_enable
+ && (*eth_proto == rte_be_to_cpu_16(ETHER_TYPE_IPv6)))
+ return IP_VERSION_6;
+
+	/* Neither IPv4 nor (dual-stack) IPv6: not a packet we handle */
+
+ return -1;
+}
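+
+/*
+ * The EtherType is read directly from the frame (offset 12 of the Ethernet
+ * header) and is therefore still in network byte order, hence the byte swap
+ * applied to the constants: 0x0800 maps to IP_VERSION_4 and, only when
+ * dual_stack_enable is set, 0x86dd maps to IP_VERSION_6; anything else is
+ * reported as invalid (-1).
+ */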
+
+/**
+ * A method to print the NAPT entry
+ *
+ * @param ent
+ * A pointer to struct cgnapt_table_entry
+ */
+void my_print_entry(struct cgnapt_table_entry *ent)
+{
+ printf("CGNAPT key:\n");
+ printf("entry_type :%d\n", ent->data.type);
+ printf("prv_ip: %x %x %x %x\n", ent->data.u.u32_prv_ipv6[0],
+ ent->data.u.u32_prv_ipv6[1], ent->data.u.u32_prv_ipv6[2],
+ ent->data.u.u32_prv_ipv6[3]);
+ printf("prv_port:%d\n", ent->data.prv_port);
+
+ printf("pub_ip:%x\n", ent->data.pub_ip);
+ printf("prv_phy_port:%d\n", ent->data.prv_phy_port);
+ printf("pub_phy_port:%d\n", ent->data.pub_phy_port);
+}
+
+/**
+ * Function to print common CGNAPT table entries
+ *
+ */
+void print_common_table(void)
+{
+ uint32_t count = 0;
+ const void *key;
+ void *data;
+ uint32_t next = 0;
+ int32_t index = 0;
+ do {
+ index = rte_hash_iterate(napt_common_table,
+ &key, &data, &next);
+
+ if ((index != -EINVAL) && (index != -ENOENT)) {
+ printf("\n%04d ", count);
+ //print_key((struct pipeline_cgnapt_entry_key *)key);
+ rte_hexdump(stdout, "KEY", key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ int32_t position = rte_hash_lookup(
+ napt_common_table, key);
+ print_cgnapt_entry(&napt_hash_tbl_entries[position]);
+ }
+
+ count++;
+ } while (index != -ENOENT);
+}
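+
+/*
+ * rte_hash_iterate() returns the entry position (>= 0) while entries remain,
+ * -EINVAL on bad parameters and -ENOENT once the whole table has been
+ * walked, which is what terminates the loop above.
+ */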
+
+/**
+ * Input port handler for mixed traffic
+ * This is the main method in this file when running in mixed traffic mode.
+ * Starting from the packet burst, it filters unwanted packets,
+ * calculates keys, performs a bulk lookup and then, based on the
+ * result, updates the NAPT table and applies the NAPT translation.
+ *
+ * @param rte_p
+ * A pointer to struct rte_pipeline
+ * @param pkts
+ *  A pointer to an array of packet mbufs
+ * @param n_pkts
+ * Number of packets in the burst
+ * @param arg
+ * Void pointer
+ *
+ * @return
+ * int that is not checked by caller
+ */
+
+static int cgnapt_in_port_ah_mix(struct rte_pipeline *rte_p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts, void *arg)
+{
+/*
+* Code flow
+*
+* 1. Read packet version, if invalid drop the packet
+* 2. Check protocol, if not UDP or TCP drop the packet
+* 3. Bring all valid packets together - useful for bulk lookup
+* and calculate key for all packets
+* a. If IPv4 : calculate key with full IP
+* b. If IPv6 : calculate key with last 32-bit of IP
+* 4. Do bulk lookup with rte_hash_lookup_bulk(), if something went wrong
+* drop all packets
+* 5. For lookup hit packets, read entry from table
+* 6. For lookup miss packets, add dynamic entry to table
+* 7. If pkt is IPv6
+* a. If egress pkt, convert to IPv4 and NAPT it
+* b. If ingress, drop the pkt
+* 8. If pkt is IPv4
+* a. If egress pkt, NAPT it. Get MAC
+* b. If first ingress pkt (with no egress entry), drop the pkt
+* If not first ingress pkt
+* I. If IPv6 converted packet, convert back to IPv6,
+*       NAPT it & get MAC
+* II. If IPv4 packet, NAPT it & get MAC
+* 9. Send all packets out to corresponding ports
+*/
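+/*
+ * Key construction used by the loop below: for egress packets the key is
+ * {pid = rx physical port, ip = source IP (full IPv4 address, or the low
+ * 32 bits of an IPv6 source), port = source L4 port}; for ingress packets
+ * it is {pid = 0xffff, ip = destination IP, port = destination L4 port, or
+ * the ICMP Identifier for ICMP}. With nat_only_config_flag the port field
+ * is wildcarded to 0xffff.
+ */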
+ struct pipeline_cgnapt_in_port_h_arg *ap = arg;
+ struct pipeline_cgnapt *p_nat = ap->p;
+ uint8_t compacting_map[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t packets_for_lookup = 0;
+ uint32_t i;
+
+ p_nat->valid_packets = 0;
+ p_nat->invalid_packets = 0;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("cgnapt_key hit fn: %" PRIu32 "\n", n_pkts);
+ #endif
+
+ p_nat->pkt_burst_cnt = 0; /* for dynamic napt */
+
+ uint16_t phy_port = 0;
+ uint16_t *src_port = NULL;
+ uint16_t *dst_port = NULL;
+ uint32_t *src_addr = NULL;
+ uint32_t *dst_addr = NULL;
+ uint8_t *protocol = NULL;
+ uint8_t *eth_dest = NULL;
+ uint8_t *eth_src = NULL;
+ uint16_t src_port_offset = 0;
+ uint16_t dst_port_offset = 0;
+ uint16_t src_addr_offset = 0;
+ uint16_t dst_addr_offset = 0;
+ uint16_t prot_offset = 0;
+ uint16_t eth_offset = 0;
+ int ver = 0;
+
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4;
+
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+
+ for (i = 0; i < n_pkts; i++) {
+ p_nat->receivedPktCount++;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << i;
+
+ /* remember this pkt as valid pkt */
+ p_nat->valid_packets |= pkt_mask;
+
+ struct rte_mbuf *pkt = pkts[i];
+
+ if (enable_hwlb)
+ if (!check_arp_icmp(pkt, pkt_mask, p_nat))
+ continue;
+
+ int ver = rte_get_pkt_ver(pkt);
+
+ #ifdef CGNAPT_DBG_PRNT
+ printf("ver no. of the pkt:%d\n", ver);
+ #endif
+
+ if (unlikely(ver < 0)) {
+			/* Not a valid pkt, ignore. */
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount1++;
+ #endif
+ continue;
+ }
+ if (ver == 4)
+ prot_offset = PROT_OFST_IP4;
+ else
+ prot_offset = PROT_OFST_IP6;
+ protocol =
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ prot_offset);
+ if (!
+ (*protocol == IP_PROTOCOL_TCP
+ || *protocol == IP_PROTOCOL_UDP
+ || *protocol == IP_PROTOCOL_ICMP)) {
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ continue;
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt);
+ #endif
+
+ #ifdef PCP_ENABLE
+		/* PCP handling
+		 * 1. Handle PCP for egress traffic
+		 * 2. If PCP, send the response (pkt) out of the same port
+		 * 3. Drop the PCP packet; it must not be added to the NAPT table
+ */
+ if (pcp_enable) {
+ if (*protocol == IP_PROTOCOL_UDP) {
+ struct udp_hdr *udp;
+ if (ver == 4)
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ IPV4_UDP_OFST);
+ else
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ IPV6_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt, ver, p_nat);
+ p_nat->invalid_packets |= pkt_mask;
+ continue;
+ }
+ }
+ }
+ #endif
+
+ if (ver == 4) {
+
+ src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ SRC_ADR_OFST_IP4);
+ dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ DST_ADR_OFST_IP4);
+
+ if ((*protocol == IP_PROTOCOL_TCP)
+ || (*protocol == IP_PROTOCOL_UDP)) {
+
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+
+ } else if (*protocol == IP_PROTOCOL_ICMP) {
+ /* Identifier */
+ src_port_offset = IDEN_OFST_IP4_ICMP;
+ /* Sequence number */
+ dst_port_offset = SEQN_OFST_IP4_ICMP;
+ }
+
+ src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ src_port_offset);
+ dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ dst_port_offset);
+ } else {
+
+ src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ SRC_ADR_OFST_IP6);
+ dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ DST_ADR_OFST_IP6);
+ src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ SRC_PRT_OFST_IP6);
+ dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ DST_PRT_OFST_IP6);
+ }
+ /* need to create compacted table of pointers to
+ * pass to bulk lookup
+ */
+
+ compacting_map[packets_for_lookup] = i;
+
+ //phy_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, phyport_offset);
+ phy_port = pkt->port;
+
+ struct pipeline_cgnapt_entry_key key;
+
+ memset(&key, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+ key.pid = phy_port;
+ if (get_in_port_dir(phy_port)) {
+ /* Egress */
+ if (ver == 4)
+ key.ip = rte_bswap32(*src_addr);
+ else
+ key.ip = rte_bswap32(src_addr[3]);
+ key.port = rte_bswap16(*src_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+ } else {
+ /* Ingress */
+ key.ip = rte_bswap32(*dst_addr);
+
+ if (*protocol == IP_PROTOCOL_ICMP) {
+				/* common table lookup key prepared from the
+				 * incoming ICMP packet's Identifier field
+ */
+ key.port = rte_bswap16(*src_port);
+ } else {
+ key.port = rte_bswap16(*dst_port);
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ key.pid = 0xffff;
+ }
+
+ memcpy(&(p_nat->keys[packets_for_lookup]), &key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[packets_for_lookup] =
+ &(p_nat->keys[packets_for_lookup]);
+ packets_for_lookup++;
+ }
+
+ if (unlikely(packets_for_lookup == 0)) {
+ /* no suitable packet for lookup */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->valid_packets);
+ return p_nat->valid_packets;
+ }
+
+ /* lookup entries in the common napt table */
+
+ int lookup_result = rte_hash_lookup_bulk(napt_common_table,
+ (const void **)
+ &p_nat->key_ptrs,
+ packets_for_lookup,
+ &p_nat->lkup_indx[0]);
+
+ if (unlikely(lookup_result < 0)) {
+ /* unknown error, just discard all packets */
+ printf("Unexpected hash lookup error %d, discarding all "
+ "packets", lookup_result);
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->valid_packets);
+ return 0;
+ }
+ //struct rte_pipeline_table_entry *entries[64];
+ /* Now one by one check the result of our bulk lookup */
+
+ for (i = 0; i < packets_for_lookup; i++) {
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[i];
+ /* index into packet table of this packet */
+ uint8_t pkt_index = compacting_map[i];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_index;
+
+ struct cgnapt_table_entry *entry = NULL;
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask =
+ pkt_miss_cgnapt(p_nat->key_ptrs[i],
+ pkts[pkt_index],
+ &table_entry,
+ &p_nat->valid_packets,
+ pkt_index,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for
+ * Destination Host unreachable
+ */
+ if (*protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkts[pkt_index];
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d",
+ p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[i]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+ continue;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_index] = &(entry->head);
+
+ phy_port = pkts[pkt_index]->port;
+
+ struct ipv6_hdr ipv6_hdr;
+ struct ipv4_hdr ipv4_hdr;
+
+ ver = rte_get_pkt_ver(pkts[pkt_index]);
+ #ifdef CGNAPT_DEBUGGING
+ if (CGNAPT_DEBUG >= 1) {
+ printf("ver:%d\n", ver);
+ printf("entry->data.type:%d\n", entry->data.type);
+ }
+ #endif
+ if ((ver == 6) && (entry->data.type == CGNAPT_ENTRY_IPV6)
+ && is_phy_port_privte(phy_port)) {
+ convert_ipv6_to_ipv4(pkts[pkt_index], &ipv6_hdr);
+
+ pkt_type = PKT_TYPE_IPV6to4;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 1)
+				printf("pkt_work_cgnapt: "
+ "convert_ipv6_to_ipv4\n");
+ #endif
+
+ struct cgnapt_nsp_node *ll = nsp_ll;
+ int nsp = 0;
+ while (ll != NULL) {
+ if (!memcmp(&ipv6_hdr.dst_addr[0],
+ &ll->nsp.prefix[0],
+ ll->nsp.depth / 8)) {
+ nsp = 1;
+ break;
+ }
+ ll = ll->next;
+ }
+
+ if (!nsp
+ && !memcmp(&ipv6_hdr.dst_addr[0],
+ &well_known_prefix[0], 12)) {
+ nsp = 1;
+ }
+
+ if (!nsp) {
+ p_nat->invalid_packets |= 1LLU << pkt_index;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount5++;
+ #endif
+ continue;
+ }
+
+ }
+
+ /* As packet is already converted into IPv4 we must not operate
+ * IPv6 offsets on packet
+ * Only perform IPv4 operations
+ */
+
+ if (ver == 6) {
+
+ src_port_offset = SRC_PRT_OFST_IP6t4;
+ dst_port_offset = DST_PRT_OFST_IP6t4;
+ src_addr_offset = SRC_ADR_OFST_IP6t4;
+ dst_addr_offset = DST_ADR_OFST_IP6t4;
+ prot_offset = PROT_OFST_IP6t4;
+ eth_offset = ETH_OFST_IP6t4;
+
+ } else {
+
+ if ((*protocol == IP_PROTOCOL_TCP)
+ || (*protocol == IP_PROTOCOL_UDP)) {
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ } else if (*protocol == IP_PROTOCOL_ICMP) {
+ /* Identifier */
+ src_port_offset = IDEN_OFST_IP4_ICMP;
+ /* Sequence number */
+ dst_port_offset = SEQN_OFST_IP4_ICMP;
+ }
+
+ src_addr_offset = SRC_ADR_OFST_IP4;
+ dst_addr_offset = DST_ADR_OFST_IP4;
+ prot_offset = PROT_OFST_IP4;
+ eth_offset = MBUF_HDR_ROOM;
+
+ }
+
+ src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[pkt_index],
+ src_addr_offset);
+ dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[pkt_index],
+ dst_addr_offset);
+ src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkts[pkt_index],
+ src_port_offset);
+ dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkts[pkt_index],
+ dst_port_offset);
+ protocol =
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[pkt_index],
+ prot_offset);
+
+ eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[pkt_index],
+ eth_offset);
+ eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[pkt_index],
+ eth_offset + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+ uint8_t nh_ipv6[16];
+ uint32_t nhip = 0;
+
+ uint32_t dest_if = 0xff;
+ uint32_t ret;
+
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkts[pkt_index],
+ cgnapt_meta_offset);
+
+ if (is_phy_port_privte(phy_port)) {
+
+ if (*protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*dst_port) == 53) {
+ p_nat->invalid_packets |= 1LLU << pkt_index;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ dest_address = rte_bswap32(*dst_addr);
+ ret = local_get_nh_ipv4(dest_address, &dest_if,
+ &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address,
+ IP_VERSION_4);
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |=
+ 1LLU << pkt_index;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+
+ *outport_id = p_nat->outport_id[dest_if];
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address,
+ &dest_if, &hw_addr);
+
+ if (ret == ARP_FOUND) {
+ memcpy(eth_dest, &hw_addr,
+ sizeof(struct ether_addr));
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dest_address, *outport_id,
+ hw_addr.addr_bytes[0], hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2], hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4], hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkts[pkt_index]);
+ #endif
+
+			} else {
+ if (ret == ARP_NOT_FOUND) {
+				/* Commented-out code may be required
+				 * for future use; please keep it. */
+ //request_arp(*outport_id, nhip,
+ // p_nat->p.p);
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ }
+
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Egress: \tphy_port:%d\t "
+ "get_prv_to_pub():%d \tout_port:%d\n",
+ phy_port, dest_if,
+ *outport_id);
+ #endif
+
+ /* Egress */
+ *src_addr = rte_bswap32(entry->data.pub_ip);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *src_port = rte_bswap16(entry->data.pub_port);
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ p_nat->enaptedPktCount++;
+ } else {
+ /* Ingress */
+ if (*protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*src_port) == 53) {
+ p_nat->invalid_packets |= 1LLU << pkt_index;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Ingress: \tphy_port:%d\t "
+ "get_pub_to_prv():%d \tout_port%d\n",
+ phy_port, dest_if,
+ *outport_id);
+ #endif
+
+ if (entry->data.type == CGNAPT_ENTRY_IPV6) {
+ convert_ipv4_to_ipv6(pkts[pkt_index],
+ &ipv4_hdr);
+ pkt_type = PKT_TYPE_IPV4to6;
+ /* Ethernet MTU check */
+ if ((rte_pktmbuf_data_len(pkts[pkt_index]) -
+ 14) > 1500) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ continue;
+ }
+
+ eth_dest = eth_dest - 20;
+ eth_src = eth_src - 20;
+
+ dst_port_offset = DST_PRT_OFST_IP4t6;
+ dst_addr_offset = DST_ADR_OFST_IP4t6;
+ dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(
+ pkts[pkt_index],
+ dst_addr_offset);
+ dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(
+ pkts[pkt_index],
+ dst_port_offset);
+
+ memcpy((uint8_t *) &dst_addr[0],
+ &entry->data.u.prv_ipv6[0], 16);
+ memset(nh_ipv6, 0, 16);
+
+ ret = local_get_nh_ipv6((uint8_t *)&dst_addr[0],
+ &dest_if, &nh_ipv6[0], p_nat);
+
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(
+ &dst_addr[0],
+ IP_VERSION_6);
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |=
+ 1LLU << pkt_index;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+ do_local_nh_ipv6_cache(dest_if, p_nat);
+ }
+ *outport_id = p_nat->outport_id[dest_if];
+
+ if (get_dest_mac_address_ipv6_port((uint8_t *)
+ &dst_addr[0], &dest_if,
+ &hw_addr, &nh_ipv6[0])){
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dest_address, *outport_id,
+ hw_addr.addr_bytes[0],
+ hw_addr.addr_bytes[1], hw_addr.addr_bytes[2],
+ hw_addr.addr_bytes[3], hw_addr.addr_bytes[4],
+ hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+ memcpy(eth_dest, &hw_addr,
+ sizeof(struct ether_addr));
+ memcpy(eth_src, get_link_hw_addr(
+ dest_if),
+ sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkts[pkt_index]);
+ #endif
+ } else {
+
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *dst_port =
+ rte_bswap16(entry->data.prv_port);
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ } else {
+ *dst_addr = rte_bswap32(entry->data.u.prv_ip);
+ dest_address = entry->data.u.prv_ip;
+ ret = local_get_nh_ipv4(dest_address, &dest_if,
+ &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_pub_to_prv_port(
+ &dest_address, IP_VERSION_4);
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |=
+ 1LLU << pkt_index;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+				}
+
+ *outport_id = p_nat->outport_id[dest_if];
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address,
+ &dest_if, &hw_addr);
+
+ if (ret == ARP_FOUND) {
+ memcpy(eth_dest, &hw_addr,
+ sizeof(struct ether_addr));
+ memcpy(eth_src, get_link_hw_addr(
+ dest_if),
+ sizeof(struct ether_addr));
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dest_address, *outport_id,
+ hw_addr.addr_bytes[0], hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2], hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4], hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkts[pkt_index]);
+ #endif
+
+ } else {
+ if (ret == ARP_NOT_FOUND) {
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ }
+ //request_arp(*outport_id,
+ // nhip, p_nat->p.p);
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+
+ if (*protocol == IP_PROTOCOL_ICMP) {
+ // Query ID reverse translation done here
+ *src_port =
+ rte_bswap16(entry->data.prv_port);
+ } else {
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *dst_port =
+ rte_bswap16(entry->
+ data.prv_port);
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+ }
+ }
+
+ p_nat->inaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkts[pkt_index], pkt_type);
+ else
+ #endif
+ sw_checksum(pkts[pkt_index], pkt_type);
+ }
+
+ if (p_nat->invalid_packets) {
+ /* get rid of invalid packets */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1) {
+ printf("valid_packets:0x%jx\n", p_nat->valid_packets);
+ printf("rte_valid_packets :0x%jx\n", rte_p->pkts_mask);
+ printf("invalid_packets:0x%jx\n",
+ p_nat->invalid_packets);
+ printf("rte_invalid_packets :0x%jx\n",
+ rte_p->pkts_drop_mask);
+ printf("Total pkts dropped :0x%jx\n",
+ rte_p->n_pkts_ah_drop);
+ }
+ #endif
+ }
+
+ return p_nat->valid_packets;
+}
+
+/**
+ * Input port handler for IPv4 private traffic
+ * Starting from the packet burst, it filters unwanted packets,
+ * calculates keys, performs a bulk lookup and then, based on the
+ * result, updates the NAPT table and applies the NAPT translation.
+ *
+ * @param rte_p
+ * A pointer to struct rte_pipeline
+ * @param pkts
+ *  A pointer to an array of packet mbufs
+ * @param n_pkts
+ * Number of packets in the burst
+ * @param arg
+ * Void pointer
+ *
+ * @return
+ * int that is not checked by caller
+ */
+
+static int cgnapt_in_port_ah_ipv4_prv(struct rte_pipeline *rte_p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts, void *arg)
+{
+ uint32_t i, j;
+ struct pipeline_cgnapt_in_port_h_arg *ap = arg;
+ struct pipeline_cgnapt *p_nat = ap->p;
+
+ #ifdef CGNAPT_TIMING_INST
+ uint64_t entry_timestamp = 0, exit_timestamp;
+
+ if (p_nat->time_measurements_on) {
+ entry_timestamp = rte_get_tsc_cycles();
+ /* check since exit ts not valid first time through */
+ if (likely(p_nat->in_port_exit_timestamp))
+ p_nat->external_time_sum +=
+ entry_timestamp - p_nat->in_port_exit_timestamp;
+ }
+ #endif
+
+ p_nat->pkt_burst_cnt = 0; /* for dynamic napt */
+ p_nat->valid_packets = rte_p->pkts_mask; /*n_pkts; */
+ p_nat->invalid_packets = 0;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("cgnapt_key hit fn: %" PRIu32 "\n", n_pkts);
+ #endif
+
+ /* prefetching for mbufs should be done here */
+ for (j = 0; j < n_pkts; j++)
+ rte_prefetch0(pkts[j]);
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_key_ipv4_prv(&pkts[i], i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_key_ipv4_prv(pkts[i], i, arg, p_nat);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ if (unlikely(p_nat->valid_packets == 0)) {
+ /* no suitable packet for lookup */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+ return p_nat->valid_packets;
+ }
+
+ /* lookup entries in the common napt table */
+
+ int lookup_result = rte_hash_lookup_bulk(
+ napt_common_table,
+ (const void **)&p_nat->key_ptrs,
+ /* should be minus num invalid pkts */
+ n_pkts,
+ /*new pipeline data member */
+ &p_nat->lkup_indx[0]);
+
+ if (unlikely(lookup_result < 0)) {
+ /* unknown error, just discard all packets */
+ printf("Unexpected hash lookup error %d, discarding "
+ "all packets", lookup_result);
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->valid_packets);
+ return 0;
+ }
+
+ /* Now call second stage of pipeline to one by one
+ * check the result of our bulk lookup
+ */
+
+ /* prefetching for table entries should be done here */
+ for (j = 0; j < n_pkts; j++) {
+ if (p_nat->lkup_indx[j] >= 0)
+ rte_prefetch0(&napt_hash_tbl_entries
+ [p_nat->lkup_indx[j]]);
+ }
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_ipv4_prv(pkts, i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_ipv4_prv(pkts, i, arg, p_nat);
+
+ if (p_nat->invalid_packets) {
+ /* get rid of invalid packets */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1) {
+ printf("valid_packets:0x%jx\n", p_nat->valid_packets);
+ printf("rte_valid_packets :0x%jx\n", rte_p->pkts_mask);
+ printf("invalid_packets:0x%jx\n",
+ p_nat->invalid_packets);
+ printf("rte_invalid_packets :0x%jx\n",
+ rte_p->pkts_drop_mask);
+ printf("Total pkts dropped :0x%jx\n",
+ rte_p->n_pkts_ah_drop);
+ }
+ #endif
+ }
+
+ #ifdef CGNAPT_TIMING_INST
+ if (p_nat->time_measurements_on) {
+ exit_timestamp = rte_get_tsc_cycles();
+ p_nat->in_port_exit_timestamp = exit_timestamp;
+ p_nat->internal_time_sum += exit_timestamp - entry_timestamp;
+ p_nat->time_measurements++;
+ if (p_nat->time_measurements == p_nat->max_time_mesurements)
+ p_nat->time_measurements_on = 0;
+ }
+ #endif
+
+ return p_nat->valid_packets;
+}
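+
+/*
+ * Both the private and the public IPv4 handlers follow the same two-stage
+ * pattern: stage one computes keys four packets at a time plus a scalar
+ * tail, then rte_hash_lookup_bulk() fills lkup_indx[] with the table
+ * position for every hit and a negative value for every miss; stage two
+ * (pkt4_work_cgnapt_ipv4_* / pkt_work_cgnapt_ipv4_*) uses that index to
+ * either reuse the existing NAPT entry or create a dynamic one.
+ */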
+
+/**
+ * Input port handler for IPv4 public traffic
+ * Starting from the packet burst, it filters unwanted packets,
+ * calculates keys, performs a bulk lookup and then, based on the
+ * result, updates the NAPT table and applies the NAPT translation.
+ *
+ * @param rte_p
+ * A pointer to struct rte_pipeline
+ * @param pkts
+ *  A pointer to an array of packet mbufs
+ * @param n_pkts
+ * Number of packets in the burst
+ * @param arg
+ * Void pointer
+ *
+ * @return
+ * int that is not checked by caller
+ */
+static int cgnapt_in_port_ah_ipv4_pub(struct rte_pipeline *rte_p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts, void *arg)
+{
+ uint32_t i, j;
+ struct pipeline_cgnapt_in_port_h_arg *ap = arg;
+ struct pipeline_cgnapt *p_nat = ap->p;
+
+ #ifdef CGNAPT_TIMING_INST
+ uint64_t entry_timestamp = 0, exit_timestamp;
+
+ if (p_nat->time_measurements_on) {
+ entry_timestamp = rte_get_tsc_cycles();
+
+ /* check since exit ts not valid first time through */
+ if (likely(p_nat->in_port_exit_timestamp))
+ p_nat->external_time_sum +=
+ entry_timestamp - p_nat->in_port_exit_timestamp;
+ }
+ #endif
+
+ p_nat->pkt_burst_cnt = 0; /* for dynamic napt */
+ p_nat->valid_packets = rte_p->pkts_mask; /*n_pkts; */
+ p_nat->invalid_packets = 0;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("cgnapt_key hit fn: %" PRIu32 "\n", n_pkts);
+ #endif
+
+ /* prefetching for mbufs should be done here */
+ for (j = 0; j < n_pkts; j++)
+ rte_prefetch0(pkts[j]);
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_key_ipv4_pub(&pkts[i], i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_key_ipv4_pub(pkts[i], i, arg, p_nat);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ if (unlikely(p_nat->valid_packets == 0)) {
+ /* no suitable packet for lookup */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+ return p_nat->valid_packets;
+ }
+
+ /* lookup entries in the common napt table */
+
+ int lookup_result = rte_hash_lookup_bulk(
+ napt_common_table,
+ (const void **)&p_nat->key_ptrs,
+ /* should be minus num invalid pkts */
+ n_pkts,
+ /*new pipeline data member */
+ &p_nat->lkup_indx[0]);
+
+ if (unlikely(lookup_result < 0)) {
+ /* unknown error, just discard all packets */
+ printf("Unexpected hash lookup error %d, discarding "
+ "all packets", lookup_result);
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->valid_packets);
+ return 0;
+ }
+
+ /* Now call second stage of pipeline to one by one
+ * check the result of our bulk lookup
+ */
+
+ /* prefetching for table entries should be done here */
+ for (j = 0; j < n_pkts; j++) {
+ if (p_nat->lkup_indx[j] >= 0)
+ rte_prefetch0(&napt_hash_tbl_entries
+ [p_nat->lkup_indx[j]]);
+ }
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_ipv4_pub(pkts, i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_ipv4_pub(pkts, i, arg, p_nat);
+
+ if (p_nat->invalid_packets) {
+ /* get rid of invalid packets */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1) {
+ printf("valid_packets:0x%jx\n", p_nat->valid_packets);
+ printf("rte_valid_packets :0x%jx\n", rte_p->pkts_mask);
+ printf("invalid_packets:0x%jx\n",
+ p_nat->invalid_packets);
+ printf("rte_invalid_packets :0x%jx\n",
+ rte_p->pkts_drop_mask);
+ printf("Total pkts dropped :0x%jx\n",
+ rte_p->n_pkts_ah_drop);
+ }
+ #endif
+ }
+
+ #ifdef CGNAPT_TIMING_INST
+ if (p_nat->time_measurements_on) {
+ exit_timestamp = rte_get_tsc_cycles();
+ p_nat->in_port_exit_timestamp = exit_timestamp;
+
+ p_nat->internal_time_sum += exit_timestamp - entry_timestamp;
+ p_nat->time_measurements++;
+ if (p_nat->time_measurements == p_nat->max_time_mesurements)
+ p_nat->time_measurements_on = 0;
+ }
+ #endif
+
+ return p_nat->valid_packets;
+}
+
+/**
+ * NAPT key calculation function for IPv4 private traffic
+ * which handles 4 pkts
+ *
+ * @param pkt
+ *  A pointer to an array of packet mbufs
+ * @param pkt_num
+ *  Starting packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_key_ipv4_prv(
+ struct rte_mbuf **pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ p_nat->receivedPktCount += 4;
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask0 = 1LLU << pkt_num;
+ uint64_t pkt_mask1 = 1LLU << (pkt_num + 1);
+ uint64_t pkt_mask2 = 1LLU << (pkt_num + 2);
+ uint64_t pkt_mask3 = 1LLU << (pkt_num + 3);
+
+ uint8_t protocol0 = RTE_MBUF_METADATA_UINT8(pkt[0],
+ PROT_OFST_IP4);
+ uint8_t protocol1 = RTE_MBUF_METADATA_UINT8(pkt[1],
+ PROT_OFST_IP4);
+ uint8_t protocol2 = RTE_MBUF_METADATA_UINT8(pkt[2],
+ PROT_OFST_IP4);
+ uint8_t protocol3 = RTE_MBUF_METADATA_UINT8(pkt[3],
+ PROT_OFST_IP4);
+
+ uint32_t src_addr0 = RTE_MBUF_METADATA_UINT32(pkt[0],
+ SRC_ADR_OFST_IP4);
+ uint32_t src_addr1 = RTE_MBUF_METADATA_UINT32(pkt[1],
+ SRC_ADR_OFST_IP4);
+ uint32_t src_addr2 = RTE_MBUF_METADATA_UINT32(pkt[2],
+ SRC_ADR_OFST_IP4);
+ uint32_t src_addr3 = RTE_MBUF_METADATA_UINT32(pkt[3],
+ SRC_ADR_OFST_IP4);
+
+ uint16_t src_port_offset0;
+ uint16_t src_port_offset1;
+ uint16_t src_port_offset2;
+ uint16_t src_port_offset3;
+
+ uint16_t src_port0;
+ uint16_t src_port1;
+ uint16_t src_port2;
+ uint16_t src_port3;
+
+ uint16_t phy_port0 = pkt[0]->port;
+ uint16_t phy_port1 = pkt[1]->port;
+ uint16_t phy_port2 = pkt[2]->port;
+ uint16_t phy_port3 = pkt[3]->port;
+
+ struct pipeline_cgnapt_entry_key key0;
+ struct pipeline_cgnapt_entry_key key1;
+ struct pipeline_cgnapt_entry_key key2;
+ struct pipeline_cgnapt_entry_key key3;
+
+ memset(&key0, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key1, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key2, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key3, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+/* --0-- */
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[0]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[0], pkt_mask0, p_nat))
+ goto PKT1;
+ }
+
+ switch (protocol0) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[0],
+ IPV4_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[0], IPV4_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask0;
+ goto PKT1;
+ }
+ }
+ #endif
+ }
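+	/* fall through: UDP uses the same source-port offset as TCP */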
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset0 = SRC_PRT_OFST_IP4_TCP;
+ src_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ src_port_offset0);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset0 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ src_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ src_port_offset0);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask0;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT1;
+ }
+
+ key0.pid = phy_port0;
+ key0.ip = rte_bswap32(src_addr0);
+ key0.port = rte_bswap16(src_port0);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key0.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key0,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+
+/* --1-- */
+PKT1:
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[1]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[1], pkt_mask1, p_nat))
+ goto PKT2;
+ }
+ switch (protocol1) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[1],
+ IPV4_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[1], IPV4_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask1;
+ goto PKT2;
+ }
+ }
+ #endif
+ }
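+	/* fall through: UDP uses the same source-port offset as TCP */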
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset1 = SRC_PRT_OFST_IP4_TCP;
+ src_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ src_port_offset1);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset1 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ src_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ src_port_offset1);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask1;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT2;
+ }
+
+ key1.pid = phy_port1;
+ key1.ip = rte_bswap32(src_addr1);
+ key1.port = rte_bswap16(src_port1);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key1.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 1], &key1,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 1] = &p_nat->keys[pkt_num + 1];
+
+/* --2-- */
+PKT2:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[2]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[2], pkt_mask2, p_nat))
+ goto PKT3;
+ }
+
+ switch (protocol2) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[2],
+ IPV4_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[2], IPV4_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask2;
+ goto PKT3;
+ }
+ }
+ #endif
+ }
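+	/* fall through: UDP uses the same source-port offset as TCP */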
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset2 = SRC_PRT_OFST_IP4_TCP;
+ src_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ src_port_offset2);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset2 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ src_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ src_port_offset2);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask2;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT3;
+ }
+
+ key2.pid = phy_port2;
+ key2.ip = rte_bswap32(src_addr2);
+ key2.port = rte_bswap16(src_port2);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key2.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 2], &key2,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 2] = &p_nat->keys[pkt_num + 2];
+
+/* --3-- */
+PKT3:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[3]);
+ #endif
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[3], pkt_mask3, p_nat))
+ return;
+ }
+
+ switch (protocol3) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[3],
+ IPV4_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[3], IPV4_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask3;
+ return;
+ }
+ }
+ #endif
+ }
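+	/* fall through: UDP uses the same source-port offset as TCP */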
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset3 = SRC_PRT_OFST_IP4_TCP;
+ src_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ src_port_offset3);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset3 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ src_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ src_port_offset3);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask3;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
+ key3.pid = phy_port3;
+ key3.ip = rte_bswap32(src_addr3);
+ key3.port = rte_bswap16(src_port3);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key3.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 3], &key3,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 3] = &p_nat->keys[pkt_num + 3];
+}
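+
+/*
+ * Example (illustrative): an egress TCP packet from 192.168.1.10:5000
+ * received on private physical port 0 produces the key
+ * {pid = 0, ip = 0xc0a8010a, port = 5000}. With nat_only_config_flag set
+ * the port is wildcarded to 0xffff, so the lookup effectively becomes
+ * per private IP address.
+ */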
+
+/**
+ * NAPT key calculation function for IPv4 public traffic
+ * which handles 4 pkts
+ *
+ * @param pkt
+ *  A pointer to an array of packet mbufs
+ * @param pkt_num
+ *  Starting packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_key_ipv4_pub(
+ struct rte_mbuf **pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ p_nat->receivedPktCount += 4;
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask0 = 1LLU << pkt_num;
+ uint64_t pkt_mask1 = 1LLU << (pkt_num + 1);
+ uint64_t pkt_mask2 = 1LLU << (pkt_num + 2);
+ uint64_t pkt_mask3 = 1LLU << (pkt_num + 3);
+
+ uint8_t protocol0 = RTE_MBUF_METADATA_UINT8(pkt[0],
+ PROT_OFST_IP4);
+ uint8_t protocol1 = RTE_MBUF_METADATA_UINT8(pkt[1],
+ PROT_OFST_IP4);
+ uint8_t protocol2 = RTE_MBUF_METADATA_UINT8(pkt[2],
+ PROT_OFST_IP4);
+ uint8_t protocol3 = RTE_MBUF_METADATA_UINT8(pkt[3],
+ PROT_OFST_IP4);
+
+ uint32_t dst_addr0 = RTE_MBUF_METADATA_UINT32(pkt[0],
+ DST_ADR_OFST_IP4);
+ uint32_t dst_addr1 = RTE_MBUF_METADATA_UINT32(pkt[1],
+ DST_ADR_OFST_IP4);
+ uint32_t dst_addr2 = RTE_MBUF_METADATA_UINT32(pkt[2],
+ DST_ADR_OFST_IP4);
+ uint32_t dst_addr3 = RTE_MBUF_METADATA_UINT32(pkt[3],
+ DST_ADR_OFST_IP4);
+
+ uint16_t src_port_offset0;
+ uint16_t src_port_offset1;
+ uint16_t src_port_offset2;
+ uint16_t src_port_offset3;
+
+ uint16_t dst_port_offset0;
+ uint16_t dst_port_offset1;
+ uint16_t dst_port_offset2;
+ uint16_t dst_port_offset3;
+
+ uint16_t src_port0;
+ uint16_t src_port1;
+ uint16_t src_port2;
+ uint16_t src_port3;
+
+ uint16_t dst_port0;
+ uint16_t dst_port1;
+ uint16_t dst_port2;
+ uint16_t dst_port3;
+
+ struct pipeline_cgnapt_entry_key key0;
+ struct pipeline_cgnapt_entry_key key1;
+ struct pipeline_cgnapt_entry_key key2;
+ struct pipeline_cgnapt_entry_key key3;
+
+ memset(&key0, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key1, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key2, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key3, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+/* --0-- */
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[0]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[0], pkt_mask0, p_nat))
+ goto PKT1;
+ }
+
+ switch (protocol0) {
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset0 = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset0 = DST_PRT_OFST_IP4_TCP;
+
+ src_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ src_port_offset0);
+ dst_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ dst_port_offset0);
+
+ key0.port = rte_bswap16(dst_port0);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset0 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset0 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+
+ src_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ src_port_offset0);
+ dst_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ dst_port_offset0);
+
+ key0.port = rte_bswap16(src_port0);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask0;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT1;
+ }
+
+ key0.pid = 0xffff;
+ key0.ip = rte_bswap32(dst_addr0);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key0.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key0,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+
+/* --1-- */
+PKT1:
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[1]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[1], pkt_mask1, p_nat))
+ goto PKT2;
+ }
+
+ switch (protocol1) {
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset1 = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset1 = DST_PRT_OFST_IP4_TCP;
+
+ src_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ src_port_offset1);
+ dst_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ dst_port_offset1);
+
+ key1.port = rte_bswap16(dst_port1);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset1 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset1 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+
+ src_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ src_port_offset1);
+ dst_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ dst_port_offset1);
+
+ key1.port = rte_bswap16(src_port1);
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask1;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT2;
+ }
+
+ key1.pid = 0xffff;
+ key1.ip = rte_bswap32(dst_addr1);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key1.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 1], &key1,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 1] = &p_nat->keys[pkt_num + 1];
+
+/* --2-- */
+PKT2:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[2]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[2], pkt_mask2, p_nat))
+ goto PKT3;
+ }
+
+ switch (protocol2) {
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset2 = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset2 = DST_PRT_OFST_IP4_TCP;
+
+ src_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ src_port_offset2);
+ dst_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ dst_port_offset2);
+
+ key2.port = rte_bswap16(dst_port2);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset2 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset2 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+
+ src_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ src_port_offset2);
+ dst_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ dst_port_offset2);
+
+ key2.port = rte_bswap16(src_port2);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask2;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT3;
+ }
+
+ key2.pid = 0xffff;
+ key2.ip = rte_bswap32(dst_addr2);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key2.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 2], &key2,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 2] = &p_nat->keys[pkt_num + 2];
+
+/* --3-- */
+PKT3:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[3]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[3], pkt_mask3, p_nat))
+ return;
+ }
+
+ switch (protocol3) {
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset3 = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset3 = DST_PRT_OFST_IP4_TCP;
+
+ src_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ src_port_offset3);
+ dst_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ dst_port_offset3);
+
+ key3.port = rte_bswap16(dst_port3);
+
+ break;
+
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset3 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset3 = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+
+ src_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ src_port_offset3);
+ dst_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ dst_port_offset3);
+
+ key3.port = rte_bswap16(src_port3);
+
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask3;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
+ key3.pid = 0xffff;
+ key3.ip = rte_bswap32(dst_addr3);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key3.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 3], &key3,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 3] = &p_nat->keys[pkt_num + 3];
+}
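+
+/*
+ * Example (illustrative): an ingress TCP packet towards the public mapping
+ * 100.64.1.1:60000 produces the key {pid = 0xffff, ip = 0x64400101,
+ * port = 60000}; pid is wildcarded so the entry installed from the egress
+ * side matches regardless of which public physical port the packet
+ * arrives on.
+ */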
+
+/**
+ * NAPT key calculation function for IPv4 private traffic
+ * which handles 1 pkt
+ *
+ * @param pkt
+ *  A pointer to the packet mbuf
+ * @param pkt_num
+ *  Packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_key_ipv4_prv(
+ struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ /* Egress */
+ p_nat->receivedPktCount++;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint32_t src_addr = RTE_MBUF_METADATA_UINT32(pkt, SRC_ADR_OFST_IP4);
+
+ uint16_t src_port_offset;
+
+ uint16_t src_port;
+
+ uint16_t phy_port = pkt->port;
+ struct pipeline_cgnapt_entry_key key;
+
+ memset(&key, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt, pkt_mask, p_nat))
+ return;
+ }
+
+ switch (protocol) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ IPV4_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt, IPV4_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask;
+ return;
+ }
+ }
+ #endif
+ }
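+	/* fall through: UDP uses the same source-port offset as TCP */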
+ case IP_PROTOCOL_TCP:
+
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ src_port = RTE_MBUF_METADATA_UINT16(pkt, src_port_offset);
+
+ key.port = rte_bswap16(src_port);
+
+ break;
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ src_port = RTE_MBUF_METADATA_UINT16(pkt, src_port_offset);
+
+ key.port = rte_bswap16(src_port);
+
+ break;
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
+ key.pid = phy_port;
+ key.ip = rte_bswap32(src_addr);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+}
+
+/**
+ * NAPT key calculation function for IPv4 public traffic
+ * which handles 1 pkt
+ *
+ * @param pkt
+ *  A pointer to the packet mbuf
+ * @param pkt_num
+ *  Packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_key_ipv4_pub(
+ struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ p_nat->receivedPktCount++;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+ uint32_t dst_addr = RTE_MBUF_METADATA_UINT32(pkt, DST_ADR_OFST_IP4);
+ uint16_t src_port_offset;
+ uint16_t dst_port_offset;
+ uint16_t src_port;
+ uint16_t dst_port;
+ struct pipeline_cgnapt_entry_key key;
+ memset(&key, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt, pkt_mask, p_nat))
+ return;
+ }
+
+ switch (protocol) {
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_TCP:
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+
+ src_port = RTE_MBUF_METADATA_UINT16(pkt, src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16(pkt, dst_port_offset);
+
+ key.port = rte_bswap16(dst_port);
+ break;
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ dst_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+
+ src_port = RTE_MBUF_METADATA_UINT16(pkt, src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16(pkt, dst_port_offset);
+
+		/* common table lookup key prepared from the incoming
+		 * ICMP packet's Identifier field
+ */
+ key.port = rte_bswap16(src_port);
+ break;
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
+ key.ip = rte_bswap32(dst_addr);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ key.pid = 0xffff;
+
+ memcpy(&p_nat->keys[pkt_num], &key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+}
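+
+/*
+ * For ICMP there is no L4 port, so the Identifier field (4 bytes into the
+ * ICMP header, i.e. MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SIZE + 4) stands
+ * in as the pseudo-port on both the private and the public side; the
+ * Sequence number two bytes further on is read but not used in the key.
+ */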
+
+
+/**
+ * NAPT function for IPv4 private traffic which handles 1 pkt
+ *
+ * @param pkts
+ *  A pointer to an array of packet mbufs
+ * @param pkt_num
+ *  Packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_ipv4_prv(
+ struct rte_mbuf **pkts,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ #ifdef CT_CGNAT
+ struct rte_CT_helper ct_helper;
+ memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
+ #endif
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+ struct rte_mbuf *pkt = pkts[pkt_num];
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint32_t dest_if = 0xff; /* Added for Multiport */
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+
+ struct cgnapt_table_entry *entry = NULL;
+
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4;
+
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask = pkt_miss_cgnapt(p_nat->key_ptrs[pkt_num],
+ pkt, &table_entry,
+ &p_nat->valid_packets, pkt_num,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for Destination
+ * Host unreachable
+ */
+ if (protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkt;
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d", p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+ return;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+
+ uint32_t *src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, SRC_ADR_OFST_IP4);
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP4);
+ uint16_t src_port_offset = 0;
+ uint16_t dst_port_offset = 0;
+ uint16_t *src_port;
+ uint16_t *dst_port;
+
+ switch (protocol) {
+ case IP_PROTOCOL_TCP:
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, dst_port_offset);
+
+ #ifdef CT_CGNAT
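+ /* FTP control traffic (port 21) is fed to the connection tracker used by the FTP ALG */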
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+
+ #ifdef ALGDBG
+ printf("cgnapt_ct_process: pkt_mask: % "PRIu64", "
+ "pkt_num: %d\n", pkt_mask, pkt_num);
+ #endif
+
+ pkt_mask = cgnapt_ct_process(cgnat_cnxn_tracker, pkts,
+ pkt_mask, &ct_helper);
+ }
+ #endif
+ break;
+ case IP_PROTOCOL_UDP:
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, dst_port_offset);
+ break;
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+ src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, dst_port_offset);
+ break;
+ }
+
+ uint8_t *eth_dest = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+
+ /* Egress */
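+ /* UDP packets to destination port 53 (DNS) are not translated; drop them here */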
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*dst_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ dest_address = rte_bswap32(*dst_addr);
+ /*Multiport Changes */
+ uint32_t nhip = 0;
+ uint32_t ret;
+ ret = local_get_nh_ipv4(dest_address, &dest_if, &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address, IP_VERSION_4);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+
+ *outport_id = p_nat->outport_id[dest_if];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Egress: \tphy_port:%d\t get_prv_to_pub():%d "
+ "\tout_port:%d\n", pkt->port, dest_if,
+ *outport_id);
+ #endif
+
+ if (local_dest_mac_present(dest_if)) {
+ memcpy(eth_dest,
+ get_local_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address, &dest_if, &hw_addr);
+
+ if (unlikely(ret != ARP_FOUND)) {
+
+ if (unlikely(ret == ARP_NOT_FOUND)) {
+ //request_arp(*outport_id, nhip, p_nat->p.p);
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ }
+
+ /* Drop the pkt */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - %02x:%02x: "
+ "%02x:%02x:%02x:%02x\n", dest_address,
+ *outport_id,
+ hw_addr.addr_bytes[0], hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2], hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4], hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - %02x:%02x:%02x: "
+ "%02x:%02x:%02x\n", eth_dest[0], eth_dest[1],
+ eth_dest[2], eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
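+ /* Remember the resolved destination MAC for this port so subsequent packets can reuse it */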
+ link_hw_laddr_valid[dest_if] = 1;
+ memcpy(&link_hw_laddr[dest_if], &hw_addr,
+ sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - %02x:%02x:%02x:%02x:%02x"
+ ":%02x\n", eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ }
+
+ {
+ /* Egress */
+ *src_addr = rte_bswap32(entry->data.pub_ip);
+
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *src_port = rte_bswap16(entry->data.pub_port);
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ #ifdef SIP_ALG
+ uint16_t rtp_port = 0, rtcp_port = 0;
+ struct cgnapt_table_entry *entry_ptr1 = NULL,
+ *entry_ptr2 = NULL, *entry_ptr3 = NULL,
+ *entry_ptr4 = NULL;
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && (rte_be_to_cpu_16(*dst_port) == 5060
+ || rte_be_to_cpu_16(*src_port) == 5060))) {
+
+ int ret = natSipAlgGetAudioPorts(pkt, &rtp_port,
+ &rtcp_port);
+ /* Commented code may be required for debug
+ * and future use; please keep it. */
+ #if 0
+ if (ret < 0) {
+ printf("%s: Wrong SIP ALG packet1\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+ #endif
+
+ if (ret >= 0 && rtp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtp_key;
+ rtp_key.ip = entry->data.u.prv_ip;
+ rtp_key.port = rtp_port;
+ rtp_key.pid = entry->data.prv_phy_port;
+
+ if (add_dynamic_cgnapt_entry_alg(
+ (struct pipeline *)p_nat, &rtp_key,
+ &entry_ptr1, &entry_ptr2) == 0) {
+ printf("%s: Wrong SIP ALG packet2\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+ }
+
+ if (ret >= 0 && rtcp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtcp_key;
+ rtcp_key.ip = entry->data.u.prv_ip;
+ rtcp_key.port = rtcp_port;
+ rtcp_key.pid = entry->data.prv_phy_port;
+
+ if (add_dynamic_cgnapt_entry_alg(
+ (struct pipeline *)p_nat, &rtcp_key,
+ &entry_ptr3, &entry_ptr4) == 0) {
+ printf("%s: Wrong SIP ALG packet3\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+
+ }
+ //if(entry_ptr1 != NULL && entry_ptr3 != NULL)
+ if (sip_alg_dpi(pkt, PRIVATE, entry->data.pub_ip,
+ entry->data.pub_port, entry->data.u.prv_ip,
+ entry->data.prv_port, (rtp_port == 0) ? 0 :
+ entry_ptr1->data.pub_port,
+ (rtcp_port == 0) ? 0 :
+ entry_ptr3->data.pub_port) == 0) {
+
+ printf("%s: Wrong SIP ALG packet4\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+ }
+ #endif /* SIP_ALG */
+
+ #ifdef FTP_ALG
+
+ #ifdef ALGDBG
+ printf("@CGNAT-pktwork ct_position :%d, pkt_num %d pkt_mask= "
+ "%" PRIu64 "\n", ct_position, pkt_num, pkt_mask);
+ #endif
+
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+
+ int32_t ct_position = cgnat_cnxn_tracker->positions[pkt_num];
+ if (ct_position < 0){
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ return;
+ }
+ /* Commented code may be required for future usage,
+ * Please keep it
+ */
+ //if (cgnat_cnxn_tracker->hash_table_entries
+ // [ct_position].alg_bypass_flag != BYPASS)
+ {
+ struct pipeline_cgnapt_entry_key data_channel_entry_key;
+
+ data_channel_entry_key.ip = entry->data.pub_ip;
+ data_channel_entry_key.port = entry->data.pub_port;
+ data_channel_entry_key.pid = pkt->port;
+ ftp_alg_dpi(p_nat, &data_channel_entry_key, pkt,
+ cgnat_cnxn_tracker, ct_position, PRIVATE);
+ }
+ }
+ #endif /* FTP_ALG */
+
+ p_nat->enaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
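+ /* Recompute checksums: hardware-assisted when HW_CHECKSUM_REQ is enabled, software otherwise */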
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+
+}
+
+
+/**
+ * NAPT function for IPv4 public traffic which handles 1 pkt
+ *
+ * @param pkts
+ * A pointer to an array of packet mbufs
+ * @param pkt_num
+ * Index of the packet to process
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_ipv4_pub(
+ struct rte_mbuf **pkts,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+
+ #ifdef CT_CGNAT
+ struct rte_CT_helper ct_helper;
+ memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
+ #endif
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+ struct rte_mbuf *pkt = pkts[pkt_num];
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint32_t dest_if = 0xff; /* Added for Multiport */
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+
+ struct cgnapt_table_entry *entry = NULL;
+
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4;
+
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask = pkt_miss_cgnapt(p_nat->key_ptrs[pkt_num],
+ pkt, &table_entry,
+ &p_nat->valid_packets, pkt_num,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for
+ * Destination Host unreachable
+ */
+ if (protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkt;
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d", p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+ return;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP4);
+ uint16_t src_port_offset = 0;
+ uint16_t dst_port_offset = 0;
+
+ if ((protocol == IP_PROTOCOL_TCP) || (protocol == IP_PROTOCOL_UDP)) {
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ } else if (protocol == IP_PROTOCOL_ICMP) {
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+ }
+
+ uint16_t *src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, src_port_offset);
+ uint16_t *dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt, dst_port_offset);
+
+ uint8_t *eth_dest = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+
+ /* Multiport Changes */
+ uint32_t nhip = 0;
+ uint32_t ret;
+
+ {
+ /* Ingress */
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*src_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ dest_address = entry->data.u.prv_ip;
+
+ ret = local_get_nh_ipv4(dest_address, &dest_if, &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address, IP_VERSION_4);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+
+ *outport_id = p_nat->outport_id[dest_if];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Ingress: \tphy_port:%d\t get_pub_to_prv():%d "
+ "\tout_port%d\n", pkt->port, dest_if, *outport_id);
+ #endif
+ }
+
+ if (local_dest_mac_present(dest_if)) {
+ memcpy(eth_dest,
+ get_local_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address, &dest_if, &hw_addr);
+
+ if (unlikely(ret != ARP_FOUND)) {
+
+ if (unlikely(ret == ARP_NOT_FOUND)) {
+ /* Commented code may be required for debug
+ * and future use, Please keep it */
+ //request_arp(*outport_id, nhip, p_nat->p.p);
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+
+ }
+
+ /* Drop the pkt */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+
+ }
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf
+ ("MAC found for ip 0x%x, port %d - %02x:%02x: "
+ "%02x:%02x:%02x:%02x\n", dest_address,
+ *outport_id,
+ hw_addr.addr_bytes[0], hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2], hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4], hw_addr.addr_bytes[5]
+ );
+
+ printf
+ ("Dest MAC before - %02x:%02x:%02x:%02x "
+ ":%02x:%02x\n", eth_dest[0], eth_dest[1],
+ eth_dest[2], eth_dest[3], eth_dest[4],
+ eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ link_hw_laddr_valid[dest_if] = 1;
+ memcpy(&link_hw_laddr[dest_if], &hw_addr,
+ sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3],
+ eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ }
+
+ {
+ /* Ingress */
+
+ *dst_addr = rte_bswap32(entry->data.u.prv_ip);
+ if (protocol == IP_PROTOCOL_ICMP) {
+ /* Query ID reverse translation done here */
+ /* don't care about the sequence number */
+ *src_port = rte_bswap16(entry->data.prv_port);
+ } else {
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *dst_port = rte_bswap16(entry->data.prv_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+ #ifdef CT_CGNAT
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+ pkt_mask = cgnapt_ct_process(cgnat_cnxn_tracker, pkts,
+ pkt_mask, &ct_helper);
+ }
+ #endif
+ }
+
+ #ifdef SIP_ALG
+ uint16_t rtp_port = 0, rtcp_port = 0;
+ struct cgnapt_table_entry *entry_ptr1 = NULL,
+ *entry_ptr3 = NULL;
+
+ /* Commented code may be required for debug
+ * and future use, Please keep it */
+ #if 0
+ struct cgnapt_table_entry *entry_ptr2 = NULL,
+ *entry_ptr4 = NULL;
+ #endif
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && (rte_be_to_cpu_16(*dst_port) == 5060
+ || rte_be_to_cpu_16(*src_port) == 5060))) {
+ /* Commented code may be required for future usage,
+ * Please keep it
+ */
+ #if 0
+ int ret = natSipAlgGetAudioPorts(pkt, &rtp_port,
+ &rtcp_port);
+ if (ret < 0) {
+ printf("%s: Wrong SIP ALG packet1\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+ if (rtp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtp_key;
+ rtp_key.ip = entry->data.pub_ip;
+ rtp_key.port = rtp_port;
+ rtp_key.pid = 0xffff;
+
+ if (retrieve_cgnapt_entry_alg(&rtp_key,
+ &entry_ptr1, &entry_ptr2) == 0) {
+ printf("%s: Wrong SIP ALG packet2\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+ }
+
+ if (rtcp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtcp_key;
+ rtcp_key.ip = entry->data.pub_ip;
+ rtcp_key.port = rtcp_port;
+ rtcp_key.pid = 0xffff;
+
+ if (retrieve_cgnapt_entry_alg(&rtcp_key,
+ &entry_ptr3, &entry_ptr4) == 0) {
+ printf("%s: Wrong SIP ALG packet3\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+
+ }
+ #endif
+
+ if (sip_alg_dpi(pkt, PUBLIC, entry->data.u.prv_ip,
+ entry->data.prv_port, entry->data.pub_ip,
+ entry->data.pub_port, (rtp_port == 0) ? 0 :
+ entry_ptr1->data.prv_port,
+ (rtcp_port == 0) ? 0 :
+ entry_ptr3->data.prv_port) == 0) {
+
+ printf("%s: Wrong SIP ALG packet4\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+
+ }
+ }
+ #endif /* SIP_ALG */
+
+ #ifdef FTP_ALG
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+ int32_t ct_position = cgnat_cnxn_tracker->
+ positions[pkt_num];
+ if (ct_position < 0){
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+ return;
+ }
+ #ifdef ALGDBG
+ rte_hexdump(stdout, "CT Entry", &cgnat_cnxn_tracker->
+ hash_table_entries[ct_position].key, 40);
+ #endif
+
+ /* Commented code may be required for debug
+ * and future use; please keep it. */
+ //if (cgnat_cnxn_tracker->hash_table_entries
+ // [ct_position].alg_bypass_flag != BYPASS)
+ {
+ /*enable ALG DPI */
+ struct pipeline_cgnapt_entry_key
+ data_channel_entry_key;
+
+ data_channel_entry_key.ip = entry->data.pub_ip;
+ data_channel_entry_key.port = entry->data.pub_port;
+ data_channel_entry_key.pid = 0xffff;
+ //printf("pkt_work_pub ftp_alg_dpi\n");
+ ftp_alg_dpi(p_nat, &data_channel_entry_key, pkt,
+ cgnat_cnxn_tracker, ct_position, PUBLIC);
+
+ }
+ }
+ #endif
+
+ p_nat->inaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+}
+
+
+/**
+ * NAPT function for IPv4 private traffic which handles 4 pkts
+ *
+ * @param pkts
+ * A pointer to an array of packet mbufs
+ * @param in_pkt_num
+ * Starting packet number of the 4-packet group
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_ipv4_prv(
+ struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ uint32_t dest_if = 0xff; /* Added for Multiport */
+ struct rte_mbuf *pkt;
+ uint8_t i;
+ uint8_t pkt_num;
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4;
+
+ #ifdef CT_CGNAT
+ struct rte_CT_helper ct_helper;
+ memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
+ #endif
+
+ for (i = 0; i < 4; i++) {
+ pkt_num = in_pkt_num + i;
+ pkt = pkts[pkt_num];
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+
+ struct cgnapt_table_entry *entry = NULL;
+
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask =
+ pkt_miss_cgnapt(p_nat->key_ptrs[pkt_num], pkt,
+ &table_entry,
+ &p_nat->valid_packets, pkt_num,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for
+ * Destination Host unreachable
+ */
+ if (protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkt;
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d",
+ p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+ continue;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+
+ uint32_t *src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, SRC_ADR_OFST_IP4);
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP4);
+ uint16_t src_port_offset = 0;
+ uint16_t dst_port_offset = 0;
+ uint16_t *src_port;
+ uint16_t *dst_port;
+
+ #if 0
+ if ((protocol == IP_PROTOCOL_TCP)
+ || (protocol == IP_PROTOCOL_UDP)) {
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ } else if (protocol == IP_PROTOCOL_ICMP) {
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+ }
+ #endif
+
+ switch (protocol) {
+ case IP_PROTOCOL_TCP:
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ dst_port_offset);
+
+ #ifdef CT_CGNAT
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+
+ //TODO: verify whether pkt_mask needs to be
+ //complemented before CT processing
+ #ifdef ALGDBG
+ printf("cgnapt_ct_process: pkt_mask: "
+ "% "PRIu64", pkt_num: %d\n",
+ pkt_mask, pkt_num);
+ #endif
+
+ pkt_mask = cgnapt_ct_process(
+ cgnat_cnxn_tracker, pkts,
+ pkt_mask, &ct_helper);
+ }
+ #endif
+ break;
+ case IP_PROTOCOL_UDP:
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ dst_port_offset);
+ break;
+ case IP_PROTOCOL_ICMP:
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset = MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+ src_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ src_port_offset);
+ dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ dst_port_offset);
+ break;
+ }
+
+
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+ /*Multiport Changes */
+ uint32_t nhip = 0;
+ uint32_t ret;
+
+ {
+
+ /* Egress */
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*dst_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ dest_address = rte_bswap32(*dst_addr);
+ ret = local_get_nh_ipv4(dest_address, &dest_if, &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address,
+ IP_VERSION_4);
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+ *outport_id = p_nat->outport_id[dest_if];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Egress: \tphy_port:%d\t "
+ "get_prv_to_pub():%d \tout_port:%d\n",
+ pkt->port, dest_if, *outport_id);
+ #endif
+ }
+
+ if (local_dest_mac_present(dest_if)) {
+ memcpy(eth_dest,
+ get_local_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ memcpy(eth_src,
+ get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address, &dest_if, &hw_addr);
+
+ if (unlikely(ret != ARP_FOUND)) {
+
+ if (unlikely(ret == ARP_NOT_FOUND)) {
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ //request_arp(*outport_id, nhip, p_nat->p.p);
+ }
+
+ /* Drop the pkt */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+
+ }
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dest_address,
+ *outport_id,
+ hw_addr.addr_bytes[0],
+ hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2],
+ hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4],
+ hw_addr.addr_bytes[5]
+ );
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ link_hw_laddr_valid[dest_if] = 1;
+ memcpy(&link_hw_laddr[dest_if], &hw_addr,
+ sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src,
+ get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ }
+
+ {
+ /* Egress */
+ *src_addr = rte_bswap32(entry->data.pub_ip);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *src_port = rte_bswap16(entry->data.pub_port);
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ #ifdef SIP_ALG
+ uint16_t rtp_port = 0, rtcp_port = 0;
+ struct cgnapt_table_entry *entry_ptr1 = NULL,
+ *entry_ptr2 = NULL, *entry_ptr3 = NULL,
+ *entry_ptr4 = NULL;
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && (rte_be_to_cpu_16(*dst_port) == 5060
+ || rte_be_to_cpu_16(*src_port) == 5060))) {
+
+ int ret = natSipAlgGetAudioPorts(pkt,
+ &rtp_port, &rtcp_port);
+ /* Commented code may be required for future usage,
+ * Please keep it
+ */
+ #if 0
+ if (ret < 0) {
+ printf("%s: Wrong SIP ALG packet1\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+ #endif
+
+ if (ret >= 0 && rtp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtp_key;
+ rtp_key.ip = entry->data.u.prv_ip;
+ rtp_key.port = rtp_port;
+ rtp_key.pid = entry->data.prv_phy_port;
+
+ if (add_dynamic_cgnapt_entry_alg(
+ (struct pipeline *)p_nat, &rtp_key,
+ &entry_ptr1, &entry_ptr2) == 0) {
+ printf("%s: Wrong SIP ALG packet2\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+ }
+
+ if (ret >= 0 && rtcp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtcp_key;
+ rtcp_key.ip = entry->data.u.prv_ip;
+ rtcp_key.port = rtcp_port;
+ rtcp_key.pid = entry->data.prv_phy_port;
+
+ if (add_dynamic_cgnapt_entry_alg(
+ (struct pipeline *)p_nat, &rtcp_key,
+ &entry_ptr3, &entry_ptr4) == 0) {
+
+ printf("%s: Wrong SIP ALG packet3\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+
+ }
+ //if(entry_ptr1 != NULL && entry_ptr3 != NULL)
+ if (sip_alg_dpi(pkt, PRIVATE,
+ entry->data.pub_ip,
+ entry->data.pub_port,
+ entry->data.u.prv_ip,
+ entry->data.prv_port,
+ (rtp_port == 0) ? 0 :
+ entry_ptr1->data.pub_port,
+ (rtcp_port == 0) ? 0 :
+ entry_ptr3->data.pub_port) == 0) {
+
+ printf("%s: Wrong SIP ALG packet4\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+ }
+ #endif /* SIP_ALG */
+
+ #ifdef FTP_ALG
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+
+ int32_t ct_position =
+ cgnat_cnxn_tracker->positions[pkt_num];
+ #ifdef ALGDBG
+ printf("@CGNAT-pkt4work ct_position :%d, pkt_num %d "
+ "pkt_mask = %" PRIu64 "\n", ct_position,
+ pkt_num, pkt_mask);
+ #endif
+
+ if (ct_position < 0){
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ continue;
+ }
+ if (cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ alg_bypass_flag != BYPASS){
+
+ struct pipeline_cgnapt_entry_key
+ data_channel_entry_key;
+ /*enable ALG DPI */
+ data_channel_entry_key.ip =
+ entry->data.pub_ip;
+ data_channel_entry_key.port =
+ entry->data.pub_port;
+ data_channel_entry_key.pid = 0xffff;
+
+ ftp_alg_dpi(p_nat, &data_channel_entry_key,
+ pkt, cgnat_cnxn_tracker, ct_position,
+ PRIVATE);
+
+ }
+ }
+ #endif
+ p_nat->enaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+ }
+}
+
+/**
+ * NAPT function for IPv4 public traffic which handles 4 pkts
+ *
+ * @param pkts
+ * A pointer to an array of packet mbufs
+ * @param in_pkt_num
+ * Starting packet number of the 4-packet group
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_ipv4_pub(
+ struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ #ifdef CT_CGNAT
+ struct rte_CT_helper ct_helper;
+ memset(&ct_helper, 0, sizeof(struct rte_CT_helper));
+ #endif
+ struct rte_mbuf *pkt;
+ uint8_t i;
+ uint8_t pkt_num;
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4;
+
+ for (i = 0; i < 4; i++) {
+ pkt_num = in_pkt_num + i;
+ pkt = pkts[pkt_num];
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint32_t dest_if = 0xff; /* Added for Multiport */
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+
+ struct cgnapt_table_entry *entry = NULL;
+
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask =
+ pkt_miss_cgnapt(p_nat->key_ptrs[pkt_num], pkt,
+ &table_entry,
+ &p_nat->valid_packets, pkt_num,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for
+ * Destination Host unreachable
+ */
+ if (protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkt;
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d",
+ p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+
+ continue;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP4);
+ uint16_t src_port_offset = 0;
+ uint16_t dst_port_offset = 0;
+
+ if ((protocol == IP_PROTOCOL_TCP)
+ || (protocol == IP_PROTOCOL_UDP)) {
+ src_port_offset = SRC_PRT_OFST_IP4_TCP;
+ dst_port_offset = DST_PRT_OFST_IP4_TCP;
+ } else if (protocol == IP_PROTOCOL_ICMP) {
+ /* Identifier */
+ src_port_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE +
+ IP_HDR_SIZE + 4;
+ /*Sequence number */
+ dst_port_offset = MBUF_HDR_ROOM +
+ ETH_HDR_SIZE +
+ IP_HDR_SIZE + 6;
+ }
+
+ uint16_t *src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, src_port_offset);
+ uint16_t *dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, dst_port_offset);
+
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+ /* Multiport Changes */
+ uint32_t nhip = 0;
+ uint32_t ret;
+
+ /* Ingress */
+ {
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*src_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ dest_address = entry->data.u.prv_ip;
+ ret = local_get_nh_ipv4(dest_address, &dest_if, &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address, IP_VERSION_4);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+
+ *outport_id = p_nat->outport_id[dest_if];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Ingress: \tphy_port:%d\t "
+ "get_pub_to_prv():%d \tout_port%d\n",
+ pkt->port, dest_if,
+ *outport_id);
+ #endif
+ }
+
+ if (local_dest_mac_present(dest_if)) {
+ memcpy(eth_dest,
+ get_local_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ memcpy(eth_src,
+ get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address, &dest_if, &hw_addr);
+
+ if (unlikely(ret != ARP_FOUND)) {
+
+ if (unlikely(ret == ARP_NOT_FOUND)) {
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ //request_arp(*outport_id, nhip, p_nat->p.p);
+ }
+
+ /* Drop the pkt */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+
+ }
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dest_address, *outport_id,
+ hw_addr.addr_bytes[0],
+ hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2],
+ hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4],
+ hw_addr.addr_bytes[5]
+ );
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ link_hw_laddr_valid[dest_if] = 1;
+ memcpy(&link_hw_laddr[dest_if],
+ &hw_addr, sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - %02x:%02x:%02x: "
+ "%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src,
+ get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ }
+
+ {
+ /* Ingress */
+
+ *dst_addr = rte_bswap32(entry->data.u.prv_ip);
+ if (protocol == IP_PROTOCOL_ICMP) {
+ /* Query ID reverse translation done here */
+ *src_port = rte_bswap16(entry->data.prv_port);
+ /* don't care about the sequence number */
+ } else {
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *dst_port =
+ rte_bswap16(entry->data.prv_port);
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ #ifdef CT_CGNAT
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+ pkt_mask = cgnapt_ct_process(
+ cgnat_cnxn_tracker, pkts,
+ pkt_mask, &ct_helper);
+ }
+ #endif
+ }
+
+ #ifdef SIP_ALG
+ uint16_t rtp_port = 0, rtcp_port = 0;
+ struct cgnapt_table_entry *entry_ptr1 = NULL,
+ *entry_ptr3 = NULL;
+ /* Commented code may be required for future usage,
+ * Please keep it
+ */
+ #if 0
+ struct cgnapt_table_entry *entry_ptr2 = NULL,
+ *entry_ptr4 = NULL;
+ #endif
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && (rte_be_to_cpu_16(*dst_port) == 5060
+ || rte_be_to_cpu_16(*src_port) == 5060))) {
+ /* Commented code may be required for future usage,
+ * Please keep it
+ */
+ #if 0
+ int ret = natSipAlgGetAudioPorts(pkt,
+ &rtp_port, &rtcp_port);
+ if (ret < 0) {
+ printf("%s: Wrong SIP ALG packet1\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+
+ if (rtp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtp_key;
+ rtp_key.ip = entry->data.pub_ip;
+ rtp_key.port = rtp_port;
+ rtp_key.pid = 0xffff;
+
+ if (retrieve_cgnapt_entry_alg(&rtp_key,
+ &entry_ptr1, &entry_ptr2) == 0) {
+ printf("%s: Wrong SIP ALG packet2\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+ }
+
+ if (rtcp_port != 0) {
+ struct pipeline_cgnapt_entry_key rtcp_key;
+ rtcp_key.ip = entry->data.pub_ip;
+ rtcp_key.port = rtcp_port;
+ rtcp_key.pid = 0xffff;
+
+ if (retrieve_cgnapt_entry_alg(&rtcp_key,
+ &entry_ptr3, &entry_ptr4) == 0) {
+ printf("%s: Wrong SIP ALG packet3\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+
+ }
+ #endif
+ if (sip_alg_dpi(pkt, PUBLIC,
+ entry->data.u.prv_ip,
+ entry->data.prv_port,
+ entry->data.pub_ip,
+ entry->data.pub_port,
+ (rtp_port == 0) ? 0 :
+ entry_ptr1->data.prv_port,
+ (rtcp_port == 0) ? 0 :
+ entry_ptr3->data.prv_port) == 0) {
+
+ printf("%s: Wrong SIP ALG packet4\n",
+ __func__);
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+ }
+ }
+ #endif /* SIP_ALG */
+
+ #ifdef FTP_ALG
+ if ((rte_be_to_cpu_16(*src_port) == 21) ||
+ rte_be_to_cpu_16(*dst_port) == 21) {
+
+ int32_t ct_position =
+ cgnat_cnxn_tracker->positions[pkt_num];
+ if (ct_position < 0){
+ p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+ continue;
+ }
+ if (cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].alg_bypass_flag != BYPASS){
+
+ struct pipeline_cgnapt_entry_key
+ data_channel_entry_key;
+
+ /*enable ALG DPI */
+ data_channel_entry_key.ip =
+ entry->data.pub_ip;
+ data_channel_entry_key.port =
+ entry->data.pub_port;
+ data_channel_entry_key.pid = 0xffff;
+
+ ftp_alg_dpi(p_nat, &data_channel_entry_key,
+ pkt, cgnat_cnxn_tracker,
+ ct_position, PUBLIC);
+
+ }
+ }
+ #endif
+ p_nat->inaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+ }
+}
+
+/**
+ * NAPT key calculation function for IPv6 private traffic
+ * which handles 1 pkt
+ *
+ * @param pkt
+ * A pointer to the packet mbuf
+ * @param pkt_num
+ * Packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_key_ipv6_prv(
+ struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ /* Egress */
+ p_nat->receivedPktCount++;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP6);
+ uint32_t *src_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ SRC_ADR_OFST_IP6);
+ uint16_t src_port = RTE_MBUF_METADATA_UINT16(pkt, SRC_PRT_OFST_IP6);
+
+ uint16_t phy_port = pkt->port;
+ struct pipeline_cgnapt_entry_key key;
+
+ memset(&key, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt, pkt_mask, p_nat))
+ return;
+ }
+
+ switch (protocol) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ IPV6_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt, IPV6_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask;
+ return;
+ }
+ }
+ #endif
+ }
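+ /* fall through: UDP shares the handling below with TCP/ICMP */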
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ printf("wrong protocol: %d\n", protocol);
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
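+ /* Key on the ingress physical port, the low 32 bits of the IPv6 source address and the source port */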
+ key.pid = phy_port;
+ key.ip = rte_bswap32(src_addr[3]);
+ key.port = rte_bswap16(src_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+}
+
+/**
+ * NAPT key calculation function for IPv6 public traffic
+ * which handles 1 pkt
+ *
+ * @param pkt
+ * A pointer to the packet mbuf
+ * @param pkt_num
+ * Packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_key_ipv6_pub(
+ struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+
+ /* Ingress */
+ p_nat->receivedPktCount++;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
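+ /* Public (ingress) NAT64 traffic arrives as IPv4, so IPv4 metadata offsets are used here */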
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ DST_ADR_OFST_IP4);
+ uint16_t dst_port = RTE_MBUF_METADATA_UINT16(pkt,
+ DST_PRT_OFST_IP4_TCP);
+
+ struct pipeline_cgnapt_entry_key key;
+
+ memset(&key, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt, pkt_mask, p_nat))
+ return;
+ }
+
+ switch (protocol) {
+
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
+ key.pid = 0xffff;
+ key.ip = rte_bswap32(dst_addr[0]);
+ key.port = rte_bswap16(dst_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+}
+
+/**
+ * NAPT key calculation function for IPv6 private traffic
+ * which handles 4 pkts
+ *
+ * @param pkt
+ * A pointer to an array of packet mbufs
+ * @param pkt_num
+ * Starting packet number of the 4-packet group
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_key_ipv6_prv(
+ struct rte_mbuf **pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ p_nat->receivedPktCount += 4;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask0 = 1LLU << pkt_num;
+ uint64_t pkt_mask1 = 1LLU << (pkt_num + 1);
+ uint64_t pkt_mask2 = 1LLU << (pkt_num + 2);
+ uint64_t pkt_mask3 = 1LLU << (pkt_num + 3);
+
+ uint8_t protocol0 = RTE_MBUF_METADATA_UINT8(pkt[0],
+ PROT_OFST_IP6);
+ uint8_t protocol1 = RTE_MBUF_METADATA_UINT8(pkt[1],
+ PROT_OFST_IP6);
+ uint8_t protocol2 = RTE_MBUF_METADATA_UINT8(pkt[2],
+ PROT_OFST_IP6);
+ uint8_t protocol3 = RTE_MBUF_METADATA_UINT8(pkt[3],
+ PROT_OFST_IP6);
+
+ uint32_t *src_addr0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0],
+ SRC_ADR_OFST_IP6);
+ uint32_t *src_addr1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1],
+ SRC_ADR_OFST_IP6);
+ uint32_t *src_addr2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2],
+ SRC_ADR_OFST_IP6);
+ uint32_t *src_addr3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3],
+ SRC_ADR_OFST_IP6);
+
+ uint16_t src_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ SRC_PRT_OFST_IP6);
+ uint16_t src_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ SRC_PRT_OFST_IP6);
+ uint16_t src_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ SRC_PRT_OFST_IP6);
+ uint16_t src_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ SRC_PRT_OFST_IP6);
+
+ uint16_t phy_port0 = pkt[0]->port;
+ uint16_t phy_port1 = pkt[1]->port;
+ uint16_t phy_port2 = pkt[2]->port;
+ uint16_t phy_port3 = pkt[3]->port;
+
+ struct pipeline_cgnapt_entry_key key0;
+ struct pipeline_cgnapt_entry_key key1;
+ struct pipeline_cgnapt_entry_key key2;
+ struct pipeline_cgnapt_entry_key key3;
+
+ memset(&key0, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key1, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key2, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key3, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[0]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[0], pkt_mask0, p_nat))
+ goto PKT1;
+ }
+
+ switch (protocol0) {
+
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[0],
+ IPV6_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[0], IPV6_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask0;
+ goto PKT1;
+ }
+ }
+ #endif
+ }
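+ /* fall through: UDP shares the handling below with TCP/ICMP */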
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask0;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+
+ goto PKT1;
+ }
+
+
+ key0.pid = phy_port0;
+ key0.ip = rte_bswap32(src_addr0[3]);
+ key0.port = rte_bswap16(src_port0);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key0.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key0,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+
+ PKT1:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[1]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[1], pkt_mask1, p_nat))
+ goto PKT2;
+ }
+
+ switch (protocol1) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[1],
+ IPV6_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[1], IPV6_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask1;
+ goto PKT2;
+ }
+ }
+ #endif
+ }
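+ /* fall through: UDP shares the handling below with TCP/ICMP */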
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask1;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+
+ goto PKT2;
+ }
+
+ key1.pid = phy_port1;
+ key1.ip = rte_bswap32(src_addr1[3]);
+ key1.port = rte_bswap16(src_port1);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key1.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 1], &key1,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 1] = &p_nat->keys[pkt_num + 1];
+
+ PKT2:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[2]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[2], pkt_mask2, p_nat))
+ goto PKT3;
+ }
+
+ switch (protocol2) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[2],
+ IPV6_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[2], IPV6_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask2;
+ goto PKT3;
+ }
+ }
+ #endif
+ }
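+ /* fall through: UDP shares the handling below with TCP/ICMP */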
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask2;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+
+ goto PKT3;
+ }
+
+ key2.pid = phy_port2;
+ key2.ip = rte_bswap32(src_addr2[3]);
+ key2.port = rte_bswap16(src_port2);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key2.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 2], &key2,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 2] = &p_nat->keys[pkt_num + 2];
+
+ PKT3:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[3]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[3], pkt_mask3, p_nat))
+ return;
+ }
+
+ switch (protocol3) {
+ case IP_PROTOCOL_UDP:
+ {
+ #ifdef PCP_ENABLE
+ if (pcp_enable) {
+ struct udp_hdr *udp;
+
+ udp = (struct udp_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[3],
+ IPV6_UDP_OFST);
+
+ if (rte_bswap16(udp->dst_port) ==
+ PCP_SERVER_PORT) {
+ handle_pcp_req(pkt[3], IPV6_SZ, p_nat);
+ p_nat->invalid_packets |= pkt_mask3;
+ return;
+ }
+ }
+ #endif
+ }
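+ /* fall through: UDP shares the handling below with TCP/ICMP */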
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask3;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+
+ return;
+ }
+
+ key3.pid = phy_port3;
+ key3.ip = rte_bswap32(src_addr3[3]);
+ key3.port = rte_bswap16(src_port3);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key3.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 3], &key3,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 3] = &p_nat->keys[pkt_num + 3];
+
+
+}
+
+/**
+ * NAPT key calculation function for IPv6 public traffic
+ * which handles 4 pkts
+ *
+ * @param pkt
+ * A pointer to an array of packet mbufs
+ * @param pkt_num
+ * Starting packet number of the 4-packet group
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_key_ipv6_pub(
+ struct rte_mbuf **pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ p_nat->receivedPktCount += 4;
+
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask0 = 1LLU << pkt_num;
+ uint64_t pkt_mask1 = 1LLU << (pkt_num + 1);
+ uint64_t pkt_mask2 = 1LLU << (pkt_num + 2);
+ uint64_t pkt_mask3 = 1LLU << (pkt_num + 3);
+
+ uint8_t protocol0 = RTE_MBUF_METADATA_UINT8(pkt[0],
+ PROT_OFST_IP4);
+ uint8_t protocol1 = RTE_MBUF_METADATA_UINT8(pkt[1],
+ PROT_OFST_IP4);
+ uint8_t protocol2 = RTE_MBUF_METADATA_UINT8(pkt[2],
+ PROT_OFST_IP4);
+ uint8_t protocol3 = RTE_MBUF_METADATA_UINT8(pkt[3],
+ PROT_OFST_IP4);
+
+ uint32_t *dst_addr0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0],
+ DST_ADR_OFST_IP4);
+ uint32_t *dst_addr1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1],
+ DST_ADR_OFST_IP4);
+ uint32_t *dst_addr2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2],
+ DST_ADR_OFST_IP4);
+ uint32_t *dst_addr3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3],
+ DST_ADR_OFST_IP4);
+
+ uint16_t dst_port0 = RTE_MBUF_METADATA_UINT16(pkt[0],
+ DST_PRT_OFST_IP4_TCP);
+ uint16_t dst_port1 = RTE_MBUF_METADATA_UINT16(pkt[1],
+ DST_PRT_OFST_IP4_TCP);
+ uint16_t dst_port2 = RTE_MBUF_METADATA_UINT16(pkt[2],
+ DST_PRT_OFST_IP4_TCP);
+ uint16_t dst_port3 = RTE_MBUF_METADATA_UINT16(pkt[3],
+ DST_PRT_OFST_IP4_TCP);
+
+ struct pipeline_cgnapt_entry_key key0;
+ struct pipeline_cgnapt_entry_key key1;
+ struct pipeline_cgnapt_entry_key key2;
+ struct pipeline_cgnapt_entry_key key3;
+
+ memset(&key0, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key1, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key2, 0, sizeof(struct pipeline_cgnapt_entry_key));
+ memset(&key3, 0, sizeof(struct pipeline_cgnapt_entry_key));
+
+/* --0-- */
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[0]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[0], pkt_mask0, p_nat))
+ goto PKT1;
+ }
+
+ switch (protocol0) {
+
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask0;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT1;
+ }
+
+ key0.pid = 0xffff;
+ key0.ip = rte_bswap32(dst_addr0[0]);
+ key0.port = rte_bswap16(dst_port0);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key0.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num], &key0,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num] = &p_nat->keys[pkt_num];
+
+
+/* --1-- */
+
+PKT1:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[1]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[1], pkt_mask1, p_nat))
+ goto PKT2;
+ }
+
+ switch (protocol1) {
+
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask1;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT2;
+ }
+
+ key1.pid = 0xffff;
+ key1.ip = rte_bswap32(dst_addr1[0]);
+ key1.port = rte_bswap16(dst_port1);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key1.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 1], &key1,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->key_ptrs[pkt_num + 1] = &p_nat->keys[pkt_num + 1];
+
+
+/* --2-- */
+
+PKT2:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[2]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[2], pkt_mask2, p_nat))
+ goto PKT3;
+ }
+
+ switch (protocol2) {
+
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask2;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ goto PKT3;
+ }
+
+ key2.pid = 0xffff;
+ key2.ip = rte_bswap32(dst_addr2[0]);
+ key2.port = rte_bswap16(dst_port2);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key2.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 2], &key2,
+ sizeof(struct pipeline_cgnapt_entry_key));
+
+ p_nat->key_ptrs[pkt_num + 2] = &p_nat->keys[pkt_num + 2];
+
+
+/* --3-- */
+
+PKT3:
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 4)
+ print_pkt(pkt[3]);
+ #endif
+
+ if (enable_hwlb) {
+ if (!check_arp_icmp(pkt[3], pkt_mask3, p_nat))
+ return;
+ }
+
+ switch (protocol3) {
+
+ case IP_PROTOCOL_TCP:
+ case IP_PROTOCOL_UDP:
+ case IP_PROTOCOL_ICMP:
+ /* we don't need icmp check in ipv6 */
+ break;
+
+ default:
+ /* remember invalid packets to be dropped */
+ p_nat->invalid_packets |= pkt_mask3;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount2++;
+ #endif
+ return;
+ }
+
+ key3.pid = 0xffff;
+ key3.ip = rte_bswap32(dst_addr3[0]);
+ key3.port = rte_bswap16(dst_port3);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ key3.port = 0xffff;
+ #endif
+
+ memcpy(&p_nat->keys[pkt_num + 3], &key3,
+ sizeof(struct pipeline_cgnapt_entry_key));
+
+ p_nat->key_ptrs[pkt_num + 3] = &p_nat->keys[pkt_num + 3];
+}
+
+/**
+ * NAPT function for IPv6 private traffic which handles 1 pkt
+ *
+ * @param pkt
+ * A pointer to the packet mbuf
+ * @param pkt_num
+ * Packet number within the burst
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_ipv6_prv(
+ struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP6);
+
+ /* Added for Multiport */
+ uint32_t dest_if = INVALID_DESTIF;
+ uint16_t *outport_id = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ cgnapt_meta_offset);
+
+ struct cgnapt_table_entry *entry = NULL;
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV6to4;
+
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask = pkt_miss_cgnapt(p_nat->key_ptrs[pkt_num],
+ pkt, &table_entry,
+ &p_nat->valid_packets, pkt_num,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for
+ * Destination Host unreachable
+ */
+ /* Do we need this check for ipv6? */
+ if (protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkt;
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d", p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+
+ return;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+
+ struct ipv6_hdr ipv6_hdr;
+
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+ uint32_t nhip = 0;
+ /* Egress */
+ {
+
+ convert_ipv6_to_ipv4(pkt, &ipv6_hdr);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG == 1)
+ printf("pkt_work_cganpt: convert_ipv6_to_ipv4\n");
+ #endif
+
+ struct cgnapt_nsp_node *ll = nsp_ll;
+ int nsp = 0;
+
+ while (ll != NULL) {
+ if (!memcmp
+ (&ipv6_hdr.dst_addr[0], &ll->nsp.prefix[0],
+ ll->nsp.depth / 8)) {
+ nsp = 1;
+ break;
+ }
+ ll = ll->next;
+ }
+
+ if (!nsp
+ && !memcmp(&ipv6_hdr.dst_addr[0], &well_known_prefix[0],
+ 12)) {
+ nsp = 1;
+ }
+
+ if (!nsp) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount5++;
+ #endif
+
+ return;
+ }
+
+ }
+
+ /* The packet has already been converted to IPv4, so do not
+  * apply IPv6 offsets to it; perform IPv4 operations only.
+  */
+
+ uint32_t *src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, SRC_ADR_OFST_IP6t4);
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP6t4);
+ uint16_t *src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, SRC_PRT_OFST_IP6t4);
+ uint16_t *dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, DST_PRT_OFST_IP6t4);
+
+ uint8_t *eth_dest = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ ETH_OFST_IP6t4);
+ uint8_t *eth_src = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ ETH_OFST_IP6t4 + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+ {
+ /* Egress */
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*dst_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+
+ return;
+ }
+
+ dest_address = rte_bswap32(*dst_addr);
+ /* Multiport changes: reuse the nhip declared above */
+ uint32_t ret;
+ ret = local_get_nh_ipv4(dest_address, &dest_if, &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address, IP_VERSION_4);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+ *outport_id = p_nat->outport_id[dest_if];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Egress: \tphy_port:%d\t get_prv_to_pub():%d "
+ "\tout_port:%d\n", pkt->port,
+ dest_if, *outport_id);
+ #endif
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ static int static_count;
+
+ if (static_count++ < 10) {
+ print_pkt(pkt);
+ my_print_entry(entry);
+ printf("dest-offset:%d\n", DST_ADR_OFST_IP4);
+ printf("dest_add:%x\n", entry->data.u.prv_ip);
+ printf("dest_add:%x\n", *dst_addr);
+ printf("DST_ADR_OFST_IP6:%d\n", DST_ADR_OFST_IP6);
+ }
+ #endif
+
+ if (local_dest_mac_present(dest_if)) {
+ memcpy(eth_dest,
+ get_local_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address, &dest_if, &hw_addr);
+
+ if (unlikely(ret != ARP_FOUND)) {
+
+ if (unlikely(ret == ARP_NOT_FOUND)) {
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ //request_arp(*outport_id, nhip, p_nat->p.p);
+ }
+
+ /* Drop the pkt */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+
+ }
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - %02x:%02x: "
+ "%02x:%02x:%02x:%02x\n", dest_address,
+ *outport_id,
+ hw_addr.addr_bytes[0], hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2], hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4], hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - %02x:%02x:%02x:%02x: "
+ "%02x:%02x\n", eth_dest[0], eth_dest[1],
+ eth_dest[2], eth_dest[3],
+ eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3],
+ eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ }
+
+ {
+ /* Egress */
+ *src_addr = rte_bswap32(entry->data.pub_ip);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *src_port = rte_bswap16(entry->data.pub_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ p_nat->enaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+}
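+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): the NAT64
+ * destination check used in the egress path above matches only whole
+ * leading bytes of the IPv6 destination, first against each configured NSP
+ * prefix (depth / 8 bytes) and then against a 96-bit well-known prefix
+ * (12 bytes, e.g. 64:ff9b::/96). The list type and names are simplified
+ * stand-ins.
+ */
+#if 0
+#include <stdint.h>
+#include <string.h>
+
+struct example_nsp {
+	uint8_t prefix[16];
+	uint8_t depth;			/* prefix length in bits, multiple of 8 */
+	struct example_nsp *next;
+};
+
+static int example_dst_is_nat64(const uint8_t dst[16],
+				const struct example_nsp *nsp_list,
+				const uint8_t well_known[12])
+{
+	const struct example_nsp *ll;
+
+	for (ll = nsp_list; ll != NULL; ll = ll->next)
+		if (!memcmp(dst, ll->prefix, ll->depth / 8))
+			return 1;
+
+	/* fall back to the well-known /96 prefix: first 12 bytes */
+	return !memcmp(dst, well_known, 12);
+}
+#endif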
+
+
+/**
+ * NAPT function for IPv6 public traffic which handles 1 pkt
+ *
+ * @param pkt
+ *  A pointer to the packet mbuf
+ * @param pkt_num
+ *  Index of the pkt within the current burst
+ * @param arg
+ *  Void pointer
+ * @param p_nat
+ *  A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt_work_cgnapt_ipv6_pub(
+ struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+
+ uint32_t dest_if = INVALID_DESTIF; /* Added for Multiport */
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+ struct cgnapt_table_entry *entry = NULL;
+
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4to6;
+
+ if (hash_table_entry < 0) {
+
+ /* Drop ingress initial traffic */
+
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d", p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ return;
+
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+ if (entry->data.type != CGNAPT_ENTRY_IPV6) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ return;
+ }
+
+ struct ipv4_hdr ipv4_hdr;
+ uint16_t *src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, SRC_PRT_OFST_IP4_TCP);
+
+ uint8_t *eth_dest = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src = RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint8_t dest_addr_ipv6[16];
+ uint8_t nh_ipv6[16];
+
+ /* Ingress */
+ {
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*src_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ memcpy(&dest_addr_ipv6[0], &entry->data.u.prv_ipv6[0], 16);
+ uint8_t nhipv6[16];
+ int ret;
+ ret = local_get_nh_ipv6(&dest_addr_ipv6[0], &dest_if,
+ &nhipv6[0], p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port((uint32_t *)
+ &dest_addr_ipv6[0],
+ IP_VERSION_6);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ return;
+ }
+
+ do_local_nh_ipv6_cache(dest_if, p_nat);
+ }
+ *outport_id = p_nat->outport_id[dest_if];
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ static int static_count;
+
+ if (static_count++ < 10) {
+ print_pkt(pkt);
+ my_print_entry(entry);
+ printf("dest-offset:%d\n", DST_ADR_OFST_IP4);
+ printf("dest_add:%x\n", entry->data.u.prv_ip);
+ printf("DST_ADR_OFST_IP6:%d\n", DST_ADR_OFST_IP6);
+ }
+ #endif
+
+ memset(nh_ipv6, 0, 16);
+ if (get_dest_mac_address_ipv6_port(
+ &dest_addr_ipv6[0],
+ &dest_if,
+ &hw_addr,
+ &nh_ipv6[0])) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ *((uint32_t *)dest_addr_ipv6 + 12),
+ *outport_id,
+ hw_addr.addr_bytes[0],
+ hw_addr.addr_bytes[1], hw_addr.addr_bytes[2],
+ hw_addr.addr_bytes[3], hw_addr.addr_bytes[4],
+ hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3],
+ eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src, get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+
+ return;
+ }
+ /* Ingress */
+ {
+
+ convert_ipv4_to_ipv6(pkt, &ipv4_hdr);
+
+ /* Ethernet MTU check */
+ if ((rte_pktmbuf_data_len(pkt) - 14) > 1500) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ return;
+ }
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP4t6);
+ uint16_t *dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, DST_PRT_OFST_IP4t6);
+
+ memcpy((uint8_t *) &dst_addr[0], &entry->data.u.prv_ipv6[0],
+ 16);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *dst_port = rte_bswap16(entry->data.prv_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ p_nat->inaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+}
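+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): the Ethernet
+ * MTU check in the ingress path above drops frames whose L3 payload would
+ * exceed a 1500-byte MTU once the 20-byte IPv4 header has been replaced by
+ * a 40-byte IPv6 header. The constants and helper name are assumptions for
+ * illustration only.
+ */
+#if 0
+#include <stdint.h>
+
+#define EXAMPLE_ETH_HDR_LEN	14
+#define EXAMPLE_ETH_MTU		1500
+
+static int example_fits_mtu_after_4to6(uint32_t ipv4_frame_len)
+{
+	/* translation grows the IP header from 20 to 40 bytes */
+	uint32_t ipv6_frame_len = ipv4_frame_len + 20;
+
+	return (ipv6_frame_len - EXAMPLE_ETH_HDR_LEN) <= EXAMPLE_ETH_MTU;
+}
+#endif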
+
+
+/**
+ * NAPT function for IPv6 private traffic which handles 4 pkts
+ *
+ * @param pkts
+ * A pointer to array of packets mbuf
+ * @param in_pkt_num
+ * Starting pkt number of pkts
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_ipv6_prv(
+ struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ struct rte_mbuf *pkt;
+ uint8_t i;
+ uint8_t pkt_num;
+
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV6to4;
+
+ for (i = 0; i < 4; i++) {
+ pkt_num = in_pkt_num + i;
+ pkt = pkts[i];
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP6);
+ uint32_t dest_if = INVALID_DESTIF;
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+ struct cgnapt_table_entry *entry = NULL;
+
+ if (hash_table_entry < 0) {
+
+ /* try to add new entry */
+ struct rte_pipeline_table_entry *table_entry = NULL;
+
+ uint64_t dropmask =
+ pkt_miss_cgnapt(p_nat->key_ptrs[pkt_num], pkt,
+ &table_entry,
+ &p_nat->valid_packets, pkt_num,
+ (void *)p_nat);
+
+ if (!table_entry) {
+ /* ICMP Error message generation for
+ * Destination Host unreachable
+ */
+ /* Do we need this check for ipv6? */
+ if (protocol == IP_PROTOCOL_ICMP) {
+ cgnapt_icmp_pkt = pkt;
+ send_icmp_dest_unreachable_msg();
+ }
+
+ /* Drop packet by adding to invalid pkt mask */
+
+ p_nat->invalid_packets |= dropmask;
+
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d",
+ p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ #endif
+
+ continue;
+ }
+
+ entry = (struct cgnapt_table_entry *)table_entry;
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+
+ struct ipv6_hdr ipv6_hdr;
+ struct ether_addr hw_addr;
+ uint32_t dest_address = 0;
+ uint8_t nh_ipv6[16];
+ uint32_t nhip = 0;
+
+ /* Egress */
+ {
+ convert_ipv6_to_ipv4(pkt, &ipv6_hdr);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 1)
+ printf("pkt_work_cganpt: "
+ "convert_ipv6_to_ipv4\n");
+ #endif
+
+ struct cgnapt_nsp_node *ll = nsp_ll;
+ int nsp = 0;
+
+ while (ll != NULL) {
+ if (!memcmp(&ipv6_hdr.dst_addr[0],
+ &ll->nsp.prefix[0],
+ ll->nsp.depth / 8)) {
+ nsp = 1;
+ break;
+ }
+ ll = ll->next;
+ }
+
+ if (!nsp
+ && !memcmp(&ipv6_hdr.dst_addr[0],
+ &well_known_prefix[0], 12)) {
+ nsp = 1;
+ }
+
+ if (!nsp) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount5++;
+ #endif
+ continue;
+ }
+
+ }
+
+ /* The packet has already been converted to IPv4, so do not
+  * apply IPv6 offsets to it; perform IPv4 operations only.
+  */
+
+ uint32_t *src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, SRC_ADR_OFST_IP6t4);
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, DST_ADR_OFST_IP6t4);
+ uint16_t *src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, SRC_PRT_OFST_IP6t4);
+ uint16_t *dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, DST_PRT_OFST_IP6t4);
+
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, ETH_OFST_IP6t4);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, ETH_OFST_IP6t4 + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ /* Egress */
+ {
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*dst_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ dest_address = rte_bswap32(*dst_addr);
+ uint32_t ret;
+ ret = local_get_nh_ipv4(dest_address, &dest_if, &nhip, p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port(&dest_address, IP_VERSION_4);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ do_local_nh_ipv4_cache(dest_if, p_nat);
+ }
+ *outport_id = p_nat->outport_id[dest_if];
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Egress: \tphy_port:%d\t"
+ "get_prv_to_pub():%d \tout_port:%d\n",
+ pkt->port, dest_if, *outport_id);
+ #endif
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ static int static_count;
+
+ if (static_count++ < 10) {
+ print_pkt(pkt);
+ my_print_entry(entry);
+ printf("dest-offset:%d\n", DST_ADR_OFST_IP4);
+ printf("dest_add:%x\n", entry->data.u.prv_ip);
+ printf("dest_add:%x\n", *dst_addr);
+ printf("DST_ADR_OFST_IP6:%d\n", DST_ADR_OFST_IP6);
+ }
+ #endif
+
+ memset(nh_ipv6, 0, 16);
+
+ {
+ int ret;
+ ret = get_dest_mac_addr_port(dest_address, &dest_if, &hw_addr);
+
+ if (unlikely(ret != ARP_FOUND)) {
+
+ if (unlikely(ret == ARP_NOT_FOUND)) {
+ /* The commented-out call below is kept for
+ * possible future debugging use */
+ //request_arp(*outport_id, nhip, p_nat->p.p);
+ printf("%s: ARP Not Found, nhip: %x, "
+ "outport_id: %d\n", __func__, nhip,
+ *outport_id);
+ }
+
+ /* Drop the pkt */
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ continue;
+
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ dest_address, *outport_id,
+ hw_addr.addr_bytes[0],
+ hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2],
+ hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4],
+ hw_addr.addr_bytes[5]
+ );
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src,
+ get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ }
+
+ {
+ /* Egress */
+ *src_addr = rte_bswap32(entry->data.pub_ip);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *src_port = rte_bswap16(entry->data.pub_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ p_nat->enaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+ }
+}
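+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): packets in a
+ * burst are tracked by single bits of 64-bit masks. Dropping a packet sets
+ * its bit in the invalid mask, and the set handed on to the next stage is
+ * valid &= ~invalid, exactly as the input port handlers below do. Helper
+ * names are illustrative.
+ */
+#if 0
+#include <stdint.h>
+
+static uint64_t example_mark_invalid(uint64_t invalid_mask, uint32_t pkt_num)
+{
+	return invalid_mask | (1LLU << pkt_num);
+}
+
+static uint64_t example_remaining_valid(uint64_t valid_mask,
+					uint64_t invalid_mask)
+{
+	return valid_mask & ~invalid_mask;
+}
+#endif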
+
+/**
+ * NAPT function for IPv6 public traffic which handles 4 pkts
+ *
+ * @param pkts
+ * A pointer to array of packets mbuf
+ * @param in_pkt_num
+ * Starting pkt number of pkts
+ * @param arg
+ * Void pointer
+ * @param p_nat
+ * A pointer to main CGNAPT structure
+ *
+ */
+void
+pkt4_work_cgnapt_ipv6_pub(
+ struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ __rte_unused void *arg,
+ struct pipeline_cgnapt *p_nat)
+{
+ struct rte_mbuf *pkt;
+ uint8_t i;
+ uint8_t pkt_num;
+
+ enum PKT_TYPE pkt_type = PKT_TYPE_IPV4to6;
+
+ for (i = 0; i < 4; i++) {
+ pkt_num = in_pkt_num + i;
+ pkt = pkts[i];
+
+ /* index into hash table entries */
+ int hash_table_entry = p_nat->lkup_indx[pkt_num];
+ /*bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_num;
+
+ uint8_t protocol = RTE_MBUF_METADATA_UINT8(pkt, PROT_OFST_IP4);
+ uint16_t *outport_id =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, cgnapt_meta_offset);
+ struct cgnapt_table_entry *entry = NULL;
+
+ if (hash_table_entry < 0) {
+
+ /* Drop ingress initial traffic */
+
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount3++;
+ if (p_nat->kpc2++ < 5) {
+ printf("in_ah Th: %d", p_nat->pipeline_num);
+ print_key(p_nat->key_ptrs[pkt_num]);
+ }
+ #endif
+
+ continue;
+
+ } else {
+ /* entry found for this packet */
+ entry = &napt_hash_tbl_entries[hash_table_entry];
+ }
+
+ /* apply napt and mac changes */
+
+ p_nat->entries[pkt_num] = &(entry->head);
+ if (entry->data.type != CGNAPT_ENTRY_IPV6) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ continue;
+ }
+
+ struct ipv4_hdr ipv4_hdr;
+
+ uint16_t *src_port =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, SRC_PRT_OFST_IP4_TCP);
+
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+
+ if (entry->data.ttl == NAPT_ENTRY_STALE)
+ entry->data.ttl = NAPT_ENTRY_VALID;
+
+ struct ether_addr hw_addr;
+ uint8_t dest_addr_ipv6[16];
+ uint8_t nh_ipv6[16];
+ uint32_t dest_if = INVALID_DESTIF;
+ /* Ingress */
+ {
+
+ if (unlikely(protocol == IP_PROTOCOL_UDP
+ && rte_be_to_cpu_16(*src_port) == 53)) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ memcpy(&dest_addr_ipv6[0], &entry->data.u.prv_ipv6[0],
+ 16);
+ uint8_t nhipv6[16];
+ int ret;
+ ret = local_get_nh_ipv6(&dest_addr_ipv6[0], &dest_if,
+ &nhipv6[0], p_nat);
+ if (!ret) {
+ dest_if = get_prv_to_pub_port((uint32_t *)
+ &dest_addr_ipv6[0], IP_VERSION_6);
+
+ if (dest_if == INVALID_DESTIF) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount6++;
+ #endif
+ continue;
+ }
+
+ do_local_nh_ipv6_cache(dest_if, p_nat);
+ }
+
+ *outport_id = p_nat->outport_id[dest_if];
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ static int static_count;
+
+ if (static_count++ < 10) {
+ print_pkt(pkt);
+ my_print_entry(entry);
+ printf("dest-offset:%d\n", DST_ADR_OFST_IP4);
+ printf("dest_add:%x\n", entry->data.u.prv_ip);
+ printf("DST_ADR_OFST_IP6:%d\n", DST_ADR_OFST_IP6);
+ }
+ #endif
+
+ memset(nh_ipv6, 0, 16);
+ if (get_dest_mac_address_ipv6
+ (&dest_addr_ipv6[0], &dest_if,
+ &hw_addr, &nh_ipv6[0])) {
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("MAC found for ip 0x%x, port %d - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ *((uint32_t *)dest_addr_ipv6 + 12),
+ *outport_id,
+ hw_addr.addr_bytes[0], hw_addr.addr_bytes[1],
+ hw_addr.addr_bytes[2], hw_addr.addr_bytes[3],
+ hw_addr.addr_bytes[4], hw_addr.addr_bytes[5]);
+
+ printf("Dest MAC before - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_dest, &hw_addr, sizeof(struct ether_addr));
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("Dest MAC after - "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3], eth_dest[4], eth_dest[5]);
+ }
+ #endif
+
+ memcpy(eth_src,
+ get_link_hw_addr(dest_if),
+ sizeof(struct ether_addr));
+ } else {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+
+ continue;
+ }
+
+ {
+ /* Ingress */
+
+ convert_ipv4_to_ipv6(pkt, &ipv4_hdr);
+
+ /* Ethernet MTU check */
+ if ((rte_pktmbuf_data_len(pkt) - 14) > 1500) {
+ p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+ continue;
+ }
+ uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ DST_ADR_OFST_IP4t6);
+ uint16_t *dst_port = RTE_MBUF_METADATA_UINT16_PTR(pkt,
+ DST_PRT_OFST_IP4t6);
+
+ memcpy((uint8_t *) &dst_addr[0],
+ &entry->data.u.prv_ipv6[0], 16);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+ *dst_port = rte_bswap16(entry->data.prv_port);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ p_nat->inaptedPktCount++;
+ }
+
+ p_nat->naptedPktCount++;
+
+ #ifdef HW_CHECKSUM_REQ
+ if (p_nat->hw_checksum_reqd)
+ hw_checksum(pkt, pkt_type);
+ else
+ #endif
+ sw_checksum(pkt, pkt_type);
+ }
+}
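+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): L4 ports are
+ * carried in the packet in network (big-endian) byte order, so the DNS
+ * drop check above converts them to host order before comparing with 53.
+ * The helper below is a portable stand-in for rte_be_to_cpu_16() and
+ * assumes a little-endian host.
+ */
+#if 0
+#include <stdint.h>
+
+static uint16_t example_be16_to_host(uint16_t be)
+{
+	return (uint16_t)((be >> 8) | (be << 8));
+}
+
+static int example_is_dns_port(uint16_t port_in_pkt)
+{
+	return example_be16_to_host(port_in_pkt) == 53;
+}
+#endif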
+
+/**
+ * Input port handler for IPv6 private traffic.
+ * Filters unwanted packets from the burst, calculates lookup keys,
+ * performs a bulk lookup and, based on the result, updates the
+ * NAPT table and applies NAPT translation to the packets.
+ *
+ * @param rte_p
+ * A pointer to struct rte_pipeline
+ * @param pkts
+ * A pointer to array of packets mbuf
+ * @param n_pkts
+ * Number of packets in the burst
+ * @param arg
+ * Void pointer
+ *
+ * @return
+ * int that is not checked by caller
+ */
+static int cgnapt_in_port_ah_ipv6_prv(struct rte_pipeline *rte_p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts, void *arg)
+{
+ uint32_t i, j;
+ struct pipeline_cgnapt_in_port_h_arg *ap = arg;
+ struct pipeline_cgnapt *p_nat = ap->p;
+
+ p_nat->pkt_burst_cnt = 0; /* for dynamic napt */
+ p_nat->valid_packets = rte_p->pkts_mask; /*n_pkts; */
+ p_nat->invalid_packets = 0;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("cgnapt_key hit fn: %" PRIu32 "\n", n_pkts);
+ #endif
+
+ /* prefetching for mbufs should be done here */
+ for (j = 0; j < n_pkts; j++)
+ rte_prefetch0(pkts[j]);
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_key_ipv6_prv(&pkts[i], i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_key_ipv6_prv(pkts[i], i, arg, p_nat);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ if (unlikely(p_nat->valid_packets == 0)) {
+ /* no suitable packet for lookup */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+ return p_nat->valid_packets;
+ }
+
+ /* lookup entries in the common napt table */
+
+ int lookup_result = rte_hash_lookup_bulk(
+ napt_common_table,
+ (const void **) &p_nat->key_ptrs,
+ /* should be minus num invalid pkts */
+ n_pkts,
+ /*new pipeline data member */
+ &p_nat->lkup_indx[0]);
+
+ if (unlikely(lookup_result < 0)) {
+ /* unknown error, just discard all packets */
+ printf("Unexpected hash lookup error %d, "
+ "discarding all packets",
+ lookup_result);
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->valid_packets);
+ return 0;
+ }
+
+ /* Now call second stage of pipeline to one by one
+ * check the result of our bulk lookup
+ */
+
+ /* prefetching for table entries should be done here */
+ for (j = 0; j < n_pkts; j++) {
+ if (p_nat->lkup_indx[j] >= 0)
+ rte_prefetch0(&napt_hash_tbl_entries
+ [p_nat->lkup_indx[j]]);
+ }
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_ipv6_prv(&pkts[i], i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_ipv6_prv(pkts[i], i, arg, p_nat);
+
+ if (p_nat->invalid_packets) {
+ /* get rid of invalid packets */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1) {
+ printf("valid_packets:0x%jx\n", p_nat->valid_packets);
+ printf("rte_valid_packets :0x%jx\n", rte_p->pkts_mask);
+ printf("invalid_packets:0x%jx\n",
+ p_nat->invalid_packets);
+ printf("rte_invalid_packets :0x%jx\n",
+ rte_p->pkts_drop_mask);
+ printf("Total pkts dropped :0x%jx\n",
+ rte_p->n_pkts_ah_drop);
+ }
+ #endif
+ }
+
+ return p_nat->valid_packets;
+}
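+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): the handler
+ * above walks the burst four packets at a time (n_pkts & ~0x3) and then
+ * finishes the remainder one by one. The worker functions below are
+ * placeholders standing in for the pkt4_work/pkt_work pairs.
+ */
+#if 0
+#include <stdint.h>
+
+static void example_work1(void *pkt, uint32_t idx)
+{
+	(void)pkt;
+	(void)idx;
+}
+
+static void example_work4(void **pkts, uint32_t first)
+{
+	uint32_t k;
+
+	for (k = 0; k < 4; k++)
+		example_work1(pkts[k], first + k);
+}
+
+static void example_process_burst(void **pkts, uint32_t n_pkts)
+{
+	uint32_t i;
+
+	for (i = 0; i < (n_pkts & ~0x3U); i += 4)	/* groups of four */
+		example_work4(&pkts[i], i);
+
+	for (; i < n_pkts; i++)				/* remainder */
+		example_work1(pkts[i], i);
+}
+#endif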
+
+
+/**
+ * Input port handler for IPv6 public traffic.
+ * Filters unwanted packets from the burst, calculates lookup keys,
+ * performs a bulk lookup and, based on the result, updates the
+ * NAPT table and applies NAPT translation to the packets.
+ *
+ * @param rte_p
+ * A pointer to struct rte_pipeline
+ * @param pkts
+ * A pointer to array of packets mbuf
+ * @param n_pkts
+ * Number of packets in the burst
+ * @param arg
+ * Void pointer
+ *
+ * @return
+ * int that is not checked by caller
+ */
+static int cgnapt_in_port_ah_ipv6_pub(struct rte_pipeline *rte_p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts, void *arg)
+{
+ uint32_t i, j;
+ struct pipeline_cgnapt_in_port_h_arg *ap = arg;
+ struct pipeline_cgnapt *p_nat = ap->p;
+
+ p_nat->pkt_burst_cnt = 0; /* for dynamic napt */
+ p_nat->valid_packets = rte_p->pkts_mask; /*n_pkts; */
+ p_nat->invalid_packets = 0;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("cgnapt_key hit fn: %" PRIu32 "\n", n_pkts);
+ #endif
+
+ /* prefetching for mbufs should be done here */
+ for (j = 0; j < n_pkts; j++)
+ rte_prefetch0(pkts[j]);
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_key_ipv6_pub(&pkts[i], i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_key_ipv6_pub(pkts[i], i, arg, p_nat);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ if (unlikely(p_nat->valid_packets == 0)) {
+ /* no suitable packet for lookup */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+ return p_nat->valid_packets;
+ }
+
+ /* lookup entries in the common napt table */
+
+ int lookup_result = rte_hash_lookup_bulk(
+ napt_common_table,
+ (const void **) &p_nat->key_ptrs,
+ /* should be minus num invalid pkts */
+ n_pkts,
+ /*new pipeline data member */
+ &p_nat->lkup_indx[0]);
+
+ if (unlikely(lookup_result < 0)) {
+ /* unknown error, just discard all packets */
+ printf("Unexpected hash lookup error %d, "
+ "discarding all packets",
+ lookup_result);
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->valid_packets);
+ return 0;
+ }
+
+ /* Now call second stage of pipeline to one by one
+ * check the result of our bulk lookup
+ */
+
+ /* prefetching for table entries should be done here */
+ for (j = 0; j < n_pkts; j++) {
+ if (p_nat->lkup_indx[j] >= 0)
+ rte_prefetch0(&napt_hash_tbl_entries
+ [p_nat->lkup_indx[j]]);
+ }
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4)
+ pkt4_work_cgnapt_ipv6_pub(&pkts[i], i, arg, p_nat);
+
+ for (; i < n_pkts; i++)
+ pkt_work_cgnapt_ipv6_pub(pkts[i], i, arg, p_nat);
+
+ if (p_nat->invalid_packets) {
+ /* get rid of invalid packets */
+ rte_pipeline_ah_packet_drop(rte_p, p_nat->invalid_packets);
+
+ p_nat->valid_packets &= ~(p_nat->invalid_packets);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1) {
+ printf("valid_packets:0x%jx\n", p_nat->valid_packets);
+ printf("rte_valid_packets :0x%jx\n", rte_p->pkts_mask);
+ printf("invalid_packets:0x%jx\n",
+ p_nat->invalid_packets);
+ printf("rte_invalid_packets :0x%jx\n",
+ rte_p->pkts_drop_mask);
+ printf("Total pkts dropped :0x%jx\n",
+ rte_p->n_pkts_ah_drop);
+ }
+ #endif
+ }
+
+ return p_nat->valid_packets;
+}
+
+/**
+ * Function to send ICMP dest unreachable msg
+ *
+ */
+void send_icmp_dest_unreachable_msg(void)
+{
+
+ struct ether_hdr *eth_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+ struct rte_mbuf *icmp_pkt = cgnapt_icmp_pkt;
+
+ if (icmp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmp_pkt rte_mbuf\n");
+ return;
+ }
+ uint16_t port_id;
+ port_id = icmp_pkt->port;
+
+ struct app_link_params *link;
+ link = &mylink[port_id];
+ eth_h = rte_pktmbuf_mtod(icmp_pkt, struct ether_hdr *);
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmp_h = (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ struct ether_addr gw_addr;
+ struct ether_addr dst_addr;
+ ether_addr_copy(&eth_h->s_addr, &dst_addr);
+ rte_eth_macaddr_get(port_id, &gw_addr);
+ ether_addr_copy(&gw_addr, &eth_h->s_addr);
+ ether_addr_copy(&dst_addr, &eth_h->d_addr);
+
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+ ip_h->version_ihl = IP_VHL_DEF;
+ ip_h->type_of_service = 0;
+ ip_h->total_length = rte_cpu_to_be_16(sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr));
+ ip_h->packet_id = 0xaabb;
+ ip_h->fragment_offset = 0x0000;
+ ip_h->time_to_live = 64;
+ ip_h->next_proto_id = 1;
+
+ uint32_t *src_addr;
+ uint32_t src_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST;
+ src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR(cgnapt_icmp_pkt, src_addr_offset);
+
+ ip_h->dst_addr = *src_addr;
+ ip_h->src_addr = rte_bswap32(link->ip);
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = rte_ipv4_cksum(ip_h);
+ icmp_h->icmp_type = 3; /* Destination Unreachable */
+ icmp_h->icmp_code = 13; /* Communication administratively prohibited */
+
+ icmp_h->icmp_cksum = ~rte_raw_cksum(icmp_h, sizeof(struct icmp_hdr));
+
+ icmp_pkt->pkt_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr);
+ icmp_pkt->data_len = icmp_pkt->pkt_len;
+ if (ARPICMP_DEBUG) {
+ printf("Sending ICMP error message - "
+ "Destination Unreachable\n");
+ }
+ rte_pipeline_port_out_packet_insert(myP, port_id, icmp_pkt);
+}
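+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): the IPv4 and
+ * ICMP checksums filled in above are the standard Internet checksum
+ * (RFC 1071): sum the data as 16-bit words, fold the carries, and take the
+ * one's complement. A minimal reference version is shown below.
+ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+
+static uint16_t example_inet_cksum(const void *data, size_t len)
+{
+	const uint16_t *p = data;
+	uint32_t sum = 0;
+
+	while (len > 1) {
+		sum += *p++;
+		len -= 2;
+	}
+	if (len)				/* odd trailing byte */
+		sum += *(const uint8_t *)p;
+
+	while (sum >> 16)			/* fold carries */
+		sum = (sum & 0xffff) + (sum >> 16);
+
+	return (uint16_t)~sum;
+}
+#endif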
+
+/**
+ * Function to add a dynamic NAPT entry pair
+ *
+ * @param p
+ * A pointer to struct pipeline
+ * @param key
+ * A pointer to struct pipeline_cgnapt_entry_key
+ * @param timeout
+ *  Expiry time of a dynamic or PCP-requested entry
+ * @param pkt_type
+ *  Entry type, CGNAPT_ENTRY_IPV4 or CGNAPT_ENTRY_IPV6
+ * @param src_addr
+ *  uint8_t pointer to the source address
+ * @param err
+ *  Set to 1 on failure
+ *
+ * @return
+ * A pointer to struct cgnapt_table_entry for added entry
+ */
+
+struct cgnapt_table_entry *add_dynamic_cgnapt_entry(
+ struct pipeline *p,
+ struct pipeline_cgnapt_entry_key *key,
+ uint32_t timeout,
+ uint8_t pkt_type,
+ uint8_t *src_addr,
+ uint8_t *err)
+{
+ int port_num = 0;
+ void *entry_ptr, *ret_ptr;
+ int ret = 0, i;
+
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 1) {
+ printf("Th%d add_dynamic_cgnapt_entry key detail Entry:"
+ "0x%x, %d, %d\n", p_nat->pipeline_num, key->ip, key->port,
+ key->pid);
+ }
+ #endif
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX && i < p_nat->pkt_burst_cnt;
+ i++) {
+ if (p_nat->cgnapt_dyn_ent_table[i].ip == key->ip
+ && p_nat->cgnapt_dyn_ent_table[i].port == key->port
+ && p_nat->cgnapt_dyn_ent_table[i].pid == key->pid) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("add_dynamic_cgnapt_entry:pkt_burst "
+ "array key matched!!!\n");
+ #endif
+
+ return &napt_hash_tbl_entries
+ [p_nat->cgnapt_dyn_ent_index[i]];
+ }
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+
+ ret = increment_max_port_counter(key->ip, key->pid, p_nat);
+ if (ret == MAX_PORT_INC_ERROR) {
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount5++;
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("add_dynamic_cgnapt_entry:"
+ "increment_max_port_counter-1 failed\n");
+ #endif
+
+ *err = 1;
+ return NULL;
+ }
+
+ if (ret == MAX_PORT_INC_REACHED) {
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount6++;
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("add_dynamic_cgnapt_entry:"
+ "increment_max_port_counter-2 failed\n");
+ #endif
+
+ *err = 1;
+ return NULL;
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ uint32_t public_ip;
+ port_num = get_free_iport(p_nat, &public_ip);
+
+ if (port_num == -1) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2) {
+ printf("add_dynamic_cgnapt_entry: %d\n", port_num);
+ printf("add_dynamic_cgnapt_entry key detail:0x%x, "
+ "%d, %d\n", key->ip, key->port, key->pid);
+ }
+ #endif
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount7++;
+ #endif
+
+ *err = 1;
+ return NULL;
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (!nat_only_config_flag) {
+ #endif
+
+ if (ret == 2) { //MPPC_NEW_ENTRY
+
+ /* check for max_clients_per_ip */
+ if (rte_atomic16_read
+ (&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) %
+ CGNAPT_MAX_PUB_IP].count) ==
+ p_nat->max_clients_per_ip) {
+
+ /* For now just bail out
+ * In future we can think about
+ * retrying getting a new iport
+ */
+
+ release_iport(port_num, public_ip, p_nat);
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount10++;
+ #endif
+ *err = 1;
+ return NULL;
+ }
+
+ rte_atomic16_inc(&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) %
+ CGNAPT_MAX_PUB_IP].count);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if ((rte_jhash(&public_ip, 4, 0) %
+ CGNAPT_MAX_PUB_IP) == 8)
+ printf("pub ip:%x coutn:%d\n", public_ip,
+ rte_atomic16_read(&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) %
+ CGNAPT_MAX_PUB_IP].count));
+ #endif
+
+ }
+ #ifdef NAT_ONLY_CONFIG_REQ
+ }
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 0) {
+ printf("add_dynamic_cgnapt_entry: %d\n",
+ port_num);
+ printf("add_dynamic_cgnapt_entry key detail: "
+ "0x%x, %d, %d\n", key->ip, key->port, key->pid);
+ }
+ #endif
+
+ struct cgnapt_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ /* made it configurable below */
+ {.port_id = p->port_out_id[0]},
+ },
+
+ .data = {
+ .prv_port = key->port,
+ .pub_ip = public_ip,
+ .pub_port = port_num,
+ .prv_phy_port = key->pid,
+ .pub_phy_port = get_pub_to_prv_port(
+ &public_ip,
+ IP_VERSION_4),
+ .ttl = 0,
+ /* if(timeout == -1) : static entry
+ * if(timeout == 0 ) : dynamic entry
+ * if(timeout > 0 ) : PCP requested entry
+ */
+ .timeout = timeout > 0 ? timeout : 0,
+ #ifdef PCP_ENABLE
+ .timer = NULL,
+ #endif
+ }
+ };
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag) {
+ entry.data.prv_port = 0xffff;
+ entry.data.pub_port = 0xffff;
+ }
+ #endif
+
+ if (pkt_type == CGNAPT_ENTRY_IPV6) {
+ entry.data.type = CGNAPT_ENTRY_IPV6;
+ memcpy(&entry.data.u.prv_ipv6[0], src_addr, 16);
+ } else {
+ entry.data.u.prv_ip = key->ip;
+ entry.data.type = CGNAPT_ENTRY_IPV4;
+ }
+
+ //entry.head.port_id = CGNAPT_PUB_PORT_ID; /* outgoing port info */
+ entry.head.port_id = entry.data.pub_phy_port; /* outgoing port info */
+
+ struct pipeline_cgnapt_entry_key second_key;
+ /* Need to add a second ingress entry */
+ second_key.ip = public_ip;
+ second_key.port = port_num;
+ second_key.pid = 0xffff;
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ second_key.port = 0xffff;
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("add_dynamic_cgnapt_entry second key detail:"
+ "0x%x, %d, %d\n", second_key.ip, second_key.port,
+ second_key.pid);
+ #endif
+
+ int32_t position = rte_hash_add_key(napt_common_table, (void *)key);
+
+ if (position < 0) {
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount8++;
+ #endif
+
+ printf("CG-NAPT entry add failed ...returning "
+ "without adding ... %d\n", position);
+ *err = 1;
+ return NULL;
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG) {
+ printf("add_dynamic_cgnapt_entry\n");
+ print_key(key);
+ print_cgnapt_entry(&entry);
+ }
+ #endif
+
+ memcpy(&napt_hash_tbl_entries[position], &entry,
+ sizeof(struct cgnapt_table_entry));
+
+ /* this pointer is returned to pkt miss function */
+ ret_ptr = &napt_hash_tbl_entries[position];
+
+ p_nat->n_cgnapt_entry_added++;
+ p_nat->dynCgnaptCount++;
+
+ /* Now modify the forward port for reverse entry */
+
+ /* outgoing port info */
+ //entry.head.port_id = CGNAPT_PRV_PORT_ID;
+ /* outgoing port info */
+ entry.head.port_id = entry.data.prv_phy_port;
+
+ int32_t position2 = rte_hash_add_key(napt_common_table, &second_key);
+
+ if (position2 < 0) {
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount9++;
+ #endif
+ printf("CG-NAPT entry reverse bulk add failed ..."
+ "returning with fwd add ...%d\n",
+ position2);
+ *err = 1;
+ return NULL;
+ }
+
+ memcpy(&napt_hash_tbl_entries[position2], &entry,
+ sizeof(struct cgnapt_table_entry));
+
+ entry_ptr = &napt_hash_tbl_entries[position2];
+
+ timer_thread_enqueue(key, &second_key, ret_ptr,
+ entry_ptr, (struct pipeline *)p_nat);
+
+ p_nat->n_cgnapt_entry_added++;
+ p_nat->dynCgnaptCount++;
+
+ if (p_nat->pkt_burst_cnt < RTE_PORT_IN_BURST_SIZE_MAX) {
+ memcpy(&p_nat->cgnapt_dyn_ent_table[p_nat->pkt_burst_cnt], key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ p_nat->cgnapt_dyn_ent_index[p_nat->pkt_burst_cnt] = position;
+ p_nat->pkt_burst_cnt++;
+ }
+ return ret_ptr;
+}
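+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): every dynamic
+ * translation installs two table keys, the egress (private side) key and a
+ * mirror ingress key built from the allocated public ip:port with the
+ * physical port wildcarded, as done with second_key above. The struct and
+ * helper names are simplified stand-ins.
+ */
+#if 0
+#include <stdint.h>
+
+struct example_ingress_key {
+	uint32_t ip;
+	uint16_t port;
+	uint16_t pid;
+};
+
+static void example_make_reverse_key(struct example_ingress_key *rev,
+				     uint32_t public_ip, uint16_t public_port,
+				     int nat_only)
+{
+	rev->ip = public_ip;
+	rev->port = nat_only ? 0xffff : public_port;
+	rev->pid = 0xffff;		/* match traffic from any public port */
+}
+#endif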
+
+int pkt_miss_cgnapt_count;
+/**
+ * Function to handle a missed NAPT entry lookup.
+ * It attempts to add a dynamic entry pair.
+ *
+ * @param p
+ * A pointer to struct pipeline
+ * @param key
+ * A pointer to struct pipeline_cgnapt_entry_key
+ * @param pkt
+ * A pointer to pkt struct rte_mbuf
+ * @param pkts_mask
+ *  uint64_t pointer to the pkt mask (unused)
+ * @param table_entry
+ * A pointer to struct rte_pipeline_table_entry to be created and returned
+ * @param pkt_num
+ * number of this pkt in current burst
+ *
+ * @return
+ * A uint64_t mask for drop packets
+ */
+uint64_t
+pkt_miss_cgnapt(struct pipeline_cgnapt_entry_key *key,
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry **table_entry,
+ __rte_unused uint64_t *pkts_mask,
+ uint32_t pkt_num, void *arg)
+{
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 0)
+ printf("\n pkt_miss_cgnapt\n");
+ #endif
+ /* In egress case
+ * get src address
+ * see if get_port passes for this src address
+ * if passed add a new egress entry and a
+ * corresponding new ingress entry
+ * return the fwd entry to calling function using input pointer
+ * else if get_port fails drop packet
+ */
+
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)arg;
+
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+ uint32_t src_addr_offset_ipv6 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_SRC_ADR_OFST;
+ uint16_t phy_port = pkt->port;
+
+ uint16_t *eth_proto =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
+
+ uint8_t *src_addr = NULL;
+ uint8_t src_addr_ipv6[16];
+ uint8_t pkt_type = CGNAPT_ENTRY_IPV4;
+ /* To drop the packet */
+ uint64_t drop_mask = 0;
+
+ if (p_nat->is_static_cgnapt) {
+ drop_mask |= 1LLU << pkt_num;
+ p_nat->missedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount1++;
+ #endif
+ return drop_mask;
+ }
+
+ if (rte_be_to_cpu_16(*eth_proto) == ETHER_TYPE_IPv6) {
+ src_addr =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, src_addr_offset_ipv6);
+ pkt_type = CGNAPT_ENTRY_IPV6;
+ memcpy(src_addr_ipv6, src_addr, 16);
+ }
+
+ uint8_t err = 0;
+
+ /* some validation first */
+ if (is_phy_port_privte(phy_port)) {
+ /* dynamic NAPT entry creation */
+ *table_entry = (struct rte_pipeline_table_entry *)
+ add_dynamic_cgnapt_entry(
+ (struct pipeline *)&p_nat->p,
+ key,
+ DYNAMIC_CGNAPT_TIMEOUT,
+ pkt_type,
+ src_addr_ipv6, &err);
+
+ if (!(*table_entry)) {
+ if (err) {
+ drop_mask |= 1LLU << pkt_num;
+ p_nat->missedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount2++;
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("Add Dynamic NAT entry failed "
+ "in pkt!!!\n");
+ #endif
+ } else {
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount11++;
+ #endif
+ }
+
+ }
+
+ } else if (!is_phy_port_privte(phy_port)) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 2) {
+ printf("Initial Ingress entry creation NOT ALLOWED "
+ "%d\n", phy_port);
+ }
+ #endif
+
+ drop_mask |= 1LLU << pkt_num;
+ p_nat->missedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount3++;
+ #endif
+ } else {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 1)
+ printf("NOT a PRIVATE or PUBLIC port!!!!!\n");
+ #endif
+
+ drop_mask |= 1LLU << pkt_num;
+ p_nat->missedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->missedpktcount4++;
+ #endif
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 5)
+ print_pkt(pkt);
+ #endif
+
+ return drop_mask;
+}
+
+int numprints;
+
+/**
+ * Function to print the contents of a packet
+ *
+ * @param pkt
+ * A pointer to pkt struct rte_mbuf
+ */
+void print_pkt(struct rte_mbuf *pkt)
+{
+ int i = 0, j = 0;
+
+ printf("\nPacket Contents:\n");
+
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
+
+ for (i = 0; i < 20; i++) {
+ for (j = 0; j < 20; j++)
+ printf("%02x ", rd[(20 * i) + j]);
+
+ printf("\n");
+ }
+}
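+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): a generic hex
+ * dump with an offset column, equivalent in spirit to the fixed 20x20 dump
+ * in print_pkt() above.
+ */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+static void example_hexdump(const uint8_t *buf, uint32_t len)
+{
+	uint32_t i;
+
+	for (i = 0; i < len; i++) {
+		if ((i % 16) == 0)
+			printf("\n%04x: ", i);
+		printf("%02x ", buf[i]);
+	}
+	printf("\n");
+}
+#endif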
+
+rte_table_hash_op_hash cgnapt_hash_func[] = {
+ hash_default_key8,
+ hash_default_key16,
+ hash_default_key24,
+ hash_default_key32,
+ hash_default_key40,
+ hash_default_key48,
+ hash_default_key56,
+ hash_default_key64
+};
+
+/**
+ * Function to parse incoming pipeline arguments
+ * Called during pipeline initialization
+ *
+ * @param p
+ * A pointer to struct pipeline_cgnapt
+ * @param params
+ * A pointer to struct pipeline_params
+ *
+ * @return
+ * 0 if success, negative if failure
+ */
+static int
+pipeline_cgnapt_parse_args(struct pipeline_cgnapt *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_flows_present = 0;
+ uint32_t key_offset_present = 0;
+ uint32_t key_size_present = 0;
+ uint32_t hash_offset_present = 0;
+ uint32_t n_entries_present = 0;
+ uint32_t max_port_present = 0;
+ uint32_t max_client_present = 0;
+ uint32_t public_ip_range_present = 0;
+ uint32_t public_ip_port_range_present = 0;
+ uint32_t i;
+ uint8_t public_ip_count = 0;
+ uint8_t public_ip_range_count = 0;
+ uint8_t dest_if_offset_present = 0;
+ uint8_t cgnapt_meta_offset_present = 0;
+ uint8_t prv_que_handler_present = 0;
+ uint8_t n_prv_in_port = 0;
+
+ if (CGNAPT_DEBUG > 2) {
+ printf("CGNAPT pipeline_cgnapt_parse_args params->n_args: %d\n",
+ params->n_args);
+ }
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (CGNAPT_DEBUG > 2) {
+ printf("CGNAPT args[%d]: %s %d, %s\n", i, arg_name,
+ atoi(arg_value), arg_value);
+ }
+ if (strcmp(arg_name, "prv_que_handler") == 0) {
+
+ if (prv_que_handler_present) {
+ printf("Duplicate pktq_in_prv ..\n\n");
+ return -1;
+ }
+ prv_que_handler_present = 1;
+ n_prv_in_port = 0;
+
+ char *token;
+ int rxport = 0;
+ /* get the first token */
+ token = strtok(arg_value, "(");
+ token = strtok(token, ")");
+ token = strtok(token, ",");
+ printf("***** prv_que_handler *****\n");
+
+ if (token == NULL) {
+ printf("string is null\n");
+ printf("invalid prv_que_handler value/n");
+ return -1;
+ }
+ printf("string is :%s\n", token);
+
+ /* walk through other tokens */
+ while (token != NULL) {
+ printf(" %s\n", token);
+ rxport = atoi(token);
+ cgnapt_prv_que_port_index[n_prv_in_port++] =
+ rxport;
+ if (rxport < PIPELINE_MAX_PORT_IN)
+ cgnapt_in_port_egress_prv[rxport] = 1;
+ token = strtok(NULL, ",");
+ }
+
+ if (n_prv_in_port == 0) {
+ printf("VNF common parse err - "
+ "no prv RX phy port\n");
+ return -1;
+ }
+ continue;
+ }
+
+ if (strcmp(arg_name, "cgnapt_meta_offset") == 0) {
+ if (cgnapt_meta_offset_present) {
+ printf("CG-NAPT parse error:");
+ printf("cgnapt_meta_offset initizlized "
+ "mulitple times\n");
+ return -1;
+ }
+ cgnapt_meta_offset_present = 1;
+ int temp;
+ temp = atoi(arg_value);
+
+ if (temp > 256) {
+ printf("cgnapt_meta_offset is invalid :");
+ printf("Not be more than metadata size\n");
+ return -1;
+ }
+ cgnapt_meta_offset = (uint16_t) temp;
+ }
+ if (strcmp(arg_name, "vnf_set") == 0)
+ vnf_set_count++;
+
+ if (strcmp(arg_name, "public_ip_range") == 0) {
+ public_ip_range_present = 1;
+ if (public_ip_port_range_present) {
+ printf("CG-NAPT parse error:");
+ printf("public_ip_range with "
+ "public_ip_port_range_present\n");
+ return -1;
+ }
+
+ p->pub_ip_range = rte_realloc(p->pub_ip_range,
+ sizeof(struct
+ pub_ip_range),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!p->pub_ip_range) {
+ printf("Memory allocation failed for "
+ "pub_ip_range\n");
+ return -1;
+ }
+
+ uint32_t sip = 0, eip = 0;
+
+ if (sscanf(arg_value, "(%x,%x)", &sip, &eip) != 2) {
+ printf("public_ip_range is invalid\n");
+ return -1;
+ }
+
+ if (sip <= 0 || eip <= 0 || sip >= eip) {
+ printf("public_ip_range is invalid %x-%x\n",
+ sip, eip);
+ return -1;
+ }
+
+ printf("public_ip_range: %d-%d\n",
+ p->pub_ip_range[public_ip_range_count].
+ start_ip = sip,
+ p->pub_ip_range[public_ip_range_count].
+ end_ip = eip);
+
+ p->pub_ip_range_count = ++public_ip_range_count;
+ continue;
+ }
+
+ if (strcmp(arg_name, "public_ip_port_range") == 0) {
+ public_ip_port_range_present = 1;
+ if (nat_only_config_flag || public_ip_range_present) {
+
+ printf("CG-NAPT parse error:");
+ printf("nat_only_config_flag OR ");
+ printf("public_ip_range_present with "
+ "public_ip_port_range_present\n");
+ return -1;
+ }
+
+ p->pub_ip_port_set = rte_realloc(
+ p->pub_ip_port_set,
+ sizeof(struct pub_ip_port_set),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!p->pub_ip_port_set) {
+ printf("Memory allocation failed for "
+ "public IP\n");
+ return -1;
+ }
+
+ uint32_t ip = 0;
+ int sp = 0, ep = 0;
+
+ if (sscanf(arg_value, "%x:(%d,%d)",
+ &ip, &sp, &ep) != 3) {
+ printf("Public IP or Port-range is invalid\n");
+ return -1;
+ }
+
+ if (ip <= 0 || sp <= 0 || ep <= 0 || sp > ep) {
+ printf("Public IP or Port-range is invalid "
+ "%x:%d-%d\n", ip, sp, ep);
+ return -1;
+ }
+
+ printf("public_ip: 0x%x Range:%d-%d\n",
+ p->pub_ip_port_set[public_ip_count].ip = ip,
+ p->pub_ip_port_set[public_ip_count].start_port = sp,
+ p->pub_ip_port_set[public_ip_count].end_port = ep);
+
+ napt_port_alloc_elem_count += (ep - sp + 1);
+ printf("parse - napt_port_alloc_elem_count :%d\n",
+ napt_port_alloc_elem_count);
+
+ /* Store all public IPs of all CGNAPT threads
+ * in the global variable
+ */
+ /* to revisit indexing */
+ all_public_ip[rte_jhash(&ip, 4, 0) %
+ CGNAPT_MAX_PUB_IP].ip = ip;
+ p->pub_ip_count = ++public_ip_count;
+ printf("public_ip_count:%d hash:%d\n", public_ip_count,
+ rte_jhash(&ip, 4, 0) % CGNAPT_MAX_PUB_IP);
+ continue;
+ }
+
+ /* hw_checksum_reqd */
+ if (strcmp(arg_name, "hw_checksum_reqd") == 0) {
+ int temp;
+ temp = atoi(arg_value);
+ if ((temp != 0) && (temp != 1)) {
+ printf("hw_checksum_reqd is invalid\n");
+ return -1;
+ }
+ p->hw_checksum_reqd = temp;
+ continue;
+ }
+
+ /* nat_only_config_flag */
+ if (strcmp(arg_name, "nat_only_config_flag") == 0) {
+ nat_only_config_flag = 1;
+ if (public_ip_port_range_present) {
+
+ printf("CG-NAPT parse error:");
+ printf("nat_only_config_flag with "
+ "public_ip_port_range_present\n");
+ return -1;
+ }
+ continue;
+ }
+
+ /* max_port_per_client */
+ if (strcmp(arg_name, "max_port_per_client") == 0) {
+ if (max_port_present) {
+ printf("CG-NAPT Parse Error: "
+ "duplicate max_port_per_client\n");
+ return -1;
+ }
+ max_port_present = 1;
+
+ int max = 0;
+ max = atoi(arg_value);
+ if (max <= 0) {
+ printf("max_port_per_client is invalid !!!\n");
+ return -1;
+ }
+
+ p->max_port_per_client = (uint16_t) max;
+
+ if (p->max_port_per_client <= 0) {
+ printf("max port per client is invalid\n");
+ return -1;
+ }
+
+ printf("max_port_per_client comp: %d\n",
+ p->max_port_per_client);
+ continue;
+ }
+
+ /* max_clients_per_ip */
+ if (strcmp(arg_name, "max_clients_per_ip") == 0) {
+ if (max_client_present) {
+ printf("CG-NAPT parse Error: duplicate "
+ "max_clients_per_ip\n");
+ return -1;
+ }
+ max_client_present = 1;
+
+ if (nat_only_config_flag) {
+ printf("CG-NAPT parse error:");
+ printf("nat_only_config_flag with "
+ "max_clients_per_ip\n");
+ return -1;
+ }
+
+ int max = 0;
+ max = atoi(arg_value);
+ if (max <= 0) {
+ printf("max_clients_per_ip is invalid !!!\n");
+ return -1;
+ }
+
+ p->max_clients_per_ip = (uint16_t) max;
+
+ if (p->max_clients_per_ip <= 0) {
+ printf("max_clients_per_ip is invalid\n");
+ return -1;
+ }
+
+ printf("max_clients_per_ip: %d\n",
+ p->max_clients_per_ip);
+ continue;
+ }
+
+ /* n_entries */
+ if (strcmp(arg_name, "n_entries") == 0) {
+ if (n_entries_present)
+ return -1;
+ n_entries_present = 1;
+
+ p->n_entries = atoi(arg_value);
+ if (p->n_entries == 0)
+ return -1;
+
+ continue;
+ }
+
+ /* n_flows */
+ if (strcmp(arg_name, "n_flows") == 0) {
+ if (n_flows_present)
+ return -1;
+ n_flows_present = 1;
+
+ p->n_flows = atoi(arg_value);
+ if (p->n_flows == 0)
+ return -1;
+
+ napt_common_table_hash_params.entries = p->n_flows;
+ continue;
+ }
+ /* dest_if_offset Multiport Changes */
+ if (strcmp(arg_name, "dest_if_offset") == 0) {
+ if (dest_if_offset_present)
+ return -1;
+ //dest_if_offset_present = 1;
+
+ dest_if_offset = atoi(arg_value);
+
+ continue;
+ }
+
+ /* key_offset */
+ if (strcmp(arg_name, "key_offset") == 0) {
+ if (key_offset_present)
+ return -1;
+ key_offset_present = 1;
+
+ p->key_offset = atoi(arg_value);
+
+ continue;
+ }
+
+ /* key_size */
+ if (strcmp(arg_name, "key_size") == 0) {
+ if (key_size_present)
+ return -1;
+ key_size_present = 1;
+
+ p->key_size = atoi(arg_value);
+ if ((p->key_size == 0) ||
+ (p->key_size > PIPELINE_CGNAPT_KEY_MAX_SIZE) ||
+ (p->key_size % 8))
+ return -1;
+
+ continue;
+ }
+
+ /* hash_offset */
+ if (strcmp(arg_name, "hash_offset") == 0) {
+ if (hash_offset_present)
+ return -1;
+ hash_offset_present = 1;
+
+ p->hash_offset = atoi(arg_value);
+
+ continue;
+ }
+
+ /* pkt_type (traffic type) */
+ if (strcmp(arg_name, "pkt_type") == 0) {
+ if (strcmp(arg_value, "ipv4") == 0) {
+ p->traffic_type = TRAFFIC_TYPE_IPV4;
+ printf("Traffic is set to IPv4\n");
+ } else if (strcmp(arg_value, "ipv6") == 0) {
+ p->traffic_type = TRAFFIC_TYPE_IPV6;
+ printf("Traffic is set to IPv6\n");
+ }
+ continue;
+ }
+
+ /* cgnapt_debug */
+ if (strcmp(arg_name, "cgnapt_debug") == 0) {
+ CGNAPT_DEBUG = atoi(arg_value);
+
+ continue;
+ }
+
+ /* any other unknown argument is currently ignored */
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag) {
+ if (!public_ip_range_count) {
+ printf("No public_ip_range %d for NAT only config.\n",
+ public_ip_range_count);
+ printf("Running static NAT only configuration\n");
+ p->is_static_cgnapt = 1;
+ }
+ }
+ #else
+
+ if (!p->max_port_per_client)
+ p->is_static_cgnapt = 1;
+ #endif
+
+ /* Check that mandatory arguments are present */
+ if ((n_flows_present == 0) ||
+ (cgnapt_meta_offset_present == 0))
+ return -1;
+
+ return 0;
+
+}
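+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): parsing a
+ * public_ip_port_range value of the form "c0a80001:(1024,65535)" with
+ * sscanf, as the argument handler above does, and accumulating the number
+ * of ports the allocator will need. Names are illustrative.
+ */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+static int example_parse_ip_port_range(const char *arg_value,
+					uint32_t *total_ports)
+{
+	uint32_t ip = 0;
+	int sp = 0, ep = 0;
+
+	if (sscanf(arg_value, "%x:(%d,%d)", &ip, &sp, &ep) != 3)
+		return -1;
+	if (ip == 0 || sp <= 0 || ep <= 0 || sp > ep)
+		return -1;
+
+	*total_ports += (uint32_t)(ep - sp + 1);
+	return 0;
+}
+#endif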
+
+/**
+ * Function to initialize the pipeline
+ *
+ * @param params
+ * A pointer to struct pipeline_params
+ * @param arg
+ * Void pointer - points to app params
+ *
+ * @return
+ *  void pointer to the pipeline, NULL if failure
+ */
+static void *pipeline_cgnapt_init(struct pipeline_params *params, void *arg)
+ /* (struct app_params *app) save it for use in port in handler */
+{
+ struct pipeline *p;
+ struct pipeline_cgnapt *p_nat;
+ uint32_t size, i, in_ports_arg_size;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) || (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_cgnapt));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_nat = (struct pipeline_cgnapt *)p;
+ if (p == NULL)
+ return NULL;
+
+ all_pipeline_cgnapt[n_cgnapt_pipeline++] = p_nat;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "CG-NAPT");
+ /* Initialize all counters and arrays */
+
+ p_nat->n_cgnapt_entry_deleted = 0;
+ p_nat->n_cgnapt_entry_added = 0;
+ p_nat->naptedPktCount = 0;
+ p_nat->naptDroppedPktCount = 0;
+ p_nat->inaptedPktCount = 0;
+ p_nat->enaptedPktCount = 0;
+ p_nat->receivedPktCount = 0;
+ p_nat->missedPktCount = 0;
+ p_nat->dynCgnaptCount = 0;
+ p_nat->arpicmpPktCount = 0;
+
+ p_nat->app_params_addr = (uint64_t) arg;
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ p_nat->links_map[i] = 0xff;
+ p_nat->outport_id[i] = 0xff;
+ cgnapt_in_port_egress_prv[i] = 0;
+ cgnapt_prv_que_port_index[i] = 0;
+ }
+ p_nat->pipeline_num = 0xff;
+ p_nat->hw_checksum_reqd = 0;
+ p_nat->pub_ip_port_set = NULL;
+ p_nat->pub_ip_count = 0;
+ p_nat->traffic_type = TRAFFIC_TYPE_MIX;
+ p_nat->vnf_set = 0xff;
+
+ /* For every init it should be reset */
+ napt_port_alloc_elem_count = 0;
+
+ #ifdef CGNAPT_TIMING_INST
+ p_nat->in_port_exit_timestamp = 0;
+ p_nat->external_time_sum = 0;
+ p_nat->internal_time_sum = 0;
+ p_nat->time_measurements = 0;
+ p_nat->max_time_mesurements = 10000;
+ p_nat->time_measurements_on = 0;
+ #endif
+
+ #ifdef CGNAPT_DEBUGGING
+
+ p_nat->naptDebugCount = 0;
+
+ p_nat->naptDroppedPktCount1 = 0;
+ p_nat->naptDroppedPktCount2 = 0;
+ p_nat->naptDroppedPktCount3 = 0;
+ p_nat->naptDroppedPktCount4 = 0;
+ p_nat->naptDroppedPktCount5 = 0;
+ p_nat->naptDroppedPktCount6 = 0;
+
+ p_nat->missedpktcount1 = 0;
+ p_nat->missedpktcount2 = 0;
+ p_nat->missedpktcount3 = 0;
+ p_nat->missedpktcount4 = 0;
+ p_nat->missedpktcount5 = 0;
+ p_nat->missedpktcount6 = 0;
+ p_nat->missedpktcount7 = 0;
+ p_nat->missedpktcount8 = 0;
+ p_nat->missedpktcount9 = 0;
+ p_nat->missedpktcount10 = 0;
+ p_nat->missedpktcount11 = 0;
+ p_nat->missedpktcount12 = 0;
+
+ p_nat->max_port_dec_err1 = 0;
+ p_nat->max_port_dec_err2 = 0;
+ p_nat->max_port_dec_err3 = 0;
+ p_nat->max_port_dec_success = 0;
+
+ p_nat->pfb_err = 0;
+ p_nat->pfb_ret = 0;
+ p_nat->pfb_get = 0;
+ p_nat->pfb_suc = 0;
+ p_nat->gfp_suc = 0;
+ p_nat->gfp_get = 0;
+ p_nat->gfp_ret = 0;
+ p_nat->gfp_err = 0;
+
+ p_nat->kpc2 = 0;
+ p_nat->kpc1 = 0;
+ #endif
+
+ #ifdef SIP_ALG
+ static int sip_enabled;
+ if (!sip_enabled)
+ lib_sip_alg_init();
+ sip_enabled = 1;
+ #endif /* SIP_ALG */
+
+ /*struct rte_pipeline_table_entry *entries[RTE_HASH_LOOKUP_BULK_MAX];*/
+ /* bitmap of valid packets */
+ p_nat->valid_packets = 0;
+ /* bitmap of invalid packets to be dropped */
+ p_nat->invalid_packets = 0;
+
+ for (i = 0; i < RTE_HASH_LOOKUP_BULK_MAX; i++)
+ p_nat->key_ptrs[i] = &(p_nat->keys[i]);
+
+ p_nat->port_alloc_ring = NULL;
+
+ /* Parse arguments */
+ if (pipeline_cgnapt_parse_args(p_nat, params))
+ return NULL;
+
+ p_nat->vnf_set = vnf_set_count;
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = cgnapt_meta_offset,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ myP = p->p;
+ }
+
+ #ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+
+ uint32_t instr_size =
+ RTE_CACHE_LINE_ROUNDUP((sizeof(uint64_t)) *
+ (INST_ARRAY_SIZE));
+ inst_start_time =
+ (uint64_t *) rte_zmalloc(NULL, instr_size,
+ RTE_CACHE_LINE_SIZE);
+ inst_end_time =
+ (uint64_t *) rte_zmalloc(NULL, instr_size,
+ RTE_CACHE_LINE_SIZE);
+ inst_diff_time =
+ (uint32_t *) rte_zmalloc(NULL, instr_size / 2,
+ RTE_CACHE_LINE_SIZE);
+ if ((inst_start_time == NULL) || (inst_end_time == NULL)
+ || (inst_diff_time == NULL)) {
+ printf("Inst array alloc failed .... ");
+ return NULL;
+ }
+ #endif
+
+ /* Memory allocation for in_port_h_arg */
+ in_ports_arg_size = RTE_CACHE_LINE_ROUNDUP(
+ (sizeof(struct pipeline_cgnapt_in_port_h_arg)) *
+ (params->n_ports_in));
+ struct pipeline_cgnapt_in_port_h_arg *ap =
+ (struct pipeline_cgnapt_in_port_h_arg *)
+ rte_zmalloc(NULL,
+ in_ports_arg_size,
+ RTE_CACHE_LINE_SIZE);
+ if (ap == NULL)
+ return NULL;
+
+ myApp = (struct app_params *) arg;
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ /* passing our cgnapt pipeline in call back arg */
+ (ap[i]).p = p_nat;
+ (ap[i]).in_port_id = i;
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops =
+ pipeline_port_in_params_get_ops(&params->port_in
+ [i]),
+ .arg_create =
+ pipeline_port_in_params_convert(&params->port_in
+ [i]),
+ .f_action = cgnapt_in_port_ah_mix,
+ .arg_ah = &(ap[i]),
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ #ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+ if (i == 0)
+ instrumentation_port_in_arg = &(ap[i]);
+ #endif
+
+ if (p_nat->traffic_type == TRAFFIC_TYPE_IPV4) {
+ /* Private in-port handler */
+ /* Multiport changes */
+ if (cgnapt_in_port_egress_prv[i]) {
+ port_params.f_action =
+ cgnapt_in_port_ah_ipv4_prv;
+ printf("CGNAPT port %d is IPv4 Prv\n", i);
+ } else{
+ port_params.f_action =
+ cgnapt_in_port_ah_ipv4_pub;
+ printf("CGNAPT port %d is IPv4 Pub\n", i);
+ }
+ }
+
+ if (p_nat->traffic_type == TRAFFIC_TYPE_IPV6) {
+ if (cgnapt_in_port_egress_prv[i]) {
+ port_params.f_action =
+ cgnapt_in_port_ah_ipv6_prv;
+ printf("CGNAPT port %d is IPv6 Prv\n", i);
+ } else{
+ port_params.f_action =
+ cgnapt_in_port_ah_ipv6_pub;
+ printf("CGNAPT port %d is IPv6 Pub\n", i);
+ }
+ }
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ #ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+ .f_action = port_out_ah_cgnapt,
+ #else
+ .f_action = NULL,
+ #endif
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ int pipeline_num = 0;
+ int ignore;
+ ignore = sscanf(params->name, "PIPELINE%d", &pipeline_num);
+ if (ignore != 1) {
+ printf("Not able to read pipeline number\n");
+ return NULL;
+ }
+ p_nat->pipeline_num = (uint8_t) pipeline_num;
+ register_pipeline_Qs(p_nat->pipeline_num, p);
+ set_link_map(p_nat->pipeline_num, p, p_nat->links_map);
+ set_outport_id(p_nat->pipeline_num, p, p_nat->outport_id);
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+
+ if (napt_common_table == NULL) {
+ if (create_napt_common_table(p_nat->n_flows)) {
+ PLOG(p, HIGH,
+ "CG-NAPT create_napt_common_table failed.");
+ return NULL;
+ }
+ }
+
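+		/* A single stub table is used: its default entry forwards
+		 * every packet to the out-port already written into packet
+		 * metadata (RTE_PIPELINE_ACTION_PORT_META). The NAPT
+		 * processing itself is done in the in-port action handlers
+		 * (cgnapt_in_port_ah_*) against napt_common_table.
+		 */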
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ int status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ struct rte_pipeline_table_entry default_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT_META
+ };
+ struct rte_pipeline_table_entry *default_entry_ptr;
+ status = rte_pipeline_table_default_entry_add(
+ p->p,
+ p->table_id[0],
+ &default_entry,
+ &default_entry_ptr);
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id
+ [i],
+ p->table_id
+ [0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_nat->custom_handlers,
+ custom_handlers, sizeof(p_nat->custom_handlers));
+
+ if (!p_nat->is_static_cgnapt) {
+ printf("Initializing dyn napt components ... %d\n",
+ p_nat->pipeline_num);
+ if (napt_port_alloc_init(p_nat) == -1) {
+ printf("Error - napt_port_alloc_init failed - %d\n",
+ p_nat->pipeline_num);
+ return NULL;
+ }
+ int rc = 0;
+
+ if (max_port_per_client_hash == NULL) {
+ rc = init_max_port_per_client(p_nat);
+ if (rc < 0) {
+ printf("CGNAPT Error - "
+ "init_max_port_per_client failed %d", rc);
+ return NULL;
+ }
+ }
+
+ }
+
+ if (!icmp_pool_init) {
+ icmp_pool_init = 1;
+ /* create the arp_icmp mbuf rx pool */
+ cgnapt_icmp_pktmbuf_tx_pool =
+ rte_pktmbuf_pool_create("icmp_mbuf_tx_pool", 63, 32, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (cgnapt_icmp_pktmbuf_tx_pool == NULL) {
+ PLOG(p, HIGH, "ICMP mbuf pool create failed.");
+ return NULL;
+ }
+
+ cgnapt_icmp_pkt =
+ rte_pktmbuf_alloc(cgnapt_icmp_pktmbuf_tx_pool);
+
+ if (cgnapt_icmp_pkt == NULL) {
+ printf("Failed to allocate cgnapt_icmp_pkt\n");
+ return NULL;
+ }
+ }
+
+ #ifdef CT_CGNAT
+
+ cgnat_cnxn_tracker = rte_zmalloc(NULL, rte_ct_get_cnxn_tracker_size(),
+ RTE_CACHE_LINE_SIZE);
+
+ if (cgnat_cnxn_tracker == NULL) {
+ printf("CGNAPT CT memory not allocated\n");
+ return NULL;
+ }
+ rte_ct_initialize_default_timeouts(cgnat_cnxn_tracker);
+
+ printf("CGNAPT CT Flows %d\n", p_nat->n_flows);
+ int ret;
+ ret = rte_ct_initialize_cnxn_tracker(cgnat_cnxn_tracker,
+ p_nat->n_flows,
+ "CGNAT_CT_COMMON_TABLE");
+ if (ret == -1)
+ return NULL;
+ #endif
+
+ #ifdef FTP_ALG
+ lib_ftp_alg_init();
+ #endif
+
+ #ifdef PCP_ENABLE
+ if (pcp_init() == PCP_INIT_SUCCESS)
+ printf("PCP contents are initialized successfully\n");
+ else
+ printf("Error in initializing PCP contents\n");
+ #endif
+
+ return p;
+}
+
+/**
+ * Function for pipeline cleanup
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ *
+ * @return
+ * 0
+ */
+static int pipeline_cgnapt_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_cgnapt_track(void *pipeline, uint32_t port_in,
+	uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_in == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+/**
+ * Function for pipeline timers
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ *
+ * @return
+ * 0
+ */
+static int pipeline_cgnapt_timer(void *pipeline)
+{
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)pipeline;
+
+ pipeline_msg_req_handle(&p_nat->p);
+
+ rte_pipeline_flush(((struct pipeline *)p_nat)->p);
+
+ return 0;
+}
+
+/**
+ * Function for pipeline custom handlers
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_CGNAPT_MSG_REQS) ?
+ p_nat->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+/**
+ * Function for adding NSP data
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_nsp_add_handler(
+ __rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_cgnapt_nsp_add_msg_req *req = msg;
+ struct pipeline_cgnapt_nsp_add_msg_rsp *rsp = msg;
+ int size = 0;
+ struct cgnapt_nsp_node *node = NULL, *ll = nsp_ll;
+
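+	/* Only the well-known NAT64 network-specific prefix lengths
+	 * (RFC 6052: /32, /40, /48, /56, /64, /96) are accepted.
+	 */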
+ if (!
+ (req->nsp.depth == 32 || req->nsp.depth == 40
+ || req->nsp.depth == 48 || req->nsp.depth == 56
+ || req->nsp.depth == 64 || req->nsp.depth == 96)) {
+ rsp->status = 0xE;
+ rsp->key_found = 0;
+ return rsp;
+ }
+
+	printf("cgnapt be: nsp add request\n");
+ if (nsp_ll == NULL) {
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct cgnapt_nsp_node));
+ node = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (node == NULL) {
+			printf("cgnapt be: nsp add - node alloc failed\n");
+ rsp->status = 0xE;
+ rsp->key_found = 0;
+ return rsp;
+ }
+
+ memcpy(&node->nsp, &req->nsp,
+ sizeof(struct pipeline_cgnapt_nsp_t));
+ node->next = NULL;
+ nsp_ll = node;
+ } else {
+ while (ll != NULL) {
+ if (!memcmp(ll->nsp.prefix, req->nsp.prefix, 16)
+ && ll->nsp.depth == req->nsp.depth) {
+				printf("cgnapt be: nsp add - duplicate entry\n");
+ rsp->status = 0xE;
+ rsp->key_found = 1;
+ return rsp;
+ }
+ ll = ll->next;
+ }
+
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct cgnapt_nsp_node));
+ node = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (node == NULL) {
+			printf("cgnapt be: nsp add - node alloc failed\n");
+ rsp->status = 0xE;
+ rsp->key_found = 0;
+ return rsp;
+ }
+
+ memcpy(&node->nsp, &req->nsp,
+ sizeof(struct pipeline_cgnapt_nsp_t));
+ node->next = nsp_ll;
+ nsp_ll = node;
+ }
+
+ rsp->status = 0;
+ rsp->key_found = 0;
+
+	printf("cgnapt be: nsp add - entry added\n");
+ return rsp;
+}
+
+/**
+ * Function for deleting NSP data
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_nsp_del_handler(
+ __rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_cgnapt_nsp_del_msg_req *req = msg;
+ struct pipeline_cgnapt_nsp_del_msg_rsp *rsp = msg;
+ struct cgnapt_nsp_node *prev = NULL, *ll = nsp_ll;
+
+ while (ll != NULL) {
+ if (!memcmp(ll->nsp.prefix, req->nsp.prefix, 16)
+ && ll->nsp.depth == req->nsp.depth) {
+ if (prev != NULL)
+ prev->next = ll->next;
+ else
+ nsp_ll = NULL;
+
+ rte_free(ll);
+
+ rsp->status = 0;
+ rsp->key_found = 1;
+
+ return rsp;
+ }
+
+ prev = ll;
+ ll = ll->next;
+ }
+
+ rsp->status = 0xE;
+ rsp->key_found = 0;
+
+ return rsp;
+}
+
+/**
+ * Function for adding NAPT entry
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_entry_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_cgnapt_entry_add_msg_req *req = msg;
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+ uint8_t type = req->data.type;
+ uint32_t src_ip = (type == CGNAPT_ENTRY_IPV4) ?
+ req->data.u.prv_ip :
+ rte_bswap32(req->data.u.u32_prv_ipv6[3]);
+
+ uint8_t src_ipv6[16];
+
+ uint32_t dest_ip = req->data.pub_ip;
+ uint16_t src_port = req->data.prv_port;
+ uint16_t dest_port = req->data.pub_port;
+ uint16_t rx_port = req->data.prv_phy_port;
+ uint32_t ttl = req->data.ttl;
+
+ if (type == CGNAPT_ENTRY_IPV6)
+ memcpy(src_ipv6, req->data.u.prv_ipv6, 16);
+
+ printf("CG-NAPT addm - PrvIP %x, PrvPort %d,", src_ip, src_port);
+ printf("PubIP %x, PubPort %d,", dest_ip, dest_port);
+
+ printf("PhyPort %d, ttl %u,", rx_port, ttl);
+ printf("entry_type %d\n", type);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag) {
+ if (!p_nat->is_static_cgnapt) {
+ int i;
+
+ for (i = 0; i < p_nat->pub_ip_range_count; i++) {
+ if (((dest_ip >= p_nat->pub_ip_range[i].start_ip)
+ && (dest_ip <= p_nat->pub_ip_range[i].end_ip))) {
+ printf("Error - static port cannot be in Dynamic "
+ "port range");
+ printf("%x-%x\n", p_nat->pub_ip_range[i].start_ip,
+ p_nat->pub_ip_range[i].end_ip);
+ return msg;
+ }
+ }
+ }
+
+ if (pipeline_cgnapt_msg_req_entry_addm_pair(p, msg,
+ src_ip, src_port,
+ dest_ip, dest_port,
+ rx_port, ttl,
+ type, src_ipv6)) {
+			printf("Error - ");
+			printf("pipeline_cgnapt_msg_req_entry_add_handler\n");
+ return msg;
+ }
+
+		printf("Success - pipeline_cgnapt_msg_req_entry_add_handler ");
+		printf("added the rule pair.\n");
+
+ return msg;
+ }
+ #endif
+
+ if (!p_nat->is_static_cgnapt) {
+ int i;
+
+ for (i = 0; i < p_nat->pub_ip_count; i++) {
+ /* Check port range if same Public-IP */
+ if (dest_ip != p_nat->pub_ip_port_set[i].ip)
+ continue;
+ if (((dest_port >= p_nat->pub_ip_port_set[i].start_port) &&
+ (dest_port <= p_nat->pub_ip_port_set[i].end_port))) {
+ printf("Error - port cannot be in Dynamic "
+ "port range %d-%d\n",
+ p_nat->pub_ip_port_set[i].start_port,
+ p_nat->pub_ip_port_set[i].end_port);
+ return msg;
+ }
+ }
+ }
+
+ if (pipeline_cgnapt_msg_req_entry_addm_pair
+ (p, msg, src_ip, src_port, dest_ip, dest_port, rx_port,
+ ttl, type, src_ipv6)) {
+ printf("Error - pipeline_cgnapt_msg_req_entry_add_handler\n");
+ return msg;
+ }
+
+
+ printf("\nSuccess - pipeline_cgnapt_msg_req_entry_add_handler "
+ "added\n");
+
+ return msg;
+}
+
+/**
+ * Function for adding a NAPT entry pair
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ * @param src_ip
+ * source ip address
+ * @param src_port
+ * source port
+ * @param dest_ip
+ * destination ip address
+ * @param dest_port
+ * destination port
+ * @param rx_port
+ * Physical receive port
+ * @param ttl
+ * time to live value
+ * @param type
+ * type of entry IPv4 vs IPv6
+ * @param src_ipv6[]
+ * uint8_t array of IPv6 address
+ *
+ * @return
+ * 0 if success, negative if fails
+ *  0 on success, -1 if the forward entry add fails, 2 if the reverse entry add fails
+int
+pipeline_cgnapt_msg_req_entry_addm_pair(
+ struct pipeline *p, __rte_unused void *msg,
+ uint32_t src_ip, uint16_t src_port,
+ uint32_t dest_ip, uint16_t dest_port,
+ uint16_t rx_port, uint32_t ttl,
+ uint8_t type, uint8_t src_ipv6[16])
+{
+
+ struct pipeline_cgnapt_entry_key key;
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+
+ key.ip = src_ip;
+ key.port = src_port;
+ key.pid = rx_port;
+
+ struct cgnapt_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ .port_id = CGNAPT_PUB_PORT_ID,
+ },
+
+ .data = {
+ /*.prv_ip = src_ip, */
+ .prv_port = src_port,
+ .pub_ip = dest_ip,
+ .pub_port = dest_port,
+ .prv_phy_port = rx_port,
+ .pub_phy_port = get_prv_to_pub_port(&dest_ip,
+ IP_VERSION_4),
+ .ttl = ttl,
+ .timeout = STATIC_CGNAPT_TIMEOUT,
+ #ifdef PCP_ENABLE
+ .timer = NULL,
+ #endif
+ }
+ };
+
+ if (type == CGNAPT_ENTRY_IPV4) {
+ entry.data.type = CGNAPT_ENTRY_IPV4;
+ entry.data.u.prv_ip = src_ip;
+ } else {
+ entry.data.type = CGNAPT_ENTRY_IPV6;
+ memcpy(entry.data.u.prv_ipv6, src_ipv6, 16);
+ }
+
+ /* Also need to add a paired entry on our own */
+ /*
+ * Need to change key
+ * Need to change entry header
+ * Will keep the same entry and take care
+ * of translation in table hit handler
+ */
+ struct pipeline_cgnapt_entry_key second_key;
+
+ /* Need to add a second ingress entry */
+ second_key.ip = dest_ip;
+ second_key.port = dest_port;
+ second_key.pid = 0xffff;
+
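+	/* In NAT-only configuration the port is not part of the lookup;
+	 * 0xffff acts as a wildcard port in both keys and in the stored
+	 * entry.
+	 */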
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag) {
+ key.port = 0xffff;
+ entry.data.pub_port = 0xffff;
+ second_key.port = 0xffff;
+ }
+ #endif
+
+	if (CGNAPT_DEBUG > 2) {
+		printf("key.ip %x, key.port %d", key.ip, key.port);
+		printf("key.pid %d, in_type %d,", key.pid, type);
+		printf("entry_type %d\n", entry.data.type);
+	}
+
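+	/* rte_hash_add_key() returns the slot index; the entry data is
+	 * kept in the parallel napt_hash_tbl_entries[] array at that
+	 * same index.
+	 */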
+ int32_t position = rte_hash_add_key(napt_common_table, &key);
+
+ if (position < 0) {
+ printf("CG-NAPT entry bulk add failed");
+ printf(" ... returning without adding ...\n");
+ return -1;
+ }
+
+ memcpy(&napt_hash_tbl_entries[position], &entry,
+ sizeof(struct cgnapt_table_entry));
+
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc1++ < 5)
+ print_key(&key);
+ #endif
+
+ p_nat->n_cgnapt_entry_added++;
+
+ /* Now modify the forward port for reverse entry */
+ entry.head.port_id = CGNAPT_PRV_PORT_ID;
+
+ position = rte_hash_add_key(napt_common_table, &second_key);
+
+ if (position < 0) {
+ printf("CG-NAPT entry reverse bulk add failed");
+ printf(" ... returning with fwd add ...%d\n", position);
+ return 2;
+ }
+
+ memcpy(&napt_hash_tbl_entries[position], &entry,
+ sizeof(struct cgnapt_table_entry));
+
+ #ifdef CGNAPT_DEBUGGING
+ if (p_nat->kpc1 < 5)
+ print_key(&second_key);
+ #endif
+
+ p_nat->n_cgnapt_entry_added++;
+ return 0;
+}
+
+/**
+ * Function for adding multiple NAPT entries
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_entry_addm_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_cgnapt_entry_addm_msg_req *req = msg;
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+ uint32_t uenum = 0;
+ uint32_t max_ue = req->data.num_ue;
+ uint8_t type = req->data.type;
+ uint32_t src_ip = (type == CGNAPT_ENTRY_IPV4) ?
+ req->data.u.prv_ip :
+ rte_bswap32(req->data.u.u32_prv_ipv6[3]);
+
+ uint8_t src_ipv6[16];
+
+ uint32_t dest_ip = req->data.pub_ip;
+ uint16_t src_port = req->data.prv_port;
+ uint16_t dest_port = req->data.pub_port;
+ uint16_t rx_port = req->data.prv_phy_port;
+ uint32_t ttl = req->data.ttl;
+ uint16_t max_src_port = req->data.prv_port_max;
+ uint16_t max_dest_port = req->data.pub_port_max;
+ uint32_t count = 0;
+ uint16_t src_port_start = src_port;
+ uint16_t dest_port_start = dest_port;
+ uint32_t src_ip_temp;
+
+ if (type == CGNAPT_ENTRY_IPV6)
+ memcpy(src_ipv6, req->data.u.prv_ipv6, 16);
+
+ printf("CG-NAPT addm - PrvIP %x, PrvPort %d,", src_ip, src_port);
+ printf("PubIP %x, PubPort %d,", dest_ip, dest_port);
+ printf("PhyPort %d, ttl %u, NumUe %d,", rx_port, ttl, max_ue);
+ printf("mPrvPort %d, mPubPort %d,", max_src_port, max_dest_port);
+ printf("entry_type %d\n", type);
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag) {
+ if (!p_nat->is_static_cgnapt) {
+ int i;
+
+ for (i = 0; i < p_nat->pub_ip_range_count; i++) {
+ if (((dest_ip >= p_nat->pub_ip_range[i].start_ip)
+ && (dest_ip <= p_nat->pub_ip_range[i].end_ip)) ||
+ (((dest_ip + max_ue) >=
+ p_nat->pub_ip_range[i].start_ip) &&
+ ((dest_ip + max_ue) <=
+ p_nat->pub_ip_range[i].end_ip))) {
+ printf("Error - static port cannot be in Dynamic "
+ "port range");
+ printf("%x-%x\n", p_nat->pub_ip_range[i].start_ip,
+ p_nat->pub_ip_range[i].end_ip);
+
+ return msg;
+ }
+ }
+ }
+
+ for (uenum = 0; uenum < max_ue; uenum++) {
+
+ if (pipeline_cgnapt_msg_req_entry_addm_pair(p, msg,
+ src_ip, src_port,
+ dest_ip, dest_port,
+ rx_port, ttl,
+ type, src_ipv6)) {
+ printf("Error - ");
+ printf("pipeline_cgnapt_msg_req_entry_addm_handler\n");
+ return msg;
+ }
+
+ count++;
+
+ src_ip++;
+ dest_ip++;
+ }
+
+ printf("Success - pipeline_cgnapt_msg_req_entry_addm_handler");
+ printf("added %d rule pairs.\n", count);
+
+ return msg;
+ }
+ #endif
+
+ if (!p_nat->is_static_cgnapt) {
+ int i;
+
+ for (i = 0; i < p_nat->pub_ip_count; i++) {
+ /* Check port range if same Public-IP */
+ if (dest_ip != p_nat->pub_ip_port_set[i].ip)
+ continue;
+ if (((dest_port >= p_nat->pub_ip_port_set[i].start_port) &&
+ (dest_port <= p_nat->pub_ip_port_set[i].end_port)) ||
+ ((max_dest_port >= p_nat->pub_ip_port_set[i].start_port)
+ && max_dest_port <= p_nat->pub_ip_port_set[i].end_port)) {
+ printf("Error - port cannot be in Dynamic port range %d-%d\n",
+ p_nat->pub_ip_port_set[i].start_port,
+ p_nat->pub_ip_port_set[i].end_port);
+ return msg;
+ }
+ }
+ }
+
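+	/* One entry pair is added per UE: ports are incremented for each
+	 * pair and roll over to the next private/public IP once the
+	 * configured max port is exceeded.
+	 */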
+ for (uenum = 0; uenum < max_ue; uenum++) {
+ if (pipeline_cgnapt_msg_req_entry_addm_pair
+ (p, msg, src_ip, src_port, dest_ip, dest_port, rx_port,
+ ttl, type, src_ipv6)) {
+ printf("Error - pipeline_cgnapt_msg_req_entry_addm_handler\n");
+ return msg;
+ }
+
+ count++;
+
+ src_port++;
+ if (src_port > max_src_port) {
+ src_port = src_port_start;
+ src_ip++;
+ if (req->data.type == CGNAPT_ENTRY_IPV6) {
+ src_ip_temp = rte_bswap32(src_ip);
+ memcpy(&src_ipv6[12], &src_ip_temp, 4);
+ }
+ }
+ dest_port++;
+ if (dest_port > max_dest_port) {
+ dest_port = dest_port_start;
+ dest_ip++;
+ }
+ }
+
+ printf("\nSuccess - pipeline_cgnapt_msg_req_entry_addm_handler added");
+ printf("%d rule pairs.\n", count);
+
+ return msg;
+}
+
+/**
+ * Function for deleting NAPT entry
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_entry_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_cgnapt_entry_delete_msg_req *req = msg;
+ struct pipeline_cgnapt_entry_delete_msg_rsp *rsp = msg;
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+
+ if (CGNAPT_DEBUG) {
+ uint8_t *KeyP = (void *)(&req->key);
+ int i = 0;
+
+ printf("pipeline_cgnapt_msg_req_entry_del_handler - Key: ");
+ for (i = 0; i < (int)sizeof(struct pipeline_cgnapt_entry_key);
+ i++)
+ printf(" %02x", KeyP[i]);
+		printf(", KeySize %d\n",
+			(int)sizeof(struct pipeline_cgnapt_entry_key));
+ }
+
+ struct cgnapt_table_entry entry;
+
+	/* Ingress (public-side) keys are stored with a wildcard pid */
+	if (!is_phy_port_privte(req->key.pid))
+		req->key.pid = 0xffff;
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ req->key.port = 0xffff;
+ #endif
+
+ int32_t position;
+ position = rte_hash_lookup(napt_common_table, &req->key);
+	if (position < 0) {
+ printf("Entry not found\n");
+ return NULL;
+ }
+ memcpy(&entry, &napt_hash_tbl_entries[position],
+ sizeof(struct cgnapt_table_entry));
+ position = rte_hash_del_key(napt_common_table, &req->key);
+ p_nat->n_cgnapt_entry_deleted++;
+
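+	/* Every translation is stored as an entry pair (egress and
+	 * ingress), so the mirror entry has to be removed as well.
+	 */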
+ struct pipeline_cgnapt_entry_key second_key;
+
+ if (is_phy_port_privte(req->key.pid)) {
+ /* key is for egress - make second key for ingress */
+ second_key.ip = entry.data.pub_ip;
+ second_key.port = entry.data.pub_port;
+ second_key.pid = 0xffff;
+
+ } else {
+ /* key is for ingress - make second key for egress */
+ second_key.ip = entry.data.u.prv_ip;
+ second_key.port = entry.data.prv_port;
+ second_key.pid = entry.data.prv_phy_port;
+ }
+
+ #ifdef NAT_ONLY_CONFIG_REQ
+ if (nat_only_config_flag)
+ second_key.port = 0xffff;
+ #endif
+
+ position = rte_hash_del_key(napt_common_table, &second_key);
+ p_nat->n_cgnapt_entry_deleted++;
+
+ return rsp;
+}
+
+void *pipeline_cgnapt_msg_req_entry_sync_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_cgnapt_entry_delete_msg_req *req = msg;
+ struct pipeline_cgnapt_entry_delete_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_entry_delete(
+ p->p,
+ p->table_id[0],
+ &req->key,
+ &rsp->key_found, NULL);
+
+ return rsp;
+}
+
+/**
+ * Function to print the NAPT key
+ *
+ * @param key
+ * A pointer to struct pipeline_cgnapt_entry_key
+ */
+void print_key(struct pipeline_cgnapt_entry_key *key)
+{
+ uint8_t *KeyP = (void *)(key);
+ int i = 0;
+
+ printf("\nKey: ");
+ for (i = 0; i < (int)sizeof(struct pipeline_cgnapt_entry_key); i++)
+ printf(" %02x", KeyP[i]);
+}
+
+/**
+ * Function to print the table entry
+ *
+ * @param entry
+ * A pointer to struct rte_pipeline_table_entry
+ */
+void print_entry1(struct rte_pipeline_table_entry *entry)
+{
+ uint8_t *entryP = (void *)(entry);
+ int i = 0;
+
+ printf("Entry: ");
+ for (i = 0; i < (int)sizeof(struct rte_pipeline_table_entry); i++)
+ printf(" %02x", entryP[i]);
+}
+
+/**
+ * Function to print the NAPT table entry
+ *
+ * @param entry
+ * A pointer to struct cgnapt_table_entry
+ */
+void print_cgnapt_entry(struct cgnapt_table_entry *entry)
+{
+ uint8_t *entryP = (void *)(entry);
+ int i = 0;
+
+ printf("CGNAPT Entry: ");
+ for (i = 0; i < (int)sizeof(struct cgnapt_table_entry); i++)
+ printf(" %02x", entryP[i]);
+ printf(" size:%d\n", (int)sizeof(struct cgnapt_table_entry));
+}
+
+/**
+ * Function to get a free port
+ *
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ * @param public_ip
+ * A uint32_t pointer to return corresponding ip address
+ *
+ * @return
+ *  free port number, -1 if error
+ */
+int get_free_iport(struct pipeline_cgnapt *p_nat, uint32_t *public_ip)
+{
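+	/* Ports are pre-provisioned in napt_port_alloc_elem buffers: a
+	 * buffer is dequeued from port_alloc_ring, its ports handed out
+	 * one at a time, and the emptied buffer is returned to
+	 * napt_port_pool. release_iport() does the reverse.
+	 */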
+ int port = -1;
+ /* If we don't have a valid napt_port_alloc_elem get one from
+ * port_alloc_ring
+ */
+ if (p_nat->allocated_ports == NULL) {
+ void *ports;
+ int ret;
+
+ ret = rte_ring_dequeue(p_nat->port_alloc_ring, &ports);
+ if (ret == 0) {
+ p_nat->allocated_ports =
+ (struct napt_port_alloc_elem *)ports;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->gfp_get++;
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 3)
+ printf("p_nat->allocated_ports %p\n",
+ p_nat->allocated_ports);
+ #endif
+ } else {
+ printf("CGNAPT Err - get_free_iport rte_ring_dequeue "
+ "failed");
+ printf("%d, %d, %d\n", rte_ring_count(
+ p_nat->port_alloc_ring), rte_ring_free_count(
+ p_nat->port_alloc_ring), ret);
+
+ #ifdef CGNAPT_DEBUGGING
+ #ifdef CGNAPT_DBG_PRNT
+ printf("Th%d GFP:: %" PRIu64 ", %" PRIu64 ", "
+ "%" PRIu64", %" PRIu64 ",\n", p_nat->pipeline_num,
+ p_nat->gfp_get, p_nat->gfp_ret, p_nat->gfp_suc,
+ p_nat->gfp_err);
+
+ p_nat->gfp_err++;
+ #endif
+ #endif
+ return port;
+ }
+ }
+
+ /* get the port from index count-1 and decrease count */
+ port = p_nat->allocated_ports->ports
+ [p_nat->allocated_ports->count - 1];
+ *public_ip = p_nat->allocated_ports->ip_addr
+ [p_nat->allocated_ports->count - 1];
+
+ p_nat->allocated_ports->count -= 1;
+
+ /* if count is zero, return buffer to mem pool */
+ if (p_nat->allocated_ports->count == 0) {
+ rte_mempool_put(napt_port_pool, p_nat->allocated_ports);
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->gfp_ret++;
+ #ifdef CGNAPT_DBG_PRNT
+ printf("Th%d Returned to pool p_nat->allocated_ports %p,",
+ p_nat->pipeline_num, p_nat->allocated_ports);
+ printf("%" PRIu64 ", %" PRIu64 ",",
+ p_nat->gfp_get, p_nat->gfp_ret);
+ printf("%" PRIu64 ", %" PRIu64 ",\n",
+ p_nat->gfp_suc, p_nat->gfp_err);
+ #endif
+ #endif
+
+ p_nat->allocated_ports = NULL;
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->gfp_suc++;
+ #endif
+
+ return port;
+}
+
+/**
+ * Function to free a port
+ *
+ * @param port_num
+ * Port number to free
+ * @param public_ip
+ * Corresponding ip address
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ */
+void release_iport(uint16_t port_num, uint32_t public_ip,
+ struct pipeline_cgnapt *p_nat)
+{
+ /* If we don't have a valid napt_port_alloc_elem get one
+ * from mem pool
+ */
+ if (p_nat->free_ports == NULL) {
+ void *ports;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->pfb_get++;
+ #endif
+
+ if (rte_mempool_get(napt_port_pool, &ports) < 0) {
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->pfb_err++;
+ #endif
+ printf("CGNAPT release_iport error in getting "
+ "port alloc buffer\n");
+ return;
+ }
+
+ p_nat->free_ports = (struct napt_port_alloc_elem *)ports;
+ p_nat->free_ports->count = 0;
+ }
+
+ /* put the port at index count and increase count */
+ p_nat->free_ports->ip_addr[p_nat->free_ports->count] = public_ip;
+ p_nat->free_ports->ports[p_nat->free_ports->count] = port_num;
+ p_nat->free_ports->count += 1;
+
+	/* enqueue the buffer holding the freed port back to port_alloc_ring */
+ {
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->pfb_ret++;
+ #endif
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 2) {
+ printf("CGNAPT port_alloc_ring before EnQ Cnt %d, Free %d\n",
+ rte_ring_count(p_nat->port_alloc_ring),
+ rte_ring_free_count(p_nat->port_alloc_ring));
+ }
+ #endif
+
+ if (rte_ring_enqueue(p_nat->port_alloc_ring,
+ (void *)p_nat->free_ports) != 0) {
+ printf("CGNAPT release_iport Enqueue error %p\n",
+ p_nat->free_ports);
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->pfb_err++;
+ #endif
+ }
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 2) {
+ printf("CGNAPT port_alloc_ring after EnQ Cnt %d",
+ rte_ring_count(p_nat->port_alloc_ring));
+ printf("Free %d\n",
+ rte_ring_free_count(p_nat->port_alloc_ring));
+ }
+ #endif
+
+ p_nat->free_ports = NULL;
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->pfb_suc++;
+ #endif
+}
+
+/**
+ * Function to initialize max ports per client data structures
+ * Called during dynamic NAPT initialization.
+ *
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 if success, negative if error
+ */
+int init_max_port_per_client(
+ __rte_unused struct pipeline_cgnapt *p_nat)
+{
+ if (max_port_per_client_hash)
+ return -1;
+
+ /*MPPC_ALREADY_EXISTS */
+
+ int i = 0;
+
+ max_port_per_client_hash =
+ rte_hash_create(&max_port_per_client_hash_params);
+ if (!max_port_per_client_hash)
+ return -2;
+
+ /*MPPC_HASH_CREATE_ERROR */
+
+ max_port_per_client_array =
+ rte_zmalloc(NULL,
+ sizeof(struct max_port_per_client) * MAX_DYN_ENTRY,
+ RTE_CACHE_LINE_SIZE);
+ if (!max_port_per_client_array)
+ return -3;
+
+ /*MPPC_ARRAY_CREATE_ERROR */
+
+ for (i = 0; i < MAX_DYN_ENTRY; i++) {
+ max_port_per_client_array[i].prv_ip = 0;
+ max_port_per_client_array[i].prv_phy_port = 0;
+ max_port_per_client_array[i].max_port_cnt = 0;
+ }
+
+ return 0;
+ /*MPPC_SUCCESS */
+}
+
+/**
+ * Function to check if max ports for a client is reached
+ *
+ * @param prv_ip_param
+ * A uint32_t ip address of client
+ * @param prv_phy_port_param
+ * A uint32_t physical port id of the client
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 if max port not reached, 1 if reached, -1 if error
+ */
+int is_max_port_per_client_reached(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat)
+{
+ int index = MAX_PORT_INVALID_KEY;
+
+ struct max_port_per_client_key key = {
+ .prv_ip = prv_ip_param,
+ .prv_phy_port = prv_phy_port_param,
+ };
+
+ index = rte_hash_lookup(max_port_per_client_hash, (const void *)&key);
+
+ if (index < 0)
+ return MAX_PORT_INVALID_KEY;
+
+ if (max_port_per_client_array[index].max_port_cnt >=
+ p_nat->max_port_per_client)
+ return MAX_PORT_REACHED;
+
+ return MAX_PORT_NOT_REACHED;
+}
+
+/**
+ * Function to increase max ports for a client
+ *
+ * @param prv_ip_param
+ * A uint32_t ip address of client
+ * @param prv_phy_port_param
+ * A uint32_t physical port id of the client
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 if max port reached, 1 if success, 2 if new entry, -1 if error
+ */
+int increment_max_port_counter(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat)
+{
+ int index = MAX_PORT_INC_ERROR;
+
+ struct max_port_per_client_key key = {
+ .prv_ip = prv_ip_param,
+ .prv_phy_port = prv_phy_port_param,
+ };
+
+ index = rte_hash_lookup(max_port_per_client_hash, (const void *)&key);
+
+ if (index == -EINVAL)
+ return MAX_PORT_INC_ERROR;
+
+ if (index == -ENOENT) {
+ if (max_port_per_client_add_entry(prv_ip_param,
+ prv_phy_port_param,
+ p_nat) <= 0)
+ return MAX_PORT_INC_ERROR;
+
+ return 2; /*return MAX_PORT_NEW_ENTRY; */
+ }
+
+ if (CGNAPT_DEBUG > 2)
+ printf("%s: max_port_cnt(%d), p_nat_max(%d)\n", __func__,
+ max_port_per_client_array[index].max_port_cnt,
+ p_nat->max_port_per_client);
+
+ if (max_port_per_client_array[index].max_port_cnt <
+ p_nat->max_port_per_client) {
+ max_port_per_client_array[index].max_port_cnt++;
+ return MAX_PORT_INC_SUCCESS;
+ }
+
+ return MAX_PORT_INC_REACHED;
+}
+
+/**
+ * Function to decrease max ports for a client
+ *
+ * @param prv_ip_param
+ * A uint32_t ip address of client
+ * @param prv_phy_port_param
+ * A uint32_t physical port id of the client
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 if count already 0, 1 if success, -1 if error
+ */
+int decrement_max_port_counter(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat)
+{
+ int index = MAX_PORT_DEC_ERROR;
+
+ struct max_port_per_client_key key = {
+ .prv_ip = prv_ip_param,
+ .prv_phy_port = prv_phy_port_param,
+ };
+
+ index = rte_hash_lookup(max_port_per_client_hash, (const void *)&key);
+ if (index < 0) {
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->max_port_dec_err1++;
+ #endif
+ return MAX_PORT_DEC_ERROR;
+
+ }
+
+ if (max_port_per_client_array[index].max_port_cnt > 0) {
+		/* If this was the last port, MAX_PORT_DEC_REACHED is
+		 * returned below; that info is used for max_cli_per_pub_ip
+		 * accounting.
+		 */
+
+		max_port_per_client_array[index].max_port_cnt--;
+		/* The count should ideally be atomic, but only one task
+		 * updates this counter at a time (core affinity), so a
+		 * plain decrement is safe.
+		 */
+ }
+
+ if (max_port_per_client_array[index].max_port_cnt <= 0) {
+ if (max_port_per_client_del_entry
+ (prv_ip_param, prv_phy_port_param, p_nat) <= 0) {
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->max_port_dec_err2++;
+ #endif
+ return MAX_PORT_DEC_ERROR;
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->max_port_dec_err3++;
+ #endif
+
+ return MAX_PORT_DEC_REACHED;
+ }
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->max_port_dec_success++;
+ #endif
+
+ return MAX_PORT_DEC_SUCCESS;
+}
+
+/**
+ * Function to add a max ports per client entry
+ *
+ * @param prv_ip_param
+ * A uint32_t ip address of client
+ * @param prv_phy_port_param
+ * A uint32_t physical port id of the client
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 no success, 1 if success, -1 if error
+ */
+int max_port_per_client_add_entry(
+ uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ __rte_unused struct pipeline_cgnapt *p_nat)
+{
+ int index = MAX_PORT_ADD_ERROR;
+
+ struct max_port_per_client_key key = {
+ .prv_ip = prv_ip_param,
+ .prv_phy_port = prv_phy_port_param,
+ };
+
+ index = rte_hash_lookup(max_port_per_client_hash, (const void *)&key);
+ if (index == -EINVAL)
+ return MAX_PORT_ADD_ERROR;
+
+ if (index >= 0)
+ return MAX_PORT_ADD_UNSUCCESS;
+
+ if (index == -ENOENT) {
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("max_port_per_client_add_entry fn: "
+ "Entry does not exist\n");
+ #endif
+
+ index =
+ rte_hash_add_key(max_port_per_client_hash,
+ (const void *)&key);
+ if (index == -ENOSPC)
+ return MAX_PORT_ADD_UNSUCCESS;
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("max_port_per_client_add_entry fn:"
+ "Add entry index(%d)\n", index);
+ #endif
+
+ max_port_per_client_array[index].prv_ip = prv_ip_param;
+ max_port_per_client_array[index].prv_phy_port =
+ prv_phy_port_param;
+ }
+
+ max_port_per_client_array[index].max_port_cnt++;
+ return MAX_PORT_ADD_SUCCESS;
+}
+
+/**
+ * Function to delete a max ports per client entry
+ *
+ * @param prv_ip_param
+ * A uint32_t ip address of client
+ * @param prv_phy_port_param
+ * A uint32_t physical port id of the client
+ * @param p_nat
+ * A pointer to struct pipeline_cgnapt
+ *
+ * @return
+ * 0 no success, 1 if success, -1 if error
+			if (prev != NULL)
+				prev->next = ll->next;
+			else
+				nsp_ll = ll->next;
+ __rte_unused struct pipeline_cgnapt *p_nat)
+{
+ int index = MAX_PORT_DEL_ERROR;
+
+ struct max_port_per_client_key key = {
+ .prv_ip = prv_ip_param,
+ .prv_phy_port = prv_phy_port_param,
+ };
+
+ index = rte_hash_lookup(max_port_per_client_hash, (const void *)&key);
+
+ if (index == -EINVAL)
+ return MAX_PORT_DEL_ERROR;
+
+ if (index == -ENOENT)
+ return MAX_PORT_DEL_UNSUCCESS;
+
+ index = rte_hash_del_key(max_port_per_client_hash, (const void *)&key);
+ max_port_per_client_array[index].prv_ip = 0;
+ max_port_per_client_array[index].prv_phy_port = 0;
+ max_port_per_client_array[index].max_port_cnt = 0;
+
+ return MAX_PORT_DEL_SUCCESS;
+}
+
+/**
+ * Function to execute debug commands
+ *
+ * @param p
+ * A pointer to struct pipeline
+ * @param msg
+ *  void pointer to incoming arguments
+ *
+ * @return
+ *  void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_entry_dbg_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_cgnapt_entry_delete_msg_rsp *rsp = msg;
+ uint8_t *Msg = msg;
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)p;
+
+ rsp->status = 0;
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_STATS_SHOW) {
+ printf("\nCG-NAPT Packet Stats:\n");
+ printf("Received %" PRIu64 ",", p_nat->receivedPktCount);
+ printf("Missed %" PRIu64 ",", p_nat->missedPktCount);
+ printf("Dropped %" PRIu64 ",", p_nat->naptDroppedPktCount);
+ printf("Translated %" PRIu64 ",", p_nat->naptedPktCount);
+ printf("ingress %" PRIu64 ",", p_nat->inaptedPktCount);
+ printf("egress %" PRIu64 "\n", p_nat->enaptedPktCount);
+ printf("arp pkts %" PRIu64 "\n", p_nat->arpicmpPktCount);
+
+ #ifdef CGNAPT_DEBUGGING
+ printf("\n Drop detail 1:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount1);
+ printf("\n Drop detail 2:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount2);
+ printf("\n Drop detail 3:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount3);
+ printf("\n Drop detail 4:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount4);
+ printf("\n Drop detail 5:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount5);
+ printf("\n Drop detail 6:%" PRIu64 "",
+ p_nat->naptDroppedPktCount6);
+
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount1,
+ p_nat->missedpktcount2);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount3,
+ p_nat->missedpktcount4);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount5,
+ p_nat->missedpktcount6);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount7,
+ p_nat->missedpktcount8);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount9,
+ p_nat->missedpktcount10);
+
+ #endif
+
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_STATS_CLEAR) {
+ printf("\nCG-NAPT Packet Stats:\n");
+ printf("Received %" PRIu64 ",", p_nat->receivedPktCount);
+ printf("Missed %" PRIu64 ",", p_nat->missedPktCount);
+ printf("Dropped %" PRIu64 ",", p_nat->naptDroppedPktCount);
+ printf("Translated %" PRIu64 ",", p_nat->naptedPktCount);
+ printf("ingress %" PRIu64 ",", p_nat->inaptedPktCount);
+ printf("egress %" PRIu64 "\n", p_nat->enaptedPktCount);
+ printf("arp pkts %" PRIu64 "\n", p_nat->arpicmpPktCount);
+
+ p_nat->naptedPktCount = 0;
+ p_nat->naptDroppedPktCount = 0;
+ p_nat->inaptedPktCount = 0;
+ p_nat->enaptedPktCount = 0;
+ p_nat->receivedPktCount = 0;
+ p_nat->missedPktCount = 0;
+ p_nat->arpicmpPktCount = 0;
+ printf("CG-NAPT Packet Stats cleared\n");
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_DBG_LEVEL) {
+ CGNAPT_DEBUG = Msg[CGNAPT_DBG_CMD_OFST + 1];
+ printf("CG-NAPT debug level set to %d\n", CGNAPT_DEBUG);
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_DBG_SHOW) {
+
+ printf("\nNAPT entries - added %" PRIu64 ",",
+ p_nat->n_cgnapt_entry_added);
+ printf("deleted %" PRIu64 ",", p_nat->n_cgnapt_entry_deleted);
+ printf("current %" PRIu64 "", p_nat->n_cgnapt_entry_added -
+ p_nat->n_cgnapt_entry_deleted);
+
+ printf("\nCG-NAPT Packet Stats:\n");
+ printf("Received %" PRIu64 ",", p_nat->receivedPktCount);
+ printf("Missed %" PRIu64 ",", p_nat->missedPktCount);
+ printf("Dropped %" PRIu64 ",", p_nat->naptDroppedPktCount);
+ printf("Translated %" PRIu64 ",", p_nat->naptedPktCount);
+ printf("ingress %" PRIu64 ",", p_nat->inaptedPktCount);
+ printf("egress %" PRIu64 "\n", p_nat->enaptedPktCount);
+ printf("arp pkts %" PRIu64 "\n", p_nat->arpicmpPktCount);
+
+ return rsp;
+ }
+ #ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_INSTRUMENTATION) {
+ if (Msg[CGNAPT_DBG_CMD_OFST1] ==
+ CGNAPT_CMD_INSTRUMENTATION_SUB0) {
+
+ int index = 0;
+ uint32_t diff_sum = 0;
+
+ printf("CG-NAPT Instrumentation ...\n");
+ printf("Instrumentation data collected for fn# %d\n",
+ cgnapt_num_func_to_inst);
+ printf("Current collection index %d\n",
+ cgnapt_inst_index);
+
+ if (Msg[CGNAPT_DBG_CMD_OFST + 2] == 2) {
+ printf("Timer Start:\n");
+
+ for (index = 0; index < INST_ARRAY_SIZE; index++) {
+ if ((index % 5) == 0)
+ printf("\n");
+ printf(" 0x%jx", inst_start_time[index]);
+ }
+ printf("\n\nTimer End:\n");
+
+ for (index = 0; index < INST_ARRAY_SIZE; index++) {
+ if ((index % 5) == 0)
+ printf("\n");
+ printf(" 0x%jx", inst_end_time[index]);
+ }
+ }
+
+ for (index = 0; index < INST_ARRAY_SIZE; index++) {
+ inst_diff_time[index] = (uint32_t) (inst_end_time[index] -
+ inst_start_time[index]);
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST + 2] ==
+ CGNAPT_CMD_INSTRUMENTATION_SUB1) {
+ printf("\n\nTimer Diff:\n");
+
+ for (index = 0; index < INST_ARRAY_SIZE; index++) {
+ if (Msg[CGNAPT_DBG_CMD_OFST + 2] ==
+ CGNAPT_CMD_INSTRUMENTATION_SUB1) {
+ if ((index % 5) == 0)
+ printf("\n");
+ printf(" 0x%08x", inst_diff_time[index]);
+ }
+
+ diff_sum += inst_diff_time[index];
+ }
+
+ printf("\ndiff_sum %u, INST_ARRAY_SIZE %d, Ave Time %u\n",
+ diff_sum, INST_ARRAY_SIZE, (diff_sum / INST_ARRAY_SIZE));
+ } else if (Msg[CGNAPT_DBG_CMD_OFST + 1] ==
+ CGNAPT_CMD_INSTRUMENTATION_SUB1) {
+ /* p plid entry dbg 7 1 0
+ * p plid entry dbg 7 1 1 <--- pkt_work_cgnapt
+ * p plid entry dbg 7 1 2 <--- pkt4_work_cgnapt
+ * p plid entry dbg 7 1 3 <--- pkt_work_cgnapt_key
+ * p plid entry dbg 7 1 4 <--- pkt4_work_cgnapt_key
+ * p plid entry dbg 7 1 5 <--- in port ah to out port ah
+ * - pkt life in the system
+ * p plid entry dbg 7 1 6 <--- how long this instrumentation
+ * itself is taking
+ */
+ cgnapt_inst_index = 0;
+ cgnapt_num_func_to_inst = Msg[CGNAPT_DBG_CMD_OFST + 2];
+ printf("Instrumentation data collection started for fn# %d\n",
+ cgnapt_num_func_to_inst);
+ } else if (Msg[CGNAPT_DBG_CMD_OFST + 1] ==
+ CGNAPT_CMD_INSTRUMENTATION_SUB2) {
+ /* p plid entry dbg 7 2 0
+ * Test all major functions by calling them multiple times
+ * pkt_work_cgnapt, pkt4_work_cgnapt, pkt_work_cgnapt_key,
+ * pkt4_work_cgnapt_key
+ */
+ if (cgnapt_test_pktmbuf_pool == NULL) {
+ cgnapt_test_pktmbuf_pool = rte_pktmbuf_pool_create(
+ "cgnapt_test_pktmbuf_pool", 63, 32, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ }
+
+ if (cgnapt_test_pktmbuf_pool == NULL)
+ printf("CGNAPT test mbuf pool create failed.\n");
+
+ struct rte_mbuf *cgnapt_test_pkt0 =
+ rte_pktmbuf_alloc(cgnapt_test_pktmbuf_pool);
+ if (cgnapt_test_pkt0 == NULL)
+ printf("CGNAPT test pkt 0 alloc failed.");
+ struct rte_mbuf *cgnapt_test_pkt1 =
+ rte_pktmbuf_alloc(cgnapt_test_pktmbuf_pool);
+ if (cgnapt_test_pkt1 == NULL)
+ printf("CGNAPT test pkt 1 alloc failed.");
+ struct rte_mbuf *cgnapt_test_pkt2 =
+ rte_pktmbuf_alloc(cgnapt_test_pktmbuf_pool);
+ if (cgnapt_test_pkt2 == NULL)
+ printf("CGNAPT test pkt 2 alloc failed.");
+ struct rte_mbuf *cgnapt_test_pkt3 =
+ rte_pktmbuf_alloc(cgnapt_test_pktmbuf_pool);
+ if (cgnapt_test_pkt3 == NULL)
+ printf("CGNAPT test pkt 3 alloc failed.");
+
+ struct rte_mbuf *cgnapt_test_pkts[4];
+
+ cgnapt_test_pkts[0] = cgnapt_test_pkt0;
+ cgnapt_test_pkts[1] = cgnapt_test_pkt1;
+ cgnapt_test_pkts[2] = cgnapt_test_pkt2;
+ cgnapt_test_pkts[3] = cgnapt_test_pkt3;
+
+ uint32_t src_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST;
+ /* header room + eth hdr size +
+				 * src_addr offset in ip header
+ */
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ /* header room + eth hdr size +
+				 * dst_addr offset in ip header
+ */
+ uint32_t prot_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
+ /* header room + eth hdr size +
+				 * protocol field offset in ip header
+ */
+ int pktCnt = 0, entCnt = 0, exCnt = 0;
+
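+			/* Give each test packet a fixed private 5-tuple
+			 * (192.168.0.1:1234 -> 144.65.134.52:4000, TCP) and
+			 * dummy source/destination MAC addresses.
+			 */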
+ for (pktCnt = 0; pktCnt < 4; pktCnt++) {
+ uint32_t *src_addr =
+ RTE_MBUF_METADATA_UINT32_PTR
+ (cgnapt_test_pkts[pktCnt], src_addr_offset);
+ uint32_t *dst_addr =
+ RTE_MBUF_METADATA_UINT32_PTR
+ (cgnapt_test_pkts[pktCnt], dst_addr_offset);
+ uint8_t *protocol =
+ RTE_MBUF_METADATA_UINT8_PTR(cgnapt_test_pkts
+ [pktCnt],
+ prot_offset);
+ uint8_t *phy_port =
+ RTE_MBUF_METADATA_UINT8_PTR(cgnapt_test_pkts
+ [pktCnt], 70);
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(cgnapt_test_pkts
+ [pktCnt],
+ MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(
+ cgnapt_test_pkts[pktCnt],
+ MBUF_HDR_ROOM +
+ 6);
+ uint16_t *src_port =
+ RTE_MBUF_METADATA_UINT16_PTR
+ (cgnapt_test_pkts[pktCnt],
+ MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE);
+ uint16_t *dst_port =
+ RTE_MBUF_METADATA_UINT16_PTR
+ (cgnapt_test_pkts[pktCnt],
+ MBUF_HDR_ROOM + ETH_HDR_SIZE +
+ IP_HDR_SIZE + 2);
+ *src_addr = 0xc0a80001;
+ *dst_addr = 0x90418634;
+ *protocol = 0x6;
+ *phy_port = 0;
+ *src_port = 1234;
+ *dst_port = 4000;
+ eth_src[0] = 0xAB;
+ eth_src[1] = 0xAB;
+ eth_src[2] = 0xAB;
+ eth_src[3] = 0xAB;
+ eth_src[4] = 0xAB;
+ eth_src[5] = 0xAB;
+ eth_dest[0] = 0x90;
+ eth_dest[1] = 0xE2;
+ eth_dest[2] = 0xba;
+ eth_dest[3] = 0x54;
+ eth_dest[4] = 0x67;
+ eth_dest[5] = 0xc8;
+ }
+ struct rte_pipeline_table_entry *table_entries[4];
+ struct cgnapt_table_entry ctable_entries[4];
+ table_entries[0] = (struct rte_pipeline_table_entry *)
+ &ctable_entries[0];
+ table_entries[1] = (struct rte_pipeline_table_entry *)
+ &ctable_entries[1];
+ table_entries[2] = (struct rte_pipeline_table_entry *)
+ &ctable_entries[2];
+ table_entries[3] = (struct rte_pipeline_table_entry *)
+ &ctable_entries[3];
+ for (entCnt = 0; entCnt < 4; entCnt++) {
+ ctable_entries[entCnt].head.action =
+ RTE_PIPELINE_ACTION_PORT;
+ ctable_entries[entCnt].head.port_id = 0;
+
+ ctable_entries[entCnt].data.prv_ip = 0x01020304;
+ ctable_entries[entCnt].data.prv_port = 1234;
+ ctable_entries[entCnt].data.pub_ip = 0x0a0b0c0d;
+ ctable_entries[entCnt].data.pub_port = 4000;
+ ctable_entries[entCnt].data.prv_phy_port = 0;
+ ctable_entries[entCnt].data.pub_phy_port = 1;
+ ctable_entries[entCnt].data.ttl = 500;
+ }
+
+ uint64_t time1 = rte_get_tsc_cycles();
+
+ for (exCnt = 0; exCnt < 1000; exCnt++) {
+ pkt_work_cgnapt_key(cgnapt_test_pkts[0],
+ instrumentation_port_in_arg);
+ }
+ uint64_t time2 = rte_get_tsc_cycles();
+
+ printf("times for %d times execution of "
+ "pkt_work_cgnapt_key 0x%jx",
+ exCnt, time1);
+ printf(", 0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ time1 = rte_get_tsc_cycles();
+ for (exCnt = 0; exCnt < 1000000; exCnt++) {
+ pkt_work_cgnapt_key(cgnapt_test_pkts[0],
+ instrumentation_port_in_arg);
+ }
+ time2 = rte_get_tsc_cycles();
+ printf("times for %d times execution of "
+ "pkt_work_cgnapt_key 0x%jx", exCnt, time1);
+ printf("0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ time1 = rte_get_tsc_cycles();
+ for (exCnt = 0; exCnt < 1000; exCnt++) {
+ pkt4_work_cgnapt_key(cgnapt_test_pkts,
+ instrumentation_port_in_arg);
+ }
+ time2 = rte_get_tsc_cycles();
+ printf("times for %d times execution of "
+ "pkt4_work_cgnapt_key 0x%jx",
+ exCnt, time1);
+ printf(" 0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ time1 = rte_get_tsc_cycles();
+ for (exCnt = 0; exCnt < 1000000; exCnt++) {
+ pkt4_work_cgnapt_key(cgnapt_test_pkts,
+ instrumentation_port_in_arg);
+ }
+ time2 = rte_get_tsc_cycles();
+ printf("times for %d times execution of "
+ "pkt4_work_cgnapt_key 0x%jx",
+ exCnt, time1);
+ printf("0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ uint64_t mask = 0xff;
+
+ time1 = rte_get_tsc_cycles();
+ for (exCnt = 0; exCnt < 1000; exCnt++) {
+ pkt_work_cgnapt(cgnapt_test_pkts[0],
+ table_entries[0], 3, &mask,
+ NULL);
+ }
+ time2 = rte_get_tsc_cycles();
+ printf("times for %d times execution of "
+ "pkt_work_cgnapt 0x%jx",
+ exCnt, time1);
+ printf("0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ time1 = rte_get_tsc_cycles();
+ for (exCnt = 0; exCnt < 1000000; exCnt++) {
+ pkt_work_cgnapt(cgnapt_test_pkts[0],
+ table_entries[0], 3, &mask,
+ NULL);
+ }
+ time2 = rte_get_tsc_cycles();
+ printf("times for %d times execution of "
+ "pkt_work_cgnapt 0x%jx",
+ exCnt, time1);
+ printf("0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ time1 = rte_get_tsc_cycles();
+ for (exCnt = 0; exCnt < 1000; exCnt++) {
+ pkt4_work_cgnapt(cgnapt_test_pkts,
+ table_entries, 0, &mask, NULL);
+ }
+ time2 = rte_get_tsc_cycles();
+ printf("times for %d times execution of "
+ "pkt4_work_cgnapt 0x%jx",
+ exCnt, time1);
+			printf("0x%jx, diff %" PRIu64 "\n", time2,
+ time2 - time1);
+
+ int idummy = ctable_entries[0].data.prv_port;
+
+ idummy++;
+
+ }
+ }
+ return rsp;
+ }
+ #endif
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_LS_ENTRY) {
+ printf("CG-NAPT be entries are:\n");
+ printf("Pipeline pointer %p\n", p);
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_DYN) {
+ printf("Total Number of dynamic napt entries: %" PRIu64 "\n",
+ p_nat->dynCgnaptCount);
+
+ #ifdef CGNAPT_DEBUGGING
+ printf("MAX PORT PER CLIENT:");
+ printf("%" PRIu64 ",%" PRIu64 ",%" PRIu64 "\n",
+ p_nat->max_port_dec_err1, p_nat->max_port_dec_err2,
+ p_nat->max_port_dec_err3);
+ printf("MPPC success : %" PRIu64 "\n",
+ p_nat->max_port_dec_success);
+
+ printf("Release port:err:%" PRIu64 ",ret::%" PRIu64 ",get::%"
+ PRIu64 ",suc::%" PRIu64 "\n", p_nat->pfb_err,
+ p_nat->pfb_ret, p_nat->pfb_get, p_nat->pfb_suc);
+ printf("Get port::err:%" PRIu64 ",ret::%" PRIu64 ",get::%"
+ PRIu64 ",suc::%" PRIu64 "\n", p_nat->gfp_err,
+ p_nat->gfp_ret, p_nat->gfp_get, p_nat->gfp_suc);
+ printf("Ring Info:\n");
+ rte_ring_dump(stdout, p_nat->port_alloc_ring);
+ #endif
+ return rsp;
+ }
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_IPV6) {
+ dual_stack_enable = Msg[CGNAPT_DBG_CMD_OFST + 1];
+ printf("Dual Stack option set: %x\n", dual_stack_enable);
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_MAPS_INFO) {
+ pipelines_port_info();
+ pipelines_map_info();
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_ITER_COM_TBL) {
+ uint32_t count = 0;
+ const void *key;
+ void *data;
+ uint32_t next = 0;
+ int32_t index = 0;
+
+ do {
+ index =
+ rte_hash_iterate(napt_common_table, &key, &data,
+ &next);
+
+ if ((index != -EINVAL) && (index != -ENOENT)) {
+ printf("\n%04d ", count);
+ rte_hexdump(stdout, "KEY", key,
+ sizeof(struct
+ pipeline_cgnapt_entry_key));
+
+ //print_key((struct pipeline_cgnapt_entry_key *)
+ // key);
+ int32_t position =
+ rte_hash_lookup(napt_common_table,
+ key);
+ print_cgnapt_entry(&napt_hash_tbl_entries
+ [position]);
+ }
+
+ count++;
+ } while (index != -ENOENT);
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_IF_STATS) {
+
+ struct app_params *app =
+ (struct app_params *)p_nat->app_params_addr;
+ uint8_t cmd[2];
+
+ cmd[0] = Msg[CGNAPT_DBG_CMD_OFST + 1];
+ cmd[1] = Msg[CGNAPT_DBG_CMD_OFST + 2];
+ switch (cmd[0]) {
+ case CGNAPT_IF_STATS_HWQ:
+ printf("n_pktq_hwq_int :%d\n", app->n_pktq_hwq_in);
+ printf("n_pktq_hwq_out :%d\n", app->n_pktq_hwq_out);
+ printf("\n");
+ uint8_t i, j;
+
+ for (i = 0; i < app->n_pktq_hwq_in; i++) {
+ struct rte_eth_stats stats;
+
+ rte_eth_stats_get(p_nat->links_map[i], &stats);
+
+ if (is_phy_port_privte(i))
+ printf("Private Port Stats %d\n", i);
+ else
+ printf("Public Port Stats %d\n", i);
+
+ printf("\n\tipackets : %" PRIu64 "",
+ stats.ipackets);
+ printf("\n\topackets : %" PRIu64 "",
+ stats.opackets);
+ printf("\n\tierrors : %" PRIu64 "",
+ stats.ierrors);
+ printf("\n\toerrors : %" PRIu64 "",
+ stats.oerrors);
+ printf("\n\trx_nombuf: %" PRIu64 "",
+ stats.rx_nombuf);
+ printf("\n");
+ if (is_phy_port_privte(i))
+ printf("Private Q:");
+ else
+ printf("Public Q:");
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS;
+ j++)
+ printf(" %" PRIu64 ", %" PRIu64 "|",
+ stats.q_ipackets[j],
+ stats.q_opackets[j]);
+
+ printf("\n\n");
+
+ }
+
+ return rsp;
+
+ case CGNAPT_IF_STATS_SWQ:
+
+ printf("n_pktq_swq :%d\n", app->n_pktq_swq);
+
+ if (cmd[1] < app->n_pktq_swq) {
+ rte_ring_dump(stdout, app->swq[cmd[1]]);
+ return rsp;
+ }
+ printf("SWQ number is invalid\n");
+ return rsp;
+
+ case CGNAPT_IF_STATS_OTH:
+ printf("\n");
+ printf("config_file:%s\n", app->config_file);
+ printf("script_file:%s\n", app->script_file);
+ printf("parser_file:%s\n", app->parser_file);
+ printf("output_file:%s\n", app->output_file);
+ printf("n_msgq :%d\n", app->n_msgq);
+ printf("n_pktq_tm :%d\n", app->n_pktq_tm);
+ printf("n_pktq_source :%d\n", app->n_pktq_source);
+ printf("n_pktq_sink :%d\n", app->n_pktq_sink);
+ printf("n_pipelines :%d\n", app->n_pipelines);
+ printf("\n");
+ return rsp;
+ default:
+ printf("Command does not match\n\n");
+ return rsp;
+
+ } /* switch */
+
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_MAX_CLI_PER_PUB_IP) {
+ if (nat_only_config_flag) {
+ printf("Command not supported for NAT only config.\n");
+ return rsp;
+ }
+ uint16_t ii;
+
+ printf("\tPublic IP: Num Clients\n");
+ for (ii = 0; ii < CGNAPT_MAX_PUB_IP; ii++)
+ printf("\t%x : %7d\n", all_public_ip[ii].ip,
+ rte_atomic16_read(&all_public_ip[ii].count));
+ return rsp;
+ }
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_PUB_IP_LIST) {
+
+ int i;
+ for (i = 0; i < p_nat->pub_ip_count; i++)
+ printf("%x : (%d,%d)\n", p_nat->pub_ip_port_set[i].ip,
+ p_nat->pub_ip_port_set[i].start_port,
+ p_nat->pub_ip_port_set[i].end_port);
+ return rsp;
+ }
+
+ #ifdef CGNAPT_TIMING_INST
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_TIMING_INST) {
+ if (Msg[CGNAPT_DBG_CMD_OFST + 1] == 0) {
+ p_nat->time_measurements_on = 1;
+ p_nat->time_measurements = 0;
+ printf("CGNAPT timing instrumentation turned on.\n");
+ printf("Max samples %d\n", p_nat->max_time_mesurements);
+ }
+ if (Msg[CGNAPT_DBG_CMD_OFST + 1] == 1) {
+ p_nat->time_measurements_on = 0;
+ printf("CGNAPT timing instrumentation turned off.\n");
+ printf("Cur Samples %d\n", p_nat->time_measurements);
+ }
+ if (Msg[CGNAPT_DBG_CMD_OFST + 1] == 2) {
+			uint64_t sum = p_nat->external_time_sum +
+				p_nat->internal_time_sum;
+			if (sum == 0 || p_nat->time_measurements == 0) {
+				printf("No timing samples collected yet.\n");
+				return rsp;
+			}
+			uint64_t isump = (p_nat->internal_time_sum * 100) / sum;
+			uint64_t esump = (p_nat->external_time_sum * 100) / sum;
+ printf("CGNAPT timing instrumentation status ...\n");
+ printf("Max Count %d, Cur Count %d, Status %d (1=ON)\n",
+ p_nat->max_time_mesurements,
+ p_nat->time_measurements,
+ p_nat->time_measurements_on);
+ printf("Internal Time Sum %" PRIu64 " , Ave %" PRIu64
+ ", percent %" PRIu64 "\n",
+ p_nat->internal_time_sum,
+ (p_nat->internal_time_sum /
+ p_nat->time_measurements), isump);
+ printf("External Time Sum %" PRIu64 " , Ave %" PRIu64
+ ", percent %" PRIu64 "\n",
+ p_nat->external_time_sum,
+ (p_nat->external_time_sum /
+ p_nat->time_measurements), esump);
+ }
+
+ return rsp;
+ }
+ #endif
+
+ if (Msg[CGNAPT_DBG_CMD_OFST] == CGNAPT_DBG_CMD_PRINT_NSP) {
+ struct cgnapt_nsp_node *ll = nsp_ll;
+
+ while (ll != NULL) {
+ fprintf(stderr, "NSP Prefix/Depth=>%x%x:%x%x:%x%x: "
+ "%x%x:%x%x:%x%x:%x%x:%x%x/%d",
+ ll->nsp.prefix[0], ll->nsp.prefix[1],
+ ll->nsp.prefix[2], ll->nsp.prefix[3],
+ ll->nsp.prefix[4], ll->nsp.prefix[5],
+ ll->nsp.prefix[6], ll->nsp.prefix[7],
+ ll->nsp.prefix[8], ll->nsp.prefix[9],
+ ll->nsp.prefix[10], ll->nsp.prefix[11],
+ ll->nsp.prefix[12], ll->nsp.prefix[13],
+ ll->nsp.prefix[14], ll->nsp.prefix[15],
+ ll->nsp.depth);
+
+ ll = ll->next;
+ }
+
+ return rsp;
+ }
+
+ printf("CG-NAPT debug handler called with wrong args %x %x\n", Msg[0],
+ Msg[1]);
+ int i = 0;
+
+ for (i = 0; i < 20; i++)
+ printf("%02x ", Msg[i]);
+ printf("\n");
+ return rsp;
+}
+
+/**
+ * Function to print num of clients per IP address
+ *
+ */
+void print_num_ip_clients(void)
+{
+ if (nat_only_config_flag) {
+ printf("Command not supported for NAT only config.\n");
+ return;
+ }
+
+ uint16_t ii;
+ printf("\tPublic IP: Num Clients\n");
+ for (ii = 0; ii < CGNAPT_MAX_PUB_IP; ii++)
+ printf("\t%08x : %7d\n", all_public_ip[ii].ip,
+ rte_atomic16_read(&all_public_ip[ii].count));
+}
+
+/**
+ * Function to print CGNAPT version info
+ *
+ * @param p
+ * An unused pointer to struct pipeline
+ * @param msg
+ *  void pointer to incoming arguments
+ *
+ * @return
+ *  void pointer of response
+ */
+void *pipeline_cgnapt_msg_req_ver_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_cgnapt_entry_delete_msg_rsp *rsp = msg;
+ uint8_t *Msg = msg;
+
+ rsp->status = 0;
+
+ printf("CG-NAPT debug handler called with args %x %x, offset %d\n",
+ Msg[CGNAPT_VER_CMD_OFST], Msg[CGNAPT_VER_CMD_OFST + 1],
+ CGNAPT_VER_CMD_OFST);
+
+ if (Msg[CGNAPT_VER_CMD_OFST] == CGNAPT_VER_CMD_VER) {
+ printf("CGNAPT Version %s\n", CGNAPT_VERSION);
+ return rsp;
+ }
+ printf("CG-NAPT Version handler called with wrong args %x %x\n",
+ Msg[0], Msg[1]);
+ int i = 0;
+
+ for (i = 0; i < 20; i++)
+ printf("%02x ", Msg[i]);
+ printf("\n");
+ return rsp;
+}
+
+/**
+ * Function to show CGNAPT stats
+ *
+ */
+void all_cgnapt_stats(void)
+{
+ int i;
+ struct pipeline_cgnapt *p_nat;
+ uint64_t receivedPktCount = 0;
+ uint64_t missedPktCount = 0;
+ uint64_t naptDroppedPktCount = 0;
+ uint64_t naptedPktCount = 0;
+ uint64_t inaptedPktCount = 0;
+ uint64_t enaptedPktCount = 0;
+ uint64_t arpicmpPktCount = 0;
+
+ printf("\nCG-NAPT Packet Stats:\n");
+ for (i = 0; i < n_cgnapt_pipeline; i++) {
+ p_nat = all_pipeline_cgnapt[i];
+
+ receivedPktCount += p_nat->receivedPktCount;
+ missedPktCount += p_nat->missedPktCount;
+ naptDroppedPktCount += p_nat->naptDroppedPktCount;
+ naptedPktCount += p_nat->naptedPktCount;
+ inaptedPktCount += p_nat->inaptedPktCount;
+ enaptedPktCount += p_nat->enaptedPktCount;
+ arpicmpPktCount += p_nat->arpicmpPktCount;
+
+ printf("pipeline %d stats:\n", p_nat->pipeline_num);
+ printf("Received %" PRIu64 ",", p_nat->receivedPktCount);
+ printf("Missed %" PRIu64 ",", p_nat->missedPktCount);
+ printf("Dropped %" PRIu64 ",", p_nat->naptDroppedPktCount);
+ printf("Translated %" PRIu64 ",", p_nat->naptedPktCount);
+ printf("ingress %" PRIu64 ",", p_nat->inaptedPktCount);
+ printf("egress %" PRIu64 "\n", p_nat->enaptedPktCount);
+ printf("arpicmp pkts %" PRIu64 "\n", p_nat->arpicmpPktCount);
+
+
+ #ifdef CGNAPT_DEBUGGING
+ printf("\n Drop detail 1:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount1);
+ printf("\n Drop detail 2:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount2);
+ printf("\n Drop detail 3:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount3);
+ printf("\n Drop detail 4:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount4);
+ printf("\n Drop detail 5:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount5);
+ printf("\n Drop detail 6:%" PRIu64 "",
+ p_nat->naptDroppedPktCount6);
+
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount1,
+ p_nat->missedpktcount2);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount3,
+ p_nat->missedpktcount4);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount5,
+ p_nat->missedpktcount6);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount7,
+ p_nat->missedpktcount8);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount9,
+ p_nat->missedpktcount10);
+
+ #endif
+
+ }
+
+ printf("\nTotal pipeline stats:\n");
+ printf("Received %" PRIu64 ",", receivedPktCount);
+ printf("Missed %" PRIu64 ",", missedPktCount);
+ printf("Dropped %" PRIu64 ",", naptDroppedPktCount);
+ printf("Translated %" PRIu64 ",", naptedPktCount);
+ printf("ingress %" PRIu64 ",", inaptedPktCount);
+ printf("egress %" PRIu64 "\n", enaptedPktCount);
+ printf("arpicmp pkts %" PRIu64 "\n", arpicmpPktCount);
+}
+
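+/**
+ * Function to show and clear CGNAPT stats
+ *
+ */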
+void all_cgnapt_clear_stats(void)
+{
+ int i;
+ struct pipeline_cgnapt *p_nat;
+ printf("\nCG-NAPT Packet Stats:\n");
+ for (i = 0; i < n_cgnapt_pipeline; i++) {
+ p_nat = all_pipeline_cgnapt[i];
+
+ printf("pipeline %d stats:\n", p_nat->pipeline_num);
+ printf("Received %" PRIu64 ",", p_nat->receivedPktCount);
+ printf("Missed %" PRIu64 ",", p_nat->missedPktCount);
+ printf("Dropped %" PRIu64 ",", p_nat->naptDroppedPktCount);
+ printf("Translated %" PRIu64 ",", p_nat->naptedPktCount);
+ printf("ingress %" PRIu64 ",", p_nat->inaptedPktCount);
+ printf("egress %" PRIu64 "\n", p_nat->enaptedPktCount);
+ printf("arpicmp pkts %" PRIu64 "\n", p_nat->arpicmpPktCount);
+
+ p_nat->receivedPktCount = 0;
+ p_nat->missedPktCount = 0;
+ p_nat->naptDroppedPktCount = 0;
+ p_nat->naptedPktCount = 0;
+ p_nat->inaptedPktCount = 0;
+ p_nat->enaptedPktCount = 0;
+ p_nat->arpicmpPktCount = 0;
+
+ #ifdef CGNAPT_DEBUGGING
+ printf("\n Drop detail 1:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount1);
+ printf("\n Drop detail 2:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount2);
+ printf("\n Drop detail 3:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount3);
+ printf("\n Drop detail 4:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount4);
+ printf("\n Drop detail 5:%" PRIu64 ",",
+ p_nat->naptDroppedPktCount5);
+ printf("\n Drop detail 6:%" PRIu64 "",
+ p_nat->naptDroppedPktCount6);
+
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount1,
+ p_nat->missedpktcount2);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount3,
+ p_nat->missedpktcount4);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount5,
+ p_nat->missedpktcount6);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount7,
+ p_nat->missedpktcount8);
+ printf("\nPkt_miss: %" PRIu64 " %" PRIu64 "",
+ p_nat->missedpktcount9,
+ p_nat->missedpktcount10);
+
+ #endif
+
+ }
+}
+
+/**
+ * Function to print the static entries in the common CGNAPT table
+ *
+ */
+void print_static_cgnapt_entries(void)
+{
+ uint32_t count = 0;
+ const void *key;
+ void *data;
+ uint32_t next = 0;
+ int32_t index = 0;
+ struct cgnapt_table_entry *entry;
+ do {
+ index = rte_hash_iterate(napt_common_table,
+ &key, &data, &next);
+
+ if ((index != -EINVAL) && (index != -ENOENT)) {
+ printf("\n%04d ", count);
+ rte_hexdump(stdout, "KEY", key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ int32_t position = rte_hash_lookup(
+ napt_common_table, key);
+ entry = &napt_hash_tbl_entries[position];
+
+ if (entry->data.timeout == STATIC_CGNAPT_TIMEOUT)
+ rte_hexdump(stdout, "Entry",
+ (const void *)entry,
+ sizeof(struct cgnapt_table_entry));
+ }
+
+ count++;
+ } while (index != -ENOENT);
+}
+
+/**
+ * CG-NAPT pipeline back-end operations
+ *
+ */
+
+struct pipeline_be_ops pipeline_cgnapt_be_ops = {
+ .f_init = pipeline_cgnapt_init,
+ .f_free = pipeline_cgnapt_free,
+ .f_run = NULL,
+ .f_timer = pipeline_cgnapt_timer,
+ .f_track = pipeline_cgnapt_track,
+};
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h
new file mode 100644
index 00000000..c9b81fa8
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.h
@@ -0,0 +1,808 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_CGNAPT_BE_H__
+#define __INCLUDE_PIPELINE_CGNAPT_BE_H__
+
+/**
+ * @file
+ * Pipeline CG-NAPT BE.
+ *
+ * Pipeline CG-NAPT Back End (BE).
+ * Responsible for packet processing.
+ *
+ */
+
+#include "pipeline_common_be.h"
+#include "vnf_common.h"
+#include <rte_pipeline.h>
+#include <rte_hash.h>
+#include "pipeline_timer_be.h"
+#include "pipeline_arpicmp_be.h"
+#include "cgnapt_pcp_be.h"
+#include "lib_arp.h"
+
+#define PIPELINE_CGNAPT_KEY_MAX_SIZE 64
+
+extern uint8_t CGNAPT_DEBUG;
+#define CGNAPT_DBG_CMD_OFST 8
+#define CGNAPT_DBG_CMD_STATS_SHOW 0
+#define CGNAPT_DBG_CMD_STATS_CLEAR 1
+#define CGNAPT_DBG_CMD_DBG_LEVEL 2
+#define CGNAPT_DBG_CMD_DBG_SHOW 3
+#define CGNAPT_DBG_CMD_LS_ENTRY 4
+#define CGNAPT_DBG_CMD_DYN 5
+#define CGNAPT_DBG_CMD_IF_STATS 6
+#define CGNAPT_DBG_CMD_INSTRUMENTATION 7
+#define CGNAPT_DBG_CMD_ITER_COM_TBL 8
+#define CGNAPT_DBG_CMD_MAPS_INFO 9
+#define CGNAPT_DBG_CMD_OFST1 10
+#define CGNAPT_DBG_CMD_IPV6 11
+#define CGNAPT_DBG_CMD_PRINT_DS 12
+#define CGNAPT_DBG_CMD_PRINT_NSP 13
+#define CGNAPT_DBG_MAX_CLI_PER_PUB_IP 14
+#define CGNAPT_DBG_PUB_IP_LIST 15
+#define CGNAPT_DBG_TIMING_INST 16
+
+
+#ifdef PCP_ENABLE
+
+#define CGNAPT_DBG_PCP 17
+/* PCP sub commands */
+enum {
+	CGNAPT_PCP_CMD_STATS,
+	CGNAPT_PCP_CMD_PCP_ENABLE,
+	CGNAPT_PCP_CMD_GET_LIFETIME,
+	CGNAPT_PCP_CMD_SET_LIFETIME,
+	CGNAPT_PCP_CMD_OFST = 8,
+};
+
+#endif
+
+/*
+ * CGNAPT_DBG_CMD_INSTRUMENTATION Sub commands
+*/
+ #define CGNAPT_CMD_INSTRUMENTATION_SUB0 0
+ #define CGNAPT_CMD_INSTRUMENTATION_SUB1 1
+ #define CGNAPT_CMD_INSTRUMENTATION_SUB2 2
+
+/*
+ * CGNAPT_DBG_CMD_IF_STATS Sub commands
+*/
+#define CGNAPT_IF_STATS_HWQ 0
+#define CGNAPT_IF_STATS_SWQ 1
+#define CGNAPT_IF_STATS_OTH 2
+
+/* Version command info */
+#define CGNAPT_VER_CMD_OFST 8
+#define CGNAPT_VER_CMD_VER 1
+
+/* Network Specific Prefix command */
+#define CGNAPT_NSP_CMD_OFST 8
+
+/* #define PIPELINE_CGNAPT_INSTRUMENTATION */
+#ifdef PIPELINE_CGNAPT_INSTRUMENTATION
+void *instrumentation_port_in_arg;
+struct rte_mempool *cgnapt_test_pktmbuf_pool;
+
+#define INST_ARRAY_SIZE 100000
+#define CGNAPT_INST5_SIG 0xAA
+#define CGNAPT_INST5_WAIT 200
+#define CGNAPT_INST5_OFST 10
+
+uint64_t *inst_start_time;
+uint64_t *inst_end_time;
+uint32_t *inst_diff_time;
+
+uint32_t cgnapt_inst_index;
+uint32_t cgnapt_inst5_flag;
+uint32_t cgnapt_inst5_wait;
+uint8_t cgnapt_num_func_to_inst;
+
+#endif
+
+#define CGNAPT_VERSION "1.8"
+#define CGNAPT_DYN_TIMEOUT (3*10) /* 30 secs */
+#define MAX_DYN_ENTRY (70000 * 16)
+
+#define NAPT_ENTRY_STALE 1
+#define NAPT_ENTRY_VALID 0
+
+/* For max_port_per_client */
+#define MAX_PORT_INVALID_KEY -1
+#define MAX_PORT_NOT_REACHED 0
+#define MAX_PORT_REACHED 1
+/* increment */
+#define MAX_PORT_INC_SUCCESS 1
+#define MAX_PORT_INC_REACHED 0
+#define MAX_PORT_INC_ERROR -1
+/* decrement */
+#define MAX_PORT_DEC_SUCCESS 1
+#define MAX_PORT_DEC_REACHED 0
+#define MAX_PORT_DEC_ERROR -1
+/* add_entry */
+#define MAX_PORT_ADD_SUCCESS 1
+#define MAX_PORT_ADD_UNSUCCESS 0
+#define MAX_PORT_ADD_ERROR -1
+/* del_entry */
+#define MAX_PORT_DEL_SUCCESS 1
+#define MAX_PORT_DEL_UNSUCCESS 0
+#define MAX_PORT_DEL_ERROR -1
+
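+/*
+ * Generates the table action handler for the lookup-hit case: for a
+ * contiguous packet mask the packets are processed four at a time through
+ * f_pkt4_work (with a scalar tail loop), otherwise the mask is walked bit
+ * by bit through f_pkt_work. Packets flagged by the work functions are
+ * dropped via rte_pipeline_ah_packet_drop().
+ */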
+#define PIPELINE_CGNAPT_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ &entries[i], i, arg); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], \
+ entries[i], i, arg); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entries[pos], pos, arg); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(rte_p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
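+/*
+ * Generates the output-port action handlers: both work functions are
+ * invoked on the packet; the "int i = *pkts_mask; i++;" statement only
+ * keeps the pkts_mask parameter referenced.
+ */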
+#define PIPELINE_CGNAPT_PORT_OUT_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkt, \
+ uint32_t *pkts_mask, \
+ void *arg) \
+{ \
+ f_pkt4_work(pkt, arg); \
+ f_pkt_work(*pkt, arg); \
+ \
+ int i = *pkts_mask; i++; \
+ return 0; \
+}
+
+#define PIPELINE_CGNAPT_PORT_OUT_BAH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+__rte_unused struct rte_pipeline *rte_p, \
+struct rte_mbuf **pkt, \
+uint32_t *pkts_mask, \
+void *arg) \
+{ \
+ f_pkt4_work(pkt, arg); \
+ \
+ f_pkt_work(*pkt, arg); \
+ \
+ int i = *pkts_mask; i++; \
+ return 0; \
+}
+
+#define PIPELINE_CGNAPT_KEY_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i; \
+ \
+ if (CGNAPT_DEBUG > 1) \
+ printf("cgnapt_key hit fn: %"PRIu32"\n", n_pkts); \
+ \
+ pkt_burst_cnt = 0; \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ \
+ return 0; \
+} \
+
+
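+/*
+ * Generates the table action handler for the lookup-miss case; the packet
+ * mask handling mirrors PIPELINE_CGNAPT_TABLE_AH_HIT above.
+ */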
+#define PIPELINE_CGNAPT_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ &entries[i], i, arg); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], \
+ entries[i], i, arg); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entries[pos], pos, arg); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(rte_p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
+/* IPv4 offsets */
+#define SRC_ADR_OFST_IP4 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST)
+#define DST_ADR_OFST_IP4 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST)
+#define SRC_PRT_OFST_IP4_TCP (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SIZE)
+#define SRC_PRT_OFST_IP4_UDP SRC_PRT_OFST_IP4_TCP
+#define DST_PRT_OFST_IP4_TCP (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SIZE + 2)
+#define DST_PRT_OFST_IP4_UDP DST_PRT_OFST_IP4_TCP
+#define PROT_OFST_IP4 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST)
+#define IDEN_OFST_IP4_ICMP (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SIZE + 4)
+#define SEQN_OFST_IP4_ICMP (MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SIZE + 6)
+
+/*NAT64*/
+
+/* IPv6 offsets */
+#define SRC_ADR_OFST_IP6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_SRC_ADR_OFST)
+#define DST_ADR_OFST_IP6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_DST_ADR_OFST)
+#define SRC_PRT_OFST_IP6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_SIZE)
+#define DST_PRT_OFST_IP6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_SIZE + 2)
+#define PROT_OFST_IP6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST)
+
+/* After IPv6 to IPv4 conversion */
+#define SRC_ADR_OFST_IP6t4 (20 + MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IP_HDR_SRC_ADR_OFST)
+#define DST_ADR_OFST_IP6t4 (20 + MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IP_HDR_DST_ADR_OFST)
+#define SRC_PRT_OFST_IP6t4 (20 + MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IP_HDR_SIZE)
+#define DST_PRT_OFST_IP6t4 (20 + MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IP_HDR_SIZE + 2)
+#define PROT_OFST_IP6t4 (20 + MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IP_HDR_PROTOCOL_OFST)
+#define ETH_OFST_IP6t4 (20 + MBUF_HDR_ROOM)
+
+/* After IPv4 to IPv6 conversion */
+#define DST_PRT_OFST_IP4t6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IPV6_HDR_SIZE + 2 - 20)
+#define DST_ADR_OFST_IP4t6 (MBUF_HDR_ROOM + ETH_HDR_SIZE + \
+ IPV6_HDR_DST_ADR_OFST - 20)
+
+#define TRAFFIC_TYPE_MIX 0
+#define TRAFFIC_TYPE_IPV4 4
+#define TRAFFIC_TYPE_IPV6 6
+
+#define CGNAPT_MAX_PUB_IP 256
+
+
+/**
+ * A structure defining public ip and associated client count.
+ */
+struct public_ip {
+ uint32_t ip;
+ rte_atomic16_t count; /* how many clients are using the public_ip */
+} all_public_ip[CGNAPT_MAX_PUB_IP];
+
+/**
+ * Command to dump number of clients using an IP address.
+ */
+void print_num_ip_clients(void);
+
+extern struct rte_hash *napt_common_table;
+extern struct public_ip all_public_ip[CGNAPT_MAX_PUB_IP];
+
+/**
+ * A structure defining pipeline_cgnapt - placeholder for all
+ * CGNAPT pipeline variables
+ *
+ *
+ */
+struct pipeline_cgnapt {
+ struct pipeline p;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_CGNAPT_MSG_REQS];
+
+ uint32_t n_flows;
+ uint32_t key_offset;
+ uint32_t key_size;
+ uint32_t hash_offset;
+
+ uint32_t n_entries;
+
+ /* Dynamic NAPT Start */
+ uint8_t is_static_cgnapt;
+ uint16_t max_port_per_client;
+ uint16_t max_clients_per_ip;
+
+ struct pub_ip_port_set *pub_ip_port_set;
+ uint8_t pub_ip_count;
+ struct pub_ip_range *pub_ip_range;
+ uint8_t pub_ip_range_count;
+
+ struct napt_port_alloc_elem *allocated_ports;
+ struct napt_port_alloc_elem *free_ports;
+ struct rte_ring *port_alloc_ring;
+
+ uint64_t *port_map;
+ uint16_t port_map_array_size;
+
+ uint64_t n_cgnapt_entry_deleted;
+ uint64_t n_cgnapt_entry_added;
+ uint64_t naptedPktCount;
+ uint64_t naptDroppedPktCount;
+
+ uint64_t inaptedPktCount;
+ uint64_t enaptedPktCount;
+ uint64_t receivedPktCount;
+ uint64_t missedPktCount;
+ uint64_t dynCgnaptCount;
+ uint64_t arpicmpPktCount;
+
+ uint64_t app_params_addr;
+ uint8_t pipeline_num;
+ uint8_t pkt_burst_cnt;
+ uint8_t hw_checksum_reqd;
+ uint8_t traffic_type;
+ uint8_t links_map[PIPELINE_MAX_PORT_IN];
+ uint8_t outport_id[PIPELINE_MAX_PORT_IN];
+
+ struct pipeline_cgnapt_entry_key
+ cgnapt_dyn_ent_table[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint32_t cgnapt_dyn_ent_index[RTE_PORT_IN_BURST_SIZE_MAX];
+
+ /* table lookup keys */
+ struct pipeline_cgnapt_entry_key keys[RTE_HASH_LOOKUP_BULK_MAX];
+ /* pointers to table lookup keys */
+ void *key_ptrs[RTE_HASH_LOOKUP_BULK_MAX];
+ /* table lookup results */
+ int32_t lkup_indx[RTE_HASH_LOOKUP_BULK_MAX];
+ /* entries used for pkts fwd */
+ struct rte_pipeline_table_entry *entries[RTE_HASH_LOOKUP_BULK_MAX];
+ uint64_t valid_packets; /* bitmap of valid packets to process */
+ uint64_t invalid_packets;/* bitmap of invalid packets to be dropped */
+
+ uint8_t vnf_set; /* to identify as separate LB-CGNAPT set */
+
+ /* Local ARP & ND Tables */
+ struct lib_arp_route_table_entry
+ local_lib_arp_route_table[MAX_ARP_RT_ENTRY];
+ uint8_t local_lib_arp_route_ent_cnt;
+ struct lib_nd_route_table_entry
+ local_lib_nd_route_table[MAX_ND_RT_ENTRY];
+ uint8_t local_lib_nd_route_ent_cnt;
+
+ /* For internal debugging purpose */
+#ifdef CGNAPT_TIMING_INST
+ uint64_t in_port_exit_timestamp;
+ uint64_t external_time_sum;
+ uint64_t internal_time_sum;
+ uint32_t time_measurements;
+ uint32_t max_time_mesurements;
+ uint8_t time_measurements_on;
+#endif
+
+#ifdef CGNAPT_DEBUGGING
+
+ uint32_t naptDebugCount;
+
+ uint64_t naptDroppedPktCount1;
+ uint64_t naptDroppedPktCount2;
+ uint64_t naptDroppedPktCount3;
+ uint64_t naptDroppedPktCount4;
+ uint64_t naptDroppedPktCount5;
+ uint64_t naptDroppedPktCount6;
+
+ uint64_t kpc1, kpc2;
+
+ uint64_t missedpktcount1;
+ uint64_t missedpktcount2;
+ uint64_t missedpktcount3;
+ uint64_t missedpktcount4;
+ uint64_t missedpktcount5;
+ uint64_t missedpktcount6;
+ uint64_t missedpktcount7;
+ uint64_t missedpktcount8;
+ uint64_t missedpktcount9;
+ uint64_t missedpktcount10;
+
+ uint64_t missedpktcount11;
+ uint64_t missedpktcount12;
+
+
+ uint64_t max_port_dec_err1;
+ uint64_t max_port_dec_err2;
+ uint64_t max_port_dec_err3;
+ uint64_t max_port_dec_success;
+
+ uint64_t pfb_err;
+ uint64_t pfb_ret;
+ uint64_t pfb_get;
+ uint64_t pfb_suc;
+ uint64_t gfp_suc;
+ uint64_t gfp_get;
+ uint64_t gfp_ret;
+ uint64_t gfp_err;
+#endif
+} __rte_cache_aligned;
+
+/**
+ * A structure defining the CG-NAPT input port handler arg.
+ */
+struct pipeline_cgnapt_in_port_h_arg {
+ struct pipeline_cgnapt *p;
+ uint8_t in_port_id;
+};
+
+enum {
+ CGNAPT_PRV_PORT_ID,
+ CGNAPT_PUB_PORT_ID,
+};
+
+uint16_t cgnapt_meta_offset;
+uint8_t dual_stack_enable;
+uint16_t dest_if_offset;
+uint8_t nat_only_config_flag;
+uint8_t CGNAPT_DEBUG;
+
+#if (RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN)
+/* x86 == little endian */
+/* network == big endian */
+#define CHECK_ENDIAN_16(x) rte_be_to_cpu_16(x)
+#else
+#define CHECK_ENDIAN_16(x) (x)
+#endif
+#define IP_VHL_DEF (0x40 | 0x05)
+struct rte_mempool *cgnapt_icmp_pktmbuf_tx_pool;
+struct rte_mbuf *cgnapt_icmp_pkt;
+struct rte_pipeline *myP;
+uint8_t icmp_pool_init;
+
+#define MAX_NUM_LOCAL_MAC_ADDRESS 16
+
+/***** NAT64 NSP declarations *****/
+/**
+ * A structure defining nsp node.
+ */
+struct cgnapt_nsp_node {
+ struct pipeline_cgnapt_nsp_t nsp;
+ struct cgnapt_nsp_node *next;
+};
+
+struct cgnapt_nsp_node *nsp_ll;
+
+/***** Common Table declarations *****/
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+#define MAX_NAPT_ENTRIES 16777216 /* 0x1000000 */
+#define NUM_NAPT_PORT_BULK_ALLOC 250
+
+
+struct rte_hash *napt_common_table;
+struct cgnapt_table_entry *napt_hash_tbl_entries;
+
+/***** Multiple NAT IP declarations *****/
+
+/**
+ * A structure defining public ip and associated port range set
+ */
+struct pub_ip_port_set {
+ uint32_t ip;
+ uint16_t start_port;
+ uint16_t end_port;
+};
+
+/**
+ * A structure defining public ip range
+ */
+struct pub_ip_range {
+ uint32_t start_ip;
+ uint32_t end_ip;
+};
+
+/***** Common Port Allocation declarations *****/
+
+int create_napt_common_table(uint32_t nFlows);
+struct rte_mempool *napt_port_pool;
+
+#define MAX_CGNAPT_SETS 8
+
+/**
+ * A structure defining a bulk port allocation element.
+ */
+struct napt_port_alloc_elem {
+ uint32_t count;
+ uint32_t ip_addr[NUM_NAPT_PORT_BULK_ALLOC];
+ uint16_t ports[NUM_NAPT_PORT_BULK_ALLOC];
+};
+
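+/* Public port bulk-allocation helpers: free ports are pooled in chunks of
+ * NUM_NAPT_PORT_BULK_ALLOC (see struct napt_port_alloc_elem above) and
+ * handed out/returned per dynamic NAPT entry.
+ */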
+int napt_port_alloc_init(struct pipeline_cgnapt *p_nat);
+void release_iport(uint16_t port, uint32_t public_ip,
+ struct pipeline_cgnapt *p_nat);
+int get_free_iport(struct pipeline_cgnapt *p_nat, uint32_t *public_ip);
+
+/***************************** Function declarations *************************/
+
+void
+pkt4_work_cgnapt_ipv6_prv(struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt_work_cgnapt_ipv6_prv(struct rte_mbuf *pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt4_work_cgnapt_ipv6_pub(struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt_work_cgnapt_ipv6_pub(struct rte_mbuf *pkt,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt4_work_cgnapt_ipv4_prv(struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt_work_cgnapt_ipv4_prv(struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt4_work_cgnapt_ipv4_pub(struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt_work_cgnapt_ipv4_pub(struct rte_mbuf **pkts,
+ uint32_t in_pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+/* in port handler key functions */
+void
+pkt4_work_cgnapt_key_ipv4_prv(struct rte_mbuf **pkts,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt_work_cgnapt_key_ipv4_prv(struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt4_work_cgnapt_key_ipv4_pub(struct rte_mbuf **pkts,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void
+pkt_work_cgnapt_key_ipv4_pub(struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt4_work_cgnapt_key_ipv6_pub(struct rte_mbuf **pkts,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt_work_cgnapt_key_ipv6_pub(struct rte_mbuf *pkts,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt4_work_cgnapt_key_ipv6_prv(struct rte_mbuf **pkts,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+void
+pkt_work_cgnapt_key_ipv6_prv(struct rte_mbuf *pkt,
+ uint32_t pkt_num,
+ void *arg, struct pipeline_cgnapt *p_nat);
+
+void send_icmp_dest_unreachable_msg(void);
+unsigned short cksum_calc(unsigned short *addr, int len);
+void print_mbuf(const char *rx_tx, unsigned int portid, struct rte_mbuf *mbuf,
+ unsigned int line);
+
+
+/* Max port per client declarations */
+/**
+ * A structure defining maximum ports per client
+ */
+struct max_port_per_client {
+ uint32_t prv_ip;
+ uint32_t prv_phy_port;
+ uint8_t max_port_cnt;
+};
+
+/**
+ * A structure defining maximum ports per client key
+ */
+struct max_port_per_client_key {
+ uint32_t prv_ip;
+ uint32_t prv_phy_port;
+};
+
+struct rte_hash *max_port_per_client_hash;
+struct max_port_per_client *max_port_per_client_array;
+
+
+int init_max_port_per_client(struct pipeline_cgnapt *p_nat);
+int is_max_port_per_client_reached(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat);
+int increment_max_port_counter(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat);
+int decrement_max_port_counter(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat);
+int max_port_per_client_add_entry(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat);
+int max_port_per_client_del_entry(uint32_t prv_ip_param,
+ uint32_t prv_phy_port_param,
+ struct pipeline_cgnapt *p_nat);
+
+/* Print functions */
+void print_pkt(struct rte_mbuf *pkt);
+void log_pkt(struct rte_mbuf *pkt);
+void print_key(struct pipeline_cgnapt_entry_key *key);
+void print_entry1(struct rte_pipeline_table_entry *entry);
+void print_cgnapt_entry(struct cgnapt_table_entry *entry);
+void my_print_entry(struct cgnapt_table_entry *ent);
+
+/* CLI custom handler back-end helper functions */
+
+void *pipeline_cgnapt_msg_req_custom_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_entry_add_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_entry_del_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_entry_sync_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_entry_dbg_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_entry_addm_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_ver_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_nsp_add_handler(
+ struct pipeline *p,
+ void *msg);
+
+void *pipeline_cgnapt_msg_req_nsp_del_handler(
+ struct pipeline *p,
+ void *msg);
+#ifdef PCP_ENABLE
+extern void *pipeline_cgnapt_msg_req_pcp_handler(
+ struct pipeline *p,
+ void *msg);
+#endif
+
+int pipeline_cgnapt_msg_req_entry_addm_pair(
+ struct pipeline *p, void *msg,
+ uint32_t src_ip, uint16_t src_port,
+ uint32_t dest_ip, uint16_t dest_port,
+ uint16_t rx_port, uint32_t ttl,
+ uint8_t type, uint8_t src_ipv6[16]);
+
+/* CGNAPT Functions */
+extern void rte_pipeline_action_handler_port_ext(
+ struct rte_pipeline *p,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry **entries);
+
+uint64_t pkt_miss_cgnapt(
+ struct pipeline_cgnapt_entry_key *key,
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry **table_entry,
+ uint64_t *pkts_mask,
+ uint32_t pkt_num,
+ void *arg);
+
+struct cgnapt_table_entry *add_dynamic_cgnapt_entry(
+ struct pipeline *p,
+ struct pipeline_cgnapt_entry_key *key,
+ //#ifdef PCP_ENABLE
+ uint32_t timeout,
+ //#endif
+ uint8_t pkt_type,
+ uint8_t *src_addr,
+ uint8_t *err);
+
+void calculate_hw_checksum(
+ struct rte_mbuf *pkt,
+ uint8_t ip_ver,
+ uint8_t protocol);
+
+uint64_t nextPowerOf2(uint64_t n);
+struct ether_addr *get_local_link_hw_addr(uint8_t out_port);
+uint8_t local_dest_mac_present(uint8_t out_port);
+
+enum PKT_TYPE {
+	PKT_TYPE_IPV4,
+	PKT_TYPE_IPV6,
+	PKT_TYPE_IPV6to4,
+	PKT_TYPE_IPV4to6,
+};
+void hw_checksum(struct rte_mbuf *pkt, enum PKT_TYPE ver);
+void sw_checksum(struct rte_mbuf *pkt, enum PKT_TYPE ver);
+int rte_get_pkt_ver(struct rte_mbuf *pkt);
+void print_common_table(void);
+#if CT_CGNAT
+extern int add_dynamic_cgnapt_entry_alg(
+ struct pipeline *p,
+ struct pipeline_cgnapt_entry_key *key,
+ struct cgnapt_table_entry **entry_ptr1,
+ struct cgnapt_table_entry **entry_ptr2);
+#endif
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_common.h b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_common.h
new file mode 100644
index 00000000..4f4253cd
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_common.h
@@ -0,0 +1,271 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_CGNAPT_COMMON_H__
+#define __INCLUDE_PIPELINE_CGNAPT_COMMON_H__
+
+#include "pipeline_common_fe.h"
+
+extern uint8_t CGNAPT_DEBUG;
+
+struct pipeline_cgnapt_entry_key {
+ uint32_t ip;
+ uint16_t port; /* L4 port */
+ uint16_t pid; /* if port id */
+};
+
+/*
+ * CGNAPT Entry
+ */
+enum cgnapt_entry_type {
+ CGNAPT_ENTRY_IPV4,
+ CGNAPT_ENTRY_IPV6
+};
+
+#ifdef PCP_ENABLE
+/**
+ * An enum defining the CG-NAPT entry creation type
+ */
+
+enum {
+ STATIC_CGNAPT_ENTRY,
+ DYNAMIC_CGNAPT_ENTRY,
+ PCP_CGNAPT_ENTRY,
+};
+#endif
+
+struct app_pipeline_cgnapt_entry_params {
+ enum cgnapt_entry_type type;
+ union {
+ uint32_t prv_ip; /* private ip address */
+ uint8_t prv_ipv6[16];
+ uint16_t u16_prv_ipv6[8];
+ uint32_t u32_prv_ipv6[4];
+ } u;
+ uint32_t prv_ip;
+ uint16_t prv_port; /* private port */
+ uint32_t pub_ip; /* public ip address */
+ uint16_t pub_port; /* public port */
+ uint16_t prv_phy_port; /* physical port on private side */
+ uint16_t pub_phy_port; /* physical port on public side */
+ uint32_t ttl; /* time to live */
+ long long int timeout;
+ #ifdef PCP_ENABLE
+ struct rte_timer *timer;
+ #endif
+};
+
+/*
+ * CGNAPT table
+ */
+
+struct cgnapt_table_entry {
+ struct rte_pipeline_table_entry head;
+ struct app_pipeline_cgnapt_entry_params data;
+} __rte_cache_aligned;
+
+/**
+ * A structure defining the CG-NAPT multiple entry parameter.
+ */
+struct app_pipeline_cgnapt_mentry_params {
+ enum cgnapt_entry_type type;
+ union {
+ uint32_t prv_ip; /* private ip address */
+ uint8_t prv_ipv6[16];
+ uint16_t u16_prv_ipv6[8];
+ uint32_t u32_prv_ipv6[4];
+ } u;
+ uint32_t prv_ip; /* private ip address */
+ uint16_t prv_port; /* private port start */
+ uint32_t pub_ip; /* public ip address */
+ uint16_t pub_port; /* public port start */
+ uint16_t prv_phy_port; /* physical port on private side */
+ uint16_t pub_phy_port; /* physical port on public side */
+ uint32_t ttl; /* time to live */
+ uint32_t num_ue; /* number of UEs to add */
+ uint16_t prv_port_max; /* max value for private port */
+ uint16_t pub_port_max; /* max value for public port */
+};
+
+/**
+ * A structure defining the NAT64 Network Specific Prefix.
+ */
+struct pipeline_cgnapt_nsp_t {
+ uint8_t prefix[16];
+ uint8_t depth;
+};
+
+
+/*
+ * Messages
+ */
+enum pipeline_cgnapt_msg_req_type {
+ PIPELINE_CGNAPT_MSG_REQ_ENTRY_ADD,
+ PIPELINE_CGNAPT_MSG_REQ_ENTRY_DEL,
+ /* to be used for periodic synchronization */
+ PIPELINE_CGNAPT_MSG_REQ_ENTRY_SYNC,
+ /* to be used for debug purposes */
+ PIPELINE_CGNAPT_MSG_REQ_ENTRY_DBG,
+ /* Multiple (bulk) add */
+ PIPELINE_CGNAPT_MSG_REQ_ENTRY_ADDM,
+ PIPELINE_CGNAPT_MSG_REQ_VER,
+ PIPELINE_CGNAPT_MSG_REQ_NSP_ADD,
+ PIPELINE_CGNAPT_MSG_REQ_NSP_DEL,
+ #ifdef PCP_ENABLE
+ PIPELINE_CGNAPT_MSG_REQ_PCP,
+ #endif
+ PIPELINE_CGNAPT_MSG_REQS
+};
+
+/**
+ * A structure defining MSG ENTRY ADD request.
+ */
+struct pipeline_cgnapt_entry_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_cgnapt_entry_key key;
+
+ /* data */
+ struct app_pipeline_cgnapt_entry_params data;
+};
+
+/**
+ * A structure defining MSG ENTRY ADD response.
+ */
+struct pipeline_cgnapt_entry_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/**
+ * A structure defining MSG ENTRY MADD request.
+ */
+struct pipeline_cgnapt_entry_addm_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* data */
+ struct app_pipeline_cgnapt_mentry_params data;
+};
+
+struct pipeline_cgnapt_entry_addm_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/**
+ * A structure defining MSG ENTRY DELETE request.
+ */
+struct pipeline_cgnapt_entry_delete_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_cgnapt_entry_key key;
+};
+
+/**
+ * A structure defining MSG ENTRY DELETE response.
+ */
+struct pipeline_cgnapt_entry_delete_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ENTRY SYNC
+ */
+struct pipeline_cgnapt_entry_sync_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* data */
+ struct app_pipeline_cgnapt_entry_params data;
+};
+
+struct pipeline_cgnapt_entry_sync_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/**
+ * A structure defining the debug command response message.
+ */
+struct pipeline_cgnapt_entry_dbg_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/**
+ * A structure defining the NSP add request.
+ */
+struct pipeline_cgnapt_nsp_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* Network Specific Prefix and prefix length */
+ struct pipeline_cgnapt_nsp_t nsp;
+};
+
+/**
+ * A structure defining the NSP add response.
+ */
+struct pipeline_cgnapt_nsp_add_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/**
+ * A structure defining MSG NSP DEL request
+ */
+struct pipeline_cgnapt_nsp_del_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* Network Specific Prefix and prefix length */
+ struct pipeline_cgnapt_nsp_t nsp;
+
+};
+
+/**
+ * A structure defining MSG NSP DEL response
+ */
+struct pipeline_cgnapt_nsp_del_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/**
+ * A structure defining the debug command request message.
+ */
+struct pipeline_cgnapt_entry_dbg_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_cgnapt_msg_req_type subtype;
+
+ /* data */
+ uint8_t data[5];
+};
+
+extern struct pipeline_be_ops pipeline_cgnapt_be_ops;
+void print_num_ip_clients(void);
+void all_cgnapt_stats(void);
+void all_cgnapt_clear_stats(void);
+void print_static_cgnapt_entries(void);
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_timer.c b/VNFs/vCGNAPT/pipeline/pipeline_timer.c
new file mode 100644
index 00000000..1ac3ba7b
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_timer.c
@@ -0,0 +1,37 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "pipeline_timer.h"
+#include "pipeline_timer_be.h"
+
+/*
+ * @file
+ *
+ * Front End (FE) file for Timer pipeline
+ * No cmds are implemented for Timer pipeline
+ *
+ */
+static struct pipeline_fe_ops pipeline_timer_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_timer = {
+ .name = "TIMER",
+ .be_ops = &pipeline_timer_be_ops,
+ .fe_ops = &pipeline_timer_fe_ops,
+};
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_timer.h b/VNFs/vCGNAPT/pipeline/pipeline_timer.h
new file mode 100644
index 00000000..2788fe68
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_timer.h
@@ -0,0 +1,24 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_TIMER_H__
+#define __INCLUDE_PIPELINE_TIMER_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_timer;
+
+#endif
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_timer_be.c b/VNFs/vCGNAPT/pipeline/pipeline_timer_be.c
new file mode 100644
index 00000000..ed1c5875
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_timer_be.c
@@ -0,0 +1,507 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_hexdump.h>
+#include <rte_timer.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_jhash.h>
+#include "app.h"
+#include "pipeline_timer_be.h"
+#include "pipeline_cgnapt_be.h"
+
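+/* Debug helper: prints the current file, line and function */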
+#define BLURT printf("This is line %d of file %s (function %s)\n",\
+ __LINE__, __FILE__, __func__)
+/**
+ * @file
+ * Pipeline Timer Implementation.
+ *
+ * Implementation of Pipeline TIMER Back End (BE).
+ * Runs on separate timer core.
+ *
+ */
+
+
+/**
+ * @struct
+ * Main Pipeline structure for Timer.
+ *
+ *
+ */
+
+
+struct pipeline_timer {
+
+ uint32_t dequeue_loop_cnt;
+
+} __rte_cache_aligned;
+
+struct rte_mempool *timer_mempool;
+struct rte_mempool *timer_key_mempool;
+static int timer_objs_mempool_count;
+static int timer_ring_alloc_cnt;
+uint64_t cgnapt_timeout;
+uint32_t timer_lcore;
+
+uint8_t TIMER_DEBUG;
+
+/**
+* Function to enqueue timer objects from CGNAPT
+*
+* @param egress_key
+* CGNAPT egress key
+* @param ingress_key
+* CGNAPT ingress key
+* @param egress_entry
+* CGNAPT egress entry
+* @param ingress_entry
+* CGNAPT ingress entry
+* @param p_nat
+* CGNAPT thread main pipeline structure
+*/
+
+void timer_thread_enqueue(struct pipeline_cgnapt_entry_key *egress_key,
+ struct pipeline_cgnapt_entry_key *ingress_key,
+ struct cgnapt_table_entry *egress_entry,
+ struct cgnapt_table_entry *ingress_entry,
+ struct pipeline *p_nat)
+{
+
+ struct timer_key *tk_ptr;
+
+ if (rte_mempool_get(timer_key_mempool, (void **)&tk_ptr) < 0) {
+ printf("TIMER - Error in getting timer_key alloc buffer\n");
+ return;
+ }
+
+ rte_memcpy(&tk_ptr->egress_key, egress_key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ rte_memcpy(&tk_ptr->ingress_key, ingress_key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ tk_ptr->egress_entry = egress_entry;
+ tk_ptr->ingress_entry = ingress_entry;
+ tk_ptr->p_nat = (struct pipeline *) p_nat;
+
+ if (TIMER_DEBUG == 1) {
+ rte_hexdump(stdout, "Egress Key", &tk_ptr->egress_key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ rte_hexdump(stdout, "Ingress Key", &tk_ptr->ingress_key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ rte_hexdump(stdout, "Egress Entry", &tk_ptr->egress_entry,
+ sizeof(struct cgnapt_table_entry));
+ rte_hexdump(stdout, "Ingress Entry", &tk_ptr->ingress_entry,
+ sizeof(struct cgnapt_table_entry));
+ }
+
+	if (rte_ring_mp_enqueue(timer_ring, (void *)tk_ptr) == -ENOBUFS) {
+		printf("TIMER - Ring enqueue failed, dropping timer key\n");
+		rte_mempool_put(timer_key_mempool, tk_ptr);
+	}
+}
+
+/**
+* Function to dequeue timer objects coming from CGNAPT
+*
+*/
+void timer_thread_dequeue(void)
+{
+ struct timer_key *tk_ptr;
+ int ret;
+
+ ret = rte_ring_sc_dequeue(timer_ring, (void *)&tk_ptr);
+ if (ret == -ENOENT)
+ return;
+
+ if (TIMER_DEBUG == 1) {
+ BLURT;
+ rte_hexdump(stdout, "Egress Key", &tk_ptr->egress_key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ rte_hexdump(stdout, "Ingress Key", &tk_ptr->ingress_key,
+ sizeof(struct pipeline_cgnapt_entry_key));
+ rte_hexdump(stdout, "Egress Entry", &tk_ptr->egress_entry,
+ sizeof(struct cgnapt_table_entry));
+ rte_hexdump(stdout, "Ingress Entry", &tk_ptr->ingress_entry,
+ sizeof(struct cgnapt_table_entry));
+ }
+
+ #ifdef PCP_ENABLE
+	/* To differentiate between a PCP-requested entry and a dynamic
+	 * entry, the "timeout" value in the table entry is used:
+	 * timeout == -1 : static entry
+	 * timeout ==  0 : dynamic entry (default cgnapt_timeout is used)
+	 * timeout  >  0 : PCP entry
+	 */
+
+	/* If a PCP entry already exists */
+
+ if (tk_ptr->egress_entry->data.timer != NULL) {
+
+ if (rte_timer_reset(tk_ptr->egress_entry->data.timer,
+ tk_ptr->egress_entry->data.timeout * rte_get_timer_hz(),
+ SINGLE, timer_lcore,
+ cgnapt_entry_delete,
+ tk_ptr) < 0)
+ printf("PCP Entry Err : Timer already running\n");
+
+
+ } else{
+ #endif
+
+ struct rte_timer *timer;
+
+ if (rte_mempool_get(timer_mempool, (void **)&timer) < 0) {
+ printf("TIMER - Error in getting timer alloc buffer\n");
+ return;
+ }
+ rte_timer_init(timer);
+
+ #ifdef PCP_ENABLE
+ if (tk_ptr->egress_entry->data.timeout > 0)
+ tk_ptr->egress_entry->data.timer = timer;
+ #endif
+
+ if (rte_timer_reset(
+ timer,
+ #ifdef PCP_ENABLE
+ tk_ptr->egress_entry->data.timeout > 0 ?
+ tk_ptr->egress_entry->data.timeout * rte_get_timer_hz() :
+ #endif
+ cgnapt_timeout,
+ SINGLE,
+ timer_lcore,
+ cgnapt_entry_delete,
+ tk_ptr) < 0)
+ printf("Err : Timer already running\n");
+
+ #ifdef PCP_ENABLE
+ }
+ #endif
+}
+
+/**
+ * Function to delete a NAT entry due to timer expiry
+ *
+ * @param timer
+ * A pointer to struct rte_timer
+ * @param arg
+ * void pointer to timer arguments
+ */
+void cgnapt_entry_delete(struct rte_timer *timer, void *arg)
+{
+ int ret = 0;
+
+ struct timer_key *tk_ptr = (struct timer_key *)arg;
+ struct pipeline_cgnapt *p_nat = (struct pipeline_cgnapt *)
+ tk_ptr->p_nat;
+
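+	/* Delete the entry pair only when both directions report ttl == 1
+	 * (no refresh since the last timer pass) or when an expiring PCP
+	 * entry is being handled; otherwise set any cleared ttl back to 1
+	 * and re-arm the timer for one more timeout period.
+	 */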
+ if (
+ #ifdef PCP_ENABLE
+ (tk_ptr->egress_entry->data.timeout > 0) ||
+ #endif
+ ((tk_ptr->egress_entry->data.ttl == 1) &&
+ (tk_ptr->ingress_entry->data.ttl == 1))) {
+
+ /* call pipeline hash table egress entry delete */
+ #ifdef CGNAPT_DEBUGGING
+ #ifdef CGNAPT_DBG_PRNT
+ printf("\nTimer egr:");
+ print_key(&tk_ptr->egress_key);
+ #endif
+ #endif
+
+ rte_hash_del_key(napt_common_table,
+ &tk_ptr->egress_key);
+
+ /* call pipeline hash table ingress entry delete */
+ #ifdef CGNAPT_DEBUGGING
+ #ifdef CGNAPT_DBG_PRNT
+ printf("\nTimer ing:");
+ print_key(&tk_ptr->ingress_key);
+ #endif
+ #endif
+
+ rte_hash_del_key(napt_common_table,
+ &tk_ptr->ingress_key);
+
+ p_nat->dynCgnaptCount -= 2;
+ p_nat->n_cgnapt_entry_deleted += 2;
+
+ if (is_phy_port_privte(tk_ptr->egress_key.pid)) {
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG > 2)
+ printf("Deleting port:%d\n",
+ tk_ptr->ingress_key.port);
+ #endif
+
+ uint32_t public_ip = tk_ptr->egress_entry->data.pub_ip;
+
+ release_iport(tk_ptr->ingress_key.port, public_ip, p_nat);
+
+ ret = decrement_max_port_counter(tk_ptr->egress_key.ip,
+ tk_ptr->egress_key.pid,
+ p_nat);
+
+ if (ret == MAX_PORT_DEC_REACHED)
+ rte_atomic16_dec(&all_public_ip
+ [rte_jhash(&public_ip, 4, 0) %
+ CGNAPT_MAX_PUB_IP].count);
+
+ #ifdef CGNAPT_DBG_PRNT
+ if (CGNAPT_DEBUG >= 2) {
+ if (ret < 0)
+ printf("Max Port hash entry does not "
+ "exist: %d\n", ret);
+ if (!ret)
+ printf("Max Port Deletion entry for "
+ "the IP address: 0x%x\n",
+ tk_ptr->egress_key.ip);
+ }
+ #endif
+ }
+
+ rte_timer_stop(timer);
+ rte_mempool_put(timer_mempool, timer);
+ rte_mempool_put(timer_key_mempool, tk_ptr);
+ return;
+ }
+
+ if (!tk_ptr->egress_entry->data.ttl)
+ tk_ptr->egress_entry->data.ttl = 1;
+
+ if (!tk_ptr->ingress_entry->data.ttl)
+ tk_ptr->ingress_entry->data.ttl = 1;
+
+ /*cgnapt_timeout*/
+ rte_timer_reset(timer, cgnapt_timeout, SINGLE,
+ timer_lcore, cgnapt_entry_delete, tk_ptr);
+
+}
+
+/*
+ * Function to parse the timer pipeline parameters
+ *
+ * @param p
+ *  Timer pipeline structure
+ * @param params
+ *  Timer pipeline params read from the config file
+ *
+ * @return
+ *  0 on success, negative value on failure
+ */
+static int
+pipeline_cgnapt_parse_args(struct pipeline_timer *p,
+ struct pipeline_params *params)
+{
+ uint32_t dequeue_loop_cnt_present = 0;
+ uint32_t n_flows_present = 0;
+ struct pipeline_timer *p_timer = (struct pipeline_timer *)p;
+ uint32_t i;
+
+ if (TIMER_DEBUG > 2) {
+ printf("TIMER pipeline_cgnapt_parse_args params->n_args: %d\n",
+ params->n_args);
+ }
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (TIMER_DEBUG > 2) {
+ printf("TIMER args[%d]: %s %d, %s\n", i, arg_name,
+ atoi(arg_value), arg_value);
+ }
+
+ if (strcmp(arg_name, "dequeue_loop_cnt") == 0) {
+ if (dequeue_loop_cnt_present)
+ return -1;
+ dequeue_loop_cnt_present = 1;
+
+ p_timer->dequeue_loop_cnt = atoi(arg_value);
+ printf("dequeue_loop_cnt : %d\n",
+ p_timer->dequeue_loop_cnt);
+ continue;
+ }
+
+ if (strcmp(arg_name, "n_flows") == 0) {
+			if (n_flows_present)
+ return -1;
+ n_flows_present = 1;
+
+ printf("Timer : n_flows = %d\n", atoi(arg_value));
+ timer_objs_mempool_count =
+ nextPowerOf2(atoi(arg_value));
+ timer_ring_alloc_cnt =
+ nextPowerOf2(atoi(arg_value));
+ printf("Timer : next power of 2 of n_flows = %d\n",
+ timer_ring_alloc_cnt);
+ }
+ }
+
+	if (!n_flows_present) {
+ printf("Timer : n_flows is not present\n");
+ return -1;
+ }
+
+
+ return 0;
+}
+
+uint32_t get_timer_core_id(void)
+{
+ return timer_lcore;
+}
+
+/*
+ * Function to initialize the main Timer pipeline
+ *
+ * Init Timer pipeline parameters
+ * Parse Timer pipeline parameters
+ *
+ * @param params
+ *  Timer pipeline parameters read from the config file
+ * @param arg
+ *  Pointer to the app_params structure
+ *
+ * @return
+ *  Timer pipeline struct pointer on success, NULL on failure
+ */
+static void *pipeline_timer_init(struct pipeline_params *params, void *arg)
+{
+ struct app_params *app = (struct app_params *)arg;
+ struct pipeline_timer *p_timer;
+ uint32_t size;
+
+ printf("Entering pipeline_timer_init\n");
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_timer));
+ p_timer = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ if (p_timer == NULL)
+ return NULL;
+
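+	/* Default number of timer-key dequeues per f_run invocation; can be
+	 * overridden by the "dequeue_loop_cnt" config parameter.
+	 */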
+ p_timer->dequeue_loop_cnt = 100;
+ cgnapt_timeout = rte_get_tsc_hz() * CGNAPT_DYN_TIMEOUT;
+	printf("cgnapt_timeout %" PRIu64 "\n", cgnapt_timeout);
+
+ timer_lcore = rte_lcore_id();
+
+ if (pipeline_cgnapt_parse_args(p_timer, params))
+ return NULL;
+
+ /* Create port alloc buffer */
+
+ timer_mempool = rte_mempool_create("timer_mempool",
+ timer_objs_mempool_count,
+ sizeof(struct rte_timer),
+ 0, 0,
+ NULL, NULL,
+ NULL, NULL, rte_socket_id(), 0);
+ if (timer_mempool == NULL)
+ rte_panic("timer_mempool create error\n");
+
+ timer_key_mempool = rte_mempool_create("timer_key_mempool",
+ timer_objs_mempool_count,
+ sizeof(struct timer_key),
+ 0, 0,
+ NULL, NULL,
+ NULL, NULL, rte_socket_id(), 0);
+ if (timer_key_mempool == NULL)
+ rte_panic("timer_key_mempool create error\n");
+
+ timer_ring = rte_ring_create("TIMER_RING",
+ timer_ring_alloc_cnt, rte_socket_id(), 0);
+
+ if (timer_ring == NULL)
+ rte_panic("timer_ring creation failed");
+
+ return (void *)p_timer;
+}
+
+/*
+ * Function to free the Timer pipeline
+ *
+ * @param pipeline
+ *  Timer pipeline structure pointer
+ *
+ * @return
+ *  0 on success, negative value on failure
+ */
+static int pipeline_timer_free(void *pipeline)
+{
+	struct pipeline_timer *p = (struct pipeline_timer *)pipeline;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ rte_free(p);
+
+ return 0;
+}
+
+/*
+ * Function to run custom code continuously
+ *
+ * @param pipeline
+ *  Timer pipeline structure pointer
+ *
+ * @return
+ *  0 on success, negative value on failure
+ */
+static int pipeline_timer_run(void *pipeline)
+{
+ struct pipeline_timer *p = (struct pipeline_timer *)pipeline;
+ uint32_t i;
+
+ if (p == NULL)
+ return -EINVAL;
+ for (i = 0; i < p->dequeue_loop_cnt; i++)
+ timer_thread_dequeue();
+
+ return 0;
+}
+
+/*
+ * Function to run custom code on pipeline timer expiry
+ *
+ * @param pipeline
+ *  Timer pipeline structure pointer (unused)
+ *
+ * @return
+ *  0 on success, negative value on failure
+ */
+static int pipeline_timer_timer(__rte_unused void *pipeline)
+{
+ rte_timer_manage();
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_timer_be_ops = {
+ .f_init = pipeline_timer_init,
+ .f_free = pipeline_timer_free,
+ .f_run = pipeline_timer_run,
+ .f_timer = pipeline_timer_timer,
+ .f_track = NULL,
+};
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_timer_be.h b/VNFs/vCGNAPT/pipeline/pipeline_timer_be.h
new file mode 100644
index 00000000..47def684
--- /dev/null
+++ b/VNFs/vCGNAPT/pipeline/pipeline_timer_be.h
@@ -0,0 +1,55 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_TIMER_BE_H__
+#define __INCLUDE_PIPELINE_TIMER_BE_H__
+
+#include <rte_timer.h>
+#include "pipeline_cgnapt_be.h"
+#include "pipeline_common_be.h"
+#include "pipeline_cgnapt_common.h"
+
+extern struct pipeline_be_ops pipeline_timer_be_ops;
+/*uint8_t timer_ring_init;*/
+struct rte_ring *timer_ring;
+extern struct rte_mempool *timer_mempool;
+
+extern struct rte_mempool *timer_key_mempool;
+/*static int timer_objs_mempool_count = 70000;*/
+/*static int timer_ring_alloc_cnt = 4096;*/
+extern uint64_t cgnapt_timeout;
+extern uint32_t timer_lcore;
+
+/* one timer entry created for pair of egress and ingress entry */
+struct timer_key {
+ struct pipeline_cgnapt_entry_key egress_key, ingress_key;
+ struct cgnapt_table_entry *egress_entry, *ingress_entry;
+ struct pipeline *p_nat;
+} __rte_cache_aligned;
+
+/******* Function declarations ********/
+
+void cgnapt_entry_delete(struct rte_timer *tim, void *arg);
+
+void timer_thread_enqueue(struct pipeline_cgnapt_entry_key *egress_key,
+ struct pipeline_cgnapt_entry_key *ingress_key,
+ struct cgnapt_table_entry *egress_entry,
+ struct cgnapt_table_entry *ingress_entry,
+ struct pipeline *p_nat);
+
+void timer_thread_dequeue(void);
+extern uint64_t nextPowerOf2(uint64_t n);
+#endif
diff --git a/docs/vCGNAPT/INSTALL.rst b/docs/vCGNAPT/INSTALL.rst
new file mode 100644
index 00000000..3a556819
--- /dev/null
+++ b/docs/vCGNAPT/INSTALL.rst
@@ -0,0 +1,185 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, National Center of Scientific Research "Demokritos" and others.
+
+============================
+CGNAPT - Installation Guide
+============================
+
+
+vCGNAPT Compilation
+===================
+
+After downloading (or git cloning) the repository into a directory (samplevnf), build as follows.
+
+Dependencies
+------------
+
+* DPDK 16.04: downloaded and installed via vnf_build.sh, or manually from
+  http://fast.dpdk.org/rel/dpdk-16.04.tar.xz (both options are offered by
+  vnf_build.sh below)
+* libpcap-dev
+* libzmq
+* libcurl
+
+Environment variables
+---------------------
+
+Apply all the additional patches in 'patches/dpdk_custom_patch/', build DPDK and
+set the environment variables:
+
+::
+
+  export RTE_SDK=<dpdk 16.04 directory>
+  export RTE_TARGET=x86_64-native-linuxapp-gcc
+
+This is done by the vnf_build.sh script.
+
+Auto Build:
+===========
+
+Run ./tools/vnf_build.sh in the samplevnf root folder.
+
+Follow the on-screen steps from option [1] to [8] and select option [7]
+to build the VNF.
+It automatically downloads DPDK 16.04 and any required patches, sets up
+everything and builds the vCGNAPT VNF.
+
+Following are the options for setup:
+
+::
+
+ ----------------------------------------------------------
+ Step 1: Environment setup.
+ ----------------------------------------------------------
+ [1] Check OS and network connection
+
+ ----------------------------------------------------------
+ Step 2: Download and Install
+ ----------------------------------------------------------
+ [2] Agree to download
+ [3] Download packages
+  [4] Download DPDK zip (optional, use it when option [3] fails)
+ [5] Install DPDK
+ [6] Setup hugepages
+
+ ----------------------------------------------------------
+ Step 3: Build VNF
+ ----------------------------------------------------------
+ [7] Build VNF
+
+ [8] Exit Script
+
+A vCGNAPT executable will be created at the following location
+samplevnf/VNFs/vCGNAPT/build/vCGNAPT
+
+
+Manual Build:
+=============
+1. Download DPDK 16.04 from dpdk.org
+ - http://dpdk.org/browse/dpdk/snapshot/dpdk-16.04.zip
+2. unzip dpdk-16.04 and apply dpdk patch
+ - cd dpdk-16.04
+ - patch -p0 < VNF_CORE/patches/dpdk_custom_patch/rte_pipeline.patch
+ - patch -p1 < VNF_CORE/patches/dpdk_custom_patch/i40e-fix-link-management.patch
+ - patch -p1 < VNF_CORE/patches/dpdk_custom_patch/i40e-fix-Rx-hang-when-disable-LLDP.patch
+ - patch -p1 < VNF_CORE/patches/dpdk_custom_patch/i40e-fix-link-status-change-interrupt.patch
+ - patch -p1 < VNF_CORE/patches/dpdk_custom_patch/i40e-fix-VF-bonded-device-link-down.patch
+ - build dpdk
+ - make config T=x86_64-native-linuxapp-gcc O=x86_64-native-linuxapp-gcc
+ - cd x86_64-native-linuxapp-gcc
+ - make
+ - Setup huge pages
+ - For 1G/2M hugepage sizes, for example 1G pages, the size must be specified
+ explicitly and can also be optionally set as the default hugepage size for
+ the system. For example, to reserve 8G of hugepage memory in the form of
+ eight 1G pages, the following options should be passed to the kernel:
+ * default_hugepagesz=1G hugepagesz=1G hugepages=8 hugepagesz=2M hugepages=2048
+   - To make this persistent, edit the /etc/default/grub configuration file.
+   - Append "default_hugepagesz=1G hugepagesz=1G hugepages=8 hugepagesz=2M hugepages=2048"
+     to the GRUB_CMDLINE_LINUX entry, then regenerate the GRUB configuration
+     (for example with update-grub) and reboot for the change to take effect.
+3. Setup Environment Variable
+ - export RTE_SDK=<samplevnf>/dpdk-16.04
+ - export RTE_TARGET=x86_64-native-linuxapp-gcc
+ - export VNF_CORE=<samplevnf>
+   or use ./tools/setenv.sh
+4. Build vCGNAPT VNFs
+ - cd <samplevnf>/VNFs/vCGNAPT
+ - make clean
+ - make
+5. A vCGNAPT executable will be created at the following location
+ - <samplevnf>/VNFs/vCGNAPT/build/vCGNAPT
+
+Run
+====
+
+Setup Port to run VNF:
+----------------------
+::
+
+ 1. cd <samplevnf>/dpdk-16.04
+ 2. ./tools/dpdk_nic_bind.py --status               <--- List the network devices
+ 3. ./tools/dpdk_nic_bind.py -b igb_uio <PCI Port 0> <PCI Port 1>
+
+ More details: http://dpdk.org/doc/guides-16.04/linux_gsg/build_dpdk.html#binding-and-unbinding-network-ports-to-from-the-kernel-modules
+
+ Make the necessary changes to the config files to run the vCGNAPT VNF
+ eg: ports_mac_list = 00:00:00:30:21:F0 00:00:00:30:21:F1
+
+Dynamic CGNAPT
+--------------
+Update the configuration according to system configuration.
+
+::
+
+ ./vCGNAPT -p <port mask> -f <config> -s <script>                  - SW_LoadB
+ ./vCGNAPT -p <port mask> -f <config> -s <script> --hwlb <num_WT>  - HW_LoadB
+
+Static CGNAPT
+-------------
+Update the script file and add Static NAT Entry
+
+::
+
+ e.g.,
+ ;p <pipeline id> entry addm <prv_ipv4/6> <prv_port> <pub_ip> <pub_port> <phy_port> <ttl> <no_of_entries> <end_prv_port> <end_pub_port>
+ ;p 3 entry addm 152.16.100.20 1234 152.16.40.10 1 0 500 65535 1234 65535
+
+Run IPv4
+----------
+::
+
+ Software LoadB
+ --------------
+ cd <samplevnf>/VNFs/vCGNAPT/build
+ ./vCGNAPT -p 0x3 -f ./config/arp_txrx-2P-1T.cfg -s ./config/arp_txrx_ScriptFile_2P.cfg
+
+
+ Hardware LoadB
+ --------------
+ cd <samplevnf>/VNFs/vCGNAPT/build
+ ./vCGNAPT -p 0x3 -f ./config/arp_hwlb-2P-1T.cfg -s ./config/arp_hwlb_scriptfile_2P.cfg --hwlb 1
+
+Run IPv6
+---------
+::
+
+ Software LoadB
+ --------------
+ cd <samplevnf>/VNFs/vCGNAPT/build
+ ./vCGNAPT -p 0x3 -f ./config/arp_txrx-2P-1T-ipv6.cfg -s ./config/arp_txrx_ScriptFile_2P.cfg
+
+
+ Hardware LoadB
+ --------------
+ cd <samplevnf>/VNFs/vCGNAPT/build
+ ./vCGNAPT -p 0x3 -f ./config/arp_hwlb-2P-1T-ipv6.cfg -s ./config/arp_hwlb_scriptfile_2P.cfg --hwlb 1
+
+vCGNAPT execution on BM & SRIOV:
+--------------------------------
+::
+
+ To run the VNF, execute the following:
+ samplevnf/VNFs/vCGNAPT# ./build/vCGNAPT -p 0x3 -f ./config/arp_txrx-2P-1T.cfg -s ./config/arp_txrx_ScriptFile_2P.cfg
+ Command Line Params:
+ -p PORTMASK: Hexadecimal bitmask of ports to configure
+ -f CONFIG FILE: vCGNAPT configuration file
+ -s SCRIPT FILE: vCGNAPT script file
+
+vCGNAPT execution on OVS:
+-------------------------
+::
+
+ To run the VNF, execute the following:
+ samplevnf/VNFs/vCGNAPT# ./build/vCGNAPT -p 0x3 -f ./config/arp_txrx-2P-1T.cfg -s ./config/arp_txrx_ScriptFile_2P.cfg --disable-hw-csum
+ Command Line Params:
+ -p PORTMASK: Hexadecimal bitmask of ports to configure
+ -f CONFIG FILE: vCGNAPT configuration file
+ -s SCRIPT FILE: vCGNAPT script file
+ --disable-hw-csum : Disable TCP/UDP hardware checksum
diff --git a/docs/vCGNAPT/README.rst b/docs/vCGNAPT/README.rst
new file mode 100644
index 00000000..eda94831
--- /dev/null
+++ b/docs/vCGNAPT/README.rst
@@ -0,0 +1,189 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, National Center of Scientific Research "Demokritos" and others.
+
+========================================================
+Carrier Grade Network Address Port Translation - vCGNAPT
+========================================================
+
+1. Introduction
+===============
+This application implements vCGNAPT. The idea of vCGNAPT is to extend the life
+of the service provider's IPv4 network infrastructure and mitigate IPv4 address
+exhaustion by using address and port translation at large scale. It processes
+traffic in both directions.
+
+It also supports connectivity between an IPv6 access network and an IPv4 data
+network using IPv6-to-IPv4 address translation (NAT64) and vice versa.
+
+About DPDK
+----------
+The DPDK IP Pipeline Framework provides a set of libraries to build a pipeline
+application. In this document, the CG-NAPT application is explained with its
+own building blocks.
+
+This document assumes the reader possesses knowledge of DPDK concepts and the IP
+Pipeline Framework. For more details, read the DPDK Getting Started Guide, DPDK
+Programmers Guide, and DPDK Sample Applications Guide.
+
+2. Scope
+==========
+This application provides a standalone, DPDK-based, high-performance vCGNAPT
+Virtual Network Function implementation.
+
+3. Features
+===========
+The vCGNAPT VNF currently supports the following functionality:
+ • Static NAT
+ • Dynamic NAT
+ • Static NAPT
+ • Dynamic NAPT
+ • ARP (request, response, gratuitous)
+ • ICMP (terminal echo, echo response, passthrough)
+ • ICMPv6 and ND (Neighbor Discovery)
+ • UDP, TCP and ICMP protocol passthrough
+ • Multithread support
+ • Multiple physical port support
+ • Limiting max ports per client
+ • Limiting max clients per public IP address
+ • Live Session tracking to NAT flow
+ • NAT64
+ • PCP Support
+ • ALG SIP
+ • ALG FTP
+
+4. High Level Design
+====================
+The upstream path carries traffic from Private to Public and the downstream
+path carries traffic from Public to Private. The vCGNAPT uses the same set of
+components to process upstream and downstream traffic.
+
+In the vCGNAPT application, each component is constructed using the IP Pipeline
+framework. It includes the Master pipeline component, the load balancer pipeline
+component and the vCGNAPT pipeline component.
+
+A pipeline framework is a collection of input ports, table(s), output ports and
+actions (functions). In the vCGNAPT pipeline, the main sub-components are the
+in-port function handler, the table and the table function handler. vCGNAPT rules
+are configured in the table, which translates egress and ingress traffic according
+to the physical port on which the packet arrived. The action can be to forward the
+packet to an output port (either egress or ingress) or to drop it.
+
+vCGNAPT Graphical Overview
+==========================
+The idea of vCGNAPT is to extend the life of the service provider's IPv4 network
+infrastructure and mitigate IPv4 address exhaustion by using address and port
+translation at large scale. It processes traffic in both directions.
+
+.. code-block:: console
+
+ +------------------+
+ | +-----+
+ | Private consumer | CPE |---------------+
+ | IPv4 traffic +-----+ |
+ +------------------+ |
+ +------------------+ v +----------------+
+ | | +------------+ | |
+ | Private IPv4 | | vCGNAPT | | Public |
+ | access network | | NAT44 | | IPv4 traffic |
+ | | +------------+ | |
+ +------------------+ | +----------------+
+ +------------------+ |
+ | +-----+ |
+ | Private consumer| CPE |-----------------+
+ | IPv4 traffic +-----+
+ +------------------+
+ Figure 1: vCGNAPT deployment in Service provider network
+
+
+Components of vCGNAPT
+=====================
+In vCGNAPT, each component is constructed as a packet framework. It includes the
+Master pipeline component, the driver, the load balancer pipeline component and
+the vCGNAPT worker pipeline component. A pipeline framework is a collection of
+input ports, table(s), output ports and actions (functions).
+
+Receive and transmit driver
+----------------------------
+Packets are received in bulk and provided to the load balancer thread. The
+transmit driver takes packets from the worker threads through a dedicated ring
+and sends them to the hardware queue.
+
+ARPICMP pipeline
+------------------------
+The ARPICMP pipeline is responsible for handling all L2/L3 ARP-related packets.
+
+Master pipeline
+----------------
+This component does not process any packets and should be configured on core 0,
+to save cores for the components that process traffic. The component is
+responsible for:
+ 1. Initializing each component of the pipeline application in different threads
+ 2. Providing a CLI shell for the user
+ 3. Propagating the commands from the user to the corresponding components
+ 4. ARP and ICMP are handled here
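+
+As an illustration of command propagation, each command carries the id of the
+target pipeline; for example, the static NAT entry command shown in INSTALL.rst
+is addressed to pipeline 3 (pipeline ids depend on the configuration file in use):
+
+::
+ ;p 3 entry addm 152.16.100.20 1234 152.16.40.10 1 0 500 65535 1234 65535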
+
+Load Balancer pipeline
+------------------------
+The load balancer is part of the multi-threaded vCGNAPT release; it distributes
+the flows to multiple vCGNAPT worker threads.
+
+Traffic is distributed based on a 2-tuple or 5-tuple (source address, source
+port, destination address, destination port and protocol), applying XOR logic
+to spread the load across the active worker threads and thereby maintaining
+flow affinity to worker threads.
+
+The tuple selection can be modified/configured using the configuration file.
+
+5. vCGNAPT - Static
+====================
+The vCGNAPT component translates private IP & port to public IP & port on the
+egress side and public IP & port to private IP & port on the ingress side,
+based on the NAT rules added to the pipeline hash table. The NAT rules are
+added to the hash table via user commands. Packets that have a matching
+egress key or ingress key in the NAT table are processed to change IP &
+port and are forwarded to the output port. Packets that do not have a
+match take the default action, which may result in the packets being dropped.
+
+6. vCGNAPT - Dynamic
+====================
+The vCGNAPT component translates private IP & port to public IP & port on the
+egress side and public IP & port to private IP & port on the ingress side, based
+on the NAT rules added to the pipeline hash table. The dynamic nature of vCGNAPT
+refers to NAT entries being added to the hash table dynamically when a new packet
+arrives. A NAT rule is added to the hash table automatically when there is no
+matching entry in the table, and the packet is circulated through the software
+queue. Packets that have a matching egress key or ingress key in the NAT table
+are processed to change IP & port and are forwarded to the output port defined
+in the entry.
+
+Dynamic vCGNAPT also acts as a static one: NAT entries can be added statically.
+The static NAT entry port range must not conflict with the dynamic NAT port range.
+
+vCGNAPT Static Topology:
+------------------------
+::
+ IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 1) IXIA
+ operation:
+ Egress  --> The packets sent out from ixia(port 0) will be CGNAPTed to ixia(port 1).
+ Ingress --> The packets sent out from ixia(port 1) will be CGNAPTed to ixia(port 0).
+
+vCGNAPT Dynamic Topology (L4REPLAY):
+------------------------------------
+::
+ IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 0)L4REPLAY
+ operation:
+ Egress --> The packets sent out from ixia will be CGNAPTed to L3FWD/L4REPLAY.
+ Ingress --> The L4REPLAY upon reception of packets (Private to Public Network),
+ will immediately replay back the traffic to IXIA interface. (Pub -->Priv).
+
+How to run L4Replay:
+--------------------
+::
+ 1. After the installation of samplevnf:
+    go to <samplevnf/VNFs/L4Replay>
+ 2. ./build/L4replay -c core_mask -n no_of_channels -- -p PORT_MASK --config="(port,queue,lcore)"
+    e.g.: ./L4replay -c 0xf -n 4 -- -p 0x3 --config="(0,0,1)"
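+
+Here --config maps (port, queue, lcore) triplets, i.e. which logical core polls
+which port/queue pair; the multi-port form below follows the usual DPDK
+sample-application convention and should be treated as an assumption for
+L4Replay:
+
+::
+ ./L4replay -c 0xf -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)"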
+
+7. Installation, Compilation and Execution
+==========================================
+Please refer to <samplevnf>/docs/vCGNAPT/INSTALL.rst for installation, configuration,
+compilation and execution details.
diff --git a/docs/vCGNAPT/RELEASE_NOTES.rst b/docs/vCGNAPT/RELEASE_NOTES.rst
new file mode 100644
index 00000000..91b73075
--- /dev/null
+++ b/docs/vCGNAPT/RELEASE_NOTES.rst
@@ -0,0 +1,80 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, National Center of Scientific Research "Demokritos" and others.
+
+=========================================================
+Carrier Grade Network Address Port Translation - vCGNAPT
+=========================================================
+
+1. Introduction
+================
+This is the beta release of the vCGNAPT VNF.
+The vCGNAPT application can be run independently (refer to INSTALL.rst).
+
+2. User Guide
+===============
+Refer to README.rst for further details on vCGNAPT, the HLD, supported features
+and the test plan. For build configuration and execution requisites, please refer
+to INSTALL.rst.
+
+3. Features for this release
+============================
+This release supports the following features as part of vCGNAPT:
+- vCGNAPT can run as a standalone application on a bare-metal Linux server or in a
+  virtual machine using SR-IOV and OVS-DPDK.
+- Static NAT
+- Dynamic NAT
+- Static NAPT
+- Dynamic NAPT
+- ARP (request, response, gratuitous)
+- ICMP (terminal echo, echo response, passthrough)
+- ICMPv6 and ND (Neighbor Discovery)
+- UDP, TCP and ICMP protocol passthrough
+- Multithread support
+- Multiple physical port support
+- Limiting max ports per client
+- Limiting max clients per public IP address
+- Live Session tracking to NAT flow
+- PCP support
+- NAT64
+- ALG SIP
+- ALG FTP
+
+4. System requirements - OS and kernel version
+==============================================
+This release is supported on Ubuntu 14.04 and Ubuntu 16.04 with kernel versions
+below 4.5.
+
+ VNFs on BareMetal support:
+ OS: Ubuntu 14.04 or 16.04 LTS
+ kernel: < 4.5
+ http://releases.ubuntu.com/16.04/
+ Download/Install the image: ubuntu-16.04.1-server-amd64.iso
+
+ VNFs on Standalone Hypervisor
+ HOST OS: Ubuntu 14.04 or 16.04 LTS
+ http://releases.ubuntu.com/16.04/
+ Download/Install the image: ubuntu-16.04.1-server-amd64.iso
+ - OVS (DPDK) - 2.5
+ - kernel: < 4.5
+ - Hypervisor - KVM
+ - VM OS - Ubuntu 16.04/Ubuntu 14.04
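+
+For example, the OS release and kernel version of the host can be checked before
+installation (a minimal check, assuming an Ubuntu host):
+
+::
+ lsb_release -rs    # expect 14.04 or 16.04
+ uname -r           # expect a kernel version below 4.5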
+
+5. Known Bugs and limitations
+=============================
+- The Hardware Load Balancer feature is supported on the Fortville NIC with
+  firmware version 4.53 and below.
+- L4 UDP Replay is used to capture throughput for dynamic CGNAPT.
+- Hardware checksum offload is not supported for IPv6 traffic.
+- CGNAPT on SR-IOV has been tested with up to 4 threads.
+
+6. Future Work
+==============
+- SCTP passthrough support
+- Multi-homing support
+- Performance optimization on different platforms
+
+7. References
+=============
+The following links provide additional information:
+ .. _QUICKSTART: http://dpdk.org/doc/guides-16.04/linux_gsg/quick_start.html
+ .. _DPDKGUIDE: http://dpdk.org/doc/guides-16.04/prog_guide/index.html