path: root/common/VIL/l2l3_stack
Diffstat (limited to 'common/VIL/l2l3_stack')
-rw-r--r--  common/VIL/l2l3_stack/Makefile  35
-rw-r--r--  common/VIL/l2l3_stack/bond.c  1595
-rw-r--r--  common/VIL/l2l3_stack/build/.interface.o.d  180
-rw-r--r--  common/VIL/l2l3_stack/build/.l2_proto.o.d  175
-rw-r--r--  common/VIL/l2l3_stack/build/.main.o.d  209
-rw-r--r--  common/VIL/l2l3_stack/hle.c  43
-rw-r--r--  common/VIL/l2l3_stack/hle.h  40
-rw-r--r--  common/VIL/l2l3_stack/interface.c  1478
-rw-r--r--  common/VIL/l2l3_stack/interface.h  873
-rw-r--r--  common/VIL/l2l3_stack/l2_proto.c  239
-rw-r--r--  common/VIL/l2l3_stack/l2_proto.h  150
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_common.h  111
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm4.c  1119
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm4.h  374
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm6.c  1058
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm6.h  315
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_main.c  145
-rw-r--r--  common/VIL/l2l3_stack/lib_arp.c  2655
-rw-r--r--  common/VIL/l2l3_stack/lib_arp.h  506
-rw-r--r--  common/VIL/l2l3_stack/lib_icmpv6.c  410
-rw-r--r--  common/VIL/l2l3_stack/lib_icmpv6.h  113
-rw-r--r--  common/VIL/l2l3_stack/main_l2l3.c  304
-rw-r--r--  common/VIL/l2l3_stack/tsx.c  167
-rw-r--r--  common/VIL/l2l3_stack/tsx.h  38
24 files changed, 12332 insertions, 0 deletions
diff --git a/common/VIL/l2l3_stack/Makefile b/common/VIL/l2l3_stack/Makefile
new file mode 100644
index 00000000..b85bf1d4
--- /dev/null
+++ b/common/VIL/l2l3_stack/Makefile
@@ -0,0 +1,35 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = Protocol
+
+# all source are stored in SRCS-y
+SRCS-y := main.c l2_proto.c interface.c lib_arp.c lib_icmpv6.c l3fwd_main.c l3fwd_lpm4.c l3fwd_lpm6.c bond.c tsx.c hle.c
+
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3 $(USER_FLAGS)
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O0 -g
+CFLAGS += -mrtm -mhle
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/common/VIL/l2l3_stack/bond.c b/common/VIL/l2l3_stack/bond.c
new file mode 100644
index 00000000..8fd11712
--- /dev/null
+++ b/common/VIL/l2l3_stack/bond.c
@@ -0,0 +1,1595 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <interface.h>
+#include "tsx.h"
+extern interface_main_t ifm;
+extern uint8_t ifm_debug;
+extern int USE_RTM_LOCKS;
+extern rte_rwlock_t rwlock;
+
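+/* Creates a DPDK bonded port with the given name and mode, allocates its
+ * l2_phy_interface_t and bond config, and adds it to the IFM port list.
+ * If the port already exists, only the bonding mode is updated.
+ * Returns the bond port id on success, IFM_FAILURE otherwise.
+ */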
+int ifm_bond_port_create(const char *name, int mode, port_config_t * portconf)
+{
+ int port_id;
+ l2_phy_interface_t *bond_port;
+ if (ifm_debug && IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: i/p name %p, mode %d\n\r", __FUNCTION__,
+ name, mode);
+ if (name == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Param name cannot be NULL\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ if (mode < 0 || mode > 6) {
+		RTE_LOG(ERR, IFM, "%s: Param mode should be within 0 to 6\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ if (portconf == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Param portconf cannot be NULL\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ bond_port = ifm_get_port_by_name(name);
+ if (bond_port == NULL) {
+ if (ifm_debug && IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "Call ifm_port_setup %s\n\r", name);
+ port_id = rte_eth_bond_create(name, mode, 0);
+ if (port_id < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to create bond port %s with mode %u\n\r",
+ __FUNCTION__, name, mode);
+ return IFM_FAILURE;
+ }
+ RTE_LOG(INFO, IFM,
+ "%s: Created bond port %s(%u) on socket %u with "
+ "mode %u.\n\r", __FUNCTION__, name, port_id,
+ rte_eth_dev_socket_id(port_id), mode);
+
+ bond_port = (l2_phy_interface_t *) rte_zmalloc(NULL,
+ sizeof
+ (l2_phy_interface_t),
+ RTE_CACHE_LINE_SIZE);
+ bond_port->pmdid = port_id;
+ strncpy(bond_port->ifname, name, IFM_IFNAME_LEN);
+ memcpy(&bond_port->port_config, portconf,
+ sizeof(port_config_t));
+ bond_port->flags |= IFM_MASTER;
+ struct bond_port *bond_info;
+ bond_info = (struct bond_port *)rte_zmalloc(NULL,
+ sizeof(struct
+ bond_port),
+ RTE_CACHE_LINE_SIZE);
+ bond_info->socket_id = rte_eth_dev_socket_id(port_id);
+ bond_info->mode = mode;
+ bond_info->bond_portid = port_id;
+ bond_port->bond_config = bond_info;
+ if (mode == IFM_BONDING_MODE_8023AD)
+ bond_port->tx_buf_len =
+ (2 * RTE_ETH_TX_BUFFER_SIZE(IFM_BURST_SIZE)) *
+ RTE_MAX_ETHPORTS;
+ //ifm_add_port_to_port_list(bond_port);
+ ifm.port_list[port_id] = bond_port;
+ if (ifm_debug && IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "%s: Added bond port %s(%u) to port list\n\r",
+ __FUNCTION__, name, port_id);
+ } else {
+ RTE_LOG(INFO, IFM, "%s: Port %s already exists in the"
+ " port list\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ if (!(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Previously port %s was not "
+ "configured as Bond port\n\r", __FUNCTION__,
+ name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->mode != mode) {
+ if (rte_eth_bond_mode_set(bond_port->pmdid, mode) < 0) {
+ RTE_LOG(ERR, IFM, "%s: rte_eth_bond_mode_set "
+ "failed\n\r", __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+
+ bond_port->bond_config->mode =
+ rte_eth_bond_mode_get(bond_port->pmdid);
+			/* xmit policy may change based on mode */
+ bond_port->bond_config->xmit_policy =
+ rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ if (ifm_debug && IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "%s: Bond port %u mode is updated. Mode %u xmit_policy %u."
+ "\n\r", __FUNCTION__, bond_port->pmdid,
+ bond_port->bond_config->mode,
+ bond_port->bond_config->xmit_policy);
+ }
+ port_id = bond_port->pmdid;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+			RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return port_id;
+}
+
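+/* Deletes the named bond port, provided no slave ports are still bound to
+ * it, frees its bond config and removes it from the IFM port list.
+ */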
+int ifm_bond_port_delete(const char *name)
+{
+ l2_phy_interface_t *bond_port;
+ if (name == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Param name cannot be NULL\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ bond_port = ifm_get_port_by_name(name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+	if (bond_port == NULL) {
+		RTE_LOG(ERR, IFM, "%s: Given bond port with name %s not"
+			" found in the list\n\r", __FUNCTION__, name);
+		/* release the write lock taken above before returning */
+		if (USE_RTM_LOCKS)
+			rtm_unlock();
+		else
+			rte_rwlock_write_unlock(&rwlock);
+		return IFM_FAILURE;
+	}
+ if (!(bond_port->flags & IFM_MASTER)) {
+		RTE_LOG(ERR, IFM, "%s: Given port %s is not configured"
+			" as a bond port\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config && bond_port->bond_config->slave_count > 0) {
+ RTE_LOG(ERR, IFM, "%s: First unbind all slave "
+ "ports from the bond port %s\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ int ret;
+ ret = rte_eth_bond_free(name);
+ if (ret < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to delete "
+ "bond port %s\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: Bond port %s deleted successfully\n\r",
+ __FUNCTION__, name);
+
+ if (bond_port && bond_port->bond_config != NULL) {
+ rte_free(bond_port->bond_config);
+ bond_port->bond_config = NULL;
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ //ifm.port_list[bond_port->pmdid] = NULL;
+ return IFM_SUCCESS;
+}
+
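+/* Binds a slave port to a bond port and refreshes the cached bond
+ * configuration; port setup is run on the bond port when its first slave
+ * is added.
+ */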
+int ifm_add_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ l2_phy_interface_t *bond_port, *slave_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+ slave_port = ifm_get_port(slave_port_id);
+ // slave_port = ifm.port_list[slave_port_id];
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: i/p bond id %u, slave id %u\n\r",
+ __FUNCTION__, bonded_port_id, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (slave_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not available in "
+ "port list.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port && !(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not configured "
+ "as Master port. %u\n\r", __FUNCTION__, bonded_port_id,
+ bond_port->flags & IFM_MASTER);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port && bond_port->bond_config
+ && bond_port->bond_config->slave_count == RTE_MAX_ETHPORTS) {
+ RTE_LOG(ERR, IFM,
+			"%s: Failed to bind. Already %u ports are bonded to master port\n\r",
+ __FUNCTION__, RTE_MAX_ETHPORTS);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (slave_port && slave_port->flags & IFM_SLAVE) {
+		/* Check whether the port is already part of some other bond port */
+ if (slave_port->bond_config != NULL) {
+ if (bonded_port_id !=
+ slave_port->bond_config->bond_portid) {
+ RTE_LOG(ERR, IFM,
+ "%s: Slave port %u is already part"
+ " of other bond port %u.\n\r",
+ __FUNCTION__, slave_port_id,
+ slave_port->bond_config->bond_portid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ } else {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+					"%s: Slave port %u is already bonded to %u\n\r",
+ __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ }
+ }
+ if (bond_port->bond_config && bond_port->bond_config->slave_count &&
+ bond_port->link_speed != slave_port->link_speed
+ && bond_port->link_duplex != slave_port->link_duplex) {
+ RTE_LOG(ERR, IFM,
+ "%s: Error in adding slave port to bond port. Reason speed mismatch\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Slave port %u Master port %u\n\r",
+ __FUNCTION__, slave_port_id, bonded_port_id);
+ int ret;
+ ret = rte_eth_bond_slave_add(bond_port->pmdid, slave_port->pmdid);
+ if (ret < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to add slave port %u to bond "
+ "port %u.\n\r", __FUNCTION__, slave_port->pmdid,
+ bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ slave_port->flags |= IFM_SLAVE;
+ /* Populate bond config information */
+ if (bond_port->bond_config) {
+ bond_port->bond_config->xmit_policy =
+ rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ bond_port->bond_config->internal_ms =
+ rte_eth_bond_link_monitoring_get(bond_port->pmdid);
+ bond_port->bond_config->link_up_delay_ms =
+ rte_eth_bond_link_up_prop_delay_get(bond_port->pmdid);
+ bond_port->bond_config->link_down_delay_ms =
+ rte_eth_bond_link_down_prop_delay_get(bond_port->pmdid);
+ bond_port->bond_config->primary =
+ rte_eth_bond_primary_get(bond_port->pmdid);
+ bond_port->bond_config->slave_count =
+ rte_eth_bond_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->slaves,
+ RTE_MAX_ETHPORTS);
+ bond_port->bond_config->active_slave_count =
+ rte_eth_bond_active_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->
+ active_slaves,
+ RTE_MAX_ETHPORTS);
+ slave_port->bond_config = bond_port->bond_config;
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Slave count is %u\n\r",
+ __FUNCTION__,
+ bond_port->bond_config->slave_count);
+ if (bond_port->bond_config->slave_count == 1) {
+ ret =
+ ifm_port_setup(bond_port->pmdid,
+ &(bond_port->port_config));
+ if (ret < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to start bond port %u.\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ } else {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Skipping"
+ " port setup\n\r", __FUNCTION__);
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
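+/* Unbinds a slave port from a bond port and refreshes the cached primary,
+ * slave and active-slave information.
+ */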
+int ifm_remove_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ l2_phy_interface_t *bond_port, *slave_port;
+
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+ slave_port = ifm_get_port(slave_port_id);
+ //slave_port = ifm.port_list[slave_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available "
+ "in port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (slave_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not available "
+ "in port list.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port && !(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not configured "
+ "as Master port.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (slave_port && !(slave_port->flags & IFM_SLAVE)) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not configured"
+ " as slave port.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ int i;
+ int found = 0;
+ for (i = 0; i < bond_port->bond_config->slave_count; i++) {
+ if (slave_port_id == bond_port->bond_config->slaves[i]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+		RTE_LOG(ERR, IFM, "%s: Given slave port %u is not bound "
+			"to bond port %u\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (rte_eth_bond_slave_remove(bonded_port_id, slave_port_id) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to unbind slave port %u"
+ " from bond port %u\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ slave_port->flags &= ~IFM_SLAVE;
+ slave_port->bond_config = NULL;
+ bond_port->bond_config->primary =
+ rte_eth_bond_primary_get(bond_port->pmdid);
+ bond_port->bond_config->slave_count =
+ rte_eth_bond_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->slaves,
+ RTE_MAX_ETHPORTS);
+ bond_port->bond_config->active_slave_count =
+ rte_eth_bond_active_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->
+ active_slaves, RTE_MAX_ETHPORTS);
+
+ if (ifm_debug & IFM_DEBUG)
+		RTE_LOG(ERR, IFM, "%s: Unbound slave port %u from the bond "
+ "port %u %d\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id,
+ rte_eth_bond_primary_get(bond_port->pmdid));
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+}
+
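+/* Sets the bonding mode of a bond port and updates the cached mode and
+ * xmit policy.
+ */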
+int set_bond_mode(uint8_t bonded_port_id, uint8_t mode)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+	if (bond_port)
+		ifm_remove_port_details(bond_port->pmdid);
+	if (bond_port == NULL) {
+		RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+			" port list.\n\r", __FUNCTION__, bonded_port_id);
+		/* release the write lock taken above before returning */
+		if (USE_RTM_LOCKS)
+			rtm_unlock();
+		else
+			rte_rwlock_write_unlock(&rwlock);
+		return IFM_FAILURE;
+	}
+ if (bond_port && bond_port->bond_config->mode == mode) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+				"%s: Bond port is already set to the given"
+				" mode %u\n\r", __FUNCTION__, mode);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ return IFM_SUCCESS;
+
+ }
+ if (rte_eth_bond_mode_set(bond_port->pmdid, mode) < 0) {
+ RTE_LOG(ERR, IFM,
+			"%s: Failed to set bond mode %u for port id %u\n\r",
+ __FUNCTION__, mode, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ return IFM_FAILURE;
+ }
+
+ bond_port->bond_config->mode = rte_eth_bond_mode_get(bond_port->pmdid);
+ /* xmit policy may change for based on mode */
+ bond_port->bond_config->xmit_policy =
+ rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Bond port %u mode is updated. Mode %u xmit_policy %u."
+			"\n\r", __FUNCTION__, bond_port->pmdid,
+ bond_port->bond_config->mode,
+ bond_port->bond_config->xmit_policy);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ return IFM_SUCCESS;
+}
+
+int get_bond_mode(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint8_t mode = bond_port->bond_config->mode;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return mode;
+}
+
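+/* Sets an already bound slave port as the primary port of the bond port. */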
+int set_bond_primary(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ l2_phy_interface_t *slave_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+ slave_port = ifm_get_port(slave_port_id);
+ // slave_port = ifm.port_list[slave_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (slave_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not available in"
+			" port list.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ int i;
+ int found = 0;
+ for (i = 0; i < bond_port->bond_config->slave_count; i++) {
+ if (slave_port_id == bond_port->bond_config->slaves[i]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+		RTE_LOG(ERR, IFM, "%s: Slave port %u is not bound "
+			"to bond port %u. Slave port should be bound first\n\r",
+ __FUNCTION__, slave_port_id, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+
+ if (bond_port->bond_config->primary == slave_port_id) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+				"%s: Slave port %u is already primary for bond port"
+				" %u\n\r", __FUNCTION__, slave_port_id,
+				bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+
+ }
+ if (rte_eth_bond_primary_set(bond_port->pmdid, slave_port->pmdid) < 0) {
+ RTE_LOG(ERR, IFM,
+			"%s: Failed to set slave %u as primary for bond port %u\n\r",
+ __FUNCTION__, slave_port->pmdid, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+
+ bond_port->bond_config->primary =
+ rte_eth_bond_primary_get(bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Primary port is updated as %u for bond port %u",
+ __FUNCTION__, bond_port->bond_config->primary,
+ bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+}
+
+int get_bond_primary_port(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint8_t primary = bond_port->bond_config->primary;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return primary;
+}
+
+int get_bond_slave_count(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t slave_count = bond_port->bond_config->slave_count;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return slave_count;
+}
+
+int get_bond_active_slave_count(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t slave_count = bond_port->bond_config->active_slave_count;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return slave_count;
+}
+
+int get_bond_slaves(uint8_t bonded_port_id, uint8_t slaves[RTE_MAX_ETHPORTS])
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+	if (bond_port == NULL) {
+		RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+			" port list.\n\r", __FUNCTION__, bonded_port_id);
+		/* release the read lock taken above before returning */
+		if (USE_RTM_LOCKS)
+			rtm_unlock();
+		else
+			rte_rwlock_read_unlock(&rwlock);
+		return IFM_FAILURE;
+	}
+ memcpy(slaves, bond_port->bond_config->slaves,
+ bond_port->bond_config->slave_count);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_active_slaves(uint8_t bonded_port_id,
+ uint8_t active_slaves[RTE_MAX_ETHPORTS])
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+	if (bond_port == NULL) {
+		RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+			" port list.\n\r", __FUNCTION__, bonded_port_id);
+		/* release the read lock taken above before returning */
+		if (USE_RTM_LOCKS)
+			rtm_unlock();
+		else
+			rte_rwlock_read_unlock(&rwlock);
+		return IFM_FAILURE;
+	}
+ memcpy(active_slaves, bond_port->bond_config->active_slaves,
+ bond_port->bond_config->active_slave_count);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
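+/* Sets the MAC address of a bond port and refreshes the cached MAC. */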
+int set_bond_mac_address(uint8_t bonded_port_id, struct ether_addr *mac_addr)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (mac_addr == NULL) {
+ RTE_LOG(ERR, IFM, "%s: MAC address cannot be NULL.\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (rte_eth_bond_mac_address_set(bond_port->pmdid, mac_addr) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set MAC addr for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ rte_eth_macaddr_get(bond_port->pmdid,
+ (struct ether_addr *)bond_port->macaddr);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int reset_bond_mac_addr(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (rte_eth_bond_mac_address_reset(bond_port->pmdid) < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to reset MAC addr for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ rte_eth_macaddr_get(bond_port->pmdid,
+ (struct ether_addr *)bond_port->macaddr);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+	return IFM_SUCCESS;
+}
+
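+/* Sets the transmit policy of a bond port and updates the cached value. */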
+int set_bond_xmitpolicy(uint8_t bonded_port_id, uint8_t policy)
+{
+
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+ int ret = 0;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->xmit_policy == policy) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old policy value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_xmit_policy_set(bond_port->pmdid, policy) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set policy for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ ret = rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ if (ret < 0) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_xmit_policy_set failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->xmit_policy = policy;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_xmitpolicy(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+		RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s:Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint8_t policy = bond_port->bond_config->xmit_policy;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s:Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return policy;
+}
+
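+/* Sets the link monitoring frequency (in ms) of a bond port. */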
+int set_bond_link_montitor_frequency(uint8_t bonded_port_id,
+ uint32_t internal_ms)
+{
+ l2_phy_interface_t *bond_port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+// bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+ int ret = 0;
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->internal_ms == internal_ms) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old frequency value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_link_monitoring_set(bond_port->pmdid, internal_ms) < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to set link monitor frequency for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ ret = rte_eth_bond_link_monitoring_get(bond_port->pmdid);
+ if (ret < 0) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_link_monitoring_get failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->internal_ms = internal_ms;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_link_monitor_frequency(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+// bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t internal_ms = bond_port->bond_config->internal_ms;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return internal_ms;
+}
+
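+/* Sets the link-down propagation delay (in ms) of a bond port. */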
+int set_bond_linkdown_delay(uint8_t bonded_port_id, uint32_t delay_ms)
+{
+ l2_phy_interface_t *bond_port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+// bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+ int delay = 0;
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->link_down_delay_ms == delay_ms) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old delay value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_link_down_prop_delay_set(bond_port->pmdid, delay_ms) <
+ 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set delay for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ delay = rte_eth_bond_link_down_prop_delay_get(bond_port->pmdid);
+ if (delay < 0) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_link_down_prop_delay_get failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->link_down_delay_ms = delay;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_link_down_delay(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ //bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t delay_ms = bond_port->bond_config->link_down_delay_ms;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return delay_ms;
+
+}
+
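+/* Sets the link-up propagation delay (in ms) of a bond port. */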
+int set_bond_linkup_delay(uint8_t bonded_port_id, uint32_t delay_ms)
+{
+ l2_phy_interface_t *bond_port;
+ int delay = 0;
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+	if (USE_RTM_LOCKS)
+		rtm_lock();
+	else
+		rte_rwlock_write_lock(&rwlock);
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->link_up_delay_ms == delay_ms) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old delay value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_link_up_prop_delay_set(bond_port->pmdid, delay_ms) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set delay for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ delay = rte_eth_bond_link_up_prop_delay_get(bond_port->pmdid);
+ if (delay < 0) {
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_link_up_prop_delay_get failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->link_up_delay_ms = delay;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+}
+
+int get_bond_link_up_delay(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ uint32_t delay_ms;
+
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ if (bond_port == NULL) {
+ if (ifm_debug & IFM_DEBUG) {
+ RTE_LOG(ERR, IFM,
+ "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__,
+ bonded_port_id);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ delay_ms = bond_port->bond_config->link_up_delay_ms;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return delay_ms;
+}
diff --git a/common/VIL/l2l3_stack/build/.interface.o.d b/common/VIL/l2l3_stack/build/.interface.o.d
new file mode 100644
index 00000000..582958f4
--- /dev/null
+++ b/common/VIL/l2l3_stack/build/.interface.o.d
@@ -0,0 +1,180 @@
+dep_interface.o = \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.c \
+ /usr/include/stdc-predef.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_config.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /usr/include/stdio.h /usr/include/features.h \
+ /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+ /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h \
+ /usr/include/x86_64-linux-gnu/bits/types.h \
+ /usr/include/x86_64-linux-gnu/bits/typesizes.h /usr/include/libio.h \
+ /usr/include/_G_config.h /usr/include/wchar.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdarg.h \
+ /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \
+ /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+ /usr/include/x86_64-linux-gnu/bits/waitstatus.h /usr/include/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap-16.h \
+ /usr/include/x86_64-linux-gnu/sys/types.h /usr/include/time.h \
+ /usr/include/x86_64-linux-gnu/sys/select.h \
+ /usr/include/x86_64-linux-gnu/bits/select.h \
+ /usr/include/x86_64-linux-gnu/bits/sigset.h \
+ /usr/include/x86_64-linux-gnu/bits/time.h \
+ /usr/include/x86_64-linux-gnu/sys/sysmacros.h \
+ /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \
+ /usr/include/x86_64-linux-gnu/bits/stdlib-float.h /usr/include/string.h \
+ /usr/include/xlocale.h /usr/lib/gcc/x86_64-linux-gnu/5/include/stdint.h \
+ /usr/include/stdint.h /usr/include/x86_64-linux-gnu/bits/wchar.h \
+ /usr/include/inttypes.h /usr/include/x86_64-linux-gnu/sys/queue.h \
+ /usr/include/netinet/in.h /usr/include/x86_64-linux-gnu/sys/socket.h \
+ /usr/include/x86_64-linux-gnu/sys/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/socket.h \
+ /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+ /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+ /usr/include/x86_64-linux-gnu/asm/socket.h \
+ /usr/include/asm-generic/socket.h \
+ /usr/include/x86_64-linux-gnu/asm/sockios.h \
+ /usr/include/asm-generic/sockios.h \
+ /usr/include/x86_64-linux-gnu/bits/in.h /usr/include/setjmp.h \
+ /usr/include/x86_64-linux-gnu/bits/setjmp.h /usr/include/ctype.h \
+ /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \
+ /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \
+ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \
+ /usr/include/getopt.h /usr/include/signal.h \
+ /usr/include/x86_64-linux-gnu/bits/signum.h \
+ /usr/include/x86_64-linux-gnu/bits/siginfo.h \
+ /usr/include/x86_64-linux-gnu/bits/sigaction.h \
+ /usr/include/x86_64-linux-gnu/bits/sigcontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigstack.h \
+ /usr/include/x86_64-linux-gnu/sys/ucontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigthread.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdbool.h \
+ /usr/include/arpa/inet.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h \
+ /usr/include/limits.h /usr/include/x86_64-linux-gnu/bits/posix1_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/x86_64-linux-gnu/bits/posix2_lim.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/emmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memory.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_dom0_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memcpy.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_vect.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/x86intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ia32intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ammintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/smmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/popcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/wmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/immintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512fintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512erintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512pfintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512cdintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512bwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512dqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlbwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vldqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmavlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmivlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/shaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lzcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmi2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/f16cintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rtmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xtestintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm3dnow.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/prfchwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fma4intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xopintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lwpintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tbmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rdseedintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fxsrintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/adxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clwbintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pcommitintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clflushoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavesintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavecintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mwaitxintrin.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memzone.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eal.h \
+ /usr/include/sched.h /usr/include/x86_64-linux-gnu/bits/sched.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_per_lcore.h \
+ /usr/include/pthread.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_launch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_debug.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lcore.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pci.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_random.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rtm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ring.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_devargs.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev_info.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_errno.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_bond.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/lib_arp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pipeline.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ip.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/tsx.h
diff --git a/common/VIL/l2l3_stack/build/.l2_proto.o.d b/common/VIL/l2l3_stack/build/.l2_proto.o.d
new file mode 100644
index 00000000..13bcf504
--- /dev/null
+++ b/common/VIL/l2l3_stack/build/.l2_proto.o.d
@@ -0,0 +1,175 @@
+dep_l2_proto.o = \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.c \
+ /usr/include/stdc-predef.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_config.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.h \
+ /usr/include/stdio.h /usr/include/features.h \
+ /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+ /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h \
+ /usr/include/x86_64-linux-gnu/bits/types.h \
+ /usr/include/x86_64-linux-gnu/bits/typesizes.h /usr/include/libio.h \
+ /usr/include/_G_config.h /usr/include/wchar.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdarg.h \
+ /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \
+ /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+ /usr/include/x86_64-linux-gnu/bits/waitstatus.h /usr/include/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap-16.h \
+ /usr/include/x86_64-linux-gnu/sys/types.h /usr/include/time.h \
+ /usr/include/x86_64-linux-gnu/sys/select.h \
+ /usr/include/x86_64-linux-gnu/bits/select.h \
+ /usr/include/x86_64-linux-gnu/bits/sigset.h \
+ /usr/include/x86_64-linux-gnu/bits/time.h \
+ /usr/include/x86_64-linux-gnu/sys/sysmacros.h \
+ /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \
+ /usr/include/x86_64-linux-gnu/bits/stdlib-float.h /usr/include/string.h \
+ /usr/include/xlocale.h /usr/lib/gcc/x86_64-linux-gnu/5/include/stdint.h \
+ /usr/include/stdint.h /usr/include/x86_64-linux-gnu/bits/wchar.h \
+ /usr/include/inttypes.h /usr/include/x86_64-linux-gnu/sys/queue.h \
+ /usr/include/netinet/in.h /usr/include/x86_64-linux-gnu/sys/socket.h \
+ /usr/include/x86_64-linux-gnu/sys/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/socket.h \
+ /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+ /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+ /usr/include/x86_64-linux-gnu/asm/socket.h \
+ /usr/include/asm-generic/socket.h \
+ /usr/include/x86_64-linux-gnu/asm/sockios.h \
+ /usr/include/asm-generic/sockios.h \
+ /usr/include/x86_64-linux-gnu/bits/in.h /usr/include/setjmp.h \
+ /usr/include/x86_64-linux-gnu/bits/setjmp.h /usr/include/ctype.h \
+ /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \
+ /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \
+ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \
+ /usr/include/getopt.h /usr/include/signal.h \
+ /usr/include/x86_64-linux-gnu/bits/signum.h \
+ /usr/include/x86_64-linux-gnu/bits/siginfo.h \
+ /usr/include/x86_64-linux-gnu/bits/sigaction.h \
+ /usr/include/x86_64-linux-gnu/bits/sigcontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigstack.h \
+ /usr/include/x86_64-linux-gnu/sys/ucontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigthread.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdbool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h \
+ /usr/include/limits.h /usr/include/x86_64-linux-gnu/bits/posix1_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/x86_64-linux-gnu/bits/posix2_lim.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/emmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memory.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_dom0_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memcpy.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_vect.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/x86intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ia32intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ammintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/smmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/popcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/wmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/immintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512fintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512erintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512pfintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512cdintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512bwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512dqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlbwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vldqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmavlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmivlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/shaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lzcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmi2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/f16cintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rtmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xtestintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm3dnow.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/prfchwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fma4intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xopintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lwpintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tbmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rdseedintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fxsrintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/adxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clwbintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pcommitintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clflushoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavesintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavecintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mwaitxintrin.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memzone.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eal.h \
+ /usr/include/sched.h /usr/include/x86_64-linux-gnu/bits/sched.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_per_lcore.h \
+ /usr/include/pthread.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_launch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_debug.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lcore.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pci.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_random.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rtm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ring.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_devargs.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev_info.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ip.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /usr/include/arpa/inet.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_errno.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_bond.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h
diff --git a/common/VIL/l2l3_stack/build/.main.o.d b/common/VIL/l2l3_stack/build/.main.o.d
new file mode 100644
index 00000000..9d27accd
--- /dev/null
+++ b/common/VIL/l2l3_stack/build/.main.o.d
@@ -0,0 +1,209 @@
+dep_main.o = \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/main.c \
+ /usr/include/stdc-predef.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_config.h \
+ /usr/include/stdio.h /usr/include/features.h \
+ /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+ /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h \
+ /usr/include/x86_64-linux-gnu/bits/types.h \
+ /usr/include/x86_64-linux-gnu/bits/typesizes.h /usr/include/libio.h \
+ /usr/include/_G_config.h /usr/include/wchar.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdarg.h \
+ /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \
+ /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+ /usr/include/x86_64-linux-gnu/bits/waitstatus.h /usr/include/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap-16.h \
+ /usr/include/x86_64-linux-gnu/sys/types.h /usr/include/time.h \
+ /usr/include/x86_64-linux-gnu/sys/select.h \
+ /usr/include/x86_64-linux-gnu/bits/select.h \
+ /usr/include/x86_64-linux-gnu/bits/sigset.h \
+ /usr/include/x86_64-linux-gnu/bits/time.h \
+ /usr/include/x86_64-linux-gnu/sys/sysmacros.h \
+ /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \
+ /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdint.h /usr/include/stdint.h \
+ /usr/include/x86_64-linux-gnu/bits/wchar.h /usr/include/inttypes.h \
+ /usr/include/string.h /usr/include/xlocale.h \
+ /usr/include/x86_64-linux-gnu/sys/queue.h /usr/include/errno.h \
+ /usr/include/x86_64-linux-gnu/bits/errno.h /usr/include/linux/errno.h \
+ /usr/include/x86_64-linux-gnu/asm/errno.h \
+ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \
+ /usr/include/getopt.h /usr/include/signal.h \
+ /usr/include/x86_64-linux-gnu/bits/signum.h \
+ /usr/include/x86_64-linux-gnu/bits/siginfo.h \
+ /usr/include/x86_64-linux-gnu/bits/sigaction.h \
+ /usr/include/x86_64-linux-gnu/bits/sigcontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigstack.h \
+ /usr/include/x86_64-linux-gnu/sys/ucontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigthread.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdbool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /usr/include/ctype.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h \
+ /usr/include/limits.h /usr/include/x86_64-linux-gnu/bits/posix1_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/x86_64-linux-gnu/bits/posix2_lim.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/emmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_vect.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/x86intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ia32intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ammintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/smmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/popcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/wmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/immintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512fintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512erintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512pfintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512cdintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512bwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512dqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlbwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vldqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmavlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmivlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/shaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lzcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmi2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/f16cintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rtmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xtestintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm3dnow.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/prfchwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fma4intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xopintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lwpintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tbmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rdseedintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fxsrintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/adxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clwbintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pcommitintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clflushoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavesintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavecintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mwaitxintrin.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memory.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_dom0_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memcpy.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memzone.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eal.h \
+ /usr/include/sched.h /usr/include/x86_64-linux-gnu/bits/sched.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_per_lcore.h \
+ /usr/include/pthread.h /usr/include/x86_64-linux-gnu/bits/setjmp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_launch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_debug.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lcore.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pci.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_random.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rtm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ring.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_devargs.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev_info.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ip.h \
+ /usr/include/netinet/in.h /usr/include/x86_64-linux-gnu/sys/socket.h \
+ /usr/include/x86_64-linux-gnu/sys/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/socket.h \
+ /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+ /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+ /usr/include/x86_64-linux-gnu/asm/socket.h \
+ /usr/include/asm-generic/socket.h \
+ /usr/include/x86_64-linux-gnu/asm/sockios.h \
+ /usr/include/asm-generic/sockios.h \
+ /usr/include/x86_64-linux-gnu/bits/in.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_tcp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_udp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_string_fns.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_timer.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/lib_arp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pipeline.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.h \
+ /usr/include/setjmp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /usr/include/arpa/inet.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_errno.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_bond.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_common.h \
+ /usr/include/x86_64-linux-gnu/sys/param.h \
+ /usr/include/x86_64-linux-gnu/bits/param.h /usr/include/linux/param.h \
+ /usr/include/x86_64-linux-gnu/asm/param.h \
+ /usr/include/asm-generic/param.h /usr/include/unistd.h \
+ /usr/include/x86_64-linux-gnu/bits/posix_opt.h \
+ /usr/include/x86_64-linux-gnu/bits/environments.h \
+ /usr/include/x86_64-linux-gnu/bits/confname.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_hash.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_jhash.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table_hash.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table_lpm.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm4.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lpm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_compat.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lpm_sse.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lpm6.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm6.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table_lpm_ipv6.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm4.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm6.h
diff --git a/common/VIL/l2l3_stack/hle.c b/common/VIL/l2l3_stack/hle.c
new file mode 100644
index 00000000..a0661b32
--- /dev/null
+++ b/common/VIL/l2l3_stack/hle.c
@@ -0,0 +1,43 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include "tsx.h"
+//#include "hle.h"
+#include <xmmintrin.h>
+
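+/* Reset the HLE mutex to the unlocked (0) state. */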
+void hle_init(void)
+{
+ mutex_val = 0;
+}
+
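+/* Acquire the mutex: spin with PAUSE until the flag is taken;
+ * __ATOMIC_HLE_ACQUIRE lets the CPU elide the lock (Hardware Lock Elision). */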
+int hle_lock(void)
+{
+ while (__atomic_exchange_n
+ (&mutex_val, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
+ _mm_pause();
+ return TRUE;
+}
+
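+/* Release the mutex with HLE release semantics, ending the elided critical section. */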
+int hle_release(void)
+{
+ __atomic_store_n(&mutex_val, 0,
+ __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+ return TRUE;
+}
+
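+/* Report whether the mutex flag is currently held. */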
+int is_hle_locked(void)
+{
+ return (mutex_val == 0) ? FALSE : TRUE;
+}
diff --git a/common/VIL/l2l3_stack/hle.h b/common/VIL/l2l3_stack/hle.h
new file mode 100644
index 00000000..21da710d
--- /dev/null
+++ b/common/VIL/l2l3_stack/hle.h
@@ -0,0 +1,40 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#define HLE_TRUE 1
+#define HLE_FALSE 0
+
+volatile int mutex_val;
+/*
+ * Initialize the hle mutex
+ * @param void
+ */
+void hle_init(void);
+/*
+ * To acquire the lock
+ * @param void
+ */
+int hle_lock(void);
+/*
+ * To release held lock
+ * @param void
+ */
+int hle_release(void);
+/*
+ * To check whether lock is held
+ * @param void
+ */
+int is_hle_locked(void);
diff --git a/common/VIL/l2l3_stack/interface.c b/common/VIL/l2l3_stack/interface.c
new file mode 100644
index 00000000..84c390da
--- /dev/null
+++ b/common/VIL/l2l3_stack/interface.c
@@ -0,0 +1,1478 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include <interface.h>
+#include <rte_byteorder.h>
+#include <lib_arp.h>
+#include <tsx.h>
+
+interface_main_t ifm;
+int USE_RTM_LOCKS = 0;
+rte_rwlock_t rwlock;
+uint8_t ifm_debug;
+static int prev_state;
+
+void config_ifm_debug(int dbg, int flag)
+{
+ switch (dbg) {
+ case IFM_DEBUG_CONFIG:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG_CONFIG;
+ } else {
+ ifm_debug &= ~IFM_DEBUG_CONFIG;
+ }
+ break;
+ case IFM_DEBUG_RXTX:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG_RXTX;
+ } else {
+ ifm_debug &= ~IFM_DEBUG_RXTX;
+ }
+ break;
+ case IFM_DEBUG_LOCKS:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG_LOCKS;
+ } else {
+ ifm_debug &= ~IFM_DEBUG_LOCKS;
+ }
+ break;
+ case IFM_DEBUG:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG;
+ } else {
+ ifm_debug &= ~IFM_DEBUG;
+ }
+ break;
+ }
+}
+
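+/* Initialise the interface manager: choose RTM or rwlock protection,
+ * clear the port list and record how many ports the PCI probe found. */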
+void ifm_init(void)
+{
+ int i = 0;
+ config_ifm_debug(IFM_DEBUG_CONFIG, 1);
+ if (can_use_intel_core_4th_gen_features()) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+			RTE_LOG(INFO, IFM, "TSX supported but RTM locks are currently disabled...\n\r");
+ USE_RTM_LOCKS = 0;
+ } else {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "TSX not supported\n\r");
+ USE_RTM_LOCKS = 0;
+ }
+ if (USE_RTM_LOCKS)
+ rtm_init();
+ else
+ rte_rwlock_init(&rwlock);
+
+ for (i = 0; i < IFM_MAX_PORTARR_SZ; i++) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ ifm.port_list[i] = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ ifm.nport_intialized = rte_eth_dev_count();
+ ifm.nport_configured = 0;
+ RTE_LOG(INFO, IFM, "IFM_INIT: Number of ports initialized during "
+ "PCI probing %u.\n\r", ifm.nport_intialized);
+}
+
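+/* Free the l2_phy_interface_t for the given port and clear its slot, under the write lock. */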
+void ifm_remove_port_details(uint8_t portid)
+{
+ if (ifm.port_list[portid] != NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ l2_phy_interface_t *port = ifm.port_list[portid];
+ ifm.port_list[portid] = NULL;
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: NULL set for port %u\n\r",
+ __FUNCTION__, portid);
+ rte_free(port);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+				"%s: Failed to remove port details. Port %u info"
+				" is already NULL.\n\r", __FUNCTION__, portid);
+ }
+}
+
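+/* Look up the port structure for port_id under the read lock;
+ * returns NULL if the port has not been configured. */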
+l2_phy_interface_t *ifm_get_port(uint8_t port_id)
+{
+ l2_phy_interface_t *port = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ port = ifm.port_list[port_id];
+
+ if (port == NULL) {
+ /*RTE_LOG(ERR, IFM, "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+ }
+ if (port->pmdid == port_id) {
+ /*RTE_LOG(INFO, IFM, "%s: Port %u found....\n\r",
+ __FUNCTION__, port_id); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+ } else {
+
+/* RTE_LOG(INFO, IFM,"%s: Mismatch given port %u port in loc %u\n\r",__FUNCTION__,port_id,
+ ifm.port_list[port_id]->pmdid);
+*/
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+}
+
+l2_phy_interface_t *ifm_get_first_port(void)
+{
+ l2_phy_interface_t *port = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ port = ifm.port_list[0];
+ if (port == NULL) {
+ /*RTE_LOG(ERR, IFM, "%s: Port info not found... configure it first.\n\r",
+ __FUNCTION__); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+ }
+ /*RTE_LOG(ERR, IFM, "%s: Port %u info is found...%p\n\r",
+ __FUNCTION__, port->pmdid, port); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+}
+
+l2_phy_interface_t *ifm_get_next_port(uint8_t port_id)
+{
+ l2_phy_interface_t *port = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ port = ifm.port_list[port_id + 1];
+ if (port == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+ }
+ /*RTE_LOG(ERR, IFM, "%s: Port %u info is found...\n\r",
+ __FUNCTION__, port_id); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+}
+
+l2_phy_interface_t *ifm_get_port_by_name(const char *name)
+{
+ l2_phy_interface_t *port = NULL;
+ int i;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ for (i = 0; i < RTE_MAX_ETHPORTS && ifm.port_list[i]; i++) {
+ port = ifm.port_list[i];
+ if (strcmp(name, port->ifname) == 0) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "FOUND! port %u %s\n\r",
+ port->pmdid, port->ifname);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+}
+
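+/* Link-state-change (LSC) callback: refresh the cached link state, notify
+ * registered clients, update bond master/slave bookkeeping and send a
+ * gratuitous ARP on the first link-up of an IPv4-enabled port. */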
+void lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+ void *param)
+{
+ struct rte_eth_link link;
+ l2_phy_interface_t *port;
+ int nclients = ifm.nclient;
+ int i;
+
+ RTE_SET_USED(param);
+ RTE_SET_USED(type);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ rte_eth_link_get(port_id, &link);
+ for (i = 0; i < nclients; i++)
+ ifm.if_client[i].cb_linkupdate(port_id, link.link_status);
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (link.link_status) {
+ port->link_status = IFM_ETH_LINK_UP;
+ port->link_speed = link.link_speed;
+ port->link_duplex = link.link_duplex;
+ RTE_LOG(INFO, IFM,
+ "EVENT-- PORT %u Link UP - Speed %u Mbps - %s.\n",
+ port_id, (unsigned)link.link_speed,
+ (link.link_duplex ==
+ ETH_LINK_FULL_DUPLEX) ? ("full-duplex")
+ : ("half-duplex"));
+ if (port->flags & IFM_MASTER) {
+ port->flags |= IFM_BONDED;
+ port->bond_config->active_slave_count =
+ rte_eth_bond_active_slaves_get(port->pmdid,
+ port->
+ bond_config->
+ active_slaves,
+ RTE_MAX_ETHPORTS);
+ struct ether_addr new_mac;
+ rte_eth_macaddr_get(port->pmdid,
+ (struct ether_addr *)
+ &new_mac);
+ if (memcmp
+ (&new_mac, port->macaddr,
+ sizeof(struct ether_addr))) {
+ RTE_LOG(INFO, IFM,
+ "Bond port %u MAC has changed.\n\r",
+ port->pmdid);
+ } else {
+ RTE_LOG(INFO, IFM,
+ "Bond port %u MAC remains same\n\r",
+ port->pmdid);
+ }
+ }
+ if (port->flags & IFM_SLAVE) {
+ uint8_t master_portid =
+ port->bond_config->bond_portid;
+ struct rte_eth_link linkstatus;
+ rte_eth_link_get(master_portid, &linkstatus);
+				RTE_LOG(INFO, IFM, "Port %u's Master(%u) status is %u\n\r", port_id,
+ master_portid, linkstatus.link_status);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ if (port->ipv4_list != NULL) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "Sending garp on port %u\n\r",
+ port->pmdid);
+ if (!prev_state) {
+ send_gratuitous_arp(port);
+ prev_state = 1;
+ }
+ }
+#if 0
+ else {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "IP is not enabled on port %u, not sending GARP\n\r",
+ port->pmdid);
+ }
+#endif
+ } else {
+ if (port->flags & IFM_MASTER) {
+ port->flags &= ~IFM_BONDED;
+ //RTE_LOG(INFO, IFM, "IFM_MASTER port, resetting IFM_BONDED. %u\n\r", port->flags);
+ }
+ port->link_status = IFM_ETH_LINK_DOWN;
+ RTE_LOG(INFO, IFM, "EVENT-- PORT %u is Link DOWN.\n",
+ port_id);
+ if (port->flags & IFM_SLAVE) {
+ struct rte_eth_link linkstatus;
+ uint8_t master_portid =
+ port->bond_config->bond_portid;
+ rte_eth_link_get_nowait(master_portid,
+ &linkstatus);
+ RTE_LOG(INFO, IFM,
+					"Port %u's Master(%u) status is %u\n\r",
+ port_id, master_portid,
+ linkstatus.link_status);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ prev_state = 0;
+ }
+ }
+ //print_interface_details();
+}
+
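+/* Administratively bring a port up or down: sync the PMD link state with the
+ * requested state and send a gratuitous ARP once the port comes up. */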
+void ifm_update_linkstatus(uint8_t port_id, uint16_t linkstatus)
+{
+ struct rte_eth_link link;
+ l2_phy_interface_t *port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ rte_eth_link_get(port_id, &link);
+ if (linkstatus == IFM_ETH_LINK_UP) {
+ port->admin_status = IFM_ETH_LINK_UP;
+ if(!link.link_status) {
+ if (rte_eth_dev_set_link_up(port_id) < 0) {
+ RTE_LOG(INFO, IFM,
+						"%s: Port %u admin up is unsuccessful\n\r",
+ __FUNCTION__, port->pmdid);
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+						"%s: Port %u admin up...\n\r",
+ __FUNCTION__, port->pmdid);
+ send_gratuitous_arp(port);
+ return;
+ }
+ }
+ } else if (linkstatus == IFM_ETH_LINK_DOWN)
+ {
+ int status;
+ port->admin_status = IFM_ETH_LINK_DOWN;
+ /* need to check the following if */
+ if(link.link_status) {
+ status = rte_eth_dev_set_link_down(port_id);
+ if (status < 0)
+ {
+ rte_panic("(%" PRIu32 "): PMD set link down error %"
+ PRId32 "\n", port_id, status);
+ }
+ }
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+}
+
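+/* Set the device MTU through the PMD and cache it in the port structure on success. */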
+void ifm_set_l2_interface_mtu(uint8_t port_id, uint16_t mtu)
+{
+ int ret;
+ l2_phy_interface_t *port;
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+
+ if (port != NULL && port->pmdid == port_id) {
+ ret = rte_eth_dev_set_mtu(port_id, mtu);
+ if (ret != 0)
+ RTE_LOG(INFO, IFM,
+ "set_l2_interface_mtu: Set MTU failed. ret=%d\n",
+ ret);
+ else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Acquiring lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port->mtu = mtu;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return;
+ }
+ }
+}
+
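+/* Enable or disable promiscuous mode on the port and record the setting. */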
+void ifm_set_port_promisc(uint8_t port_id, uint8_t enable)
+{
+ l2_phy_interface_t *port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (enable == 1) {
+ rte_eth_promiscuous_enable(port_id);
+ port->promisc = 1;
+ } else {
+ rte_eth_promiscuous_disable(port_id);
+ port->promisc = 0;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+}
+
+int32_t ifm_get_nactive_ports(void)
+{
+ return ifm.nport_configured;
+}
+
+int32_t ifm_get_nports_initialized(void)
+{
+ return ifm.nport_intialized;
+}
+
+uint16_t ifm_receive_bulk_pkts(uint8_t port_id, uint16_t qid,
+ struct rte_mbuf **rx_pkts)
+{
+ uint64_t no_of_rcvd_pkt;
+ no_of_rcvd_pkt =
+ rte_eth_rx_burst(port_id, qid, rx_pkts, IFM_BURST_SIZE);
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_receive_bulk_pkts: port_id %u no_of_rcvd_pkt %lu\n\r",
+ port_id, no_of_rcvd_pkt);
+ return no_of_rcvd_pkt;
+}
+
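+/* Transmit a burst on the default TX queue, retrying with a short delay
+ * while the PMD accepts fewer packets than requested. */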
+uint16_t ifm_transmit_bulk_pkts(l2_phy_interface_t *port,
+ struct rte_mbuf **tx_pkts, uint64_t npkts)
+{
+ uint32_t burst_tx_delay_time = IFM_BURST_TX_WAIT_US;
+ uint32_t burst_tx_retry_num = IFM_BURST_TX_RETRIES;
+ uint32_t retry;
+ uint32_t no_of_tx_pkt;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ no_of_tx_pkt = rte_eth_tx_burst(port->pmdid, IFM_TX_DEFAULT_Q, tx_pkts,
+ npkts);
+ if (unlikely(no_of_tx_pkt < npkts)) {
+ retry = 0;
+		while (no_of_tx_pkt < npkts
+		       && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			no_of_tx_pkt +=
+			    rte_eth_tx_burst(port->pmdid, IFM_TX_DEFAULT_Q,
+					     &tx_pkts[no_of_tx_pkt],
+					     npkts - no_of_tx_pkt);
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_bulk_pkts: no_of_tx_pkt %u\n\r",
+ no_of_tx_pkt);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return no_of_tx_pkt;
+}
+
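+/* Buffer one packet on the port's TX buffer and flush it immediately;
+ * returns the number of packets buffered. */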
+int ifm_transmit_single_pkt(l2_phy_interface_t *port, struct rte_mbuf *tx_pkts)
+{
+ uint64_t tx_npkts = 0;
+ if (tx_pkts == NULL || port == NULL) {
+ RTE_LOG(INFO, IFM,
+			"ifm_transmit_single_pkt: tx_pkts or port is NULL\n\r");
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_single_pkt: port->pmdid %u\n\r",
+ port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ tx_npkts =
+ rte_eth_tx_buffer(port->pmdid, IFM_TX_DEFAULT_Q, port->tx_buffer,
+ tx_pkts);
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_single_pkt: port->pmdid %u No of packets buffered %lu\n\r",
+ port->pmdid, tx_npkts);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RW lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port->n_txpkts +=
+ rte_eth_tx_buffer_flush(port->pmdid, IFM_TX_DEFAULT_Q,
+ port->tx_buffer);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_single_pkt: no of pkts flushed %lu\n\r",
+ port->n_txpkts);
+ return tx_npkts;
+}
+
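+/* Prepend an IPv4 address entry to the port's address list and mark the port IPv4-enabled. */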
+int16_t ifm_add_ipv4_port(uint8_t port_id, uint32_t ipaddr, uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv4list_t *ipconf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ ipconf = (ipv4list_t *) rte_zmalloc(NULL, sizeof(ipv4list_t),
+ RTE_CACHE_LINE_SIZE);
+ if (ipconf != NULL) {
+ ipconf->next = NULL;
+ //ipconf->ipaddr = rte_bswap32(ipaddr);
+ ipconf->ipaddr = ipaddr;
+ ipconf->port = port;
+ ipconf->addrlen = addrlen;
+ if (port->ipv4_list == NULL)
+ port->flags |= IFM_IPV4_ENABLED;
+ ipconf->next = (ipv4list_t *) port->ipv4_list;
+ port->ipv4_list = (ipv4list_t *) ipconf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+}
+
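+/* Remove the matching IPv4 address entry from the port's list and clear the IPv4-enabled flag. */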
+int16_t ifm_remove_ipv4_port(uint8_t port_id, uint32_t ipaddr,
+ uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv4list_t *iplist, *previplist = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (port->ipv4_list == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+ }
+ iplist = (ipv4list_t *) port->ipv4_list;
+ while (iplist != NULL) {
+			if (addrlen == iplist->addrlen &&
+			    memcmp(&iplist->ipaddr, &ipaddr, addrlen) == 0) {
+ if (iplist == port->ipv4_list) {
+ port->ipv4_list = iplist->next;
+ } else {
+ if (previplist != NULL)
+ previplist->next = iplist->next;
+ }
+ port->flags &= ~IFM_IPV4_ENABLED;
+ rte_free(iplist);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ } else {
+ previplist = iplist;
+ iplist = iplist->next;
+ }
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+}
+
+int8_t ifm_add_ipv6_port(uint8_t port_id, uint8_t ip6addr[], uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv6list_t *ip6conf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ ip6conf = (ipv6list_t *) rte_zmalloc(NULL, sizeof(ipv6list_t),
+ RTE_CACHE_LINE_SIZE);
+ if (ip6conf != NULL) {
+ ip6conf->next = NULL;
+ memcpy(ip6conf->ipaddr, ip6addr, IFM_IPV6_ADDR_SIZE);
+ ip6conf->port = port;
+ ip6conf->addrlen = addrlen;
+
+ if (port->ipv6_list == NULL) {
+ port->flags |= IFM_IPV6_ENABLED;
+ }
+ ip6conf->next = (ipv6list_t *) port->ipv6_list;
+ port->ipv6_list = (ipv6list_t *) ip6conf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+}
+
+int16_t ifm_remove_ipv6_port(uint8_t port_id, uint32_t ip6addr,
+ uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv6list_t *ip6list, *previp6list = NULL;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (port->ipv6_list == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+ }
+ ip6list = (ipv6list_t *) port->ipv6_list;
+ while (ip6list != NULL) {
+			if (addrlen == ip6list->addrlen &&
+			    memcmp(&ip6list->ipaddr, &ip6addr, addrlen) == 0) {
+ if (ip6list == port->ipv6_list) {
+ port->ipv6_list = ip6list->next;
+ } else {
+ if (previp6list != NULL)
+ previp6list->next =
+ ip6list->next;
+ }
+ port->flags &= ~IFM_IPV6_ENABLED;
+ rte_free(ip6list);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ } else {
+ previp6list = ip6list;
+ ip6list = ip6list->next;
+ }
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return -1;
+}
+
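+/* Return 1 if IPv4 is enabled on the port, 0 if it is not,
+ * IFM_FAILURE if the port is unknown. */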
+int32_t ifm_chk_port_ipv4_enabled(uint8_t port_id)
+{
+ l2_phy_interface_t *port;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if ((port->flags & IFM_IPV4_ENABLED) == 0) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 0;
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 1;
+ }
+}
+
+int32_t ifm_chk_port_ipv6_enabled(uint8_t port_id)
+{
+ l2_phy_interface_t *port;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(ERR, IFM, "%s: Port %u info not found..."
+ " configure it first.\n\r",
+ __FUNCTION__, port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if ((port->flags & IFM_IPV6_ENABLED) == 0) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 0;
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 1;
+ }
+}
+
+void ifm_register_for_linkupdate(uint32_t clientid,
+ void (*cb_linkupdate) (uint8_t, unsigned int))
+{
+ ifm.if_client[ifm.nclient].cb_linkupdate = cb_linkupdate;
+ ifm.if_client[ifm.nclient].clientid = clientid;
+ ifm.nclient++;
+}
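+
+/*
+ * Illustrative sketch (not compiled): how a client module might register a
+ * link-update callback with the interface manager. The callback name
+ * app_link_update and the client id value are assumptions for this example.
+ */
+#if 0
+static void app_link_update(uint8_t port_id, unsigned int link_status)
+{
+	/* React to the link state change reported by IFM */
+	RTE_LOG(INFO, IFM, "port %u link is %s\n\r", port_id,
+		link_status ? "up" : "down");
+}
+
+static void app_register_with_ifm(void)
+{
+	/* Client id 1 is arbitrary; it only identifies this client to IFM */
+	ifm_register_for_linkupdate(1, app_link_update);
+}
+#endif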
+
+int ifm_port_setup(uint8_t port_id, port_config_t *pconfig)
+{
+ int status, sock;
+ char buf[12];
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_link linkstatus;
+ l2_phy_interface_t *port = NULL;
+
+ if (!ifm.nport_intialized) {
+		RTE_LOG(ERR, IFM, "%s: Failed to configure port %u. 0 ports"
+			" were initialized during PCI probe...\n\r",
+ __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: Configuring port %u with "
+ "nrxq: %u, ntxq: %u\n\r", __FUNCTION__,
+ port_id, pconfig->nrx_queue, pconfig->ntx_queue);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock1 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+	if (ifm.port_list[port_id] == NULL) {
+		ifm.port_list[port_id] =
+		    (l2_phy_interface_t *) rte_zmalloc(NULL,
+				sizeof(l2_phy_interface_t),
+				RTE_CACHE_LINE_SIZE);
+		if (ifm.port_list[port_id] == NULL) {
+			RTE_LOG(ERR, IFM, "%s: Failed to allocate memory"
+				" for port %u.\n\r", __FUNCTION__, port_id);
+			if (USE_RTM_LOCKS)
+				rtm_unlock();
+			else
+				rte_rwlock_write_unlock(&rwlock);
+			return IFM_FAILURE;
+		}
+		ifm.port_list[port_id]->pmdid = port_id;
+	}
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock1 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+
+ rte_eth_link_get(port_id, &linkstatus);
+ if (linkstatus.link_status) {
+ if (ifm_debug & IFM_DEBUG_CONFIG) {
+			RTE_LOG(INFO, IFM, "%s: %u is up. Stop it before"
+ " reconfiguring.\n\r", __FUNCTION__, port_id);
+ }
+ rte_eth_dev_stop(port_id);
+ }
+ /*Configure an Ethernet device. rets 0 on success queue */
+ status = rte_eth_dev_configure(port_id, pconfig->nrx_queue,
+ pconfig->ntx_queue, &pconfig->port_conf);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+		RTE_LOG(ERR, IFM, "%s: rte_eth_dev_configure failed"
+			" for port %u.\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ status = rte_eth_dev_callback_register(port_id,
+ RTE_ETH_EVENT_INTR_LSC,
+ lsi_event_callback, NULL);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM, "%s: rte_eth_dev_callback_register()"
+ " failed for port %u.\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ /*promiscuous mode is enabled set it */
+ if (pconfig->promisc)
+ rte_eth_promiscuous_enable(port_id);
+
+ sock = rte_eth_dev_socket_id(port_id);
+ if (sock == -1)
+		RTE_LOG(ERR, IFM, "%s: Warning: rte_eth_dev_socket_id,"
+			" port_id value is"
+			" out of range %u\n\r", __FUNCTION__, port_id);
+ /*Port initialization */
+ int ntxqs;
+ for (ntxqs = 0; ntxqs < pconfig->ntx_queue; ntxqs++) {
+ status = rte_eth_tx_queue_setup(port_id, ntxqs,
+ IFM_TX_DESC_DEFAULT, sock,
+ &(pconfig->tx_conf));
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM, "%s: rte_eth_tx_queue_setup failed"
+ " for port %u\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ }
+ port = ifm_get_port(port_id);
+ if (port == NULL) {
+ RTE_LOG(INFO, IFM, "%s: Port is NULL @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ return IFM_FAILURE;
+ }
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock 2 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ if (port->tx_buf_len == 0) {
+ port->tx_buf_len = RTE_ETH_TX_BUFFER_SIZE(IFM_BURST_SIZE);
+ }
+ port->tx_buffer = rte_zmalloc_socket("tx_buffer", port->tx_buf_len, 0,
+ rte_eth_dev_socket_id(port_id));
+
+ if (port->tx_buffer == NULL) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM, "%s: Failed to allocate tx buffers for"
+ " port %u\n\r", __FUNCTION__, port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock2 %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ rte_eth_tx_buffer_init(port->tx_buffer, IFM_BURST_SIZE);
+
+ sprintf(buf, "MEMPOOL%d", port_id);
+ port->mempool = rte_mempool_create(buf,
+ pconfig->mempool.pool_size,
+ pconfig->mempool.buffer_size,
+ pconfig->mempool.cache_size,
+ sizeof(struct
+ rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL, sock, 0);
+ if (port->mempool == NULL) {
+ ifm_remove_port_details(port_id);
+		RTE_LOG(ERR, IFM, "%s: rte_mempool_create failed for port"
+ " %u. Error: %s\n\r",
+ __FUNCTION__, port_id, rte_strerror(rte_errno));
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock2 %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ int nrxqs;
+ for (nrxqs = 0; nrxqs < pconfig->nrx_queue; nrxqs++) {
+ status = rte_eth_rx_queue_setup(port_id, nrxqs,
+ IFM_RX_DESC_DEFAULT, sock,
+ &(pconfig->rx_conf),
+ port->mempool);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM,
+				"%s: rte_eth_rx_queue_setup failed "
+ "for port %u queue %u. Error: %s\n\r",
+ __FUNCTION__, port_id, nrxqs,
+ rte_strerror(rte_errno));
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock2 %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ }
+ /*Start link */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock2 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ status = rte_eth_dev_start(port_id);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+		RTE_LOG(ERR, IFM, "%s: rte_eth_dev_start failed for"
+ " port %u.\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ rte_delay_ms(5000);
+ /*Get device info and populate interface structure */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock3 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ rte_eth_macaddr_get(port_id, (struct ether_addr *)port->macaddr);
+ if (pconfig->promisc)
+ port->promisc = 1;
+ rte_eth_link_get(port_id, &linkstatus);
+ /*Link status */
+ port->link_duplex = linkstatus.link_duplex;
+ port->link_autoneg = linkstatus.link_autoneg;
+ port->link_speed = linkstatus.link_speed;
+ port->admin_status = pconfig->state;
+
+ /*Get dev_info */
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(port_id, &dev_info);
+ port->min_rx_bufsize = dev_info.min_rx_bufsize;
+ port->max_rx_pktlen = dev_info.max_rx_pktlen;
+ port->max_rx_queues = dev_info.max_rx_queues;
+ port->max_tx_queues = dev_info.max_tx_queues;
+ rte_eth_dev_get_mtu(port_id, &(port->mtu));
+
+ /*Add rx and tx packet function ptrs */
+ port->retrieve_bulk_pkts = &ifm_receive_bulk_pkts;
+ port->transmit_bulk_pkts = &ifm_transmit_bulk_pkts;
+ port->transmit_single_pkt = &ifm_transmit_single_pkt;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR3 lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ RTE_LOG(INFO, IFM, "%s: Port %u is successfully configured.\n\r",
+ __FUNCTION__, port_id);
+ return IFM_SUCCESS;
+}
+
+int ifm_configure_ports(port_config_t *pconfig)
+{
+ uint8_t port_id;
+ int status = 0;
+ if (!ifm.nport_intialized) {
+ RTE_LOG(ERR, IFM, "%s, Configuring ports failed. Zero ports "
+			"are initialized during PCI probe\n\r", __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ if (pconfig == NULL) {
+ RTE_LOG(ERR, IFM, "%s, Configuring ports failed. "
+ "Param pconfig is NULL\n\r", __FUNCTION__);
+ return IFM_FAILURE;
+ }
+
+ /*Initialize all ports */
+ for (port_id = 0; port_id < ifm.nport_intialized; port_id++) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "Call ifm_port_setup %u\n\r",
+ port_id);
+ status =
+ ifm_port_setup(pconfig[port_id].port_id, &pconfig[port_id]);
+ if (status == IFM_SUCCESS)
+ ifm.nport_configured++;
+ }
+ if (!ifm.nport_configured) {
+ RTE_LOG(ERR, IFM, "%s: Zero ports are configured\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+	RTE_LOG(INFO, IFM, "%s: Number of ports successfully configured:"
+ " %d\n\r", __FUNCTION__, ifm.nport_configured);
+ return IFM_SUCCESS;
+}
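+
+/*
+ * Illustrative sketch (not compiled): the expected initialization order when
+ * using the interface manager. The pconfig array is assumed to be filled by
+ * the application (see port_config_t in interface.h).
+ */
+#if 0
+static void app_bring_up_ports(port_config_t *pconfig)
+{
+	/* ifm_init() populates ifm.nport_intialized from the PCI probe */
+	ifm_init();
+
+	/* Configure every probed port; IFM_FAILURE means none came up */
+	if (ifm_configure_ports(pconfig) != IFM_SUCCESS)
+		rte_exit(EXIT_FAILURE, "No port could be configured\n");
+
+	/* Dump MAC/IP/link/bond details of all configured ports */
+	print_interface_details();
+}
+#endif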
+
+void print_interface_details(void)
+{
+ l2_phy_interface_t *port;
+ int i = 0;
+ struct sockaddr_in ip;
+ printf("\n\r");
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RW lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ for (i = 0; i < RTE_MAX_ETHPORTS && ifm.port_list[i]; i++) {
+ port = ifm.port_list[i];
+ printf(" %u", port->pmdid);
+ if (port->ifname && strlen(port->ifname)) {
+ printf(" (%s)\t", port->ifname);
+ } else
+ printf("\t\t");
+ printf("MAC:%02x:%02x:%02x:%02x:%02x:%02x Adminstate:%s"
+ " Operstate:%s \n\r",
+ port->macaddr[0], port->macaddr[1],
+ port->macaddr[2], port->macaddr[3],
+ port->macaddr[4], port->macaddr[5],
+ port->admin_status ? "UP" : "DOWN",
+ port->link_status ? "UP" : "DOWN");
+ printf("\t\t");
+ printf("Speed: %u, %s-duplex\n\r", port->link_speed,
+ port->link_duplex ? "full" : "half");
+ printf("\t\t");
+
+ if (port->ipv4_list != NULL) {
+ ip.sin_addr.s_addr =
+ (unsigned long)((ipv4list_t *) (port->ipv4_list))->
+ ipaddr;
+ printf("IP: %s/%d", inet_ntoa(ip.sin_addr),
+ ((ipv4list_t *) (port->ipv4_list))->addrlen);
+ } else {
+ printf("IP: NA");
+ }
+
+ printf("\r\n");
+ printf("\t\t");
+ if (port->ipv6_list != NULL) {
+ uint8_t *addr =
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr;
+ printf
+ ("IPv6: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x",
+ addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], addr[6], addr[7], addr[8], addr[9],
+ addr[10], addr[11], addr[12], addr[13], addr[14],
+ addr[15]);
+ } else {
+ printf("IPv6: NA");
+ }
+
+ if (port->flags & IFM_SLAVE) {
+ printf(" IFM_SLAVE ");
+ printf(" MasterPort: %u",
+ port->bond_config->bond_portid);
+ }
+ if (port->flags & IFM_MASTER) {
+ printf(" IFM_MASTER ");
+ printf(" Mode: %u", port->bond_config->mode);
+ printf(" PrimaryPort: %u", port->bond_config->primary);
+ printf("\n\r");
+ printf("\t\tSlavePortCount: %u",
+ port->bond_config->slave_count);
+ printf(" SlavePorts:");
+ int i;
+ for (i = 0; i < port->bond_config->slave_count; i++) {
+ printf(" %u ", port->bond_config->slaves[i]);
+ }
+ printf(" ActivePortCount: %u",
+ port->bond_config->active_slave_count);
+ printf(" ActivePorts:");
+ for (i = 0; i < port->bond_config->active_slave_count;
+ i++) {
+ printf(" %u ",
+ port->bond_config->active_slaves[i]);
+ }
+ printf("\n\r");
+ printf("\t\t");
+ printf("Link_monitor_freq: %u ms ",
+ port->bond_config->internal_ms);
+ printf(" Link_up_prop_delay: %u ms ",
+ port->bond_config->link_up_delay_ms);
+ printf(" Link_down_prop_delay: %u ms ",
+ port->bond_config->link_down_delay_ms);
+ printf("\n\r");
+ printf("\t\t");
+ printf("Xmit_policy: %u",
+ port->bond_config->xmit_policy);
+ }
+ printf("\n\r");
+ printf("\t\t");
+ printf("n_rxpkts: %" PRIu64 " ,n_txpkts: %" PRIu64 " ,",
+ port->n_rxpkts, port->n_txpkts);
+ struct rte_eth_stats eth_stats;
+ rte_eth_stats_get(port->pmdid, &eth_stats);
+ printf("pkts_in: %" PRIu64 " ,", eth_stats.ipackets);
+ printf("pkts_out: %" PRIu64 " ", eth_stats.opackets);
+ printf("\n\r");
+ printf("\t\t");
+ printf("in_errs: %" PRIu64 " ,", eth_stats.ierrors);
+ printf("in_missed: %" PRIu64 " ,", eth_stats.imissed);
+ printf("out_errs: %" PRIu64 " ,", eth_stats.oerrors);
+ printf("mbuf_errs: %" PRIu64 " ", eth_stats.rx_nombuf);
+ printf("\n\r");
+ printf("\n\r");
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RW lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+}
diff --git a/common/VIL/l2l3_stack/interface.h b/common/VIL/l2l3_stack/interface.h
new file mode 100644
index 00000000..0f654fa1
--- /dev/null
+++ b/common/VIL/l2l3_stack/interface.h
@@ -0,0 +1,873 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef INTERFACE_H
+#define INTERFACE_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_eth_ctrl.h>
+#include <rte_errno.h>
+#include <rte_port_ethdev.h>
+#include <rte_eth_bond.h>
+#include <rte_rwlock.h>
+
+#define RTE_LOGTYPE_IFM RTE_LOGTYPE_USER1
+#define IFM_SUCCESS 0
+#define IFM_FAILURE -1
+/*
+ * IFM Ether link related macros
+ */
+#define IFM_ETH_LINK_HALF_DUPLEX 0
+#define IFM_ETH_LINK_FULL_DUPLEX 1
+#define IFM_ETH_LINK_DOWN 0
+#define IFM_ETH_LINK_UP 1
+#define IFM_ETH_LINK_FIXED 0
+
+/*
+ * Bonding
+ */
+#define IFM_SLAVE (1<<0)
+#define IFM_MASTER (1<<1)
+#define IFM_BONDED (1<<2)
+#define IFM_IPV4_ENABLED (1<<3)
+#define IFM_IPV6_ENABLED (1<<4)
+
+#define IFM_BONDING_MODE_ROUND_ROBIN 0
+#define IFM_BONDING_MODE_ACTIVE_BACKUP 1
+#define IFM_BONDING_MODE_BALANCE 2
+#define IFM_BONDING_MODE_BROADCAST 3
+#define IFM_BONDING_MODE_8023AD 4
+#define IFM_BONDING_MODE_TLB 5
+#define IFM_BONDING_MODE_ALB 6
+
+#define IFM_BALANCE_XMIT_POLICY_LAYER2 0
+#define IFM_BALANCE_XMIT_POLICY_LAYER23 1
+#define IFM_BALANCE_XMIT_POLICY_LAYER34 2
+/*
+ * Queue related macros
+ */
+#define IFM_QUEUE_STAT_CNTRS 16
+#define IFM_TX_DEFAULT_Q 0
+#define IFM_RX_DEFAULT_Q 0
+#define IFM_RX_DESC_DEFAULT 128
+#define IFM_TX_DESC_DEFAULT 512
+#define IFM_BURST_SIZE 32
+#define IFM_BURST_TX_WAIT_US 1
+#define IFM_BURST_TX_RETRIES 64
+#define BURST_TX_DRAIN_US 100
+
+/*
+ * Misc
+ */
+#define IFM_IFNAME_LEN 16
+#define IFM_CLIENT_NAME 20
+#define IFM_MAX_CLIENT 10
+
+#define IFM_ETHER_ADDR_SIZE 6
+#define IFM_IPV6_ADDR_SIZE 16
+
+#define IFM_DEBUG_CONFIG (1<<0)
+#define IFM_DEBUG_RXTX (1<<1)
+#define IFM_DEBUG_LOCKS (1<<2)
+#define IFM_DEBUG (1<<4)
+#define IFM_MAX_PORTARR_SZ 64
+/**
+ * Mempool configuration details:
+ * Stores the mempool configuration information for the port.
+ */
+struct mempool_config {
+ uint32_t pool_size;/**< The number of elements in the mempool.*/
+ uint32_t buffer_size;
+ /**< The size of an element*/
+ uint32_t cache_size;
+ /**< Cache size */
+ uint32_t cpu_socket_id;
+ /**< The socket identifier in the case of NUMA.*/
+} __rte_cache_aligned;
+
+/**
+ * Port configuration:
+ * Stores the configuration information for the port.
+ * This structure is used during port and tx/rx queue setup.
+ */
+typedef struct _port_config_ {
+ uint8_t port_id; /**< port id or pmd id to be configured */
+ int nrx_queue; /**< no of rx queues */
+ int ntx_queue; /**< no of tx queues */
+ uint32_t tx_buf_size;
+ uint32_t state; /**< noshut/shut the admin state of the port*/
+	uint32_t promisc; /**< enable/disable promisc mode*/
+ struct mempool_config mempool;
+ /**< Mempool configurations */
+ struct rte_eth_conf port_conf;
+ /**< port configuration */
+ struct rte_eth_rxconf rx_conf;
+ /**< rx queue configurations */
+ struct rte_eth_txconf tx_conf;
+ /**< tx queue configurations */
+} port_config_t;
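+
+/*
+ * Illustrative sketch (not compiled): a minimal port_config_t initializer for
+ * a port with one RX and one TX queue. All values below are example
+ * assumptions; real deployments derive them from their configuration.
+ */
+#if 0
+static port_config_t example_pconfig = {
+	.port_id = 0,		/* pmd id of the port to configure */
+	.nrx_queue = 1,		/* single RX queue */
+	.ntx_queue = 1,		/* single TX queue */
+	.state = 1,		/* admin up */
+	.promisc = 1,		/* enable promiscuous mode */
+	.mempool = {
+		.pool_size = 8192,	/* number of mbufs in the pool */
+		.buffer_size = 2048 + sizeof(struct rte_mbuf) +
+				RTE_PKTMBUF_HEADROOM,
+		.cache_size = 256,
+		.cpu_socket_id = 0,
+	},
+	/* port_conf/rx_conf/tx_conf left zeroed to take PMD defaults */
+};
+#endif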
+
+/**
+ * Port statistics:
+ * if_stats structure is a member variable of structure l2_phy_interface_t.
+ * Used to maintain stats retrieved from the rte_eth_stats structure.
+ */
+typedef struct _if_stats_ {
+ uint64_t rx_npkts;/**< Total number of successfully received packets.*/
+	uint64_t tx_npkts;/**< Total number of successfully transmitted packets. */
+ uint64_t rx_bytes;/**< Total number of successfully received bytes.*/
+ uint64_t tx_bytes;/**< Total number of successfully transmitted bytes.*/
+ uint64_t rx_missed_pkts;
+	/**< no of packets dropped by HW because the rx queues are full*/
+ uint64_t rx_err_pkts;/**< Total number of erroneous received packets. */
+ uint64_t rx_nobuf_fail;/**< Total number of RX mbuf allocation failures. */
+ uint64_t tx_failed_pkts;/**< Total number of failed transmitted packets.*/
+ uint64_t q_rxpkts[IFM_QUEUE_STAT_CNTRS];/**< Total number of queue RX packets.*/
+ uint64_t q_txpkts[IFM_QUEUE_STAT_CNTRS];/**< Total number of queue TX packets.*/
+ uint64_t q_rx_bytes[IFM_QUEUE_STAT_CNTRS];
+ /**< Total number of successfully received queue bytes.*/
+ uint64_t q_tx_bytes[IFM_QUEUE_STAT_CNTRS];
+ /**< Total number of successfully transmitted queue bytes.*/
+ uint64_t q_rx_pkt_drop[IFM_QUEUE_STAT_CNTRS];
+ /**<Total number of queue packets received that are dropped.*/
+} __rte_cache_aligned if_stats;
+/**
+ * structure to store bond port information
+ */
+struct bond_port {
+ uint8_t bond_portid;
+ /**<portid of the bond port.*/
+ uint8_t socket_id;
+ /**<socketid of the port.*/
+ uint8_t mode;
+ /**<mode config.*/
+ uint8_t xmit_policy;
+ /**<xmit policy for this port.*/
+ uint32_t internal_ms;
+	/**<link monitoring frequency in ms.*/
+	uint32_t link_up_delay_ms;
+	/**<link up propagation delay in ms.*/
+	uint32_t link_down_delay_ms;
+	/**<link down propagation delay in ms.*/
+ uint8_t primary;
+ /**<primary port of this bond.*/
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ /**<list of slaves*/
+ int slave_count;
+ /**<slave count.*/
+ uint8_t active_slaves[RTE_MAX_ETHPORTS];
+ /**<list of active slaves.*/
+ int active_slave_count;
+ /**<cnt of active slave.*/
+} __rte_cache_aligned;
+
+/**
+ * Physical port details:
+ * Used to store information about configured port.
+ * Most of the member variables in this structure are populated
+ * from struct rte_eth_dev_info
+ */
+typedef struct _l2_phy_interface_ {
+ struct _l2_phy_interface_ *next; /**< pointer to physical interface list */
+	uint8_t pmdid; /**< populated from rte_eth_dev_info */
+	unsigned int if_index; /**< populated from rte_eth_dev_info */
+	char ifname[IFM_IFNAME_LEN]; /**< populated from rte_eth_dev_info */
+ uint16_t mtu; /**< mtu value - configurable */
+ uint8_t macaddr[IFM_ETHER_ADDR_SIZE]; /**< Ether addr*/
+ uint32_t promisc; /**< promisc mode - configurable*/
+ uint32_t flags; /**< Used for link bonding */
+ /* Link status */
+ uint32_t link_speed; /**< line speed */
+ uint16_t link_duplex:1; /**< duplex mode */
+ uint16_t link_autoneg:1; /**< auto negotiation*/
+ uint16_t link_status:1; /**< operational status */
+ uint16_t admin_status:1; /**< Admin status of a port*/
+ /* queue details */
+	struct rte_mempool *mempool; /**< mbuf mempool for this port's RX queues*/
+ uint32_t min_rx_bufsize; /**< rx buffer size supported */
+ uint32_t max_rx_pktlen; /**< max size of packet*/
+ uint16_t max_rx_queues; /**< max number of rx queues supported */
+	uint16_t max_tx_queues; /**< max number of tx queues supported*/
+ uint64_t n_rxpkts; /**< number of packets received */
+ uint64_t n_txpkts; /**< number of packets transmitted */
+	if_stats stats; /**< port stats - populated from rte_eth_stats */
+ uint16_t(*retrieve_bulk_pkts) (uint8_t, uint16_t, struct rte_mbuf **);
+ /**< pointer to read packets*/
+ uint16_t(*transmit_bulk_pkts) (struct _l2_phy_interface_ *, struct rte_mbuf **, uint64_t);
+ /**< pointer to transmit the bulk of packets */
+ int (*transmit_single_pkt) (struct _l2_phy_interface_ *, struct rte_mbuf *);
+	/**< pointer to transmit a single packet*/
+ struct rte_eth_dev_tx_buffer *tx_buffer;
+	uint64_t tx_buf_len; /**< size of tx_buffer in bytes */
+ void *ipv4_list; /**< pointer to ip list */
+ void *ipv6_list; /**< pointer to ipv6 list */
+ struct bond_port *bond_config; /**< pointer to bond info*/
+ port_config_t port_config;
+} __rte_cache_aligned l2_phy_interface_t;
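+
+/*
+ * Illustrative sketch (not compiled): transmitting through the function
+ * pointers stored in l2_phy_interface_t. pkt, pkts and n are assumed to be
+ * prepared by the caller.
+ */
+#if 0
+static void example_tx(l2_phy_interface_t *port, struct rte_mbuf *pkt,
+		       struct rte_mbuf **pkts, uint64_t n)
+{
+	/* Buffer a single packet; IFM flushes it through port->tx_buffer */
+	port->transmit_single_pkt(port, pkt);
+
+	/* Or hand over a whole burst for transmission on the default queue */
+	port->transmit_bulk_pkts(port, pkts, n);
+}
+#endif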
+
+/**
+ * Port IPv4 address details:
+ * Used to maintain IPv4 information of a port.
+ */
+typedef struct _ipv4list_ {
+ struct _ipv4list_ *next;/**< pointer to IPv4 list */
+ uint32_t ipaddr; /**< Configured ipv4 address */
+ unsigned int addrlen; /**< subnet mask or addrlen */
+	unsigned int mtu; /**< IPv4 mtu*/
+ l2_phy_interface_t *port;
+ /**< pointer to a port on which this ipaddr is configured*/
+} ipv4list_t;
+
+/**
+ * Port IPv6 address details:
+ * Used to maintain IPv6 information of a port.
+ */
+typedef struct _ipv6list_ {
+ struct _ipv6list_ *next; /**< Ptr IPv6 list */
+ uint8_t ipaddr[IFM_IPV6_ADDR_SIZE]; /**< Configured ipv6 address */
+ unsigned int addrlen; /**< subnet mask or addrlen*/
+ unsigned int mtu; /**< IPv6 mtu*/
+	l2_phy_interface_t *port; /**< ptr to a port on which ipv6 addr is configured*/
+} ipv6list_t;
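+
+/*
+ * Illustrative sketch (not compiled): walking the per-port IPv4 address list.
+ * The list head is stored as a void pointer in l2_phy_interface_t, so it is
+ * cast back to ipv4list_t before traversal.
+ */
+#if 0
+static void example_walk_ipv4_list(l2_phy_interface_t *port)
+{
+	ipv4list_t *entry;
+
+	for (entry = (ipv4list_t *)port->ipv4_list; entry != NULL;
+	     entry = entry->next) {
+		/* entry->ipaddr holds the configured address and
+		 * entry->addrlen its prefix length */
+	}
+}
+#endif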
+
+/**
+ * Interface Manager client details:
+ * Maintains information about clients who registered for link status update.
+ * Stores callback function to be called in case of link state change.
+ */
+typedef struct _ifm_client_ {
+ uint32_t clientid; /**< unique client id identifies the client used for indexing*/
+ void (*cb_linkupdate) (uint8_t, unsigned int);
+ /**< callback function to be triggered during an event*/
+} __rte_cache_aligned ifm_client;
+
+/**
+ * Interface manager global structure:
+ * IFM main structure has pointer configured port list.
+ */
+typedef struct _interface_main_ {
+ l2_phy_interface_t *port_list[IFM_MAX_PORTARR_SZ];
+	uint32_t nport_configured; /**< no of ports successfully configured through ifm_configure_ports*/
+	uint32_t nport_intialized; /**< no of ports successfully initialized during PCI probe*/
+ uint8_t nclient; /**< no of clients registered for Interface manager events*/
+ ifm_client if_client[IFM_MAX_CLIENT]; /**< Array of interface manager client details*/
+} __rte_cache_aligned interface_main_t;
+
+/**
+ * Init function of Interface manager. Calls port_setup function for every port.
+ *
+ * @param *pconfig
+ * A pointer to port_config_t contains port configuration.
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int ifm_configure_ports(port_config_t *pconfig);
+
+/**
+ * Returns first port from port list.
+ *
+ * @param
+ * None
+ *
+ * @returns
+ * On success - Returns a pointer to first port in the list of
+ * type l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_first_port(void);
+
+/**
+ * Get a port from the physical port list which is next node to
+ * the given portid in the list.
+ *
+ * @param portid
+ * A pmdid of port.
+ *
+ * @returns
+ * On success - Returns a pointer to next port in the list of
+ * type l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_next_port(uint8_t port_id);
+
+/**
+ * Get a pointer to port for the given portid from the physical port list.
+ *
+ * @param portid
+ * A pmd id of the port.
+ *
+ * @returns
+ * On success - returns pointer to l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_port(uint8_t);
+
+/**
+ * Get a pointer to port for the given port name from the physical port list.
+ *
+ * @param name
+ * Name of the port
+ *
+ * @returns
+ * On success - returns pointer to l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_port_by_name(const char *name);
+/**
+ * Removes given port from the physical interface list.
+ *
+ * @params
+ * portid - pmd_id of port.
+ * @returns
+ * none
+ */
+void ifm_remove_port_details(uint8_t portid);
+
+/**
+ * Adds the given port to the beginning of the physical interface list.
+ *
+ * @param l2_phy_interface_t *
+ * pointer to l2_phy_interface_t.
+ * @returns
+ * none
+ */
+void ifm_add_port_to_port_list(l2_phy_interface_t *);
+
+/**
+ * Checks whether the global physical port list is NULL.
+ *
+ * @returns
+ * 0 - On success.
+ * 1 - On Failure.
+ */
+int is_port_list_null(void);
+
+/**
+ * Configures the device port. Also sets tx and rx queue.
+ * Populates the port structure and adds it to the physical interface list.
+ *
+ * @param portconfig
+ * Contains configuration about rx queue, tx queue.
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int ifm_port_setup(uint8_t port_id, port_config_t *);
+
+/**
+ * Initializes interface manager main structure
+ * @params
+ * none
+ * @returns
+ * none
+ */
+void ifm_init(void);
+
+/**
+ * Returns number of ports initialized during pci probe.
+ *
+ * @params
+ * void
+ *
+ * @returns
+ * number of ports initialized - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int32_t ifm_get_nports_initialized(void);
+
+/**
+ * Returns number of ports initialized by ifm_init.
+ *
+ * @params
+ * void
+ *
+ * @returns
+ * number of ports initialized - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int32_t ifm_get_nactive_ports(void);
+
+/**
+ * Checks whether port is ipv4 enabled.
+ *
+ * @param portid
+ * A pmd id of the port.
+ *
+ * @returns
+ *	1 - IPv4 is enabled on the port.
+ *	0 - IPv4 is not enabled on the port.
+ *	IFM_FAILURE - Port info not found.
+ */
+int32_t ifm_chk_port_ipv4_enabled(uint8_t port_id);
+
+/**
+ * Checks whether port is ipv6 enabled.
+ *
+ * @param portid
+ * A pmd id of the port.
+ *
+ * @returns
+ *	1 - IPv6 is enabled on the port.
+ *	0 - IPv6 is not enabled on the port.
+ *	IFM_FAILURE - Port info not found.
+ */
+int32_t ifm_chk_port_ipv6_enabled(uint8_t port_id);
+
+/**
+ * Remove ipv4 address from the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ipaddr
+ * ipv4 address to be removed
+ * @param addrlen
+ * ipv4 address length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int16_t ifm_remove_ipv4_port(uint8_t port_id, uint32_t ipaddr,
+ uint32_t addrlen);
+
+/**
+ * Remove ipv6 address from the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ip6addr
+ *	ipv6 address to be removed
+ * @param addrlen
+ *	ipv6 address length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int16_t ifm_remove_ipv6_port(uint8_t port_id, uint32_t ip6addr,
+ uint32_t addrlen);
+
+/**
+ * Add ipv4 address to the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ipaddr
+ * ipv4 address to be configured
+ * @param addrlen
+ * ipv4 address length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int16_t ifm_add_ipv4_port(uint8_t port_id, uint32_t ipaddr, uint32_t addrlen);
+
+/**
+ * Add ipv6 address to the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ip6addr
+ * ipv6 address to be configured
+ * @param addrlen
+ *	ipv6 address length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int8_t ifm_add_ipv6_port(uint8_t port_id, uint8_t ip6addr[], uint32_t addrlen);
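+
+/*
+ * Illustrative sketch (not compiled): configuring one IPv4 and one IPv6
+ * address on port 0. The addresses, prefix lengths and the byte order
+ * expected by ifm_add_ipv4_port are assumptions of this example.
+ */
+#if 0
+static void example_add_addresses(void)
+{
+	uint8_t v6addr[IFM_IPV6_ADDR_SIZE] = {
+		0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01
+	};
+
+	/* 192.168.1.1/24 on port 0 */
+	ifm_add_ipv4_port(0, 0xC0A80101, 24);
+
+	/* 2001:db8::1/64 on port 0 */
+	ifm_add_ipv6_port(0, v6addr, 64);
+}
+#endif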
+
+/**
+ * Buffers the packet in the tx queue.
+ *
+ * @param *port
+ * pointer to the port.
+ * @param *tx_pkts
+ * packet to be transmitted
+ *
+ * @returns
+ * number of packets transmitted
+ */
+int ifm_transmit_single_pkt(l2_phy_interface_t *port,
+ struct rte_mbuf *tx_pkts);
+
+/**
+ * Transmit the packet
+ *
+ * @param *port
+ * pointer to the port.
+ * @param *tx_pkts
+ * packets to be transmitted
+ * @param npkts
+ * number of packets to be transmitted
+ *
+ * @returns
+ * number of packets transmitted
+ */
+uint16_t ifm_transmit_bulk_pkts(l2_phy_interface_t *, struct rte_mbuf **tx_pkts,
+ uint64_t npkts);
+
+/**
+ * Receive burst of 32 packets
+ *
+ * @param portid
+ * From which port we need to read packets
+ * @param qid
+ *	Queue id from which packets are read
+ * @param rx_pkts
+ *	mbuf array in which the read packets are placed
+ *
+ * @returns
+ * number of packets read
+ */
+uint16_t ifm_receive_bulk_pkts(uint8_t port_id, uint16_t qid,
+ struct rte_mbuf **rx_pkts);
+
+/**
+ * Enable or disable promiscuous mode
+ *
+ * @param portid
+ * pmd id of the port
+ * @param enable
+ *	1 - enable, 0 - disable
+ *
+ * @returns
+ * none
+ */
+void ifm_set_port_promisc(uint8_t port_id, uint8_t enable);
+
+/**
+ * Set MTU value for the port
+ *
+ * @param portid
+ *	pmd id of the port
+ * @param mtu
+ *	MTU value
+ *
+ * @returns
+ * none
+ */
+void ifm_set_l2_interface_mtu(uint8_t port_id, uint16_t mtu);
+
+/**
+ * Update the link status of the port
+ *
+ * @param portid
+ *	pmd id of the port
+ * @param linkstatus
+ *	Link status of the port - up(1)/down(0)
+ *
+ * @returns
+ * none
+ */
+void ifm_update_linkstatus(uint8_t port_id, uint16_t linkstatus);
+
+/**
+ * Register for link state event
+ *
+ * @param clientid
+ * Unique number identifies client.
+ * @param cb_linkupdate
+ * Callback function which has to be called at time of event
+ *
+ * @returns
+ * none
+ */
+void ifm_register_for_linkupdate(uint32_t clientid,
+ void (*cb_linkupdate) (uint8_t, unsigned int));
+
+/**
+ * Callback which is triggered at the time of link state change which in turn triggers registered
+ * clients callback
+ *
+ * @param portid
+ * pmd id of the port
+ * @param type
+ * lsi event type
+ * @param
+ * Currently not used
+ *
+ * @returns
+ * none
+ */
+void lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+ void *param);
+/*
+ * Prints list of interfaces
+ * @param void
+ */
+void print_interface_details(void);
+/*
+ * Creates bond interface
+ * @Param name
+ * name of bond port
+ * @Param mode
+ * mode
+ * @Param portconf
+ * port configuration to be applied
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_bond_port_create(const char *name, int mode, port_config_t *portconf);
+/*
+ * Deletes bond interface
+ * @Param name
+ * name of bond port
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_bond_port_delete(const char *name);
+/*
+ * Adds a port as slave to the bond
+ * @Param bonded_port_id
+ *	bond port id
+ * @Param slave_port_id
+ *	slave port's port id
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_add_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id);
+/*
+ * Removes a slave port from the bond
+ * @Param bonded_port_id
+ *	bond port id
+ * @Param slave_port_id
+ *	slave port's port id
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_remove_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id);
+/*
+ * Sets bond port's mode
+ * @Param bonded_port_id
+ * bond port id
+ * @Param mode
+ * mode 0 ... 5
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_mode(uint8_t bonded_port_id, uint8_t mode);
+/*
+ * Get bond port's mode
+ * @Param bonded_port_id
+ * bond port id
+ * @returns mode value or -1 on failure
+ */
+int get_bond_mode(uint8_t bonded_port_id);
+/*
+ * Set the primary slave port of the bond
+ * @Param bonded_port_id
+ *	bond port id
+ * @Param slave_port_id
+ *	slave port's port id
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_primary(uint8_t bonded_port_id, uint8_t slave_port_id);
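+
+/*
+ * Illustrative sketch (not compiled): creating an active-backup bond over two
+ * slave ports. The bond device name, the slave port ids and the pconfig
+ * argument are example assumptions.
+ */
+#if 0
+static void example_create_bond(port_config_t *pconfig)
+{
+	l2_phy_interface_t *bond;
+
+	/* Create the bond port in active-backup mode */
+	if (ifm_bond_port_create("net_bonding0",
+				 IFM_BONDING_MODE_ACTIVE_BACKUP, pconfig) != 0)
+		return;
+
+	/* Look the bond port up by name to learn its pmd id */
+	bond = ifm_get_port_by_name("net_bonding0");
+	if (bond == NULL)
+		return;
+
+	/* Enslave physical ports 0 and 1, then pick port 0 as primary */
+	ifm_add_slave_port(bond->pmdid, 0);
+	ifm_add_slave_port(bond->pmdid, 1);
+	set_bond_primary(bond->pmdid, 0);
+}
+#endif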
+/*
+ * Get primary port of the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @returns port id of primary on success and 1 on failure
+ */
+int get_bond_primary_port(uint8_t bonded_port_id);
+/*
+ * Get slave count for the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @returns slave count on success and 1 on failure
+ */
+int get_bond_slave_count(uint8_t bonded_port_id);
+/*
+ * Get active slave count for the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @returns active slaves count on success and 1 on failure
+ */
+int get_bond_active_slave_count(uint8_t bonded_port_id);
+/*
+ * Get slaves in the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slaves
+ * array to save slave port
+ * @returns 0 on success and 1 on failure
+ */
+int get_bond_slaves(uint8_t bonded_port_id, uint8_t slaves[RTE_MAX_ETHPORTS]);
+/*
+ * Get active slaves in the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slaves
+ * array to save slave port
+ * @returns 0 on success and 1 on failure
+ */
+int get_bond_active_slaves(uint8_t bonded_port_id,
+ uint8_t slaves[RTE_MAX_ETHPORTS]);
+/*
+ * Sets bond port's mac address
+ * @Param bonded_port_id
+ *	bond port id
+ * @Param mac_addr
+ *	mac address to set
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_mac_address(uint8_t bonded_port_id, struct ether_addr *mac_addr);
+/*
+ * Resets bond port's MAC address
+ * @Param bonded_port_id
+ * bond port id
+ * @returns 0 on success and 1 on failure
+ */
+int reset_bond_mac_addr(uint8_t bonded_port_id);
+int get_bond_mac(uint8_t bonded_port_id, struct ether_addr *macaddr);
+/*
+ * Sets bond port's xmit policy
+ * @Param bonded_port_id
+ * bond port id
+ * @Param policy
+ * xmit policy
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_xmitpolicy(uint8_t bonded_port_id, uint8_t policy);
+/*
+ * Get bond port's xmit policy
+ * @Param bonded_port_id
+ * bond port id
+ * @returns xmit policy value or -1 on failure
+ */
+int get_bond_xmitpolicy(uint8_t bonded_port_id);
+/*
+ * Sets bond port's link monitor frequency
+ * @Param bonded_port_id
+ * bond port id
+ * @Param internal_ms
+ * frequency in ms
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_link_montitor_frequency(uint8_t bonded_port_id,
+ uint32_t internal_ms);
+/*
+ * Get bond port's link monitor frequency
+ * @Param bonded_port_id
+ * bond port id
+ * @returns frequency value or -1 on failure
+ */
+int get_bond_link_monitor_frequency(uint8_t bonded_port_id);
+/*
+ * Sets bond port's link down delay
+ * @Param bonded_port_id
+ * bond port id
+ * @Param delay_ms
+ * delay time in ms
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_linkdown_delay(uint8_t bonded_port_id, uint32_t delay_ms);
+/*
+ * Get bond port's link down delay
+ * @Param bonded_port_id
+ * bond port id
+ * @returns delay ms value or -1 on failure
+ */
+int get_bond_link_down_delay(uint8_t bonded_port_id);
+/*
+ * Sets bond port's link up delay
+ * @Param bonded_port_id
+ * bond port id
+ * @Param delay_ms
+ * delay time in ms
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_linkup_delay(uint8_t bonded_port_id, uint32_t delay_ms);
+/*
+ * Get bond port's link up delay
+ * @Param bonded_port_id
+ * bond port id
+ * @returns delay ms value or -1 on failure
+ */
+int get_bond_link_up_delay(uint8_t bonded_port_id);
+/*
+ * Print port statistics
+ * @Param void
+ * @returns void
+ */
+void print_stats(void);
+/*
+ * Gets information about port
+ * @Param port_id
+ * portid of the port
+ * @param port_info
+ * port to address to copy port info
+ * @returns 0 on success otherwise -1
+ */
+int ifm_get_port_info(uint8_t port_id, l2_phy_interface_t *port_info);
+/*
+ * Gets information about next port of given portid
+ * @Param port_id
+ * portid of the port
+ * @param port_info
+ * port to address to copy port info
+ * @returns 0 on success otherwise -1
+ */
+int ifm_get_next_port_info(uint8_t port_id, l2_phy_interface_t *port_info);
+/*
+ * Enable ifm debug
+ * @Param dbg value
+ *	Debug value - IFM_DEBUG_CONFIG(1), IFM_DEBUG_RXTX(2), IFM_DEBUG_LOCKS(4), IFM_DEBUG(16)
+ * @param flag
+ *	Enable 1, disable 0
+ * @returns none
+ */
+void config_ifm_debug(int dbg, int flag);
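+
+/*
+ * Illustrative sketch (not compiled): toggling IFM debug traces. It assumes
+ * the dbg argument takes the IFM_DEBUG_* bit values documented above.
+ */
+#if 0
+static void example_debug_toggle(void)
+{
+	config_ifm_debug(IFM_DEBUG_CONFIG, 1);	/* enable config traces */
+	config_ifm_debug(IFM_DEBUG_LOCKS, 1);	/* enable lock traces */
+	config_ifm_debug(IFM_DEBUG_LOCKS, 0);	/* disable lock traces */
+}
+#endif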
+#endif
diff --git a/common/VIL/l2l3_stack/l2_proto.c b/common/VIL/l2l3_stack/l2_proto.c
new file mode 100644
index 00000000..44c50b08
--- /dev/null
+++ b/common/VIL/l2l3_stack/l2_proto.c
@@ -0,0 +1,239 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/*
+ * Filename - l2_proto.c
+ * L2 Protocol Handler
+ */
+
+#include "l2_proto.h"
+
+static struct proto_packet_type *proto_list[3];
+/*
+ * Function to register the rx functions for different ethertypes. This is maintained in a list.
+ */
+void
+list_add_type(uint16_t type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *))
+{
+ if (type == ETHER_TYPE_IPv4) {
+ proto_list[IPv4_VAL] =
+ rte_malloc(NULL, sizeof(struct proto_packet_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_list[IPv4_VAL]->type = type;
+ proto_list[IPv4_VAL]->func = func;
+ }
+
+ else if (type == ETHER_TYPE_ARP) {
+ proto_list[ARP_VAL] =
+ rte_malloc(NULL, sizeof(struct proto_packet_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_list[ARP_VAL]->type = type;
+ proto_list[ARP_VAL]->func = func;
+ } else if (type == ETHER_TYPE_IPv6) {
+ proto_list[IPv6_VAL] =
+ rte_malloc(NULL, sizeof(struct proto_packet_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_list[IPv6_VAL]->type = type;
+ proto_list[IPv6_VAL]->func = func;
+ }
+
+}
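+
+/*
+ * Illustrative sketch (not compiled): how a protocol module registers its RX
+ * handler with the L2 dispatcher. app_ipv4_rx is an assumed handler name; in
+ * this stack the real handlers are registered by the ARP and L3 modules.
+ */
+#if 0
+static void app_ipv4_rx(struct rte_mbuf **pkts, uint16_t nb_pkts,
+			uint64_t pkts_mask, l2_phy_interface_t *port)
+{
+	/* Process only the packets whose bits are set in pkts_mask */
+}
+
+static void app_register_ipv4(void)
+{
+	list_add_type(ETHER_TYPE_IPv4, app_ipv4_rx);
+}
+#endif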
+
+/*
+ * Check the mac address to see whether it is destined to this host or not.
+ * Call relevant functions registered by other modules when the ethertype matches,
+ * if it is destined to this host. Drop the packet otherwise.
+ */
+
+void
+l2_check_mac(struct rte_mbuf *m[IFM_BURST_SIZE], l2_phy_interface_t *port,
+ uint8_t i, uint64_t *pkts_mask, uint64_t *arp_pkts_mask,
+ uint64_t *ipv4_pkts_mask, uint64_t *ipv6_pkts_mask)
+{
+	struct ether_hdr *eth = NULL;
+	uint16_t same_mac = 0;
+	uint16_t ethtype = 0;
+
+	if (m[i] != NULL) {
+		eth = rte_pktmbuf_mtod(m[i], struct ether_hdr *);
+		if (eth == NULL) {
+			/* No Ethernet header could be read from the mbuf */
+			printf("l2_check_mac: Ethernet Dest Addr NULL !!!\n");
+			return;
+		}
+		ethtype = rte_be_to_cpu_16(eth->ether_type);
+#if L2_PROTO_DBG
+ printf("%s => mbuf pkt dest mac addr: %x:%x:%x:%x:%x:%x\n",
+ __FUNCTION__, eth->d_addr.addr_bytes[0],
+ eth->d_addr.addr_bytes[1], eth->d_addr.addr_bytes[2],
+ eth->d_addr.addr_bytes[3], eth->d_addr.addr_bytes[4],
+ eth->d_addr.addr_bytes[5]);
+ printf("%s => port mac addr: %x:%x:%x:%x:%x:%x\n", __FUNCTION__,
+ port->macaddr[0], port->macaddr[1], port->macaddr[2],
+ port->macaddr[3], port->macaddr[4], port->macaddr[5]);
+
+#endif
+ /* Compare the mac addresses */
+ same_mac =
+ (is_same_ether_addr
+ (&eth->d_addr, (struct ether_addr *)port->macaddr)
+ ||
+ ((is_broadcast_ether_addr
+ ((struct ether_addr *)&eth->d_addr)
+ && (ethtype == ETHER_TYPE_ARP)))
+ || (ethtype == ETHER_TYPE_IPv6
+ && eth->d_addr.addr_bytes[0] == 0x33
+ && eth->d_addr.addr_bytes[1] == 0x33));
+
+ if (!same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *pkts_mask ^= temp_mask;
+ rte_pktmbuf_free(m[i]);
+ m[i] = NULL;
+ } else if ((ethtype == ETHER_TYPE_IPv4) && same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *ipv4_pkts_mask ^= temp_mask;
+ } else if ((ethtype == ETHER_TYPE_ARP) && same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *arp_pkts_mask ^= temp_mask;
+ } else if ((ethtype == ETHER_TYPE_IPv6) && same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *ipv6_pkts_mask ^= temp_mask;
+ }
+ }
+ printf("\n%s: arp_pkts_mask = %" PRIu64 ", ipv4_pkts_mask = %" PRIu64
+	       ", ipv6_pkts_mask =%" PRIu64 ", pkt-type = %x, same_mac = %d\n",
+ __FUNCTION__, *arp_pkts_mask, *ipv4_pkts_mask, *ipv6_pkts_mask,
+ ethtype, same_mac);
+}
+
+void
+protocol_handler_recv(struct rte_mbuf **pkts_burst, uint16_t nb_rx,
+ l2_phy_interface_t *port)
+{
+ uint8_t i;
+ uint64_t pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+ uint64_t arp_pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+ uint64_t ipv4_pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+ uint64_t ipv6_pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+
+ /*Check the mac address of every single packet and unset the bits in the packet mask
+ *for those packets which are not destined to this host
+ */
+ for (i = 0; i < nb_rx; i++) {
+ l2_check_mac(pkts_burst, port, i, &pkts_mask, &arp_pkts_mask,
+ &ipv4_pkts_mask, &ipv6_pkts_mask);
+ }
+ if (nb_rx) {
+ if (arp_pkts_mask) {
+ proto_list[ARP_VAL]->func(pkts_burst, nb_rx,
+ arp_pkts_mask, port);
+ printf
+ ("=================After ARP ==================\n");
+ }
+ if (ipv4_pkts_mask) {
+ printf
+ ("=================Calling IPV4 L3 RX ==================\n");
+ printf("====nb_rx:%u, ipv4_pkts_mask: %lu\n\n", nb_rx,
+ ipv4_pkts_mask);
+ proto_list[IPv4_VAL]->func(pkts_burst, nb_rx,
+ ipv4_pkts_mask, port);
+ }
+ if (ipv6_pkts_mask) {
+ printf
+ ("=================Calling IPV6 L3 RX ==================\n");
+ printf("====nb_rx:%u, ipv6_pkts_mask: %lu\n\n", nb_rx,
+ ipv6_pkts_mask);
+ proto_list[IPv6_VAL]->func(pkts_burst, nb_rx,
+ ipv6_pkts_mask, port);
+ }
+ }
+}
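+
+/*
+ * Illustrative sketch (not compiled): the receive path that feeds this
+ * dispatcher. A polling core reads a burst through the port's
+ * retrieve_bulk_pkts pointer and hands it to protocol_handler_recv().
+ */
+#if 0
+static void example_rx_loop(l2_phy_interface_t *port)
+{
+	struct rte_mbuf *pkts[IFM_BURST_SIZE];
+	uint16_t nb_rx;
+
+	for (;;) {
+		nb_rx = port->retrieve_bulk_pkts(port->pmdid,
+						 IFM_RX_DEFAULT_Q, pkts);
+		if (nb_rx)
+			protocol_handler_recv(pkts, nb_rx, port);
+	}
+}
+#endif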
+
+#if 0
+switch (qid) {
+case 1:
+ {
+#if 0
+ printf
+ ("=====================ENTERED ARP CASE================\n");
+ while (cur->type != ETHER_TYPE_ARP && cur != NULL) {
+ cur = cur->next;
+ }
+ if (cur != NULL) {
+ //printf("L2 PROTO TEST-14=================================\n");
+ printf
+ ("==============\nARPARPARPARP \n=======================\n");
+ cur->func(pkts_burst, nb_rx, pkts_mask, portid);
+ }
+#endif
+ proto_list[ARP_VAL]->func(pkts_burst, nb_rx, arp_pkts_mask,
+ portid);
+ break;
+ }
+case 0:
+ {
+#if 0
+ while (cur->type != ETHER_TYPE_IPv4 && cur != NULL) {
+ cur = cur->next;
+ }
+ if (cur != NULL) {
+ //printf("L2 PROTO TEST-15=================================\n");
+ //printf("==============\nPkts mask in while calling IPv4 %d \n=======================\n",ipv4_pkts_mask);
+ cur->func(pkts_burst, nb_rx, ipv4_pkts_mask, portid);
+ }
+ break;
+#endif
+ // printf("=========Inside switch==============\n");
+ proto_list[IPv4_VAL]->func(pkts_burst, nb_rx, ipv4_pkts_mask,
+ portid);
+ break;
+ }
+ /* case 2:
+ {
+ while(cur->type != ETHER_TYPE_IPv6 && cur != NULL)
+ {
+ cur = cur->next;
+ }
+ if(cur != NULL)
+ {
+ cur->func(pkts_burst, nb_rx, ipv6_pkts_mask, portid);
+ }
+ break;
+ } */
+default:
+ {
+ rte_exit(EXIT_FAILURE, "Ethertype not found \n");
+ break;
+ }
+}
+#endif
+
+/*
+ * L2 Stack Init for future
+
+
+ void
+l2_stack_init(void)
+{
+
+}
+
+*/
diff --git a/common/VIL/l2l3_stack/l2_proto.h b/common/VIL/l2l3_stack/l2_proto.h
new file mode 100644
index 00000000..05466070
--- /dev/null
+++ b/common/VIL/l2l3_stack/l2_proto.h
@@ -0,0 +1,150 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+ * @file
+ * L2 Protocol Handler
+ * Reads the packet from the interface and sets the
+ * masks for a burst of packets based on ethertype and
+ * calls the relevant function registered for that ethertype
+ *
+ */
+
+#ifndef L2_PROTO_H
+#define L2_PROTO_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_eth_ctrl.h>
+#include <interface.h>
+
+/* Array indexes of proto_packet_type structure */
+#define IPv4_VAL 0 /**< Array index for IPv4 */
+#define ARP_VAL 1 /**< Array index for ARP */
+#define IPv6_VAL 2 /**< Array index for IPv6 */
+
+/* Enable to print L2_Proto debugs */
+#define L2_PROTO_DBG 1 /**< Enable to print L2 Proto debugs */
+
+/**
+ * A structure used to call the function handlers for a certain ethertype
+ */
+struct proto_packet_type {
+ uint16_t type; /**< Ethertype */
+ void (*func) (struct rte_mbuf **m, uint16_t nb_pkts, uint64_t pkt_mask, l2_phy_interface_t *port); /**< Function pointer to the registered callback function */
+} __rte_cache_aligned;/**< RTE Cache alignment */
+
+/**
+ * Function called from other modules to add the certain rx functions for particular ethertypes
+ *
+ * @param type
+ * Ethertype
+ * @param (*func)()
+ * Function pointer to the function being registered by different modules
+ */
+void
+list_add_type(uint16_t type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *));
+
+/**
+ * Function to check whether the destination mac address of the packet is the mac address of the received port.
+ * Drop the packet if it is not destined to the host.
+ * If it is destined to this host, then set the packet masks for IPv4, IPv6 and ARP packet types for a burst of packets.
+ *
+ * @param m
+ * rte_mbuf packet
+ *
+ * @param port
+ *	Port (l2_phy_interface_t) on which the packet was received
+ *
+ * @param pos
+ * Index of the packet in the burst
+ *
+ * @param pkts_mask
+ * Packet mask where bits are set at positions for the packets in the burst which were destined to the host
+ *
+ * @param arp_pkts_mask
+ * Packet mask for ARP where bits are set for valid ARP packets
+ *
+ * @param ipv4_pkts_mask
+ * Packet mask for IPv4 where bits are set for valid IPv4 packets
+ *
+ * @param ipv6_pkts_mask
+ * Packet mask for IPv6 where bits are set for valid IPv6 packets
+ *
+ */
+void
+l2_check_mac(struct rte_mbuf *m[IFM_BURST_SIZE], l2_phy_interface_t *port,
+ uint8_t pos, uint64_t *pkts_mask, uint64_t *arp_pkts_mask,
+ uint64_t *ipv4_pkts_mask, uint64_t *ipv6_pkts_mask);
+
+/**
+ * Entry function to L2 Protocol Handler where appropriate functions are called for particular ethertypes
+ *
+ * @param m
+ * rte_mbuf packet
+ *
+ * @param nb_rx
+ * Number of packets read
+ *
+ * @param port
+ *	Port (l2_phy_interface_t) on which the packets were received
+ */
+void
+protocol_handler_recv(struct rte_mbuf *m[IFM_BURST_SIZE], uint16_t nb_rx,
+ l2_phy_interface_t *port);
+
+#endif
diff --git a/common/VIL/l2l3_stack/l3fwd_common.h b/common/VIL/l2l3_stack/l3fwd_common.h
new file mode 100644
index 00000000..cece57c0
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_common.h
@@ -0,0 +1,111 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+* @file
+* L3fwd common header file for LPM IPv4 and IPv6 stack initialization
+*/
+
+#ifndef L3FWD_COMMON_H
+#define L3FWD_COMMON_H
+
+/* Standard Libraries */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/param.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <getopt.h>
+#include <unistd.h>
+
+/* DPDK RTE Libraries */
+#include <rte_common.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_port.h>
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_table_hash.h>
+#include <rte_table.h>
+#include <rte_table_lpm.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <l3fwd_lpm4.h>
+#include <l3fwd_lpm6.h>
+#include <rte_table_lpm_ipv6.h>
+
+/**
+* Define the Macros
+*/
+#define MAX_ROUTES 4	/**< Maximum number of routes that can be added*/
+#define L3FWD_DEBUG 1 /**< if set, enables the fast path logs */
+#define MULTIPATH_FEAT 1 /**< if set, enables the ECMP Multicast feature */
+
+//#define IPPROTO_ICMPV6 58 /**< Protocol ID for ICMPv6 */
+
+/**
+* L3fwd initialization for creating the IPv4 and IPv6 LPM tables.
+*/
+void l3fwd_init(void);
+
+/**
+* L3fwd IPv4 LPM table population; it calls the IPv4 route add function which stores all the routes in the LPM table
+*/
+void populate_lpm4_table_routes(void);
+
+/**
+* L3fwd IPv6 LPM table population; it calls the IPv6 route add function which stores all the routes in the LPM6 table
+*/
+void populate_lpm6_table_routes(void);
+
+/**
+* L3fwd LPM table population for both IPv4 and IPv6.
+*/
+void populate_lpm_routes(void);
+
+#endif
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm4.c b/common/VIL/l2l3_stack/l3fwd_lpm4.c
new file mode 100644
index 00000000..081038b6
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm4.c
@@ -0,0 +1,1119 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "l3fwd_common.h"
+#include "interface.h"
+#include "l2_proto.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+#include <inttypes.h>
+
+/* Declare Global variables */
+
+/* Global for IPV6 */
+void *lpm4_table; /**< lpm4_table handler */
+
+/*Hash table for L2 adjacency */
+struct rte_hash *l2_adj_hash_handle; /**< l2 adjacency hash table handler */
+struct rte_hash *fib_path_hash_handle; /**< fib path hash table handler */
+
+l3_stats_t stats; /**< L3 statistics */
+
+/* Global load balancing hash table for ECMP*/
+uint8_t nh_links[MAX_SUPPORTED_FIB_PATHS][HASH_BUCKET_SIZE] = /**< Round Robin Hash entries for ECMP only*/
+{
+ /* 1 path, No Load balancing is required */
+ {0},
+
+ /* 2 path */
+ {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
+
+ /* 3 path */
+ {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0,
+ 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1,
+ 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,
+ 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
+
+ /* 4 path */
+ {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
+
+ /* 5 path */
+ {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0,
+ 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1,
+ 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2,
+ 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3},
+
+ /* 6 path */
+ {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+ 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+ 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+ 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
+
+ /* 7 path */
+ {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1,
+ 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3,
+ 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5,
+ 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0},
+
+ /* 8 path */
+ {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7}
+};
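+
+/*
+ * Illustrative sketch (not compiled): how the round-robin table above is
+ * meant to be indexed. Selecting the row by (number of next hops - 1) and the
+ * column by a per-flow hash is an assumption based on the table layout;
+ * HASH_BUCKET_SIZE is the row width (64 entries here).
+ */
+#if 0
+static uint8_t example_pick_path(uint8_t fib_nh_size, uint32_t flow_hash)
+{
+	/* fib_nh_size is 1..MAX_SUPPORTED_FIB_PATHS; the hash picks a column */
+	return nh_links[fib_nh_size - 1][flow_hash % HASH_BUCKET_SIZE];
+}
+#endif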
+
+#if 0
+#define META_DATA_OFFSET 128
+
+#define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
+#define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
+#define ETH_HDR_SIZE 14
+#define IP_START (ETHERNET_START + ETH_HDR_SIZE)
+#define TCP_START (IP_START + 20)
+
+static void print_pkt(struct rte_mbuf *pkt)
+{
+ int i;
+ int size = 14;
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, ETHERNET_START);
+
+ printf("Meta-data:\n");
+ for (i = 0; i < size; i++) {
+ printf("%02x ", rd[i]);
+ if ((i & 3) == 3)
+ printf("\n");
+ }
+ printf("\n");
+ printf("IP and TCP/UDP headers:\n");
+ rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
+ for (i = 0; i < 40; i++) {
+ printf("%02x ", rd[i]);
+ if ((i & 3) == 3)
+ printf("\n");
+ }
+
+}
+#endif
+static struct ip_protocol_type *proto_type[2];
+int lpm_init(void)
+{
+
+	/* Initialize LPMv4 params */
+ struct rte_table_lpm_params lpm_params = {
+ .name = "LPMv4",
+ .n_rules = IPV4_L3FWD_LPM_MAX_RULES,
+ .number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S,
+ .flags = 0,
+ .entry_unique_size = sizeof(struct fib_info),
+ .offset = 128,
+ };
+
+ /* Create LPMv4 tables */
+ lpm4_table =
+ rte_table_lpm_ops.f_create(&lpm_params, rte_socket_id(),
+ sizeof(struct fib_info));
+ if (lpm4_table == NULL) {
+ printf("Failed to create LPM IPV4 table\n");
+ return 0;
+ }
+
+ /*Initialize L2 ADJ hash params */
+ struct rte_hash_parameters l2_adj_ipv4_params = {
+ .name = "l2_ADJ_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct l2_adj_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ };
+
+ /* Create IPv4 L2 Adj Hash tables */
+ l2_adj_hash_handle = rte_hash_create(&l2_adj_ipv4_params);
+
+ if (l2_adj_hash_handle == NULL) {
+ printf("L2 ADJ rte_hash_create failed\n");
+ return 0;
+ } else {
+ printf("l2_adj_hash_handle %p\n\n", (void *)l2_adj_hash_handle);
+ }
+
+	/* Initialize FIB path hash params */
+ struct rte_hash_parameters fib_path_ipv4_params = {
+ .name = "FIB_PATH_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct fib_path_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ };
+
+ /* Create FIB PATH Hash tables */
+ fib_path_hash_handle = rte_hash_create(&fib_path_ipv4_params);
+
+ if (fib_path_hash_handle == NULL) {
+ printf("FIB path rte_hash_create failed\n");
+ return 0;
+ }
+ return 1;
+}
+
+int lpm4_table_route_add(struct routing_info *data)
+{
+
+ struct routing_info *fib = data;
+ struct rte_table_lpm_key lpm_key = {
+ .ip = fib->dst_ip_addr,
+ .depth = fib->depth,
+ };
+ uint8_t i;
+ static int Total_route_count;
+ struct fib_info entry;
+ entry.dst_ip_addr = rte_bswap32(fib->dst_ip_addr);
+ entry.depth = fib->depth;
+	entry.fib_nh_size = fib->fib_nh_size; /**< 1 for a single path, greater than 1 for Multipath (ECMP) */
+
+#if MULTIPATH_FEAT
+ if (entry.fib_nh_size == 0 || entry.fib_nh_size > MAX_FIB_PATHS)
+#else
+ if (entry.fib_nh_size != 1) /**< For Single FIB_PATH */
+#endif
+ {
+ printf("Route can't be configured!!, entry.fib_nh_size = %d\n",
+ entry.fib_nh_size);
+ return 0;
+ }
+ /* Populate L2 adj and precomputes l2 encap string */
+#if MULTIPATH_FEAT
+ for (i = 0; i < entry.fib_nh_size; i++)
+#else
+ for (i = 0; i < 1; i++)
+#endif
+ {
+ struct fib_path *fib_path_addr = NULL;
+
+ fib_path_addr =
+ populate_fib_path(fib->nh_ip_addr[i], fib->out_port[i]);
+ if (fib_path_addr) {
+
+ entry.path[i] = fib_path_addr;
+ printf("Fib info for the Dest IP");
+ printf(" : %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ "/%" PRIu8
+ " => fib_path Addr: %p, l2_adj Addr: %p\n",
+ (fib->dst_ip_addr & 0xFF000000) >> 24,
+ (fib->dst_ip_addr & 0x00FF0000) >> 16,
+ (fib->dst_ip_addr & 0x0000FF00) >> 8,
+ (fib->dst_ip_addr & 0x000000FF), fib->depth,
+ fib_path_addr,
+ (void *)entry.path[i]->l2_adj_ptr);
+ } else {
+ printf("Fib info for the Dest IP :\
+ %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 "/%" PRIu8 " => fib_path Addr: NULL \n", (fib->dst_ip_addr & 0xFF000000) >> 24, (fib->dst_ip_addr & 0x00FF0000) >> 16, (fib->dst_ip_addr & 0x0000FF00) >> 8, (fib->dst_ip_addr & 0x000000FF), fib->depth);
+ entry.path[i] = NULL; /**< setting all other fib_paths to NULL */
+ }
+ }
+
+ int key_found, ret;
+ void *entry_ptr;
+ ret =
+ rte_table_lpm_ops.f_add(lpm4_table, (void *)&lpm_key, &entry,
+ &key_found, &entry_ptr);
+
+ if (ret != 0) {
+ printf("Failed to Add IP route\n");
+ return 0;
+ }
+ Total_route_count++;
+	printf("Total Routes Added : %u, Key_found: %d\n", Total_route_count,
+ key_found);
+ printf("Adding Route to LPM table...\n");
+
+ printf("Iterate with Cuckoo Hash table\n");
+ iterate_cuckoo_hash_table();
+ return 1;
+}
+
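+#if 0
+/*
+ * Illustrative usage sketch (not part of the original code path): how a
+ * single-path route could be added through lpm4_table_route_add(). The prefix
+ * 10.0.0.0/24, next hop 10.0.0.2 and port 0 are hypothetical values;
+ * dst_ip_addr/nh_ip_addr are in host byte order with the first octet in the
+ * most significant byte, as the print statements above assume.
+ */
+static int example_lpm4_route_add(void)
+{
+	struct routing_info route;
+
+	memset(&route, 0, sizeof(route));
+	route.dst_ip_addr = 0x0A000000;		/* 10.0.0.0 */
+	route.depth = 24;
+	route.fib_nh_size = 1;			/* single path */
+	route.nh_ip_addr[0] = 0x0A000002;	/* 10.0.0.2 */
+	route.out_port[0] = 0;
+	return lpm4_table_route_add(&route);
+}
+#endif
+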
+int lpm4_table_route_delete(uint32_t dst_ip, uint8_t depth)
+{
+
+ struct rte_table_lpm_key lpm_key = {
+ .ip = dst_ip,
+ .depth = depth,
+ };
+
+ int key_found, ret;
+ void *entry = NULL;
+
+	entry = rte_zmalloc(NULL, 512, RTE_CACHE_LINE_SIZE);
+	if (entry == NULL)	/* guard against allocation failure */
+		return 0;
+
+ /* Deleting a IP route from LPMv4 table */
+ ret =
+ rte_table_lpm_ops.f_delete(lpm4_table, &lpm_key, &key_found, entry);
+
+ if (ret) {
+ printf("Failed to Delete IP route from LPMv4 table\n");
+ return 0;
+ }
+
+ printf("Deleted route from LPM table (IPv4 Address = %"
+ PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+	       "/%u), key_found = %d\n", (lpm_key.ip & 0xFF000000) >> 24,
+ (lpm_key.ip & 0x00FF0000) >> 16, (lpm_key.ip & 0x0000FF00) >> 8,
+ (lpm_key.ip & 0x000000FF), lpm_key.depth, key_found);
+
+ /* Deleting a L2 Adj entry if refcount is 1, Else decrement Refcount */
+ remove_fib_l2_adj_entry(entry);
+ rte_free(entry);
+ printf("Iterate with Cuckoo Hash table\n");
+ iterate_cuckoo_hash_table();
+ return 1;
+}
+
+int
+lpm4_table_lookup(struct rte_mbuf **pkts_burst, uint16_t nb_pkts,
+ uint64_t pkts_mask,
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask)
+{
+
+ struct routing_table_entry *ipv4_entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t lookup_hit_mask_ipv4 = 0;
+ int status;
+ uint64_t pkts_key_mask = pkts_mask;
+ uint64_t lookup_miss_mask_ipv4 = pkts_mask;
+
+ static uint64_t sent_count;
+ static uint64_t rcvd_count;
+ rcvd_count += nb_pkts;
+ if (L3FWD_DEBUG) {
+ printf
+		    (" Received IPv4 nb_pkts: %u, Rcvd_count: %lu, pkts_mask: %p\n",
+ nb_pkts, rcvd_count, (void *)pkts_mask);
+ }
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+
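+	/*
+	 * The destination IP of each valid packet is copied into the mbuf
+	 * metadata at offset 128, which is where the LPM table expects its
+	 * lookup key (see lpm_params.offset in lpm_init()).
+	 */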
+ for (; pkts_key_mask;) {
+/**< Populate key offset in META DATA for all valid pkts */
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_key_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ pkts_key_mask &= ~pkt_mask;
+ struct rte_mbuf *mbuf = pkts_burst[pos];
+ uint32_t *lpm_key = NULL;
+ uint32_t *dst_addr = NULL;
+ lpm_key = (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, 128);
+ dst_addr =
+ (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ dst_addr_offset);
+ *lpm_key = *dst_addr;
+ if (L3FWD_DEBUG) {
+
+			printf("Rcvd Pkt (IPv4 Address = %"
+ PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ")\n",
+ (rte_cpu_to_be_32(*lpm_key) & 0xFF000000) >> 24,
+ (rte_cpu_to_be_32(*lpm_key) & 0x00FF0000) >> 16,
+ (rte_cpu_to_be_32(*lpm_key) & 0x0000FF00) >> 8,
+ (rte_cpu_to_be_32(*lpm_key) & 0x000000FF));
+ }
+ }
+
+ /* Lookup for IP route in LPM table */
+ if (L3FWD_DEBUG)
+ printf("\nIPV4 Lookup Mask Before = %p\n",
+ (void *)lookup_hit_mask_ipv4);
+ status =
+ rte_table_lpm_ops.f_lookup(lpm4_table, pkts_burst, pkts_mask,
+ &lookup_hit_mask_ipv4,
+ (void **)ipv4_entries);
+
+ if (status) {
+ printf("LPM Lookup failed for IP route\n");
+ return 0;
+ }
+
+ lookup_miss_mask_ipv4 = lookup_miss_mask_ipv4 & (~lookup_hit_mask_ipv4);
+ if (L3FWD_DEBUG) {
+ printf
+ ("AFTER lookup_hit_mask_ipv4 = %p, lookup_miss_mask_ipv4 =%p\n",
+ (void *)lookup_hit_mask_ipv4,
+ (void *)lookup_miss_mask_ipv4);
+ }
+
+ for (; lookup_miss_mask_ipv4;) {
+/**< Drop packets for lookup_miss_mask */
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_miss_mask_ipv4);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_miss_mask_ipv4 &= ~pkt_mask;
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ stats.nb_l3_drop_pkt++; /**< Peg the L3 Drop counter */
+ if (L3FWD_DEBUG)
+ printf("\n DROP PKT IPV4 Lookup_miss_Mask = %p\n",
+ (void *)lookup_miss_mask_ipv4);
+ }
+
+ *hit_mask = lookup_hit_mask_ipv4;
+ for (; lookup_hit_mask_ipv4;) {
+/**< Process the packets for lookup_hit_mask*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_hit_mask_ipv4);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_hit_mask_ipv4 &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts_burst[pos];
+
+ struct fib_info *entry = (struct fib_info *)ipv4_entries[pos];
+
+#if MULTIPATH_FEAT
+
+ uint8_t ecmp_path = 0;
+ ecmp_path = ip_hash_load_balance(pkts_burst[pos]);
+ uint8_t selected_path = 0;
+ struct fib_path *fib_path = NULL;
+ if (((entry->fib_nh_size != 0)
+ && (entry->fib_nh_size - 1) < MAX_SUPPORTED_FIB_PATHS)
+ && ((ecmp_path != 0) && (ecmp_path - 1) < HASH_BUCKET_SIZE))
+ selected_path =
+ nh_links[entry->fib_nh_size - 1][ecmp_path - 1];
+ if (selected_path < MAX_FIB_PATHS)
+ fib_path = entry->path[selected_path];
+ if (L3FWD_DEBUG) {
+ printf
+ ("Total supported Path :%u, Hashed ECMP Key : %u, selected Fib_path: %u\n",
+ entry->fib_nh_size, ecmp_path, selected_path);
+ }
+#else
+ struct fib_path *fib_path = entry->path[0];
+#endif
+
+ if (fib_path == NULL) {
+ rte_pktmbuf_free(pkt);
+ pkts_burst[pos] = NULL;
+ stats.nb_l3_drop_pkt++; /**< Peg the L3 Drop counter */
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ if (L3FWD_DEBUG)
+ printf
+ ("Fib_path is NULL, ARP has not resolved, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ if (fib_path->l2_adj_ptr->flags == L2_ADJ_UNRESOLVED) {
+ if (fib_path->l2_adj_ptr->phy_port->ipv4_list != NULL)
+ request_arp(fib_path->l2_adj_ptr->phy_port->
+ pmdid, fib_path->nh_ip);
+
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ if (L3FWD_DEBUG)
+ printf
+ ("L2_ADJ_UNRESOLVED, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ /* extract ip headers and MAC */
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+ if (L3FWD_DEBUG) {
+ printf
+ ("MAC BEFORE- DST MAC %02x:%02x:%02x:%02x:%02x:%02x, \
+ SRC MAC %02x:%02x:%02x:%02x:%02x:%02x \n",
+ eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3], eth_dest[4], eth_dest[5], eth_src[0], eth_src[1],
+ eth_src[2], eth_src[3], eth_src[4], eth_src[5]);
+ }
+ /* Rewrite the packet with L2 string */
+		memcpy(eth_dest, fib_path->l2_adj_ptr->l2_string, sizeof(struct ether_addr) * 2); /* copy precomputed DST + SRC MAC (12 bytes) */
+ if (L3FWD_DEBUG) {
+			int k = 0;
+			for (k = 0; k < 14; k++)
+				printf("%02x ",
+				       fib_path->l2_adj_ptr->l2_string[k]);
+			printf("\n");
+ printf
+ ("MAC AFTER DST MAC %02x:%02x:%02x:%02x:%02x:%02x, \
+ SRC MAC %02x:%02x:%02x:%02x:%02x:%02x\n", eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3], eth_dest[4], eth_dest[5], eth_src[0], eth_src[1], eth_src[2], eth_src[3], eth_src[4], eth_src[5]);
+ }
+ port_ptr[pos] = fib_path->l2_adj_ptr->phy_port;
+ if (L3FWD_DEBUG) {
+ printf("l3fwd_lookup API!!!!\n");
+ //print_pkt(pkt);
+ }
+
+ sent_count++;
+ stats.nb_tx_l3_pkt++;
+ if (L3FWD_DEBUG)
+ printf
+ ("Successfully sent to port %u, sent_count : %lu\n\r",
+ fib_path->out_port, sent_count);
+ }
+ return 1;
+}
+
+int is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+{
+ if (link_len < sizeof(struct ipv4_hdr))
+ return -1;
+ if (((pkt->version_ihl) >> 4) != 4)
+ return -1;
+ if ((pkt->version_ihl & 0xf) < 5)
+ return -1;
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ return -1;
+ return 0;
+}
+
+int
+get_dest_mac_for_nexthop(uint32_t next_hop_ip,
+ uint8_t out_phy_port, struct ether_addr *hw_addr)
+{
+ struct arp_entry_data *arp_data = NULL;
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = out_phy_port;
+ arp_key.ip = next_hop_ip;
+
+ arp_data = retrieve_arp_entry(arp_key);
+ if (arp_data == NULL) {
+ printf("ARP entry is not found for ip %x, port %d\n",
+ next_hop_ip, out_phy_port);
+ return 0;
+ }
+ ether_addr_copy(&arp_data->eth_addr, hw_addr);
+ return 1;
+}
+
+struct l2_adj_entry *retrieve_l2_adj_entry(struct l2_adj_key_ipv4 l2_adj_key)
+{
+ struct l2_adj_entry *ret_l2_adj_data = NULL;
+ l2_adj_key.filler1 = 0;
+ l2_adj_key.filler2 = 0;
+ l2_adj_key.filler3 = 0;
+
+ int ret =
+ rte_hash_lookup_data(l2_adj_hash_handle, &l2_adj_key,
+ (void **)&ret_l2_adj_data);
+ if (ret < 0) {
+ #ifdef L2L3_DEBUG
+ printf
+ ("L2 Adj hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ #endif
+ return NULL;
+ } else {
+ #ifdef L2L3_DEBUG
+ printf
+ ("L2 Adj hash lookup Success, Entry Already Exist ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ #endif
+ return ret_l2_adj_data;
+ }
+}
+
+void remove_fib_l2_adj_entry(void *entry)
+{
+ struct fib_info entry1;
+ memcpy(&entry1, entry, sizeof(struct fib_info));
+
+ struct fib_path *fib_path_addr = entry1.path[0]; /**< For Single path */
+ if (fib_path_addr->refcount > 1) {
+ printf
+ (" BEFORE fib_path entry, nh_ip %x, port %d, refcount %d\n",
+ fib_path_addr->nh_ip, fib_path_addr->out_port,
+ fib_path_addr->refcount);
+ fib_path_addr->refcount--; /**< Just decrement the refcount this entry is still referred*/
+ printf("AFTER fib_path entry, nh_ip %x, port %d, refcount %d\n",
+ fib_path_addr->nh_ip, fib_path_addr->out_port,
+ fib_path_addr->refcount);
+ } else {
+/**< Refcount is 1 so delete both fib_path and l2_adj_entry */
+
+ struct l2_adj_entry *adj_addr = NULL;
+ adj_addr = fib_path_addr->l2_adj_ptr;
+
+ if (adj_addr != NULL) {
+/**< l2_adj_entry has an entry in the hash table */
+ struct l2_adj_key_ipv4 l2_adj_key = {
+ .Next_hop_ip = fib_path_addr->nh_ip,
+ .out_port_id = fib_path_addr->out_port,
+ };
+ #ifdef L3FWD_DEBUG
+ printf
+ (" l2_adj_entry is removed for ip %x, port %d, refcount %d\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_addr->refcount);
+ #endif
+
+ rte_hash_del_key(l2_adj_hash_handle, &l2_adj_key);
+ rte_free(adj_addr); /**< free the memory which was allocated for Hash entry */
+ adj_addr = NULL;
+ }
+
+ struct fib_path_key_ipv4 path_key = {
+ .nh_ip = fib_path_addr->nh_ip,
+ .out_port = fib_path_addr->out_port,
+ };
+
+ printf
+ ("fib_path entry is removed for ip %x, port %d, refcount %d\n",
+ fib_path_addr->nh_ip, fib_path_addr->out_port,
+ fib_path_addr->refcount);
+ rte_hash_del_key(fib_path_hash_handle, &path_key);
+ rte_free(fib_path_addr); /**< Free the memory which was allocated for Hash entry*/
+ fib_path_addr = NULL;
+ }
+}
+
+struct l2_adj_entry *populate_l2_adj(uint32_t ipaddr, uint8_t portid)
+{
+
+ struct l2_adj_key_ipv4 l2_adj_key;
+ l2_adj_key.out_port_id = portid;
+ l2_adj_key.Next_hop_ip = ipaddr;
+ l2_adj_key.filler1 = 0;
+ l2_adj_key.filler2 = 0;
+ l2_adj_key.filler3 = 0;
+
+ struct ether_addr eth_dst;
+ struct l2_adj_entry *adj_data = NULL;
+
+	/* Populate L2 adj if the MAC Address is already present in the L2 Adj Hash Table */
+ adj_data = retrieve_l2_adj_entry(l2_adj_key);
+
+ if (adj_data) { /**< L2 Adj Entry Exists*/
+
+ printf
+ ("l2_adj_entry exists ip%x, port %d, Refcnt :%u Address :%p\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_data->refcount, adj_data);
+ ether_addr_copy(&adj_data->eth_addr, &eth_dst);
+ adj_data->refcount++;
+ printf
+ ("l2_adj_entry UPDATED Refcount for NH ip%x, port %d, Refcnt :%u Address :%p\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_data->refcount, adj_data);
+ return adj_data;
+ }
+
+ struct ether_addr eth_src;
+ l2_phy_interface_t *port;
+ //uint16_t ether_type = 0x0800;
+ port = ifm_get_port(portid);
+
+ if (port != NULL) {
+ memcpy(&eth_src, &port->macaddr, sizeof(struct ether_addr));
+ unsigned char *p = (unsigned char *)eth_src.addr_bytes;
+ printf("S-MAC %x:%x:%x:%x:%x:%x\n\r", p[0], p[1], p[2], p[3],
+ p[4], p[5]);
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return NULL;
+ }
+
+ adj_data->out_port_id = portid;
+ adj_data->Next_hop_ip = ipaddr;
+ adj_data->refcount++;
+
+ adj_data->phy_port = port;
+ memset(&adj_data->eth_addr, 0, sizeof(struct ether_addr));
+ memset(&adj_data->l2_string, 0, 256);
+
+		/**< Store the new L2 adjacency entry in the L2 Adj Hash Table */
+ rte_hash_add_key_data(l2_adj_hash_handle, &l2_adj_key,
+ adj_data);
+ #ifdef L2L3_DEBUG
+ printf
+ ("L2 adj data stored in l2_adj_entry hash table,Addr:%p\n",
+ adj_data);
+ #endif
+ } else {
+ #ifdef L2L3_DEBUG
+ printf("\n PORT %u IS DOWN...\n", portid);
+ #endif
+ return NULL;
+ }
+ /* Query ARP to get L2 Adj */
+ if (get_dest_mac_for_nexthop(ipaddr, portid, &eth_dst)) {
+ unsigned char *p = (unsigned char *)eth_dst.addr_bytes;
+ printf
+ ("ARP resolution success and stored in l2_adj_entry hash table:D-MAC %x:%x:%x:%x:%x:%x\n\r",
+ p[0], p[1], p[2], p[3], p[4], p[5]);
+
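+		/*
+		 * Precomputed L2 encap layout in l2_string: bytes 0-5 hold the
+		 * destination MAC and bytes 6-11 the source MAC; the 2-byte
+		 * EtherType copy at offset 12 is currently left commented out.
+		 */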
+ memcpy(adj_data->l2_string, &eth_dst, sizeof(struct ether_addr)); //** < Precompute the L2 String encap*/
+ memcpy(&adj_data->l2_string[6], &eth_src,
+ sizeof(struct ether_addr));
+ //memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ ether_addr_copy(&eth_dst, &adj_data->eth_addr);
+ adj_data->flags = L2_ADJ_RESOLVED;
+ } else {
+ adj_data->flags = L2_ADJ_UNRESOLVED;
+ printf
+ (" ARP resolution Failed !! , unable to write in l2_adj_entry\n");
+ }
+ return adj_data;
+}
+
+struct fib_path *populate_fib_path(uint32_t nh_ip, uint8_t portid)
+{
+
+ struct fib_path_key_ipv4 path_key;
+ path_key.out_port = portid;
+ path_key.nh_ip = nh_ip;
+ path_key.filler1 = 0;
+ path_key.filler2 = 0;
+ path_key.filler3 = 0;
+
+ struct fib_path *fib_data = NULL;
+
+ /* Populate fib_path */
+ fib_data = retrieve_fib_path_entry(path_key);
+
+ if (fib_data) {/**< fib_path entry already exists */
+
+		/* Already present in the FIB_PATH Cuckoo Hash Table */
+ printf
+ ("fib_path_entry already exists for NextHop ip: %x, port %d\n, Refcount %u Addr:%p\n",
+ fib_data->nh_ip, fib_data->out_port, fib_data->refcount,
+ fib_data);
+ fib_data->refcount++;
+ fib_data->l2_adj_ptr->refcount++;
+ printf
+ ("fib_path Refcount Updated NextHop :%x , port %u, Refcount %u\n\r",
+ fib_data->nh_ip, fib_data->out_port, fib_data->refcount);
+ return fib_data;
+ } else {
+		printf("fib_path entry doesn't exist\n");
+ }
+
+ fib_data = NULL;
+ struct l2_adj_entry *l2_adj_ptr = NULL;
+ l2_adj_ptr = populate_l2_adj(nh_ip, portid);
+
+ if (l2_adj_ptr) {
+
+ uint32_t size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct fib_path));
+		fib_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+		if (fib_data == NULL)	/* guard against allocation failure */
+			return NULL;
+
+ fib_data->out_port = portid;
+ fib_data->nh_ip = nh_ip;
+ fib_data->refcount++;
+ fib_data->l2_adj_ptr = l2_adj_ptr;
+
+ printf("%s: get port details %u %d\n\r", __FUNCTION__, portid,
+ __LINE__);
+		/* Store the new fib_path entry in the FIB path hash table */
+ int status;
+ status =
+ rte_hash_add_key_data(fib_path_hash_handle, &path_key,
+ fib_data);
+ if (status) {
+ printf
+ ("fib_path entry addition to hash table FAILED!! NextHop :%x , port %u, Refcount %u\n\r",
+ fib_data->nh_ip, fib_data->out_port,
+ fib_data->refcount);
+
+ rte_free(fib_data);
+ } else {
+ printf
+ ("fib_path entry Added into hash table for the NextHop :%x , port %u, Refcount %u\n\r",
+ fib_data->nh_ip, fib_data->out_port,
+ fib_data->refcount);
+ printf
+ (" l2_adj_entry Addr: %p, Fib_path Addr: %p, FibPath->l2ADJ Addr:%p \n",
+ l2_adj_ptr, fib_data, fib_data->l2_adj_ptr);
+ printf
+ (" ARP resolution success l2_adj_entry Addr: %p, Fib_path Addr: %p \n",
+ l2_adj_ptr, fib_data);
+ return fib_data;
+ }
+ } else {
+ printf
+ (" ARP resolution failed and unable to write fib path in fib_path cuckoo hash\n");
+ }
+ return NULL;
+}
+
+struct fib_path *retrieve_fib_path_entry(struct fib_path_key_ipv4 path_key)
+{
+	printf("FIB PATH for NextHop IP : %x, port :%u\n", path_key.nh_ip,
+ path_key.out_port);
+
+ struct fib_path *ret_fib_path_data = NULL;
+ int ret =
+ rte_hash_lookup_data(fib_path_hash_handle, &path_key,
+ (void **)&ret_fib_path_data);
+ if (ret < 0) {
+ printf
+ ("FIB PATH hash lookup Failed!! ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ return NULL;
+ } else {
+		printf("FIB PATH ALREADY Exists for NextHop IP: %x, port: %u\n",
+ path_key.nh_ip, path_key.out_port);
+ return ret_fib_path_data;
+ }
+}
+
+void iterate_cuckoo_hash_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ printf("\n\t\t\t FIB_path Cache table....");
+ printf
+ ("\n----------------------------------------------------------------");
+	printf("\n\tNextHop IP Port Refcount l2_adj_ptr_address\n");
+ printf
+ ("\n----------------------------------------------------------------\n");
+
+ while (rte_hash_iterate
+ (fib_path_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct fib_path *tmp_data = (struct fib_path *)next_data;
+ struct fib_path_key_ipv4 tmp_key;
+ memcpy(&tmp_key, next_key, sizeof(tmp_key));
+ printf("\t %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ " \t %u \t %u \t %p\n",
+ (tmp_data->nh_ip & 0xFF000000) >> 24,
+ (tmp_data->nh_ip & 0x00FF0000) >> 16,
+ (tmp_data->nh_ip & 0x0000FF00) >> 8,
+ (tmp_data->nh_ip & 0x000000FF), tmp_data->out_port,
+ tmp_data->refcount, tmp_data->l2_adj_ptr);
+
+ }
+ iter = 0;
+
+ printf("\n\t\t\t L2 ADJ Cache table.....");
+ printf
+ ("\n------------------------------------------------------------------------------------");
+ printf
+ ("\n\tNextHop IP Port \t l2 Encap string \t l2_Phy_interface\n");
+ printf
+ ("\n------------------------------------------------------------------------------------\n");
+
+ while (rte_hash_iterate
+ (l2_adj_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct l2_adj_entry *l2_data = (struct l2_adj_entry *)next_data;
+ struct l2_adj_key_ipv4 l2_key;
+ memcpy(&l2_key, next_key, sizeof(l2_key));
+ printf("\t %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ "\t %u \t%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x\t%p\n",
+ (l2_data->Next_hop_ip & 0xFF000000) >> 24,
+ (l2_data->Next_hop_ip & 0x00FF0000) >> 16,
+ (l2_data->Next_hop_ip & 0x0000FF00) >> 8,
+ (l2_data->Next_hop_ip & 0x000000FF),
+ l2_data->out_port_id, l2_data->l2_string[0],
+ l2_data->l2_string[1], l2_data->l2_string[2],
+ l2_data->l2_string[3], l2_data->l2_string[4],
+ l2_data->l2_string[5], l2_data->l2_string[6],
+ l2_data->l2_string[7], l2_data->l2_string[8],
+ l2_data->l2_string[9], l2_data->l2_string[10],
+ l2_data->l2_string[11], l2_data->phy_port);
+ }
+}
+
+void print_l3_stats(void)
+{
+ printf("==============================================\n");
+ printf("\t\t L3 STATISTICS \t\n");
+ printf("==============================================\n");
+ printf(" Num of Received L3 Pkts : %lu\n", stats.nb_rx_l3_pkt);
+ printf(" Num of Dropped L3 Pkts : %lu\n", stats.nb_l3_drop_pkt);
+ printf(" Num of Transmitted L3 Pkts : %lu\n", stats.nb_tx_l3_pkt);
+ printf(" Num of ICMP Pkts Rcvd at L3 : %lu\n", stats.nb_rx_l3_icmp_pkt);
+ printf(" Num of ICMP Pkts Tx to ICMP : %lu\n", stats.nb_tx_l3_icmp_pkt);
+ stats.total_nb_rx_l3_pkt = stats.nb_rx_l3_icmp_pkt + stats.nb_rx_l3_pkt;
+ stats.total_nb_tx_l3_pkt = stats.nb_tx_l3_icmp_pkt + stats.nb_tx_l3_pkt;
+ printf(" Total Num of Rcvd pkts at L3: %lu\n",
+ stats.total_nb_rx_l3_pkt);
+ printf(" Total Num of Sent pkts at L3: %lu\n",
+ stats.total_nb_tx_l3_pkt);
+}
+
+void
+ip_local_packets_process(struct rte_mbuf **pkt_burst, uint16_t nb_rx,
+ uint64_t icmp_pkt_mask, l2_phy_interface_t *port)
+{
+ process_arpicmp_pkt_parse(pkt_burst, nb_rx, icmp_pkt_mask, port);
+}
+
+void
+ip_forward_deliver(struct rte_mbuf **pkt_burst, uint16_t nb_pkts,
+ uint64_t ipv4_forward_pkts_mask, l2_phy_interface_t *port)
+{
+ if (L3FWD_DEBUG) {
+ printf
+		    ("ip_forward_deliver BEFORE DROP: nb_pkts: %u from in_port %u\n",
+ nb_pkts, port->pmdid);
+ }
+ uint64_t pkts_for_process = ipv4_forward_pkts_mask;
+
+ struct ipv4_hdr *ipv4_hdr;
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t hit_mask = 0;
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(pkt_burst[pos], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ /* Make sure the IPv4 packet is valid */
+ if (is_valid_ipv4_pkt(ipv4_hdr, pkt_burst[pos]->pkt_len) < 0) {
+ rte_pktmbuf_free(pkt_burst[pos]); /**< Drop the Unknown IPv4 Packet */
+ pkt_burst[pos] = NULL;
+ ipv4_forward_pkts_mask &= ~(1LLU << pos); /**< That will clear bit of that position*/
+ nb_pkts--;
+ stats.nb_l3_drop_pkt++;
+ }
+ }
+
+ if (L3FWD_DEBUG) {
+ printf
+ ("\nl3fwd_rx_ipv4_packets_received AFTER DROP: nb_pkts: %u, valid_Pkts_mask :%lu\n",
+ nb_pkts, ipv4_forward_pkts_mask);
+ }
+
+ /* Lookup for IP destination in LPMv4 table */
+ lpm4_table_lookup(pkt_burst, nb_pkts, ipv4_forward_pkts_mask, port_ptr,
+ &hit_mask);
+
+ for (; hit_mask;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(hit_mask);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ hit_mask &= ~pkt_mask; /**< remove this packet from the mask */
+
+ port_ptr[pos]->transmit_single_pkt(port_ptr[pos],
+ pkt_burst[pos]);
+ }
+
+}
+
+void
+l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *port))
+{
+ switch (protocol_type) {
+ case IPPROTO_ICMP:
+ proto_type[IP_LOCAL] =
+ rte_malloc(NULL, sizeof(struct ip_protocol_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_type[IP_LOCAL]->protocol_type = protocol_type;
+ proto_type[IP_LOCAL]->func = func;
+ break;
+
+	case IPPROTO_TCP:	// For the time being, treated as remote forwarding
+ case IPPROTO_UDP:
+ proto_type[IP_REMOTE] =
+ rte_malloc(NULL, sizeof(struct ip_protocol_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_type[IP_REMOTE]->protocol_type = protocol_type;
+ proto_type[IP_REMOTE]->func = func;
+ break;
+
+ }
+
+}
+
+void l3fwd_rx_ipv4_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask, l2_phy_interface_t *port)
+{
+ if (L3FWD_DEBUG) {
+ printf
+		    ("l3fwd_rx_ipv4_packets_received BEFORE DROP: nb_pkts: %u from in_port %u\n",
+ nb_pkts, port->pmdid);
+ }
+ uint64_t pkts_for_process = valid_pkts_mask;
+
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t configure_port_ip = 0;
+ uint64_t icmp_pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+ uint64_t ipv4_forward_pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+ uint16_t nb_icmp_pkt = 0;
+ uint16_t nb_l3_pkt = 0;
+
+ if (port->ipv4_list != NULL)
+ configure_port_ip =
+ (uint32_t) (((ipv4list_t *) (port->ipv4_list))->ipaddr);
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(m[pos], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+
+ if ((ipv4_hdr->next_proto_id == IPPROTO_ICMP)
+ && (ipv4_hdr->dst_addr == configure_port_ip)) {
+ ipv4_forward_pkts_mask &= ~pkt_mask; /**< Its ICMP, remove this packet from the ipv4_forward_pkts_mask*/
+ stats.nb_rx_l3_icmp_pkt++; /**< Increment stats for ICMP PKT */
+ nb_icmp_pkt++;
+ } else{ // Forward the packet
+ icmp_pkts_mask &= ~pkt_mask; /**< Not ICMP, remove this packet from the icmp_pkts_mask*/
+ stats.nb_rx_l3_pkt++;
+ nb_l3_pkt++; /**< Increment stats for L3 PKT */
+ }
+ }
+
+ if (icmp_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+			    ("\n RECEIVED LOCAL ICMP PKT at L3...\n PROCESSING ICMP LOCAL PKT...\n");
+ proto_type[IP_LOCAL]->func(m, nb_icmp_pkt, icmp_pkts_mask,
+ port);
+ }
+
+ if (ipv4_forward_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+ ("\n RECEIVED L3 PKT, \n\n FORWARDING L3 PKT....\n");
+ proto_type[IP_REMOTE]->func(m, nb_l3_pkt,
+ ipv4_forward_pkts_mask, port);
+ }
+}
+
+void
+resolve_l2_adj(uint32_t nexthop_ip, uint8_t out_port_id,
+ const struct ether_addr *hw_addr)
+{
+ struct l2_adj_key_ipv4 l2_adj_key = {
+ .Next_hop_ip = nexthop_ip,
+ .out_port_id = out_port_id,
+ };
+ //uint16_t ether_type = 0x0800;
+
+ struct l2_adj_entry *adj_data = retrieve_l2_adj_entry(l2_adj_key);
+
+ if (adj_data) { /**< L2 Adj Entry Exists*/
+
+ printf
+ ("l2_adj_entry exists ip%x, port %d, Refcnt :%u Address :%p\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_data->refcount, adj_data);
+
+ if (adj_data->flags == L2_ADJ_UNRESOLVED
+ || memcmp(hw_addr, &adj_data->eth_addr,
+ sizeof(struct ether_addr))) {
+ memcpy(adj_data->l2_string, hw_addr, sizeof(struct ether_addr)); //** < Precompute the L2 String encap*/
+ memcpy(&adj_data->l2_string[6],
+ &adj_data->phy_port->macaddr,
+ sizeof(struct ether_addr));
+ //memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+ adj_data->flags = L2_ADJ_RESOLVED;
+ }
+
+ return;
+ }
+
+ l2_phy_interface_t *port;
+ port = ifm_get_port(out_port_id);
+ if (port != NULL) {
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return;
+ }
+
+ adj_data->out_port_id = out_port_id;
+ adj_data->Next_hop_ip = nexthop_ip;
+ adj_data->phy_port = port;
+
+ memcpy(adj_data->l2_string, hw_addr, sizeof(struct ether_addr)); //** < Precompute the L2 String encap*/
+ memcpy(&adj_data->l2_string[6], &adj_data->phy_port->macaddr,
+ sizeof(struct ether_addr));
+ //memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+ adj_data->flags = L2_ADJ_RESOLVED;
+
+ rte_hash_add_key_data(l2_adj_hash_handle, &l2_adj_key,
+ adj_data);
+ printf
+ ("L2 adj data stored in l2_adj_entry hash table,Addr:%p\n",
+ adj_data);
+ } else
+ printf("PORT:%u IS DOWN...\n", out_port_id);
+
+ return;
+}
+
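+/*
+ * ECMP load-balance hash (summary added for clarity): the function folds the
+ * source and destination IPv4 addresses as
+ *	k = src ^ dst; k ^= rotr32(k, 16); k ^= rotr32(k, 8);
+ * and returns k masked to HASH_BUCKET_SIZE - 1. For example, if
+ * src ^ dst == 0x00000003, the folds give 0x00030003 and then 0x03030303,
+ * so the packet maps to bucket 3.
+ */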
+uint8_t ip_hash_load_balance(struct rte_mbuf *mbuf)
+{
+ uint32_t src_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST;
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr = NULL;
+ uint32_t *src_addr = NULL;
+ src_addr =
+ (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, src_addr_offset);
+ dst_addr =
+ (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, dst_addr_offset);
+
+ uint32_t hash_key1 = *src_addr; /* STORE SRC IP in key1 variable */
+ uint32_t hash_key2 = *dst_addr; /* STORE DST IP in key variable */
+
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR With SRC and DST IP, Result is hask_key1 */
+ hash_key2 = hash_key1; /* MOVE The result to hask_key2 */
+
+ hash_key1 = rotr32(hash_key1, 16); /* Circular Rotate to 16 bit */
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR With Key1 with Key2 */
+
+ hash_key2 = hash_key1; /* MOVE The result to hask_key2 */
+
+ hash_key1 = rotr32(hash_key1, 8); /* Circular Rotate to 8 bit */
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR With Key1 with Key2 */
+
+ hash_key1 = hash_key1 & (HASH_BUCKET_SIZE - 1); /* MASK the KEY with BUCKET SIZE */
+ if (L3FWD_DEBUG)
+ printf("Hash Result_key: %d, \n", hash_key1);
+ return hash_key1;
+}
+
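+/*
+ * Portable 32-bit rotate-right: masking count and using (-count) & mask keeps
+ * both shift amounts in the range 0..31, so a rotation by 0 (or a multiple of
+ * 32) does not trigger an undefined full-width shift.
+ */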
+uint32_t rotr32(uint32_t value, unsigned int count)
+{
+ const unsigned int mask = (CHAR_BIT * sizeof(value) - 1);
+ count &= mask;
+ return (value >> count) | (value << ((-count) & mask));
+}
+
+void
+ip_local_out_deliver(struct rte_mbuf **pkt_burst, uint16_t nb_rx,
+ uint64_t ipv4_pkts_mask, l2_phy_interface_t *port)
+{
+ ip_forward_deliver(pkt_burst, nb_rx, ipv4_pkts_mask, port);
+}
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm4.h b/common/VIL/l2l3_stack/l3fwd_lpm4.h
new file mode 100644
index 00000000..69e62368
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm4.h
@@ -0,0 +1,374 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+* @file
+* L3fwd lpm4 header file is for IPv4 specific declarations
+*/
+#ifndef L3FWD_LPM_H
+#define L3FWD_LPM_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <stdbool.h>
+
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+#include "l3fwd_common.h"
+#include "l3fwd_lpm6.h"
+#include "interface.h"
+
+/**
+* Define all RTE MBUF offset size
+*/
+
+#define MBUF_HDR_ROOM 256 /**< MBUF HEADER ROOM OFFSET */
+
+/* IPv4 */
+#define ETH_HDR_SIZE 14	/**< ETHERNET HEADER SIZE */
+#define IP_HDR_SIZE 20	/**< IP HEADER SIZE */
+#define IP_HDR_DST_ADR_OFST 16 /**< IP HEADER DST IP ADDRESS OFFSET */
+#define IP_HDR_SRC_ADR_OFST 12 /**< IP HEADER SRC IP ADDRESS OFFSET */
+
+/* Rules and Tables8s */
+#define IPV4_L3FWD_LPM_MAX_RULES 256 /**< Number of LPM RULES */
+#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8) /**< Number of TABLE 8s for LPM */
+#define MAX_FIB_PATHS 8 /**< MAX FIB PATH, If ECMP feature is enabled */
+#define IP_LOCAL 0 /**< for ICMP Packet destined to Local */
+#define IP_REMOTE 1	/**< for Packets destined to a remote host (to be forwarded) */
+
+/* ECMP MACROS */
+#define MAX_SUPPORTED_FIB_PATHS 8 /**< for ECMP max supported FIB Paths */
+#define HASH_BUCKET_SIZE 64 /**< size of HASH bucket for ECMP */
+
+/* L2 Adjacency Macro */
+#define L2_ADJ_RESOLVED 0x00	/**< Macro to mark an L2 adjacency as Resolved */
+#define L2_ADJ_UNRESOLVED 0x01	/**< Macro to mark an L2 adjacency as Unresolved */
+/**
+* A structure used to define the routing information for IPv4
+* This structure is used as input parameters for route ADD
+*/
+struct routing_info {
+ uint32_t dst_ip_addr; /**< DST IP Address */
+ uint8_t depth; /**< Depth */
+ uint32_t metric; /**< Metrics */
+	uint32_t fib_nh_size; /**< num of fib paths, greater than 1 if the Multipath (ECMP) feature is supported */
+ uint32_t nh_ip_addr[MAX_FIB_PATHS]; /**< NextHop IP Address */
+ uint8_t out_port[MAX_FIB_PATHS]; /**< OUTGOING PORT */
+} __rte_cache_aligned;
+
+/**
+* A structure used to define the fib path for Destination IP Address
+* This fib path is shared across different fib_info entries.
+*/
+struct fib_path {
+ uint32_t nh_ip; /**< Next hop IP address (only valid for remote routes) */
+ uint8_t out_port; /**< Output port */
+	uint32_t refcount; /**< Refcount, greater than 1 if multiple fib_info entries share the same fib_path */
+ struct l2_adj_entry *l2_adj_ptr; /**< Address of the L2 ADJ table entry */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the fib info (Route info)
+* This fib info structure can have multiple fib paths.
+*/
+struct fib_info {
+ uint32_t dst_ip_addr; /**< DST IP Address */
+ uint32_t metric; /**< Metrics */
+	uint32_t fib_nh_size; /**< num of fib paths, greater than 1 if the Multipath (ECMP) feature is supported */
+ uint8_t depth; /**< Depth */
+ struct fib_path *path[MAX_FIB_PATHS]; /**< Array of pointers to the fib_path */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L2 Adjacency table
+*/
+struct l2_adj_entry {
+ struct ether_addr eth_addr; /**< Ether address */
+ uint32_t Next_hop_ip; /**< Next hop IP address (only valid for remote routes) */
+ uint8_t out_port_id; /**< Output port */
+	uint32_t refcount; /**< Refcount, greater than 1 if multiple fib_path entries share the same l2_adj_entry */
+ uint8_t l2_string[256]; /**< L2 string, to rewrite the packet before transmission */
+ l2_phy_interface_t *phy_port; /**< Address of the L2 physical interface structure */
+ uint8_t flags; /**< Set to unresolved, when ARP entry not available. Set to resolved, when ARP is available */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
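+
+/*
+ * Relationship between the tables (summary added for clarity): each LPM entry
+ * is a fib_info, which points at up to MAX_FIB_PATHS fib_path entries (one per
+ * next hop); each fib_path in turn points at one l2_adj_entry, which caches
+ * the resolved MAC addresses and the precomputed Ethernet header used to
+ * rewrite forwarded packets.
+ */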
+
+/**
+* A structure used to define the fib path key for hash table
+*/
+struct fib_path_key_ipv4 {
+ uint32_t nh_ip; /**< Next hop IP address */
+ uint8_t out_port; /**< Output port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+/**
+* A structure used to define the fib path key for hash table
+*/
+struct l2_adj_key_ipv4 {
+ uint32_t Next_hop_ip; /**< Next hop IP address */
+ uint8_t out_port_id; /**< Output port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+/**
+* A structure used to hold the fib info after LPM Lookup
+*/
+struct routing_table_entry {
+ uint32_t ip; /**< Next hop IP address (only valid for remote routes) */
+ uint8_t port_id; /**< Output port ID */
+ struct l2_adj_entry *l2_adj_ptr; /**< Address of L2 Adjacency table entry */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L3 counter statistics
+*/
+typedef struct l3fwd_stats {
+ uint64_t nb_rx_l3_pkt; /**< Num of L3 pkts Received */
+ uint64_t nb_tx_l3_pkt; /**< Num of L3 pkts Transmitted */
+ uint64_t nb_rx_l3_icmp_pkt;
+ /**< Num of ICMP pkts Received at L3*/
+ uint64_t nb_tx_l3_icmp_pkt;
+ /**< Num of ICMP pkts Transmitted at L3*/
+ uint64_t nb_l3_drop_pkt; /**< Num of L3 Packets Dropped*/
+ uint64_t total_nb_rx_l3_pkt;
+ /**< Total Num of L3 Packets received, includes ICMP Pkt*/
+ uint64_t total_nb_tx_l3_pkt;
+ /**< Total Num of L3 Packets Transmitted, includes ICMP Pkt*/
+} l3_stats_t;
+
+struct ip_protocol_type {
+ uint8_t protocol_type; /**< Protocol Type */
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+} __rte_cache_aligned;
+
+/* Function Declarations */
+
+/**
+ * To create the LPM table and the Cuckoo hash tables for fib_path and l2_adj_entry
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm_init(void);
+
+/**
+ * To add a route in LPM table by populating fib_path and L2 Adjacency.
+ * @param input_array
+ *	Route to add, described by the routing_info structure.
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm4_table_route_add(struct routing_info *input_array);
+
+/**
+ * To Delete the IP route and corresponding fib_path and L2 Adjacency entries.
+ * @param ip
+ *	Destination IP for which the route needs to be deleted
+ * @param depth
+ *	netmask (prefix length) for the Destination IP
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm4_table_route_delete(uint32_t ip, uint8_t depth);
+
+/**
+ * To perform a LPM table lookup
+ * @param pkts_burst
+ * Burst of packets that needs to be lookup in LPM table
+ * @param nb_pkts
+ * number of packets that needs to be lookup in LPM table
+ * @param valid_pkts_mask
+ *	mask of valid IPv4 packets to look up
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm4_table_lookup(struct rte_mbuf **pkts_burst, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask,
+ l2_phy_interface_t *port[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask);
+
+/**
+ * To Verify whether the received IPv4 Packet is valid or not
+ * @param pkt
+ *	packet pointing to the IPv4 header that needs to be verified
+ * @param link_len
+ *	length of the IPv4 Pkt
+ * @return
+ *	-1 if the packet is invalid, 0 if valid
+*/
+int is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
+
+/**
+ * To forward the valid L3 packets for LPM table lookup and forward ICMP Pkts to the ICMP module
+ * @param m
+ * packet burst of type rte_mbuf
+ * @param nb_pkts
+ * Number of valid L3 packets
+ * @param pkt_mask
+ * Valid IPv4 packets mask that needs to be processed
+ * @param port
+ *	IPv4 Pkt received from the input port structure.
+ * @return
+ *	None
+*/
+void l3fwd_rx_ipv4_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t pkt_mask, l2_phy_interface_t *port);
+
+/**
+ * To get the destination MAC Address for the nexthop IP and outgoing port
+ * @param next_hop_ip
+ * Next HOP IP Address for which MAC address is needed
+ * @param out_phy_port
+ * Outgoing physical port
+ * @param hw_addr
+ * pointer to the ether_add, This gets update with valid MAC address based on nh_ip and out port
+ * @return
+ * 0 if failure, 1 if success
+ */
+int get_dest_mac_for_nexthop(uint32_t next_hop_ip,
+ uint8_t out_phy_port, struct ether_addr *hw_addr);
+/**
+ * To retrieve the l2_adj_entry for the nexthop IP and outgoing port
+ * This queries with cuckoo hash table based on the l2_adj_key_ipv4
+ * @param l2_adj_key
+ *	Key which is required for the Cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of the L2_adj_entry if lookup success
+*/
+
+struct l2_adj_entry *retrieve_l2_adj_entry(struct l2_adj_key_ipv4 l2_adj_key);
+
+/**
+ * To populate the l2_adj_entry for the nexthop IP and outgoing port
+ * @param ipaddr
+ * NextHop Ip Address for which L2_adj_entry needs to be populated
+ * @param portid
+ *	outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the L2_adj_entry if lookup success
+*/
+
+struct l2_adj_entry *populate_l2_adj(uint32_t ipaddr, uint8_t portid);
+
+/**
+ * To populate the fib_path for the nexthop IP and outgoing port
+ * @param nh_ip
+ * NextHop Ip Address for which L2_adj_entry needs to be populated
+ * @param portid
+ *	outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the type fib_path if lookup success
+*/
+struct fib_path *populate_fib_path(uint32_t nh_ip, uint8_t portid);
+
+/**
+ * To retrieve the fib_path entry for the nexthop IP and outgoing port
+ * This queries with cuckoo hash table based on the fib_path_key_ipv4
+ * @param path_key
+ *	Key which is required for the Cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of type fib_path if lookup success
+*/
+
+struct fib_path *retrieve_fib_path_entry(struct fib_path_key_ipv4 path_key);
+
+/**
+ * To delete the fib path and l2 adjacency entry from the cuckoo hash table
+ * @return
+ * None
+*/
+void remove_fib_l2_adj_entry(void *);
+
+/**
+ * To iterate the cuckoo hash table for fib_path and l2_adj_entry and print the table contents
+ * @return
+ * None
+*/
+void iterate_cuckoo_hash_table(void);
+
+/**
+ * To print the L3 counter statistics
+ * @return
+ * None
+*/
+void print_l3_stats(void);
+
+/**
+ * To get the hash resultant value based on SRC IP and DST IP
+ * @param mbuf
+ * packet of type rte_mbuf
+ * @return
+ *	hash bucket index in the range [0, HASH_BUCKET_SIZE)
+ */
+
+uint8_t ip_hash_load_balance(struct rte_mbuf *mbuf);
+
+/**
+ * Rotates value right (circularly) by count bits
+ * @param value
+ *	an integer value
+ * @param count
+ *	number of bit positions to rotate by
+ * @return
+ *	the rotated value
+ */
+
+uint32_t rotr32(uint32_t value, unsigned int count);
+
+void
+resolve_l2_adj(uint32_t nexthop_ip, uint8_t out_port_id,
+ const struct ether_addr *hw_addr);
+
+void
+l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *));
+
+void
+ip_local_packets_process(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+void ip_local_out_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+void
+ip_forward_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+#endif /* L3FWD_LPM_H */
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm6.c b/common/VIL/l2l3_stack/l3fwd_lpm6.c
new file mode 100644
index 00000000..7aa7fb6a
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm6.c
@@ -0,0 +1,1058 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "interface.h"
+#include "l2_proto.h"
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+
+/* Declare Global variables */
+
+/* Global for IPV6 */
+void *lpm6_table; /**< lpm6 table handler */
+struct rte_hash *l2_adj_ipv6_hash_handle; /**< IPv6 l2 adjacency table handler */
+struct rte_hash *fib_path_ipv6_hash_handle; /**< IPv6 fib path hash table handler */
+extern uint8_t nh_links[MAX_SUPPORTED_FIB_PATHS][HASH_BUCKET_SIZE];
+extern l3_stats_t stats; /**< L3 statistics */
+
+static struct ipv6_protocol_type *proto_type[2];
+
+int lpm6_init(void)
+{
+
+	/* Initialize LPMv6 params */
+
+ struct rte_table_lpm_ipv6_params lpm6_params = {
+ .name = "LPMv6",
+ .n_rules = IPV6_L3FWD_LPM_MAX_RULES,
+ .number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S,
+ .entry_unique_size = sizeof(struct ipv6_fib_info),
+ .offset = 128,
+ };
+
+ /* Create LPMv6 tables */
+ lpm6_table =
+ rte_table_lpm_ipv6_ops.f_create(&lpm6_params, rte_socket_id(),
+ sizeof(struct ipv6_fib_info));
+ if (lpm6_table == NULL) {
+ printf("Failed to create LPM IPV6 table\n");
+ return 0;
+ }
+
+ /*Initialize IPv6 params for l2 Adj */
+ struct rte_hash_parameters l2_adj_ipv6_params = {
+ .name = "l2_ADJ_IPV6_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct l2_adj_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ };
+
+ l2_adj_ipv6_hash_handle = rte_hash_create(&l2_adj_ipv6_params);
+ if (l2_adj_ipv6_hash_handle == NULL) {
+ printf("ND for IPV6 rte_hash_create failed.\n");
+ return 0;
+ } else {
+ printf("ND IPV6_hash_handle %p\n\n",
+ (void *)l2_adj_ipv6_hash_handle);
+ }
+
+	/* Initialize FIB path hash params */
+ struct rte_hash_parameters fib_path_ipv6_params = {
+ .name = "FIB_PATH_IPV6_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct fib_path_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ .extra_flag = 1,
+ };
+
+ /* Create FIB PATH Hash tables */
+ fib_path_ipv6_hash_handle = rte_hash_create(&fib_path_ipv6_params);
+
+ if (fib_path_ipv6_hash_handle == NULL) {
+ printf("FIB path rte_hash_create failed\n");
+ return 0;
+ }
+ return 1;
+}
+
+int lpm6_table_route_add(struct ipv6_routing_info *data)
+{
+
+ struct ipv6_routing_info *fib = data;
+ /* Populate the Key */
+ struct rte_table_lpm_ipv6_key lpm6_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ lpm6_key.ip[i] = fib->dst_ipv6[i];
+ }
+ lpm6_key.depth = fib->depth;
+
+ static int Total_route_count;
+ struct ipv6_fib_info entry;
+ for (i = 0; i < 16; i++) {
+ entry.dst_ipv6[i] = fib->dst_ipv6[i];
+ }
+ entry.depth = fib->depth;
+ entry.fib_nh_size = fib->fib_nh_size;
+
+#if MULTIPATH_FEAT
+ if (entry.fib_nh_size == 0 || entry.fib_nh_size > MAX_FIB_PATHS)
+#else
+ if (entry.fib_nh_size != 1) /**< For Single FIB_PATH */
+#endif
+ {
+ printf
+		    ("Route can't be configured!!, entry.fib_nh_size = %d\n",
+ entry.fib_nh_size);
+ return 0;
+ }
+
+ /* Populate L2 adj and precomputes l2 encap string */
+#if MULTIPATH_FEAT
+ for (i = 0; i < entry.fib_nh_size; i++)
+#else
+ for (i = 0; i < 1; i++)
+#endif
+ {
+ struct ipv6_fib_path *ipv6_fib_path_addr = NULL;
+ ipv6_fib_path_addr =
+ populate_ipv6_fib_path(fib->nh_ipv6[i], fib->out_port[i]);
+
+ if (ipv6_fib_path_addr) {
+ entry.path[i] = ipv6_fib_path_addr;
+ printf("Fib path for IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+			       "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u) ==> fib_path Addr :%p, L2_adj Addr :%p\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2],
+ lpm6_key.ip[3], lpm6_key.ip[4], lpm6_key.ip[5],
+ lpm6_key.ip[6], lpm6_key.ip[7], lpm6_key.ip[8],
+ lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13],
+ lpm6_key.ip[14], lpm6_key.ip[15], fib->depth,
+ ipv6_fib_path_addr,
+ (void *)entry.path[i]->l2_adj_ipv6_ptr);
+ } else {
+ printf("Fib path for IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u) ==> fib_path Addr : NULL\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2],
+ lpm6_key.ip[3], lpm6_key.ip[4], lpm6_key.ip[5],
+ lpm6_key.ip[6], lpm6_key.ip[7], lpm6_key.ip[8],
+ lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13],
+ lpm6_key.ip[14], lpm6_key.ip[15], fib->depth);
+ entry.path[i] = NULL; /**< setting all other fib_paths to NULL */
+ }
+ }
+
+ int key_found, ret;
+ void *entry_ptr;
+
+ /* Adding a IP route in LPMv6 table */
+ printf("%s, Line %u \n", __FUNCTION__, __LINE__);
+
+ ret =
+ rte_table_lpm_ipv6_ops.f_add(lpm6_table, (void *)&lpm6_key, &entry,
+ &key_found, &entry_ptr);
+ printf("%s, Line %u \n", __FUNCTION__, __LINE__);
+
+ if (ret) {
+ printf("Failed to Add IP route in LPMv6\n");
+ return 0;
+ }
+ printf("Added route to IPv6 LPM table (IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u)\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2], lpm6_key.ip[3],
+ lpm6_key.ip[4], lpm6_key.ip[5], lpm6_key.ip[6], lpm6_key.ip[7],
+ lpm6_key.ip[8], lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13], lpm6_key.ip[14],
+ lpm6_key.ip[15], fib->depth);
+
+ Total_route_count++;
+	printf("Total Routes Added : %u, Key_found: %d\n", Total_route_count,
+ key_found);
+
+ if (Total_route_count == 2)
+ ipv6_iterate__hash_table();
+
+ return 1;
+}
+
+int
+lpm6_table_route_delete(uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE], uint8_t depth)
+{
+
+ /* Populate the Key */
+ struct rte_table_lpm_ipv6_key lpm6_key;
+	memcpy(lpm6_key.ip, dst_ipv6, RTE_LPM_IPV6_ADDR_SIZE);	/* copy all 16 bytes of the destination prefix */
+ lpm6_key.depth = depth;
+ int key_found, ret;
+ char *entry = NULL;
+	entry = rte_zmalloc(NULL, 512, RTE_CACHE_LINE_SIZE);
+	if (entry == NULL)	/* guard against allocation failure */
+		return 0;
+ /* Delete a IP route in LPMv6 table */
+ ret =
+ rte_table_lpm_ipv6_ops.f_delete(lpm6_table, &lpm6_key, &key_found,
+ entry);
+
+ if (ret) {
+ printf("Failed to Delete IP route from LPMv6 table\n");
+ return 0;
+ }
+
+ printf("Deleted route from IPv6 LPM table (IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+	       "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u), key_found = %d\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2], lpm6_key.ip[3],
+ lpm6_key.ip[4], lpm6_key.ip[5], lpm6_key.ip[6], lpm6_key.ip[7],
+ lpm6_key.ip[8], lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13], lpm6_key.ip[14],
+ lpm6_key.ip[15], lpm6_key.depth, key_found);
+
+ /* Deleting a L2 Adj entry if refcount is 1, Else decrement Refcount */
+ remove_ipv6_fib_l2_adj_entry(entry);
+ rte_free(entry); // free memory
+ return 1;
+}
+
+int
+lpm6_table_lookup(struct rte_mbuf **pkts_burst,
+ uint16_t nb_pkts,
+ uint64_t pkts_mask,
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask)
+{
+ struct ipv6_routing_table_entry
+ *ipv6_entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t lookup_hit_mask_ipv6 = 0;
+ int status;
+ uint64_t lookup_miss_mask = pkts_mask;
+ /*Populate the key offset in META DATA */
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST_IPV6;
+ uint64_t pkts_key_mask = pkts_mask;
+
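+	/*
+	 * The 16-byte destination address of each valid packet is copied into
+	 * the mbuf metadata at offset 128, where the LPMv6 table expects its
+	 * lookup key (see lpm6_params.offset in lpm6_init()).
+	 */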
+ //for(i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
+ for (; pkts_key_mask;) {
+/**< Populate key offset in META DATA for all valid pkts */
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_key_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ pkts_key_mask &= ~pkt_mask;
+
+ uint8_t *lpm6_key;
+ uint8_t dst_addr[RTE_LPM_IPV6_ADDR_SIZE];
+ memcpy(dst_addr,
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(pkts_burst[pos],
+ dst_addr_offset),
+ RTE_LPM_IPV6_ADDR_SIZE);
+ lpm6_key =
+ (uint8_t *) RTE_MBUF_METADATA_UINT8_PTR(pkts_burst[pos],
+ 128);
+ memcpy(lpm6_key, dst_addr, RTE_LPM_IPV6_ADDR_SIZE);
+ }
+ /* Lookup for IP route in LPM6 table */
+ printf(" IPV6 Lookup Mask Before = %p, nb_pkts :%u\n",
+ (void *)pkts_mask, nb_pkts);
+ status =
+	    rte_table_lpm_ipv6_ops.f_lookup(lpm6_table, pkts_burst, pkts_mask,
+ &lookup_hit_mask_ipv6,
+ (void **)ipv6_entries);
+ if (status) {
+ printf("LPM Lookup failed for IP route\n");
+ return 0;
+ }
+ printf(" IPV6 Lookup Mask After = %p\n", (void *)lookup_hit_mask_ipv6);
+ lookup_miss_mask = lookup_miss_mask & (~lookup_hit_mask_ipv6);
+ if (L3FWD_DEBUG) {
+ printf("AFTER lookup_hit_mask = %p, lookup_miss_mask =%p\n",
+ (void *)lookup_hit_mask_ipv6, (void *)lookup_miss_mask);
+ }
+
+ for (; lookup_miss_mask;) {
+/**< Drop packets for lookup_miss_mask */
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_miss_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_miss_mask &= ~pkt_mask;
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ if (L3FWD_DEBUG)
+			printf("\n DROP PKT IPV6 Lookup_miss_Mask = %p\n",
+ (void *)lookup_miss_mask);
+
+ }
+ *hit_mask = lookup_hit_mask_ipv6;
+ for (; lookup_hit_mask_ipv6;) {
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_hit_mask_ipv6);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_hit_mask_ipv6 &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts_burst[pos];
+
+ struct ipv6_fib_info *entry =
+ (struct ipv6_fib_info *)ipv6_entries[pos];
+
+#if MULTIPATH_FEAT
+
+ uint8_t ecmp_path = ipv6_hash_load_balance(pkts_burst[pos]);
+ uint8_t selected_path = 0;
+ struct ipv6_fib_path *fib_path = NULL;
+ if (((entry->fib_nh_size != 0)
+ && (entry->fib_nh_size - 1) < MAX_SUPPORTED_FIB_PATHS)
+ && ((ecmp_path != 0) && (ecmp_path - 1) < HASH_BUCKET_SIZE))
+ selected_path =
+ nh_links[entry->fib_nh_size - 1][ecmp_path - 1];
+ if (selected_path < MAX_FIB_PATHS)
+ fib_path = entry->path[selected_path];
+ printf
+ ("Total supported Path :%u, Hashed ECMP Key : %u, selected Fib_path: %u\n",
+ entry->fib_nh_size, ecmp_path, selected_path);
+#else
+ struct ipv6_fib_path *fib_path = entry->path[0];
+#endif
+ if (fib_path == NULL) {
+ printf("Fib_path is NULL, ND has not resolved\n");
+ rte_pktmbuf_free(pkt);
+ pkts_burst[pos] = NULL;
+ stats.nb_l3_drop_pkt++; /**< Peg the L3 Drop counter */
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ printf
+ ("Fib_path is NULL, ND has not resolved, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ if (fib_path->l2_adj_ipv6_ptr->flags == L2_ADJ_UNRESOLVED) {
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ if (L3FWD_DEBUG)
+ printf
+ ("L2_ADJ_UNRESOLVED, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+ if (L3FWD_DEBUG) {
+ printf
+ ("MAC BEFORE- DST MAC %02x:%02x:%02x:%02x"
+ ":%02x:%02x, "
+ "SRC MAC %02x:%02x:%02x:%02x:"
+ "%02x:%02x \n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3],
+ eth_dest[4], eth_dest[5], eth_src[0],
+ eth_src[1],
+ eth_src[2], eth_src[3],
+ eth_src[4], eth_src[5]);
+ }
+
+ /* Rewrite the packet with L2 string */
+ memcpy(eth_dest, fib_path->l2_adj_ipv6_ptr->l2_string,
+ sizeof(struct ether_addr) * 2 + 2);
+
+ if (L3FWD_DEBUG) {
+ printf
+ ("MAC AFTER DST MAC %02x:%02x:%02x:%02x:%02x:%02x,"
+ "SRC MAC %02x:%02x:%02x:%02x:"
+ "%02x:%02x\n", eth_dest[0],
+ eth_dest[1], eth_dest[2], eth_dest[3],
+ eth_dest[4],
+ eth_dest[5], eth_src[0], eth_src[1],
+ eth_src[2],
+ eth_src[3], eth_src[4], eth_src[5]);
+ }
+ port_ptr[pos] = fib_path->l2_adj_ipv6_ptr->phy_port;
+
+ //fib_path->l2_adj_ipv6_ptr->phy_port->transmit_single_pkt(fib_path->l2_adj_ipv6_ptr->phy_port, pkt);
+ if (L3FWD_DEBUG)
+ printf("Successfully sent to port %u \n\r",
+ fib_path->out_port);
+ }
+ return 1;
+}
+
+void l3fwd_rx_ipv6_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask, l2_phy_interface_t *port)
+{
+ if (!port)
+ return;
+ if (L3FWD_DEBUG) {
+ printf
+ ("l3fwd_rx_ipv6_packets_received BEFORE DROP: nb_pkts: %u, from in_port %u, valid_pkts_mask:%"
+ PRIu64 "\n", nb_pkts, port->pmdid, valid_pkts_mask);
+ }
+ uint64_t pkts_for_process = valid_pkts_mask;
+
+ struct ipv6_hdr *ipv6_hdr;
+ //struct ether_hdr *eth_h;
+ uint64_t icmp_pkts_mask = valid_pkts_mask;
+ uint64_t ipv6_forward_pkts_mask = valid_pkts_mask;
+ uint16_t nb_icmpv6_pkt = 0;
+ uint16_t nb_l3_pkt = 0;
+
+ uint8_t configured_port_ipv6[RTE_LPM_IPV6_ADDR_SIZE] = { 0 };
+ int8_t solicited_node_multicast_addr[RTE_LPM_IPV6_ADDR_SIZE] = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xff, 0x00, 0x00, 0x00 };
+ uint8_t dest_ipv6_addr[RTE_LPM_IPV6_ADDR_SIZE];
+
+ memset(dest_ipv6_addr, 0, RTE_LPM_IPV6_ADDR_SIZE);
+
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ int ii;
+ if (port->ipv6_list != NULL) {
+ for (ii = 0; ii < 16; ii += 1) {
+ configured_port_ipv6[ii] =
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr[ii];
+ }
+ }
+ // memcpy(&configured_port_ipv6, &(((ipv6list_t*)(port->ipv6_list))->ipaddr), RTE_LPM_IPV6_ADDR_SIZE);
+
+ for (ii = 0; ii < 16; ii += 2) {
+ if (port && port->ipv6_list)
+ printf("%02X%02X ",
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr[ii],
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr[ii +
+ 1]);
+ }
+
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", configured_port_ipv6[ii],
+ configured_port_ipv6[ii + 1]);
+ }
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ //printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ //eth_h = rte_pktmbuf_mtod(m[pos], struct ether_hdr *);
+ printf("\n%s : LINE #%u, POS%u\n", __FUNCTION__, __LINE__,
+ pos);
+ //ipv6_hdr = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ if (m[pos] == NULL) {
+			printf("\n%s : m[pos] is NULL, LINE: %u\n",
+ __FUNCTION__, __LINE__);
+ return;
+ }
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(m[pos], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
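+		/*
+		 * Copy only the first 13 bytes of the destination address: the
+		 * solicited-node multicast prefix ff02::1:ff00:0/104 takes its
+		 * final 24 bits from the target address, so those bytes are
+		 * left zeroed (dest_ipv6_addr was memset above) before the
+		 * memcmp against solicited_node_multicast_addr below.
+		 */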
+ for (ii = 0; ii < 13; ii += 1) {
+ dest_ipv6_addr[ii] = ipv6_hdr->dst_addr[ii];
+ }
+
+ printf("\n");
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", ipv6_hdr->dst_addr[ii],
+ ipv6_hdr->dst_addr[ii + 1]);
+ }
+ printf("\n");
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", dest_ipv6_addr[ii],
+ dest_ipv6_addr[ii + 1]);
+ }
+
+ printf("\n%s : LINE # %u", __FUNCTION__, __LINE__);
+ if ((ipv6_hdr->proto == IPPROTO_ICMPV6) &&
+ (!memcmp
+ (&ipv6_hdr->dst_addr, &configured_port_ipv6[0],
+ RTE_LPM_IPV6_ADDR_SIZE)
+ || !memcmp(&dest_ipv6_addr[0],
+ &solicited_node_multicast_addr[0],
+ RTE_LPM_IPV6_ADDR_SIZE))) {
+ ipv6_forward_pkts_mask &= ~pkt_mask; /**< Its ICMP, remove this packet from the ipv6_forward_pkts_mask*/
+ stats.nb_rx_l3_icmp_pkt++; /**< Increment stats for ICMP PKT */
+ nb_icmpv6_pkt++;
+		} else {	// Forward the packet
+ icmp_pkts_mask &= ~pkt_mask; /**< Not ICMP, remove this packet from the icmp_pkts_mask*/
+ stats.nb_rx_l3_pkt++;
+ nb_l3_pkt++; /**< Increment stats for L3 PKT */
+ }
+ }
+
+ if (icmp_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+				("\n RECEIVED LOCAL ICMP PKT at L3...\n PROCESSING ICMP LOCAL PKT...\n");
+ proto_type[IP_LOCAL]->func(m, nb_icmpv6_pkt, icmp_pkts_mask,
+ port);
+ }
+
+ if (ipv6_forward_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+ ("\n RECEIVED L3 PKT, \n\n FORWARDING L3 PKT....\n");
+ proto_type[IP_REMOTE]->func(m, nb_l3_pkt,
+ ipv6_forward_pkts_mask, port);
+ }
+}
+
+struct ipv6_fib_path *populate_ipv6_fib_path(uint8_t
+ nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t portid)
+{
+
+ struct fib_path_key_ipv6 path_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ path_key.nh_ipv6[i] = nh_ipv6[i];
+ }
+ path_key.out_port = portid;
+ path_key.filler1 = 0;
+ path_key.filler2 = 0;
+ path_key.filler3 = 0;
+
+ struct ipv6_fib_path *fib_data = NULL;
+	/* Populate fib_path if it is present in the FIB_PATH cuckoo hash table */
+ fib_data = retrieve_ipv6_fib_path_entry(path_key);
+
+ if (fib_data) {
+
+		printf(" Fib path entry exists for next hop IPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u\n",
+ nh_ipv6[0], nh_ipv6[1], nh_ipv6[2], nh_ipv6[3],
+ nh_ipv6[4], nh_ipv6[5], nh_ipv6[6], nh_ipv6[7],
+ nh_ipv6[8], nh_ipv6[9], nh_ipv6[10], nh_ipv6[11],
+ nh_ipv6[12], nh_ipv6[13], nh_ipv6[14], nh_ipv6[15],
+ portid);
+
+ fib_data->refcount++;
+		return fib_data;	// Entry exists, reuse it
+	} else {
+		printf("IPv6 fib_path entry doesn't exist, creating a new one\n");
+ }
+
+ /* populate L2 Adj */
+ fib_data = NULL;
+ struct l2_adj_ipv6_entry *l2_adj_ptr = NULL;
+ l2_adj_ptr = populate_ipv6_l2_adj(nh_ipv6, portid);
+
+ if (l2_adj_ptr) {
+
+		uint32_t size =
+			RTE_CACHE_LINE_ROUNDUP(sizeof(struct ipv6_fib_path));
+		fib_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+		if (fib_data == NULL) {
+			printf("IPv6 fib_path memory allocation failed !\n");
+			return NULL;
+		}
+
+ for (i = 0; i < 16; i++) {
+ fib_data->nh_ipv6[i] = nh_ipv6[i];
+ }
+ fib_data->out_port = portid;
+ //memcpy(fib_data->nh_ipv6, &nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+
+ fib_data->refcount++;
+ fib_data->l2_adj_ipv6_ptr = l2_adj_ptr;
+
+		/* Store the new fib_path entry in the FIB_PATH hash table */
+		rte_hash_add_key_data(fib_path_ipv6_hash_handle, &path_key,
+						fib_data);
+		printf
+				(" ND resolution success: l2_adj_entry %p, ipv6_fib_path_addr %p\n",
+				 l2_adj_ptr, fib_data);
+ return fib_data;
+ } else {
+ printf
+ ("ND resolution failed and unable to write fib path in fib_path cuckoo hash\n");
+ }
+ return NULL;
+
+}
+
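+/*
+ * Build (or reuse) the L2 adjacency for a next hop. The precomputed
+ * l2_string is laid out as dst MAC (6 bytes) | src MAC (6 bytes) |
+ * ethertype (2 bytes), i.e. a ready-to-copy Ethernet header rewrite.
+ */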
+struct l2_adj_ipv6_entry *populate_ipv6_l2_adj(uint8_t
+ nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t portid)
+{
+
+ struct l2_adj_key_ipv6 l2_adj_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ l2_adj_key.nh_ipv6[i] = nh_ipv6[i];
+ }
+ l2_adj_key.out_port_id = portid;
+ l2_adj_key.filler1 = 0;
+ l2_adj_key.filler2 = 0;
+ l2_adj_key.filler3 = 0;
+
+ struct l2_adj_ipv6_entry *adj_data = NULL;
+ struct ether_addr eth_dst;
+	/* Populate L2 adj if the MAC address is present in the L2 Adj hash table */
+ adj_data = retrieve_ipv6_l2_adj_entry(l2_adj_key);
+
+ if (adj_data) {
+
+ printf("ipv6_l2_adj_entry exists for Next Hop IPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u\n",
+ nh_ipv6[0], nh_ipv6[1], nh_ipv6[2], nh_ipv6[3],
+ nh_ipv6[4], nh_ipv6[5], nh_ipv6[6], nh_ipv6[7],
+ nh_ipv6[8], nh_ipv6[9], nh_ipv6[10], nh_ipv6[11],
+ nh_ipv6[12], nh_ipv6[13], nh_ipv6[14], nh_ipv6[15],
+ portid);
+
+ ether_addr_copy(&adj_data->eth_addr, &eth_dst);
+ adj_data->refcount++;
+		return adj_data;	// Entry exists, reuse it
+ }
+
+ struct ether_addr eth_src;
+	uint16_t ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);	/* 0x86DD, network byte order */
+ l2_phy_interface_t *port;
+ port = ifm_get_port(portid);
+ if (port == NULL) {
+ printf("PORT %u IS DOWN.. Unable to process !\n", portid);
+ return NULL;
+ }
+
+ memcpy(&eth_src, &port->macaddr, sizeof(struct ether_addr));
+	uint32_t size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_ipv6_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return NULL;
+ }
+
+ adj_data->out_port_id = portid;
+ //memcpy(adj_data->nh_ipv6, &nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+ for (i = 0; i < 16; i++) {
+ adj_data->nh_ipv6[i] = nh_ipv6[i];
+ }
+ adj_data->refcount++;
+ adj_data->phy_port = port;
+
+ rte_hash_add_key_data(l2_adj_ipv6_hash_handle, &l2_adj_key, adj_data);
+
+ /* Query ND to get L2 Adj */
+ if (get_dest_mac_for_nexthop_ipv6(nh_ipv6, portid, &eth_dst)) {
+		/* Store the resolved MAC address in the L2 Adj entry */
+ ether_addr_copy(&eth_dst, &adj_data->eth_addr);
+
+ /* Precompute the L2 string encapsulation */
+ memcpy(&adj_data->l2_string, &eth_dst,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[6], &eth_src,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ adj_data->flags = L2_ADJ_RESOLVED;
+ printf
+ (" ND resolution successful and stored in ipv6_l2_adj_entry %p\n",
+ adj_data);
+
+ return adj_data;
+ } else {
+ adj_data->flags = L2_ADJ_UNRESOLVED;
+ printf
+ ("ND resolution failed and unable to write in ipv6_l2_adj_entry\n");
+ }
+ return NULL;
+}
+
+struct l2_adj_ipv6_entry *retrieve_ipv6_l2_adj_entry(struct l2_adj_key_ipv6
+ l2_adj_key)
+{
+ struct l2_adj_ipv6_entry *ret_l2_adj_data = NULL;
+
+ int ret =
+ rte_hash_lookup_data(l2_adj_ipv6_hash_handle, &l2_adj_key,
+ (void **)&ret_l2_adj_data);
+ if (ret < 0) {
+ printf
+ ("L2 Adj hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ } else {
+ printf("L2 Adj hash lookup Successful..!!!\n");
+ return ret_l2_adj_data;
+ }
+ return NULL;
+}
+
+int get_dest_mac_for_nexthop_ipv6(uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint32_t out_phy_port,
+ struct ether_addr *hw_addr)
+{
+ struct nd_entry_data *nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ tmp_nd_key.ipv6[i] = nh_ipv6[i];
+ }
+ tmp_nd_key.port_id = out_phy_port;
+
+ nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (nd_data == NULL) {
+ printf("ND entry is not found\n");
+ return 0;
+ }
+ ether_addr_copy(&nd_data->eth_addr, hw_addr);
+
+ return 1;
+}
+
+struct ipv6_fib_path *retrieve_ipv6_fib_path_entry(struct fib_path_key_ipv6
+ path_key)
+{
+
+ struct ipv6_fib_path *ret_fib_path_data = NULL;
+ int ret =
+ rte_hash_lookup_data(fib_path_ipv6_hash_handle, &path_key,
+ (void **)&ret_fib_path_data);
+ if (ret < 0) {
+ printf
+ ("FIB Path Adj hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ return NULL;
+ } else {
+ return ret_fib_path_data;
+ }
+}
+
+void remove_ipv6_fib_l2_adj_entry(void *entry)
+{
+ struct ipv6_fib_info entry1;
+ memcpy(&entry1, entry, sizeof(struct ipv6_fib_info));
+
+ struct ipv6_fib_path *fib_path_addr = entry1.path[0]; //fib_info->path[0];
+ if (fib_path_addr->refcount > 1) {
+		printf("fib_path entry still referenced, not removed (before decrement): nh_IPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u, refcount :%d\n",
+ fib_path_addr->nh_ipv6[0], fib_path_addr->nh_ipv6[1],
+ fib_path_addr->nh_ipv6[2], fib_path_addr->nh_ipv6[3],
+ fib_path_addr->nh_ipv6[4], fib_path_addr->nh_ipv6[5],
+ fib_path_addr->nh_ipv6[6], fib_path_addr->nh_ipv6[7],
+ fib_path_addr->nh_ipv6[8], fib_path_addr->nh_ipv6[9],
+ fib_path_addr->nh_ipv6[10], fib_path_addr->nh_ipv6[11],
+ fib_path_addr->nh_ipv6[12], fib_path_addr->nh_ipv6[13],
+ fib_path_addr->nh_ipv6[14], fib_path_addr->nh_ipv6[15],
+ fib_path_addr->out_port, fib_path_addr->refcount);
+ fib_path_addr->refcount--; // Just decrement the refcount this entry is still referred
+		printf("fib_path entry still referenced, not removed (after decrement): nh_IPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u, refcount :%d\n",
+ fib_path_addr->nh_ipv6[0], fib_path_addr->nh_ipv6[1],
+ fib_path_addr->nh_ipv6[2], fib_path_addr->nh_ipv6[3],
+ fib_path_addr->nh_ipv6[4], fib_path_addr->nh_ipv6[5],
+ fib_path_addr->nh_ipv6[6], fib_path_addr->nh_ipv6[7],
+ fib_path_addr->nh_ipv6[8], fib_path_addr->nh_ipv6[9],
+ fib_path_addr->nh_ipv6[10], fib_path_addr->nh_ipv6[11],
+ fib_path_addr->nh_ipv6[12], fib_path_addr->nh_ipv6[13],
+ fib_path_addr->nh_ipv6[14], fib_path_addr->nh_ipv6[15],
+ fib_path_addr->out_port, fib_path_addr->refcount);
+ } else { // Refcount is 1 so delete both fib_path and l2_adj_entry
+
+ struct l2_adj_ipv6_entry *adj_addr = NULL;
+ adj_addr = fib_path_addr->l2_adj_ipv6_ptr;
+
+		if (adj_addr != NULL) {	//l2_adj_entry has an entry in the hash table
+ printf("%s: CHECK %d\n\r", __FUNCTION__, __LINE__);
+			struct l2_adj_key_ipv6 l2_adj_key;
+			memset(&l2_adj_key, 0, sizeof(l2_adj_key));
+			memcpy(&l2_adj_key.nh_ipv6, fib_path_addr->nh_ipv6,
+						 RTE_LPM_IPV6_ADDR_SIZE);
+			l2_adj_key.out_port_id =
+				fib_path_addr->out_port;
+			rte_hash_del_key(l2_adj_ipv6_hash_handle,
+						 &l2_adj_key);
+ rte_free(adj_addr); // free memory
+ adj_addr = NULL;
+ }
+
+		struct fib_path_key_ipv6 path_key;
+		memset(&path_key, 0, sizeof(path_key));
+		memcpy(&path_key.nh_ipv6, fib_path_addr->nh_ipv6,
+					 RTE_LPM_IPV6_ADDR_SIZE);
+		path_key.out_port = fib_path_addr->out_port;
+		rte_hash_del_key(fib_path_ipv6_hash_handle, &path_key);
+ rte_free(fib_path_addr); //Free the memory
+ fib_path_addr = NULL;
+ }
+}
+
+int is_valid_ipv6_pkt(struct ipv6_hdr *pkt, uint32_t link_len)
+{
+	if (link_len < sizeof(struct ipv6_hdr))
+ return -1;
+ if (rte_cpu_to_be_16(pkt->payload_len) < sizeof(struct ipv6_hdr))
+ return -1;
+
+ return 0;
+}
+
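+/*
+ * Register the handler for an upper-layer protocol: ICMPv6 is delivered
+ * locally (IP_LOCAL slot), while TCP/UDP are treated as transit traffic
+ * and take the forwarding path (IP_REMOTE slot).
+ */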
+void
+ipv6_l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *))
+{
+ switch (protocol_type) {
+	case IPPROTO_ICMPV6:
+		proto_type[IP_LOCAL] =
+			rte_malloc(NULL, sizeof(struct ip_protocol_type),
+					 RTE_CACHE_LINE_SIZE);
+		if (proto_type[IP_LOCAL] == NULL) {
+			printf("ip_protocol_type memory allocation failed !\n");
+			return;
+		}
+		proto_type[IP_LOCAL]->protocol_type = protocol_type;
+		proto_type[IP_LOCAL]->func = func;
+		break;
+
+	case IPPROTO_TCP:	// For now treated as remote forwarding
+	case IPPROTO_UDP:
+		proto_type[IP_REMOTE] =
+			rte_malloc(NULL, sizeof(struct ip_protocol_type),
+					 RTE_CACHE_LINE_SIZE);
+		if (proto_type[IP_REMOTE] == NULL) {
+			printf("ip_protocol_type memory allocation failed !\n");
+			return;
+		}
+		proto_type[IP_REMOTE]->protocol_type = protocol_type;
+		proto_type[IP_REMOTE]->func = func;
+		break;
+ }
+}
+
+void
+ipv6_local_deliver(struct rte_mbuf **pkt_burst, __rte_unused uint16_t nb_rx,
+ uint64_t icmp_pkt_mask, l2_phy_interface_t *port)
+{
+ for (; icmp_pkt_mask;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(icmp_pkt_mask);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ icmp_pkt_mask &= ~pkt_mask; /**< remove this packet from the mask */
+
+ process_icmpv6_pkt(pkt_burst[pos], port);
+ }
+}
+
+void
+ipv6_forward_deliver(struct rte_mbuf **pkt_burst, uint16_t nb_pkts,
+ uint64_t ipv6_forward_pkts_mask, l2_phy_interface_t *port)
+{
+ if (L3FWD_DEBUG) {
+ printf
+				("ipv6_forward_deliver BEFORE DROP: nb_pkts: %u, from in_port %u\n",
+ nb_pkts, port->pmdid);
+ }
+ uint64_t pkts_for_process = ipv6_forward_pkts_mask;
+
+ struct ipv6_hdr *ipv6_hdr;
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t hit_mask = 0;
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(pkt_burst[pos], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+		/* Make sure the IPv6 packet is valid */
+
+		if (is_valid_ipv6_pkt(ipv6_hdr, pkt_burst[pos]->pkt_len) < 0) {
+			rte_pktmbuf_free(pkt_burst[pos]); /**< Drop the invalid IPv6 packet */
+ pkt_burst[pos] = NULL;
+ ipv6_forward_pkts_mask &= ~(1LLU << pos); /**< That will clear bit of that position*/
+ nb_pkts--;
+ stats.nb_l3_drop_pkt++;
+ }
+ }
+
+ if (L3FWD_DEBUG) {
+ printf
+				("\nl3fwd_rx_ipv6_packets AFTER DROP: nb_pkts: %u, valid_pkts_mask :%" PRIu64 "\n",
+ nb_pkts, ipv6_forward_pkts_mask);
+ }
+
+	/* Lookup the IP destination in the LPM6 table */
+ lpm6_table_lookup(pkt_burst, nb_pkts, ipv6_forward_pkts_mask, port_ptr,
+ &hit_mask);
+}
+
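+/*
+ * Pick a hash bucket from the IPv6 src/dst addresses: the byte-wise sums
+ * of the two addresses are XORed, folded twice with rotate-right (16,
+ * then 8 bits), and masked down to HASH_BUCKET_SIZE buckets.
+ */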
+uint8_t ipv6_hash_load_balance(struct rte_mbuf *mbuf)
+{
+ uint32_t src_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST_IPV6;
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST_IPV6;
+ uint8_t src_addr[RTE_LPM_IPV6_ADDR_SIZE];
+ uint8_t dst_addr[RTE_LPM_IPV6_ADDR_SIZE];
+
+ memcpy(&src_addr,
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(mbuf, src_addr_offset),
+ RTE_LPM_IPV6_ADDR_SIZE);
+ memcpy(&dst_addr,
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(mbuf, dst_addr_offset),
+ RTE_LPM_IPV6_ADDR_SIZE);
+ uint32_t hash_key1 = 0; /* STORE Accumulated value of SRC IP in key1 variable */
+ uint32_t hash_key2 = 0; /* STORE Accumulated value of DST IP in key2 variable */
+ uint8_t i;
+ for (i = 0; i < RTE_LPM_IPV6_ADDR_SIZE; i++) {
+ hash_key1 += src_addr[i]; /* Accumulate */
+ hash_key2 += dst_addr[i]; /* Accumulate */
+ }
+	hash_key1 = hash_key1 ^ hash_key2;	/* XOR the SRC and DST sums, result in hash_key1 */
+	hash_key2 = hash_key1;	/* Move the result to hash_key2 */
+	hash_key1 = rotr32(hash_key1, RTE_LPM_IPV6_ADDR_SIZE);	/* Rotate right by 16 bits */
+	hash_key1 = hash_key1 ^ hash_key2;	/* XOR key1 with key2 */
+
+	hash_key2 = hash_key1;	/* Move the result to hash_key2 */
+
+	hash_key1 = rotr32(hash_key1, 8);	/* Rotate right by 8 bits */
+	hash_key1 = hash_key1 ^ hash_key2;	/* XOR key1 with key2 */
+
+	hash_key1 = hash_key1 & (HASH_BUCKET_SIZE - 1);	/* Mask the key to the bucket range */
+	if (L3FWD_DEBUG)
+		printf("Hash Result_key: %u\n", hash_key1);
+ return hash_key1;
+}
+
+void
+resolve_ipv6_l2_adj(uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE], uint8_t portid,
+ struct ether_addr *hw_addr)
+{
+	struct l2_adj_ipv6_entry *adj_data = NULL;
+	uint16_t ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);	/* 0x86DD, network byte order */
+
+	struct l2_adj_key_ipv6 l2_adj_key;
+	memset(&l2_adj_key, 0, sizeof(l2_adj_key));
+	memcpy(l2_adj_key.nh_ipv6, nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+	l2_adj_key.out_port_id = portid;
+
+ adj_data = retrieve_ipv6_l2_adj_entry(l2_adj_key);
+ if (adj_data) {
+ if (adj_data->flags == L2_ADJ_UNRESOLVED
+ || memcmp(&adj_data->eth_addr, hw_addr, 6)) {
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+
+ /* Precompute the L2 string encapsulation */
+ memcpy(&adj_data->l2_string, hw_addr,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[6],
+ &adj_data->phy_port->macaddr,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ adj_data->flags = L2_ADJ_RESOLVED;
+ }
+
+ return;
+ }
+
+ l2_phy_interface_t *port;
+ port = ifm_get_port(portid);
+ if (port == NULL) {
+ printf("PORT %u IS DOWN..! Unable to Process\n", portid);
+ return;
+ }
+	uint32_t size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_ipv6_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return;
+ }
+
+	adj_data->out_port_id = portid;
+	memcpy(adj_data->nh_ipv6, nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+
+	adj_data->phy_port = port;
+
+	ether_addr_copy(hw_addr, &adj_data->eth_addr);
+
+ /* Precompute the L2 string encapsulation */
+ memcpy(&adj_data->l2_string, hw_addr, sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[6], &port->macaddr,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ adj_data->flags = L2_ADJ_RESOLVED;
+
+ /* Store the received MAC Address in L2 Adj HAsh Table */
+ rte_hash_add_key_data(l2_adj_ipv6_hash_handle, &l2_adj_key, adj_data);
+
+ printf(" ND resolution successful and stored in ipv6_l2_adj_entry %p\n",
+ adj_data);
+}
+
+void ipv6_iterate__hash_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ uint8_t ii;
+ printf("\n\t\t\t IPv6 FIB_path Cache table....");
+ printf
+ ("\n------------------------------------------------------------------------------");
+ printf
+			("\n\tNextHop IP \t\t\t\t Port  Refcount  l2_adj_ptr_address\n\n");
+ printf
+ ("--------------------------------------------------------------------------------\n");
+
+ while (rte_hash_iterate
+ (fib_path_ipv6_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct ipv6_fib_path *tmp_data =
+ (struct ipv6_fib_path *)next_data;
+ struct fib_path_key_ipv6 tmp_key;
+ memcpy(&tmp_key, next_key, sizeof(tmp_key));
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", tmp_data->nh_ipv6[ii],
+ tmp_data->nh_ipv6[ii + 1]);
+ }
+ printf(" \t %u \t %u \t %p\n", tmp_data->out_port,
+ tmp_data->refcount, tmp_data->l2_adj_ipv6_ptr);
+
+ }
+
+ iter = 0;
+
+ printf("\n\t\t\t L2 ADJ Cache table.....");
+ printf
+ ("\n----------------------------------------------------------------------------------\n");
+ printf
+ ("\tNextHop IP \t\t\t\t Port \t l2 Encap string \t l2_Phy_interface\n");
+ printf
+ ("\n------------------------------------------------------------------------------------\n");
+ while (rte_hash_iterate
+ (l2_adj_ipv6_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct l2_adj_ipv6_entry *l2_data =
+ (struct l2_adj_ipv6_entry *)next_data;
+ struct l2_adj_key_ipv6 l2_key;
+ memcpy(&l2_key, next_key, sizeof(l2_key));
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", l2_data->nh_ipv6[ii],
+ l2_data->nh_ipv6[ii + 1]);
+ }
+ printf(" \t%u\t%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x\t%p\n",
+ l2_data->out_port_id,
+ l2_data->l2_string[0],
+ l2_data->l2_string[1],
+ l2_data->l2_string[2],
+ l2_data->l2_string[3],
+ l2_data->l2_string[4],
+ l2_data->l2_string[5],
+ l2_data->l2_string[6],
+ l2_data->l2_string[7],
+ l2_data->l2_string[8],
+ l2_data->l2_string[9],
+ l2_data->l2_string[10],
+ l2_data->l2_string[11], l2_data->phy_port);
+ }
+}
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm6.h b/common/VIL/l2l3_stack/l3fwd_lpm6.h
new file mode 100644
index 00000000..fdd8287a
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm6.h
@@ -0,0 +1,315 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+* @file
+* L3fwd lpm6 header file is for IPv6 specific declarations
+*/
+
+#ifndef L3FWD_LPM6_H
+#define L3FWD_LPM6_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <stdbool.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+#include <rte_table_lpm_ipv6.h>
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "interface.h"
+
+/**
+* Define all RTE MBUF offset size
+*/
+
+#define MBUF_HDR_ROOM 256 /**< MBUF HEADER ROOM OFFSET */
+/* IPv6 */
+#define IP_HDR_SIZE_IPV6 40 /**< IPv6 HEADER OFFSET */
+#define IP_HDR_SRC_ADR_OFST_IPV6 8 /**< IPv6 HEADER SRC IP ADDRESS OFFSET */
+#define IP_HDR_DST_ADR_OFST_IPV6 24 /**< IPv6 HEADER DST IP ADDRESS OFFSET */
+
+/* IPv6 rules and tbl8s */
+#define IPV6_L3FWD_LPM_MAX_RULES 1024 /**< Number of LPM6 rules */
+#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16) /**< Number of tbl8s for LPM6 */
+
+#define MAX_FIB_PATHS 8 /**< MAX FIB PATH, If ECMP feature is enabled */
+
+/**
+* A structure used to define the routing information for IPv6
+* This structure is used as input parameters for route ADD
+*/
+struct ipv6_routing_info {
+ uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< DST IPv6 Address */
+ uint8_t depth; /**< Depth */
+ uint32_t metric; /**< Metrics */
+	uint32_t fib_nh_size; /**< num of fib paths, greater than 1 if Multipath (ECMP) is supported */
+ uint8_t nh_ipv6[MAX_FIB_PATHS][RTE_LPM_IPV6_ADDR_SIZE]; /**< NextHop IP Address */
+ uint8_t out_port[MAX_FIB_PATHS]; /**< OUTGOING PORT */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the fib path for Destination IPv6 Address
+* This fib path is shared across different fib_info entries.
+*/
+struct ipv6_fib_path {
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IP address (only valid for remote routes) */
+	uint32_t refcount; /**< Refcount, greater than 1 if multiple fib_info entries share this fib_path */
+ uint8_t out_port; /**< Output port */
+ struct l2_adj_ipv6_entry *l2_adj_ipv6_ptr;/**< Address of the L2 ADJ table entry */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the fib info (Route info)
+* This fib info structure can have multiple fib paths.
+*/
+struct ipv6_fib_info {
+ uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< DST IPv6 Address */
+ uint8_t depth; /**< Depth */
+ uint32_t metric; /**< Metric */
+	uint32_t fib_nh_size; /**< num of fib paths, greater than 1 if Multipath (ECMP) is supported */
+ struct ipv6_fib_path *path[MAX_FIB_PATHS]; /**< Array of pointers to the fib_path */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L2 Adjacency table
+*/
+struct l2_adj_ipv6_entry {
+ struct ether_addr eth_addr; /**< Ether address */
+ uint8_t out_port_id; /**< Outgoing port */
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IP address (only valid for remote routes) */
+	uint32_t refcount; /**< Refcount, greater than 1 if multiple fib_path entries share this L2_adj_entry */
+ uint8_t l2_string[256]; /**< L2 string, to rewrite the packet before transmission */
+ l2_phy_interface_t *phy_port; /**< Address of the L2 physical interface structure */
+ uint8_t flags; /**< flags for marking this entry as resolved or unresolved. */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used as the key for the L2 Adjacency hash table
+*/
+struct l2_adj_key_ipv6 {
+ /*128 Bit of IPv6 Address */
+ /*<48bit Network> <16bit Subnet> <64bit Interface> */
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IPv6 address */
+ uint8_t out_port_id; /**< Outgoing port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+/**
+* A structure used to define the fib path key for hash table
+*/
+struct fib_path_key_ipv6 {
+ /*128 Bit of IPv6 Address */
+ /*<48bit Network> <16bit Subnet> <64bit Interface> */
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IPv6 address */
+ uint8_t out_port; /**< Outgoing port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+struct ipv6_protocol_type {
+ uint8_t protocol_type; /**< Protocol Type */
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+} __rte_cache_aligned;
+
+/* Function Declarations */
+/**
+ * To create the LPM6 table and the cuckoo hash tables for the fib_path and l2_adj_entry tables
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_init(void);
+
+/**
+ * To add a route in LPM6 table by populating fib_path and L2 Adjacency.
+ * @param data
+ *	To add the route based on the ipv6_routing_info structure.
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_table_route_add(struct ipv6_routing_info *data);
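+/*
+ * Illustrative use (addresses, depth and port below are made-up example
+ * values, not defaults used by the stack):
+ *
+ *	struct ipv6_routing_info r = {
+ *		.dst_ipv6 = {0x20, 0x01, 0x0d, 0xb8},	// 2001:db8::/48
+ *		.depth = 48,
+ *		.metric = 0,
+ *		.fib_nh_size = 1,
+ *		.nh_ipv6 = {{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}},
+ *		.out_port = {0},
+ *	};
+ *	lpm6_table_route_add(&r);
+ */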
+
+/**
+ * To Delete the IP route and corresponding fib_path and L2 Adjacency entries.
+ * @param dst_ipv6
+ *	Destination IPv6 for which the route needs to be deleted
+ * @param depth
+ * netmask for the Destination IP
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_table_route_delete(uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t depth);
+
+/**
+ * To perform a LPM6 table lookup
+ * @param pkts_burst
+ *	Burst of packets to be looked up in the LPM6 table
+ * @param nb_pkts
+ * Number of valid L3 packets
+ * @param pkts_mask
+ *	Mask of valid packets to be looked up in the LPM6 table
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_table_lookup(struct rte_mbuf **pkts_burst, uint16_t nb_pkts,
+ uint64_t pkts_mask,
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask);
+
+/**
+ * To forward valid L3 packets via LPM6 table lookup and hand ICMPv6 packets to the ICMP module
+ * @param m
+ * packet burst of type rte_mbuf
+ * @param nb_pkts
+ * Number of valid L3 packets
+ * @param valid_pkts_mask
+ * Valid IPv6 packets mask that needs to be processed
+ * @param in_port
+ *	Input port on which the IPv6 packets were received.
+ * @return
+ * None
+ */
+void l3fwd_rx_ipv6_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask,
+ l2_phy_interface_t *in_port);
+
+/**
+ * To populate the fib_path for the nexthop IPv6 and outgoing port
+ * @param nh_ipv6
+ *	NextHop IPv6 address for which the fib_path entry needs to be populated
+ * @param out_port
+ *	outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the type ipv6_fib_path if lookup success
+*/
+struct ipv6_fib_path *populate_ipv6_fib_path(uint8_t
+ nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t out_port);
+
+/**
+ * To retrieve the fib_path entry for the nexthop IP and outgoing port
+ * This queries the cuckoo hash table using fib_path_key_ipv6
+ * @param path_key
+ *	Key required for the cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of type ipv6_fib_path if lookup success
+*/
+struct ipv6_fib_path *retrieve_ipv6_fib_path_entry(struct fib_path_key_ipv6
+ path_key);
+
+/**
+ * To retrieve the l2_adj_entry for the nexthop IP and outgoing port
+ * This queries the cuckoo hash table using l2_adj_key_ipv6
+ * @param l2_adj_key
+ *	Key required for the cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of type l2_adj_ipv6_entry if lookup success
+*/
+struct l2_adj_ipv6_entry *retrieve_ipv6_l2_adj_entry(struct l2_adj_key_ipv6
+ l2_adj_key);
+
+/**
+ * To populate the l2_adj_entry for the nexthop IP and outgoing port
+ * @param nh_ip
+ * NextHop Ip Address for which L2_adj_entry needs to be populated
+ * @param portid
+ *	outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the L2_adj_ipv6_entry if lookup success
+*/
+struct l2_adj_ipv6_entry *populate_ipv6_l2_adj(uint8_t
+ nh_ip[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t portid);
+
+/**
+ * To get the destination MAC Address for the nexthop IP and outgoing port
+ * @param nh_ipv6
+ * Next HOP IP Address for which MAC address is needed
+ * @param out_phy_port
+ * Outgoing physical port
+ * @param hw_addr
+ *	pointer to the ether_addr; this gets updated with the valid MAC address based on nh_ip and out port
+ * @return
+ * 0 if failure, 1 if success
+ */
+int get_dest_mac_for_nexthop_ipv6(uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint32_t out_phy_port,
+ struct ether_addr *hw_addr);
+
+/**
+ * To delete the ipv6 fib path and l2 adjacency entry from the cuckoo hash table
+ * @return
+ * None
+*/
+void remove_ipv6_fib_l2_adj_entry(void *entry);
+
+void
+ipv6_l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *));
+
+void
+ipv6_local_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+void
+ipv6_forward_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+int is_valid_ipv6_pkt(struct ipv6_hdr *pkt, uint32_t link_len);
+uint8_t ipv6_hash_load_balance(struct rte_mbuf *mbuf);
+
+/**
+ * To resolve l2_adj_entry based on nexthop IP, outgoing port and ether hw address.
+ * @param nh_ip
+ * NextHop Ip Address for which L2_adj_entry needs to be resolved
+ * @param portid
+ *	outgoing port ID
+ * @param hw_addr
+ * Ethernet hardware address for the above nexthop IP and out port ID.
+ * @return
+ * Return is void.
+*/
+
+void resolve_ipv6_l2_adj(uint8_t nh_ip[RTE_LPM_IPV6_ADDR_SIZE], uint8_t portid,
+ struct ether_addr *hw_addr);
+
+void ipv6_iterate__hash_table(void);
+#endif /* L3FWD_LPM6_H */
diff --git a/common/VIL/l2l3_stack/l3fwd_main.c b/common/VIL/l2l3_stack/l3fwd_main.c
new file mode 100644
index 00000000..247d8876
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_main.c
@@ -0,0 +1,145 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/****************************************************************************
+*
+* filename : l3fwd_main.c
+*
+*
+******************************************************************************/
+
+#include "l3fwd_common.h"
+#include "l2_proto.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "interface.h"
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+
+struct routing_info input_array[] = {
+#if MULTIPATH_FEAT
+ {IPv4(30, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(40, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(50, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(60, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(100, 100, 100, 100), 24, 0, 2,
+	 {IPv4(120, 0, 0, 2), IPv4(120, 0, 0, 2)}, {1, 1} },	// Fib path available
+
+ {IPv4(200, 100, 100, 100), 24, 0, 2,
+ {IPv4(80, 0, 0, 2), IPv4(80, 40, 50, 60)}, {1, 1} }, // Fib path Not Available
+#else
+ {IPv4(30, 12, 0, 1), 24, 0, 1,
+ {IPv4(192, 168, 0, 2)}, {1} },
+
+ {IPv4(20, 12, 0, 1), 24, 0, 1,
+ {IPv4(120, 0, 0, 2)}, {1} },
+#endif
+};
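+/*
+ * With MULTIPATH_FEAT enabled each static route above carries several
+ * next hops (ECMP-style fib paths); otherwise a single next hop per
+ * route is installed.
+ */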
+
+struct ipv6_routing_info ipv6_input_array[] = {
+
+ {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0, 2,
+ {{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20}
+ },
+ {1, 1}
+ },
+
+ {{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, 48, 0, 2,
+ {{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20}
+ },
+ {1, 1}
+ },
+};
+
+void l3fwd_init(void)
+{
+ printf(" *********** L3 Initialization START ************\n");
+ if (lpm_init() == 0) {
+ rte_exit(EXIT_FAILURE, "L3 Initialization IPv4 Failed\n");
+ }
+ if (lpm6_init() == 0) {
+ rte_exit(EXIT_FAILURE, "L3 Initialization for IPV6 Failed\n");
+ }
+
+ list_add_type(ETHER_TYPE_IPv4, l3fwd_rx_ipv4_packets);
+ list_add_type(ETHER_TYPE_IPv6, l3fwd_rx_ipv6_packets);
+
+ l3_protocol_type_add(IPPROTO_ICMP, ip_local_packets_process);
+ l3_protocol_type_add(IPPROTO_TCP, ip_forward_deliver);
+ l3_protocol_type_add(IPPROTO_UDP, ip_forward_deliver);
+
+ ipv6_l3_protocol_type_add(IPPROTO_ICMPV6, ipv6_local_deliver);
+ ipv6_l3_protocol_type_add(IPPROTO_TCP, ipv6_forward_deliver);
+ ipv6_l3_protocol_type_add(IPPROTO_UDP, ipv6_forward_deliver);
+
+}
+
+void populate_lpm_routes(void)
+{
+ populate_lpm4_table_routes();
+ //populate_lpm6_table_routes();
+}
+
+void populate_lpm4_table_routes(void)
+{
+ uint8_t i;
+ printf
+ (" *********** L3 IPV4 Route Initialization START ************\n");
+ for (i = 0; i < MAX_ROUTES; i++) {
+ if (lpm4_table_route_add(&input_array[i])) {
+
+ printf("Total routes Added# %d\n", i + 1);
+ } else {
+ rte_exit(EXIT_FAILURE,
+ "L3 route addition failed for the route# %d\n",
+ i);
+ }
+ }
+ printf
+ (" *********** L3 IPV4 Route Initialization END ************\n\n");
+}
+
+void populate_lpm6_table_routes(void)
+{
+ uint8_t i;
+ printf
+ (" *********** L3 IPV6 Route Initialization START ************\n");
+ for (i = 0; i < 2; i++) {
+ if (lpm6_table_route_add(&ipv6_input_array[i])) {
+
+ printf("Added route # %d\n", i);
+ } else {
+ rte_exit(EXIT_FAILURE,
+ "L3 route addition failed for the route# %d\n",
+ i);
+ }
+ }
+ printf(" *********** L3 IPV6 Route Initialization END ************\n");
+}
diff --git a/common/VIL/l2l3_stack/lib_arp.c b/common/VIL/l2l3_stack/lib_arp.c
new file mode 100644
index 00000000..042dd39c
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_arp.c
@@ -0,0 +1,2655 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <execinfo.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include <rte_timer.h>
+#include "interface.h"
+#include "l2_proto.h"
+#include "lib_arp.h"
+#include "l3fwd_lpm4.h"
+#include "vnf_common.h"
+
+#if (RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN)
+#define CHECK_ENDIAN_16(x) rte_be_to_cpu_16(x)
+#define CHECK_ENDIAN_32(x) rte_be_to_cpu_32(x)
+#else
+#define CHECK_ENDIAN_16(x) (x)
+#define CHECK_ENDIAN_32(x) (x)
+#endif
+
+#define NB_ARPICMP_MBUF 64
+#define NB_NDICMP_MBUF 64
+#define IP_VERSION_4 0x40
+#define IP_HDRLEN  0x05 /**< default IP header length == five 32-bit words. */
+#define IP_VHL_DEF (IP_VERSION_4 | IP_HDRLEN)
+
+#define is_multicast_ipv4_addr(ipv4_addr) \
+ (((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0)
+
+extern uint8_t prv_in_port_a[16];
+extern uint32_t timer_lcore;
+uint32_t arp_timeout = ARP_TIMER_EXPIRY;
+
+/*ND IPV6 */
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+static int my_inet_pton_ipv6(int af, const char *src, void *dst);
+static int inet_pton_ipv6(const char *src, unsigned char *dst);
+static int inet_pton_ipv4(const char *src, unsigned char *dst);
+extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
+ uint8_t netmask_ipv6[]);
+
+uint8_t vnf_common_arp_lib_init;
+uint8_t vnf_common_nd_lib_init;
+uint8_t loadb_pipeline_count;
+
+uint32_t ARPICMP_DEBUG;
+uint32_t NDIPV6_DEBUG;
+
+uint32_t arp_route_tbl_index;
+uint32_t nd_route_tbl_index;
+uint32_t link_hw_addr_array_idx;
+
+uint32_t lib_arp_get_mac_req;
+uint32_t lib_arp_nh_found;
+uint32_t lib_arp_no_nh_found;
+uint32_t lib_arp_arp_entry_found;
+uint32_t lib_arp_no_arp_entry_found;
+uint32_t lib_arp_populate_called;
+uint32_t lib_arp_delete_called;
+uint32_t lib_arp_duplicate_found;
+
+uint32_t lib_nd_get_mac_req;
+uint32_t lib_nd_nh_found;
+uint32_t lib_nd_no_nh_found;
+uint32_t lib_nd_nd_entry_found;
+uint32_t lib_nd_no_arp_entry_found;
+uint32_t lib_nd_populate_called;
+uint32_t lib_nd_delete_called;
+uint32_t lib_nd_duplicate_found;
+struct rte_mempool *lib_arp_pktmbuf_tx_pool;
+struct rte_mempool *lib_nd_pktmbuf_tx_pool;
+
+struct rte_mbuf *lib_arp_pkt;
+struct rte_mbuf *lib_nd_pkt;
+
+uint8_t default_ether_addr[6] = { 0, 0, 0, 0, 1, 1 };
+uint8_t default_ip[4] = { 0, 0, 1, 1 };
+
+static struct rte_hash_parameters arp_hash_params = {
+ .name = "ARP",
+ .entries = 64,
+ .reserved = 0,
+ .key_len = sizeof(struct arp_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+static struct rte_hash_parameters nd_hash_params = {
+ .name = "ND",
+ .entries = 64,
+ .reserved = 0,
+ .key_len = sizeof(struct nd_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+struct rte_hash *arp_hash_handle;
+struct rte_hash *nd_hash_handle;
+
+void print_pkt1(struct rte_mbuf *pkt);
+
+struct app_params *myApp;
+struct rte_pipeline *myP;
+uint8_t num_vnf_threads;
+
+/**
+* A structure for Arp port address
+*/
+struct arp_port_address {
+ uint32_t ip; /**< Ip address */
+ uint8_t mac_addr[6]; /**< Mac address */
+};
+
+struct arp_port_address arp_port_addresses[RTE_MAX_ETHPORTS];
+struct rte_mempool *timer_mempool_arp;
+
+int timer_objs_mempool_count = 70000;
+
+#define MAX_NUM_ARP_ENTRIES 64
+#define MAX_NUM_ND_ENTRIES 64
+
+uint32_t get_nh(uint32_t, uint32_t *);
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[]);
+
+#define MAX_ARP_DATA_ENTRY_TABLE 7
+
+struct table_arp_entry_data arp_entry_data_table[MAX_ARP_DATA_ENTRY_TABLE] = {
+ {{0, 0, 0, 0, 0, 1}, 1, INCOMPLETE, IPv4(192, 168, 0, 2)},
+ {{0, 0, 0, 0, 0, 2}, 0, INCOMPLETE, IPv4(192, 168, 0, 3)},
+ {{0, 0, 0, 0, 0, 1}, 1, INCOMPLETE, IPv4(30, 40, 50, 60)},
+ {{0, 0, 0, 0, 0, 1}, 1, INCOMPLETE, IPv4(120, 0, 0, 2)},
+ {{0, 0, 0, 0, 0, 4}, 3, INCOMPLETE, IPv4(1, 1, 1, 4)},
+ {{0, 0, 0, 0, 0, 5}, 4, INCOMPLETE, IPv4(1, 1, 1, 5)},
+ {{0, 0, 0, 0, 0, 6}, 1, INCOMPLETE, IPv4(1, 1, 1, 7)},
+};
+
+#define MAX_ND_DATA_ENTRY_TABLE 7
+struct table_nd_entry_data nd_entry_data_table[MAX_ND_DATA_ENTRY_TABLE] = {
+ {{0, 0, 0, 0, 0, 8}, 1, INCOMPLETE,
+ {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, 0},
+
+ {{0, 0, 0, 0, 0, 9}, 1, INCOMPLETE,
+ {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20}, 0},
+ {{0, 0, 0, 0, 0, 10}, 2, INCOMPLETE,
+ {3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 11}, 3, INCOMPLETE,
+ {4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 12}, 4, INCOMPLETE,
+ {5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 13}, 5, INCOMPLETE,
+ {6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 14}, 6, INCOMPLETE,
+ {7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+};
+
+struct lib_nd_route_table_entry lib_nd_route_table[MAX_ND_RT_ENTRY] = {
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
+};
+
+struct lib_arp_route_table_entry lib_arp_route_table[MAX_ARP_RT_ENTRY] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+void print_trace(void);
+
+/* Obtain a backtrace and print it to stdout. */
+void print_trace(void)
+{
+ void *array[10];
+ size_t size;
+ char **strings;
+ size_t i;
+
+ size = backtrace(array, 10);
+ strings = backtrace_symbols(array, size);
+
+	RTE_LOG(INFO, LIBARP, "Obtained %zu stack frames.\n", size);
+
+ for (i = 0; i < size; i++)
+ RTE_LOG(INFO, LIBARP, "%s\n", strings[i]);
+
+ free(strings);
+}
+
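+/* Next-hop lookup: linear scan of the static ARP route table; the first
+ * entry whose (ip & mask) matches wins. Returns the next-hop IP and
+ * fills *port, or 0 if no route matches. */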
+uint32_t get_nh(uint32_t ip, uint32_t *port)
+{
+ int i = 0;
+ for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
+ if (((lib_arp_route_table[i].
+ ip & lib_arp_route_table[i].mask) ==
+ (ip & lib_arp_route_table[i].mask))) {
+
+ *port = lib_arp_route_table[i].port;
+ lib_arp_nh_found++;
+ return lib_arp_route_table[i].nh;
+ }
+ if (ARPICMP_DEBUG)
+ printf("No nh match ip 0x%x, port %u, t_ip "
+ "0x%x, t_port %u, mask 0x%x, r1 %x, r2 %x\n",
+ ip, *port, lib_arp_route_table[i].ip,
+ lib_arp_route_table[i].port,
+ lib_arp_route_table[i].mask,
+ (lib_arp_route_table[i].ip &
+ lib_arp_route_table[i].mask),
+ (ip & lib_arp_route_table[i].mask));
+ }
+ if (ARPICMP_DEBUG)
+ printf("No NH - ip 0x%x, port %u\n", ip, *port);
+ lib_arp_no_nh_found++;
+ return 0;
+}
+
+/*ND IPv6 */
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[])
+{
+ int i = 0;
+ uint8_t netmask_ipv6[16], netip_nd[16], netip_in[16];
+ uint8_t k = 0, l = 0, depthflags = 0, depthflags1 = 0;
+ memset(netmask_ipv6, 0, sizeof(netmask_ipv6));
+ memset(netip_nd, 0, sizeof(netip_nd));
+ memset(netip_in, 0, sizeof(netip_in));
+ if (!ipv6)
+ return;
+	for (i = 0; i < MAX_ND_RT_ENTRY; i++) {
+
+ convert_prefixlen_to_netmask_ipv6(lib_nd_route_table[i].depth,
+ netmask_ipv6);
+
+ for (k = 0; k < 16; k++) {
+ if (lib_nd_route_table[i].ipv6[k] & netmask_ipv6[k]) {
+ depthflags++;
+ netip_nd[k] = lib_nd_route_table[i].ipv6[k];
+ }
+ }
+
+ for (l = 0; l < 16; l++) {
+ if (ipv6[l] & netmask_ipv6[l]) {
+ depthflags1++;
+ netip_in[l] = ipv6[l];
+ }
+ }
+ int j = 0;
+ if ((depthflags == depthflags1)
+ && (memcmp(netip_nd, netip_in, sizeof(netip_nd)) == 0)) {
+ //&& (lib_nd_route_table[i].port == port))
+ *port = lib_nd_route_table[i].port;
+ lib_nd_nh_found++;
+
+ for (j = 0; j < 16; j++)
+ nhipv6[j] = lib_nd_route_table[i].nhipv6[j];
+
+ return;
+ }
+
+ if (NDIPV6_DEBUG)
+ printf("No nh match\n");
+ depthflags = 0;
+ depthflags1 = 0;
+ }
+ if (NDIPV6_DEBUG)
+ printf("No NH - ip 0x%x, port %u\n", ipv6[0], *port);
+ lib_nd_no_nh_found++;
+}
+
+/* Added for Multiport changes*/
+int get_dest_mac_addr_port(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr)
+{
+ lib_arp_get_mac_req++;
+ uint32_t nhip = 0;
+
+ nhip = get_nh(ipaddr, phy_port);
+ if (nhip == 0) {
+ if (ARPICMP_DEBUG)
+ printf("ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ //return 0;
+ return NH_NOT_FOUND;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port; /* Changed for Multi Port */
+ tmp_arp_key.ip = nhip;
+
+ if (ARPICMP_DEBUG)
+ printf("%s: nhip: %x, phyport: %d\n", __FUNCTION__, nhip,
+ *phy_port);
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG) {
+ printf
+ ("ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+ if (nhip != 0) {
+ if (ARPICMP_DEBUG)
+ printf("CG-NAPT requesting ARP for ip %x, "
+ "port %d\n", nhip, *phy_port);
+ request_arp(*phy_port, nhip); //Changed for Multiport
+
+ }
+ lib_arp_no_arp_entry_found++;
+ return ARP_NOT_FOUND;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ if (ARPICMP_DEBUG)
+ printf("%s: ARPICMP hwaddr found\n", __FUNCTION__);
+ return ARP_FOUND;
+}
+
+int get_dest_mac_address(const uint32_t ipaddr, uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint32_t *nhip)
+{
+ lib_arp_get_mac_req++;
+
+ *nhip = get_nh(ipaddr, phy_port);
+ if (*nhip == 0) {
+ if (ARPICMP_DEBUG && ipaddr)
+ RTE_LOG(INFO, LIBARP,
+ "ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ return 0;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port;
+ tmp_arp_key.ip = *nhip;
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG && ipaddr) {
+ RTE_LOG(INFO, LIBARP,
+ "ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+ lib_arp_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ return 1;
+
+}
+
+int get_dest_mac_addr(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr)
+{
+ lib_arp_get_mac_req++;
+ uint32_t nhip = 0;
+
+ nhip = get_nh(ipaddr, phy_port);
+ if (nhip == 0) {
+ if (ARPICMP_DEBUG && ipaddr)
+ RTE_LOG(INFO, LIBARP,
+ "ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ return 0;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port;
+ tmp_arp_key.ip = nhip;
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG && ipaddr) {
+ printf
+ ("ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+
+ if (nhip != 0) {
+ if (ARPICMP_DEBUG > 4)
+ printf
+ ("CG-NAPT requesting ARP for ip %x, port %d\n",
+ nhip, *phy_port);
+ if (ifm_chk_port_ipv4_enabled(*phy_port)) {
+ request_arp(*phy_port, nhip);
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s: IP is not enabled on port %u, not sending ARP REQ\n\r",
+ __FUNCTION__, *phy_port);
+ }
+
+ }
+ lib_arp_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ return 1;
+}
+
+int get_dest_mac_address_ipv6_port(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[])
+{
+ int i = 0, j = 0, flag = 0;
+ lib_nd_get_mac_req++;
+
+ get_nh_ipv6(ipv6addr, phy_port, nhipv6);
+ for (j = 0; j < 16; j++) {
+ if (nhipv6[j])
+ flag++;
+ }
+ if (flag == 0) {
+ if (NDIPV6_DEBUG)
+ printf("NDIPV6 no nh found for ipv6 "
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x, port %d\n",
+ ipv6addr[0], ipv6addr[1], ipv6addr[2],
+ ipv6addr[3], ipv6addr[4], ipv6addr[5],
+ ipv6addr[6], ipv6addr[7], ipv6addr[8],
+ ipv6addr[9], ipv6addr[10], ipv6addr[11],
+ ipv6addr[12], ipv6addr[13], ipv6addr[14],
+ ipv6addr[15], *phy_port);
+ return 0;
+ }
+
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ tmp_nd_key.port_id = *phy_port;
+
+ for (i = 0; i < 16; i++)
+ tmp_nd_key.ipv6[i] = nhipv6[i];
+
+ ret_nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (ret_nd_data == NULL) {
+ if (NDIPV6_DEBUG) {
+ printf("NDIPV6 no nd entry found for ip %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ }
+ lib_nd_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_nd_data->eth_addr, hw_addr);
+ lib_nd_nd_entry_found++;
+ return 1;
+}
+
+int get_dest_mac_address_ipv6(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[])
+{
+ int i = 0, j = 0, flag = 0;
+ lib_nd_get_mac_req++;
+
+ get_nh_ipv6(ipv6addr, phy_port, nhipv6);
+ for (j = 0; j < 16; j++) {
+ if (nhipv6[j]) {
+ flag++;
+ }
+ }
+ if (flag == 0) {
+ if (NDIPV6_DEBUG && ipv6addr)
+ RTE_LOG(INFO, LIBARP,
+ "NDIPV6 no nh found for ipv6 %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ return 0;
+ }
+
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ tmp_nd_key.port_id = *phy_port;
+
+ for (i = 0; i < 16; i++) {
+ tmp_nd_key.ipv6[i] = nhipv6[i];
+ }
+
+ ret_nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (ret_nd_data == NULL) {
+ if (NDIPV6_DEBUG && ipv6addr) {
+ RTE_LOG(INFO, LIBARP,
+ "NDIPV6 no nd entry found for ip %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ }
+ if (flag != 0) {
+ if (ARPICMP_DEBUG > 4)
+ printf
+ ("Requesting ARP for ipv6 addr and port %d\n",
+ *phy_port);
+ request_nd(&nhipv6[0], ifm_get_port(*phy_port));
+
+ }
+
+ lib_nd_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_nd_data->eth_addr, hw_addr);
+ lib_nd_nd_entry_found++;
+ return 1;
+}
+
+/**
+* A structure for arp entries in Arp table
+*
+*/
+struct lib_arp_arp_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint64_t macaddr; /**< Mac address */
+};
+
+static const char *arp_op_name(uint16_t arp_op)
+{
+ switch (CHECK_ENDIAN_16(arp_op)) {
+ case (ARP_OP_REQUEST):
+ return "ARP Request";
+ case (ARP_OP_REPLY):
+ return "ARP Reply";
+ case (ARP_OP_REVREQUEST):
+ return "Reverse ARP Request";
+ case (ARP_OP_REVREPLY):
+ return "Reverse ARP Reply";
+ case (ARP_OP_INVREQUEST):
+ return "Peer Identify Request";
+ case (ARP_OP_INVREPLY):
+ return "Peer Identify Reply";
+ default:
+ break;
+ }
+	return "Unknown ARP op";
+}
+
+static void print_icmp_packet(struct icmp_hdr *icmp_h)
+{
+ RTE_LOG(INFO, LIBARP, " ICMP: type=%d (%s) code=%d id=%d seqnum=%d\n",
+ icmp_h->icmp_type,
+ (icmp_h->icmp_type == IP_ICMP_ECHO_REPLY ? "Reply" :
+ (icmp_h->icmp_type ==
+			 IP_ICMP_ECHO_REQUEST ? "Request" : "Undef")),
+ icmp_h->icmp_code, CHECK_ENDIAN_16(icmp_h->icmp_ident),
+ CHECK_ENDIAN_16(icmp_h->icmp_seq_nb));
+}
+
+static void print_ipv4_h(struct ipv4_hdr *ip_h)
+{
+ struct icmp_hdr *icmp_h =
+ (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+ RTE_LOG(INFO, LIBARP, " IPv4: Version=%d HLEN=%d Type=%d Length=%d\n",
+ (ip_h->version_ihl & 0xf0) >> 4, (ip_h->version_ihl & 0x0f),
+ ip_h->type_of_service, rte_cpu_to_be_16(ip_h->total_length));
+ if (ip_h->next_proto_id == IPPROTO_ICMP) {
+ print_icmp_packet(icmp_h);
+ }
+}
+
+static void print_arp_packet(struct arp_hdr *arp_h)
+{
+ RTE_LOG(INFO, LIBARP, " ARP: hrd=%d proto=0x%04x hln=%d "
+ "pln=%d op=%u (%s)\n",
+ CHECK_ENDIAN_16(arp_h->arp_hrd),
+ CHECK_ENDIAN_16(arp_h->arp_pro), arp_h->arp_hln,
+ arp_h->arp_pln, CHECK_ENDIAN_16(arp_h->arp_op),
+ arp_op_name(arp_h->arp_op));
+
+ if (CHECK_ENDIAN_16(arp_h->arp_hrd) != ARP_HRD_ETHER) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_hrd format for IPv4 ARP (%d)\n",
+ (arp_h->arp_hrd));
+ } else if (CHECK_ENDIAN_16(arp_h->arp_pro) != ETHER_TYPE_IPv4) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_pro format for IPv4 ARP (%d)\n",
+ (arp_h->arp_pro));
+ } else if (arp_h->arp_hln != 6) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_hln format for IPv4 ARP (%d)\n",
+ arp_h->arp_hln);
+ } else if (arp_h->arp_pln != 4) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_pln format for IPv4 ARP (%d)\n",
+ arp_h->arp_pln);
+ } else {
+ RTE_LOG(INFO, LIBARP,
+ " sha=%02X:%02X:%02X:%02X:%02X:%02X",
+ arp_h->arp_data.arp_sha.addr_bytes[0],
+ arp_h->arp_data.arp_sha.addr_bytes[1],
+ arp_h->arp_data.arp_sha.addr_bytes[2],
+ arp_h->arp_data.arp_sha.addr_bytes[3],
+ arp_h->arp_data.arp_sha.addr_bytes[4],
+ arp_h->arp_data.arp_sha.addr_bytes[5]);
+ RTE_LOG(INFO, LIBARP, " sip=%d.%d.%d.%d\n",
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 24) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 16) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 8) & 0xFF,
+ CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) & 0xFF);
+ RTE_LOG(INFO, LIBARP,
+ " tha=%02X:%02X:%02X:%02X:%02X:%02X",
+ arp_h->arp_data.arp_tha.addr_bytes[0],
+ arp_h->arp_data.arp_tha.addr_bytes[1],
+ arp_h->arp_data.arp_tha.addr_bytes[2],
+ arp_h->arp_data.arp_tha.addr_bytes[3],
+ arp_h->arp_data.arp_tha.addr_bytes[4],
+ arp_h->arp_data.arp_tha.addr_bytes[5]);
+ RTE_LOG(INFO, LIBARP, " tip=%d.%d.%d.%d\n",
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 24) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 16) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 8) & 0xFF,
+ CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) & 0xFF);
+ }
+}
+
+static void print_eth(struct ether_hdr *eth_h)
+{
+ RTE_LOG(INFO, LIBARP, " ETH: src=%02X:%02X:%02X:%02X:%02X:%02X",
+ eth_h->s_addr.addr_bytes[0],
+ eth_h->s_addr.addr_bytes[1],
+ eth_h->s_addr.addr_bytes[2],
+ eth_h->s_addr.addr_bytes[3],
+ eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5]);
+ RTE_LOG(INFO, LIBARP, " dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eth_h->d_addr.addr_bytes[0],
+ eth_h->d_addr.addr_bytes[1],
+ eth_h->d_addr.addr_bytes[2],
+ eth_h->d_addr.addr_bytes[3],
+ eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5]);
+
+}
+
+static void
+print_mbuf(const char *rx_tx, uint8_t portid, struct rte_mbuf *mbuf,
+ unsigned line)
+{
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ struct arp_hdr *arp_h =
+ (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ struct ipv4_hdr *ipv4_h =
+ (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+
+ RTE_LOG(INFO, LIBARP, "%s(%d): on port %d pkt-len=%u nb-segs=%u\n",
+ rx_tx, line, portid, mbuf->pkt_len, mbuf->nb_segs);
+ print_eth(eth_h);
+ switch (rte_cpu_to_be_16(eth_h->ether_type)) {
+ case ETHER_TYPE_IPv4:
+ print_ipv4_h(ipv4_h);
+ break;
+ case ETHER_TYPE_ARP:
+ print_arp_packet(arp_h);
+ break;
+ default:
+ RTE_LOG(INFO, LIBARP, " unknown packet type\n");
+ break;
+ }
+ fflush(stdout);
+}
+
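+/* Look up an ARP entry by (next-hop IP, port). For DYNAMIC_ARP entries the
+ * expiry timer is re-armed for another arp_timeout seconds on every
+ * successful lookup. */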
+struct arp_entry_data *retrieve_arp_entry(struct arp_key_ipv4 arp_key)
+{
+ struct arp_entry_data *ret_arp_data = NULL;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ int ret = rte_hash_lookup_data(arp_hash_handle, &arp_key,
+ (void **)&ret_arp_data);
+ if (ret < 0) {
+ // RTE_LOG(INFO, LIBARP,"arp-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n", ret, EINVAL, ENOENT);
+ } else {
+
+ if (ret_arp_data->mode == DYNAMIC_ARP) {
+ struct arp_timer_key callback_key;
+ callback_key.port_id = ret_arp_data->port;
+ callback_key.ip = ret_arp_data->ip;
+ /*lcore need to check which parameter need to be put */
+ if (rte_timer_reset(ret_arp_data->timer,
+ (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore,
+ arp_timer_callback,
+ &callback_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+ }
+
+ return ret_arp_data;
+ }
+
+ return NULL;
+}
+
+struct nd_entry_data *retrieve_nd_entry(struct nd_key_ipv6 nd_key)
+{
+ struct nd_entry_data *ret_nd_data = NULL;
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+ int i = 0;
+
+ /*Find a nd IPv6 key-data pair in the hash table for ND IPv6 */
+ int ret = rte_hash_lookup_data(nd_hash_handle, &nd_key,
+ (void **)&ret_nd_data);
+ if (ret < 0) {
+/* RTE_LOG(INFO, LIBARP,"nd-hash: no lookup Entry Found - ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);*/
+ } else {
+ if (ret_nd_data->mode == DYNAMIC_ND) {
+ struct nd_timer_key callback_key;
+ callback_key.port_id = ret_nd_data->port;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ callback_key.ipv6[i] = ret_nd_data->ipv6[i];
+
+ }
+
+ if (rte_timer_reset
+ (ret_nd_data->timer,
+ (arp_timeout * rte_get_tsc_hz()), SINGLE,
+ timer_lcore, nd_timer_callback, &callback_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+ }
+ return ret_nd_data;
+ }
+
+ return NULL;
+}
+
+void print_arp_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ printf
+ ("------------------------ ARP CACHE -----------------------------------------\n");
+ printf
+ ("----------------------------------------------------------------------------\n");
+ printf("\tport hw addr status ip addr\n");
+ printf
+ ("----------------------------------------------------------------------------\n");
+
+ while (rte_hash_iterate(arp_hash_handle, &next_key, &next_data, &iter)
+ >= 0) {
+
+ struct arp_entry_data *tmp_arp_data =
+ (struct arp_entry_data *)next_data;
+ struct arp_key_ipv4 tmp_arp_key;
+ memcpy(&tmp_arp_key, next_key, sizeof(struct arp_key_ipv4));
+ printf
+ ("\t%4d %02X:%02X:%02X:%02X:%02X:%02X %10s %d.%d.%d.%d\n",
+ tmp_arp_data->port, tmp_arp_data->eth_addr.addr_bytes[0],
+ tmp_arp_data->eth_addr.addr_bytes[1],
+ tmp_arp_data->eth_addr.addr_bytes[2],
+ tmp_arp_data->eth_addr.addr_bytes[3],
+ tmp_arp_data->eth_addr.addr_bytes[4],
+ tmp_arp_data->eth_addr.addr_bytes[5],
+ tmp_arp_data->status ==
+ COMPLETE ? "COMPLETE" : "INCOMPLETE",
+ (tmp_arp_data->ip >> 24),
+ ((tmp_arp_data->ip & 0x00ff0000) >> 16),
+ ((tmp_arp_data->ip & 0x0000ff00) >> 8),
+ ((tmp_arp_data->ip & 0x000000ff)));
+ }
+
+ uint32_t i = 0;
+ printf("\nARP routing table has %d entries\n", arp_route_tbl_index);
+ printf("\nIP_Address Mask Port NH_IP_Address\n");
+ for (i = 0; i < arp_route_tbl_index; i++) {
+ printf("0x%x 0x%x %d 0x%x\n",
+ lib_arp_route_table[i].ip,
+ lib_arp_route_table[i].mask,
+ lib_arp_route_table[i].port, lib_arp_route_table[i].nh);
+ }
+
+ printf
+ ("\nARP Stats: Total Queries %u, ok_NH %u, no_NH %u, ok_Entry %u, no_Entry %u, PopulateCall %u, Del %u, Dup %u\n",
+ lib_arp_get_mac_req, lib_arp_nh_found, lib_arp_no_nh_found,
+ lib_arp_arp_entry_found, lib_arp_no_arp_entry_found,
+ lib_arp_populate_called, lib_arp_delete_called,
+ lib_arp_duplicate_found);
+
+ printf("ARP table key len is %lu\n", sizeof(struct arp_key_ipv4));
+}
+
+/* ND IPv6 */
+void print_nd_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ uint8_t ii = 0, j = 0, k = 0;
+ printf
+ ("------------------------------------------------------------------------------------------------------\n");
+ printf("\tport hw addr status ip addr\n");
+
+ printf
+ ("------------------------------------------------------------------------------------------------------\n");
+ while (rte_hash_iterate(nd_hash_handle, &next_key, &next_data, &iter) >=
+ 0) {
+
+ struct nd_entry_data *tmp_nd_data =
+ (struct nd_entry_data *)next_data;
+ struct nd_key_ipv6 tmp_nd_key;
+ memcpy(&tmp_nd_key, next_key, sizeof(struct nd_key_ipv6));
+ printf("\t%4d %02X:%02X:%02X:%02X:%02X:%02X %10s\n",
+ tmp_nd_data->port,
+ tmp_nd_data->eth_addr.addr_bytes[0],
+ tmp_nd_data->eth_addr.addr_bytes[1],
+ tmp_nd_data->eth_addr.addr_bytes[2],
+ tmp_nd_data->eth_addr.addr_bytes[3],
+ tmp_nd_data->eth_addr.addr_bytes[4],
+ tmp_nd_data->eth_addr.addr_bytes[5],
+ tmp_nd_data->status ==
+ COMPLETE ? "COMPLETE" : "INCOMPLETE");
+ printf("\t\t\t\t\t\t");
+ for (ii = 0; ii < ND_IPV6_ADDR_SIZE; ii += 2) {
+ printf("%02X%02X ", tmp_nd_data->ipv6[ii],
+ tmp_nd_data->ipv6[ii + 1]);
+ }
+ printf("\n");
+ }
+
+ uint32_t i = 0;
+ printf("\n\nND IPV6 routing table has %d entries\n",
+ nd_route_tbl_index);
+ printf
+ ("\nIP_Address Depth Port NH_IP_Address\n");
+ for (i = 0; i < nd_route_tbl_index; i++) {
+ printf("\n");
+
+ for (j = 0; j < ND_IPV6_ADDR_SIZE; j += 2) {
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ lib_nd_route_table[i].ipv6[j],
+ lib_nd_route_table[i].ipv6[j + 1]);
+ }
+
+ printf
+ ("\n\t\t\t %d %d \n",
+ lib_nd_route_table[i].depth, lib_nd_route_table[i].port);
+ printf("\t\t\t\t\t\t\t\t\t");
+ for (k = 0; k < ND_IPV6_ADDR_SIZE; k += 2) {
+			printf("%02X%02X ", lib_nd_route_table[i].nhipv6[k],
+			       lib_nd_route_table[i].nhipv6[k + 1]);
+ }
+ }
+ printf
+ ("\nND IPV6 Stats: \nTotal Queries %u, ok_NH %u, no_NH %u, ok_Entry %u, no_Entry %u, PopulateCall %u, Del %u, Dup %u\n",
+ lib_nd_get_mac_req, lib_nd_nh_found, lib_nd_no_nh_found,
+ lib_nd_nd_entry_found, lib_nd_no_arp_entry_found,
+ lib_nd_populate_called, lib_nd_delete_called,
+ lib_nd_duplicate_found);
+ printf("ND table key len is %lu\n\n", sizeof(struct nd_key_ipv6));
+}
+
+void remove_arp_entry(uint32_t ipaddr, uint8_t portid, void *arg)
+{
+
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = portid;
+ arp_key.ip = ipaddr;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ lib_arp_delete_called++;
+
+ struct arp_entry_data *ret_arp_data = NULL;
+
+ int ret = rte_hash_lookup_data(arp_hash_handle, &arp_key,
+ (void **)&ret_arp_data);
+ if (ret < 0) {
+// RTE_LOG(INFO, LIBARP,"arp-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n", ret, EINVAL, ENOENT);
+ return;
+ } else {
+ if (ret_arp_data->mode == DYNAMIC_ARP) {
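+			/* Dynamic entries are retried up to 3 times: on the final
+			 * retry the entry is deleted, otherwise another ARP request
+			 * is sent and the expiry timer is restarted. */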
+ if (ret_arp_data->retry_count == 3) {
+ rte_timer_stop(ret_arp_data->timer);
+ rte_free(ret_arp_data->timer_key);
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "ARP Entry Deleted for IP :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >>
+ 16),
+ ((arp_key.ip & 0x0000ff00) >>
+ 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ rte_hash_del_key(arp_hash_handle, &arp_key);
+ //print_arp_table();
+ } else {
+ ret_arp_data->retry_count++;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "RETRY ARP..retry count : %u\n",
+ ret_arp_data->retry_count);
+ //print_arp_table();
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "TIMER STARTED FOR %u seconds\n",
+ ARP_TIMER_EXPIRY);
+ if (ifm_chk_port_ipv4_enabled
+ (ret_arp_data->port)) {
+ request_arp(ret_arp_data->port,
+ ret_arp_data->ip);
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+							"%s: IP is not enabled on port %u, not sending ARP request\n\r",
+ __FUNCTION__,
+ ret_arp_data->port);
+ }
+ if (rte_timer_reset(ret_arp_data->timer,
+ (arp_timeout *
+ rte_get_tsc_hz()), SINGLE,
+ timer_lcore,
+ arp_timer_callback,
+ arg) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+
+ }
+ } else {
+ rte_hash_del_key(arp_hash_handle, &arp_key);
+ }
+ }
+}
+
+/* ND IPv6 */
+void remove_nd_entry_ipv6(uint8_t ipv6addr[], uint8_t portid)
+{
+ int i = 0;
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ nd_key.ipv6[i] = ipv6addr[i];
+ }
+
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ lib_nd_delete_called++;
+
+ if (NDIPV6_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "Deletes rte hash table nd entry for port %d ipv6=",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ RTE_LOG(INFO, LIBARP, "%02X%02X ", nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+	/* Look up the entry in the ND hash (not the ARP hash) using the full
+	 * nd_key so the key length matches the table configuration. */
+	int ret = rte_hash_lookup_data(nd_hash_handle, &nd_key,
+					 (void **)&ret_nd_data);
+ if (ret < 0) {
+// RTE_LOG(INFO, LIBARP,"arp-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n", ret, EINVAL, ENOENT);
+ } else {
+ if (ret_nd_data->mode == DYNAMIC_ND) {
+ rte_timer_stop(ret_nd_data->timer);
+ rte_free(ret_nd_data->timer);
+ }
+ }
+ rte_hash_del_key(nd_hash_handle, &nd_key);
+}
+
+void
+populate_arp_entry(const struct ether_addr *hw_addr, uint32_t ipaddr,
+ uint8_t portid, uint8_t mode)
+{
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = portid;
+ arp_key.ip = ipaddr;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ lib_arp_populate_called++;
+
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "populate_arp_entry ip %x, port %d\n",
+ arp_key.ip, arp_key.port_id);
+
+ struct arp_entry_data *new_arp_data = retrieve_arp_entry(arp_key);
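+	/* Do not let a dynamic learn overwrite a static entry (or vice versa). */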
+ if (new_arp_data && ((new_arp_data->mode == STATIC_ARP
+ && mode == DYNAMIC_ARP) || (new_arp_data->mode == DYNAMIC_ARP
+ && mode == STATIC_ARP))) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,"populate_arp_entry: ARP entry already exists(%d %d)\n",
+ new_arp_data->mode, mode);
+
+ return;
+ }
+
+ if (mode == DYNAMIC_ARP) {
+ if (new_arp_data
+ && is_same_ether_addr(&new_arp_data->eth_addr, hw_addr)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "arp_entry exists ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ lib_arp_duplicate_found++;
+ new_arp_data->retry_count = 0; // Reset
+ if (rte_timer_reset(new_arp_data->timer,
+ (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore,
+ arp_timer_callback,
+ new_arp_data->timer_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+ return;
+ }
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct arp_entry_data));
+ new_arp_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ new_arp_data->eth_addr = *hw_addr;
+ new_arp_data->status = COMPLETE;
+ new_arp_data->port = portid;
+ new_arp_data->ip = ipaddr;
+ new_arp_data->mode = mode;
+ if (rte_mempool_get
+ (timer_mempool_arp, (void **)&(new_arp_data->timer)) < 0) {
+ RTE_LOG(INFO, LIBARP,
+ "TIMER - Error in getting timer alloc buffer\n");
+ return;
+ }
+
+ rte_hash_add_key_data(arp_hash_handle, &arp_key, new_arp_data);
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+				"new arp_entry added for ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)), arp_key.port_id);
+ }
+ // Call l3fwd module for resolving 2_adj structure.
+ resolve_l2_adj(ipaddr, portid, hw_addr);
+
+ rte_timer_init(new_arp_data->timer);
+		/* Allocate the full key structure (not just a pointer) for the
+		 * timer callback argument. */
+		struct arp_timer_key *callback_key =
+		    (struct arp_timer_key *)rte_malloc(NULL,
+						       sizeof(struct arp_timer_key),
+						       RTE_CACHE_LINE_SIZE);
+ callback_key->port_id = portid;
+ callback_key->ip = ipaddr;
+
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "TIMER STARTED FOR %u seconds\n",
+ ARP_TIMER_EXPIRY);
+ if (rte_timer_reset
+ (new_arp_data->timer, (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore, arp_timer_callback, callback_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+
+ new_arp_data->timer_key = callback_key;
+ } else {
+ if (new_arp_data
+ && is_same_ether_addr(&new_arp_data->eth_addr, hw_addr)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "arp_entry exists ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ lib_arp_duplicate_found++;
+ } else {
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof
+ (struct arp_entry_data));
+ new_arp_data =
+ rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ new_arp_data->eth_addr = *hw_addr;
+ new_arp_data->status = COMPLETE;
+ new_arp_data->port = portid;
+ new_arp_data->ip = ipaddr;
+ new_arp_data->mode = mode;
+
+ rte_hash_add_key_data(arp_hash_handle, &arp_key,
+ new_arp_data);
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+					"new arp_entry added for ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ // Call l3fwd module for resolving 2_adj structure.
+ resolve_l2_adj(ipaddr, portid, hw_addr);
+ }
+ }
+ if (ARPICMP_DEBUG) {
+ /* print entire hash table */
+ RTE_LOG(INFO, LIBARP,
+ "\tARP: table update - hwaddr=%02x:%02x:%02x:%02x:%02x:%02x ip=%d.%d.%d.%d on port=%d\n",
+ new_arp_data->eth_addr.addr_bytes[0],
+ new_arp_data->eth_addr.addr_bytes[1],
+ new_arp_data->eth_addr.addr_bytes[2],
+ new_arp_data->eth_addr.addr_bytes[3],
+ new_arp_data->eth_addr.addr_bytes[4],
+ new_arp_data->eth_addr.addr_bytes[5],
+ (arp_key.ip >> 24), ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)), portid);
+ puts("");
+ }
+}
+
+/*
+ * ND IPv6
+ *
+ * Install key - data pair in Hash table - From Pipeline Configuration
+ *
+ */
+
+void populate_nd_entry(const struct ether_addr *hw_addr, uint8_t ipv6[],
+ uint8_t portid, uint8_t mode)
+{
+
+ /* need to lock here if multi-threaded */
+ /* rte_hash_add_key_data is not thread safe */
+ uint8_t i;
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ nd_key.ipv6[i] = ipv6[i];
+
+// RTE_LOG(INFO, LIBARP,"\n");
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ lib_nd_populate_called++;
+
+ /* Validate if key-value pair already exists in the hash table for ND IPv6 */
+ struct nd_entry_data *new_nd_data = retrieve_nd_entry(nd_key);
+
+ if (mode == DYNAMIC_ND) {
+ if (new_nd_data
+ && is_same_ether_addr(&new_nd_data->eth_addr, hw_addr)) {
+
+ if (NDIPV6_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "nd_entry exists port %d ipv6 = ",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+
+ lib_nd_duplicate_found++;
+ RTE_LOG(INFO, LIBARP, "nd_entry exists\n");
+ return;
+ }
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct nd_entry_data));
+ new_nd_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ //new_nd_data = (struct nd_entry_data *)rte_malloc(NULL, sizeof(struct nd_entry_data *),RTE_CACHE_LINE_SIZE);
+ new_nd_data->eth_addr = *hw_addr;
+ new_nd_data->status = COMPLETE;
+ new_nd_data->port = portid;
+ new_nd_data->mode = mode;
+ if (rte_mempool_get
+ (timer_mempool_arp, (void **)&(new_nd_data->timer)) < 0) {
+ RTE_LOG(INFO, LIBARP,
+ "TIMER - Error in getting timer alloc buffer\n");
+ return;
+ }
+
+ if (NDIPV6_DEBUG)
+ RTE_LOG(INFO, LIBARP, "populate_nd_entry ipv6=");
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ new_nd_data->ipv6[i] = ipv6[i];
+ }
+
+ if (NDIPV6_DEBUG) {
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ new_nd_data->ipv6[i],
+ new_nd_data->ipv6[i + 1]);
+ }
+ }
+
+ /*Add a key-data pair at hash table for ND IPv6 static routing */
+ rte_hash_add_key_data(nd_hash_handle, &nd_key, new_nd_data);
+ /* need to check the return value of the hash add */
+
+ /* after the hash is created then time is started */
+ rte_timer_init(new_nd_data->timer);
+		struct nd_timer_key *callback_key =
+		    (struct nd_timer_key *)rte_malloc(NULL,
+						      sizeof(struct nd_timer_key),
+						      RTE_CACHE_LINE_SIZE);
+ callback_key->port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ callback_key->ipv6[i] = ipv6[i];
+ }
+ if (rte_timer_reset
+ (new_nd_data->timer, (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore, nd_timer_callback, callback_key) < 0)
+ RTE_LOG(INFO, LIBARP, "Err : Timer already running\n");
+ } else {
+ if (new_nd_data
+ && is_same_ether_addr(&new_nd_data->eth_addr, hw_addr)) {
+ if (NDIPV6_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "nd_entry exists port %d ipv6 = ",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+
+ lib_nd_duplicate_found++;
+ } else {
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof
+ (struct nd_entry_data));
+ new_nd_data =
+ rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ //new_nd_data = (struct nd_entry_data *)rte_malloc(NULL, sizeof(struct nd_entry_data *),RTE_CACHE_LINE_SIZE);
+ new_nd_data->eth_addr = *hw_addr;
+ new_nd_data->status = COMPLETE;
+ new_nd_data->port = portid;
+ new_nd_data->mode = mode;
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ new_nd_data->ipv6[i] = ipv6[i];
+ }
+
+ /*Add a key-data pair at hash table for ND IPv6 static routing */
+ rte_hash_add_key_data(nd_hash_handle, &nd_key,
+ new_nd_data);
+ /* need to check the return value of the hash add */
+ }
+ }
+ if (NDIPV6_DEBUG)
+ printf
+ ("\n....Added a key-data pair at rte hash table for ND IPv6 static routing\n");
+
+ if (1) {
+ /* print entire hash table */
+ printf
+ ("\tND: table update - hwaddr=%02x:%02x:%02x:%02x:%02x:%02x on port=%d\n",
+ new_nd_data->eth_addr.addr_bytes[0],
+ new_nd_data->eth_addr.addr_bytes[1],
+ new_nd_data->eth_addr.addr_bytes[2],
+ new_nd_data->eth_addr.addr_bytes[3],
+ new_nd_data->eth_addr.addr_bytes[4],
+ new_nd_data->eth_addr.addr_bytes[5], portid);
+ RTE_LOG(INFO, LIBARP, "\tipv6=");
+		for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+			RTE_LOG(INFO, LIBARP, "%02X%02X ", new_nd_data->ipv6[i],
+				new_nd_data->ipv6[i + 1]);
+		}
+
+ RTE_LOG(INFO, LIBARP, "\n");
+
+ puts("");
+ }
+}
+
+void print_pkt1(struct rte_mbuf *pkt)
+{
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
+ int i = 0, j = 0;
+ RTE_LOG(INFO, LIBARP, "\nPacket Contents...\n");
+ for (i = 0; i < 20; i++) {
+ for (j = 0; j < 20; j++)
+ RTE_LOG(INFO, LIBARP, "%02x ", rd[(20 * i) + j]);
+ RTE_LOG(INFO, LIBARP, "\n");
+ }
+}
+
+struct ether_addr broadcast_ether_addr = {
+ .addr_bytes[0] = 0xFF,
+ .addr_bytes[1] = 0xFF,
+ .addr_bytes[2] = 0xFF,
+ .addr_bytes[3] = 0xFF,
+ .addr_bytes[4] = 0xFF,
+ .addr_bytes[5] = 0xFF,
+};
+
+static const struct ether_addr null_ether_addr = {
+ .addr_bytes[0] = 0x00,
+ .addr_bytes[1] = 0x00,
+ .addr_bytes[2] = 0x00,
+ .addr_bytes[3] = 0x00,
+ .addr_bytes[4] = 0x00,
+ .addr_bytes[5] = 0x00,
+};
+
+#define MAX_NUM_MAC_ADDRESS 16
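+/* Default per-port MAC table. Entries are overwritten, in order, from the
+ * "ports_mac_list" pipeline argument in arp_parse_args(). */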
+struct ether_addr link_hw_addr[MAX_NUM_MAC_ADDRESS] = {
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc8} },
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc9} },
+{.addr_bytes = {0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc9} },
+{.addr_bytes = {0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x18, 0x19, 0x1a, 0x1b, 0xcd, 0xef} }
+};
+
+struct ether_addr *get_link_hw_addr(uint8_t out_port)
+{
+ return &link_hw_addr[out_port];
+}
+
+void request_arp(uint8_t port_id, uint32_t ip)
+{
+
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+
+ l2_phy_interface_t *link;
+ link = ifm_get_port(port_id);
+ struct rte_mbuf *arp_pkt = lib_arp_pkt;
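+	/* lib_arp_pkt is a single mbuf allocated once in lib_arp_init() and
+	 * reused for every generated ARP request. */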
+
+ if (arp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating arp_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(arp_pkt, struct ether_hdr *);
+
+ ether_addr_copy(&broadcast_ether_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &link->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_ARP);
+
+ arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ arp_h->arp_hrd = CHECK_ENDIAN_16(ARP_HRD_ETHER);
+ arp_h->arp_pro = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+ arp_h->arp_hln = ETHER_ADDR_LEN;
+ arp_h->arp_pln = sizeof(uint32_t);
+ arp_h->arp_op = CHECK_ENDIAN_16(ARP_OP_REQUEST);
+
+ ether_addr_copy((struct ether_addr *)
+ &link->macaddr[0], &arp_h->arp_data.arp_sha);
+ if (link && link->ipv4_list) {
+ arp_h->arp_data.arp_sip =
+ (((ipv4list_t *) (link->ipv4_list))->ipaddr);
+ }
+ ether_addr_copy(&null_ether_addr, &arp_h->arp_data.arp_tha);
+ arp_h->arp_data.arp_tip = rte_cpu_to_be_32(ip);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "arp tip:%x arp sip :%x\n",
+ arp_h->arp_data.arp_tip, arp_h->arp_data.arp_sip);
+ // mmcd changed length from 60 to 42 - real length of arp request, no padding on ethernet needed - looks now like linux arp
+ arp_pkt->pkt_len = 42;
+ arp_pkt->data_len = 42;
+
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP, "Sending arp request\n");
+ print_mbuf("TX", port_id, arp_pkt, __LINE__);
+ }
+ if (link)
+ link->transmit_single_pkt(link, arp_pkt);
+}
+
+struct rte_mbuf *request_echo(uint32_t port_id, uint32_t ip)
+{
+ struct ether_hdr *eth_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+ l2_phy_interface_t *port = ifm_get_port(port_id);
+
+ struct rte_mbuf *icmp_pkt = lib_arp_pkt;
+ if (icmp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating icmp_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmp_pkt, struct ether_hdr *);
+
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmp_h = (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ ip_h->version_ihl = IP_VHL_DEF;
+ ip_h->type_of_service = 0;
+ ip_h->total_length =
+ rte_cpu_to_be_16(sizeof(struct ipv4_hdr) + sizeof(struct icmp_hdr));
+ ip_h->packet_id = 0xaabb;
+ ip_h->fragment_offset = 0x0000;
+ ip_h->time_to_live = 64;
+ ip_h->next_proto_id = IPPROTO_ICMP;
+ if (port && port->ipv4_list)
+ ip_h->src_addr =
+ rte_cpu_to_be_32(((ipv4list_t *) port->ipv4_list)->ipaddr);
+ ip_h->dst_addr = rte_cpu_to_be_32(ip);
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = rte_ipv4_cksum(ip_h);
+
+ icmp_h->icmp_type = IP_ICMP_ECHO_REQUEST;
+ icmp_h->icmp_code = 0;
+ icmp_h->icmp_ident = 0xdead;
+ icmp_h->icmp_seq_nb = 0xbeef;
+
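+	/* No payload is appended, so the checksum covers the ICMP header only. */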
+ icmp_h->icmp_cksum = ~rte_raw_cksum(icmp_h, sizeof(struct icmp_hdr));
+
+ icmp_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr);
+ icmp_pkt->data_len = icmp_pkt->pkt_len;
+
+ print_mbuf("TX", 0, icmp_pkt, __LINE__);
+
+ return icmp_pkt;
+}
+
+#if 0
+/**
+ * Function to send ICMP dest unreachable msg
+ *
+ */
+struct rte_mbuf *send_icmp_dest_unreachable_msg(uint32_t src_ip,
+ uint32_t dest_ip)
+{
+ struct ether_hdr *eth_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+ struct rte_mbuf *icmp_pkt = lib_arp_pkt;
+
+ if (icmp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating icmp_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmp_pkt, struct ether_hdr *);
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmp_h = (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ ip_h->version_ihl = IP_VHL_DEF;
+ ip_h->type_of_service = 0;
+ ip_h->total_length =
+ rte_cpu_to_be_16(sizeof(struct ipv4_hdr) + sizeof(struct icmp_hdr));
+ ip_h->packet_id = 0xaabb;
+ ip_h->fragment_offset = 0x0000;
+ ip_h->time_to_live = 64;
+ ip_h->next_proto_id = 1;
+
+ ip_h->dst_addr = rte_bswap32(dest_ip);
+ ip_h->src_addr = rte_bswap32(src_ip);
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = rte_ipv4_cksum(ip_h);
+
+ icmp_h->icmp_type = 3; /* Destination Unreachable */
+ icmp_h->icmp_code = 13; /* Communication administratively prohibited */
+
+ icmp_h->icmp_cksum = ~rte_raw_cksum(icmp_h, sizeof(struct icmp_hdr));
+
+ icmp_pkt->pkt_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr);
+ icmp_pkt->data_len = icmp_pkt->pkt_len;
+
+ return icmp_pkt;
+}
+#endif
+void
+process_arpicmp_pkt_parse(struct rte_mbuf **pkt, uint16_t pkt_num,
+ uint64_t pkt_mask, l2_phy_interface_t *port)
+{
+ RTE_SET_USED(pkt_num);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "============ARP ENTRY================\n");
+ if (pkt_mask) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "============ARP PROCESS================\n");
+ }
+
+ uint64_t pkts_for_process = pkt_mask;
+ for (; pkts_for_process;) {
+/**< process only valid packets. */
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkts_mask = 1LLU << pos; /** <bitmask representing only this packet. */
+ pkts_for_process &= ~pkts_mask; /** <remove this packet from the mask. */
+ process_arpicmp_pkt(pkt[pos], port);
+ }
+
+}
+
+void process_arpicmp_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port)
+{
+ uint8_t in_port_id = pkt->port;
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+
+ uint32_t cksum;
+ uint32_t ip_addr;
+
+ uint32_t req_tip;
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ if (eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "%s, portid %u. Line %d\n\r",
+ __FUNCTION__, port->pmdid, __LINE__);
+ arp_h =
+ (struct arp_hdr *)((char *)eth_h +
+ sizeof(struct ether_hdr));
+ if (CHECK_ENDIAN_16(arp_h->arp_hrd) != ARP_HRD_ETHER)
+ RTE_LOG(INFO, LIBARP,
+				"Invalid hardware address format - not processing ARP req\n");
+ else if (CHECK_ENDIAN_16(arp_h->arp_pro) != ETHER_TYPE_IPv4)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid protocol address format - not processing ARP req\n");
+ else if (arp_h->arp_hln != 6)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid hardware address length - not processing ARP req\n");
+ else if (arp_h->arp_pln != 4)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid protocol address length - not processing ARP req\n");
+ else {
+ if (port->ipv4_list == NULL) {
+ RTE_LOG(INFO, LIBARP,
+ "Ports IPV4 List is NULL.. Unable to Process\n");
+ return;
+ }
+
+ if (arp_h->arp_data.arp_tip !=
+ ((ipv4list_t *) (port->ipv4_list))->ipaddr) {
+ if (arp_h->arp_data.arp_tip == arp_h->arp_data.arp_sip) {
+ populate_arp_entry(
+ (struct ether_addr *)&arp_h->arp_data.arp_sha,
+ rte_cpu_to_be_32(arp_h->arp_data.arp_sip),
+ in_port_id,
+ DYNAMIC_ARP);
+
+ } else {
+ RTE_LOG(INFO, LIBARP,"ARP requested IP address mismatches interface IP - discarding\n");
+ }
+ }
+			/* TODO: revise conditionals to allow processing of requests
+			 * with target ip = this ip and processing of replies to
+			 * destination ip = this ip */
+ else if (arp_h->arp_op ==
+ rte_cpu_to_be_16(ARP_OP_REQUEST)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "%s, portid %u. Line %d\n\r",
+ __FUNCTION__, port->pmdid,
+ __LINE__);
+
+ RTE_LOG(INFO, LIBARP,
+ "arp_op %d, ARP_OP_REQUEST %d\n",
+ arp_h->arp_op,
+ rte_cpu_to_be_16
+ (ARP_OP_REQUEST));
+ print_mbuf("RX", in_port_id, pkt,
+ __LINE__);
+ }
+
+ populate_arp_entry((struct ether_addr *)
+ &arp_h->arp_data.arp_sha,
+ rte_cpu_to_be_32
+ (arp_h->arp_data.arp_sip),
+ in_port_id, DYNAMIC_ARP);
+
+ /*build reply */
+ req_tip = arp_h->arp_data.arp_tip;
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0], &eth_h->s_addr); /**< set sender mac address*/
+ arp_h->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+ ether_addr_copy(&eth_h->s_addr,
+ &arp_h->arp_data.arp_sha);
+ arp_h->arp_data.arp_tip =
+ arp_h->arp_data.arp_sip;
+ arp_h->arp_data.arp_sip = req_tip;
+ ether_addr_copy(&eth_h->d_addr,
+ &arp_h->arp_data.arp_tha);
+
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX ARP REPLY PKT",
+ port->pmdid, pkt, __LINE__);
+ port->transmit_bulk_pkts(port, &pkt, 1);
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX", port->pmdid, pkt,
+ __LINE__);
+
+ return;
+ } else if (arp_h->arp_op ==
+ rte_cpu_to_be_16(ARP_OP_REPLY)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "ARP_OP_REPLY received");
+ print_mbuf("RX", port->pmdid, pkt,
+ __LINE__);
+ }
+ populate_arp_entry((struct ether_addr *)
+ &arp_h->arp_data.arp_sha,
+ rte_bswap32(arp_h->
+ arp_data.arp_sip),
+ in_port_id, DYNAMIC_ARP);
+
+ return;
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid ARP opcode - not processing ARP req %x\n",
+ arp_h->arp_op);
+ }
+ }
+
+ rte_pktmbuf_free(pkt);
+ } else {
+ ip_h =
+ (struct ipv4_hdr *)((char *)eth_h +
+ sizeof(struct ether_hdr));
+ icmp_h =
+ (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ if (eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+
+ if (ip_h->next_proto_id != IPPROTO_ICMP) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "IP protocol ID is not set to ICMP - discarding\n");
+ }
+ } else if ((ip_h->version_ihl & 0xf0) != IP_VERSION_4) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "IP version other than 4 - discarding\n");
+ }
+ } else if ((ip_h->version_ihl & 0x0f) != IP_HDRLEN) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "Unknown IHL - discarding\n");
+ }
+ } else {
+ if (icmp_h->icmp_type == IP_ICMP_ECHO_REQUEST
+ && icmp_h->icmp_code == 0) {
+ if (ARPICMP_DEBUG)
+ print_mbuf("RX", in_port_id,
+ pkt, __LINE__);
+
+ ip_addr = ip_h->src_addr;
+ ether_addr_copy(&eth_h->s_addr,
+ &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &port->macaddr[0],
+ &eth_h->s_addr);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s, portid %u. Line %d\n\r",
+ __FUNCTION__,
+ port->pmdid, __LINE__);
+
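+				/* For a multicast destination, synthesize a unicast
+				 * source from the requester's /30: reply as .2 when
+				 * the requester is .1, otherwise as .1, and recompute
+				 * the IPv4 header checksum. */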
+ if (is_multicast_ipv4_addr
+ (ip_h->dst_addr)) {
+ uint32_t ip_src;
+
+ ip_src =
+ rte_be_to_cpu_32(ip_addr);
+ if ((ip_src & 0x00000003) == 1)
+ ip_src =
+ (ip_src &
+ 0xFFFFFFFC) |
+ 0x00000002;
+ else
+ ip_src =
+ (ip_src &
+ 0xFFFFFFFC) |
+ 0x00000001;
+ ip_h->src_addr =
+ rte_cpu_to_be_32(ip_src);
+ ip_h->dst_addr = ip_addr;
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum =
+ ~rte_raw_cksum(ip_h,
+ sizeof(struct
+ ipv4_hdr));
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s, portid %u. Line %d\n\r",
+ __FUNCTION__,
+ port->pmdid,
+ __LINE__);
+ ip_h->src_addr = ip_h->dst_addr;
+ ip_h->dst_addr = ip_addr;
+ }
+
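+				/* Patch the ICMP checksum in place for the type change
+				 * (echo request -> echo reply) instead of recomputing it. */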
+ icmp_h->icmp_type = IP_ICMP_ECHO_REPLY;
+ cksum = ~icmp_h->icmp_cksum & 0xffff;
+ cksum +=
+ ~htons(IP_ICMP_ECHO_REQUEST << 8) &
+ 0xffff;
+ cksum += htons(IP_ICMP_ECHO_REPLY << 8);
+ cksum =
+ (cksum & 0xffff) + (cksum >> 16);
+ cksum =
+ (cksum & 0xffff) + (cksum >> 16);
+ icmp_h->icmp_cksum = ~cksum;
+
+ if (ARPICMP_DEBUG)
+ print_mbuf
+ ("TX ICMP ECHO REPLY PKT",
+ in_port_id, pkt, __LINE__);
+ port->transmit_bulk_pkts(port, &pkt, 1);
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX", port->pmdid,
+ pkt, __LINE__);
+
+ return;
+ } else if (icmp_h->icmp_type ==
+ IP_ICMP_ECHO_REPLY
+ && icmp_h->icmp_code == 0) {
+ if (ARPICMP_DEBUG)
+ print_mbuf("RX", in_port_id,
+ pkt, __LINE__);
+
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = in_port_id;
+ arp_key.ip =
+ rte_bswap32(ip_h->src_addr);
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ struct arp_entry_data *arp_entry =
+ retrieve_arp_entry(arp_key);
+ if (arp_entry == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Received unsolicited ICMP echo reply from ip%x, port %d\n",
+ arp_key.ip,
+ arp_key.port_id);
+ return;
+ }
+ arp_entry->status = COMPLETE;
+ }
+ }
+ }
+
+ rte_pktmbuf_free(pkt);
+ }
+}
+
+/* int
+ * inet_pton(af, src, dst)
+ * convert from presentation format (which usually means ASCII printable)
+ * to network format (which is usually some kind of binary format).
+ * return:
+ * 1 if the address was valid for the specified address family
+ * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int my_inet_pton_ipv6(int af, const char *src, void *dst)
+{
+ switch (af) {
+ case AF_INET:
+ return inet_pton_ipv4(src, dst);
+ case AF_INET6:
+ return inet_pton_ipv6(src, dst);
+ default:
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+ /* NOTREACHED */
+}
+
+/* int
+ * inet_pton_ipv4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int inet_pton_ipv4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton_ipv6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int inet_pton_ipv6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned int dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton_ipv4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static int arp_parse_args(struct pipeline_params *params)
+{
+ uint32_t arp_route_tbl_present = 0;
+ uint32_t nd_route_tbl_present = 0;
+ uint32_t ports_mac_list_present = 0;
+ uint32_t numArg;
+ uint32_t n_vnf_threads_present = 0;
+
+ uint32_t pktq_in_prv_present = 0;
+ uint32_t prv_to_pub_map_present = 0;
+
+ uint8_t n_prv_in_port = 0;
+ int i;
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ in_port_dir_a[i] = 0; //make all RX ports ingress initially
+ prv_to_pub_map[i] = 0xff;
+ pub_to_prv_map[i] = 0xff;
+ }
+
+ RTE_SET_USED(ports_mac_list_present);
+ RTE_SET_USED(nd_route_tbl_present);
+ RTE_SET_USED(arp_route_tbl_present);
+ for (numArg = 0; numArg < params->n_args; numArg++) {
+ char *arg_name = params->args_name[numArg];
+ char *arg_value = params->args_value[numArg];
+
+ /* arp timer expiry */
+ if (strcmp(arg_name, "arp_timer_expiry") == 0) {
+ arp_timeout = atoi(arg_value);
+ }
+
+ /* pktq_in_prv */
+ if (strcmp(arg_name, "pktq_in_prv") == 0) {
+ if (pktq_in_prv_present) {
+ printf
+ ("Duplicate pktq_in_prv ... parse failed..\n\n");
+ return -1;
+ }
+ pktq_in_prv_present = 1;
+
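+			/* pktq_in_prv lists the private-side RX queues (e.g. "RXQ0.0");
+			 * only the physical port number before the '.' is used here. */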
+ int rxport = 0, j = 0;
+ char phy_port_num[5];
+ char *token = strtok(arg_value, "RXQ");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != '.')) {
+ phy_port_num[j] = token[j];
+ j++;
+ }
+ phy_port_num[j] = '\0';
+ rxport = atoi(phy_port_num);
+ if (rxport < 0)
+ rxport = 0;
+ printf
+ ("token: %s, phy_port_str: %s, phy_port_num %d\n",
+ token, phy_port_num, rxport);
+ prv_in_port_a[n_prv_in_port++] = rxport;
+ if(rxport < PIPELINE_MAX_PORT_IN)
+ in_port_dir_a[rxport] = 1; // set rxport egress
+ token = strtok(NULL, "RXQ");
+ }
+
+ if (n_prv_in_port == 0) {
+ printf
+ ("VNF common parse error - no prv RX phy port\n");
+ return -1;
+ }
+ continue;
+ }
+
+ /* prv_to_pub_map */
+ if (strcmp(arg_name, "prv_to_pub_map") == 0) {
+ if (prv_to_pub_map_present) {
+ printf
+ ("Duplicated prv_to_pub_map ... parse failed ...\n");
+ return -1;
+ }
+ prv_to_pub_map_present = 1;
+
+ int rxport = 0, txport = 0, j = 0, k = 0;
+ char rx_phy_port_num[5];
+ char tx_phy_port_num[5];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != ',')) {
+ rx_phy_port_num[j] = token[j];
+ j++;
+ }
+ rx_phy_port_num[j] = '\0';
+ rxport = atoi(rx_phy_port_num);
+ if (rxport < 0)
+ rxport = 0;
+
+ j++;
+ k = 0;
+ while ((k < 4) && (token[j + k] != ')')) {
+ tx_phy_port_num[k] = token[j + k];
+ k++;
+ }
+ tx_phy_port_num[k] = '\0';
+ txport = atoi(tx_phy_port_num);
+ if (txport < 0)
+ txport = 0;
+
+ RTE_LOG(INFO, LIBARP, "token: %s,"
+ "rx_phy_port_str: %s, phy_port_num %d,"
+ "tx_phy_port_str: %s, tx_phy_port_num %d\n",
+ token, rx_phy_port_num, rxport,
+ tx_phy_port_num, txport);
+
+ if ((rxport >= PIPELINE_MAX_PORT_IN) ||
+ (txport >= PIPELINE_MAX_PORT_IN) ||
+ (in_port_dir_a[rxport] != 1)) {
+ printf
+					    ("ARP parse error - incorrect prv-pub translation. Rx %d, Tx %d, Rx Dir %d\n",
+ rxport, txport,
+ in_port_dir_a[rxport]);
+ return -1;
+ }
+
+ prv_to_pub_map[rxport] = txport;
+ pub_to_prv_map[txport] = rxport;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ //n_vnf_threads = 3
+ if (strcmp(arg_name, "n_vnf_threads") == 0) {
+ if (n_vnf_threads_present)
+ return -1;
+ n_vnf_threads_present = 1;
+ trim(arg_value);
+ num_vnf_threads = atoi(arg_value);
+ if (num_vnf_threads <= 0) {
+ RTE_LOG(INFO, LIBARP,
+ "n_vnf_threads is invalid\n");
+ return -1;
+ }
+ RTE_LOG(INFO, LIBARP, "n_vnf_threads: 0x%x\n",
+ num_vnf_threads);
+ }
+
+ /* lib_arp_debug */
+ if (strcmp(arg_name, "lib_arp_debug") == 0) {
+ ARPICMP_DEBUG = atoi(arg_value);
+
+ continue;
+ }
+
+ /* ports_mac_list */
+ if (strcmp(arg_name, "ports_mac_list") == 0) {
+ ports_mac_list_present = 1;
+
+ uint32_t i = 0, j = 0, k = 0, MAC_NUM_BYTES = 6;
+
+ char byteStr[MAC_NUM_BYTES][3];
+ uint32_t byte[MAC_NUM_BYTES];
+
+ char *token = strtok(arg_value, " ");
+ while (token) {
+ k = 0;
+ for (i = 0; i < MAC_NUM_BYTES; i++) {
+ for (j = 0; j < 2; j++) {
+ byteStr[i][j] = token[k++];
+ }
+ byteStr[i][j] = '\0';
+ k++;
+ }
+
+ for (i = 0; i < MAC_NUM_BYTES; i++) {
+ byte[i] = strtoul(byteStr[i], NULL, 16);
+ }
+
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP, "token: %s",
+ token);
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ RTE_LOG(INFO, LIBARP,
+ ", byte[%u] %u", i,
+ byte[i]);
+ RTE_LOG(INFO, LIBARP, "\n");
+ }
+ //Populate the static arp_route_table
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ link_hw_addr
+ [link_hw_addr_array_idx].addr_bytes
+ [i] = byte[i];
+
+ link_hw_addr_array_idx++;
+ token = strtok(NULL, " ");
+ }
+
+ continue;
+ }
+
+ /* arp_route_tbl */
+ if (strcmp(arg_name, "arp_route_tbl") == 0) {
+ arp_route_tbl_present = 1;
+
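+			/* Each arp_route_tbl entry has the form (dest_ip,mask,tx_port,nh_ip)
+			 * with hexadecimal values; the fields are split out below. */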
+ uint32_t dest_ip = 0, mask = 0, tx_port = 0, nh_ip =
+ 0, i = 0, j = 0, k = 0, l = 0;
+ uint32_t arp_route_tbl_str_max_len = 10;
+ char dest_ip_str[arp_route_tbl_str_max_len];
+ char mask_str[arp_route_tbl_str_max_len];
+ char tx_port_str[arp_route_tbl_str_max_len];
+ char nh_ip_str[arp_route_tbl_str_max_len];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ i = 0;
+ while ((i < (arp_route_tbl_str_max_len - 1))
+ && (token[i] != ',')) {
+ dest_ip_str[i] = token[i];
+ i++;
+ }
+ dest_ip_str[i] = '\0';
+ dest_ip = strtoul(dest_ip_str, NULL, 16);
+
+ i++;
+ j = 0;
+ while ((j < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j] != ',')) {
+ mask_str[j] = token[i + j];
+ j++;
+ }
+ mask_str[j] = '\0';
+ mask = strtoul(mask_str, NULL, 16);
+
+ j++;
+ k = 0;
+ while ((k < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j + k] != ',')) {
+ tx_port_str[k] = token[i + j + k];
+ k++;
+ }
+ tx_port_str[k] = '\0';
+ tx_port = strtoul(tx_port_str, NULL, 16); //atoi(tx_port_str);
+
+ k++;
+ l = 0;
+ while ((l < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j + k + l] != ')')) {
+ nh_ip_str[l] = token[i + j + k + l];
+ l++;
+ }
+ nh_ip_str[l] = '\0';
+ nh_ip = strtoul(nh_ip_str, NULL, 16); //atoi(nh_ip_str);
+
+ if (1) {
+ RTE_LOG(INFO, LIBARP, "token: %s, "
+ "dest_ip_str: %s, dest_ip %u, "
+ "mask_str: %s, mask %u, "
+ "tx_port_str: %s, tx_port %u, "
+ "nh_ip_str: %s, nh_ip %u\n",
+ token, dest_ip_str, dest_ip,
+ mask_str, mask, tx_port_str,
+ tx_port, nh_ip_str, nh_ip);
+ }
+
+ /* if (tx_port >= params->n_ports_out)
+ {
+ RTE_LOG(INFO, LIBARP,"ARP-ICMP parse error - incorrect tx_port %d, max %d\n",
+ tx_port, params->n_ports_out);
+ return -1;
+ }
+ */
+ //Populate the static arp_route_table
+ lib_arp_route_table[arp_route_tbl_index].ip =
+ dest_ip;
+ lib_arp_route_table[arp_route_tbl_index].mask =
+ mask;
+ lib_arp_route_table[arp_route_tbl_index].port =
+ tx_port;
+ lib_arp_route_table[arp_route_tbl_index].nh =
+ nh_ip;
+ arp_route_tbl_index++;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ /*ND IPv6 */
+ /* nd_route_tbl */
+ if (strcmp(arg_name, "nd_route_tbl") == 0) {
+ nd_route_tbl_present = 1;
+
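+			/* Each nd_route_tbl entry has the form (dest_ipv6,depth,tx_port,nh_ipv6);
+			 * the IPv6 strings are converted with my_inet_pton_ipv6(). */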
+ uint8_t dest_ipv6[16], depth = 0, tx_port =
+ 0, nh_ipv6[16], i = 0, j = 0, k = 0, l = 0;
+ uint8_t nd_route_tbl_str_max_len = 128; //64;
+ char dest_ipv6_str[nd_route_tbl_str_max_len];
+ char depth_str[nd_route_tbl_str_max_len];
+ char tx_port_str[nd_route_tbl_str_max_len];
+ char nh_ipv6_str[nd_route_tbl_str_max_len];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ i = 0;
+ while ((i < (nd_route_tbl_str_max_len - 1))
+ && (token[i] != ',')) {
+ dest_ipv6_str[i] = token[i];
+ i++;
+ }
+ dest_ipv6_str[i] = '\0';
+ my_inet_pton_ipv6(AF_INET6, dest_ipv6_str,
+ &dest_ipv6);
+
+ i++;
+ j = 0;
+ while ((j < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j] != ',')) {
+ depth_str[j] = token[i + j];
+ j++;
+ }
+ depth_str[j] = '\0';
+ //converting string char to integer
+ int s;
+ for (s = 0; depth_str[s] != '\0'; ++s)
+ depth = depth * 10 + depth_str[s] - '0';
+
+ j++;
+ k = 0;
+ while ((k < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j + k] != ',')) {
+ tx_port_str[k] = token[i + j + k];
+ k++;
+ }
+ tx_port_str[k] = '\0';
+ tx_port = strtoul(tx_port_str, NULL, 16); //atoi(tx_port_str);
+
+ k++;
+ l = 0;
+ while ((l < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j + k + l] != ')')) {
+ nh_ipv6_str[l] = token[i + j + k + l];
+ l++;
+ }
+ nh_ipv6_str[l] = '\0';
+ my_inet_pton_ipv6(AF_INET6, nh_ipv6_str,
+ &nh_ipv6);
+
+ //Populate the static arp_route_table
+ for (i = 0; i < 16; i++) {
+ lib_nd_route_table
+ [nd_route_tbl_index].ipv6[i] =
+ dest_ipv6[i];
+ lib_nd_route_table
+ [nd_route_tbl_index].nhipv6[i] =
+ nh_ipv6[i];
+ }
+ lib_nd_route_table[nd_route_tbl_index].depth =
+ depth;
+ lib_nd_route_table[nd_route_tbl_index].port =
+ tx_port;
+
+ nd_route_tbl_index++;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ /* any other */
+ //return -1;
+ }
+ /* Check that mandatory arguments are present */
+ /*
+ if ((arp_route_tbl_present == 0) || (ports_mac_list_present == 0)) {
+ RTE_LOG(INFO, LIBARP,"VNF common not all mandatory arguments are present\n");
+ RTE_LOG(INFO, LIBARP,"%d, %d \n",
+ arp_route_tbl_present, ports_mac_list_present);
+ return -1;
+ }
+ */
+
+ return 0;
+}
+
+void lib_arp_init(struct pipeline_params *params,
+ __rte_unused struct app_params *app)
+{
+
+ RTE_LOG(INFO, LIBARP, "ARP initialization ...\n");
+
+ /* Parse arguments */
+ if (arp_parse_args(params)) {
+ RTE_LOG(INFO, LIBARP, "arp_parse_args failed ...\n");
+ return;
+ }
+
+ /* create the arp_icmp mbuf rx pool */
+ lib_arp_pktmbuf_tx_pool =
+ rte_pktmbuf_pool_create("lib_arp_mbuf_tx_pool", NB_ARPICMP_MBUF, 32,
+ 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+
+ if (lib_arp_pktmbuf_tx_pool == NULL) {
+ RTE_LOG(INFO, LIBARP, "ARP mbuf pool create failed.\n");
+ return;
+ }
+
+ lib_arp_pkt = rte_pktmbuf_alloc(lib_arp_pktmbuf_tx_pool);
+ if (lib_arp_pkt == NULL) {
+ RTE_LOG(INFO, LIBARP, "ARP lib_arp_pkt alloc failed.\n");
+ return;
+ }
+
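+	/* The ARP hash maps struct arp_key_ipv4 (ip + port) keys to
+	 * struct arp_entry_data pointers. */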
+ arp_hash_params.socket_id = rte_socket_id();
+ arp_hash_params.entries = MAX_NUM_ARP_ENTRIES;
+ arp_hash_params.key_len = sizeof(struct arp_key_ipv4);
+ arp_hash_handle = rte_hash_create(&arp_hash_params);
+
+ if (arp_hash_handle == NULL) {
+ RTE_LOG(INFO, LIBARP,
+ "ARP rte_hash_create failed. socket %d ... \n",
+ arp_hash_params.socket_id);
+ } else {
+ RTE_LOG(INFO, LIBARP, "arp_hash_handle %p\n\n",
+ (void *)arp_hash_handle);
+ }
+
+	/* Create the mempool of rte_timer objects used by ARP/ND entries */
+
+ timer_mempool_arp = rte_mempool_create("timer_mempool_arp",
+ timer_objs_mempool_count,
+ sizeof(struct rte_timer),
+ 0, 0,
+ NULL, NULL,
+ NULL, NULL, rte_socket_id(), 0);
+ if (timer_mempool_arp == NULL) {
+ rte_panic("timer_mempool create error\n");
+ }
+ rte_timer_subsystem_init();
+ list_add_type(ETHER_TYPE_ARP, process_arpicmp_pkt_parse);
+
+ /* ND IPv6 */
+ nd_hash_params.socket_id = rte_socket_id();
+ nd_hash_params.entries = MAX_NUM_ND_ENTRIES;
+ nd_hash_params.key_len = sizeof(struct nd_key_ipv6);
+ nd_hash_handle = rte_hash_create(&nd_hash_params);
+ if (nd_hash_handle == NULL) {
+ RTE_LOG(INFO, LIBARP,
+ "ND rte_hash_create failed. socket %d ... \n",
+ nd_hash_params.socket_id);
+ } else {
+ RTE_LOG(INFO, LIBARP, "nd_hash_handle %p\n\n",
+ (void *)nd_hash_handle);
+ }
+
+ return;
+}
+
+void arp_timer_callback(struct rte_timer *timer, void *arg)
+{
+ struct arp_timer_key *remove_key = (struct arp_timer_key *)arg;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "ARP TIMER callback : expire :%d\n",
+ (int)timer->expire);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Remove ARP Entry for IP :%d.%d.%d.%d , port %d\n",
+ (remove_key->ip >> 24),
+ ((remove_key->ip & 0x00ff0000) >> 16),
+ ((remove_key->ip & 0x0000ff00) >> 8),
+ ((remove_key->ip & 0x000000ff)), remove_key->port_id);
+ remove_arp_entry((uint32_t) remove_key->ip,
+ (uint8_t) remove_key->port_id, arg);
+ return;
+}
+
+void nd_timer_callback(struct rte_timer *timer, void *arg)
+{
+ struct nd_timer_key *remove_key = (struct nd_timer_key *)arg;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "nd time callback : expire :%d\n",
+ (int)timer->expire);
+ remove_nd_entry_ipv6(remove_key->ipv6, remove_key->port_id);
+ return;
+}
+
+void create_arp_table(void)
+{
+
+ int i;
+ for (i = 0; i < MAX_ARP_DATA_ENTRY_TABLE; i++) {
+ populate_arp_entry((const struct ether_addr *)
+ &arp_entry_data_table[i].eth_addr,
+ arp_entry_data_table[i].ip,
+ (uint8_t) arp_entry_data_table[i].port,
+ STATIC_ARP);
+ }
+ print_arp_table();
+ return;
+}
+
+void create_nd_table(void)
+{
+
+ int i;
+ for (i = 0; i < MAX_ND_DATA_ENTRY_TABLE; i++) {
+ populate_nd_entry((const struct ether_addr *)
+ nd_entry_data_table[i].eth_addr,
+ nd_entry_data_table[i].ipv6,
+ (uint8_t) nd_entry_data_table[i].port,
+ STATIC_ND);
+ }
+ print_nd_table();
+ return;
+}
+
+void send_gratuitous_arp(l2_phy_interface_t *port)
+{
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+
+ struct rte_mbuf *arp_pkt = lib_arp_pkt;
+
+ if (port == NULL) {
+ RTE_LOG(INFO, LIBARP, "PORT ID DOWN.. %s\n", __FUNCTION__);
+ return;
+
+ }
+
+ if (arp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating arp_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(arp_pkt, struct ether_hdr *);
+
+ ether_addr_copy(&broadcast_ether_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &port->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_ARP);
+
+ arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ arp_h->arp_hrd = CHECK_ENDIAN_16(ARP_HRD_ETHER);
+ arp_h->arp_pro = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+ arp_h->arp_hln = ETHER_ADDR_LEN;
+ arp_h->arp_pln = sizeof(uint32_t);
+ arp_h->arp_op = CHECK_ENDIAN_16(ARP_OP_REQUEST);
+
+ ether_addr_copy((struct ether_addr *)
+ &port->macaddr[0], &arp_h->arp_data.arp_sha);
+ if (port->ipv4_list == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "port->ipv4_list is NULL.. %s\n",
+ __FUNCTION__);
+ return;
+ }
+ arp_h->arp_data.arp_sip = (((ipv4list_t *) (port->ipv4_list))->ipaddr);
+ ether_addr_copy(&null_ether_addr, &arp_h->arp_data.arp_tha);
+ //arp_h->arp_data.arp_tip = rte_cpu_to_be_32(ip);
+ arp_h->arp_data.arp_tip = 0; //(((ipv4list_t *) (port->ipv4_list))->ipaddr);
+ // RTE_LOG(INFO, LIBARP,"arp tip:%x arp sip :%x\n", arp_h->arp_data.arp_tip,
+ //arp_h->arp_data.arp_sip);
+ // mmcd changed length from 60 to 42 - real length of arp request, no padding on ethernet needed - looks now like linux arp
+ arp_pkt->pkt_len = 42;
+ arp_pkt->data_len = 42;
+
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP, "SENDING GRATUITOUS ARP REQUEST\n");
+ print_mbuf("TX", port->pmdid, arp_pkt, __LINE__);
+ }
+ port->transmit_single_pkt(port, arp_pkt);
+}
+
+void set_arpdebug(int flag)
+{
+ if (flag) {
+ RTE_LOG(INFO, LIBARP, "Debugs turned on\n\r");
+ ARPICMP_DEBUG = 1;
+ NDIPV6_DEBUG = 1;
+
+ } else {
+ RTE_LOG(INFO, LIBARP, "Debugs turned off\n\r");
+ ARPICMP_DEBUG = 0;
+ NDIPV6_DEBUG = 0;
+ }
+}
+
+void set_arptimeout(uint32_t timeout_val)
+{
+ if (timeout_val == 0) {
+ RTE_LOG(INFO, LIBARP, "Cannot be zero...\n\r");
+ return;
+ }
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "set_arptimeout: arp_timeout %u, timeout_val %u\n\r",
+ arp_timeout, timeout_val);
+ arp_timeout = timeout_val;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "set_arptimeout: arp_timeout %u\n\r",
+ arp_timeout);
+}
diff --git a/common/VIL/l2l3_stack/lib_arp.h b/common/VIL/l2l3_stack/lib_arp.h
new file mode 100644
index 00000000..33875679
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_arp.h
@@ -0,0 +1,506 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_LIB_ARP_H__
+#define __INCLUDE_LIB_ARP_H__
+
+#include <rte_pipeline.h>
+#include "rte_ether.h"
+#include "l2_proto.h"
+#include "app.h"
+
+#define ND_IPV6_ADDR_SIZE 16 /**< 16 Byte of IPv6 Address. */
+#define ND_IPV6_TIMER_EXPIRY 300 /**< in Seconds, Timer for ND IPv6 Expiry */
+#define ARP_TIMER_EXPIRY 1800 /**< in Seconds, TIMER for ARP Expiry */
+#define TIMER_MILLISECOND 1
+#define RTE_LOGTYPE_LIBARP RTE_LOGTYPE_USER1
+#define MAX_ND_RT_ENTRY 16
+#define MAX_ARP_RT_ENTRY 16
+
+/**
+* A structure for Route table entries of IPv4
+*/
+
+struct lib_arp_route_table_entry {
+ uint32_t ip; /**< Ipv4 address*/
+ uint32_t mask; /**< mask */
+ uint32_t port; /**< Physical port */
+ uint32_t nh; /**< next hop */
+};
+
+/**
+* A structure for Route table entries of IPv6
+*
+*/
+struct lib_nd_route_table_entry {
+ uint8_t ipv6[16]; /**< Ipv6 address */
+ uint8_t depth; /**< Depth */
+ uint32_t port; /**< Port */
+ uint8_t nhipv6[16]; /**< next hop Ipv6 */
+};
+
+extern struct lib_nd_route_table_entry lib_nd_route_table[MAX_ND_RT_ENTRY];
+extern struct lib_arp_route_table_entry lib_arp_route_table[MAX_ARP_RT_ENTRY];
+
+enum {
+ ARP_FOUND,
+ ARP_NOT_FOUND,
+ NH_NOT_FOUND,
+};
+
+enum arp_key_type {
+ ARP_IPV4,
+ ND_IPV6,
+};
+
+struct arp_key_ipv4 {
+ uint32_t ip; /**< IP address */
+ uint8_t port_id; /**< Port id */
+ uint8_t filler1; /**< filler 1, for better hash key */
+ uint8_t filler2; /**< filler 2, for better hash key */
+ uint8_t filler3; /**< filler 3, for better hash key */
+};
+
+/**
+* IPv6
+*/
+struct nd_key_ipv6 {
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< 128 Bit of IPv6 Address*/
+ uint8_t port_id; /**< Port id */
+ uint8_t filler1;
+ uint8_t filler2;
+ uint8_t filler3;
+};
+
+/**
+* Arp Key
+*/
+struct arp_key {
+ enum arp_key_type type;
+ union {
+ struct arp_key_ipv4 ipv4;
+ } key; /**< Key of type arp key Ipv4 */
+};
+
+/**
+* call back function parameter pair remove nd entry
+*
+*/
+
+struct nd_timer_key {
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< IPv6 address */
+ uint8_t port_id; /**< Port id */
+} __rte_cache_aligned;
+
+/**
+* call back function parameter remove arp entry
+*
+*/
+struct arp_timer_key {
+ uint32_t ip; /**< Ip address */
+ uint8_t port_id; /**< Port id */
+} __rte_cache_aligned;
+
+extern uint32_t ARPICMP_DEBUG;
+
+#define COMPLETE 1 /**< ARP entry populated and echo reply received. */
+#define INCOMPLETE 0 /**< ARP entry populated and either awaiting echo reply or stale entry. */
+
+extern uint32_t NDIPV6_DEBUG; /**< ND IPv6 */
+
+#define ICMPv6_COMPLETE 1 /**< ICMPv6 entry populated and echo reply received. */
+#define ICMPv6_INCOMPLETE 0 /**< ICMPv6 entry populated and either awaiting echo reply or stale entry. */
+#define STATIC_ARP 1 /**< Static ARP Entry. */
+#define DYNAMIC_ARP 0 /**< Dynamic ARP Entry. */
+#define STATIC_ND 1 /**< Static ND Entry. */
+#define DYNAMIC_ND 0 /**< Dynamic ND Entry. */
+
+/**
+* A structure used to define ARP entry data.
+* This structure is used as an input parameter for adding an ARP entry.
+*/
+
+struct arp_entry_data {
+ struct ether_addr eth_addr; /**< ethernet address */
+ uint32_t ip; /**< IP address */
+ uint8_t port; /**< Port */
+ uint8_t status; /**< Status of entry */
+ uint8_t mode; /**< Mode */
+ uint8_t retry_count; /**< retry count for ARP*/
+ struct rte_timer *timer; /**< Timer Associated with ARP*/
+ struct arp_timer_key *timer_key;
+} __attribute__ ((packed));
+
+/**
+* A structure used to define the table of ARP entry data.
+* This structure is used to maintain the ARP entry data.
+*/
+
+struct table_arp_entry_data {
+ uint8_t eth_addr[6]; /**< Ethernet address */
+ uint8_t port; /**< port */
+ uint8_t status; /**< status of entry */
+ uint32_t ip; /**< Ip address */
+} __attribute__ ((packed));
+
+/**
+* A structure used to define ND entry data for IPv6.
+* This structure is used as an input parameter for adding an ND entry.
+*/
+
+struct nd_entry_data {
+ struct ether_addr eth_addr; /**< Ethernet address */
+ uint8_t port; /**< port */
+	uint8_t status;			/**< status of the entry */
+ uint8_t mode; /**< Mode */
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< Ipv6 address */
+ struct rte_timer *timer; /**< Timer */
+} __attribute__ ((packed));
+
+/**
+* A structure used to define the table of ND entry data.
+* This structure is used to maintain ND entry data.
+*
+*/
+
+struct table_nd_entry_data {
+ uint8_t eth_addr[6]; /**< Ethernet address */
+ uint8_t port; /**< Port */
+ uint8_t status; /**< status of Entry */
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< IPv6 address */
+ struct rte_timer *timer; /**< Timer */
+} __attribute__ ((packed));
+
+/**
+* To get the destination MAC address and next hop for the given IP address and outgoing port
+* @param1 ipaddr
+* IP address for which the MAC address is needed.
+* @param2 phy_port
+* Physical port
+* @param3 hw_addr
+* Pointer to the ether_addr; this gets updated with a valid MAC address
+* @param4 nhip
+* Gets the next hop IP for the IP address and physical port
+* @return
+* 0 on failure, 1 on success
+*/
+
+int get_dest_mac_address(const uint32_t ipaddr, uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint32_t *nhip);
+/**
+* To get the destination MAC address for the given IP address and outgoing port
+* @param1 ipaddr
+* IP address for which the MAC address is needed.
+* @param2 phy_port
+* Physical port
+* @param3 hw_addr
+* Pointer to the ether_addr; this gets updated with a valid MAC address
+* @return
+* 0 on failure, 1 on success
+*/
+int get_dest_mac_addr_port(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr);
+
+/**
+* To get the destination MAC address for an IPv4 address
+* @param ipaddr
+* IP address for which the destination MAC address is needed
+* @param phy_port
+* Physical port
+* @param hw_addr
+* Pointer to the ether_addr; this gets updated with a valid MAC address
+* @return
+* 0 on failure, 1 on success
+*/
+int get_dest_mac_addr(const uint32_t ipaddr, uint32_t *phy_port,
+ struct ether_addr *hw_addr);
+
+/**
+* To get the destination MAC address for an IPv6 address
+* @param ipv6addr
+* IPv6 address for which the destination MAC address is needed
+* @param phy_port
+* Physical port
+* @param hw_addr
+* Pointer to the ether_addr; this gets updated with a valid MAC address
+* @param nhipv6
+* Gets the next hop IPv6 address for the IPv6 address and physical port
+* @return
+* 0 on failure, 1 on success
+*/
+int get_dest_mac_address_ipv6(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[]);
+/**
+* To get the destination MAC address for an IPv6 address
+* @param ipv6addr
+* IPv6 address for which the destination MAC address is needed
+* @param phy_port
+* Physical port
+* @param hw_addr
+* Pointer to the ether_addr; this gets updated with a valid MAC address
+* @param nhipv6
+* Gets the next hop IPv6 address for the IPv6 address and physical port
+* @return
+* 0 on failure, 1 on success
+*/
+
+int get_dest_mac_address_ipv6_port(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr,
+ uint8_t nhipv6[]);
+
+/**
+* To get the hardware (link) MAC address for a port
+* @param out_port
+* Outgoing port
+*/
+
+struct ether_addr *get_link_hw_addr(uint8_t out_port);
+
+/**
+* This prints the ARP table
+* @param void
+*
+*/
+void print_arp_table(void);
+
+/**
+* This prints the ND table
+* @param void
+*
+*/
+void print_nd_table(void);
+
+/**
+* This removes an ARP entry from the table
+* @param ipaddr
+* IPv4 address
+* @param portid
+* Port id
+* @param arg
+* Caller-supplied argument
+*/
+void remove_arp_entry(uint32_t ipaddr, uint8_t portid, void *arg);
+
+/**
+* Removes an ND entry from the ND table
+* @param ipv6addr[]
+* IPv6 address
+* @param portid
+* Port id
+*/
+
+void remove_nd_entry_ipv6(uint8_t ipv6addr[], uint8_t portid);
+
+/**
+* Populate an ARP entry in the ARP table
+* @param hw_addr
+* Ethernet address
+* @param ipaddr
+* IPv4 address
+* @param portid
+* Port id
+* @param mode
+* Mode
+*/
+void populate_arp_entry(const struct ether_addr *hw_addr, uint32_t ipaddr,
+ uint8_t portid, uint8_t mode);
+
+/**
+* Populate an ND entry in the ND table
+* @param hw_addr
+* Ethernet address
+* @param ip[]
+* IPv6 address
+* @param portid
+* Port id
+* @param mode
+* Mode
+*/
+
+void populate_nd_entry(const struct ether_addr *hw_addr, uint8_t ip[],
+ uint8_t portid, uint8_t mode);
+
+/**
+* To send an ARP request
+* @param port_id
+* Port id
+* @param ip
+* IP address
+*/
+
+void request_arp(uint8_t port_id, uint32_t ip);
+
+/**
+* To send an echo request
+* @param port_id
+* Port id
+* @param ip
+* IP address
+*/
+struct rte_mbuf *request_echo(uint32_t port_id, uint32_t ip);
+
+/**
+* To send an ICMPv6 echo request
+* @param ipv6
+* IPv6 address
+* @param port
+* Pointer to the port
+*/
+struct rte_mbuf *request_icmpv6_echo(uint8_t ipv6[], l2_phy_interface_t *port);
+
+/**
+* To request ND
+* @param ipv6
+* IPv6 address
+* @param port
+* Pointer to the port
+*/
+struct rte_mbuf *request_nd(uint8_t ipv6[], l2_phy_interface_t *port);
+
+/**
+* To process the ARP and ICMP packets
+* @param pkt
+* Packet to be processed
+* @param port
+* Pointer to the port
+*/
+void process_arpicmp_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port);
+
+/**
+* IPv4
+* Validate if a key-value pair already exists in the hash table for the given key - IPv4
+* @param arp_key
+* ARP key used to look up the entry
+*/
+struct arp_entry_data *retrieve_arp_entry(const struct arp_key_ipv4 arp_key);
+
+/**
+* ND IPv6
+* Validate if a key-value pair already exists in the hash table for the given key - ND IPv6
+* @param nd_key
+* ND key used to look up the ND entry
+*/
+
+struct nd_entry_data *retrieve_nd_entry(struct nd_key_ipv6 nd_key);
+
+/**
+* Sets up ARP initialization
+*/
+//void lib_arp_init(void);
+void lib_arp_init(struct pipeline_params *params, struct app_params *app);
+#if 0
+void set_port_to_loadb_map(uint8_t pipeline_num);
+
+/**
+* Acts on port_to_loadb_map
+*/
+uint8_t get_port_to_loadb_map(uint8_t phy_port_id);
+
+void set_phy_inport_map(uint8_t pipeline_num, uint8_t *map);
+void set_phy_outport_map(uint8_t pipeline_num, uint8_t *map);
+
+/**
+* Acts on lb_outport_id
+*/
+
+uint8_t get_loadb_outport_id(uint8_t actual_phy_port);
+uint8_t get_vnf_set_num(uint8_t pipeline_num);
+
+void pipelines_port_info(void);
+void pipelines_map_info(void);
+#endif
+/**
+* A callback for the ARP timer
+* @param timer
+* Timer pointer
+* @param arg
+* Arguments to the timer
+*/
+void arp_timer_callback(struct rte_timer *, void *arg);
+
+/**
+* A callback for the ND timer
+* @param timer
+* Timer pointer
+* @param arg
+* Arguments to the timer
+*/
+void nd_timer_callback(struct rte_timer *timer, void *arg);
+
+/**
+* To create Arp Table
+* @param void
+*/
+void create_arp_table(void);
+/**
+* To create ND Table
+* @param void
+*/
+void create_nd_table(void);
+
+/**
+* To parse and process the ARP and ICMP packets
+* @param pkt
+* Packet to process
+* @param pkt_num
+* Packet number
+* @param pkt_mask
+* Packet mask
+* @param port
+* Pointer to the port
+*/
+void process_arpicmp_pkt_parse(struct rte_mbuf **pkt, uint16_t pkt_num,
+ uint64_t pkt_mask, l2_phy_interface_t *port);
+
+/**
+* Sends a gratuitous ARP packet
+* @param port
+* Pointer to the port
+*/
+void send_gratuitous_arp(l2_phy_interface_t *port);
+/**
+* To set ARP debug
+* @param flag
+* Set to 1 to enable, 0 to disable
+*/
+void set_arpdebug(int flag);
+/**
+* To set the timeout for ARP entries
+* @param timeout_val
+* Timeout value for ARP entries
+*/
+void set_arptimeout(uint32_t timeout_val);
+/**
+* To get the next hop for IPv4
+* @param ipv4
+* IPv4 address
+* @param port
+* Pointer that receives the outgoing port
+*/
+uint32_t get_nh(uint32_t, uint32_t *);
+/**
+* To get the next hop for IPv6
+* @param ipv6
+* IPv6 address
+* @param port
+* Pointer to the port
+* @param nhipv6
+* Next hop IPv6 address
+*/
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[]);
+#endif
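A minimal usage sketch of the lookup and resolution API declared in lib_arp.h above. The function names and prototypes come from this header; the caller, the destination address, and the retry policy are hypothetical and only illustrate the intended call pattern, not code from the repository.

/* Hypothetical caller: look up the destination MAC for an IPv4 next hop
 * and fall back to sending an ARP request when no entry exists yet.
 * Only the prototypes from lib_arp.h are assumed. */
#include <rte_ether.h>
#include "lib_arp.h"

static int resolve_ipv4_next_hop(uint32_t dst_ip, uint32_t out_port)
{
	struct ether_addr dst_mac;
	uint32_t nhip = 0;

	/* returns 1 on success and fills dst_mac with the resolved address */
	if (get_dest_mac_address(dst_ip, &out_port, &dst_mac, &nhip) == 1)
		return 0;

	/* no entry yet: kick off ARP resolution and retry on a later packet */
	request_arp((uint8_t)out_port, nhip ? nhip : dst_ip);
	return -1;
}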
diff --git a/common/VIL/l2l3_stack/lib_icmpv6.c b/common/VIL/l2l3_stack/lib_icmpv6.c
new file mode 100644
index 00000000..44f30cbf
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_icmpv6.c
@@ -0,0 +1,410 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+/* Santosh Sethupathi*/
+
+#include "lib_icmpv6.h"
+
+static void print_pkt(uint8_t *rd)
+{
+ int i = 0, j = 0;
+
+ printf("Packet Contents:\n");
+
+ for (i = 0; i < 20; i++) {
+ for (j = 0; j < 20; j++)
+ printf("%02x ", rd[(20 * i) + j]);
+
+ printf("\n");
+ }
+}
+
+static uint16_t icmpv6_ipv6_nd_checksum(struct rte_mbuf *pkt)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+
+ size_t tmplen, offset;
+ uint8_t *tmppacket, *tpacket;
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+
+ uint32_t payloadlen = 0x20;
+ payloadlen = rte_bswap32(payloadlen);
+
+ tmplen = 40 + sizeof(struct icmpv6_hdr) + sizeof(struct icmpv6_nd_hdr);
+ tmplen = RTE_CACHE_LINE_ROUNDUP(tmplen);
+ tmppacket = rte_zmalloc(NULL, tmplen, RTE_CACHE_LINE_SIZE);
+ if (tmppacket == NULL)
+  return 0;
+ tpacket = tmppacket;
+
+ offset = 16;
+ memcpy(tpacket, &ipv6_h->src_addr[0], offset);
+ tpacket += offset;
+ memcpy(tpacket, &ipv6_h->dst_addr[0], offset);
+ tpacket += offset;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ memcpy(tpacket, &ipv6_h->proto, 1);
+ tpacket++;
+ memcpy(tpacket, &payloadlen, 4);
+ tpacket += 4;
+ memcpy(tpacket, icmpv6_h,
+ sizeof(struct icmpv6_hdr) + sizeof(struct icmpv6_nd_hdr));
+
+ if (ARPICMP_DEBUG)
+  print_pkt(tmppacket);
+
+ uint16_t cksum = rte_raw_cksum(tmppacket, tmplen);
+ rte_free(tmppacket);
+ return cksum;
+}
+
+static uint16_t icmpv6_ipv6_echo_checksum(struct rte_mbuf *pkt)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+
+ size_t tmplen, offset;
+ uint8_t *tmppacket, *tpacket;
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+
+ uint32_t payloadlen = rte_bswap16(ipv6_h->payload_len);
+ uint32_t payloadlen_swap = rte_bswap32(payloadlen);
+
+ if (ARPICMP_DEBUG)
+ printf("%s: payloadlen: %u\n", __FUNCTION__, payloadlen);
+
+ tmplen = 40 + payloadlen;
+ tmplen = RTE_CACHE_LINE_ROUNDUP(tmplen);
+ tmppacket = rte_zmalloc(NULL, tmplen, RTE_CACHE_LINE_SIZE);
+ if (tmppacket == NULL)
+  return 0;
+ tpacket = tmppacket;
+
+ offset = 16;
+ memcpy(tpacket, &ipv6_h->src_addr[0], offset);
+ tpacket += offset;
+ memcpy(tpacket, &ipv6_h->dst_addr[0], offset);
+ tpacket += offset;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ memcpy(tpacket, &ipv6_h->proto, 1);
+ tpacket++;
+ memcpy(tpacket, &payloadlen_swap, 4);
+ tpacket += 4;
+ memcpy(tpacket, icmpv6_h, payloadlen);
+
+ if (ARPICMP_DEBUG)
+  print_pkt(tmppacket);
+
+ uint16_t cksum = rte_raw_cksum(tmppacket, tmplen);
+ rte_free(tmppacket);
+ return cksum;
+}
+
+void process_icmpv6_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port)
+{
+
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_nd_hdr *icmpv6_nd_h;
+ uint8_t ipv6_addr[16];
+ uint8_t i = 0;
+ uint8_t req_tipv6[16];
+ /* To drop the packet */
+
+ if (port == NULL) {
+ printf("port is NULL");
+ return;
+ } else if (port->ipv6_list == NULL) {
+ printf("IPV6 address not configured on link\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+
+ if ((icmpv6_h->icmpv6_type == ICMPV6_ECHO_REQUEST)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ for (i = 0; i < 16; i++) {
+ ipv6_addr[i] = ipv6_h->src_addr[i];
+ }
+
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0],
+ &eth_h->s_addr);
+
+ for (i = 0; i < 16; i++)
+ ipv6_h->src_addr[i] = ipv6_h->dst_addr[i];
+ for (i = 0; i < 16; i++)
+ ipv6_h->dst_addr[i] = ipv6_addr[i];
+
+ icmpv6_h->icmpv6_type = ICMPV6_ECHO_REPLY;
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum = ~icmpv6_ipv6_echo_checksum(pkt);
+ port->transmit_bulk_pkts(port, &pkt, 1);
+
+ return;
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_ECHO_REPLY)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = port->pmdid;
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ nd_key.ipv6[i] = ipv6_h->src_addr[i];
+
+ }
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ /*Validate if key-value pair already exists in the hash table for ND IPv6 */
+ struct nd_entry_data *new_nd_data = retrieve_nd_entry(nd_key);
+ if (new_nd_data == NULL) {
+ printf
+ ("Received unsolicited ICMPv6 echo reply on port %d\n",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ printf("%02X%02X ", nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ return;
+ }
+
+ new_nd_data->status = COMPLETE;
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_NEIGHBOR_SOLICITATION)
+ && (icmpv6_h->icmpv6_code == 0)) {
+
+ icmpv6_nd_h =
+ (struct icmpv6_nd_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+ struct ether_addr *src_hw_addr = &eth_h->s_addr;
+ uint8_t src_ipv6[16], dst_ipv6[16];
+ uint16_t multi_addr;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ src_ipv6[i] = ipv6_h->src_addr[i];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ dst_ipv6[i] = ipv6_h->dst_addr[i];
+
+ multi_addr = dst_ipv6[0];
+
+ /* Check for Multicast Address */
+ if ((IPV6_MULTICAST & ((multi_addr << 8) | dst_ipv6[1]))
+ || !memcmp(&port->macaddr[0], &eth_h->d_addr, 6)) {
+ populate_nd_entry(src_hw_addr, src_ipv6, port->pmdid,
+ DYNAMIC_ND);
+
+ /* build a Neighbor Advertisement message */
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ req_tipv6[i] = icmpv6_nd_h->target_ipv6[i];
+
+ if (!memcmp
+ (&req_tipv6[0],
+ &((ipv6list_t *) port->ipv6_list)->ipaddr[0],
+ 16)) {
+
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&port->
+ macaddr[0], &eth_h->s_addr);
+
+ /* set sender mac address */
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6_h->dst_addr[i] =
+ ipv6_h->src_addr[i];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6_h->src_addr[i] = req_tipv6[i];
+ icmpv6_h->icmpv6_type =
+ ICMPV6_NEIGHBOR_ADVERTISEMENT;
+ icmpv6_nd_h->type = e_Target_Link_Layer_Address;
+ icmpv6_nd_h->length = 1;
+ memcpy(&icmpv6_nd_h->link_layer_addr[0],
+ &port->macaddr[0], 6);
+ icmpv6_nd_h->icmpv6_reserved = 0;
+ icmpv6_nd_h->icmpv6_reserved |=
+ rte_cpu_to_be_32
+ (NEIGHBOR_ROUTER_OVERRIDE_SET);
+
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum =
+ ~icmpv6_ipv6_nd_checksum(pkt);
+
+ port->transmit_bulk_pkts(port, &pkt, 1);
+
+ } else if (ARPICMP_DEBUG) {
+ printf
+ ("............Some one else is the target host here !!!\n");
+ }
+
+ return;
+ } else {
+ if (ARPICMP_DEBUG) {
+ printf
+ ("...............Malformed ND Solicitation message!!!\n");
+ }
+ }
+
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_NEIGHBOR_ADVERTISEMENT)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ struct ether_addr *src_hw_addr = &eth_h->s_addr;
+ uint8_t ipv6[16];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ ipv6[i] = ipv6_h->src_addr[i];
+
+ }
+ populate_nd_entry(src_hw_addr, ipv6, port->pmdid, DYNAMIC_ND);
+ } else {
+ if (ARPICMP_DEBUG) {
+ printf("ICMPv6 Type %d Not Supported yet !!!\n",
+ icmpv6_h->icmpv6_type);
+ }
+ }
+
+ rte_pktmbuf_free(pkt);
+}
+
+struct rte_mbuf *request_icmpv6_echo(uint8_t ipv6[], l2_phy_interface_t *port)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_info_hdr *icmpv6_info_h;
+ int i;
+ uint8_t *icmp_data;
+
+ struct rte_mbuf *icmpv6_pkt = lib_icmpv6_pkt;
+ if (icmpv6_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmpv6_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmpv6_pkt, struct ether_hdr *);
+
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+ icmpv6_info_h =
+ (struct icmpv6_info_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = rte_bswap16(0x86dd);
+ for (i = 0; i < 6; i++) {
+ eth_h->d_addr.addr_bytes[i] = 0;
+ }
+
+ ipv6_h->vtc_flow = rte_bswap32(0x60000000);
+ ipv6_h->payload_len = rte_bswap16(64);
+ ipv6_h->proto = 58;
+ ipv6_h->hop_limits = 64;
+
+ for (i = 0; i < 16; i++) {
+ ipv6_h->src_addr[i] = 0x0;
+ ipv6_h->dst_addr[i] = ipv6[i];
+ }
+
+ icmpv6_h->icmpv6_type = ICMPV6_ECHO_REQUEST;
+ icmpv6_h->icmpv6_code = 0;
+ icmpv6_info_h->icmpv6_ident = rte_bswap16(0x5151);
+ icmpv6_info_h->icmpv6_seq_nb = rte_bswap16(0x1);
+
+ icmp_data = (uint8_t *) icmpv6_h + 8;
+ for (i = 0; i < 56; i++) {
+ *icmp_data = i + 1;
+ icmp_data++;
+ }
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum = ~icmpv6_ipv6_echo_checksum(icmpv6_pkt);
+
+ icmpv6_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr) + 64;
+ icmpv6_pkt->data_len = icmpv6_pkt->pkt_len;
+
+ return icmpv6_pkt;
+}
+
+struct rte_mbuf *request_nd(uint8_t ipv6[], l2_phy_interface_t *port)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_nd_hdr *icmpv6_nd_h;
+ int i;
+
+ struct rte_mbuf *icmpv6_pkt = lib_icmpv6_pkt;
+ if (icmpv6_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmpv6_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmpv6_pkt, struct ether_hdr *);
+
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+ icmpv6_nd_h =
+ (struct icmpv6_nd_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = rte_bswap16(0x86dd);
+ for (i = 0; i < 6; i++) {
+ eth_h->d_addr.addr_bytes[i] = 0;
+ }
+
+ ipv6_h->vtc_flow = rte_bswap32(0x60000000);
+ ipv6_h->payload_len = rte_bswap16(32);
+ ipv6_h->proto = 58;
+ ipv6_h->hop_limits = 64;
+
+ for (i = 0; i < 16; i++) {
+ ipv6_h->src_addr[i] = 0x0;
+ ipv6_h->dst_addr[i] = ipv6[i];
+ }
+
+ icmpv6_h->icmpv6_type = ICMPV6_NEIGHBOR_SOLICITATION;
+ icmpv6_h->icmpv6_code = 0;
+
+ icmpv6_nd_h->icmpv6_reserved = 0x0;
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ icmpv6_nd_h->target_ipv6[i] = ipv6[i];
+ icmpv6_nd_h->type = e_Source_Link_Layer_Address;
+ icmpv6_nd_h->length = 1;
+ memcpy(&icmpv6_nd_h->link_layer_addr[0], &port->macaddr[0], 6);
+
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum = ~icmpv6_ipv6_nd_checksum(icmpv6_pkt);
+
+ icmpv6_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr) + 32;
+ icmpv6_pkt->data_len = icmpv6_pkt->pkt_len;
+
+ return icmpv6_pkt;
+}
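Both checksum helpers in lib_icmpv6.c assemble an IPv6 pseudo-header (source address, destination address, a zero/next-header block, and the upper-layer length) in a scratch buffer and run rte_raw_cksum() over it together with the ICMPv6 message, per RFC 2460 / RFC 4443. The struct below only illustrates the byte layout those helpers build by hand; it is a hypothetical type, not one defined in this code. The helpers place the length after the next-header byte, which differs from the RFC's field order, but the one's-complement sum is insensitive to reordering aligned fields, so the result is the same.

/* Illustrative layout of the scratch buffer built by
 * icmpv6_ipv6_nd_checksum()/icmpv6_ipv6_echo_checksum();
 * hypothetical type for explanation only. */
struct icmpv6_cksum_pseudo_hdr {
	uint8_t  src_addr[16];     /* ipv6_h->src_addr */
	uint8_t  dst_addr[16];     /* ipv6_h->dst_addr */
	uint8_t  zero[3];          /* three zero bytes */
	uint8_t  next_header;      /* ipv6_h->proto, 58 for ICMPv6 */
	uint32_t upper_layer_len;  /* ICMPv6 length, big endian */
	/* ...followed by the ICMPv6 header and payload... */
} __attribute__ ((__packed__));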
diff --git a/common/VIL/l2l3_stack/lib_icmpv6.h b/common/VIL/l2l3_stack/lib_icmpv6.h
new file mode 100644
index 00000000..e9ccca14
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_icmpv6.h
@@ -0,0 +1,113 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+/* Author - Santosh Sethupathi */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include "lib_arp.h"
+#include <rte_pipeline.h>
+#include "rte_ether.h"
+
+/**
+* ICMPv6 Header
+*/
+
+struct icmpv6_hdr {
+ uint8_t icmpv6_type; /**< ICMPV6 packet type. */
+ uint8_t icmpv6_code; /**< ICMPV6 packet code. */
+ uint16_t icmpv6_cksum; /**< ICMPV6 packet checksum. */
+} __attribute__ ((__packed__));
+
+/**
+* ICMPV6 Info Header
+*/
+struct icmpv6_info_hdr {
+ uint16_t icmpv6_ident; /**< ICMPV6 packet identifier. */
+ uint16_t icmpv6_seq_nb; /**< ICMPV6 packet sequence number. */
+} __attribute__ ((__packed__));
+
+/**
+ * ICMPV6 ND Header
+ */
+struct icmpv6_nd_hdr {
+ /*ND Advertisement flags */
+ uint32_t icmpv6_reserved; /**< bit31-Router, bit30-Solicited, bit29-Override, bit28-bit0 unused */
+
+ uint8_t target_ipv6[16]; /**< target IPv6 address */
+ uint8_t type; /**< ICMPv6 Option*/
+ uint8_t length; /**< Length */
+ uint8_t link_layer_addr[6]; /**< Link layer address */
+} __attribute__ ((__packed__));
+
+/* Icmpv6 types */
+#define ICMPV6_PROTOCOL_ID 58
+#define ICMPV6_ECHO_REQUEST 0x0080
+#define ICMPV6_ECHO_REPLY 0x0081
+#define ICMPV6_NEIGHBOR_SOLICITATION 0x0087
+#define ICMPV6_NEIGHBOR_ADVERTISEMENT 0x0088
+#define IPV6_MULTICAST 0xFF02
+
+#define NEIGHBOR_SOLICITATION_SET 0x40000000
+#define NEIGHBOR_ROUTER_OVERRIDE_SET 0xa0000000
+enum icmpv6_link_layer_Address_type {
+ e_Source_Link_Layer_Address = 1,
+ e_Target_Link_Layer_Address,
+ e_Link_Layer_Address
+};
+
+/* Checks whether ipv6 is multicast
+ * @param ipv6
+ */
+uint8_t is_multicast_ipv6_addr(uint8_t ipv6[]);
+
+/**
+*Icmpv6 Port address
+*/
+struct icmpv6_port_address {
+ uint32_t ipv6[16]; /**< Ipv6 address */
+ uint64_t mac_addr; /**< Mac address */
+};
+
+/**
+* To store Icmpv6 Port address
+*/
+struct icmpv6_port_address icmpv6_port_addresses[RTE_MAX_ETHPORTS];
+
+#define MAX_NUM_ICMPv6_ENTRIES 64
+struct rte_mbuf *lib_icmpv6_pkt;
+
+/**
+ * Processes icmpv6 packets
+ * @param pkt
+ * pkt mbuf packets
+ * @param port
+ * port - port structure
+ */
+void process_icmpv6_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port);
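lib_icmpv6.h declares is_multicast_ipv6_addr() without showing its body. A straightforward version, sketched here only from the fact that IPv6 multicast addresses use the ff00::/8 prefix (RFC 4291), could look like the following; the actual implementation lives elsewhere in the stack and may differ.

/* Sketch only: IPv6 multicast addresses start with 0xff,
 * so checking the first byte is enough. */
static inline uint8_t is_multicast_ipv6_addr_sketch(uint8_t ipv6[])
{
	return (ipv6[0] == 0xff) ? 1 : 0;
}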
diff --git a/common/VIL/l2l3_stack/main_l2l3.c b/common/VIL/l2l3_stack/main_l2l3.c
new file mode 100644
index 00000000..08c97641
--- /dev/null
+++ b/common/VIL/l2l3_stack/main_l2l3.c
@@ -0,0 +1,304 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_MAIN_H__
+#define __INCLUDE_MAIN_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_timer.h>
+#include "lib_arp.h"
+#include "l2_proto.h"
+#include "interface.h"
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#define TIMER_RESOLUTION_CYCLES 20000000ULL /* around 10ms at 2 Ghz */
+unsigned lcore_id = 1;
+void convert_ipstr_to_numeric(void);
+struct sockaddr_in ipaddr1, ipaddr2, ipaddr3, ipaddr4;
+uint8_t ipv6_addr0[16] = {
+ 0, 0x64, 0xff, 0x9b, 0, 0, 0, 0, 0, 0, 0, 0, 0xc0, 0x10, 0x28, 0x15
+};
+
+uint8_t ipv6_addr1[16] = {
+ 0x12, 0x64, 0xff, 0x9b, 0, 0, 0, 0, 0, 0, 0, 0, 0xc0, 0x10, 0x28, 0x15
+};
+
+/*{port_id, nrx_queue, ntx_queue, adminstate, promisc}*/
+port_config_t portconf[5] = {
+ {
+ .port_id = 0,
+ .nrx_queue = 1,
+ .ntx_queue = 1,
+ .state = 1,
+ .promisc = 1,
+ .mempool = {
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) +
+ RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+ },
+ .port_conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 0, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,},
+ .lpbk_mode = 0,
+ .intr_conf = {
+ .lsc = 1,
+ /**< lsc interrupt feature enabled */
+ }
+ },
+ .rx_conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ },
+ .tx_conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+ },
+ {
+ .port_id = 1,
+ .nrx_queue = 1,
+ .ntx_queue = 1,
+ .state = 1,
+ .promisc = 1,
+ .mempool = {
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) +
+ RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+ },
+ .port_conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 0, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,},
+ .lpbk_mode = 0,
+ .intr_conf = {
+ .lsc = 1,
+ /**< lsc interrupt feature enabled */
+ }
+ },
+ .rx_conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ },
+ .tx_conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+ },
+};
+
+static __attribute__ ((noreturn))
+int lcore_mainloop (__attribute__ ((unused))
+ void *arg)
+{
+ l2_phy_interface_t *port;
+ int8_t portid;
+ struct rte_mbuf *pkts_burst[IFM_BURST_SIZE];
+ uint32_t nb_tx, nb_rx;
+ const uint64_t drain_tsc =
+ (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+ uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
+ while (1) {
+ port = ifm_get_first_port();
+ while (port != NULL) {
+ rte_timer_manage();
+ portid = port->pmdid;
+ cur_tsc = rte_rdtsc();
+ diff_tsc = cur_tsc - prev_tsc;
+
+ /* call rx function ptr from port, with port.arpq, */
+ if (unlikely(diff_tsc > drain_tsc)) {
+ if (port->tx_buf_len > 0) {
+ RTE_SET_USED(nb_tx);
+
+ //nb_tx = port->transmit_bulk_pkts(port, port->tx_buf, port->tx_buf_len);
+ port->tx_buf_len = 0;
+ }
+ prev_tsc = cur_tsc;
+ }
+ nb_rx = port->retrieve_bulk_pkts(portid, 0, pkts_burst);
+ port->n_rxpkts += nb_rx;
+ protocol_handler_recv(pkts_burst, nb_rx, port);
+ port = ifm_get_next_port(portid);
+ if (port != NULL)
+ prev_tsc = cur_tsc;
+ }
+ }
+}
+
+void convert_ipstr_to_numeric(void)
+{
+ memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
+ ipaddr1.sin_addr.s_addr = inet_addr("30.0.0.10");
+ memset(&ipaddr2, '\0', sizeof(struct sockaddr_in));
+ ipaddr2.sin_addr.s_addr = inet_addr("120.0.0.10");
+}
+
+int main(int argc, char **argv)
+{
+ int ret = 0;
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+ /* Port init */
+ //lib_arp_init();
+ ifm_init();
+ ifm_configure_ports(portconf);
+
+ //convert_ipstr_to_numeric();
+ //ifm_add_ipv4_port(0, ipaddr1.sin_addr.s_addr, 24);
+ //ifm_add_ipv4_port(1, ipaddr2.sin_addr.s_addr, 24);
+ ifm_add_ipv6_port(0, ipv6_addr0, 96);
+ ifm_add_ipv6_port(1, ipv6_addr1, 96);
+ print_interface_details();
+
+ //filter_init();
+ l3fwd_init();
+ create_arp_table();
+ create_nd_table();
+ populate_lpm_routes();
+ /*call the main loop */
+ /* launch per-lcore init on every lcore */
+ int ii;
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", ipv6_addr0[ii], ipv6_addr0[ii + 1]);
+ }
+ printf("\n");
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", ipv6_addr1[ii], ipv6_addr1[ii + 1]);
+ }
+ printf("REMOTE LAUNCH STARTED........\n");
+ rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
+ printf("REMOTE LAUNCH DONE.......\n");
+ if (rte_eal_wait_lcore(lcore_id) < 0) {
+ }
+ return 0;
+}
+#endif
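For reference, the drain interval in lcore_mainloop() converts a microsecond budget into TSC cycles with a round-up division before scaling. The numbers below are assumptions for illustration only: a 2 GHz TSC and a BURST_TX_DRAIN_US of 100 (the macro is defined elsewhere in the stack).

/* Worked example of the drain_tsc computation, with assumed values. */
uint64_t tsc_hz = 2000000000ULL;      /* what rte_get_tsc_hz() might return */
uint64_t us_per_s = 1000000ULL;       /* US_PER_S */
uint64_t drain_us = 100ULL;           /* assumed BURST_TX_DRAIN_US */
uint64_t drain_tsc = (tsc_hz + us_per_s - 1) / us_per_s * drain_us;
/* (2000000000 + 999999) / 1000000 = 2000 cycles per microsecond,
 * so drain_tsc = 200000 cycles: buffered TX is flushed roughly every 100 us. */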
diff --git a/common/VIL/l2l3_stack/tsx.c b/common/VIL/l2l3_stack/tsx.c
new file mode 100644
index 00000000..a361c945
--- /dev/null
+++ b/common/VIL/l2l3_stack/tsx.c
@@ -0,0 +1,167 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <immintrin.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+#include <stdint.h>
+#include "rte_atomic.h"
+#include "tsx.h"
+int max_retries = 3;
+
+static void
+run_cpuid (uint32_t eax, uint32_t ecx, uint32_t *abcd)
+{
+ uint32_t ebx = 0, edx = 0;
+
+#if defined(__i386__) && defined (__PIC__)
+ /* in case of PIC under 32-bit EBX cannot be clobbered */
+__asm__ ("movl %%ebx, %%edi \n\t cpuid \n\t xchgl %%ebx, %%edi":"=D" (ebx),
+#else
+__asm__ ("cpuid":"+b" (ebx),
+#endif
+ "+a" (eax), "+c" (ecx), "=d" (edx));
+ abcd[0] = eax;
+ abcd[1] = ebx;
+ abcd[2] = ecx;
+ abcd[3] = edx;
+}
+
+static int
+check_xcr0_ymm (void)
+{
+uint32_t xcr0;
+__asm__ ("xgetbv" : "=a" (xcr0) : "c" (0) : "%edx");
+return ((xcr0 & 6) == 6);/* checking if xmm and ymm state are enabled in XCR0 */
+}
+
+static int
+check_4th_gen_intel_core_features (void)
+{
+ uint32_t abcd[4];
+ uint32_t fma_movbe_osxsave_mask = ((1 << 12) | (1 << 22) | (1 << 27));
+ uint32_t avx2_bmi12_mask = (1 << 5) | (1 << 3) | (1 << 8);
+
+ /* CPUID.(EAX=01H, ECX=0H):ECX.FMA[bit 12]==1 &&
+ CPUID.(EAX=01H, ECX=0H):ECX.MOVBE[bit 22]==1 &&
+ CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 */
+ run_cpuid (1, 0, abcd);
+ if ((abcd[2] & fma_movbe_osxsave_mask) != fma_movbe_osxsave_mask) {
+ printf ("Failing in if cond-1\n");
+ return 0;
+ }
+ if (!check_xcr0_ymm ()) {
+ printf ("Failing in if cond-2\n");
+ return 0;
+ }
+
+ /* CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1 &&
+ CPUID.(EAX=07H, ECX=0H):EBX.BMI1[bit 3]==1 &&
+ CPUID.(EAX=07H, ECX=0H):EBX.BMI2[bit 8]==1 */
+ run_cpuid (7, 0, abcd);
+ if ((abcd[1] & avx2_bmi12_mask) != avx2_bmi12_mask) {
+ printf ("Failing in if cond-3\n");
+ return 0;
+ }
+ /* CPUID.(EAX=80000001H):ECX.LZCNT[bit 5]==1 */
+ run_cpuid (0x80000001, 0, abcd);
+ if ((abcd[2] & (1 << 5)) == 0) {
+ printf ("Failing in if cond-4\n");
+ return 0;
+ }
+ /* CPUID.(EAX=07H, ECX=0H).EBX.RTM[bit 11]==1 */
+ run_cpuid (7, 0, abcd);
+ if ((abcd[1] & (1 << 11)) == 0) {
+ printf ("Failing in if cond-5\n");
+ return 0;
+ }
+ /* CPUID.(EAX=07H, ECX=0H).EBX.HLE[bit 4]==1 */
+ run_cpuid (7, 0, abcd);
+ if ((abcd[1] & (1 << 4)) == 0) {
+ printf ("Failing in if cond-6\n");
+ return 0;
+ }
+ return 1;
+}
+
+int
+can_use_intel_core_4th_gen_features (void)
+{
+ static int the_4th_gen_features_available = -1;
+ /* test is performed once */
+ if (the_4th_gen_features_available < 0)
+ the_4th_gen_features_available = check_4th_gen_intel_core_features ();
+ return the_4th_gen_features_available;
+}
+
+void
+rtm_init (void)
+{
+ naborted = (rte_atomic64_t) RTE_ATOMIC64_INIT (0);
+
+ //RTE_ATOMIC64_INIT(naborted);
+}
+
+int
+rtm_lock (void)
+{
+ int nretries = 0;
+ while (1) {
+ ++nretries;
+ unsigned int status = _xbegin ();
+ if (status == _XBEGIN_STARTED) {
+ if (!is_hle_locked ())
+ return 1; // successfully started transaction
+ // started transaction but someone executes the transaction section
+ // non-speculatively (acquired the fall-back lock)
+ _xabort (0xff); // abort with code 0xff
+ }
+ // abort handler
+ rte_atomic64_inc (&naborted); // do abort statistics
+ printf
+ ("DEBUG: Transaction aborted: %d time(s) with the status: %u\n",
+ nretries, status);
+ // handle _xabort(0xff) from above
+ if ((status & _XABORT_EXPLICIT)
+ && _XABORT_CODE (status) == 0xff && !(status & _XABORT_NESTED)) {
+ while (is_hle_locked ())
+ _mm_pause (); // wait until lock is free
+ }
+ else if (!(status & _XABORT_RETRY))
+ break; // take the fall-back lock if the retry abort flag is not set
+ if (nretries >= max_retries)
+ break; // too many retries, take the fall-back lock
+ }
+ hle_lock ();
+ return 1;
+}
+
+int
+rtm_unlock (void)
+{
+ if (is_hle_locked ())
+ hle_release ();
+
+ else
+ _xend ();
+ return 1;
+}
+
+int
+is_rtm_locked (void)
+{
+ return ((int) _xtest ());
+}
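A minimal, hypothetical usage sketch of the rtm_lock()/rtm_unlock() pair defined above, guarding a shared counter. The counter and the calling context are made up; rtm_init() is assumed to have run once at startup, and the HLE fall-back lock comes from hle.c.

/* Hypothetical critical section protected by the RTM lock above.
 * rtm_lock() tries a hardware transaction first and takes the HLE
 * fall-back lock after max_retries aborts; rtm_unlock() either commits
 * the transaction or releases the fall-back lock. */
static uint64_t shared_counter;

static void bump_shared_counter(void)
{
	rtm_lock();
	shared_counter++;	/* updates here commit or abort as a unit */
	rtm_unlock();
}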
diff --git a/common/VIL/l2l3_stack/tsx.h b/common/VIL/l2l3_stack/tsx.h
new file mode 100644
index 00000000..8b748165
--- /dev/null
+++ b/common/VIL/l2l3_stack/tsx.h
@@ -0,0 +1,38 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef _TSX_H_
+#define _TSX_H_
+#include <rte_atomic.h>
+#define TRUE 1
+#define FALSE 0
+
+volatile int mutex_val;
+
+rte_atomic64_t naborted;
+
+void hle_init(void);
+int hle_lock(void);
+int hle_release(void);
+int is_hle_locked(void);
+
+void rtm_init(void);
+int rtm_lock(void);
+int rtm_unlock(void);
+int is_rtm_locked(void);
+
+int can_use_intel_core_4th_gen_features(void);
+
+#endif