Diffstat (limited to 'VNFs/DPPD-PROX')
-rw-r--r--  VNFs/DPPD-PROX/handle_lat.c    |   7
-rw-r--r--  VNFs/DPPD-PROX/packet_utils.c  |  28
-rw-r--r--  VNFs/DPPD-PROX/packet_utils.h  |   4
-rw-r--r--  VNFs/DPPD-PROX/prox_args.c     |   4
-rw-r--r--  VNFs/DPPD-PROX/prox_port_cfg.c | 152
-rw-r--r--  VNFs/DPPD-PROX/task_base.h     |   1
-rw-r--r--  VNFs/DPPD-PROX/task_init.c     |  19
-rw-r--r--  VNFs/DPPD-PROX/task_init.h     |   2
-rw-r--r--  VNFs/DPPD-PROX/tx_pkt.c        |  33
-rw-r--r--  VNFs/DPPD-PROX/tx_pkt.h        |   2
10 files changed, 154 insertions(+), 98 deletions(-)
diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c
index a0e5fb42..8c7de8f1 100644
--- a/VNFs/DPPD-PROX/handle_lat.c
+++ b/VNFs/DPPD-PROX/handle_lat.c
@@ -505,6 +505,7 @@ static void task_lat_store_lat(struct task_lat *task, uint64_t rx_packet_index,
static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
struct task_lat *task = (struct task_lat *)tbase;
+ int rc;
// If link is down, link_speed is 0
if (unlikely(task->link_speed == 0)) {
@@ -662,7 +663,11 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
task->begin = tbase->aux->tsc_rx.before;
task->last_pkts_tsc = tbase->aux->tsc_rx.after;
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
+ rc = task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
+ // non_dp_count should not be drop-handled, as these packets are all by definition considered not handled
+ // RX = DISCARDED + HANDLED + NON_DP + (TX - TX_NON_DP) + TX_FAIL
+ TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, -non_dp_count);
+ return rc;
}
static void init_task_lat_latency_buffer(struct task_lat *task, uint32_t core_id)
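The accounting identity quoted in the new comment is easier to see with concrete numbers. A hypothetical worked example (counter values invented for illustration, not taken from the patch):

/* A lat task receives 64 packets, 2 of which are non-dataplane (e.g. ARP
 * replies destined for the control plane). tx_pkt() accounts those 2 as
 * DROP_HANDLED, so before the correction the identity does not balance:
 *
 *   RX = DISCARDED + HANDLED + NON_DP + (TX - TX_NON_DP) + TX_FAIL
 *   64 =     0     +    2    +   2    +     (64 - 2)     +    0     = 66
 *
 * Subtracting non_dp_count from DROP_HANDLED removes the double count:
 *
 *   64 =     0     +    0    +   2    +     (64 - 2)     +    0     = 64
 */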
diff --git a/VNFs/DPPD-PROX/packet_utils.c b/VNFs/DPPD-PROX/packet_utils.c
index a4762300..06a9ba64 100644
--- a/VNFs/DPPD-PROX/packet_utils.c
+++ b/VNFs/DPPD-PROX/packet_utils.c
@@ -89,7 +89,7 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
return SEND_MBUF;
} else if (tsc > l3->gw.arp_update_time) {
// long time since we have sent an arp, send arp
- l3->gw.arp_update_time = tsc + hz;
+ l3->gw.arp_update_time = tsc + l3->arp_update_time * hz / 1000;
*ip_dst = l3->gw.ip;
if ((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_timeout)){
// MAC is valid in the table => send also the mbuf
@@ -120,7 +120,7 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
return SEND_MBUF;
} else if (tsc > l3->optimized_arp_table[idx].arp_update_time) {
// ARP not sent since a long time, send ARP
- l3->optimized_arp_table[idx].arp_update_time = tsc + hz;
+ l3->optimized_arp_table[idx].arp_update_time = tsc + l3->arp_update_time * hz / 1000;
if (tsc < l3->optimized_arp_table[idx].arp_timeout) {
// MAC still valid => also send mbuf
memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(struct ether_addr));
@@ -137,7 +137,7 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
}
// IP address not found in table
l3->optimized_arp_table[l3->n_pkts].ip = *ip_dst;
- l3->optimized_arp_table[l3->n_pkts].arp_update_time = tsc + hz;
+ l3->optimized_arp_table[l3->n_pkts].arp_update_time = tsc + l3->arp_update_time * hz / 1000;
l3->n_pkts++;
if (l3->n_pkts < 4) {
@@ -171,7 +171,7 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
return DROP_MBUF;
} else {
l3->arp_table[ret].ip = *ip_dst;
- l3->arp_table[ret].arp_update_time = tsc + hz;
+ l3->arp_table[ret].arp_update_time = tsc + l3->arp_update_time * hz / 1000;
}
return SEND_ARP;
} else {
@@ -182,6 +182,6 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
return SEND_MBUF;
} else if (tsc > l3->arp_table[ret].arp_update_time) {
// ARP not sent since a long time, send ARP
- l3->arp_table[ret].arp_update_time = tsc + hz;
+ l3->arp_table[ret].arp_update_time = tsc + l3->arp_update_time * hz / 1000;
if (tsc < l3->arp_table[ret].arp_timeout) {
// MAC still valid => send also MBUF
@@ -229,6 +230,14 @@ void task_init_l3(struct task_base *tbase, struct task_args *targ)
tbase->l3.core_id = targ->lconf->id;
tbase->l3.task_id = targ->id;
tbase->l3.tmaster = targ->tmaster;
+ if (targ->arp_timeout != 0)
+ tbase->l3.arp_timeout = targ->arp_timeout;
+ else
+ tbase->l3.arp_timeout = DEFAULT_ARP_TIMEOUT;
+ if (targ->arp_update_time != 0)
+ tbase->l3.arp_update_time = targ->arp_update_time;
+ else
+ tbase->l3.arp_update_time = DEFAULT_ARP_UPDATE_TIME;
}
void task_start_l3(struct task_base *tbase, struct task_args *targ)
@@ -299,7 +308,7 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui
// MAC address of the gateway
memcpy(&l3->gw.mac, &hdr->arp.data.sha, 6);
l3->flags |= FLAG_DST_MAC_KNOWN;
- l3->gw.arp_timeout = tsc + 30 * hz;
+ l3->gw.arp_timeout = tsc + l3->arp_timeout * hz / 1000;
} else if (l3->n_pkts < 4) {
// Few packets tracked - should be faster to loop through them than using a hash table
for (idx = 0; idx < l3->n_pkts; idx++) {
@@ -310,7 +319,7 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui
if (idx < l3->n_pkts) {
// IP found in the table => update the stored MAC and refresh the timeout
memcpy(&l3->optimized_arp_table[idx].mac, &(hdr->arp.data.sha), sizeof(struct ether_addr));
- l3->optimized_arp_table[idx].arp_timeout = tsc + 30 * hz;
+ l3->optimized_arp_table[idx].arp_timeout = tsc + l3->arp_timeout * hz / 1000;
}
} else {
int ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
@@ -318,16 +327,17 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui
plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(ip));
} else {
memcpy(&l3->arp_table[ret].mac, &(hdr->arp.data.sha), sizeof(struct ether_addr));
- l3->arp_table[ret].arp_timeout = tsc + 30 * hz;
+ l3->arp_table[ret].arp_timeout = tsc + l3->arp_timeout * hz / 1000;
}
}
tx_drop(mbufs[j]);
break;
case ARP_REPLY_FROM_CTRL:
case ARP_REQ_FROM_CTRL:
- TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
out[0] = 0;
- tbase->aux->tx_pkt_l2(tbase, &mbufs[j], 1, out);
+ // tx_ctrlplane_pkt does not drop packets
+ tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
+ TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
break;
}
}
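Every timer rewritten above follows the same pattern: arp_update_time and arp_timeout are configured in milliseconds, and hz is the TSC frequency in cycles per second, so value * hz / 1000 converts the setting into cycles. A minimal standalone sketch of the conversion, assuming rte_get_tsc_hz() as the source of hz (PROX caches hz rather than querying it per packet):

#include <stdint.h>
#include <rte_cycles.h>

/* Turn a millisecond duration into an absolute TSC deadline. */
static inline uint64_t ms_to_tsc_deadline(uint64_t tsc_now, uint32_t ms)
{
	uint64_t hz = rte_get_tsc_hz();            /* cycles per second */
	return tsc_now + (uint64_t)ms * hz / 1000; /* hz / 1000 = cycles per ms */
}

With hz at 2 GHz and the default arp_update_time of 1000 ms, the deadline lands 2,000,000,000 cycles ahead, i.e. exactly one second.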
diff --git a/VNFs/DPPD-PROX/packet_utils.h b/VNFs/DPPD-PROX/packet_utils.h
index 74a3f60e..cb4dc913 100644
--- a/VNFs/DPPD-PROX/packet_utils.h
+++ b/VNFs/DPPD-PROX/packet_utils.h
@@ -33,6 +33,8 @@ enum {
SEND_ARP,
DROP_MBUF
};
+#define DEFAULT_ARP_TIMEOUT (1000 * 3600 * 24 * 15) // 15 days (in ms), i.e. effectively disabled by default
+#define DEFAULT_ARP_UPDATE_TIME (1000) // 1 second
struct task_base;
struct task_args;
@@ -50,6 +52,8 @@ struct l3_base {
uint8_t reachable_port_id;
uint8_t core_id;
uint8_t task_id;
+ uint32_t arp_timeout;
+ uint32_t arp_update_time;
struct arp_table gw;
struct arp_table optimized_arp_table[4];
struct rte_hash *ip_hash;
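Both defaults are expressed in milliseconds, consistent with the * hz / 1000 scaling in packet_utils.c. A quick sanity check that the 15-day constant fits the 32-bit fields added to l3_base:

/* 1000 * 3600 * 24 * 15 = 1,296,000,000 ms (15 days)
 * UINT32_MAX            = 4,294,967,295 ms (about 49.7 days)
 * => the default fits in uint32_t (and in the int arithmetic of the
 *    macro); configured values above ~49 days would wrap. */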
diff --git a/VNFs/DPPD-PROX/prox_args.c b/VNFs/DPPD-PROX/prox_args.c
index 59c514fc..d77eab1a 100644
--- a/VNFs/DPPD-PROX/prox_args.c
+++ b/VNFs/DPPD-PROX/prox_args.c
@@ -1362,6 +1362,10 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "local ipv6")) { /* source IPv6 address to be used for packets */
return parse_ip6(&targ->local_ipv6, pkey);
}
+ if (STR_EQ(str, "arp timeout"))
+ return parse_int(&targ->arp_timeout, pkey);
+ if (STR_EQ(str, "arp update time"))
+ return parse_int(&targ->arp_update_time, pkey);
if (STR_EQ(str, "number of packets"))
return parse_int(&targ->n_pkts, pkey);
if (STR_EQ(str, "pipes")) {
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c
index 5c5ca58f..fc4971f1 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.c
+++ b/VNFs/DPPD-PROX/prox_port_cfg.c
@@ -309,71 +309,8 @@ uint8_t init_rte_ring_dev(void)
return nb_ring_dev;
}
-static void init_port(struct prox_port_cfg *port_cfg)
+static void print_port_capa(struct prox_port_cfg *port_cfg)
{
- static char dummy_pool_name[] = "0_dummy";
- struct rte_eth_link link;
- uint8_t port_id;
- int ret;
-
- port_id = port_cfg - prox_port_cfg;
- plog_info("\t*** Initializing port %u ***\n", port_id);
- plog_info("\t\tPort name is set to %s\n", port_cfg->name);
- plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
- plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
-#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
- plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
-#endif
-
- PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
- "\t\t port %u is enabled but no RX or TX queues have been configured", port_id);
-
- if (port_cfg->n_rxq == 0) {
- /* not receiving on this port */
- plog_info("\t\tPort %u had no RX queues, setting to 1\n", port_id);
- port_cfg->n_rxq = 1;
- uint32_t mbuf_size = TX_MBUF_SIZE;
- plog_info("\t\tAllocating dummy memory pool on socket %u with %u elements of size %u\n",
- port_cfg->socket, port_cfg->n_rxd, mbuf_size);
- port_cfg->pool[0] = rte_mempool_create(dummy_pool_name, port_cfg->n_rxd, mbuf_size,
- 0,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- prox_pktmbuf_init, 0,
- port_cfg->socket, 0);
- PROX_PANIC(port_cfg->pool[0] == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
- port_cfg->socket, port_cfg->n_rxd);
- dummy_pool_name[0]++;
- } else {
- // Most pmd should now support setting mtu
- if (port_cfg->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) {
- plog_info("\t\tMTU is too big for the port, reducing MTU from %d to %d\n", port_cfg->mtu, port_cfg->max_rx_pkt_len);
- port_cfg->mtu = port_cfg->max_rx_pkt_len;
- }
- plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id);
- ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu);
- if (ret)
- plog_err("\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);
-
- if (port_cfg->n_txq == 0) {
- /* not sending on this port */
- plog_info("\t\tPort %u had no TX queues, setting to 1\n", port_id);
- port_cfg->n_txq = 1;
- }
- }
-
- if (port_cfg->n_rxq > 1) {
- // Enable RSS if multiple receive queues
- port_cfg->port_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS;
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
-#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONFRAG_IPV4_UDP;
-#else
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
-#endif
- }
-
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
if (port_cfg->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
@@ -457,6 +394,80 @@ static void init_port(struct prox_port_cfg *port_cfg)
plog_info("\t\tdefault RX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_rxportconf.burst_size, port_cfg->dev_info.default_rxportconf.ring_size, port_cfg->dev_info.default_rxportconf.nb_queues);
plog_info("\t\tdefault TX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_txportconf.burst_size, port_cfg->dev_info.default_txportconf.ring_size, port_cfg->dev_info.default_txportconf.nb_queues);
#endif
+}
+
+static void init_port(struct prox_port_cfg *port_cfg)
+{
+ static char dummy_pool_name[] = "0_dummy";
+ struct rte_eth_link link;
+ uint8_t port_id;
+ int ret;
+
+ port_id = port_cfg - prox_port_cfg;
+ plog_info("\t*** Initializing port %u ***\n", port_id);
+ plog_info("\t\tPort name is set to %s\n", port_cfg->name);
+ plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
+ plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
+#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
+ plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
+#endif
+
+ PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
+ "\t\t port %u is enabled but no RX or TX queues have been configured", port_id);
+
+ if (port_cfg->n_rxq == 0) {
+ /* not receiving on this port */
+ plog_info("\t\tPort %u had no RX queues, setting to 1\n", port_id);
+ port_cfg->n_rxq = 1;
+ uint32_t mbuf_size = TX_MBUF_SIZE;
+ plog_info("\t\tAllocating dummy memory pool on socket %u with %u elements of size %u\n",
+ port_cfg->socket, port_cfg->n_rxd, mbuf_size);
+ port_cfg->pool[0] = rte_mempool_create(dummy_pool_name, port_cfg->n_rxd, mbuf_size,
+ 0,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ prox_pktmbuf_init, 0,
+ port_cfg->socket, 0);
+ PROX_PANIC(port_cfg->pool[0] == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
+ port_cfg->socket, port_cfg->n_rxd);
+ dummy_pool_name[0]++;
+ } else {
+ // Most pmd should now support setting mtu
+ if (port_cfg->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) {
+ plog_info("\t\tMTU is too big for the port, reducing MTU from %d to %d\n", port_cfg->mtu, port_cfg->max_rx_pkt_len);
+ port_cfg->mtu = port_cfg->max_rx_pkt_len;
+ }
+ plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id);
+ ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu);
+ if (ret)
+ plog_err("\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);
+
+ if (port_cfg->n_txq == 0) {
+ /* not sending on this port */
+ plog_info("\t\tPort %u had no TX queues, setting to 1\n", port_id);
+ port_cfg->n_txq = 1;
+ }
+ }
+
+ print_port_capa(port_cfg);
+
+ if (port_cfg->n_rxq > 1) {
+ // Enable RSS if multiple receive queues
+ port_cfg->port_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
+#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP|ETH_RSS_UDP;
+#else
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
+#endif
+ }
+
+ // Make sure that the requested RSS offload is supported by the PMD
+#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf &= port_cfg->dev_info.flow_type_rss_offloads;
+#endif
+ plog_info("\t\t Enabling RSS rss_hf = 0x%lx (requested 0x%llx)\n", port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf, ETH_RSS_IP|ETH_RSS_UDP);
// rxmode such as hw src strip
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
@@ -490,19 +501,6 @@ static void init_port(struct prox_port_cfg *port_cfg)
// Multi Segments
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
CONFIGURE_TX_OFFLOAD(DEV_TX_OFFLOAD_MULTI_SEGS);
- //if (port_cfg->requested_tx_offload & DEV_TX_OFFLOAD_MULTI_SEGS) {
- //if (port_cfg->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS) {
- //port_cfg->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- //plog_info("\t\tMULTI SEGS TX offloads enabled on port)\n");
- //} else if (port_cfg->dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MULTI_SEGS) {
- //port_cfg->tx_conf.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- //plog_info("\t\tMULTI SEGS TX offloads enabled on queue)\n");
- //} else {
- //port_cfg->requested_tx_offload &= ~DEV_TX_OFFLOAD_MULTI_SEGS;
- //plog_info("\t\tMULTI SEGS TX offloads disabled) as neither port or queue supports it\n");
- //}
- //} else
- //plog_info("\t\tMULTI SEGS TX offloads disabled)\n");
#else
if (!strcmp(port_cfg->short_name, "vmxnet3")) {
port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
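Masking rss_hf with dev_info.flow_type_rss_offloads, as the relocated RSS block now does, is the usual DPDK idiom for keeping port configuration portable across PMDs that implement only a subset of the hash types. A condensed sketch of the pattern (helper name hypothetical):

#include <rte_ethdev.h>

/* Clamp the requested RSS hash functions to the PMD's capabilities so
 * that rte_eth_dev_configure() does not fail on NICs with partial
 * RSS support. */
static void clamp_rss_to_pmd(struct rte_eth_conf *port_conf,
                             const struct rte_eth_dev_info *dev_info)
{
	port_conf->rx_adv_conf.rss_conf.rss_hf &=
	    dev_info->flow_type_rss_offloads;
}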
diff --git a/VNFs/DPPD-PROX/task_base.h b/VNFs/DPPD-PROX/task_base.h
index b4a33372..64d17436 100644
--- a/VNFs/DPPD-PROX/task_base.h
+++ b/VNFs/DPPD-PROX/task_base.h
@@ -174,6 +174,7 @@ struct task_base_aux {
int (*tx_pkt_hw)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
uint16_t (*tx_pkt_try)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
void (*stop)(struct task_base *tbase);
+ int (*tx_ctrlplane_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
void (*start)(struct task_base *tbase);
void (*stop_last)(struct task_base *tbase);
void (*start_first)(struct task_base *tbase);
diff --git a/VNFs/DPPD-PROX/task_init.c b/VNFs/DPPD-PROX/task_init.c
index 2361d32c..08ccaf97 100644
--- a/VNFs/DPPD-PROX/task_init.c
+++ b/VNFs/DPPD-PROX/task_init.c
@@ -208,7 +208,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) {
// Transmitting to a ring in NO DROP. We need to make sure the receiving task is not running on the same core.
// Otherwise we might end up in a dead lock: trying in a loop to transmit to a task which cannot receive anymore
- // (as npt being scheduled).
+ // (as not being scheduled).
struct core_task ct;
struct task_args *dtarg;
for (unsigned int j = 0; j < targ->nb_txrings; j++) {
@@ -277,6 +277,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
prev = prev->tx_opt_ring_task;
}
}
+
if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) {
if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) {
if (targ->tx_opt_ring) {
@@ -350,13 +351,6 @@ struct task_base *init_task_struct(struct task_args *targ)
offset = init_rx_tx_rings_ports(targ, tbase, offset);
tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);
- if (targ->nb_txports != 0) {
- if (targ->flags & TASK_ARG_L3) {
- tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
- tbase->tx_pkt = tx_pkt_l3;
- }
- }
-
if (targ->task_init->flag_features & TASK_FEATURE_RX_ALL) {
task_base_add_rx_pkt_function(tbase, rx_pkt_all);
tbase->aux->all_mbufs = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(* tbase->aux->all_mbufs), task_socket);
@@ -372,10 +366,13 @@ struct task_base *init_task_struct(struct task_args *targ)
if (targ->flags & TASK_ARG_L3) {
plog_info("\tTask configured in L3 mode\n");
tbase->l3.ctrl_plane_ring = targ->ctrl_plane_ring;
- }
- if (targ->nb_txports != 0) {
- if (targ->flags & TASK_ARG_L3)
+ if (targ->nb_txports != 0) {
+ tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
+ tbase->tx_pkt = tx_pkt_l3;
+ // Make sure control plane packets such as arp are not dropped
+ tbase->aux->tx_ctrlplane_pkt = targ->nb_txrings ? tx_ctrlplane_sw : tx_ctrlplane_hw;
task_init_l3(tbase, targ);
+ }
}
targ->tbase = tbase;
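The reshuffled block makes the L3 wiring explicit: the original transmit function is saved in aux->tx_pkt_l2, tx_pkt is interposed with tx_pkt_l3, and control-plane traffic gets its own no-drop entry point. A minimal standalone sketch of the interposition (types simplified; only the tx_pkt / tx_pkt_l2 / tx_pkt_l3 names come from the patch):

struct task_base;
typedef int (*tx_fn)(struct task_base *tbase);

struct task_base {
	tx_fn tx_pkt;    /* active transmit entry point */
	tx_fn tx_pkt_l2; /* saved original (kept in aux in PROX) */
};

/* L3 wrapper: resolve the destination MAC first (possibly emitting an
 * ARP request), then forward through the saved L2 path. */
static int tx_pkt_l3(struct task_base *tbase)
{
	/* ... write_dst_mac() decides SEND_MBUF / SEND_ARP / DROP_MBUF ... */
	return tbase->tx_pkt_l2(tbase);
}

static void install_l3_tx(struct task_base *tbase)
{
	tbase->tx_pkt_l2 = tbase->tx_pkt; /* save */
	tbase->tx_pkt = tx_pkt_l3;        /* interpose */
}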
diff --git a/VNFs/DPPD-PROX/task_init.h b/VNFs/DPPD-PROX/task_init.h
index 5186826a..91a0d7eb 100644
--- a/VNFs/DPPD-PROX/task_init.h
+++ b/VNFs/DPPD-PROX/task_init.h
@@ -127,6 +127,8 @@ struct task_args {
uint32_t gateway_ipv4;
uint32_t local_ipv4;
uint32_t remote_ipv4;
+ uint32_t arp_timeout;
+ uint32_t arp_update_time;
struct ipv6_addr local_ipv6; /* For IPv6 Tunnel, it's the local tunnel endpoint address */
struct rte_ring *rx_rings[MAX_RINGS_PER_TASK];
struct rte_ring *tx_rings[MAX_RINGS_PER_TASK];
diff --git a/VNFs/DPPD-PROX/tx_pkt.c b/VNFs/DPPD-PROX/tx_pkt.c
index c5047e56..d494236c 100644
--- a/VNFs/DPPD-PROX/tx_pkt.c
+++ b/VNFs/DPPD-PROX/tx_pkt.c
@@ -762,6 +762,39 @@ int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n
}
return n_pkts;
}
+static inline void dump_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+{
+ uint32_t n_dump = tbase->aux->task_rt_dump.n_print_tx;
+ uint32_t n_trace = tbase->aux->task_rt_dump.n_trace;
+
+ if (unlikely(n_dump)) {
+ n_dump = n_pkts < n_dump? n_pkts : n_dump;
+ for (uint32_t i = 0; i < n_dump; ++i) {
+ plogdx_info(mbufs[i], "TX: ");
+ }
+ tbase->aux->task_rt_dump.n_print_tx -= n_dump;
+ } else if (unlikely(n_trace)) {
+ n_trace = n_pkts < n_trace? n_pkts : n_trace;
+ for (uint32_t i = 0; i < n_trace; ++i) {
+ plogdx_info(mbufs[i], "TX: ");
+ }
+ tbase->aux->task_rt_dump.n_trace -= n_trace;
+ }
+}
+
+// ctrlplane packets are slow path, hence the cost of checking whether dump or trace is needed is not too important
+// easier to have this implementation than one similar to the dataplane tx
+int tx_ctrlplane_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
+{
+ dump_pkts(tbase, mbufs, n_pkts);
+ return txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
+}
+
+int tx_ctrlplane_sw(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
+{
+ dump_pkts(tbase, mbufs, n_pkts);
+ return ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase);
+}
static inline int tx_ring_all(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
{
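Both helpers end in the existing no-drop primitives, which retry until every mbuf has been accepted; that is exactly what ARP traffic needs, since a dropped reply would stall MAC resolution. A hedged sketch of what the ring variant boils down to (the real ring_enq_no_drop() also updates TX statistics; assumes a DPDK recent enough for the four-argument rte_ring_enqueue_burst()):

#include <rte_ring.h>
#include <rte_mbuf.h>

/* Retry until all control-plane mbufs are enqueued; never drops. */
static int ring_enq_no_drop_sketch(struct rte_ring *ring,
                                   struct rte_mbuf **mbufs, uint16_t n_pkts)
{
	uint16_t sent = 0;

	while (sent < n_pkts)
		sent += rte_ring_enqueue_burst(ring, (void **)(mbufs + sent),
		                               n_pkts - sent, NULL);
	return n_pkts;
}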
diff --git a/VNFs/DPPD-PROX/tx_pkt.h b/VNFs/DPPD-PROX/tx_pkt.h
index e8caed52..a6881531 100644
--- a/VNFs/DPPD-PROX/tx_pkt.h
+++ b/VNFs/DPPD-PROX/tx_pkt.h
@@ -64,6 +64,8 @@ int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
+int tx_ctrlplane_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
+int tx_ctrlplane_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_trace(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);