Diffstat (limited to 'VNFs/DPPD-PROX/tx_pkt.c')
-rw-r--r--	VNFs/DPPD-PROX/tx_pkt.c	258
1 file changed, 233 insertions(+), 25 deletions(-)
diff --git a/VNFs/DPPD-PROX/tx_pkt.c b/VNFs/DPPD-PROX/tx_pkt.c
index 49f46898..cd62cc54 100644
--- a/VNFs/DPPD-PROX/tx_pkt.c
+++ b/VNFs/DPPD-PROX/tx_pkt.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@
#include "log.h"
#include "mbuf_utils.h"
#include "handle_master.h"
+#include "defines.h"
static void buf_pkt_single(struct task_base *tbase, struct rte_mbuf *mbuf, const uint8_t out)
{
@@ -50,27 +51,118 @@ static inline void buf_pkt_all(struct task_base *tbase, struct rte_mbuf **mbufs,
}
#define MAX_PMD_TX 32
+void store_packet(struct task_base *tbase, struct rte_mbuf *mbuf)
+{
+ // Only one mbuf can be stored: if one is already pending, drop it first
+ if (tbase->aux->mbuf)
+ tx_drop(tbase->aux->mbuf);
+ tbase->aux->mbuf = mbuf;
+}
+
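+// Transmit burst for the IPv6 (NDP) L3 sub-mode: each packet's destination MAC is
+// resolved via write_ip6_dst_mac(); packets that are ready are sent in contiguous
+// batches through tx_pkt_l2(), while the others trigger an ND request to master.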
+int tx_pkt_ndp(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
+{
+ struct ipv6_addr ip_dst;
+ int first = 0, ret, ok = 0, rc;
+ const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
+ struct rte_mbuf *mbuf = NULL; // used when one needs to send both an ND and an mbuf
+ uint16_t vlan;
+ uint64_t tsc = rte_rdtsc();
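+ // One timestamp for the whole burst; write_ip6_dst_mac() uses it to time ND (re)transmissions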
+
+ for (int j = 0; j < n_pkts; j++) {
+ if ((out) && (out[j] >= OUT_HANDLED))
+ continue;
+ if (unlikely((rc = write_ip6_dst_mac(tbase, mbufs[j], &ip_dst, &vlan, tsc)) != SEND_MBUF)) {
+ if (j - first) {
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, j - first, out);
+ ok += ret;
+ }
+ first = j + 1;
+ switch(rc) {
+ case SEND_ARP_ND:
+ // The original mbuf (packet) is stored to be sent later, so a new mbuf must be allocated for the ND request
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ store_packet(tbase, mbufs[j]);
+ mbuf->port = tbase->l3.reachable_port_id;
+ tx_ring_cti6(tbase, tbase->l3.ctrl_plane_ring, IP6_REQ_MAC_TO_MASTER, mbuf, tbase->l3.core_id, tbase->l3.task_id, &ip_dst, vlan);
+ } else {
+ plog_err("Failed to get a mbuf from arp/nd mempool\n");
+ tx_drop(mbufs[j]);
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ }
+ break;
+ case SEND_MBUF_AND_ARP_ND:
+ // We send both the mbuf and an ND, so we need to allocate another mbuf for the ND request
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = tbase->l3.reachable_port_id;
+ tx_ring_cti6(tbase, tbase->l3.ctrl_plane_ring, IP6_REQ_MAC_TO_MASTER, mbuf, tbase->l3.core_id, tbase->l3.task_id, &ip_dst, vlan);
+ } else {
+ plog_err("Failed to get a mbuf from arp/nd mempool\n");
+ // We still send the initial mbuf
+ }
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + j, 1, out);
+ break;
+ case DROP_MBUF:
+ tx_drop(mbufs[j]);
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ break;
+ }
+ }
+ }
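+ // Send the trailing batch of packets whose MAC was already resolved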
+ if (n_pkts - first) {
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, n_pkts - first, out);
+ ok += ret;
+ }
+ return ok;
+}
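+
+// IPv4 counterpart of tx_pkt_ndp(): destination MACs are resolved through ARP, and
+// the per-destination retransmit timer is re-armed each time a request is sent.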
int tx_pkt_l3(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
uint32_t ip_dst;
+ uint16_t vlan;
int first = 0, ret, ok = 0, rc;
const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
+ struct rte_mbuf *arp_mbuf = NULL; // used when one needs to send both an ARP and an mbuf
+ uint64_t *time;
+ uint64_t tsc = rte_rdtsc();
for (int j = 0; j < n_pkts; j++) {
if ((out) && (out[j] >= OUT_HANDLED))
continue;
- if (unlikely((rc = write_dst_mac(tbase, mbufs[j], &ip_dst)) < 0)) {
+ if (unlikely((rc = write_dst_mac(tbase, mbufs[j], &ip_dst, &vlan, &time, tsc)) != SEND_MBUF)) {
if (j - first) {
ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, j - first, out);
ok += ret;
}
first = j + 1;
- if (rc == -1) {
+ switch(rc) {
+ case SEND_ARP_ND:
+ // We re-use the mbuf: no need to allocate an arp_mbuf and free the existing mbuf
mbufs[j]->port = tbase->l3.reachable_port_id;
- tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, REQ_MAC_TO_CTRL, mbufs[j], tbase->l3.core_id, tbase->l3.task_id, ip_dst);
- } else if (rc == -2) {
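+ // Re-arm the retransmit timer: back off further (1000) when the request was
+ // enqueued, retry sooner (100) when the control ring was full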
+ if (tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, IP4_REQ_MAC_TO_MASTER, mbufs[j], tbase->l3.core_id, tbase->l3.task_id, ip_dst, vlan) == 0)
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 1000);
+ else
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 100);
+ break;
+ case SEND_MBUF_AND_ARP_ND:
+ // We send both the mbuf and an ARP, so we need to allocate another mbuf for the ARP request
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&arp_mbuf);
+ if (likely(ret == 0)) {
+ arp_mbuf->port = tbase->l3.reachable_port_id;
+ if (tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, IP4_REQ_MAC_TO_MASTER, arp_mbuf, tbase->l3.core_id, tbase->l3.task_id, ip_dst, vlan) == 0)
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 1000);
+ else
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 100);
+ } else {
+ plog_err("Failed to get a mbuf from arp mempool\n");
+ // We still send the initial mbuf
+ }
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + j, 1, out);
+ break;
+ case DROP_MBUF:
tx_drop(mbufs[j]);
TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ break;
}
}
}
@@ -194,7 +286,7 @@ void flush_queues_hw(struct task_base *tbase)
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_sw(struct task_base *tbase)
@@ -211,7 +303,7 @@ void flush_queues_sw(struct task_base *tbase)
ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_no_drop_hw(struct task_base *tbase)
@@ -229,7 +321,7 @@ void flush_queues_no_drop_hw(struct task_base *tbase)
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_no_drop_sw(struct task_base *tbase)
@@ -246,7 +338,7 @@ void flush_queues_no_drop_sw(struct task_base *tbase)
ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
/* "try" functions try to send packets to sw/hw w/o failing or blocking;
@@ -335,7 +427,7 @@ int tx_pkt_no_drop_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct r
cons = tbase->ws_mbuf->idx[0].cons;
if ((uint16_t)(prod - cons)){
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[0].prod = 0;
tbase->ws_mbuf->idx[0].cons = 0;
ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
@@ -364,7 +456,7 @@ int tx_pkt_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf
cons = tbase->ws_mbuf->idx[0].cons;
if ((uint16_t)(prod - cons)){
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[0].prod = 0;
tbase->ws_mbuf->idx[0].cons = 0;
ret+= txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
@@ -503,7 +595,7 @@ int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -524,7 +616,7 @@ int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret += ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -545,7 +637,7 @@ int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -565,7 +657,7 @@ int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret+= ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -619,7 +711,7 @@ static inline void trace_one_tx_pkt(struct task_base *tbase, struct rte_mbuf *mb
static void unset_trace(struct task_base *tbase)
{
if (0 == tbase->aux->task_rt_dump.n_trace) {
- if (tbase->tx_pkt == tx_pkt_l3) {
+ if ((tbase->tx_pkt == tx_pkt_l3) || (tbase->tx_pkt == tx_pkt_ndp)){
tbase->aux->tx_pkt_l2 = tbase->aux->tx_pkt_orig;
tbase->aux->tx_pkt_orig = NULL;
} else {
@@ -688,7 +780,7 @@ int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkt
ret = tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out);
if (0 == tbase->aux->task_rt_dump.n_print_tx) {
- if (tbase->tx_pkt == tx_pkt_l3) {
+ if ((tbase->tx_pkt == tx_pkt_l3) || (tbase->tx_pkt == tx_pkt_ndp)) {
tbase->aux->tx_pkt_l2 = tbase->aux->tx_pkt_orig;
tbase->aux->tx_pkt_orig = NULL;
} else {
@@ -745,33 +837,68 @@ int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n
}
return n_pkts;
}
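+// Log up to the pending "dump" or "trace" count of packets before transmission;
+// shared by the control-plane tx helpers below.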
+static inline void dump_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+{
+ uint32_t n_dump = tbase->aux->task_rt_dump.n_print_tx;
+ uint32_t n_trace = tbase->aux->task_rt_dump.n_trace;
-static inline int tx_ring_all(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
+ if (unlikely(n_dump)) {
+ n_dump = n_pkts < n_dump ? n_pkts : n_dump;
+ for (uint32_t i = 0; i < n_dump; ++i) {
+ plogdx_info(mbufs[i], "TX: ");
+ }
+ tbase->aux->task_rt_dump.n_print_tx -= n_dump;
+ } else if (unlikely(n_trace)) {
+ n_trace = n_pkts < n_trace ? n_pkts : n_trace;
+ for (uint32_t i = 0; i < n_trace; ++i) {
+ plogdx_info(mbufs[i], "TX: ");
+ }
+ tbase->aux->task_rt_dump.n_trace -= n_trace;
+ }
+}
+
+// ctrlplane packets are slow path, hence the cost of checking whether a dump or trace is needed is not too important
+// it is easier to have this implementation than one similar to the dataplane tx
+int tx_ctrlplane_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
+{
+ dump_pkts(tbase, mbufs, n_pkts);
+ return txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
+}
+
+int tx_ctrlplane_sw(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
+{
+ dump_pkts(tbase, mbufs, n_pkts);
+ return ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase);
+}
+
+static inline int tx_ring_all(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
{
if (tbase->aux->task_rt_dump.cur_trace) {
trace_one_rx_pkt(tbase, mbuf);
}
- mbuf->udata64 = ((uint64_t)ip << 32) | (core_id << 16) | (task_id << 8) | command;
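+ // Pack the control message into one 64-bit word: bits 63..32 hold the IPv4
+ // address, 23..16 the core_id, 15..8 the task_id and 7..0 the command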
+ ctrl_ring_set_command_core_task_ip(mbuf, ((uint64_t)ip << 32) | (core_id << 16) | (task_id << 8) | command);
return rte_ring_enqueue(ring, mbuf);
}
-void tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
+int tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip, uint16_t vlan)
{
- plogx_dbg("\tSending command %s with ip %x to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], ip, ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ plogx_dbg("\tSending command %s with ip %d.%d.%d.%d to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ ctrl_ring_set_vlan(mbuf, vlan);
int ret = tx_ring_all(tbase, ring, command, mbuf, core_id, task_id, ip);
if (unlikely(ret != 0)) {
- plogx_dbg("\tFail to send command %s with ip %x to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], ip, ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ plogx_dbg("\tFail to send command %s with ip %d.%d.%d.%d to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
rte_pktmbuf_free(mbuf);
}
+ return ret;
}
-void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint32_t ip)
+void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint32_t ip)
{
- plogx_dbg("\tSending command %s with ip %x to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ip, ring, mbuf, rte_ring_free_count(ring));
+ plogx_dbg("\tSending command %s with ip %d.%d.%d.%d to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, rte_ring_free_count(ring));
int ret = tx_ring_all(tbase, ring, command, mbuf, 0, 0, ip);
if (unlikely(ret != 0)) {
- plogx_dbg("\tFail to send command %s with ip %x to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ip, ring, mbuf, rte_ring_free_count(ring));
+ plogx_dbg("\tFail to send command %s with ip %d.%d.%d.%d to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, rte_ring_free_count(ring));
TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
rte_pktmbuf_free(mbuf);
}
@@ -787,3 +914,84 @@ void tx_ring(struct task_base *tbase, struct rte_ring *ring, uint16_t command,
rte_pktmbuf_free(mbuf);
}
}
+
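+// Ask the master task to add or delete a route; the route parameters travel in the
+// mbuf's control metadata (set via the ctrl_ring_set_* helpers), not in the payload.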
+void tx_ring_route(struct task_base *tbase, struct rte_ring *ring, int add, struct rte_mbuf *mbuf, uint32_t ip, uint32_t gateway_ip, uint32_t prefix)
+{
+ uint8_t command;
+ if (add)
+ command = ROUTE_ADD_FROM_MASTER;
+ else
+ command = ROUTE_DEL_FROM_MASTER;
+
+ plogx_dbg("\tSending command %s to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ring, mbuf, rte_ring_free_count(ring));
+ ctrl_ring_set_command(mbuf, command);
+ ctrl_ring_set_ip(mbuf, ip);
+ ctrl_ring_set_gateway_ip(mbuf, gateway_ip);
+ ctrl_ring_set_prefix(mbuf, prefix);
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ int ret = rte_ring_enqueue(ring, mbuf);
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ring, mbuf, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
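+// IPv6 variant of tx_ring_cti(): the 128-bit address does not fit in the packed
+// command word, so it is stored through a separate ctrl_ring_set_ipv6_addr() call.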
+void tx_ring_cti6(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, struct ipv6_addr *ip, uint16_t vlan)
+{
+ int ret;
+ plogx_dbg("\tSending command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ ctrl_ring_set_command_core_task_ip(mbuf, (core_id << 16) | (task_id << 8) | command);
+ ctrl_ring_set_ipv6_addr(mbuf, ip);
+ ctrl_ring_set_vlan(mbuf, vlan);
+ ret = rte_ring_enqueue(ring, mbuf);
+
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
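+// Like tx_ring_cti6() but without core/task addressing; the _data variant below
+// additionally carries a caller-supplied 64-bit value in the control metadata.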
+void tx_ring_ip6(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, struct ipv6_addr *ip)
+{
+ int ret;
+ plogx_dbg("\tSending command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ ctrl_ring_set_command(mbuf, command);
+ ctrl_ring_set_ipv6_addr(mbuf, ip);
+ ret = rte_ring_enqueue(ring, mbuf);
+
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
+void tx_ring_ip6_data(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, struct ipv6_addr *ip, uint64_t data)
+{
+ int ret;
+ plogx_dbg("\tSending command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ ctrl_ring_set_command(mbuf, command);
+ ctrl_ring_set_ipv6_addr(mbuf, ip);
+ ctrl_ring_set_data(mbuf, data);
+ ret = rte_ring_enqueue(ring, mbuf);
+
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}