Diffstat (limited to 'VNFs/DPPD-PROX')
-rw-r--r--   VNFs/DPPD-PROX/handle_cgnat.c    |   2
-rw-r--r--   VNFs/DPPD-PROX/handle_master.c   |  44
-rw-r--r--   VNFs/DPPD-PROX/main.c            |   4
-rw-r--r--   VNFs/DPPD-PROX/packet_utils.c    | 105
-rw-r--r--   VNFs/DPPD-PROX/packet_utils.h    |  13
-rw-r--r--   VNFs/DPPD-PROX/tx_pkt.c          |  31
6 files changed, 152 insertions, 47 deletions
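The per-file copies of the IP4() macro are consolidated into packet_utils.h, and the related log formats move from raw %x to dotted-quad %d.%d.%d.%d. Below is a minimal standalone sketch of how that macro feeds a printf-style format; the sample value and the main() harness are illustrative only, and it assumes the address is stored with its first octet in the least-significant byte, as for the ARP fields logged in this commit.

#include <stdio.h>
#include <stdint.h>

/* Same definition as the one added to packet_utils.h: expands one uint32_t
   into four comma-separated octet values for a printf-style argument list. */
#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24

int main(void)
{
    uint32_t ip = 0x0100000a;   /* octets 10,0,0,1 with the first octet in the low byte */
    printf("registering IP %d.%d.%d.%d\n", IP4(ip));   /* prints "registering IP 10.0.0.1" */
    return 0;
}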
diff --git a/VNFs/DPPD-PROX/handle_cgnat.c b/VNFs/DPPD-PROX/handle_cgnat.c
index 84ad5460..0aa6876f 100644
--- a/VNFs/DPPD-PROX/handle_cgnat.c
+++ b/VNFs/DPPD-PROX/handle_cgnat.c
@@ -45,8 +45,6 @@
 #define BIT_8_TO_15 0x0000ff00
 #define BIT_0_TO_15 0x0000ffff
 
-#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
-
 struct private_key {
     uint32_t ip_addr;
     uint16_t l4_port;
diff --git a/VNFs/DPPD-PROX/handle_master.c b/VNFs/DPPD-PROX/handle_master.c
index 074d7dd3..22527413 100644
--- a/VNFs/DPPD-PROX/handle_master.c
+++ b/VNFs/DPPD-PROX/handle_master.c
@@ -36,7 +36,6 @@
 #include "input.h"
 #include "tx_pkt.h"
 
-#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
 #define PROX_MAX_ARP_REQUESTS 32 // Maximum number of tasks requesting the same MAC address
 
 const char *actions_string[] = {"UPDATE_FROM_CTRL", "SEND_ARP_REQUEST_FROM_CTRL", "SEND_ARP_REPLY_FROM_CTRL", "HANDLE_ARP_TO_CTRL", "REQ_MAC_TO_CTRL"};
@@ -116,10 +115,10 @@ void register_ip_to_ctrl_plane(struct task_base *tbase, uint32_t ip, uint8_t por
 {
     struct task_master *task = (struct task_master *)tbase;
     struct ip_port key;
-    plogx_dbg("\tregistering IP %x.%x.%x.%x with port %d core %d and task %d\n", IP4(ip), port_id, core_id, task_id);
+    plogx_dbg("\tregistering IP %d.%d.%d.%d with port %d core %d and task %d\n", IP4(ip), port_id, core_id, task_id);
 
     if (port_id >= PROX_MAX_PORTS) {
-        plog_err("Unable to register ip %x, port %d\n", ip, port_id);
+        plog_err("Unable to register ip %d.%d.%d.%d, port %d\n", IP4(ip), port_id);
         return;
     }
 
@@ -139,7 +138,7 @@ void register_ip_to_ctrl_plane(struct task_base *tbase, uint32_t ip, uint8_t por
     key.port = port_id;
     int ret = rte_hash_add_key(task->internal_ip_hash, (const void *)&key);
     if (unlikely(ret < 0)) {
-        plog_err("Unable to register ip %x\n", ip);
+        plog_err("Unable to register ip %d.%d.%d.%d\n", IP4(ip));
         return;
     }
     memcpy(&task->internal_ip_table[ret].mac, &prox_port_cfg[port_id].eth_addr, 6);
@@ -153,7 +152,7 @@ static inline void handle_arp_reply(struct task_base *tbase, struct rte_mbuf *mb
     struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
     int i, ret;
     uint32_t key = hdr_arp->arp.data.spa;
-    plogx_dbg("\tMaster handling ARP reply for ip %x\n", key);
+    plogx_dbg("\tMaster handling ARP reply for ip %d.%d.%d.%d\n", IP4(key));
 
     ret = rte_hash_lookup(task->external_ip_hash, (const void *)&key);
     if (unlikely(ret < 0)) {
@@ -187,7 +186,7 @@ static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf *
     key.port = port;
     if (task->internal_port_table[port].flags & HANDLE_RANDOM_IP_FLAG) {
         struct ether_addr mac;
-        plogx_dbg("\tMaster handling ARP request for ip %x on port %d which supports random ip\n", key.ip, key.port);
+        plogx_dbg("\tMaster handling ARP request for ip %d.%d.%d.%d on port %d which supports random ip\n", IP4(key.ip), key.port);
         struct rte_ring *ring = task->internal_port_table[port].ring;
         create_mac(hdr_arp, &mac);
         mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
@@ -196,7 +195,7 @@ static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf *
         return;
     }
 
-    plogx_dbg("\tMaster handling ARP request for ip %x\n", key.ip);
+    plogx_dbg("\tMaster handling ARP request for ip %d.%d.%d.%d\n", IP4(key.ip));
     ret = rte_hash_lookup(task->internal_ip_hash, (const void *)&key);
     if (unlikely(ret < 0)) {
@@ -217,9 +216,9 @@ static inline void handle_unknown_ip(struct task_base *tbase, struct rte_mbuf *m
     struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
     uint8_t port = get_port(mbuf);
     uint32_t ip_dst = get_ip(mbuf);
-    int ret1, ret2;
+    int ret1, ret2, i;
 
-    plogx_dbg("\tMaster handling unknown ip %x for port %d\n", ip_dst, port);
+    plogx_dbg("\tMaster handling unknown ip %d.%d.%d.%d for port %d\n", IP4(ip_dst), port);
     if (unlikely(port >= PROX_MAX_PORTS)) {
         plogx_dbg("Port %d not found", port);
         tx_drop(mbuf);
@@ -237,13 +236,32 @@ static inline void handle_unknown_ip(struct task_base *tbase, struct rte_mbuf *m
     ret2 = rte_hash_add_key(task->external_ip_hash, (const void *)&ip_dst);
     if (unlikely(ret2 < 0)) {
         // entry not found for this IP: delete the reply
-        plogx_dbg("Unable to add IP %x in external_ip_hash\n", rte_be_to_cpu_32(hdr_arp->arp.data.tpa));
+        plogx_dbg("Unable to add IP %d.%d.%d.%d in external_ip_hash\n", IP4(ip_dst));
         tx_drop(mbuf);
         return;
     }
-    task->external_ip_table[ret2].rings[task->external_ip_table[ret2].nb_requests] = ring;
-    task->external_ip_table[ret2].nb_requests++;
-    memcpy(&task->external_ip_table[ret2].mac, &task->internal_port_table[port].mac, 6);
+
+    // If multiple tasks requesting the same info, we will need to send a reply to all of them
+    // However if one task sends multiple requests to the same IP (e.g. because it is not answering)
+    // then we should not send multiple replies to the same task
+    if (task->external_ip_table[ret2].nb_requests >= PROX_MAX_ARP_REQUESTS) {
+        // This can only happen if really many tasks requests the same IP
+        plogx_dbg("Unable to add request for IP %d.%d.%d.%d in external_ip_table\n", IP4(ip_dst));
+        tx_drop(mbuf);
+        return;
+    }
+    for (i = 0; i < task->external_ip_table[ret2].nb_requests; i++) {
+        if (task->external_ip_table[ret2].rings[i] == ring)
+            break;
+    }
+    if (i >= task->external_ip_table[ret2].nb_requests) {
+        // If this is a new request i.e. a new task requesting a new IP
+        task->external_ip_table[ret2].rings[task->external_ip_table[ret2].nb_requests] = ring;
+        task->external_ip_table[ret2].nb_requests++;
+        // Only needed for first request - but avoid test and copy the same 6 bytes
+        // In most cases we will only have one request per IP.
+        memcpy(&task->external_ip_table[ret2].mac, &task->internal_port_table[port].mac, 6);
+    }
 
     // We send an ARP request even if one was just sent (and not yet answered) by another task
     mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
diff --git a/VNFs/DPPD-PROX/main.c b/VNFs/DPPD-PROX/main.c
index 499a1ab7..5ab85d60 100644
--- a/VNFs/DPPD-PROX/main.c
+++ b/VNFs/DPPD-PROX/main.c
@@ -501,7 +501,7 @@ static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct
         starg->ctrl_plane_ring = ring;
     }
 
-    plog_info("\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
+    plog_info("\t\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
         lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
         "pkt" : "msg", ring, ring->name);
     ris->n_ctrl_rings++;
@@ -614,7 +614,7 @@ static void init_rings(void)
                 ct.core = lconf->id;
                 ct.task = starg->id;;
-                struct rte_ring *tx_ring = init_ring_between_tasks(lcore_cfg, lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
+                struct rte_ring *tx_ring = init_ring_between_tasks(&lcore_cfg[prox_cfg.master], lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
             }
         }
     }
diff --git a/VNFs/DPPD-PROX/packet_utils.c b/VNFs/DPPD-PROX/packet_utils.c
index 9d170949..e93f430c 100644
--- a/VNFs/DPPD-PROX/packet_utils.c
+++ b/VNFs/DPPD-PROX/packet_utils.c
@@ -24,8 +24,6 @@
 #include "handle_master.h"
 #include "prox_port_cfg.h"
 
-#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
-
 static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_dst)
 {
     struct vlan_hdr *vlan_hdr;
@@ -69,6 +67,14 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_
     return -1;
 }
 
+/* This implementation could be improved: instead of checking each time we send a packet whether we need also
+ to send an ARP, we should only check whether the MAC is valid.
+ We should check arp_update_time in the master process. This would also require the generating task to clear its arp ring
+ to avoid sending many ARP while starting after a long stop.
+ We could also check for arp_timeout in the master so that dataplane has only to check whether MAC is available
+ but this would require either thread safety, or the the exchange of information between master and generating core.
+*/
+
 int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst)
 {
     const uint64_t hz = rte_get_tsc_hz();
@@ -80,78 +86,117 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
     if (l3->gw.ip) {
         if (likely((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_update_time) && (tsc < l3->gw.arp_timeout))) {
             memcpy(mac, &l3->gw.mac, sizeof(struct ether_addr));
-            return 0;
+            return SEND_MBUF;
         } else if (tsc > l3->gw.arp_update_time) {
             // long time since we have sent an arp, send arp
             l3->gw.arp_update_time = tsc + hz;
             *ip_dst = l3->gw.ip;
-            return -1;
+            if ((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_timeout)){
+                // MAC is valid in the table => send also the mbuf
+                memcpy(mac, &l3->gw.mac, sizeof(struct ether_addr));
+                return SEND_MBUF_AND_ARP;
+            } else {
+                // MAC still unknown, or timed out => only send ARP
+                return SEND_ARP;
+            }
+        } else {
+            // MAC is unknown and we already sent an ARP recently, drop mbuf and wait for ARP reply
+            return DROP_MBUF;
         }
-        return -2;
     }
 
     uint16_t len = rte_pktmbuf_pkt_len(mbuf);
     if (find_ip(packet, len, ip_dst) != 0) {
-        return 0;
+        // Unable to find IP address => non IP packet => send it as it
+        return SEND_MBUF;
     }
     if (likely(l3->n_pkts < 4)) {
         for (unsigned int idx = 0; idx < l3->n_pkts; idx++) {
             if (*ip_dst == l3->optimized_arp_table[idx].ip) {
+                // IP address already in table
                 if ((tsc < l3->optimized_arp_table[idx].arp_update_time) && (tsc < l3->optimized_arp_table[idx].arp_timeout)) {
+                    // MAC address was recently updated in table, use it
                     memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(struct ether_addr));
-                    return 0;
+                    return SEND_MBUF;
                 } else if (tsc > l3->optimized_arp_table[idx].arp_update_time) {
+                    // ARP not sent since a long time, send ARP
                     l3->optimized_arp_table[idx].arp_update_time = tsc + hz;
-                    return -1;
+                    if (tsc < l3->optimized_arp_table[idx].arp_timeout) {
+                        // MAC still valid => also send mbuf
+                        memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(struct ether_addr));
+                        return SEND_MBUF_AND_ARP;
+                    } else {
+                        // MAC unvalid => only send ARP
+                        return SEND_ARP;
+                    }
                 } else {
-                    return -2;
+                    // ARP timeout elapsed, MAC not valid anymore but waiting for ARP reply
+                    return DROP_MBUF;
                 }
             }
         }
+        // IP address not found in table
         l3->optimized_arp_table[l3->n_pkts].ip = *ip_dst;
         l3->optimized_arp_table[l3->n_pkts].arp_update_time = tsc + hz;
         l3->n_pkts++;
-        if (l3->n_pkts < 4)
-            return -1;
+        if (l3->n_pkts < 4) {
+            return SEND_ARP;
+        }
 
-        // We have ** many ** IP addresses; lets use hash table instead
+        // We have too many IP addresses to search linearly; lets use hash table instead => copy all entries in hash table
         for (uint32_t idx = 0; idx < l3->n_pkts; idx++) {
             uint32_t ip = l3->optimized_arp_table[idx].ip;
             int ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
             if (ret < 0) {
-                plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(ip));
+                // This should not happen as few entries so far.
+                // If it happens, we still send the ARP as easier:
+                // If the ARP corresponds to this error, the ARP reply will be ignored
+                // If ARP does not correspond to this error/ip, then ARP reply will be handled.
+                plogx_err("Unable add ip %d.%d.%d.%d in mac_hash (already %d entries)\n", IP4(ip), idx);
             } else {
                 memcpy(&l3->arp_table[ret], &l3->optimized_arp_table[idx], sizeof(struct arp_table));
             }
         }
-        return -1;
+        return SEND_ARP;
     } else {
-        // Find mac in lookup table. Send ARP if not found
+        // Find IP in lookup table. Send ARP if not found
         int ret = rte_hash_lookup(l3->ip_hash, (const void *)ip_dst);
         if (unlikely(ret < 0)) {
+            // IP not found, try to send an ARP
            int ret = rte_hash_add_key(l3->ip_hash, (const void *)ip_dst);
            if (ret < 0) {
-                plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(*ip_dst));
-                return -2;
+                // No reason to send ARP, as reply would be anyhow ignored
+                plogx_err("Unable to add ip %d.%d.%d.%d in mac_hash\n", IP4(*ip_dst));
+                return DROP_MBUF;
            } else {
                l3->arp_table[ret].ip = *ip_dst;
                l3->arp_table[ret].arp_update_time = tsc + hz;
            }
-            return -1;
+            return SEND_ARP;
        } else {
-            if ((tsc < l3->arp_table[ret].arp_update_time) && (tsc < l3->arp_table[ret].arp_timeout)) {
+            // IP has been found
+            if (likely((tsc < l3->arp_table[ret].arp_update_time) && (tsc < l3->arp_table[ret].arp_timeout))) {
+                // MAC still valid and ARP sent recently
                memcpy(mac, &l3->arp_table[ret].mac, sizeof(struct ether_addr));
-                return 0;
+                return SEND_MBUF;
            } else if (tsc > l3->arp_table[ret].arp_update_time) {
+                // ARP not sent since a long time, send ARP
                l3->arp_table[ret].arp_update_time = tsc + hz;
-                return -1;
+                if (tsc < l3->arp_table[ret].arp_timeout) {
+                    // MAC still valid => send also MBUF
+                    memcpy(mac, &l3->arp_table[ret].mac, sizeof(struct ether_addr));
+                    return SEND_MBUF_AND_ARP;
+                } else {
+                    return SEND_ARP;
+                }
            } else {
-                return -2;
+                return DROP_MBUF;
            }
        }
     }
-    return 0;
+    // Should not happen
+    return DROP_MBUF;
 }
 
 void task_init_l3(struct task_base *tbase, struct task_args *targ)
@@ -188,13 +233,27 @@ void task_init_l3(struct task_base *tbase, struct task_args *targ)
 
 void task_start_l3(struct task_base *tbase, struct task_args *targ)
 {
+    const int NB_ARP_MBUF = 1024;
+    const int ARP_MBUF_SIZE = 2048;
+    const int NB_CACHE_ARP_MBUF = 256;
+
     struct prox_port_cfg *port = find_reachable_port(targ);
     if (port) {
+        static char name[] = "arp0_pool";
         tbase->l3.reachable_port_id = port - prox_port_cfg;
         if (targ->local_ipv4) {
             tbase->local_ipv4 = rte_be_to_cpu_32(targ->local_ipv4);
             register_ip_to_ctrl_plane(tbase->l3.tmaster, tbase->local_ipv4, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
         }
+        name[3]++;
+        struct rte_mempool *ret = rte_mempool_create(name, NB_ARP_MBUF, ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF,
+            sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
+            rte_socket_id(), 0);
+        PROX_PANIC(ret == NULL, "Failed to allocate ARP memory pool on socket %u with %u elements\n",
+            rte_socket_id(), NB_ARP_MBUF);
+        plog_info("\t\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
+            ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF, rte_socket_id());
+        tbase->l3.arp_pool = ret;
     }
 }
diff --git a/VNFs/DPPD-PROX/packet_utils.h b/VNFs/DPPD-PROX/packet_utils.h
index 0017a89e..74a3f60e 100644
--- a/VNFs/DPPD-PROX/packet_utils.h
+++ b/VNFs/DPPD-PROX/packet_utils.h
@@ -13,6 +13,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 */
+#ifndef _PACKET_UTILS_H_
+#define _PACKET_UTILS_H_
 
 #include "arp.h"
 #include "quit.h"
@@ -24,6 +26,14 @@
 #define FLAG_DST_MAC_KNOWN 1
 #define MAX_ARP_ENTRIES 65536
 
+#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
+enum {
+    SEND_MBUF_AND_ARP,
+    SEND_MBUF,
+    SEND_ARP,
+    DROP_MBUF
+};
+
 struct task_base;
 struct task_args;
 struct arp_table {
@@ -44,6 +54,7 @@ struct l3_base {
     struct arp_table optimized_arp_table[4];
     struct rte_hash *ip_hash;
     struct arp_table *arp_table;
+    struct rte_mempool *arp_pool;
 };
 
 void task_init_l3(struct task_base *tbase, struct task_args *targ);
@@ -52,3 +63,5 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d
 void task_set_gateway_ip(struct task_base *tbase, uint32_t ip);
 void task_set_local_ip(struct task_base *tbase, uint32_t ip);
 void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
+
+#endif /* _PACKET_UTILS_H_ */
diff --git a/VNFs/DPPD-PROX/tx_pkt.c b/VNFs/DPPD-PROX/tx_pkt.c
index 49f46898..c5047e56 100644
--- a/VNFs/DPPD-PROX/tx_pkt.c
+++ b/VNFs/DPPD-PROX/tx_pkt.c
@@ -55,22 +55,39 @@ int tx_pkt_l3(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
     uint32_t ip_dst;
     int first = 0, ret, ok = 0, rc;
     const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
+    struct rte_mbuf *arp_mbuf = NULL; // used when one need to send both an ARP and a mbuf
 
     for (int j = 0; j < n_pkts; j++) {
         if ((out) && (out[j] >= OUT_HANDLED))
             continue;
-        if (unlikely((rc = write_dst_mac(tbase, mbufs[j], &ip_dst)) < 0)) {
+        if (unlikely((rc = write_dst_mac(tbase, mbufs[j], &ip_dst)) != SEND_MBUF)) {
             if (j - first) {
                 ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, j - first, out);
                 ok += ret;
             }
             first = j + 1;
-            if (rc == -1) {
+            switch(rc) {
+            case SEND_ARP:
+                // We re-use the mbuf - no need to create a arp_mbuf and delete the existing mbuf
                 mbufs[j]->port = tbase->l3.reachable_port_id;
                 tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, REQ_MAC_TO_CTRL, mbufs[j], tbase->l3.core_id, tbase->l3.task_id, ip_dst);
-            } else if (rc == -2) {
+                break;
+            case SEND_MBUF_AND_ARP:
+                // We send the mbuf and an ARP - we need to allocate another mbuf for ARP
+                ret = rte_mempool_get(tbase->l3.arp_pool, (void **)&arp_mbuf);
+                if (likely(ret == 0)) {
+                    arp_mbuf->port = tbase->l3.reachable_port_id;
+                    tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, REQ_MAC_TO_CTRL, arp_mbuf, tbase->l3.core_id, tbase->l3.task_id, ip_dst);
+                } else {
+                    plog_err("Failed to get a mbuf from arp mempool\n");
+                    // We still send the initial mbuf
+                }
+                ret = tbase->aux->tx_pkt_l2(tbase, mbufs + j, 1, out);
+                break;
+            case DROP_MBUF:
                 tx_drop(mbufs[j]);
                 TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+                break;
             }
         }
     }
@@ -757,10 +774,10 @@ static inline int tx_ring_all(struct task_base *tbase, struct rte_ring *ring, ui
 
 void tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
 {
-    plogx_dbg("\tSending command %s with ip %x to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], ip, ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+    plogx_dbg("\tSending command %s with ip %d.%d.%d.%d to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
     int ret = tx_ring_all(tbase, ring, command, mbuf, core_id, task_id, ip);
     if (unlikely(ret != 0)) {
-        plogx_dbg("\tFail to send command %s with ip %x to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], ip, ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+        plogx_dbg("\tFail to send command %s with ip %d.%d.%d.%d to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
         TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
         rte_pktmbuf_free(mbuf);
     }
@@ -768,10 +785,10 @@ void tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint16_t comman
 
 void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint32_t ip)
 {
-    plogx_dbg("\tSending command %s with ip %x to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ip, ring, mbuf, rte_ring_free_count(ring));
+    plogx_dbg("\tSending command %s with ip %d.%d.%d.%d to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, rte_ring_free_count(ring));
     int ret = tx_ring_all(tbase, ring, command, mbuf, 0, 0, ip);
     if (unlikely(ret != 0)) {
-        plogx_dbg("\tFail to send command %s with ip %x to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ip, ring, mbuf, rte_ring_free_count(ring));
+        plogx_dbg("\tFail to send command %s with ip %d.%d.%d.%d to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, rte_ring_free_count(ring));
         TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
         rte_pktmbuf_free(mbuf);
     }
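With this commit, write_dst_mac() reports one of four outcomes (declared in packet_utils.h) instead of the old 0/-1/-2 codes, and tx_pkt_l3() switches on them. The following is a condensed, hypothetical sketch of what each code asks the caller to do; handle_one_result() and its comments are illustrative only, the real handling being the switch in tx_pkt_l3() shown in the diff above.

/* Return codes as declared in packet_utils.h */
enum { SEND_MBUF_AND_ARP, SEND_MBUF, SEND_ARP, DROP_MBUF };

void handle_one_result(int rc)
{
    switch (rc) {
    case SEND_MBUF:
        /* MAC known and fresh: transmit the packet as-is */
        break;
    case SEND_ARP:
        /* MAC unknown: recycle this mbuf as an ARP request sent to the master task */
        break;
    case SEND_MBUF_AND_ARP:
        /* MAC known but due for refresh: transmit the packet and also send an
           ARP request built from a separate mbuf taken from l3.arp_pool */
        break;
    case DROP_MBUF:
        /* MAC unknown and an ARP request was sent recently: drop the packet
           and wait for the ARP reply */
        break;
    }
}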