Diffstat (limited to 'VNFs/DPPD-PROX/rx_pkt.c')
-rw-r--r--   VNFs/DPPD-PROX/rx_pkt.c   280
1 file changed, 173 insertions, 107 deletions
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c
index ec698d9a..e1756cb3 100644
--- a/VNFs/DPPD-PROX/rx_pkt.c
+++ b/VNFs/DPPD-PROX/rx_pkt.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -28,7 +28,10 @@
#include "arp.h"
#include "tx_pkt.h"
#include "handle_master.h"
-#include "input.h" /* Needed for callback on dump */
+#include "input.h"
+#include "prox_ipv6.h" /* Needed for callback on dump */
+
+#define TCP_PORT_BGP rte_cpu_to_be_16(179)
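handle_ipv4(), added further down in this patch, compares the TCP source and destination ports of a packet directly against this constant; since those fields sit in the packet in network byte order, converting 179 once with rte_cpu_to_be_16() avoids a per-packet byte swap. A minimal standalone sketch of the same comparison using only the standard htons() conversion; the struct and function names below are illustrative, not taken from rx_pkt.c:

#include <arpa/inet.h>   /* htons() */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PORT_BGP htons(179)   /* the constant is kept in network byte order */

struct sketch_tcp_ports {
    uint16_t src_port;   /* as read from the wire: network byte order */
    uint16_t dst_port;
};

/* Returns 1 when either port is BGP; no per-packet ntohs() is needed. */
static int sketch_is_bgp(const struct sketch_tcp_ports *p)
{
    return (p->src_port == SKETCH_PORT_BGP) || (p->dst_port == SKETCH_PORT_BGP);
}

int main(void)
{
    struct sketch_tcp_ports p = { .src_port = htons(54321), .dst_port = htons(179) };
    printf("BGP packet: %d\n", sketch_is_bgp(&p));
    return 0;
}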
/* _param version of the rx_pkt_hw functions are used to create two
instances of very similar variations of these functions. The
@@ -42,7 +45,9 @@
packets are received if the dequeue step involves finding 32 packets.
*/
-#define MIN_PMD_RX 32
+#define MIN_PMD_RX 32
+#define PROX_L3 1
+#define PROX_NDP 2
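The truncated comment above describes the dequeue strategy: keep iterating as long as the dequeue step keeps finding full batches of 32 (MIN_PMD_RX) packets. A standalone sketch of that loop with a fake receive function standing in for the driver; all names, counts and buffer sizes here are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MIN_PMD_RX 32
#define SKETCH_MAX_BURST  64

/* Stand-in for the PMD receive call: pretends the queue holds 70 packets. */
static uint16_t fake_rx_burst(uint16_t room)
{
    static uint16_t waiting = 70;
    uint16_t n = waiting < room ? waiting : room;
    waiting -= n;
    return n;
}

/* Keep dequeuing while full batches of MIN_PMD_RX come back and the
   burst buffer still has room for another full batch. */
static uint16_t sketch_rx(void)
{
    uint16_t nb_rx = 0, n;

    do {
        n = fake_rx_burst(SKETCH_MIN_PMD_RX);
        nb_rx += n;
    } while (n == SKETCH_MIN_PMD_RX && nb_rx + SKETCH_MIN_PMD_RX <= SKETCH_MAX_BURST);

    return nb_rx;
}

int main(void)
{
    printf("received %u packets in one call\n", (unsigned)sketch_rx());
    return 0;
}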
static uint16_t rx_pkt_hw_port_queue(struct port_queue *pq, struct rte_mbuf **mbufs, int multi)
{
@@ -77,7 +82,7 @@ static void next_port_pow2(struct rx_params_hw *rx_params_hw)
static inline void dump_l3(struct task_base *tbase, struct rte_mbuf *mbuf)
{
if (unlikely(tbase->aux->task_rt_dump.n_print_rx)) {
- if (tbase->aux->task_rt_dump.input->reply == NULL) {
+ if ((tbase->aux->task_rt_dump.input == NULL) || (tbase->aux->task_rt_dump.input->reply == NULL)) {
plogdx_info(mbuf, "RX: ");
} else {
struct input *input = tbase->aux->task_rt_dump.input;
@@ -105,11 +110,107 @@ static inline void dump_l3(struct task_base *tbase, struct rte_mbuf *mbuf)
}
}
+static inline void handle_ipv4(struct task_base *tbase, struct rte_mbuf **mbufs, int i, prox_rte_ipv4_hdr *pip, int *skip)
+{
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(pip + 1);
+ if (pip->next_proto_id == IPPROTO_ICMP) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_MASTER, mbufs[i]);
+ (*skip)++;
+ } else if ((tcp->src_port == TCP_PORT_BGP) || (tcp->dst_port == TCP_PORT_BGP)) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_MASTER, mbufs[i]);
+ (*skip)++;
+ } else if (unlikely(*skip)) {
+ mbufs[i - *skip] = mbufs[i];
+ }
+}
+static inline int handle_l3(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
+{
+ struct rte_mbuf **mbufs = *mbufs_ptr;
+ int i;
+ struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST];
+ prox_rte_ether_hdr *hdr;
+ prox_rte_ipv4_hdr *pip;
+ prox_rte_vlan_hdr *vlan;
+ int skip = 0;
+
+ for (i = 0; i < nb_rx; i++) {
+ PREFETCH0(mbufs[i]);
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
+ PREFETCH0(hdr_arp[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) {
+ hdr = (prox_rte_ether_hdr *)hdr_arp[i];
+ pip = (prox_rte_ipv4_hdr *)(hdr + 1);
+ handle_ipv4(tbase, mbufs, i, pip, &skip);
+ } else {
+ switch (hdr_arp[i]->ether_hdr.ether_type) {
+ case ETYPE_VLAN:
+ hdr = (prox_rte_ether_hdr *)hdr_arp[i];
+ vlan = (prox_rte_vlan_hdr *)(hdr + 1);
+ if (vlan->eth_proto == ETYPE_IPv4) {
+ pip = (prox_rte_ipv4_hdr *)(vlan + 1);
+ handle_ipv4(tbase, mbufs, i, pip, &skip);
+ } else if (vlan->eth_proto == ETYPE_ARP) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
+ skip++;
+ }
+ break;
+ case ETYPE_ARP:
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
+ skip++;
+ break;
+ default:
+ if (unlikely(skip)) {
+ mbufs[i - skip] = mbufs[i];
+ }
+ }
+ }
+ }
+ return skip;
+}
+
+static inline int handle_ndp(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
+{
+ struct rte_mbuf **mbufs = *mbufs_ptr;
+ prox_rte_ipv6_hdr *ipv6_hdr;
+ int i;
+ prox_rte_ether_hdr *hdr[MAX_PKT_BURST];
+ int skip = 0;
+ uint16_t vlan = 0;
+
+ for (i = 0; i < nb_rx; i++) {
+ PREFETCH0(mbufs[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ ipv6_hdr = prox_get_ipv6_hdr(hdr[i], rte_pktmbuf_pkt_len(mbufs[i]), &vlan);
+ if (unlikely((ipv6_hdr) && (ipv6_hdr->proto == ICMPv6))) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, NDP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
+ skip++;
+ } else if (unlikely(skip)) {
+ mbufs[i - skip] = mbufs[i];
+ }
+ }
+ return skip;
+}
+
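handle_ipv4(), handle_l3() and handle_ndp() above all follow one pattern: packets meant for the control plane (ARP, ICMP, BGP, ICMPv6) are sent to the master task over ctrl_plane_ring and counted in skip, and the remaining mbufs are shifted down by skip so the data-plane packets stay contiguous at the front of the burst. A minimal standalone sketch of that in-place compaction, with plain ints standing in for mbufs; is_ctrl() and compact() are illustrative names:

#include <stdio.h>

/* Negative values stand in for control-plane packets (ARP, ICMP, ...). */
static int is_ctrl(int pkt) { return pkt < 0; }

/* Compact the array in place: control-plane entries are "diverted" (here just
   printed) and counted in skip, the rest are shifted down so the first
   (n - skip) slots stay contiguous, mirroring the mbufs[i - skip] = mbufs[i]
   pattern above. */
static int compact(int *pkts, int n)
{
    int skip = 0;

    for (int i = 0; i < n; i++) {
        if (is_ctrl(pkts[i])) {
            printf("diverted %d to the control plane\n", pkts[i]);
            skip++;
        } else if (skip) {
            pkts[i - skip] = pkts[i];
        }
    }
    return skip;   /* the caller counts n as RX and skip as non-dataplane */
}

int main(void)
{
    int pkts[] = { 1, -2, 3, 4, -5, 6 };
    int n = 6, skip = compact(pkts, n);

    for (int i = 0; i < n - skip; i++)
        printf("dataplane packet %d\n", pkts[i]);
    return 0;
}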
static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi,
- void (*next)(struct rx_params_hw *rx_param_hw), int l3)
+ void (*next)(struct rx_params_hw *rx_param_hw), int l3_ndp)
{
uint8_t last_read_portid;
- uint16_t nb_rx;
+ uint16_t nb_rx, ret;
int skip = 0;
START_EMPTY_MEASSURE();
@@ -122,30 +223,13 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf
nb_rx = rx_pkt_hw_port_queue(pq, *mbufs_ptr, multi);
next(&tbase->rx_params_hw);
- if (l3) {
- struct rte_mbuf **mbufs = *mbufs_ptr;
- int i;
- struct ether_hdr_arp *hdr[MAX_PKT_BURST];
- for (i = 0; i < nb_rx; i++) {
- PREFETCH0(mbufs[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
- PREFETCH0(hdr[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- if (unlikely(hdr[i]->ether_hdr.ether_type == ETYPE_ARP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- }
- }
+ if (l3_ndp == PROX_L3)
+ skip = handle_l3(tbase, nb_rx, mbufs_ptr);
+ else if (l3_ndp == PROX_NDP)
+ skip = handle_ndp(tbase, nb_rx, mbufs_ptr);
if (skip)
- TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, skip);
+ TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);
if (likely(nb_rx > 0)) {
TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
return nb_rx - skip;
@@ -154,7 +238,7 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf
return 0;
}
-static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, int l3)
+static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, int l3_ndp)
{
uint16_t nb_rx, n;
int skip = 0;
@@ -178,36 +262,21 @@ static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf
}
}
- if (l3) {
- struct rte_mbuf **mbufs = *mbufs_ptr;
- int i;
- struct ether_hdr_arp *hdr[MAX_PKT_BURST];
- for (i = 0; i < nb_rx; i++) {
- PREFETCH0(mbufs[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
- PREFETCH0(hdr[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- if (unlikely(hdr[i]->ether_hdr.ether_type == ETYPE_ARP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- }
+ if (unlikely(nb_rx == 0)) {
+ TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc);
+ return 0;
}
+ if (l3_ndp == PROX_L3)
+ skip = handle_l3(tbase, nb_rx, mbufs_ptr);
+ else if (l3_ndp == PROX_NDP)
+ skip = handle_ndp(tbase, nb_rx, mbufs_ptr);
+
if (skip)
- TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, skip);
- if (likely(nb_rx > 0)) {
- TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
- return nb_rx - skip;
- }
- TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc);
- return 0;
+ TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);
+
+ TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
+ return nb_rx - skip;
}
uint16_t rx_pkt_hw(struct task_base *tbase, struct rte_mbuf ***mbufs)
@@ -242,32 +311,62 @@ uint16_t rx_pkt_hw1_multi(struct task_base *tbase, struct rte_mbuf ***mbufs)
uint16_t rx_pkt_hw_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 0, next_port, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port, PROX_NDP);
}
uint16_t rx_pkt_hw_pow2_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_pow2_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, PROX_NDP);
}
uint16_t rx_pkt_hw1_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw1_param(tbase, mbufs, 0, 1);
+ return rx_pkt_hw1_param(tbase, mbufs, 0, PROX_L3);
+}
+
+uint16_t rx_pkt_hw1_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw1_param(tbase, mbufs, 0, PROX_NDP);
}
uint16_t rx_pkt_hw_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 1, next_port, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port, PROX_NDP);
}
uint16_t rx_pkt_hw_pow2_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_pow2_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, PROX_NDP);
}
uint16_t rx_pkt_hw1_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw1_param(tbase, mbufs, 1, 1);
+ return rx_pkt_hw1_param(tbase, mbufs, 1, PROX_L3);
+}
+
+uint16_t rx_pkt_hw1_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw1_param(tbase, mbufs, 1, PROX_NDP);
}
/* The following functions implement ring access */
@@ -388,13 +487,9 @@ static uint16_t call_prev_rx_pkt(struct task_base *tbase, struct rte_mbuf ***mbu
{
uint16_t ret;
- if (tbase->aux->rx_prev_idx + 1 == tbase->aux->rx_prev_count) {
- ret = tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_idx](tbase, mbufs);
- } else {
- tbase->aux->rx_prev_idx++;
- ret = tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_idx](tbase, mbufs);
- tbase->aux->rx_prev_idx--;
- }
+ tbase->aux->rx_prev_idx++;
+ ret = tbase->aux->rx_pkt_prev[tbase->aux->rx_prev_idx - 1](tbase, mbufs);
+ tbase->aux->rx_prev_idx--;
return ret;
}
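The simplified call_prev_rx_pkt() above steps one level down a small stack of previously installed receive functions for the duration of the call, which lets stacked wrappers such as rx_pkt_dump() or rx_pkt_trace() each reach the handler installed before them. The sketch below shows only that increment/call/decrement pattern; the ordering of the sk_prev[] array and every name in it are assumptions made for illustration:

#include <stdio.h>

typedef int (*sk_rx_fn)(void);

static sk_rx_fn sk_prev[4];   /* previously installed receive functions */
static int sk_prev_idx;       /* current depth while walking the stack */

/* Same shape as call_prev_rx_pkt(): step one level deeper for the duration
   of the call, so a nested call from the invoked wrapper picks the next
   entry instead of recursing into the same one. */
static int sk_call_prev(void)
{
    int ret;

    sk_prev_idx++;
    ret = sk_prev[sk_prev_idx - 1]();
    sk_prev_idx--;
    return ret;
}

static int sk_base_rx(void)   /* the "real" receive function */
{
    return 4;
}

static int sk_trace_rx(void)  /* wrapper: receive via the previous handler, then trace */
{
    int r = sk_call_prev();
    printf("trace saw %d packets\n", r);
    return r;
}

int main(void)
{
    sk_prev[0] = sk_trace_rx;   /* ordering assumed purely for illustration */
    sk_prev[1] = sk_base_rx;
    sk_prev_idx = 0;

    /* An outermost wrapper (e.g. a dump hook) would start the chain like this: */
    printf("outer saw %d packets\n", sk_call_prev());
    return 0;
}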
@@ -412,7 +507,7 @@ uint16_t rx_pkt_dump(struct task_base *tbase, struct rte_mbuf ***mbufs)
uint32_t n_dump = tbase->aux->task_rt_dump.n_print_rx;
n_dump = ret < n_dump? ret : n_dump;
- if (tbase->aux->task_rt_dump.input->reply == NULL) {
+ if ((tbase->aux->task_rt_dump.input == NULL) || (tbase->aux->task_rt_dump.input->reply == NULL)) {
for (uint32_t i = 0; i < n_dump; ++i) {
plogdx_info((*mbufs)[i], "RX: ");
}
@@ -457,12 +552,13 @@ uint16_t rx_pkt_trace(struct task_base *tbase, struct rte_mbuf ***mbufs)
if (ret) {
uint32_t n_trace = tbase->aux->task_rt_dump.n_trace;
n_trace = ret < n_trace? ret : n_trace;
+ n_trace = n_trace <= MAX_RING_BURST ? n_trace : MAX_RING_BURST;
for (uint32_t i = 0; i < n_trace; ++i) {
uint8_t *pkt = rte_pktmbuf_mtod((*mbufs)[i], uint8_t *);
- rte_memcpy(tbase->aux->task_rt_dump.pkt_cpy[tbase->aux->task_rt_dump.cur_trace + i], pkt, sizeof(tbase->aux->task_rt_dump.pkt_cpy[i]));
- tbase->aux->task_rt_dump.pkt_cpy_len[tbase->aux->task_rt_dump.cur_trace + i] = rte_pktmbuf_pkt_len((*mbufs)[i]);
- tbase->aux->task_rt_dump.pkt_mbuf_addr[tbase->aux->task_rt_dump.cur_trace + i] = (*mbufs)[i];
+ rte_memcpy(tbase->aux->task_rt_dump.pkt_cpy[i], pkt, sizeof(tbase->aux->task_rt_dump.pkt_cpy[i]));
+ tbase->aux->task_rt_dump.pkt_cpy_len[i] = rte_pktmbuf_pkt_len((*mbufs)[i]);
+ tbase->aux->task_rt_dump.pkt_mbuf_addr[i] = (*mbufs)[i];
}
tbase->aux->task_rt_dump.cur_trace += n_trace;
@@ -479,7 +575,10 @@ uint16_t rx_pkt_distr(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
uint16_t ret = call_prev_rx_pkt(tbase, mbufs);
- tbase->aux->rx_bucket[ret]++;
+ if (likely(ret < RX_BUCKET_SIZE))
+ tbase->aux->rx_bucket[ret]++;
+ else
+ tbase->aux->rx_bucket[RX_BUCKET_SIZE - 1]++;
return ret;
}
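The guarded update in rx_pkt_distr() above keeps an out-of-range burst size from indexing past the end of rx_bucket; such bursts are counted in the last bucket instead. The same guard in a standalone sketch, with SK_BUCKETS standing in for RX_BUCKET_SIZE:

#include <stdio.h>

#define SK_BUCKETS 8   /* stands in for RX_BUCKET_SIZE */

static unsigned sk_bucket[SK_BUCKETS];

/* Burst sizes outside the histogram range land in the last bucket
   instead of writing past the end of the array. */
static void sk_count_burst(unsigned ret)
{
    if (ret < SK_BUCKETS)
        sk_bucket[ret]++;
    else
        sk_bucket[SK_BUCKETS - 1]++;
}

int main(void)
{
    unsigned bursts[] = { 0, 3, 3, 12, 64 };

    for (unsigned i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
        sk_count_burst(bursts[i]);
    for (unsigned i = 0; i < SK_BUCKETS; i++)
        printf("bucket[%u] = %u\n", i, sk_bucket[i]);
    return 0;
}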
@@ -508,36 +607,3 @@ uint16_t rx_pkt_tsc(struct task_base *tbase, struct rte_mbuf ***mbufs)
return ret;
}
-
-uint16_t rx_pkt_all(struct task_base *tbase, struct rte_mbuf ***mbufs)
-{
- uint16_t tot = 0;
- uint16_t ret = 0;
- struct rte_mbuf **new_mbufs;
- struct rte_mbuf **dst = tbase->aux->all_mbufs;
-
- /* In case we receive less than MAX_PKT_BURST packets in one
- iteration, do no perform any copying of mbuf pointers. Use
- the buffer itself instead. */
- ret = call_prev_rx_pkt(tbase, &new_mbufs);
- if (ret < MAX_PKT_BURST/2) {
- *mbufs = new_mbufs;
- return ret;
- }
-
- memcpy(dst + tot, new_mbufs, ret * sizeof(*dst));
- tot += ret;
- *mbufs = dst;
-
- do {
- ret = call_prev_rx_pkt(tbase, &new_mbufs);
- memcpy(dst + tot, new_mbufs, ret * sizeof(*dst));
- tot += ret;
- } while (ret == MAX_PKT_BURST/2 && tot < MAX_RX_PKT_ALL - MAX_PKT_BURST);
-
- if (tot >= MAX_RX_PKT_ALL - MAX_PKT_BURST) {
- plog_err("Could not receive all packets - buffer full\n");
- }
-
- return tot;
-}