diff options
author | 2020-05-11 21:00:33 +0200 | |
---|---|---|
committer | 2020-05-29 23:45:09 +0200 | |
commit | c871c361f9d69a93429ae385e7dbf21a46aa6857 (patch) | |
tree | 957ec8a53d2cd0a9b5676e163c22e8a3ae8f6f3e /VNFs/DPPD-PROX/rx_pkt.c | |
parent | 7c4601f23c526b14a67674782b303663dfaa95af (diff) |
Added initial support for NDP (IPv6)
Following messages are now handled by PROX
- router_solicitation
- neighbour_solicitation
- router_advertisement
- neighbour_advertisement
The following parameters are supported (through the PROX config file)
- sub mode=ndp
This will enable handling of router and neighbour solicitation
and advertisement.
- local ipv6=xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
This will configure the local IPv6 address of the port.
This parameter is optional. If not specified, the local IPv6
will be calculated from the EUI.
- global ipv6=xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
This will configure the global IPv6 address of the port.
This parameter is optional. If not specified, the global IPv6
will be calculated from the EUI and the router prefix received
from the router.
- ipv6 router=yes
This will cause the core to behave as an IPv6 router
i.e. it will generate Router Advertisement messages
This is only useful in back to back cases, when no real
IPv6 router is present in the setup.
- router prefix=xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
The router prefix used in the router advertisement
The prefix will be used by the node to build an IPv6 global
address in case none was configured.
The "Unsollicited NA" parameter has been added within the core/task section.
If set to yes (Unsollicited NA=yes), then an unsolicited Neighbour
Advertisement is sent at startup.
A same core/task cannot support both l3 and ndp mode.
Those messages will be generated or handled when submode
is set to "ndp":
- neighbour solicitation
- neighbour advertisement
- router solicitation
- router advertisement
An example configuration is provided: config/ipv6.cfg in which
port 0 / core 1 plays the role of the generator and port 1 /
core 2 plays the role of the swap.
Change-Id: Id0ab32d384448b4cf767fb4a1c486fc023f4f395
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
Diffstat (limited to 'VNFs/DPPD-PROX/rx_pkt.c')
-rw-r--r-- | VNFs/DPPD-PROX/rx_pkt.c | 97 |
1 files changed, 81 insertions, 16 deletions
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c index da59fda7..17e39646 100644 --- a/VNFs/DPPD-PROX/rx_pkt.c +++ b/VNFs/DPPD-PROX/rx_pkt.c @@ -28,7 +28,8 @@ #include "arp.h" #include "tx_pkt.h" #include "handle_master.h" -#include "input.h" /* Needed for callback on dump */ +#include "input.h" +#include "prox_ipv6.h" /* Needed for callback on dump */ #define TCP_PORT_BGP rte_cpu_to_be_16(179) @@ -44,7 +45,9 @@ packets are received if the dequeue step involves finding 32 packets. */ -#define MIN_PMD_RX 32 +#define MIN_PMD_RX 32 +#define PROX_L3 1 +#define PROX_NDP 2 static uint16_t rx_pkt_hw_port_queue(struct port_queue *pq, struct rte_mbuf **mbufs, int multi) { @@ -112,11 +115,11 @@ static inline void handle_ipv4(struct task_base *tbase, struct rte_mbuf **mbufs, prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(pip + 1); if (pip->next_proto_id == IPPROTO_ICMP) { dump_l3(tbase, mbufs[i]); - tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_CTRL, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_MASTER, mbufs[i]); (*skip)++; } else if ((tcp->src_port == TCP_PORT_BGP) || (tcp->dst_port == TCP_PORT_BGP)) { dump_l3(tbase, mbufs[i]); - tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_CTRL, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_MASTER, mbufs[i]); (*skip)++; } else if (unlikely(*skip)) { mbufs[i - *skip] = mbufs[i]; @@ -155,13 +158,13 @@ static inline int handle_l3(struct task_base *tbase, uint16_t nb_rx, struct rte_ handle_ipv4(tbase, mbufs, i, pip, &skip); } else if (vlan->eth_proto == ETYPE_ARP) { dump_l3(tbase, mbufs[i]); - tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_PKT_FROM_NET_TO_MASTER, mbufs[i]); skip++; } break; case ETYPE_ARP: dump_l3(tbase, mbufs[i]); - tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_PKT_FROM_NET_TO_MASTER, mbufs[i]); skip++; break; default: @@ -174,8 +177,35 @@ 
static inline int handle_l3(struct task_base *tbase, uint16_t nb_rx, struct rte_ return skip; } +static inline int handle_ndp(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr) +{ + struct rte_mbuf **mbufs = *mbufs_ptr; + int i; + prox_rte_ether_hdr *hdr[MAX_PKT_BURST]; + int skip = 0; + + for (i = 0; i < nb_rx; i++) { + PREFETCH0(mbufs[i]); + } + for (i = 0; i < nb_rx; i++) { + hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *); + PREFETCH0(hdr[i]); + } + for (i = 0; i < nb_rx; i++) { + prox_rte_ipv6_hdr *ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr[i] + 1); + if (unlikely((hdr[i]->ether_type == ETYPE_IPv6) && (ipv6_hdr->proto == ICMPv6))) { + dump_l3(tbase, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, NDP_PKT_FROM_NET_TO_MASTER, mbufs[i]); + skip++; + } else if (unlikely(skip)) { + mbufs[i - skip] = mbufs[i]; + } + } + return skip; +} + static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, - void (*next)(struct rx_params_hw *rx_param_hw), int l3) + void (*next)(struct rx_params_hw *rx_param_hw), int l3_ndp) { uint8_t last_read_portid; uint16_t nb_rx, ret; @@ -191,8 +221,10 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf nb_rx = rx_pkt_hw_port_queue(pq, *mbufs_ptr, multi); next(&tbase->rx_params_hw); - if (l3) + if (l3_ndp == PROX_L3) skip = handle_l3(tbase, nb_rx, mbufs_ptr); + else if (l3_ndp == PROX_NDP) + skip = handle_ndp(tbase, nb_rx, mbufs_ptr); if (skip) TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip); @@ -204,7 +236,7 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf return 0; } -static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, int l3) +static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, int l3_ndp) { uint16_t nb_rx, n; int skip = 0; @@ -230,8 +262,11 @@ static inline uint16_t rx_pkt_hw1_param(struct 
task_base *tbase, struct rte_mbuf if (nb_rx == 0) return 0; - if (l3) + + if (l3_ndp == PROX_L3) skip = handle_l3(tbase, nb_rx, mbufs_ptr); + else if (l3_ndp == PROX_NDP) + skip = handle_ndp(tbase, nb_rx, mbufs_ptr); if (skip) TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip); @@ -275,32 +310,62 @@ uint16_t rx_pkt_hw1_multi(struct task_base *tbase, struct rte_mbuf ***mbufs) uint16_t rx_pkt_hw_l3(struct task_base *tbase, struct rte_mbuf ***mbufs) { - return rx_pkt_hw_param(tbase, mbufs, 0, next_port, 1); + return rx_pkt_hw_param(tbase, mbufs, 0, next_port, PROX_L3); +} + +uint16_t rx_pkt_hw_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 0, next_port, PROX_NDP); } uint16_t rx_pkt_hw_pow2_l3(struct task_base *tbase, struct rte_mbuf ***mbufs) { - return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, 1); + return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, PROX_L3); +} + +uint16_t rx_pkt_hw_pow2_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, PROX_NDP); } uint16_t rx_pkt_hw1_l3(struct task_base *tbase, struct rte_mbuf ***mbufs) { - return rx_pkt_hw1_param(tbase, mbufs, 0, 1); + return rx_pkt_hw1_param(tbase, mbufs, 0, PROX_L3); +} + +uint16_t rx_pkt_hw1_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw1_param(tbase, mbufs, 0, PROX_NDP); } uint16_t rx_pkt_hw_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs) { - return rx_pkt_hw_param(tbase, mbufs, 1, next_port, 1); + return rx_pkt_hw_param(tbase, mbufs, 1, next_port, PROX_L3); +} + +uint16_t rx_pkt_hw_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 1, next_port, PROX_NDP); } uint16_t rx_pkt_hw_pow2_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs) { - return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, 1); + return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, PROX_L3); +} + +uint16_t 
rx_pkt_hw_pow2_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, PROX_NDP); } uint16_t rx_pkt_hw1_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs) { - return rx_pkt_hw1_param(tbase, mbufs, 1, 1); + return rx_pkt_hw1_param(tbase, mbufs, 1, PROX_L3); +} + +uint16_t rx_pkt_hw1_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs) +{ + return rx_pkt_hw1_param(tbase, mbufs, 1, PROX_NDP); } /* The following functions implement ring access */ |