diff options
author | Xavier Simonart <xavier.simonart@intel.com> | 2020-04-24 21:52:12 +0200 |
---|---|---|
committer | Xavier Simonart <xavier.simonart@intel.com> | 2020-05-29 23:28:44 +0200 |
commit | ca250755c6ecad89fc30507a4c6707eedc658f5d (patch) | |
tree | c3a573bc038ba7872e0a19b4927c1ae96803fe68 /VNFs/DPPD-PROX/rx_pkt.c | |
parent | fa869940dd9bb459ac599fe80c26c9d3e720fd31 (diff) |
Added support for netlink
Through this commit, ARP and ICMP messages are forwarded to the kernel
when vdev tap devices are enabled, as well as in PROX L3 mode.
ICMP support has also been added to master (i.e. PROX L3 mode) and to
swap (i.e. even when the L3 submode is not enabled).
Change-Id: Ie6bf52cbae7171bfca041ff18651d4ec866f44cd
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
Diffstat (limited to 'VNFs/DPPD-PROX/rx_pkt.c')
-rw-r--r-- | VNFs/DPPD-PROX/rx_pkt.c | 40 |
1 file changed, 31 insertions, 9 deletions
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c index 075069c8..4832066a 100644 --- a/VNFs/DPPD-PROX/rx_pkt.c +++ b/VNFs/DPPD-PROX/rx_pkt.c @@ -1,5 +1,5 @@ /* -// Copyright (c) 2010-2017 Intel Corporation +// Copyright (c) 2010-2020 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -125,16 +125,27 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf if (l3) { struct rte_mbuf **mbufs = *mbufs_ptr; int i; - struct ether_hdr_arp *hdr[MAX_PKT_BURST]; + struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST]; + prox_rte_ether_hdr *hdr; for (i = 0; i < nb_rx; i++) { PREFETCH0(mbufs[i]); } for (i = 0; i < nb_rx; i++) { - hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *); - PREFETCH0(hdr[i]); + hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *); + PREFETCH0(hdr_arp[i]); } for (i = 0; i < nb_rx; i++) { - if (unlikely(hdr[i]->ether_hdr.ether_type == ETYPE_ARP)) { + if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) { + hdr = (prox_rte_ether_hdr *)hdr_arp[i]; + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(hdr + 1); + if (pip->next_proto_id == IPPROTO_ICMP) { + dump_l3(tbase, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_CTRL, mbufs[i]); + skip++; + } else if (unlikely(skip)) { + mbufs[i - skip] = mbufs[i]; + } + } else if (unlikely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_ARP)) { dump_l3(tbase, mbufs[i]); tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]); skip++; @@ -181,16 +192,27 @@ static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf if (l3) { struct rte_mbuf **mbufs = *mbufs_ptr; int i; - struct ether_hdr_arp *hdr[MAX_PKT_BURST]; + struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST]; + prox_rte_ether_hdr *hdr; for (i = 0; i < nb_rx; i++) { PREFETCH0(mbufs[i]); } for (i = 0; i < nb_rx; i++) { - hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct 
ether_hdr_arp *); - PREFETCH0(hdr[i]); + hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *); + PREFETCH0(hdr_arp[i]); } for (i = 0; i < nb_rx; i++) { - if (unlikely(hdr[i]->ether_hdr.ether_type == ETYPE_ARP)) { + if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) { + hdr = (prox_rte_ether_hdr *)hdr_arp[i]; + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(hdr + 1); + if (pip->next_proto_id == IPPROTO_ICMP) { + dump_l3(tbase, mbufs[i]); + tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_CTRL, mbufs[i]); + skip++; + } else if (unlikely(skip)) { + mbufs[i - skip] = mbufs[i]; + } + } else if (unlikely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_ARP)) { dump_l3(tbase, mbufs[i]); tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]); skip++; |