author	Xavier Simonart <xavier.simonart@intel.com>	2020-05-11 00:20:51 +0200
committer	Xavier Simonart <xavier.simonart@intel.com>	2020-05-29 23:36:49 +0200
commit	f7148d3d3a0a9dedf24cb4d7a5a72b63c17e6add (patch)
tree	4fd8ba153545f8810f78b2fcb42ec0ce9e0b1014 /VNFs
parent	1614130d60abfaa89a41ba8eed5f9bbf41d9a4f4 (diff)
Added initial VLAN support with vdev devices
For kernel-supported devices, add VLAN tag support. This can be
configured through the port parameter: vlan tag=<vlan tag>.
If this parameter is set, a VLAN-tagged interface is created on top of
the tap device. This is only supported for vdev tap devices.
When an (untagged) packet is sent to the tap device (through the
socket), the tap reacts by sending a tagged packet.
Note that receiving a tagged packet in L3 mode (without tap support)
is not yet supported.

Change-Id: I363fa2f8d2341ac41ef23620222ece1d944bf336
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
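As a usage sketch, a port section enabling the new parameter might look like the following. Only the "vlan tag" key is introduced by this patch; the other keys, including "vdev", are assumptions about the surrounding PROX port configuration and may differ in a real config file:

    [port 0]
    name=p0
    vdev=prox_tap0
    vlan tag=100

With "vlan tag" set, init_rte_dev() names the tagged interface <vdev>_<tag> (here prox_tap0_100) and creates it on top of the tap device, as shown in prox_port_cfg.c below.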
Diffstat (limited to 'VNFs')
-rw-r--r--	VNFs/DPPD-PROX/prox_args.c	3
-rw-r--r--	VNFs/DPPD-PROX/prox_port_cfg.c	12
-rw-r--r--	VNFs/DPPD-PROX/prox_port_cfg.h	1
-rw-r--r--	VNFs/DPPD-PROX/rx_pkt.c	150
4 files changed, 89 insertions, 77 deletions
diff --git a/VNFs/DPPD-PROX/prox_args.c b/VNFs/DPPD-PROX/prox_args.c
index 30b4cbd7..9e12eb68 100644
--- a/VNFs/DPPD-PROX/prox_args.c
+++ b/VNFs/DPPD-PROX/prox_args.c
@@ -592,6 +592,9 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
#endif
}
+ else if (STR_EQ(str, "vlan tag")) {
+ return parse_int(&cfg->vlan_tag, pkey);
+ }
else if (STR_EQ(str, "vlan")) {
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
uint32_t val;
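The new "vlan tag" key is handed to parse_int(), PROX's integer parser, and stored in the port's vlan_tag field. As a minimal, self-contained sketch of the same idea (parse_int itself is not shown in this diff, so this is an illustration rather than its actual implementation):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Parse a decimal value such as "100" (from "vlan tag=100") into *dst.
       Returns 0 on success, -1 on malformed input or an out-of-range value. */
    static int parse_vlan_tag(uint32_t *dst, const char *str)
    {
        char *end = NULL;
        errno = 0;
        unsigned long val = strtoul(str, &end, 0);
        if (errno || end == str || *end != '\0' || val > 4094)
            return -1;   /* 802.1Q VLAN IDs fit in 12 bits (1-4094 usable) */
        *dst = (uint32_t)val;
        return 0;
    }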
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c
index 9798c590..2abf4d58 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.c
+++ b/VNFs/DPPD-PROX/prox_port_cfg.c
@@ -213,7 +213,17 @@ void init_rte_dev(int use_dummy_devices)
prox_port_cfg[vdev_port_id].active = 1;
prox_port_cfg[vdev_port_id].dpdk_mapping = port_id;
prox_port_cfg[vdev_port_id].n_txq = 1;
- strncpy(prox_port_cfg[vdev_port_id].name, port_cfg->vdev, MAX_NAME_SIZE);
+
+ if (prox_port_cfg[port_id].vlan_tag) {
+ char command[1024];
+ snprintf(prox_port_cfg[vdev_port_id].name, MAX_NAME_SIZE, "%s_%d", port_cfg->vdev, prox_port_cfg[port_id].vlan_tag);
+ sprintf(command, "ip link add link %s name %s type vlan id %d", port_cfg->vdev, prox_port_cfg[vdev_port_id].name, prox_port_cfg[port_id].vlan_tag);
+ system(command);
+ plog_info("Running %s\n", command);
+ plog_info("Using vlan tag %d - added device %s\n", prox_port_cfg[port_id].vlan_tag, prox_port_cfg[vdev_port_id].name);
+ } else
+ strncpy(prox_port_cfg[vdev_port_id].name, port_cfg->vdev, MAX_NAME_SIZE);
+
prox_port_cfg[port_id].dpdk_mapping = vdev_port_id;
prox_port_cfg[vdev_port_id].ip = rte_be_to_cpu_32(prox_port_cfg[port_id].ip);
prox_port_cfg[port_id].ip = 0; // So only vdev has an IP associated
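The VLAN interface itself is created by shelling out to iproute2. Below is a self-contained sketch of the same step written a bit more defensively, with bounded formatting and a checked exit status; the hunk above uses sprintf() into a fixed buffer and ignores the return value of system(), so this is an illustration of the design choice, not the merged code:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Create <vdev>_<tag> as an 802.1Q sub-interface of the kernel tap <vdev>,
       e.g. "ip link add link prox_tap0 name prox_tap0_100 type vlan id 100". */
    static int create_vlan_subif(const char *vdev, uint32_t vlan_tag)
    {
        char name[64];
        char command[1024];

        snprintf(name, sizeof(name), "%s_%u", vdev, vlan_tag);
        int n = snprintf(command, sizeof(command),
                         "ip link add link %s name %s type vlan id %u",
                         vdev, name, vlan_tag);
        if (n < 0 || (size_t)n >= sizeof(command))
            return -1;                        /* formatting error or truncation */
        return system(command) == 0 ? 0 : -1; /* non-zero means "ip" failed */
    }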
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.h b/VNFs/DPPD-PROX/prox_port_cfg.h
index d6090a35..ad3d9380 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.h
+++ b/VNFs/DPPD-PROX/prox_port_cfg.h
@@ -83,6 +83,7 @@ struct prox_port_cfg {
int dpdk_mapping;
uint32_t ip;
int fd;
+ uint32_t vlan_tag;
};
extern rte_atomic32_t lsc;
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c
index 6a6112b5..da59fda7 100644
--- a/VNFs/DPPD-PROX/rx_pkt.c
+++ b/VNFs/DPPD-PROX/rx_pkt.c
@@ -107,11 +107,78 @@ static inline void dump_l3(struct task_base *tbase, struct rte_mbuf *mbuf)
}
}
+static inline void handle_ipv4(struct task_base *tbase, struct rte_mbuf **mbufs, int i, prox_rte_ipv4_hdr *pip, int *skip)
+{
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(pip + 1);
+ if (pip->next_proto_id == IPPROTO_ICMP) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_CTRL, mbufs[i]);
+ (*skip)++;
+ } else if ((tcp->src_port == TCP_PORT_BGP) || (tcp->dst_port == TCP_PORT_BGP)) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_CTRL, mbufs[i]);
+ (*skip)++;
+ } else if (unlikely(*skip)) {
+ mbufs[i - *skip] = mbufs[i];
+ }
+}
+static inline int handle_l3(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
+{
+ struct rte_mbuf **mbufs = *mbufs_ptr;
+ int i;
+ struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST];
+ prox_rte_ether_hdr *hdr;
+ prox_rte_ipv4_hdr *pip;
+ prox_rte_vlan_hdr *vlan;
+ int skip = 0;
+
+ for (i = 0; i < nb_rx; i++) {
+ PREFETCH0(mbufs[i]);
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
+ PREFETCH0(hdr_arp[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) {
+ hdr = (prox_rte_ether_hdr *)hdr_arp[i];
+ pip = (prox_rte_ipv4_hdr *)(hdr + 1);
+ handle_ipv4(tbase, mbufs, i, pip, &skip);
+ } else {
+ switch (hdr_arp[i]->ether_hdr.ether_type) {
+ case ETYPE_VLAN:
+ hdr = (prox_rte_ether_hdr *)hdr_arp[i];
+ vlan = (prox_rte_vlan_hdr *)(hdr + 1);
+ if (vlan->eth_proto == ETYPE_IPv4) {
+ pip = (prox_rte_ipv4_hdr *)(vlan + 1);
+ handle_ipv4(tbase, mbufs, i, pip, &skip);
+ } else if (vlan->eth_proto == ETYPE_ARP) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
+ skip++;
+ }
+ break;
+ case ETYPE_ARP:
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
+ skip++;
+ break;
+ default:
+ if (unlikely(skip)) {
+ mbufs[i - skip] = mbufs[i];
+ }
+ }
+ }
+ }
+ return skip;
+}
+
static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi,
void (*next)(struct rx_params_hw *rx_param_hw), int l3)
{
uint8_t last_read_portid;
- uint16_t nb_rx;
+ uint16_t nb_rx, ret;
int skip = 0;
START_EMPTY_MEASSURE();
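The extracted handle_l3() above also adds a case for 802.1Q frames: when the outer ether_type is ETYPE_VLAN, it looks one 4-byte VLAN header deeper before deciding whether the inner packet is IPv4 (and, within that, ICMP/BGP) or ARP. A self-contained sketch of that header walk, using standard layouts and host-order constants instead of PROX's prox_rte_* types and ETYPE_* values (illustration only):

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohs */

    struct eth_hdr  { uint8_t dst[6], src[6]; uint16_t ether_type; } __attribute__((packed));
    struct vlan_hdr { uint16_t tci; uint16_t inner_type; } __attribute__((packed));

    enum pkt_class { PKT_IPV4, PKT_ARP, PKT_OTHER };

    /* Classify a frame, stepping over a single 802.1Q tag if one is present,
       and report where the L3 (or ARP) header starts. */
    static enum pkt_class classify(const uint8_t *frame, const uint8_t **l3_hdr)
    {
        const struct eth_hdr *eth = (const struct eth_hdr *)frame;
        uint16_t type = ntohs(eth->ether_type);
        const uint8_t *l3 = frame + sizeof(*eth);

        if (type == 0x8100) {                          /* 802.1Q tagged frame */
            const struct vlan_hdr *vlan = (const struct vlan_hdr *)l3;
            type = ntohs(vlan->inner_type);            /* protocol inside the tag */
            l3 += sizeof(*vlan);                       /* inner header is 4 bytes further */
        }
        *l3_hdr = l3;
        if (type == 0x0800) return PKT_IPV4;
        if (type == 0x0806) return PKT_ARP;
        return PKT_OTHER;
    }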
@@ -124,43 +191,8 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf
nb_rx = rx_pkt_hw_port_queue(pq, *mbufs_ptr, multi);
next(&tbase->rx_params_hw);
- if (l3) {
- struct rte_mbuf **mbufs = *mbufs_ptr;
- int i;
- struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST];
- prox_rte_ether_hdr *hdr;
- for (i = 0; i < nb_rx; i++) {
- PREFETCH0(mbufs[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
- PREFETCH0(hdr_arp[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) {
- hdr = (prox_rte_ether_hdr *)hdr_arp[i];
- prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(hdr + 1);
- prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(pip + 1);
- if (pip->next_proto_id == IPPROTO_ICMP) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_CTRL, mbufs[i]);
- skip++;
- } else if ((tcp->src_port == TCP_PORT_BGP) || (tcp->dst_port == TCP_PORT_BGP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- } else if (unlikely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_ARP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- }
- }
+ if (l3)
+ skip = handle_l3(tbase, nb_rx, mbufs_ptr);
if (skip)
TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);
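In both receive paths, control-plane packets (ARP, ICMP, BGP) are pushed to ctrl_plane_ring and removed from the burst by shifting the remaining data-plane mbufs left; handle_l3() returns how many were diverted so the caller can account for them with TASK_STATS_ADD_RX_NON_DP. A minimal sketch of that in-place compaction idiom on a generic pointer array (not PROX's mbuf type):

    #include <stddef.h>

    /* Compact pkts[] in place, dropping the entries flagged in drop[].
       Returns the number removed; the first n - removed pointers keep
       their original relative order, mirroring the 'skip' logic above. */
    static size_t compact(void **pkts, const int *drop, size_t n)
    {
        size_t skip = 0;
        for (size_t i = 0; i < n; i++) {
            if (drop[i])
                skip++;                       /* diverted packet leaves a hole */
            else if (skip)
                pkts[i - skip] = pkts[i];     /* slide kept packet over the hole */
        }
        return skip;
    }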
@@ -196,44 +228,10 @@ static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf
}
}
- if (l3) {
- struct rte_mbuf **mbufs = *mbufs_ptr;
- int i;
- struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST];
- prox_rte_ether_hdr *hdr;
- for (i = 0; i < nb_rx; i++) {
- PREFETCH0(mbufs[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
- PREFETCH0(hdr_arp[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- // plog_info("ether_type = %x\n", hdr_arp[i]->ether_hdr.ether_type);
- if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) {
- hdr = (prox_rte_ether_hdr *)hdr_arp[i];
- prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(hdr + 1);
- prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(pip + 1);
- if (pip->next_proto_id == IPPROTO_ICMP) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_CTRL, mbufs[i]);
- skip++;
- } else if ((tcp->src_port == TCP_PORT_BGP) || (tcp->dst_port == TCP_PORT_BGP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- } else if (unlikely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_ARP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- }
- }
+ if (nb_rx == 0)
+ return 0;
+ if (l3)
+ skip = handle_l3(tbase, nb_rx, mbufs_ptr);
if (skip)
TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);