From ab615dd91dcb355615b1f1f7266d878b70b35b4d Mon Sep 17 00:00:00 2001 From: Xavier Simonart Date: Wed, 23 Oct 2019 12:38:51 +0200 Subject: Prepare for DPDK 19.08 support This commit prepares PROX for supporting changes from DPDK 19.08 Mainly, the "sed" commands listed below were run. In addition, - some forward definitions have been removed, - prox_compat.h has been updated - prox_compat.h has been included where necessary. Hence such a commit is rather easy to review and to reapply on other branches if necessary. sed -i 's/struct ether_hdr/prox_rte_ether_hdr/g' *.h sed -i 's/struct ether_hdr/prox_rte_ether_hdr/g' *.c sed -i 's/struct ether_addr/prox_rte_ether_addr/g' *.c sed -i 's/struct ether_addr/prox_rte_ether_addr/g' *.h sed -i 's/prox_rte_ether_addr_port/struct ether_addr_port/g' *.c sed -i 's/prox_rte_ether_addr_port/struct ether_addr_port/g' *.h sed -i 's/struct vlan_hdr/prox_rte_vlan_hdr/g' *.c sed -i 's/struct vlan_hdr/prox_rte_vlan_hdr/g' *.h sed -i 's/struct ipv4_hdr/prox_rte_ipv4_hdr/g' *.h sed -i 's/struct ipv4_hdr/prox_rte_ipv4_hdr/g' *.c sed -i 's/struct ipv6_hdr/prox_rte_ipv6_hdr/g' *.c sed -i 's/struct ipv6_hdr/prox_rte_ipv6_hdr/g' *.h sed -i 's/struct udp_hdr/prox_rte_udp_hdr/g' *.c sed -i 's/struct udp_hdr/prox_rte_udp_hdr/g' *.h sed -i 's/struct tcp_hdr/prox_rte_tcp_hdr/g' *.c sed -i 's/struct tcp_hdr/prox_rte_tcp_hdr/g' *.h sed -i 's/struct prox_rte_ether_addr_copy/ether_addr_copy/g' *.c sed -i 's/struct prox_rte_ether_addr_copy/ether_addr_copy/g' *.h sed -i 's/struct prox_rte_ether_addr_copy/prox_rte_ether_addr_copy/g' *.c sed -i 's/struct prox_rte_ether_addr_copy/ether_addr_copy/g' *.h sed -i 's/prox_rte_ether_addr_copy/ether_addr_copy/g' *.h sed -i 's/prox_rte_ether_addr_copy/ether_addr_copy/g' *.c sed -i 's/ether_addr_copy/prox_rte_ether_addr_copy/g' *.c sed -i 's/ether_addr_copy/prox_rte_ether_addr_copy/g' *.h sed -i 's/prox_rte_ether_hdr_arp/struct ether_hdr_arp/g' *.h sed -i 's/prox_rte_ether_hdr_arp/struct ether_hdr_arp/g' *.c sed -i 's/struct 
vxlan_gpe_hdr/prox_rte_vxlan_gpe_hdr/g' *.h sed -i 's/struct vxlan_gpe_hdr/prox_rte_vxlan_gpe_hdr/g' *.c sed -i 's/eth_random_addr/prox_rte_eth_random_addr/g' *.c sed -i 's/eth_random_addr/prox_rte_eth_random_addr/g' *.h sed -i 's/ETHER_CRC_LEN/PROX_RTE_ETHER_CRC_LEN/g' *.c sed -i 's/ETHER_CRC_LEN/PROX_RTE_ETHER_CRC_LEN/g' *.h sed -i 's/ETHER_HDR_LEN/PROX_RTE_ETHER_HDR_LEN/g' *.c sed -i 's/ETHER_HDR_LEN/PROX_RTE_ETHER_HDR_LEN/g' *.h sed -i 's/ETHER_MAX_LEN/PROX_RTE_ETHER_MAX_LEN/g' *.c sed -i 's/ETHER_MAX_LEN/PROX_RTE_ETHER_MAX_LEN/g' *.h sed -i 's/ETHER_MIN_LEN/PROX_RTE_ETHER_MIN_LEN/g' *.c sed -i 's/ETHER_MIN_LEN/PROX_RTE_ETHER_MIN_LEN/g' *.h sed -i 's/TCP_SYN_FLAG/PROX_RTE_TCP_SYN_FLAG/g' *.c sed -i 's/TCP_RST_FLAG/PROX_RTE_TCP_RST_FLAG/g' *.c sed -i 's/TCP_FIN_FLAG/PROX_RTE_TCP_FIN_FLAG/g' *.c sed -i 's/TCP_ACK_FLAG/PROX_RTE_TCP_ACK_FLAG/g' *.c Change-Id: I4dfe8be68e618c94dcaae28754579fbeb571bb00 Signed-off-by: Xavier Simonart --- VNFs/DPPD-PROX/acl_field_def.h | 10 +-- VNFs/DPPD-PROX/arp.h | 25 +++--- VNFs/DPPD-PROX/bng_pkts.h | 25 +++--- VNFs/DPPD-PROX/cmd_parser.c | 8 +- VNFs/DPPD-PROX/commands.c | 12 +-- VNFs/DPPD-PROX/commands.h | 3 +- VNFs/DPPD-PROX/defaults.c | 10 +-- VNFs/DPPD-PROX/defaults.h | 5 +- VNFs/DPPD-PROX/genl4_stream.h | 4 +- VNFs/DPPD-PROX/genl4_stream_tcp.c | 122 ++++++++++++++-------------- VNFs/DPPD-PROX/genl4_stream_udp.c | 16 ++-- VNFs/DPPD-PROX/handle_aggregator.c | 14 ++-- VNFs/DPPD-PROX/handle_arp.c | 8 +- VNFs/DPPD-PROX/handle_blockudp.c | 4 +- VNFs/DPPD-PROX/handle_cgnat.c | 18 ++--- VNFs/DPPD-PROX/handle_classify.c | 2 +- VNFs/DPPD-PROX/handle_esp.c | 138 ++++++++++++++++---------------- VNFs/DPPD-PROX/handle_fm.c | 16 ++-- VNFs/DPPD-PROX/handle_gen.c | 62 +++++++------- VNFs/DPPD-PROX/handle_genl4.c | 16 ++-- VNFs/DPPD-PROX/handle_gre_decap_encap.c | 62 +++++++------- VNFs/DPPD-PROX/handle_impair.c | 22 ++--- VNFs/DPPD-PROX/handle_ipv6_tunnel.c | 70 ++++++++-------- VNFs/DPPD-PROX/handle_l2fwd.c | 20 ++--- 
VNFs/DPPD-PROX/handle_lat.c | 2 +- VNFs/DPPD-PROX/handle_lb_5tuple.c | 10 +-- VNFs/DPPD-PROX/handle_lb_net.c | 30 +++---- VNFs/DPPD-PROX/handle_lb_pos.c | 6 +- VNFs/DPPD-PROX/handle_lb_qinq.c | 14 ++-- VNFs/DPPD-PROX/handle_master.c | 8 +- VNFs/DPPD-PROX/handle_mplstag.c | 14 ++-- VNFs/DPPD-PROX/handle_nat.c | 6 +- VNFs/DPPD-PROX/handle_nsh.c | 32 ++++---- VNFs/DPPD-PROX/handle_qinq_decap4.c | 32 ++++---- VNFs/DPPD-PROX/handle_qinq_decap6.c | 8 +- VNFs/DPPD-PROX/handle_qinq_encap4.c | 8 +- VNFs/DPPD-PROX/handle_qinq_encap4.h | 8 +- VNFs/DPPD-PROX/handle_qinq_encap6.c | 4 +- VNFs/DPPD-PROX/handle_qos.c | 4 +- VNFs/DPPD-PROX/handle_routing.c | 14 ++-- VNFs/DPPD-PROX/handle_swap.c | 72 ++++++++--------- VNFs/DPPD-PROX/handle_untag.c | 12 +-- VNFs/DPPD-PROX/hash_entry_types.h | 3 +- VNFs/DPPD-PROX/log.c | 4 +- VNFs/DPPD-PROX/main.c | 6 +- VNFs/DPPD-PROX/mbuf_utils.h | 5 +- VNFs/DPPD-PROX/packet_utils.c | 34 ++++---- VNFs/DPPD-PROX/packet_utils.h | 3 +- VNFs/DPPD-PROX/parse_utils.c | 2 +- VNFs/DPPD-PROX/parse_utils.h | 4 +- VNFs/DPPD-PROX/pkt_parser.h | 31 +++---- VNFs/DPPD-PROX/pkt_prototypes.h | 2 +- VNFs/DPPD-PROX/prox_args.c | 4 +- VNFs/DPPD-PROX/prox_cksum.c | 18 ++--- VNFs/DPPD-PROX/prox_cksum.h | 11 +-- VNFs/DPPD-PROX/prox_compat.h | 28 ++++++- VNFs/DPPD-PROX/prox_lua_types.c | 8 +- VNFs/DPPD-PROX/prox_lua_types.h | 8 +- VNFs/DPPD-PROX/prox_port_cfg.c | 14 ++-- VNFs/DPPD-PROX/prox_port_cfg.h | 5 +- VNFs/DPPD-PROX/qinq.h | 5 +- VNFs/DPPD-PROX/task_init.h | 5 +- VNFs/DPPD-PROX/vxlangpe_nsh.h | 4 +- 63 files changed, 609 insertions(+), 571 deletions(-) (limited to 'VNFs') diff --git a/VNFs/DPPD-PROX/acl_field_def.h b/VNFs/DPPD-PROX/acl_field_def.h index ede5bea7..4f05ae80 100644 --- a/VNFs/DPPD-PROX/acl_field_def.h +++ b/VNFs/DPPD-PROX/acl_field_def.h @@ -24,9 +24,9 @@ #include "qinq.h" struct pkt_eth_ipv4_udp { - struct ether_hdr ether_hdr; - struct ipv4_hdr ipv4_hdr; - struct udp_hdr udp_hdr; + prox_rte_ether_hdr ether_hdr; + prox_rte_ipv4_hdr ipv4_hdr; + 
prox_rte_udp_hdr udp_hdr; } __attribute__((packed)); static struct rte_acl_field_def pkt_eth_ipv4_udp_defs[] = { @@ -73,8 +73,8 @@ static struct rte_acl_field_def pkt_eth_ipv4_udp_defs[] = { struct pkt_qinq_ipv4_udp { struct qinq_hdr qinq_hdr; - struct ipv4_hdr ipv4_hdr; - struct udp_hdr udp_hdr; + prox_rte_ipv4_hdr ipv4_hdr; + prox_rte_udp_hdr udp_hdr; }; static struct rte_acl_field_def pkt_qinq_ipv4_udp_defs[] = { diff --git a/VNFs/DPPD-PROX/arp.h b/VNFs/DPPD-PROX/arp.h index 488008d7..c0f74cbb 100644 --- a/VNFs/DPPD-PROX/arp.h +++ b/VNFs/DPPD-PROX/arp.h @@ -18,6 +18,7 @@ #define _ARP_H_ #include +#include "prox_compat.h" #include "etypes.h" #include "mbuf_utils.h" @@ -25,9 +26,9 @@ #define ARP_REPLY 0x200 struct _arp_ipv4 { - struct ether_addr sha; /* Sender hardware address */ + prox_rte_ether_addr sha; /* Sender hardware address */ uint32_t spa; /* Sender protocol address */ - struct ether_addr tha; /* Target hardware address */ + prox_rte_ether_addr tha; /* Target hardware address */ uint32_t tpa; /* Target protocol address */ } __attribute__((__packed__)); typedef struct _arp_ipv4 arp_ipv4_t; @@ -42,7 +43,7 @@ struct my_arp_t { } __attribute__((__packed__)); struct ether_hdr_arp { - struct ether_hdr ether_hdr; + prox_rte_ether_hdr ether_hdr; struct my_arp_t arp; }; @@ -51,21 +52,21 @@ static int arp_is_gratuitous(struct ether_hdr_arp *hdr) return hdr->arp.data.spa == hdr->arp.data.tpa; } -static inline void build_arp_reply(struct ether_hdr_arp *hdr_arp, struct ether_addr *s_addr) +static inline void build_arp_reply(struct ether_hdr_arp *hdr_arp, prox_rte_ether_addr *s_addr) { uint32_t ip_source = hdr_arp->arp.data.spa; - memcpy(hdr_arp->ether_hdr.d_addr.addr_bytes, hdr_arp->ether_hdr.s_addr.addr_bytes, sizeof(struct ether_addr)); - memcpy(hdr_arp->ether_hdr.s_addr.addr_bytes, s_addr, sizeof(struct ether_addr)); + memcpy(hdr_arp->ether_hdr.d_addr.addr_bytes, hdr_arp->ether_hdr.s_addr.addr_bytes, sizeof(prox_rte_ether_addr)); + 
memcpy(hdr_arp->ether_hdr.s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr)); hdr_arp->arp.data.spa = hdr_arp->arp.data.tpa; hdr_arp->arp.data.tpa = ip_source; hdr_arp->arp.oper = 0x200; - memcpy(&hdr_arp->arp.data.tha, &hdr_arp->arp.data.sha, sizeof(struct ether_addr)); - memcpy(&hdr_arp->arp.data.sha, s_addr, sizeof(struct ether_addr)); + memcpy(&hdr_arp->arp.data.tha, &hdr_arp->arp.data.sha, sizeof(prox_rte_ether_addr)); + memcpy(&hdr_arp->arp.data.sha, s_addr, sizeof(prox_rte_ether_addr)); } -static inline void build_arp_request(struct rte_mbuf *mbuf, struct ether_addr *src_mac, uint32_t ip_dst, uint32_t ip_src) +static inline void build_arp_request(struct rte_mbuf *mbuf, prox_rte_ether_addr *src_mac, uint32_t ip_dst, uint32_t ip_src) { struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *); uint64_t mac_bcast = 0xFFFFFFFFFFFF; @@ -83,11 +84,11 @@ static inline void build_arp_request(struct rte_mbuf *mbuf, struct ether_addr *s hdr_arp->arp.oper = 0x100; hdr_arp->arp.data.spa = ip_src; hdr_arp->arp.data.tpa = ip_dst; - memset(&hdr_arp->arp.data.tha, 0, sizeof(struct ether_addr)); - memcpy(&hdr_arp->arp.data.sha, src_mac, sizeof(struct ether_addr)); + memset(&hdr_arp->arp.data.tha, 0, sizeof(prox_rte_ether_addr)); + memcpy(&hdr_arp->arp.data.sha, src_mac, sizeof(prox_rte_ether_addr)); } -static void create_mac(struct ether_hdr_arp *hdr, struct ether_addr *addr) +static void create_mac(struct ether_hdr_arp *hdr, prox_rte_ether_addr *addr) { addr->addr_bytes[0] = 0x2; addr->addr_bytes[1] = 0; diff --git a/VNFs/DPPD-PROX/bng_pkts.h b/VNFs/DPPD-PROX/bng_pkts.h index 82e6199c..50780e3b 100644 --- a/VNFs/DPPD-PROX/bng_pkts.h +++ b/VNFs/DPPD-PROX/bng_pkts.h @@ -22,6 +22,7 @@ #include #include +#include "prox_compat.h" #include "gre.h" #include "mpls.h" #include "qinq.h" @@ -32,10 +33,10 @@ struct cpe_pkt { #ifdef USE_QINQ struct qinq_hdr qinq_hdr; #else - struct ether_hdr ether_hdr; + prox_rte_ether_hdr ether_hdr; #endif - struct ipv4_hdr 
ipv4_hdr; - struct udp_hdr udp_hdr; + prox_rte_ipv4_hdr ipv4_hdr; + prox_rte_udp_hdr udp_hdr; } __attribute__((packed)); struct cpe_packet_arp { @@ -47,25 +48,25 @@ struct cpe_packet_arp { going to the core netwerk. Payload may follow after the headers, but no need to touch that. */ struct core_net_pkt_m { - struct ether_hdr ether_hdr; + prox_rte_ether_hdr ether_hdr; #ifdef MPLS_ROUTING union { struct mpls_hdr mpls; uint32_t mpls_bytes; }; #endif - struct ipv4_hdr tunnel_ip_hdr; + prox_rte_ipv4_hdr tunnel_ip_hdr; struct gre_hdr gre_hdr; - struct ipv4_hdr ip_hdr; - struct udp_hdr udp_hdr; + prox_rte_ipv4_hdr ip_hdr; + prox_rte_udp_hdr udp_hdr; } __attribute__((packed)); struct core_net_pkt { - struct ether_hdr ether_hdr; - struct ipv4_hdr tunnel_ip_hdr; + prox_rte_ether_hdr ether_hdr; + prox_rte_ipv4_hdr tunnel_ip_hdr; struct gre_hdr gre_hdr; - struct ipv4_hdr ip_hdr; - struct udp_hdr udp_hdr; + prox_rte_ipv4_hdr ip_hdr; + prox_rte_udp_hdr udp_hdr; } __attribute__((packed)); #define UPSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt) - sizeof(struct cpe_pkt))) @@ -86,7 +87,7 @@ static inline void extract_key_cpe(struct rte_mbuf *mbuf, uint64_t* key) #endif } -static inline void key_core(struct gre_hdr* gre, __attribute__((unused)) struct ipv4_hdr* ip, uint64_t* key) +static inline void key_core(struct gre_hdr* gre, __attribute__((unused)) prox_rte_ipv4_hdr* ip, uint64_t* key) { struct cpe_key *cpe_key = (struct cpe_key*)key; diff --git a/VNFs/DPPD-PROX/cmd_parser.c b/VNFs/DPPD-PROX/cmd_parser.c index 3ad5b534..a8fe3a0a 100644 --- a/VNFs/DPPD-PROX/cmd_parser.c +++ b/VNFs/DPPD-PROX/cmd_parser.c @@ -731,8 +731,8 @@ static int parse_cmd_set_random(const char *str, struct input *input) if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) { plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id); } - else if (offset > ETHER_MAX_LEN) { - plog_err("Offset out of range (must be 
less then %u)\n", ETHER_MAX_LEN); + else if (offset > PROX_RTE_ETHER_MAX_LEN) { + plog_err("Offset out of range (must be less then %u)\n", PROX_RTE_ETHER_MAX_LEN); } else if (value_len > 4) { plog_err("Length out of range (must be less then 4)\n"); @@ -1256,7 +1256,7 @@ static int parse_cmd_tot_imissed_tot(const char *str, struct input *input) static int parse_cmd_enable_multicast(const char *str, struct input *input) { uint8_t port_id; - struct ether_addr mac; + prox_rte_ether_addr mac; if (sscanf(str, "%hhu %hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &port_id, mac.addr_bytes, mac.addr_bytes + 1, mac.addr_bytes + 2, mac.addr_bytes + 3, mac.addr_bytes + 4, mac.addr_bytes + 5 ) != 7) { return -1; @@ -1268,7 +1268,7 @@ static int parse_cmd_enable_multicast(const char *str, struct input *input) static int parse_cmd_disable_multicast(const char *str, struct input *input) { uint8_t port_id; - struct ether_addr mac; + prox_rte_ether_addr mac; if (sscanf(str, "%hhu %hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &port_id, mac.addr_bytes, mac.addr_bytes + 1, mac.addr_bytes + 2, mac.addr_bytes + 3, mac.addr_bytes + 4, mac.addr_bytes + 5 ) != 7) { return -1; diff --git a/VNFs/DPPD-PROX/commands.c b/VNFs/DPPD-PROX/commands.c index a66f4888..11b43e53 100644 --- a/VNFs/DPPD-PROX/commands.c +++ b/VNFs/DPPD-PROX/commands.c @@ -246,7 +246,7 @@ static struct size_unit to_size_unit(uint64_t bytes) return ret; } -static int add_multicast_addr(uint8_t port_id, struct ether_addr *addr) +static int add_multicast_addr(uint8_t port_id, prox_rte_ether_addr *addr) { unsigned int i; int rc = 0; @@ -264,7 +264,7 @@ static int add_multicast_addr(uint8_t port_id, struct ether_addr *addr) } } - ether_addr_copy(addr, &port_cfg->mc_addr[port_cfg->nb_mc_addr]); + prox_rte_ether_addr_copy(addr, &port_cfg->mc_addr[port_cfg->nb_mc_addr]); if ((rc = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr + 1)) != 0) { plog_err("rte_eth_dev_set_mc_addr_list returns %d on port %u\n", rc, port_id); return rc; 
@@ -275,7 +275,7 @@ static int add_multicast_addr(uint8_t port_id, struct ether_addr *addr) return rc; } -static int del_multicast_addr(uint8_t port_id, struct ether_addr *addr) +static int del_multicast_addr(uint8_t port_id, prox_rte_ether_addr *addr) { unsigned int i; int rc = 0; @@ -285,12 +285,12 @@ static int del_multicast_addr(uint8_t port_id, struct ether_addr *addr) for (i = 0; i < port_cfg->nb_mc_addr; i++) { if (is_same_ether_addr(addr, &port_cfg->mc_addr[i])) { // Copy last address to the slot to be deleted - ether_addr_copy(&port_cfg->mc_addr[port_cfg->nb_mc_addr-1], &port_cfg->mc_addr[i]); + prox_rte_ether_addr_copy(&port_cfg->mc_addr[port_cfg->nb_mc_addr-1], &port_cfg->mc_addr[i]); if ((rc = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr - 1)) != 0) { plog_err("rte_eth_dev_set_mc_addr_list returns %d on port %u\n", rc, port_id); // When set failed, let restore the situation we were before calling the function... - ether_addr_copy(addr, &port_cfg->mc_addr[i]); + prox_rte_ether_addr_copy(addr, &port_cfg->mc_addr[i]); return rc; } port_cfg->nb_mc_addr--; @@ -957,7 +957,7 @@ void cmd_reset_port(uint8_t portid) } } -void cmd_multicast(uint8_t port_id, unsigned int val, struct ether_addr *mac) +void cmd_multicast(uint8_t port_id, unsigned int val, prox_rte_ether_addr *mac) { if (!port_is_active(port_id)) { return; diff --git a/VNFs/DPPD-PROX/commands.h b/VNFs/DPPD-PROX/commands.h index 291930ff..5ddb81e2 100644 --- a/VNFs/DPPD-PROX/commands.h +++ b/VNFs/DPPD-PROX/commands.h @@ -19,6 +19,7 @@ #include #include +#include "prox_compat.h" struct input; @@ -65,7 +66,7 @@ void cmd_set_cache_class(uint32_t lcore_id, uint32_t set); void cmd_cache_reset(void); void cmd_reset_port(uint8_t port_id); -void cmd_multicast(uint8_t port_id, unsigned int val, struct ether_addr *mac); +void cmd_multicast(uint8_t port_id, unsigned int val, prox_rte_ether_addr *mac); int reconnect_task(uint32_t lcore_id, uint32_t task_id); int 
bypass_task(uint32_t lcore_id, uint32_t task_id); diff --git a/VNFs/DPPD-PROX/defaults.c b/VNFs/DPPD-PROX/defaults.c index 3dbb7ece..915ee31c 100644 --- a/VNFs/DPPD-PROX/defaults.c +++ b/VNFs/DPPD-PROX/defaults.c @@ -48,7 +48,7 @@ static const struct rte_eth_conf default_port_conf = { .rxmode = { .mq_mode = 0, - .max_rx_pkt_len = PROX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN + .max_rx_pkt_len = PROX_MTU + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN }, .rx_adv_conf = { .rss_conf = { @@ -146,14 +146,14 @@ void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_in targ->mapping[i] = i; // identity } - targ->cbs = ETHER_MAX_LEN; - targ->ebs = ETHER_MAX_LEN; - targ->pbs = ETHER_MAX_LEN; + targ->cbs = PROX_RTE_ETHER_MAX_LEN; + targ->ebs = PROX_RTE_ETHER_MAX_LEN; + targ->pbs = PROX_RTE_ETHER_MAX_LEN; targ->n_max_rules = 1024; targ->ring_size = RING_RX_SIZE; targ->nb_cache_mbuf = MAX_PKT_BURST * 4; - targ->overhead = ETHER_CRC_LEN + 20; + targ->overhead = PROX_RTE_ETHER_CRC_LEN + 20; targ->tunnel_hop_limit = 3; targ->ctrl_freq = 1000; diff --git a/VNFs/DPPD-PROX/defaults.h b/VNFs/DPPD-PROX/defaults.h index 8f850d03..8ce59530 100644 --- a/VNFs/DPPD-PROX/defaults.h +++ b/VNFs/DPPD-PROX/defaults.h @@ -18,6 +18,7 @@ #define _DEFAULTS_H_ #include +#include "prox_compat.h" struct prox_cfg; struct lcore_cfg; @@ -58,8 +59,8 @@ void set_port_defaults(void); TX_MBUF_SIZE is used for when transmitting only: in this case the mbuf size can be smaller. 
*/ #define MBUF_SIZE (2048 + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE) -#define TX_MBUF_SIZE (ETHER_MAX_LEN + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE) +#define TX_MBUF_SIZE (PROX_RTE_ETHER_MAX_LEN + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE) -#define PROX_MTU ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN +#define PROX_MTU PROX_RTE_ETHER_MAX_LEN - PROX_RTE_ETHER_HDR_LEN - PROX_RTE_ETHER_CRC_LEN #endif /* _DEFAULTS_H_ */ diff --git a/VNFs/DPPD-PROX/genl4_stream.h b/VNFs/DPPD-PROX/genl4_stream.h index b180765d..3f1b6c87 100644 --- a/VNFs/DPPD-PROX/genl4_stream.h +++ b/VNFs/DPPD-PROX/genl4_stream.h @@ -160,9 +160,9 @@ static void stream_ctx_reset_move(struct stream_ctx *ctx, struct stream_cfg *cfg static int stream_cfg_calc_max_payload_len(struct stream_cfg *cfg, enum l4gen_peer peer) { const uint32_t l4_hdr_len = cfg->proto == IPPROTO_UDP? - sizeof(struct udp_hdr) : sizeof(struct tcp_hdr); + sizeof(prox_rte_udp_hdr) : sizeof(prox_rte_tcp_hdr); - return ETHER_MAX_LEN - ETHER_CRC_LEN - cfg->data[peer].hdr_len - l4_hdr_len; + return PROX_RTE_ETHER_MAX_LEN - PROX_RTE_ETHER_CRC_LEN - cfg->data[peer].hdr_len - l4_hdr_len; } static int stream_cfg_max_n_segments(struct stream_cfg *cfg) diff --git a/VNFs/DPPD-PROX/genl4_stream_tcp.c b/VNFs/DPPD-PROX/genl4_stream_tcp.c index e0cdb2b9..4d92546b 100644 --- a/VNFs/DPPD-PROX/genl4_stream_tcp.c +++ b/VNFs/DPPD-PROX/genl4_stream_tcp.c @@ -68,8 +68,8 @@ struct tcp_option { void stream_tcp_create_rst(struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struct pkt_tuple *tuple) { - struct tcp_hdr *tcp = (struct tcp_hdr *)l4_meta->l4_hdr; - struct ipv4_hdr *ip = ((struct ipv4_hdr *)tcp) - 1; + prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)l4_meta->l4_hdr; + prox_rte_ipv4_hdr *ip = ((prox_rte_ipv4_hdr *)tcp) - 1; ip->src_addr = tuple->dst_addr; ip->dst_addr = tuple->src_addr; @@ -77,9 +77,9 @@ void 
stream_tcp_create_rst(struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struc tcp->dst_port = tuple->src_port; tcp->src_port = tuple->dst_port; - ip->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr)); - tcp->tcp_flags = TCP_RST_FLAG; - tcp->data_off = ((sizeof(struct tcp_hdr) / 4) << 4); + ip->total_length = rte_bswap16(sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_tcp_hdr)); + tcp->tcp_flags = PROX_RTE_TCP_RST_FLAG; + tcp->data_off = ((sizeof(prox_rte_tcp_hdr) / 4) << 4); rte_pktmbuf_pkt_len(mbuf) = l4_meta->payload - rte_pktmbuf_mtod(mbuf, uint8_t *); rte_pktmbuf_data_len(mbuf) = l4_meta->payload - rte_pktmbuf_mtod(mbuf, uint8_t *); } @@ -94,8 +94,8 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_ pkt = rte_pktmbuf_mtod(mbuf, uint8_t *); rte_memcpy(pkt, stream_cfg->data[act->peer].hdr, stream_cfg->data[act->peer].hdr_len); - struct ipv4_hdr *l3_hdr = (struct ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(struct ipv4_hdr)]; - struct tcp_hdr *l4_hdr = (struct tcp_hdr *)&pkt[stream_cfg->data[act->peer].hdr_len]; + prox_rte_ipv4_hdr *l3_hdr = (prox_rte_ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(prox_rte_ipv4_hdr)]; + prox_rte_tcp_hdr *l4_hdr = (prox_rte_tcp_hdr *)&pkt[stream_cfg->data[act->peer].hdr_len]; l3_hdr->src_addr = ctx->tuple->dst_addr; l3_hdr->dst_addr = ctx->tuple->src_addr; @@ -104,17 +104,17 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_ l4_hdr->src_port = ctx->tuple->dst_port; l4_hdr->dst_port = ctx->tuple->src_port; - uint32_t tcp_len = sizeof(struct tcp_hdr); + uint32_t tcp_len = sizeof(prox_rte_tcp_hdr); uint32_t tcp_payload_len = 0; uint32_t seq_len = 0; struct tcp_option *tcp_op; - if (tcp_flags & TCP_RST_FLAG) { - tcp_flags |= TCP_RST_FLAG; + if (tcp_flags & PROX_RTE_TCP_RST_FLAG) { + tcp_flags |= PROX_RTE_TCP_RST_FLAG; seq_len = 1; } - else if (tcp_flags & TCP_SYN_FLAG) { - tcp_flags |= TCP_SYN_FLAG; + else if (tcp_flags & 
PROX_RTE_TCP_SYN_FLAG) { + tcp_flags |= PROX_RTE_TCP_SYN_FLAG; /* Window scaling */ /* TODO: make options come from the stream. */ @@ -129,14 +129,14 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_ ctx->seq_first_byte = ctx->ackd_seq + 1; } - else if (tcp_flags & TCP_FIN_FLAG) { - tcp_flags |= TCP_FIN_FLAG; + else if (tcp_flags & PROX_RTE_TCP_FIN_FLAG) { + tcp_flags |= PROX_RTE_TCP_FIN_FLAG; seq_len = 1; } - if (tcp_flags & TCP_ACK_FLAG) { + if (tcp_flags & PROX_RTE_TCP_ACK_FLAG) { l4_hdr->recv_ack = rte_bswap32(ctx->recv_seq); - tcp_flags |= TCP_ACK_FLAG; + tcp_flags |= PROX_RTE_TCP_ACK_FLAG; } else l4_hdr->recv_ack = 0; @@ -163,13 +163,13 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_ rte_pktmbuf_pkt_len(mbuf) = l4_payload_offset + data_len; rte_pktmbuf_data_len(mbuf) = l4_payload_offset + data_len; - l3_hdr->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + tcp_len + data_len); + l3_hdr->total_length = rte_bswap16(sizeof(prox_rte_ipv4_hdr) + tcp_len + data_len); plogdx_dbg(mbuf, NULL); plogx_dbg("put tcp packet with flags: %s%s%s, (len = %d, seq = %d, ack =%d)\n", - tcp_flags & TCP_SYN_FLAG? "SYN ":"", - tcp_flags & TCP_ACK_FLAG? "ACK ":"", - tcp_flags & TCP_FIN_FLAG? "FIN ":"", + tcp_flags & PROX_RTE_TCP_SYN_FLAG? "SYN ":"", + tcp_flags & PROX_RTE_TCP_ACK_FLAG? "ACK ":"", + tcp_flags & PROX_RTE_TCP_FIN_FLAG? "FIN ":"", data_len, rte_bswap32(l4_hdr->sent_seq), rte_bswap32(l4_hdr->recv_ack)); } @@ -187,9 +187,9 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx) the current implementation this packet contains the TCP option field to set the MSS. For this, add 4 bytes. 
*/ - return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + 4; + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr) + 4; } - return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr); + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr); } else if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) { /* The reply _could_ (due to races, still possibly @@ -204,7 +204,7 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx) if (remaining_len == 0) { if (ctx->cur_action + 1 != ctx->stream_cfg->n_actions) { if (ctx->stream_cfg->actions[ctx->cur_action + 1].peer == ctx->peer) - return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr); else { uint32_t seq_beg = ctx->recv_seq - ctx->other_seq_first_byte; uint32_t end = ctx->stream_cfg->actions[ctx->cur_action + 1].beg + @@ -212,15 +212,15 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx) uint32_t remaining = end - seq_beg; uint16_t data_len = remaining > 1460? 1460: remaining; - return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + data_len; + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr) + data_len; } } else { - return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr); } } else { - return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr); } } else if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) { @@ -230,10 +230,10 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx) uint32_t remaining = end - seq_beg; uint16_t data_len = remaining > 1460? 
1460: remaining; - return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + data_len; + return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr) + data_len; } else - return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr); + return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr); } static void stream_tcp_proc_in_order_data(struct stream_ctx *ctx, struct l4_meta *l4_meta, int *progress_seq) @@ -294,18 +294,18 @@ static void stream_tcp_proc_in_order_data(struct stream_ctx *ctx, struct l4_meta static int stream_tcp_proc_in(struct stream_ctx *ctx, struct l4_meta *l4_meta) { - struct tcp_hdr *tcp = NULL; + prox_rte_tcp_hdr *tcp = NULL; int got_syn = 0; int got_ack = 0; int got_fin = 0; int got_rst = 0; - tcp = (struct tcp_hdr *)l4_meta->l4_hdr; + tcp = (prox_rte_tcp_hdr *)l4_meta->l4_hdr; - got_syn = tcp->tcp_flags & TCP_SYN_FLAG; - got_ack = tcp->tcp_flags & TCP_ACK_FLAG; - got_fin = tcp->tcp_flags & TCP_FIN_FLAG; - got_rst = tcp->tcp_flags & TCP_RST_FLAG; + got_syn = tcp->tcp_flags & PROX_RTE_TCP_SYN_FLAG; + got_ack = tcp->tcp_flags & PROX_RTE_TCP_ACK_FLAG; + got_fin = tcp->tcp_flags & PROX_RTE_TCP_FIN_FLAG; + got_rst = tcp->tcp_flags & PROX_RTE_TCP_RST_FLAG; plogx_dbg("TCP, flags: %s%s%s, (len = %d, seq = %d, ack =%d)\n", got_syn? "SYN ":"", got_ack? "ACK ":"", got_fin? 
"FIN " : "", l4_meta->len, rte_bswap32(tcp->sent_seq), rte_bswap32(tcp->recv_ack)); if (got_syn) @@ -400,7 +400,7 @@ static int stream_tcp_proc_in(struct stream_ctx *ctx, struct l4_meta *l4_meta) } /* parse options */ - if (((tcp->data_off >> 4)*4) > sizeof(struct tcp_hdr)) { + if (((tcp->data_off >> 4)*4) > sizeof(prox_rte_tcp_hdr)) { struct tcp_option *tcp_op = (struct tcp_option *)(tcp + 1); uint8_t *payload = (uint8_t *)tcp + ((tcp->data_off >> 4)*4); @@ -440,7 +440,7 @@ static int stream_tcp_proc_out_closed(struct stream_ctx *ctx, struct rte_mbuf *m ctx->next_seq = 99; ctx->ackd_seq = 99; - create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_SYN_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -461,7 +461,7 @@ static int stream_tcp_proc_out_listen(struct stream_ctx *ctx, struct rte_mbuf *m pkt_tuple_debug(ctx->tuple); ctx->flags |= STREAM_CTX_F_TCP_ENDED; - create_tcp_pkt(ctx, mbuf, TCP_RST_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_RST_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -475,7 +475,7 @@ static int stream_tcp_proc_out_listen(struct stream_ctx *ctx, struct rte_mbuf *m ctx->tcp_state = SYN_RECEIVED; - create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG | TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_SYN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -517,7 +517,7 @@ static int stream_tcp_proc_out_syn_sent(struct stream_ctx *ctx, struct rte_mbuf return -1; } else { - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); } @@ -542,7 +542,7 @@ static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf 
ctx->same_state = 0; ctx->tcp_state = ESTABLISHED; if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) { - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -562,7 +562,7 @@ static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf data. */ - /* create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); */ + /* create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); */ /* token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); */ *next_tsc = tcp_resched_timeout(ctx); return -1; @@ -576,7 +576,7 @@ static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf ++ctx->same_state; tcp_set_retransmit(ctx); ctx->next_seq = ctx->ackd_seq; - create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG | TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_SYN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -603,7 +603,7 @@ static int stream_tcp_proc_out_estab_tx(struct stream_ctx *ctx, struct rte_mbuf plogx_dbg("Moving to FIN_WAIT\n"); ctx->tcp_state = FIN_WAIT; ctx->same_state = 0; - create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -682,7 +682,7 @@ static int stream_tcp_proc_out_estab_tx(struct stream_ctx *ctx, struct rte_mbuf else ctx->flags &= ~STREAM_CTX_F_MORE_DATA; - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, data_beg, data_len); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, data_beg, data_len); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); if (ctx->flags & STREAM_CTX_F_MORE_DATA) *next_tsc = tcp_resched_timeout(ctx); @@ -705,14 +705,14 @@ static int 
stream_tcp_proc_out_estab_rx(struct stream_ctx *ctx, struct rte_mbuf plogx_dbg("Got fin!\n"); if (1) { ctx->tcp_state = LAST_ACK; - create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; } else { ctx->tcp_state = CLOSE_WAIT; - create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_resched_timeout(ctx); return 0; @@ -727,7 +727,7 @@ static int stream_tcp_proc_out_estab_rx(struct stream_ctx *ctx, struct rte_mbuf plogx_dbg("state++ (ack = %d)\n", ctx->recv_seq); } - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -756,7 +756,7 @@ static int stream_tcp_proc_out_close_wait(struct stream_ctx *ctx, struct rte_mbu when the FIN is sent after ACK'ing the incoming FIN. In any case, it does not matter if there was a packet or not. 
*/ ctx->tcp_state = LAST_ACK; - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG | TCP_FIN_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG | PROX_RTE_TCP_FIN_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -786,7 +786,7 @@ static int stream_tcp_proc_out_last_ack(struct stream_ctx *ctx, struct rte_mbuf ctx->next_seq = ctx->ackd_seq; ctx->same_state++; tcp_set_retransmit(ctx); - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG | TCP_FIN_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG | PROX_RTE_TCP_FIN_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -808,7 +808,7 @@ static int stream_tcp_proc_out_fin_wait(struct stream_ctx *ctx, struct rte_mbuf ctx->tcp_state = TIME_WAIT; ctx->sched_tsc = rte_rdtsc() + ctx->stream_cfg->tsc_timeout_time_wait; plogx_dbg("from FIN_WAIT to TIME_WAIT\n"); - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = ctx->stream_cfg->tsc_timeout_time_wait; return 0; @@ -830,7 +830,7 @@ static int stream_tcp_proc_out_fin_wait(struct stream_ctx *ctx, struct rte_mbuf ctx->same_state++; tcp_set_retransmit(ctx); ctx->next_seq = ctx->ackd_seq; - create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc = tcp_retx_timeout(ctx); return 0; @@ -853,7 +853,7 @@ static int stream_tcp_proc_out_time_wait(struct stream_ctx *ctx, struct rte_mbuf plogx_dbg("Got packet while in TIME_WAIT (pkt ACK reTX)\n"); ctx->sched_tsc = rte_rdtsc() + ctx->stream_cfg->tsc_timeout_time_wait; - create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); + create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); *next_tsc 
= ctx->stream_cfg->tsc_timeout_time_wait; return 0; @@ -917,7 +917,7 @@ int stream_tcp_is_ended(struct stream_ctx *ctx) static void add_pkt_bytes(uint32_t *n_pkts, uint32_t *n_bytes, uint32_t len) { - len = (len < 60? 60 : len) + 20 + ETHER_CRC_LEN; + len = (len < 60? 60 : len) + 20 + PROX_RTE_ETHER_CRC_LEN; (*n_pkts)++; *n_bytes += len; @@ -932,9 +932,9 @@ void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b *n_bytes = 0; /* Connection setup */ - add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(struct tcp_hdr) + 4); /* SYN */ - add_pkt_bytes(n_pkts, n_bytes, server_hdr_len + sizeof(struct tcp_hdr) + 4); /* SYN/ACK */ - add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(struct tcp_hdr)); /* ACK */ + add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(prox_rte_tcp_hdr) + 4); /* SYN */ + add_pkt_bytes(n_pkts, n_bytes, server_hdr_len + sizeof(prox_rte_tcp_hdr) + 4); /* SYN/ACK */ + add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(prox_rte_tcp_hdr)); /* ACK */ for (uint32_t i = 0; i < cfg->n_actions; ++i) { const uint32_t mss = 1440; /* TODO: should come from peer's own mss. */ @@ -947,11 +947,11 @@ void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b while (remaining) { uint32_t seg = remaining > mss? mss: remaining; - add_pkt_bytes(n_pkts, n_bytes, send_hdr_len + sizeof(struct tcp_hdr) + seg); + add_pkt_bytes(n_pkts, n_bytes, send_hdr_len + sizeof(prox_rte_tcp_hdr) + seg); remaining -= seg; } - add_pkt_bytes(n_pkts, n_bytes, reply_hdr_len + sizeof(struct tcp_hdr)); + add_pkt_bytes(n_pkts, n_bytes, reply_hdr_len + sizeof(prox_rte_tcp_hdr)); } /* Connection Tear-down */ @@ -960,7 +960,7 @@ void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b const uint32_t init_hdr_len = last_peer == PEER_CLIENT? client_hdr_len : server_hdr_len; const uint32_t resp_hdr_len = last_peer == PEER_CLIENT? 
server_hdr_len : client_hdr_len; - add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(struct tcp_hdr)); /* FIN */ - add_pkt_bytes(n_pkts, n_bytes, resp_hdr_len + sizeof(struct tcp_hdr)); /* FIN/ACK */ - add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(struct tcp_hdr)); /* ACK */ + add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(prox_rte_tcp_hdr)); /* FIN */ + add_pkt_bytes(n_pkts, n_bytes, resp_hdr_len + sizeof(prox_rte_tcp_hdr)); /* FIN/ACK */ + add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(prox_rte_tcp_hdr)); /* ACK */ } diff --git a/VNFs/DPPD-PROX/genl4_stream_udp.c b/VNFs/DPPD-PROX/genl4_stream_udp.c index 3de2db09..31661682 100644 --- a/VNFs/DPPD-PROX/genl4_stream_udp.c +++ b/VNFs/DPPD-PROX/genl4_stream_udp.c @@ -93,7 +93,7 @@ int stream_udp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_met uint8_t *pkt = rte_pktmbuf_mtod(mbuf, uint8_t *); const struct peer_action *act = &stream_cfg->actions[ctx->cur_action]; - uint16_t pkt_len = stream_cfg->data[act->peer].hdr_len + sizeof(struct udp_hdr) + act->len; + uint16_t pkt_len = stream_cfg->data[act->peer].hdr_len + sizeof(prox_rte_udp_hdr) + act->len; rte_pktmbuf_pkt_len(mbuf) = pkt_len; rte_pktmbuf_data_len(mbuf) = pkt_len; @@ -101,19 +101,19 @@ int stream_udp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_met /* Construct the packet. The template is used up to L4 header, a gap of sizeof(l4_hdr) is skipped, followed by the payload. 
*/ rte_memcpy(pkt, stream_cfg->data[act->peer].hdr, stream_cfg->data[act->peer].hdr_len); - rte_memcpy(pkt + stream_cfg->data[act->peer].hdr_len + sizeof(struct udp_hdr), stream_cfg->data[act->peer].content + act->beg, act->len); + rte_memcpy(pkt + stream_cfg->data[act->peer].hdr_len + sizeof(prox_rte_udp_hdr), stream_cfg->data[act->peer].content + act->beg, act->len); - struct ipv4_hdr *l3_hdr = (struct ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(struct ipv4_hdr)]; - struct udp_hdr *l4_hdr = (struct udp_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len]; + prox_rte_ipv4_hdr *l3_hdr = (prox_rte_ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(prox_rte_ipv4_hdr)]; + prox_rte_udp_hdr *l4_hdr = (prox_rte_udp_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len]; l3_hdr->src_addr = ctx->tuple->dst_addr; l3_hdr->dst_addr = ctx->tuple->src_addr; l3_hdr->next_proto_id = IPPROTO_UDP; l4_hdr->src_port = ctx->tuple->dst_port; l4_hdr->dst_port = ctx->tuple->src_port; - l4_hdr->dgram_len = rte_bswap16(sizeof(struct udp_hdr) + act->len); + l4_hdr->dgram_len = rte_bswap16(sizeof(prox_rte_udp_hdr) + act->len); /* TODO: UDP checksum calculation */ - l3_hdr->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + act->len); + l3_hdr->total_length = rte_bswap16(sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr) + act->len); ctx->cur_pos[ctx->peer] += act->len; ctx->cur_action++; @@ -144,7 +144,7 @@ uint16_t stream_udp_reply_len(struct stream_ctx *ctx) else if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) return 0; else - return ctx->stream_cfg->data[ctx->stream_cfg->actions[ctx->cur_action].peer].hdr_len + sizeof(struct udp_hdr) + + return ctx->stream_cfg->data[ctx->stream_cfg->actions[ctx->cur_action].peer].hdr_len + sizeof(prox_rte_udp_hdr) + ctx->stream_cfg->actions[ctx->cur_action].len; } @@ -158,7 +158,7 @@ void stream_udp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b for (uint32_t i = 0; i < 
cfg->n_actions; ++i) { const uint32_t send_hdr_len = cfg->actions[i].peer == PEER_CLIENT? client_hdr_len : server_hdr_len; - uint32_t len = send_hdr_len + sizeof(struct udp_hdr) + cfg->actions[i].len; + uint32_t len = send_hdr_len + sizeof(prox_rte_udp_hdr) + cfg->actions[i].len; *n_bytes += (len < 60? 60 : len) + 24; (*n_pkts)++; } diff --git a/VNFs/DPPD-PROX/handle_aggregator.c b/VNFs/DPPD-PROX/handle_aggregator.c index 6434d759..ccf8b8cc 100644 --- a/VNFs/DPPD-PROX/handle_aggregator.c +++ b/VNFs/DPPD-PROX/handle_aggregator.c @@ -44,10 +44,10 @@ (stats)->rx_prio[prio] += ntx; \ } while(0) \ -static inline uint8_t detect_l4_priority(uint8_t l3_priority, const struct ipv4_hdr *ipv4_hdr) +static inline uint8_t detect_l4_priority(uint8_t l3_priority, const prox_rte_ipv4_hdr *ipv4_hdr) { if (ipv4_hdr->next_proto_id == IPPROTO_UDP) { - const struct udp_hdr *udp = (const struct udp_hdr *)((const uint8_t *)ipv4_hdr + sizeof(struct ipv4_hdr)); + const prox_rte_udp_hdr *udp = (const prox_rte_udp_hdr *)((const uint8_t *)ipv4_hdr + sizeof(prox_rte_ipv4_hdr)); if (((udp->src_port == 0x67) && (udp->dst_port == 0x68)) || ((udp->src_port == 0x68) && (udp->dst_port == 0x67))) { return PRIORITY_DHCP; } @@ -55,7 +55,7 @@ static inline uint8_t detect_l4_priority(uint8_t l3_priority, const struct ipv4_ return l3_priority; } -static inline uint8_t detect_l3_priority(uint8_t l2_priority, const struct ipv4_hdr *ipv4_hdr) +static inline uint8_t detect_l3_priority(uint8_t l2_priority, const prox_rte_ipv4_hdr *ipv4_hdr) { uint8_t dscp; if ((ipv4_hdr->version_ihl >> 4) == 4) { @@ -107,10 +107,10 @@ static inline void buffer_packet(struct task_aggregator *task, struct rte_mbuf * static inline void handle_aggregator(struct task_aggregator *task, struct rte_mbuf *mbuf) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); uint8_t priority = 0; const struct qinq_hdr *pqinq; - const struct ipv4_hdr 
*ipv4_hdr; + const prox_rte_ipv4_hdr *ipv4_hdr; const uint16_t eth_type = peth->ether_type; switch (eth_type) { @@ -121,7 +121,7 @@ static inline void handle_aggregator(struct task_aggregator *task, struct rte_mb pqinq = rte_pktmbuf_mtod(mbuf, const struct qinq_hdr *); if ((priority = detect_l2_priority(pqinq)) == OUT_DISCARD) break; - ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1); if ((priority = detect_l3_priority(priority, ipv4_hdr)) == OUT_DISCARD) break; if ((priority = detect_l4_priority(priority, ipv4_hdr)) == OUT_DISCARD) @@ -130,7 +130,7 @@ static inline void handle_aggregator(struct task_aggregator *task, struct rte_mb case ETYPE_VLAN: break; case ETYPE_IPv4: - ipv4_hdr = (const struct ipv4_hdr *)(peth+1); + ipv4_hdr = (const prox_rte_ipv4_hdr *)(peth+1); if ((priority = detect_l3_priority(LOW_PRIORITY, ipv4_hdr)) == OUT_DISCARD) break; if ((priority = detect_l4_priority(priority, ipv4_hdr)) == OUT_DISCARD) diff --git a/VNFs/DPPD-PROX/handle_arp.c b/VNFs/DPPD-PROX/handle_arp.c index 767cee11..2da98ef2 100644 --- a/VNFs/DPPD-PROX/handle_arp.c +++ b/VNFs/DPPD-PROX/handle_arp.c @@ -28,7 +28,7 @@ struct task_arp { struct task_base base; - struct ether_addr src_mac; + prox_rte_ether_addr src_mac; uint32_t seed; uint32_t flags; uint32_t ip; @@ -44,7 +44,7 @@ static void task_update_config(struct task_arp *task) task->ip = task->tmp_ip; } -static void handle_arp(struct task_arp *task, struct ether_hdr_arp *hdr, struct ether_addr *s_addr) +static void handle_arp(struct task_arp *task, struct ether_hdr_arp *hdr, prox_rte_ether_addr *s_addr) { build_arp_reply(hdr, s_addr); } @@ -56,7 +56,7 @@ static int handle_arp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin uint8_t out[MAX_PKT_BURST] = {0}; struct rte_mbuf *replies_mbufs[64] = {0}, *arp_pkt_mbufs[64] = {0}; int n_arp_reply_pkts = 0, n_other_pkts = 0,n_arp_pkts = 0; - struct ether_addr s_addr; + prox_rte_ether_addr s_addr; for (uint16_t j = 0; j < 
n_pkts; ++j) { hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *); @@ -130,7 +130,7 @@ static void init_task_arp(struct task_base *tbase, struct task_args *targ) task->arp_replies_ring = OUT_DISCARD; task->seed = rte_rdtsc(); - memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw_sw.tx_port_queue.port].eth_addr, sizeof(struct ether_addr)); + memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw_sw.tx_port_queue.port].eth_addr, sizeof(prox_rte_ether_addr)); task->ip = rte_cpu_to_be_32(targ->local_ipv4); task->tmp_ip = task->ip; diff --git a/VNFs/DPPD-PROX/handle_blockudp.c b/VNFs/DPPD-PROX/handle_blockudp.c index 04c945e5..8dbfea8a 100644 --- a/VNFs/DPPD-PROX/handle_blockudp.c +++ b/VNFs/DPPD-PROX/handle_blockudp.c @@ -35,8 +35,8 @@ static int handle_blockudp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs uint16_t j; for (j = 0; j < n_pkts; ++j) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *); - struct ipv4_hdr *pip = (struct ipv4_hdr *) (peth + 1); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *); + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *) (peth + 1); out[j] = peth->ether_type == ETYPE_IPv4 && pip->next_proto_id == 0x11 ? 
OUT_DISCARD : 0; } diff --git a/VNFs/DPPD-PROX/handle_cgnat.c b/VNFs/DPPD-PROX/handle_cgnat.c index 0aa6876f..f516921c 100644 --- a/VNFs/DPPD-PROX/handle_cgnat.c +++ b/VNFs/DPPD-PROX/handle_cgnat.c @@ -111,9 +111,9 @@ struct task_nat { static __m128i proto_ipsrc_portsrc_mask; static __m128i proto_ipdst_portdst_mask; struct pkt_eth_ipv4 { - struct ether_hdr ether_hdr; - struct ipv4_hdr ipv4_hdr; - struct udp_hdr udp_hdr; + prox_rte_ether_hdr ether_hdr; + prox_rte_ipv4_hdr ipv4_hdr; + prox_rte_udp_hdr udp_hdr; } __attribute__((packed)); void task_cgnat_dump_public_hash(struct task_nat *task) @@ -128,7 +128,7 @@ void task_cgnat_dump_private_hash(struct task_nat *task) static void set_l2(struct task_nat *task, struct rte_mbuf *mbuf, uint8_t nh_idx) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); *((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes; *((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx]; } @@ -136,8 +136,8 @@ static void set_l2(struct task_nat *task, struct rte_mbuf *mbuf, uint8_t nh_idx) static uint8_t route_ipv4(struct task_nat *task, struct rte_mbuf *mbuf) { struct pkt_eth_ipv4 *pkt = rte_pktmbuf_mtod(mbuf, struct pkt_eth_ipv4 *); - struct ipv4_hdr *ip = &pkt->ipv4_hdr; - struct ether_hdr *peth_out; + prox_rte_ipv4_hdr *ip = &pkt->ipv4_hdr; + prox_rte_ether_hdr *peth_out; uint8_t tx_port; uint32_t dst_ip; @@ -394,7 +394,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin private_ip_idx = task->private_flow_entries[port_idx].private_ip_idx; if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc) task->private_ip_info[private_ip_idx].mac_aging_time = tsc; - prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), 
sizeof(prox_rte_ipv4_hdr), task->offload_crc); out[j] = route_ipv4(task, mbufs[j]); } } @@ -486,7 +486,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin } if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc) task->private_ip_info[private_ip_idx].mac_aging_time = tsc; - prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); // TODO: if route fails while just added new key in table, should we delete the key from the table? out[j] = route_ipv4(task, mbufs[j]); if (out[j] && new_entry) { @@ -532,7 +532,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin rte_memcpy(((uint8_t *)(pkt[j])) + 6, &task->src_mac_from_dpdk_port[task->public_entries[port_idx].dpdk_port], 6); out[j] = task->public_entries[port_idx].dpdk_port; } - prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); } return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); } diff --git a/VNFs/DPPD-PROX/handle_classify.c b/VNFs/DPPD-PROX/handle_classify.c index 59b9b266..96a14149 100644 --- a/VNFs/DPPD-PROX/handle_classify.c +++ b/VNFs/DPPD-PROX/handle_classify.c @@ -61,7 +61,7 @@ static inline void handle_classify(struct task_classify *task, struct rte_mbuf * prev_tc = sched->traffic_class; #endif - const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + const prox_rte_ipv4_hdr *ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1); uint8_t dscp = task->dscp[ipv4_hdr->type_of_service >> 2]; uint8_t queue = dscp & 0x3; diff --git a/VNFs/DPPD-PROX/handle_esp.c b/VNFs/DPPD-PROX/handle_esp.c index 31969de3..447fcfa2 100644 --- 
a/VNFs/DPPD-PROX/handle_esp.c +++ b/VNFs/DPPD-PROX/handle_esp.c @@ -62,9 +62,9 @@ struct task_esp_enc { uint8_t cdev_id; uint16_t qp_id; uint32_t local_ipv4; - struct ether_addr local_mac; + prox_rte_ether_addr local_mac; uint32_t remote_ipv4; - struct ether_addr dst_mac; + prox_rte_ether_addr dst_mac; struct rte_mempool *crypto_op_pool; struct rte_mempool *session_pool; struct rte_cryptodev_sym_session *sess; @@ -76,8 +76,8 @@ struct task_esp_dec { uint8_t cdev_id; uint16_t qp_id; uint32_t local_ipv4; - struct ether_addr local_mac; - struct ether_addr dst_mac; + prox_rte_ether_addr local_mac; + prox_rte_ether_addr dst_mac; struct rte_mempool *crypto_op_pool; struct rte_mempool *session_pool; struct rte_cryptodev_sym_session *sess; @@ -242,14 +242,14 @@ static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ) task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4); task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4); - //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr)); + //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr)); struct prox_port_cfg *port = find_reachable_port(targ); - memcpy(&task->local_mac, &port->eth_addr, sizeof(struct ether_addr)); + memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr)); if (targ->flags & TASK_ARG_DST_MAC_SET){ memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac)); plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes)); - //ether_addr_copy(&ptask->dst_mac, &peth->d_addr); + //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr); //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac)); } } @@ -340,14 +340,14 @@ static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ) } task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4); - //memcpy(&task->src_mac, 
&prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr)); + //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr)); struct prox_port_cfg *port = find_reachable_port(targ); - memcpy(&task->local_mac, &port->eth_addr, sizeof(struct ether_addr)); + memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr)); if (targ->flags & TASK_ARG_DST_MAC_SET){ memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac)); plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n", MAC_BYTES(task->dst_mac.addr_bytes)); - //ether_addr_copy(&ptask->dst_mac, &peth->d_addr); + //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr); //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac)); } @@ -356,8 +356,8 @@ static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ) static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop) { u8 *data; - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1); uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length); struct rte_crypto_sym_op *sym_cop = cop->sym; @@ -376,54 +376,54 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet) int l1 = rte_pktmbuf_pkt_len(mbuf); - int padding = l1 - (ipv4_length + sizeof(struct ether_hdr)); + int padding = l1 - (ipv4_length + sizeof(prox_rte_ether_hdr)); if (unlikely(padding > 0)) { rte_pktmbuf_trim(mbuf, padding); } l1 = rte_pktmbuf_pkt_len(mbuf); - int encrypt_len = l1 - sizeof(struct ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next 
header(1) + int encrypt_len = l1 - sizeof(prox_rte_ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next header(1) padding = 0; if ((encrypt_len & 0xf) != 0){ padding = 16 - (encrypt_len % 16); encrypt_len += padding; } - const int extra_space = sizeof(struct ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC; + const int extra_space = sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC; - struct ether_addr src_mac = peth->s_addr; - struct ether_addr dst_mac = peth->d_addr; + prox_rte_ether_addr src_mac = peth->s_addr; + prox_rte_ether_addr dst_mac = peth->d_addr; uint32_t src_addr = pip4->src_addr; uint32_t dst_addr = pip4->dst_addr; uint8_t ttl = pip4->time_to_live; uint8_t version_ihl = pip4->version_ihl; - peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix - peth = (struct ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV - peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix + peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV + peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); l1 = rte_pktmbuf_pkt_len(mbuf); peth->ether_type = ETYPE_IPv4; #if 0 //send it back - ether_addr_copy(&dst_mac, &peth->s_addr); - ether_addr_copy(&src_mac, &peth->d_addr); + prox_rte_ether_addr_copy(&dst_mac, &peth->s_addr); + prox_rte_ether_addr_copy(&src_mac, &peth->d_addr); #else - ether_addr_copy(&task->local_mac, &peth->s_addr); - //ether_addr_copy(&dst_mac, &peth->d_addr);//IS: dstmac should be rewritten by arp - ether_addr_copy(&task->dst_mac, &peth->d_addr); + prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr); + //prox_rte_ether_addr_copy(&dst_mac, 
&peth->d_addr);//IS: dstmac should be rewritten by arp + prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr); #endif - pip4 = (struct ipv4_hdr *)(peth + 1); + pip4 = (prox_rte_ipv4_hdr *)(peth + 1); pip4->src_addr = task->local_ipv4; pip4->dst_addr = task->remote_ipv4; pip4->time_to_live = ttl; pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip in ip next proto trailer pip4->version_ihl = version_ihl; // 20 bytes, ipv4 - pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(struct ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth + pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth pip4->packet_id = 0x0101; pip4->type_of_service = 0; pip4->time_to_live = 64; - prox_ip_cksum(mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), 1); + prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1); data = (u8*)(pip4 + 1); #if 0 @@ -443,12 +443,12 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb *(padl + 1) = 4; // ipv4 in 4 sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len; - //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len)); - sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len)); + //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len)); + sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, 
(sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len)); //sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1; //sym_cop->cipher.iv.data = data + 8; - //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4; + //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4; //sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC; //rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC); @@ -457,7 +457,7 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb rte_memcpy(iv_ptr, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC); #if 0//old - sym_cop->cipher.data.offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC; + sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC; sym_cop->cipher.data.length = encrypt_len; uint64_t *iv = (uint64_t *)(pesp + 1); @@ -465,11 +465,11 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb #else //uint64_t *iv = (uint64_t *)(pesp + 1); //memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC); - sym_cop->cipher.data.offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct prox_esp_hdr); + sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr); sym_cop->cipher.data.length = encrypt_len + CIPHER_IV_LENGTH_AES_CBC; #endif - sym_cop->auth.data.offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr); + sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr); sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;// + 4;// FIXME sym_cop->m_src = mbuf; @@ -483,8 +483,8 @@ static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mb static inline 
uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop) { struct rte_crypto_sym_op *sym_cop = cop->sym; - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1); uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length); u8 *data = (u8*)(pip4 + 1); @@ -497,12 +497,12 @@ static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mb rte_crypto_op_attach_sym_session(cop, task->sess); sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 + ipv4_length - DIGEST_BYTE_LENGTH_SHA1); - //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME - sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct prox_esp_hdr)); + //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME + sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); //sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1; //sym_cop->cipher.iv.data = (uint8_t *)data + 8; - //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4; + //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4; //sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC; #if 0 @@ -516,11 +516,11 @@ static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mb CIPHER_IV_LENGTH_AES_CBC); #endif - sym_cop->auth.data.offset = sizeof(struct 
ether_hdr) + sizeof(struct ipv4_hdr); - sym_cop->auth.data.length = ipv4_length - sizeof(struct ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC; + sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr); + sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC; - sym_cop->cipher.data.offset = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC; - sym_cop->cipher.data.length = ipv4_length - sizeof(struct ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME + sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC; + sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME sym_cop->m_src = mbuf; return 0; @@ -528,12 +528,12 @@ static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mb static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ether_addr src_mac = peth->s_addr; - struct ether_addr dst_mac = peth->d_addr; + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ether_addr src_mac = peth->s_addr; + prox_rte_ether_addr dst_mac = peth->d_addr; uint32_t src_ip, dst_ip; - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1); src_ip = pip4->src_addr; dst_ip = pip4->dst_addr; @@ -541,15 +541,15 @@ static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf peth->d_addr = src_mac;//should be replaced by arp pip4->src_addr = dst_ip; pip4->dst_addr = src_ip; - ether_addr_copy(&task->local_mac, &peth->s_addr); + prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr); } static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf) { - 
struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - rte_memcpy(((u8*)peth) + sizeof(struct ether_hdr), ((u8*)peth) + sizeof(struct ether_hdr) + - + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(struct ipv4_hdr));// next hdr, padding - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + rte_memcpy(((u8*)peth) + sizeof(prox_rte_ether_hdr), ((u8*)peth) + sizeof(prox_rte_ether_hdr) + + + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(prox_rte_ipv4_hdr));// next hdr, padding + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1); if (unlikely((pip4->version_ihl >> 4) != 4)) { plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl); @@ -564,22 +564,22 @@ static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct return OUT_DISCARD; } uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length); - rte_memcpy(((u8*)peth) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr), - ((u8*)peth) + sizeof(struct ether_hdr) + - + 2 * sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(struct ipv4_hdr)); + rte_memcpy(((u8*)peth) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr), + ((u8*)peth) + sizeof(prox_rte_ether_hdr) + + + 2 * sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(prox_rte_ipv4_hdr)); int len = rte_pktmbuf_pkt_len(mbuf); - rte_pktmbuf_trim(mbuf, len - sizeof(struct ether_hdr) - ipv4_length); - peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length); + peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); #if 0 do_ipv4_swap(task, mbuf); #else - ether_addr_copy(&task->local_mac, &peth->s_addr); - ether_addr_copy(&task->dst_mac, &peth->d_addr); + prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr); + prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr); 
//rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac)); #endif - prox_ip_cksum(mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), 1); + prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1); return 0; } @@ -587,10 +587,10 @@ static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct static inline uint8_t handle_esp_ah_dec_finish2(struct task_esp_dec *task, struct rte_mbuf *mbuf) { u8* m = rte_pktmbuf_mtod(mbuf, u8*); - rte_memcpy(m+sizeof(struct ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC, - m, sizeof(struct ether_hdr)); - m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(struct ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC); - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(m+sizeof(struct ether_hdr)); + rte_memcpy(m+sizeof(prox_rte_ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC, + m, sizeof(prox_rte_ether_hdr)); + m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr)+sizeof(struct prox_esp_hdr)+CIPHER_IV_LENGTH_AES_CBC); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(m+sizeof(prox_rte_ether_hdr)); if (unlikely((pip4->version_ihl >> 4) != 4)) { plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl); @@ -606,18 +606,18 @@ static inline uint8_t handle_esp_ah_dec_finish2(struct task_esp_dec *task, struc } uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length); int len = rte_pktmbuf_pkt_len(mbuf); - rte_pktmbuf_trim(mbuf, len - sizeof(struct ether_hdr) - ipv4_length); + rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) - ipv4_length); #if 0 do_ipv4_swap(task, mbuf); #else - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - ether_addr_copy(&task->local_mac, &peth->s_addr); - ether_addr_copy(&task->dst_mac, &peth->d_addr); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr); + prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr); 
//rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac)); #endif - prox_ip_cksum(mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), 1); + prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), 1); return 0; } diff --git a/VNFs/DPPD-PROX/handle_fm.c b/VNFs/DPPD-PROX/handle_fm.c index 75df01f0..470082b0 100644 --- a/VNFs/DPPD-PROX/handle_fm.c +++ b/VNFs/DPPD-PROX/handle_fm.c @@ -51,11 +51,11 @@ struct task_fm { }; struct eth_ip4_udp { - struct ether_hdr l2; - struct ipv4_hdr l3; + prox_rte_ether_hdr l2; + prox_rte_ipv4_hdr l3; union { - struct udp_hdr udp; - struct tcp_hdr tcp; + prox_rte_udp_hdr udp; + prox_rte_tcp_hdr tcp; } l4; } __attribute__((packed)); @@ -104,8 +104,8 @@ static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct fi_flipped->port_src = p->l4.udp.dst_port; fi_flipped->port_dst = p->l4.udp.src_port; - *len = rte_be_to_cpu_16(p->l4.udp.dgram_len) - sizeof(struct udp_hdr); - *payload = (uint8_t*)(&p->l4.udp) + sizeof(struct udp_hdr); + *len = rte_be_to_cpu_16(p->l4.udp.dgram_len) - sizeof(prox_rte_udp_hdr); + *payload = (uint8_t*)(&p->l4.udp) + sizeof(prox_rte_udp_hdr); return 0; } else if (pkt_type.val == pkt_type_tcp.val) { @@ -121,7 +121,7 @@ static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct fi_flipped->port_src = p->l4.tcp.dst_port; fi_flipped->port_dst = p->l4.tcp.src_port; - *len = rte_be_to_cpu_16(p->l3.total_length) - sizeof(struct ipv4_hdr) - ((p->l4.tcp.data_off >> 4)*4); + *len = rte_be_to_cpu_16(p->l3.total_length) - sizeof(prox_rte_ipv4_hdr) - ((p->l4.tcp.data_off >> 4)*4); *payload = ((uint8_t*)&p->l4.tcp) + ((p->l4.tcp.data_off >> 4)*4); return 0; } @@ -132,7 +132,7 @@ static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct static int is_flow_beg(const struct flow_info *fi, const struct eth_ip4_udp *p) { return fi->ip_proto == IPPROTO_UDP || - (fi->ip_proto == IPPROTO_TCP && p->l4.tcp.tcp_flags & TCP_SYN_FLAG); + 
(fi->ip_proto == IPPROTO_TCP && p->l4.tcp.tcp_flags & PROX_RTE_TCP_SYN_FLAG); } static void *lookup_flow(struct task_fm *task, struct flow_info *fi, uint64_t now_tsc) diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c index 4fd2c399..7c67b3ce 100644 --- a/VNFs/DPPD-PROX/handle_gen.c +++ b/VNFs/DPPD-PROX/handle_gen.c @@ -116,18 +116,18 @@ struct task_gen { uint64_t accur[ACCURACY_WINDOW]; uint64_t pkt_tsc_offset[64]; struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */ - struct ether_addr src_mac; + prox_rte_ether_addr src_mac; uint8_t flags; uint8_t cksum_offload; struct prox_port_cfg *port; uint64_t *bytes_to_tsc; } __rte_cache_aligned; -static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip) +static inline uint8_t ipv4_get_hdr_len(prox_rte_ipv4_hdr *ip) { /* Optimize for common case of IPv4 header without options. */ if (ip->version_ihl == 0x45) - return sizeof(struct ipv4_hdr); + return sizeof(prox_rte_ipv4_hdr); if (unlikely(ip->version_ihl >> 4 != 4)) { plog_warn("IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4); return 0; @@ -137,16 +137,16 @@ static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip) static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, uint16_t len) { - *l2_len = sizeof(struct ether_hdr); + *l2_len = sizeof(prox_rte_ether_hdr); *l3_len = 0; - struct vlan_hdr *vlan_hdr; - struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt; - struct ipv4_hdr *ip; + prox_rte_vlan_hdr *vlan_hdr; + prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt; + prox_rte_ipv4_hdr *ip; uint16_t ether_type = eth_hdr->ether_type; // Unstack VLAN tags - while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(struct vlan_hdr) < len)) { - vlan_hdr = (struct vlan_hdr *)(pkt + *l2_len); + while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(prox_rte_vlan_hdr) < len)) { + vlan_hdr = (prox_rte_vlan_hdr *)(pkt + 
*l2_len); *l2_len +=4; ether_type = vlan_hdr->eth_proto; } @@ -173,7 +173,7 @@ static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, ui } if (*l2_len) { - struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + *l2_len); + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(pkt + *l2_len); *l3_len = ipv4_get_hdr_len(ip); } } @@ -184,7 +184,7 @@ static void checksum_packet(uint8_t *hdr, struct rte_mbuf *mbuf, struct pkt_temp uint16_t l3_len = pkt_template->l3_len; if (l2_len) { - struct ipv4_hdr *ip = (struct ipv4_hdr*)(hdr + l2_len); + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(hdr + l2_len); prox_ip_udp_cksum(mbuf, ip, l2_len, l3_len, cksum_offload); } } @@ -528,7 +528,7 @@ static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbuf struct pkt_template *pktpl = &task->pkt_template[task->pkt_idx]; struct pkt_template *pkt_template = &task->pkt_template[task->pkt_idx]; pkt_template_init_mbuf(pkt_template, mbufs[i], pkt_hdr[i]); - struct ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i]; + prox_rte_ether_hdr *hdr = (prox_rte_ether_hdr *)pkt_hdr[i]; if (task->lat_enabled) { #ifdef NO_EXTRAPOLATION task->pkt_tsc_offset[i] = 0; @@ -570,16 +570,16 @@ static inline void register_all_ip_to_ctrl_plane(struct task_gen *task) struct pkt_template *pktpl = &task->pkt_template[i]; unsigned int ip_src_pos = 0; int maybe_ipv4 = 0; - unsigned int l2_len = sizeof(struct ether_hdr); + unsigned int l2_len = sizeof(prox_rte_ether_hdr); uint8_t *pkt = pktpl->buf; - struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt; + prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt; uint16_t ether_type = eth_hdr->ether_type; - struct vlan_hdr *vlan_hdr; + prox_rte_vlan_hdr *vlan_hdr; // Unstack VLAN tags - while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < pktpl->len)) { - vlan_hdr = (struct vlan_hdr *)(pkt + l2_len); + while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + 
sizeof(prox_rte_vlan_hdr) < pktpl->len)) { + vlan_hdr = (prox_rte_vlan_hdr *)(pkt + l2_len); l2_len +=4; ether_type = vlan_hdr->eth_proto; } @@ -590,11 +590,11 @@ static inline void register_all_ip_to_ctrl_plane(struct task_gen *task) if ((ether_type != ETYPE_IPv4) && !maybe_ipv4) continue; - struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + l2_len); + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(pkt + l2_len); PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4); // Even if IPv4 header contains options, options are after ip src and dst - ip_src_pos = l2_len + sizeof(struct ipv4_hdr) - 2 * sizeof(uint32_t); + ip_src_pos = l2_len + sizeof(prox_rte_ipv4_hdr) - 2 * sizeof(uint32_t); uint32_t *ip_src = ((uint32_t *)(pktpl->buf + ip_src_pos)); plog_info("\tip_src_pos = %d, ip_src = %x\n", ip_src_pos, *ip_src); register_ip_to_ctrl_plane(tbase->l3.tmaster, *ip_src, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id); @@ -761,7 +761,7 @@ static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic) { - const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr); + const uint16_t min_len = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr); const uint16_t max_len = task->max_frame_size; if (do_panic) { @@ -850,24 +850,24 @@ static void task_gen_pkt_template_recalc_metadata(struct task_gen *task) static void task_gen_pkt_template_recalc_checksum(struct task_gen *task) { struct pkt_template *template; - struct ipv4_hdr *ip; + prox_rte_ipv4_hdr *ip; task->runtime_checksum_needed = 0; for (size_t i = 0; i < task->n_pkts; ++i) { template = &task->pkt_template[i]; if (template->l2_len == 0) continue; - ip = (struct ipv4_hdr *)(template->buf + template->l2_len); + ip = (prox_rte_ipv4_hdr *)(template->buf + template->l2_len); ip->hdr_checksum = 0; prox_ip_cksum_sw(ip); uint32_t l4_len = 
rte_bswap16(ip->total_length) - template->l3_len; if (ip->next_proto_id == IPPROTO_UDP) { - struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t *)ip) + template->l3_len); + prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t *)ip) + template->l3_len); prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr); } else if (ip->next_proto_id == IPPROTO_TCP) { - struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t *)ip) + template->l3_len); + prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t *)ip) + template->l3_len); prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr); } @@ -941,7 +941,7 @@ static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_arg "Failed to allocate %u bytes (in huge pages) for packet\n", task->max_frame_size); PROX_PANIC(targ->pkt_size > task->max_frame_size, - targ->pkt_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ? + targ->pkt_size > PROX_RTE_ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ? "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu"); rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size); @@ -962,7 +962,7 @@ static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *tar task->n_pkts = pcap_count_pkts(handle, &max_frame_size); plogx_info("%u packets in pcap file '%s'; max frame size=%d\n", task->n_pkts, targ->pcap_file, max_frame_size); PROX_PANIC(max_frame_size > task->max_frame_size, - max_frame_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE -4 ? + max_frame_size > PROX_RTE_ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE -4 ? 
"pkt_size too high and jumbo frames disabled" : "pkt_size > mtu"); if (targ->n_pkts) @@ -1081,7 +1081,7 @@ void task_gen_reset_values(struct task_base *tbase) task_gen_reset_pkt_templates_content(task); if (task->flags & TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC) { for (uint32_t i = 0; i < task->n_pkts; ++i) { - rte_memcpy(&task->pkt_template[i].buf[sizeof(struct ether_addr)], &task->src_mac, sizeof(struct ether_addr)); + rte_memcpy(&task->pkt_template[i].buf[sizeof(struct ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr)); } } } @@ -1237,10 +1237,10 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) if (port) { task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM); task->port = port; - task->max_frame_size = port->mtu + ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE; + task->max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE; } else { // Not generating to any port... - task->max_frame_size = ETHER_MAX_LEN; + task->max_frame_size = PROX_RTE_ETHER_MAX_LEN; } task->local_mbuf.mempool = task_gen_create_mempool(targ, task->max_frame_size); PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n"); @@ -1315,9 +1315,9 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) PROX_PANIC(((targ->nb_txrings == 0) && (targ->nb_txports == 0)), "Gen mode requires a tx ring or a tx port"); if ((targ->flags & DSF_KEEP_SRC_MAC) == 0) { task->flags |= TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC; - memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr)); + memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr)); for (uint32_t i = 0; i < task->n_pkts; ++i) { - rte_memcpy(&task->pkt_template[i].buf[sizeof(struct ether_addr)], &task->src_mac, sizeof(struct ether_addr)); + rte_memcpy(&task->pkt_template[i].buf[sizeof(struct 
ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr)); } } for (uint32_t i = 0; i < targ->n_rand_str; ++i) { diff --git a/VNFs/DPPD-PROX/handle_genl4.c b/VNFs/DPPD-PROX/handle_genl4.c index 056bd838..49fde3fc 100644 --- a/VNFs/DPPD-PROX/handle_genl4.c +++ b/VNFs/DPPD-PROX/handle_genl4.c @@ -439,9 +439,9 @@ static int handle_gen_scheduled(struct task_gen_server *task) } else { - struct ether_hdr *eth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ipv4_hdr *ip = (struct ipv4_hdr*)(eth + 1); - struct tcp_hdr *tcp = (struct tcp_hdr*)(ip + 1); + prox_rte_ether_hdr *eth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(eth + 1); + prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr*)(ip + 1); task->out_saved = 0; task->cancelled = 1; @@ -732,8 +732,8 @@ static int lua_to_stream_cfg(struct lua_State *L, enum lua_place from, const cha const uint64_t hz = rte_get_tsc_hz(); - ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, ETHER_MAX_LEN + 20); - ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, ETHER_MAX_LEN + 20); + ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, PROX_RTE_ETHER_MAX_LEN + 20); + ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, PROX_RTE_ETHER_MAX_LEN + 20); if (!strcmp(proto, "tcp")) { ret->proto = IPPROTO_TCP; @@ -946,7 +946,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) struct token_time_cfg tt_cfg = { .bpp = targ->rate_bps, .period = rte_get_tsc_hz(), - .bytes_max = n_descriptors * (ETHER_MIN_LEN + 20), + .bytes_max = n_descriptors * (PROX_RTE_ETHER_MIN_LEN + 20), }; token_time_init(&task->token_time, &tt_cfg); @@ -1025,7 +1025,7 @@ static void init_task_gen_client(struct task_base *tbase, struct task_args *targ task->heap = heap_create(targ->n_concur_conn, socket); task->seed = rte_rdtsc(); - /* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */ + /* task->token_time.bytes_max = MAX_PKT_BURST * (PROX_RTE_ETHER_MAX_LEN + 20); 
*/ /* To avoid overflowing the tx descriptors, the token bucket size needs to be limited. The descriptors are filled most @@ -1037,7 +1037,7 @@ static void init_task_gen_client(struct task_base *tbase, struct task_args *targ struct token_time_cfg tt_cfg = { .bpp = targ->rate_bps, .period = rte_get_tsc_hz(), - .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20), + .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (PROX_RTE_ETHER_MIN_LEN + 20), }; token_time_init(&task->token_time, &tt_cfg); diff --git a/VNFs/DPPD-PROX/handle_gre_decap_encap.c b/VNFs/DPPD-PROX/handle_gre_decap_encap.c index 02ba4c36..83e430a6 100644 --- a/VNFs/DPPD-PROX/handle_gre_decap_encap.c +++ b/VNFs/DPPD-PROX/handle_gre_decap_encap.c @@ -37,7 +37,7 @@ #include "quit.h" struct cpe_gre_key { - struct ether_addr clt_mac; + prox_rte_ether_addr clt_mac; uint16_t pad; } __attribute__((__packed__)); @@ -219,12 +219,12 @@ void handle_gre_decap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin } struct gre_packet { - struct ether_hdr eth; - struct ipv4_hdr ip; + prox_rte_ether_hdr eth; + prox_rte_ipv4_hdr ip; struct gre_hdr gre; union { - struct ether_hdr eth2; - struct ipv4_hdr ip2; + prox_rte_ether_hdr eth2; + prox_rte_ipv4_hdr ip2; }; } __attribute__((__packed__)); @@ -232,26 +232,26 @@ struct gre_packet { GRE remove gre and ipv4 header and retain space for ethernet header. 
In case of Eth over GRE remove external eth, gre and ipv4 headers and return pointer to payload */ -static inline struct ether_hdr *gre_decap(struct gre_hdr *pgre, struct rte_mbuf *mbuf) +static inline prox_rte_ether_hdr *gre_decap(struct gre_hdr *pgre, struct rte_mbuf *mbuf) { int16_t hsize = 0; if (pgre->type == ETYPE_EoGRE) { - hsize = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + hsize = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr); } else if (pgre->type == ETYPE_IPv4) { - /* retain sizeof(struct ether_hdr) */ - hsize = sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + /* retain sizeof(prox_rte_ether_hdr) */ + hsize = sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr); } else { return NULL; } - return (struct ether_hdr *)rte_pktmbuf_adj(mbuf, hsize); + return (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, hsize); } static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_mbuf *mbuf) { - struct ipv4_hdr *pip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbuf, struct ether_hdr *) + 1); + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *) + 1); if (pip->next_proto_id != IPPROTO_GRE) { plog_warn("Invalid packet proto_id = 0x%x expect 0x%x\n", @@ -265,15 +265,15 @@ static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_m data.gre_id = pgre->gre_id; data.cpe_ip = pip->src_addr; - struct ether_hdr *peth = gre_decap(pgre, mbuf); + prox_rte_ether_hdr *peth = gre_decap(pgre, mbuf); PROX_PANIC(peth != 0, "Failed to gre_decap"); - pip = (struct ipv4_hdr *)(peth + 1); + pip = (prox_rte_ipv4_hdr *)(peth + 1); /* emulate client MAC for test purposes */ #if 1 if (pgre->type == ETYPE_IPv4) { - struct ether_hdr eth = { + prox_rte_ether_hdr eth = { .d_addr = {.addr_bytes = {0x0A, 0x02, 0x0A, 0x0A, 0x00, 0x01}}, .s_addr = {.addr_bytes = @@ -285,9 +285,9 @@ static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_m 
eth.s_addr.addr_bytes[3] = (hip >> 16) & 0xFF; eth.s_addr.addr_bytes[4] = (hip >> 8) & 0xFF; eth.s_addr.addr_bytes[5] = (hip) & 0xFF; - rte_memcpy(peth, &eth, sizeof(struct ether_hdr)); + rte_memcpy(peth, &eth, sizeof(prox_rte_ether_hdr)); } - ether_addr_copy(&peth->s_addr, &key.clt_mac); + prox_rte_ether_addr_copy(&peth->s_addr, &key.clt_mac); #endif data.tsc = rte_rdtsc() + task->cpe_timeout; @@ -303,7 +303,7 @@ static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_m } rte_memcpy(&task->cpe_gre_data[hash_index], &data, sizeof(data)); if (task->runtime_flags & TASK_TX_CRC) { - prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); } return 0; @@ -333,8 +333,8 @@ void handle_gre_encap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out) { for (uint8_t i = 0; i < n_pkts; ++i) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *); - ether_addr_copy(&peth->d_addr, &task->key[i].clt_mac); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *); + prox_rte_ether_addr_copy(&peth->d_addr, &task->key[i].clt_mac); } int32_t hash_index[16]; @@ -359,24 +359,24 @@ static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mb } #ifdef DO_ENC_ETH_OVER_GRE -#define PKT_PREPEND_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr)) +#define PKT_PREPEND_LEN (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr)) #elif DO_ENC_IP_OVER_GRE -#define PKT_PREPEND_LEN (sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr)) +#define PKT_PREPEND_LEN (sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr)) #else static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_mbuf *mbuf, 
struct cpe_gre_data *table) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ipv4_hdr *pip = (struct ipv4_hdr *)(peth + 1); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(peth + 1); uint16_t ip_len = rte_be_to_cpu_16(pip->total_length); struct cpe_gre_key key; - ether_addr_copy(&peth->d_addr, &key.clt_mac); + prox_rte_ether_addr_copy(&peth->d_addr, &key.clt_mac); #ifdef GRE_TP /* policing enabled */ if (task->cycles_per_byte) { - const uint16_t pkt_size = rte_pktmbuf_pkt_len(mbuf) + ETHER_CRC_LEN; + const uint16_t pkt_size = rte_pktmbuf_pkt_len(mbuf) + PROX_RTE_ETHER_CRC_LEN; uint64_t tsc_now = rte_rdtsc(); if (table->tp_tbsize < pkt_size) { uint64_t cycles_diff = tsc_now - table->tp_tsc; @@ -399,19 +399,19 @@ static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_m /* reuse ethernet header from payload, retain payload (ip) in case of DO_ENC_IP_OVER_GRE */ - peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, PKT_PREPEND_LEN); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, PKT_PREPEND_LEN); PREFETCH0(peth); ip_len += PKT_PREPEND_LEN; - pip = (struct ipv4_hdr *)(peth + 1); + pip = (prox_rte_ipv4_hdr *)(peth + 1); struct gre_hdr *pgre = (struct gre_hdr *)(pip + 1); - struct ether_hdr eth = { + prox_rte_ether_hdr eth = { .d_addr = {.addr_bytes = {0x0A, 0x0A, 0x0A, 0xC8, 0x00, 0x02}}, .s_addr = {.addr_bytes = {0x0A, 0x0A, 0x0A, 0xC8, 0x00, 0x01}}, .ether_type = ETYPE_IPv4 }; - rte_memcpy(peth, &eth, sizeof(struct ether_hdr)); + rte_memcpy(peth, &eth, sizeof(prox_rte_ether_hdr)); rte_memcpy(pgre, &gre_hdr_proto, sizeof(struct gre_hdr)); #if DO_ENC_ETH_OVER_GRE @@ -421,13 +421,13 @@ static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_m #endif pgre->gre_id = table->gre_id; - rte_memcpy(pip, &tunnel_ip_proto, sizeof(struct ipv4_hdr)); + rte_memcpy(pip, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr)); 
pip->src_addr = 0x02010a0a; //emulate port ip pip->dst_addr = table->cpe_ip; pip->total_length = rte_cpu_to_be_16(ip_len); if (task->runtime_flags & TASK_TX_CRC) { - prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); } return 0; diff --git a/VNFs/DPPD-PROX/handle_impair.c b/VNFs/DPPD-PROX/handle_impair.c index 805dedfc..3896b70f 100644 --- a/VNFs/DPPD-PROX/handle_impair.c +++ b/VNFs/DPPD-PROX/handle_impair.c @@ -192,18 +192,18 @@ static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mb { struct task_impair *task = (struct task_impair *)tbase; uint8_t out[MAX_PKT_BURST]; - struct ether_hdr * hdr[MAX_PKT_BURST]; + prox_rte_ether_hdr * hdr[MAX_PKT_BURST]; int ret = 0; for (uint16_t i = 0; i < n_pkts; ++i) { PREFETCH0(mbufs[i]); } for (uint16_t i = 0; i < n_pkts; ++i) { - hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *); + hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *); PREFETCH0(hdr[i]); } if (task->flags & IMPAIR_SET_MAC) { for (uint16_t i = 0; i < n_pkts; ++i) { - ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); out[i] = rand_r(&task->seed) <= task->tresh? 
0 : OUT_DISCARD; } } else { @@ -224,12 +224,12 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t enqueue_failed; uint16_t i; int ret = 0; - struct ether_hdr * hdr[MAX_PKT_BURST]; + prox_rte_ether_hdr * hdr[MAX_PKT_BURST]; for (uint16_t i = 0; i < n_pkts; ++i) { PREFETCH0(mbufs[i]); } for (uint16_t i = 0; i < n_pkts; ++i) { - hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *); + hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *); PREFETCH0(hdr[i]); } @@ -238,7 +238,7 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, /* We know n_pkts fits, no need to check for every packet */ for (i = 0; i < n_pkts; ++i) { if (task->flags & IMPAIR_SET_MAC) - ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); task->queue[task->queue_head].tsc = now + task->delay_time; task->queue[task->queue_head].mbuf = mbufs[i]; task->queue_head = (task->queue_head + 1) & task->queue_mask; @@ -247,7 +247,7 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, for (i = 0; i < n_pkts; ++i) { if (((task->queue_head + 1) & task->queue_mask) != task->queue_tail) { if (task->flags & IMPAIR_SET_MAC) - ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); task->queue[task->queue_head].tsc = now + task->delay_time; task->queue[task->queue_head].mbuf = mbufs[i]; task->queue_head = (task->queue_head + 1) & task->queue_mask; @@ -336,12 +336,12 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf ** int ret = 0; uint64_t packet_time, idx; uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK; - struct ether_hdr * hdr[MAX_PKT_BURST]; + prox_rte_ether_hdr * hdr[MAX_PKT_BURST]; for (uint16_t i = 0; i < n_pkts; ++i) { PREFETCH0(mbufs[i]); } for (uint16_t 
i = 0; i < n_pkts; ++i) { - hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *); + hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *); PREFETCH0(hdr[i]); } @@ -352,7 +352,7 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf ** struct queue *queue = &task->buffer[idx]; if (((queue->queue_head + 1) & task->queue_mask) != queue->queue_tail) { if (task->flags & IMPAIR_SET_MAC) - ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr); queue->queue_elem[queue->queue_head].mbuf = mbufs[i]; queue->queue_head = (queue->queue_head + 1) & task->queue_mask; break; @@ -450,7 +450,7 @@ static void init_task(struct task_base *tbase, struct task_args *targ) } random_init_seed(&task->state); if (targ->nb_txports) { - memcpy(&task->src_mac[0], &prox_port_cfg[tbase->tx_params_hw.tx_port_queue[0].port].eth_addr, sizeof(struct ether_addr)); + memcpy(&task->src_mac[0], &prox_port_cfg[tbase->tx_params_hw.tx_port_queue[0].port].eth_addr, sizeof(prox_rte_ether_addr)); task->flags = IMPAIR_SET_MAC; } else { task->flags = 0; diff --git a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c index cf56069e..a99a8f96 100644 --- a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c +++ b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c @@ -49,7 +49,7 @@ struct ipv6_tun_dest { struct ipv6_addr dst_addr; - struct ether_addr dst_mac; + prox_rte_ether_addr dst_mac; }; typedef enum ipv6_tun_dir_t { @@ -59,7 +59,7 @@ typedef enum ipv6_tun_dir_t { struct task_ipv6_tun_base { struct task_base base; - struct ether_addr src_mac; + prox_rte_ether_addr src_mac; uint8_t core_nb; uint64_t keys[64]; struct rte_mbuf* fake_packets[64]; @@ -71,7 +71,7 @@ struct task_ipv6_tun_base { struct task_ipv6_decap { struct task_ipv6_tun_base base; - struct ether_addr dst_mac; + prox_rte_ether_addr dst_mac; }; struct task_ipv6_encap { @@ -131,7 +131,7 @@ static void 
init_lookup_table(struct task_ipv6_tun_base* ptask, struct task_args struct ipv6_tun_binding_entry* entry = &table->entry[idx]; uint64_t key = MAKE_KEY_FROM_FIELDS(rte_cpu_to_be_32(entry->public_ipv4), entry->public_port, ptask->lookup_port_mask); rte_memcpy(&data.dst_addr, &entry->endpoint_addr, sizeof(struct ipv6_addr)); - rte_memcpy(&data.dst_mac, &entry->next_hop_mac, sizeof(struct ether_addr)); + rte_memcpy(&data.dst_mac, &entry->next_hop_mac, sizeof(prox_rte_ether_addr)); int ret = prox_rte_table_key8_add(ptask->lookup_table, &key, &data, &key_found, &entry_in_hash); PROX_PANIC(ret, "Error adding entry (%d) to binding lookup table", idx); @@ -221,16 +221,16 @@ __attribute__((constructor)) static void reg_task_ipv6_encap(void) static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest); static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest); -static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint32_t* pAddr, uint16_t* pPort) +static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_tun_base* ptask, prox_rte_ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint32_t* pAddr, uint16_t* pPort) { *pAddr = (dir == TUNNEL_DIR_DECAP) ? pip4->src_addr : pip4->dst_addr; if (pip4->next_proto_id == IPPROTO_UDP) { - struct udp_hdr* pudp = (struct udp_hdr *)(pip4 + 1); + prox_rte_udp_hdr* pudp = (prox_rte_udp_hdr *)(pip4 + 1); *pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? pudp->src_port : pudp->dst_port); } else if (pip4->next_proto_id == IPPROTO_TCP) { - struct tcp_hdr* ptcp = (struct tcp_hdr *)(pip4 + 1); + prox_rte_tcp_hdr* ptcp = (prox_rte_tcp_hdr *)(pip4 + 1); *pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? 
ptcp->src_port : ptcp->dst_port); } else { @@ -242,7 +242,7 @@ static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_t return 0; } -static inline void extract_key(struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint64_t* pkey) +static inline void extract_key(struct task_ipv6_tun_base* ptask, prox_rte_ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint64_t* pkey) { uint32_t lookup_addr; uint16_t lookup_port; @@ -256,19 +256,19 @@ static inline void extract_key(struct task_ipv6_tun_base* ptask, struct ipv4_hdr *pkey = MAKE_KEY_FROM_FIELDS(lookup_addr, lookup_port, ptask->lookup_port_mask); } -static inline struct ipv4_hdr* get_ipv4_decap(struct rte_mbuf *mbuf) +static inline prox_rte_ipv4_hdr* get_ipv4_decap(struct rte_mbuf *mbuf) { - struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1); - struct ipv4_hdr* pip4 = (struct ipv4_hdr*) (pip6 + 1); // TODO - Skip Option headers + prox_rte_ether_hdr* peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ipv6_hdr* pip6 = (prox_rte_ipv6_hdr *)(peth + 1); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr*) (pip6 + 1); // TODO - Skip Option headers return pip4; } -static inline struct ipv4_hdr* get_ipv4_encap(struct rte_mbuf *mbuf) +static inline prox_rte_ipv4_hdr* get_ipv4_encap(struct rte_mbuf *mbuf) { - struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + prox_rte_ether_hdr* peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1); return pip4; } @@ -303,7 +303,7 @@ __attribute__((cold)) static void handle_error(struct task_ipv6_tun_base* ptask, uint16_t lookup_port; uint64_t key; - struct ipv4_hdr* pip4 = (dir == TUNNEL_DIR_DECAP) ? get_ipv4_decap(mbuf) : get_ipv4_encap(mbuf); + prox_rte_ipv4_hdr* pip4 = (dir == TUNNEL_DIR_DECAP) ? 
get_ipv4_decap(mbuf) : get_ipv4_encap(mbuf); extract_key_fields(ptask, pip4, dir, &lookup_addr, &lookup_port); extract_key(ptask, pip4, dir, &key); @@ -381,9 +381,9 @@ static int handle_ipv6_encap_bulk(struct task_base* tbase, struct rte_mbuf** mbu static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, __attribute__((unused)) struct ipv6_tun_dest* tun_dest) { - struct ether_hdr* peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *); + prox_rte_ether_hdr* peth = rte_pktmbuf_mtod(rx_mbuf, prox_rte_ether_hdr *); struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)ptask; - struct ipv4_hdr* pip4 = NULL; + prox_rte_ipv4_hdr* pip4 = NULL; if (unlikely(peth->ether_type != ETYPE_IPv6)) { plog_warn("Received non IPv6 packet on ipv6 tunnel port\n"); @@ -391,8 +391,8 @@ static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rt return OUT_DISCARD; } - struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1); - int ipv6_hdr_len = sizeof(struct ipv6_hdr); + prox_rte_ipv6_hdr* pip6 = (prox_rte_ipv6_hdr *)(peth + 1); + int ipv6_hdr_len = sizeof(prox_rte_ipv6_hdr); // TODO - Skip over any IPv6 Extension Header: // If pip6->next_header is in (0, 43, 44, 50, 51, 60, 135), skip ahead pip->hdr_ext_len @@ -406,18 +406,18 @@ static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rt // Discard IPv6 encapsulation rte_pktmbuf_adj(rx_mbuf, ipv6_hdr_len); - peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *); - pip4 = (struct ipv4_hdr *)(peth + 1); + peth = rte_pktmbuf_mtod(rx_mbuf, prox_rte_ether_hdr *); + pip4 = (prox_rte_ipv4_hdr *)(peth + 1); // Restore Ethernet header - ether_addr_copy(&ptask->base.src_mac, &peth->s_addr); - ether_addr_copy(&ptask->dst_mac, &peth->d_addr); + prox_rte_ether_addr_copy(&ptask->base.src_mac, &peth->s_addr); + prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr); peth->ether_type = ETYPE_IPv4; #ifdef GEN_DECAP_IPV6_TO_IPV4_CKSUM // generate an IP checksum for 
ipv4 packet if (tun_base->runtime_flags & TASK_TX_CRC) { - prox_ip_cksum(rx_mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), ptask->base.offload_crc); + prox_ip_cksum(rx_mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), ptask->base.offload_crc); } #endif @@ -428,8 +428,8 @@ static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rt { //plog_info("Found tunnel endpoint:"IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(tun_dest->dst_addr), MAC_BYTES(tun_dest->dst_mac.addr_bytes)); - struct ether_hdr* peth = (struct ether_hdr *)(rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *)); - struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1); + prox_rte_ether_hdr* peth = (prox_rte_ether_hdr *)(rte_pktmbuf_mtod(rx_mbuf, prox_rte_ether_hdr *)); + prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1); uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length); struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)ptask; @@ -449,22 +449,22 @@ static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rt pip4->hdr_checksum = 0; // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet) - int padding = rte_pktmbuf_pkt_len(rx_mbuf) - (ipv4_length + sizeof(struct ether_hdr)); + int padding = rte_pktmbuf_pkt_len(rx_mbuf) - (ipv4_length + sizeof(prox_rte_ether_hdr)); if (unlikely(padding > 0)) { rte_pktmbuf_trim(rx_mbuf, padding); } // Encapsulate - const int extra_space = sizeof(struct ipv6_hdr); - peth = (struct ether_hdr *)rte_pktmbuf_prepend(rx_mbuf, extra_space); + const int extra_space = sizeof(prox_rte_ipv6_hdr); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(rx_mbuf, extra_space); // Ethernet Header - ether_addr_copy(&ptask->base.src_mac, &peth->s_addr); - ether_addr_copy(&tun_dest->dst_mac, &peth->d_addr); + prox_rte_ether_addr_copy(&ptask->base.src_mac, &peth->s_addr); + prox_rte_ether_addr_copy(&tun_dest->dst_mac, &peth->d_addr); peth->ether_type = ETYPE_IPv6; 
// Set up IPv6 Header - struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1); + prox_rte_ipv6_hdr* pip6 = (prox_rte_ipv6_hdr *)(peth + 1); pip6->vtc_flow = rte_cpu_to_be_32(IPv6_VERSION << 28); pip6->proto = IPPROTO_IPIP; pip6->payload_len = rte_cpu_to_be_16(ipv4_length); @@ -474,8 +474,8 @@ static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rt if (tun_base->runtime_flags & TASK_TX_CRC) { // We modified the TTL in the IPv4 header, hence have to recompute the IPv4 checksum -#define TUNNEL_L2_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr)) - prox_ip_cksum(rx_mbuf, pip4, TUNNEL_L2_LEN, sizeof(struct ipv4_hdr), ptask->base.offload_crc); +#define TUNNEL_L2_LEN (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv6_hdr)) + prox_ip_cksum(rx_mbuf, pip4, TUNNEL_L2_LEN, sizeof(prox_rte_ipv4_hdr), ptask->base.offload_crc); } return 0; } diff --git a/VNFs/DPPD-PROX/handle_l2fwd.c b/VNFs/DPPD-PROX/handle_l2fwd.c index e5a8c338..35d331b6 100644 --- a/VNFs/DPPD-PROX/handle_l2fwd.c +++ b/VNFs/DPPD-PROX/handle_l2fwd.c @@ -31,32 +31,32 @@ struct task_l2fwd { static int handle_l2fwd_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) { struct task_l2fwd *task = (struct task_l2fwd *)tbase; - struct ether_hdr *hdr; - struct ether_addr mac; + prox_rte_ether_hdr *hdr; + prox_rte_ether_addr mac; if ((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) { /* Source and Destination mac hardcoded */ for (uint16_t j = 0; j < n_pkts; ++j) { - hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *); + hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *); rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac)); } } else { for (uint16_t j = 0; j < n_pkts; ++j) { - hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *); + hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *); if ((task->runtime_flags & (TASK_ARG_DO_NOT_SET_SRC_MAC|TASK_ARG_SRC_MAC_SET)) == 0) { /* dst mac will be 
used as src mac */ - ether_addr_copy(&hdr->d_addr, &mac); + prox_rte_ether_addr_copy(&hdr->d_addr, &mac); } if (task->runtime_flags & TASK_ARG_DST_MAC_SET) - ether_addr_copy((struct ether_addr *)&task->src_dst_mac[0], &hdr->d_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[0], &hdr->d_addr); else if ((task->runtime_flags & TASK_ARG_DO_NOT_SET_DST_MAC) == 0) - ether_addr_copy(&hdr->s_addr, &hdr->d_addr); + prox_rte_ether_addr_copy(&hdr->s_addr, &hdr->d_addr); if (task->runtime_flags & TASK_ARG_SRC_MAC_SET) { - ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); } else if ((task->runtime_flags & TASK_ARG_DO_NOT_SET_SRC_MAC) == 0) { - ether_addr_copy(&mac, &hdr->s_addr); + prox_rte_ether_addr_copy(&mac, &hdr->s_addr); } } } @@ -66,7 +66,7 @@ static int handle_l2fwd_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, u static void init_task_l2fwd(struct task_base *tbase, struct task_args *targ) { struct task_l2fwd *task = (struct task_l2fwd *)tbase; - struct ether_addr *src_addr, *dst_addr; + prox_rte_ether_addr *src_addr, *dst_addr; /* * The destination MAC of the outgoing packet is based on the config file: diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c index d3a52d7e..a82e74ad 100644 --- a/VNFs/DPPD-PROX/handle_lat.c +++ b/VNFs/DPPD-PROX/handle_lat.c @@ -792,7 +792,7 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ) uint64_t bytes_per_hz = UINT64_MAX; if (targ->nb_rxports) { struct prox_port_cfg *port = &prox_port_cfg[targ->rx_port_queue[0].port]; - max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE; + max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE; // port->max_link_speed reports the maximum, non negotiated ink speed in Mbps e.g. 40k for a 40 Gbps NIC. 
// It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04) diff --git a/VNFs/DPPD-PROX/handle_lb_5tuple.c b/VNFs/DPPD-PROX/handle_lb_5tuple.c index 7aadf49a..d320ca9d 100644 --- a/VNFs/DPPD-PROX/handle_lb_5tuple.c +++ b/VNFs/DPPD-PROX/handle_lb_5tuple.c @@ -58,7 +58,7 @@ static inline uint8_t get_ipv4_dst_port(struct task_lb_5tuple *task, void *ipv4_ int ret = 0; union ipv4_5tuple_host key; - ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live); + ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(prox_rte_ipv4_hdr, time_to_live); __m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr)); /* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */ key.xmm = _mm_and_si128(data, mask0); @@ -76,15 +76,15 @@ static inline uint8_t get_ipv4_dst_port(struct task_lb_5tuple *task, void *ipv4_ static inline uint8_t handle_lb_5tuple(struct task_lb_5tuple *task, struct rte_mbuf *mbuf) { - struct ether_hdr *eth_hdr; - struct ipv4_hdr *ipv4_hdr; + prox_rte_ether_hdr *eth_hdr; + prox_rte_ipv4_hdr *ipv4_hdr; - eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); switch (eth_hdr->ether_type) { case ETYPE_IPv4: /* Handle IPv4 headers.*/ - ipv4_hdr = (struct ipv4_hdr *) (eth_hdr + 1); + ipv4_hdr = (prox_rte_ipv4_hdr *) (eth_hdr + 1); return get_ipv4_dst_port(task, ipv4_hdr, OUT_DISCARD, task->lookup_hash); default: return OUT_DISCARD; diff --git a/VNFs/DPPD-PROX/handle_lb_net.c b/VNFs/DPPD-PROX/handle_lb_net.c index 46a7226e..1bfb6c3d 100644 --- a/VNFs/DPPD-PROX/handle_lb_net.c +++ b/VNFs/DPPD-PROX/handle_lb_net.c @@ -357,9 +357,9 @@ static inline uint8_t worker_from_mask(struct task_lb_net *task, uint32_t val) static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, struct rte_mbuf *mbuf) { // For all packets, one by one, remove MPLS tag if any and fills in keys used by "fake" packets - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + 
prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); // Check for MPLS TAG - struct ipv4_hdr *ip; + prox_rte_ipv4_hdr *ip; if (peth->ether_type == ETYPE_MPLSU) { struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); uint32_t mpls_len = 0; @@ -368,12 +368,12 @@ static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, s mpls_len += sizeof(struct mpls_hdr); } mpls_len += sizeof(struct mpls_hdr); - ip = (struct ipv4_hdr *)(mpls + 1); + ip = (prox_rte_ipv4_hdr *)(mpls + 1); switch (ip->version_ihl >> 4) { case 4: // Remove MPLS Tag if requested if (task->runtime_flags & TASK_MPLS_TAGGING) { - peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); peth->ether_type = ETYPE_IPv4; } break; @@ -386,7 +386,7 @@ static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, s } } else { - ip = (struct ipv4_hdr *)(peth + 1); + ip = (prox_rte_ipv4_hdr *)(peth + 1); } // Entry point for the packet => check for packet validity // => do not use extract_key_core(mbufs[j], &task->keys[j]); @@ -416,7 +416,7 @@ static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, s return 0; } -static inline uint8_t lb_ip4(struct task_lb_net *task, struct ipv4_hdr *ip) +static inline uint8_t lb_ip4(struct task_lb_net *task, prox_rte_ipv4_hdr *ip) { if (unlikely(ip->version_ihl >> 4 != 4)) { plog_warn("Expected to receive IPv4 packet but IP version was %d\n", @@ -453,7 +453,7 @@ static inline uint8_t lb_ip4(struct task_lb_net *task, struct ipv4_hdr *ip) return OUT_DISCARD; } -static inline uint8_t lb_ip6(struct task_lb_net *task, struct ipv6_hdr *ip) +static inline uint8_t lb_ip6(struct task_lb_net *task, prox_rte_ipv6_hdr *ip) { if (unlikely((*(uint8_t*)ip) >> 4 != 6)) { plog_warn("Expected to receive IPv6 packet but IP version was %d\n", @@ -465,7 +465,7 @@ static inline uint8_t lb_ip6(struct task_lb_net *task, struct ipv6_hdr *ip) return worker + 
task->nb_worker_threads * IPV6; } -static inline uint8_t lb_mpls(struct task_lb_net *task, struct ether_hdr *peth, struct rte_mbuf *mbuf) +static inline uint8_t lb_mpls(struct task_lb_net *task, prox_rte_ether_hdr *peth, struct rte_mbuf *mbuf) { struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); uint32_t mpls_len = 0; @@ -474,21 +474,21 @@ static inline uint8_t lb_mpls(struct task_lb_net *task, struct ether_hdr *peth, mpls_len += sizeof(struct mpls_hdr); } mpls_len += sizeof(struct mpls_hdr); - struct ipv4_hdr *ip = (struct ipv4_hdr *)(mpls + 1); + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(mpls + 1); switch (ip->version_ihl >> 4) { case 4: if (task->runtime_flags & TASK_MPLS_TAGGING) { - peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); peth->ether_type = ETYPE_IPv4; } return lb_ip4(task, ip); case 6: if (task->runtime_flags & TASK_MPLS_TAGGING) { - peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); peth->ether_type = ETYPE_IPv6; } - return lb_ip6(task, (struct ipv6_hdr *)ip); + return lb_ip6(task, (prox_rte_ipv6_hdr *)ip); default: plogd_warn(mbuf, "Failed Decoding MPLS Packet - neither IPv4 neither IPv6: version %u for packet : \n", ip->version_ihl); return OUT_DISCARD; @@ -507,7 +507,7 @@ static inline uint8_t lb_qinq(struct task_lb_net *task, struct qinq_hdr *qinq) static inline uint8_t handle_lb_net(struct task_lb_net *task, struct rte_mbuf *mbuf) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); const uint16_t len = rte_pktmbuf_pkt_len(mbuf); if (len < 60) { plogd_warn(mbuf, "Unexpected frame len = %d for packet : \n", len); @@ -520,9 +520,9 @@ static inline uint8_t handle_lb_net(struct task_lb_net *task, struct rte_mbuf *m case ETYPE_8021ad: return lb_qinq(task, (struct qinq_hdr *)peth); case ETYPE_IPv4: - 
return lb_ip4(task, (struct ipv4_hdr *)(peth + 1)); + return lb_ip4(task, (prox_rte_ipv4_hdr *)(peth + 1)); case ETYPE_IPv6: - return lb_ip6(task, (struct ipv6_hdr *)(peth + 1)); + return lb_ip6(task, (prox_rte_ipv6_hdr *)(peth + 1)); case ETYPE_LLDP: return OUT_DISCARD; default: diff --git a/VNFs/DPPD-PROX/handle_lb_pos.c b/VNFs/DPPD-PROX/handle_lb_pos.c index 4324e94d..3cf465ce 100644 --- a/VNFs/DPPD-PROX/handle_lb_pos.c +++ b/VNFs/DPPD-PROX/handle_lb_pos.c @@ -81,9 +81,9 @@ union ip_port { }; struct pkt_ether_ipv4_udp { - struct ether_hdr ether; - struct ipv4_hdr ipv4; - struct udp_hdr udp; + prox_rte_ether_hdr ether; + prox_rte_ipv4_hdr ipv4; + prox_rte_udp_hdr udp; } __attribute__((unused)); static uint8_t handle_lb_ip_port(struct task_lb_pos *task, struct rte_mbuf *mbuf) diff --git a/VNFs/DPPD-PROX/handle_lb_qinq.c b/VNFs/DPPD-PROX/handle_lb_qinq.c index 18ff7df4..49ed1b79 100644 --- a/VNFs/DPPD-PROX/handle_lb_qinq.c +++ b/VNFs/DPPD-PROX/handle_lb_qinq.c @@ -245,22 +245,22 @@ int handle_lb_qinq_bulk_set_port(struct task_base *tbase, struct rte_mbuf **mbuf struct qinq_packet { struct qinq_hdr qinq_hdr; union { - struct ipv4_hdr ipv4_hdr; - struct ipv6_hdr ipv6_hdr; + prox_rte_ipv4_hdr ipv4_hdr; + prox_rte_ipv6_hdr ipv6_hdr; }; } __attribute__((packed)); struct qinq_packet_data { - struct ether_addr d_addr; - struct ether_addr s_addr; + prox_rte_ether_addr d_addr; + prox_rte_ether_addr s_addr; uint64_t qinq; } __attribute__((packed)); struct ether_packet { - struct ether_hdr ether_hdr; + prox_rte_ether_hdr ether_hdr; union { - struct ipv4_hdr ipv4_hdr; - struct ipv6_hdr ipv6_hdr; + prox_rte_ipv4_hdr ipv4_hdr; + prox_rte_ipv6_hdr ipv6_hdr; }; } __attribute__((packed)); diff --git a/VNFs/DPPD-PROX/handle_master.c b/VNFs/DPPD-PROX/handle_master.c index c6ae96b1..9a864352 100644 --- a/VNFs/DPPD-PROX/handle_master.c +++ b/VNFs/DPPD-PROX/handle_master.c @@ -56,18 +56,18 @@ static struct my_arp_t arp_request = { }; struct ip_table { - struct ether_addr mac; + 
prox_rte_ether_addr mac; struct rte_ring *ring; }; struct external_ip_table { - struct ether_addr mac; + prox_rte_ether_addr mac; struct rte_ring *rings[PROX_MAX_ARP_REQUESTS]; uint16_t nb_requests; }; struct port_table { - struct ether_addr mac; + prox_rte_ether_addr mac; struct rte_ring *ring; uint32_t ip; uint8_t port; @@ -187,7 +187,7 @@ static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf * key.ip = hdr_arp->arp.data.tpa; key.port = port; if (task->internal_port_table[port].flags & HANDLE_RANDOM_IP_FLAG) { - struct ether_addr mac; + prox_rte_ether_addr mac; plogx_dbg("\tMaster handling ARP request for ip %d.%d.%d.%d on port %d which supports random ip\n", IP4(key.ip), key.port); struct rte_ring *ring = task->internal_port_table[port].ring; create_mac(hdr_arp, &mac); diff --git a/VNFs/DPPD-PROX/handle_mplstag.c b/VNFs/DPPD-PROX/handle_mplstag.c index ce5996eb..ed122a06 100644 --- a/VNFs/DPPD-PROX/handle_mplstag.c +++ b/VNFs/DPPD-PROX/handle_mplstag.c @@ -42,23 +42,23 @@ static void init_task_unmpls(__attribute__((unused)) struct task_base *tbase, static inline uint8_t handle_unmpls(__attribute__((unused)) struct task_unmpls *task, struct rte_mbuf *mbuf) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); uint32_t mpls_len = sizeof(struct mpls_hdr); while (!(mpls->bytes & 0x00010000)) { mpls++; mpls_len += sizeof(struct mpls_hdr); } - uint32_t tot_eth_addr_len = 2*sizeof(struct ether_addr); + uint32_t tot_eth_addr_len = 2*sizeof(prox_rte_ether_addr); rte_memcpy(((uint8_t *)peth) + mpls_len, peth, tot_eth_addr_len); - struct ipv4_hdr *ip = (struct ipv4_hdr *)(mpls + 1); + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(mpls + 1); switch (ip->version_ihl >> 4) { case 4: - peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 
mpls_len); peth->ether_type = ETYPE_IPv4; return 0; case 6: - peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); + peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len); peth->ether_type = ETYPE_IPv6; return 0; default: @@ -109,12 +109,12 @@ static void init_task_tagmpls(__attribute__((unused)) struct task_base *tbase, static inline uint8_t handle_tagmpls(__attribute__((unused)) struct task_tagmpls *task, struct rte_mbuf *mbuf) { - struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 4); + prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 4); PROX_ASSERT(peth); rte_prefetch0(peth); uint32_t mpls = 0; - uint32_t tot_eth_addr_len = 2*sizeof(struct ether_addr); + uint32_t tot_eth_addr_len = 2*sizeof(prox_rte_ether_addr); rte_memcpy(peth, ((uint8_t *)peth) + sizeof(struct mpls_hdr), tot_eth_addr_len); *((uint32_t *)(peth + 1)) = mpls | 0x00010000; // Set BoS to 1 peth->ether_type = ETYPE_MPLSU; diff --git a/VNFs/DPPD-PROX/handle_nat.c b/VNFs/DPPD-PROX/handle_nat.c index 8e6789a4..4cd2de22 100644 --- a/VNFs/DPPD-PROX/handle_nat.c +++ b/VNFs/DPPD-PROX/handle_nat.c @@ -45,8 +45,8 @@ struct task_nat { }; struct pkt_eth_ipv4 { - struct ether_hdr ether_hdr; - struct ipv4_hdr ipv4_hdr; + prox_rte_ether_hdr ether_hdr; + prox_rte_ipv4_hdr ipv4_hdr; } __attribute__((packed)); static int handle_nat(struct task_nat *task, struct rte_mbuf *mbuf) @@ -71,7 +71,7 @@ static int handle_nat(struct task_nat *task, struct rte_mbuf *mbuf) return OUT_DISCARD; *ip_addr = task->entries[ret]; - prox_ip_udp_cksum(mbuf, &pkt->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_udp_cksum(mbuf, &pkt->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); return 0; } diff --git a/VNFs/DPPD-PROX/handle_nsh.c b/VNFs/DPPD-PROX/handle_nsh.c index dbd0e655..c9474255 100644 --- a/VNFs/DPPD-PROX/handle_nsh.c +++ b/VNFs/DPPD-PROX/handle_nsh.c @@ -27,7 +27,7 @@ #include "prefetch.h" 
#include "log.h" -#define VXLAN_GPE_HDR_SZ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + sizeof(struct vxlan_gpe_hdr) + sizeof(struct nsh_hdr) +#define VXLAN_GPE_HDR_SZ sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr) + sizeof(prox_rte_vxlan_gpe_hdr) + sizeof(struct nsh_hdr) #define ETHER_NSH_TYPE 0x4F89 /* 0x894F in little endian */ #define VXLAN_GPE_NSH_TYPE 0xB612 /* 4790 in little endian */ #define VXLAN_GPE_NP 0x4 @@ -51,15 +51,15 @@ static void init_task_decap_nsh(__attribute__((unused)) struct task_base *tbase, static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap_nsh *task, struct rte_mbuf *mbuf) { - struct ether_hdr *eth_hdr = NULL; - struct udp_hdr *udp_hdr = NULL; - struct vxlan_gpe_hdr *vxlan_gpe_hdr = NULL; + prox_rte_ether_hdr *eth_hdr = NULL; + prox_rte_udp_hdr *udp_hdr = NULL; + prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL; uint16_t hdr_len; - eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); if (eth_hdr->ether_type == ETHER_NSH_TYPE) { /* "decapsulate" Ethernet + NSH header by moving packet pointer */ - hdr_len = sizeof(struct ether_hdr) + sizeof(struct nsh_hdr); + hdr_len = sizeof(prox_rte_ether_hdr) + sizeof(struct nsh_hdr); mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len); mbuf->data_off += hdr_len; @@ -74,14 +74,14 @@ static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap } /* check the UDP destination port */ - udp_hdr = (struct udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr)); + udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)); if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) { mbuf->udata64 = 0; return 0; } /* check the Next Protocol field in VxLAN-GPE header */ - vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct 
ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr)); + vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr)); if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP) { mbuf->udata64 = 0; return 0; @@ -131,10 +131,10 @@ static void init_task_encap_nsh(__attribute__((unused)) struct task_base *tbase, static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap_nsh *task, struct rte_mbuf *mbuf) { - struct ether_hdr *eth_hdr = NULL; + prox_rte_ether_hdr *eth_hdr = NULL; struct nsh_hdr *nsh_hdr = NULL; - struct udp_hdr *udp_hdr = NULL; - struct vxlan_gpe_hdr *vxlan_gpe_hdr = NULL; + prox_rte_udp_hdr *udp_hdr = NULL; + prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL; uint16_t hdr_len; if (mbuf == NULL) @@ -148,9 +148,9 @@ static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap mbuf->data_off -= mbuf->udata64; mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->udata64); - eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); if (eth_hdr->ether_type == ETHER_NSH_TYPE) { - nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(struct ether_hdr)); + nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr)); /* decrement Service Index in NSH header */ if (nsh_hdr->sf_index > 0) @@ -162,17 +162,17 @@ static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap return 0; /* check the UDP destination port */ - udp_hdr = (struct udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr)); + udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)); if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) return 0; /* check the Next Protocol field in VxLAN-GPE header */ - vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + 
sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr)); + vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr)); if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP) return 0; /* decrement Service Index in NSH header */ - nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(struct vxlan_gpe_hdr)); + nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(prox_rte_vxlan_gpe_hdr)); if (nsh_hdr->sf_index > 0) nsh_hdr->sf_index -= 1; } diff --git a/VNFs/DPPD-PROX/handle_qinq_decap4.c b/VNFs/DPPD-PROX/handle_qinq_decap4.c index 767c0d17..94efbb1f 100644 --- a/VNFs/DPPD-PROX/handle_qinq_decap4.c +++ b/VNFs/DPPD-PROX/handle_qinq_decap4.c @@ -380,9 +380,9 @@ static int handle_qinq_decap4_bulk(struct task_base *tbase, struct rte_mbuf **mb static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id) { #ifdef USE_QINQ - struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *)); + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *)); #else - struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct ether_hdr *)); + prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *)); #endif uint16_t ip_len = rte_be_to_cpu_16(pip->total_length); uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - 20 - ip_len - sizeof(struct qinq_hdr); @@ -391,15 +391,15 @@ static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, s rte_pktmbuf_trim(mbuf, padlen); } - PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > ETHER_MAX_LEN, + PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > PROX_RTE_ETHER_MAX_LEN, "Would need to fragment packet new size = %u - not implemented\n", rte_pktmbuf_data_len(mbuf) - padlen + 20); #ifdef USE_QINQ /* prepend only 20 bytes 
instead of 28, 8 bytes are present from the QinQ */ - struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 20); + prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 20); #else - struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 28); + prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 28); #endif PROX_ASSERT(peth); @@ -407,16 +407,16 @@ static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, s if (task->runtime_flags & TASK_TX_CRC) { /* calculate IP CRC here to avoid problems with -O3 flag with gcc */ #ifdef MPLS_ROUTING - prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr) + sizeof(struct mpls_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); #else - prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); #endif } /* new IP header */ - struct ipv4_hdr *p_tunnel_ip = (struct ipv4_hdr *)(peth + 1); - rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(struct ipv4_hdr)); - ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + prox_rte_ipv4_hdr *p_tunnel_ip = (prox_rte_ipv4_hdr *)(peth + 1); + rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr)); + ip_len += sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr); p_tunnel_ip->total_length = rte_cpu_to_be_16(ip_len); p_tunnel_ip->src_addr = src_ipv4; @@ -435,7 +435,7 @@ static inline uint16_t calc_padlen(const struct rte_mbuf *mbuf, const uint16_t i static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id, struct task_qinq_decap4 *task) { - PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > ETHER_MAX_LEN, + PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > 
PROX_RTE_ETHER_MAX_LEN, "Would need to fragment packet new size = %u - not implemented\n", rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA); @@ -443,7 +443,7 @@ static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, PROX_ASSERT(packet); PREFETCH0(packet); - struct ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr; + prox_rte_ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr; uint16_t ip_len = rte_be_to_cpu_16(pip->total_length); /* returns 0 on success, returns -ENOENT of failure (or -EINVAL if first or last parameter is NULL) */ @@ -476,16 +476,16 @@ static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, #endif /* New IP header */ - rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(struct ipv4_hdr)); - ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr); + rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr)); + ip_len += sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr); packet->tunnel_ip_hdr.total_length = rte_cpu_to_be_16(ip_len); packet->tunnel_ip_hdr.src_addr = src_ipv4; packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst; if (task->runtime_flags & TASK_TX_CRC) { #ifdef MPLS_ROUTING - prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(prox_rte_ether_hdr) + sizeof(struct mpls_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); #else - prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); #endif } diff --git a/VNFs/DPPD-PROX/handle_qinq_decap6.c b/VNFs/DPPD-PROX/handle_qinq_decap6.c index 77bacb75..d26f312a 100644 --- a/VNFs/DPPD-PROX/handle_qinq_decap6.c +++ 
b/VNFs/DPPD-PROX/handle_qinq_decap6.c @@ -45,7 +45,7 @@ struct task_qinq_decap6 { struct rte_table_hash *cpe_table; uint16_t *user_table; uint32_t bucket_index; - struct ether_addr edaddr; + prox_rte_ether_addr edaddr; struct rte_lpm6 *rte_lpm6; void* period_data; /* used if using dual stack*/ void (*period_func)(void* data); @@ -103,7 +103,7 @@ static void early_init(struct task_args *targ) static inline uint8_t handle_qinq_decap6(struct task_qinq_decap6 *task, struct rte_mbuf *mbuf) { struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbuf, struct qinq_hdr *); - struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1); + prox_rte_ipv6_hdr *pip6 = (prox_rte_ipv6_hdr *)(pqinq + 1); uint16_t svlan = pqinq->svlan.vlan_tci & 0xFF0F; uint16_t cvlan = pqinq->cvlan.vlan_tci & 0xFF0F; @@ -124,11 +124,11 @@ static inline uint8_t handle_qinq_decap6(struct task_qinq_decap6 *task, struct r return OUT_DISCARD; } - pqinq = (struct qinq_hdr *)rte_pktmbuf_adj(mbuf, 2 * sizeof(struct vlan_hdr)); + pqinq = (struct qinq_hdr *)rte_pktmbuf_adj(mbuf, 2 * sizeof(prox_rte_vlan_hdr)); PROX_ASSERT(pqinq); pqinq->ether_type = ETYPE_IPv6; // Dest MAC addresses - ether_addr_copy(&task->edaddr, &pqinq->d_addr); + prox_rte_ether_addr_copy(&task->edaddr, &pqinq->d_addr); return 0; } diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.c b/VNFs/DPPD-PROX/handle_qinq_encap4.c index e5c16af4..ffd9356a 100644 --- a/VNFs/DPPD-PROX/handle_qinq_encap4.c +++ b/VNFs/DPPD-PROX/handle_qinq_encap4.c @@ -444,14 +444,14 @@ static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf * prefetch_pkts(mbufs, n_pkts); for (uint16_t j = 0; j < n_pkts; ++j) { - struct ipv4_hdr* ip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *) + 1); + prox_rte_ipv4_hdr* ip = (prox_rte_ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *) + 1); task->keys[j] = (uint64_t)ip->dst_addr; } prox_rte_table_key8_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries); if 
(likely(lookup_hit_mask == pkts_mask)) { for (uint16_t j = 0; j < n_pkts; ++j) { - struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)); + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr)); uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr); if (padlen) { @@ -467,7 +467,7 @@ static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf * out[j] = OUT_DISCARD; continue; } - struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)); + struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr)); uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr); if (padlen) { @@ -551,7 +551,7 @@ static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct c task->stats_per_user[entry->user]++; #endif if (task->runtime_flags & TASK_TX_CRC) { - prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); } return entry->mac_port.out_idx; } diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.h b/VNFs/DPPD-PROX/handle_qinq_encap4.h index b18b0ca6..999abbd8 100644 --- a/VNFs/DPPD-PROX/handle_qinq_encap4.h +++ b/VNFs/DPPD-PROX/handle_qinq_encap4.h @@ -70,20 +70,20 @@ void init_cpe4_hash(struct task_args *targ); static inline uint8_t mpls_untag(struct rte_mbuf *mbuf) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); const uint16_t eth_type = peth->ether_type; if (eth_type == ETYPE_MPLSU) { - struct ether_hdr *pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + 
prox_rte_ether_hdr *pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4); const struct mpls_hdr *mpls = (const struct mpls_hdr *)(peth + 1); if (mpls->bos == 0) { // Double MPLS tag - pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4); PROX_ASSERT(pneweth); } - const struct ipv4_hdr *pip = (const struct ipv4_hdr *)(pneweth + 1); + const prox_rte_ipv4_hdr *pip = (const prox_rte_ipv4_hdr *)(pneweth + 1); if ((pip->version_ihl >> 4) == 4) { pneweth->ether_type = ETYPE_IPv4; return 1; diff --git a/VNFs/DPPD-PROX/handle_qinq_encap6.c b/VNFs/DPPD-PROX/handle_qinq_encap6.c index b0245cb7..c6538655 100644 --- a/VNFs/DPPD-PROX/handle_qinq_encap6.c +++ b/VNFs/DPPD-PROX/handle_qinq_encap6.c @@ -57,10 +57,10 @@ static void init_task_qinq_encap6(struct task_base *tbase, struct task_args *tar /* Encapsulate IPv6 packet in QinQ where the QinQ is derived from the IPv6 address */ static inline uint8_t handle_qinq_encap6(struct rte_mbuf *mbuf, struct task_qinq_encap6 *task) { - struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(struct vlan_hdr)); + struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(prox_rte_vlan_hdr)); PROX_ASSERT(pqinq); - struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1); + prox_rte_ipv6_hdr *pip6 = (prox_rte_ipv6_hdr *)(pqinq + 1); if (pip6->hop_limits) { pip6->hop_limits--; diff --git a/VNFs/DPPD-PROX/handle_qos.c b/VNFs/DPPD-PROX/handle_qos.c index f6878f7b..5af7a310 100644 --- a/VNFs/DPPD-PROX/handle_qos.c +++ b/VNFs/DPPD-PROX/handle_qos.c @@ -76,7 +76,7 @@ static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbu const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbufs[j], const struct qinq_hdr *); uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci); if (pqinq->ether_type == ETYPE_IPv4) { - const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + const 
prox_rte_ipv4_hdr *ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1); queue = task->dscp[ipv4_hdr->type_of_service >> 2] & 0x3; tc = task->dscp[ipv4_hdr->type_of_service >> 2] >> 2; } else { @@ -92,7 +92,7 @@ static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbu const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbufs[j], const struct qinq_hdr *); uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci); if (pqinq->ether_type == ETYPE_IPv4) { - const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1); + const prox_rte_ipv4_hdr *ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1); queue = task->dscp[ipv4_hdr->type_of_service >> 2] & 0x3; tc = task->dscp[ipv4_hdr->type_of_service >> 2] >> 2; } else { diff --git a/VNFs/DPPD-PROX/handle_routing.c b/VNFs/DPPD-PROX/handle_routing.c index fc9b47dd..29b84382 100644 --- a/VNFs/DPPD-PROX/handle_routing.c +++ b/VNFs/DPPD-PROX/handle_routing.c @@ -178,14 +178,14 @@ static int handle_routing_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, static void set_l2(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); *((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes; *((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx]; } static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx) { - struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, sizeof(struct mpls_hdr)); + prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, sizeof(struct mpls_hdr)); *((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes; *((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx]; @@ -203,8 +203,8 @@ static void set_l2_mpls(struct task_routing *task, struct rte_mbuf 
*mbuf, uint8_ static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_offset, struct rte_mbuf *mbuf) { - struct ipv4_hdr *ip = (struct ipv4_hdr*)(beg + ip_offset); - struct ether_hdr *peth_out; + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(beg + ip_offset); + prox_rte_ether_hdr *peth_out; uint8_t tx_port; uint32_t dst_ip; @@ -218,7 +218,7 @@ static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_o switch(ip->next_proto_id) { case IPPROTO_GRE: { struct gre_hdr *pgre = (struct gre_hdr *)(ip + 1); - dst_ip = ((struct ipv4_hdr *)(pgre + 1))->dst_addr; + dst_ip = ((prox_rte_ipv4_hdr *)(pgre + 1))->dst_addr; break; } case IPPROTO_TCP: @@ -260,7 +260,7 @@ static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_o static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf *mbuf) { struct qinq_hdr *qinq; - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); switch (peth->ether_type) { case ETYPE_8021ad: { @@ -277,7 +277,7 @@ static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf case ETYPE_MPLSU: { /* skip MPLS headers if any for routing */ struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1); - uint32_t count = sizeof(struct ether_hdr); + uint32_t count = sizeof(prox_rte_ether_hdr); while (!(mpls->bytes & 0x00010000)) { mpls++; count += sizeof(struct mpls_hdr); diff --git a/VNFs/DPPD-PROX/handle_swap.c b/VNFs/DPPD-PROX/handle_swap.c index 39131013..5286a4f9 100644 --- a/VNFs/DPPD-PROX/handle_swap.c +++ b/VNFs/DPPD-PROX/handle_swap.c @@ -28,6 +28,7 @@ #include "prefetch.h" #include "igmp.h" #include "prox_cksum.h" +#include "prox_compat.h" struct task_swap { struct task_base base; @@ -45,56 +46,55 @@ struct task_swap { static void write_src_and_dst_mac(struct task_swap *task, struct rte_mbuf *mbuf) { - struct ether_hdr *hdr; - struct ether_addr mac; + 
prox_rte_ether_hdr *hdr; + prox_rte_ether_addr mac; if (unlikely((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET))) { /* Source and Destination mac hardcoded */ - hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac)); } else { - hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); if (likely((task->runtime_flags & TASK_ARG_SRC_MAC_SET) == 0)) { /* dst mac will be used as src mac */ - ether_addr_copy(&hdr->d_addr, &mac); + prox_rte_ether_addr_copy(&hdr->d_addr, &mac); } if (unlikely(task->runtime_flags & TASK_ARG_DST_MAC_SET)) - ether_addr_copy((struct ether_addr *)&task->src_dst_mac[0], &hdr->d_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[0], &hdr->d_addr); else - ether_addr_copy(&hdr->s_addr, &hdr->d_addr); + prox_rte_ether_addr_copy(&hdr->s_addr, &hdr->d_addr); if (unlikely(task->runtime_flags & TASK_ARG_SRC_MAC_SET)) { - ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); } else { - ether_addr_copy(&mac, &hdr->s_addr); + prox_rte_ether_addr_copy(&mac, &hdr->s_addr); } } } - -static inline void build_mcast_mac(uint32_t ip, struct ether_addr *dst_mac) +static inline void build_mcast_mac(uint32_t ip, prox_rte_ether_addr *dst_mac) { // MAC address is 01:00:5e followed by 23 LSB of IP address uint64_t mac = 0x0000005e0001L | ((ip & 0xFFFF7F00L) << 16); - memcpy(dst_mac, &mac, sizeof(struct ether_addr)); + memcpy(dst_mac, &mac, sizeof(prox_rte_ether_addr)); } static inline void build_igmp_message(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t ip, uint8_t igmp_message) { struct task_swap *task = (struct task_swap *)tbase; - struct ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); - 
struct ether_addr dst_mac; + prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); + prox_rte_ether_addr dst_mac; build_mcast_mac(ip, &dst_mac); rte_pktmbuf_pkt_len(mbuf) = 46; rte_pktmbuf_data_len(mbuf) = 46; init_mbuf_seg(mbuf); - ether_addr_copy(&dst_mac, &hdr->d_addr); - ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); + prox_rte_ether_addr_copy(&dst_mac, &hdr->d_addr); + prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr); hdr->ether_type = ETYPE_IPv4; - struct ipv4_hdr *ip_hdr = (struct ipv4_hdr *)(hdr + 1); + prox_rte_ipv4_hdr *ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1); ip_hdr->version_ihl = 0x45; /**< version and header length */ ip_hdr->type_of_service = 0; /**< type of service */ ip_hdr->total_length = rte_cpu_to_be_16(32); /**< length of packet */ @@ -110,25 +110,25 @@ static inline void build_igmp_message(struct task_base *tbase, struct rte_mbuf * pigmp->max_resp_time = 0; pigmp->checksum = 0; pigmp->group_address = ip; - prox_ip_udp_cksum(mbuf, ip_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc); + prox_ip_udp_cksum(mbuf, ip_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc); } static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts) { struct task_swap *task = (struct task_swap *)tbase; - struct ether_hdr *hdr; - struct ether_addr mac; - struct ipv4_hdr *ip_hdr; - struct udp_hdr *udp_hdr; + prox_rte_ether_hdr *hdr; + prox_rte_ether_addr mac; + prox_rte_ipv4_hdr *ip_hdr; + prox_rte_udp_hdr *udp_hdr; struct gre_hdr *pgre; - struct ipv4_hdr *inner_ip_hdr; + prox_rte_ipv4_hdr *inner_ip_hdr; uint32_t ip; uint16_t port; uint8_t out[64] = {0}; struct mpls_hdr *mpls; uint32_t mpls_len = 0; struct qinq_hdr *qinq; - struct vlan_hdr *vlan; + prox_rte_vlan_hdr *vlan; uint16_t j; struct igmpv2_hdr *pigmp; uint8_t type; @@ -142,7 +142,7 @@ static int handle_swap_bulk(struct task_base *tbase, struct 
rte_mbuf **mbufs, ui // TODO 1: check packet is long enough for Ethernet + IP + UDP = 42 bytes for (uint16_t j = 0; j < n_pkts; ++j) { - hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *); + hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *); switch (hdr->ether_type) { case ETYPE_MPLSU: mpls = (struct mpls_hdr *)(hdr + 1); @@ -152,7 +152,7 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui mpls_len += sizeof(struct mpls_hdr); } mpls_len += sizeof(struct mpls_hdr); - ip_hdr = (struct ipv4_hdr *)(mpls + 1); + ip_hdr = (prox_rte_ipv4_hdr *)(mpls + 1); break; case ETYPE_8021ad: qinq = (struct qinq_hdr *)hdr; @@ -161,16 +161,16 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui out[j] = OUT_DISCARD; continue; } - ip_hdr = (struct ipv4_hdr *)(qinq + 1); + ip_hdr = (prox_rte_ipv4_hdr *)(qinq + 1); break; case ETYPE_VLAN: - vlan = (struct vlan_hdr *)(hdr + 1); + vlan = (prox_rte_vlan_hdr *)(hdr + 1); if (vlan->eth_proto == ETYPE_IPv4) { - ip_hdr = (struct ipv4_hdr *)(vlan + 1); + ip_hdr = (prox_rte_ipv4_hdr *)(vlan + 1); } else if (vlan->eth_proto == ETYPE_VLAN) { - vlan = (struct vlan_hdr *)(vlan + 1); + vlan = (prox_rte_vlan_hdr *)(vlan + 1); if (vlan->eth_proto == ETYPE_IPv4) { - ip_hdr = (struct ipv4_hdr *)(vlan + 1); + ip_hdr = (prox_rte_ipv4_hdr *)(vlan + 1); } else if (vlan->eth_proto == ETYPE_IPv6) { plog_warn("Unsupported IPv6\n"); @@ -189,7 +189,7 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui } break; case ETYPE_IPv4: - ip_hdr = (struct ipv4_hdr *)(hdr + 1); + ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1); break; case ETYPE_IPv6: plog_warn("Unsupported IPv6\n"); @@ -212,12 +212,12 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui ip_hdr->src_addr = ip; pgre = (struct gre_hdr *)(ip_hdr + 1); - inner_ip_hdr = ((struct ipv4_hdr *)(pgre + 1)); + inner_ip_hdr = ((prox_rte_ipv4_hdr *)(pgre + 1)); ip = inner_ip_hdr->dst_addr; 
inner_ip_hdr->dst_addr = inner_ip_hdr->src_addr; inner_ip_hdr->src_addr = ip; - udp_hdr = (struct udp_hdr *)(inner_ip_hdr + 1); + udp_hdr = (prox_rte_udp_hdr *)(inner_ip_hdr + 1); // TODO 3.1 : verify proto is UPD or TCP port = udp_hdr->dst_port; udp_hdr->dst_port = udp_hdr->src_port; @@ -230,7 +230,7 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui out[j] = OUT_DISCARD; continue; } - udp_hdr = (struct udp_hdr *)(ip_hdr + 1); + udp_hdr = (prox_rte_udp_hdr *)(ip_hdr + 1); ip_hdr->dst_addr = ip_hdr->src_addr; ip_hdr->src_addr = ip; @@ -311,7 +311,7 @@ void igmp_leave_group(struct task_base *tbase) static void init_task_swap(struct task_base *tbase, struct task_args *targ) { struct task_swap *task = (struct task_swap *)tbase; - struct ether_addr *src_addr, *dst_addr; + prox_rte_ether_addr *src_addr, *dst_addr; /* * The destination MAC of the outgoing packet is based on the config file: diff --git a/VNFs/DPPD-PROX/handle_untag.c b/VNFs/DPPD-PROX/handle_untag.c index 2fc8fe64..ba3c6251 100644 --- a/VNFs/DPPD-PROX/handle_untag.c +++ b/VNFs/DPPD-PROX/handle_untag.c @@ -65,16 +65,16 @@ static int handle_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, u return task->base.tx_pkt(&task->base, mbufs, n_pkts, out); } -static inline uint8_t untag_mpls(struct rte_mbuf *mbuf, struct ether_hdr *peth) +static inline uint8_t untag_mpls(struct rte_mbuf *mbuf, prox_rte_ether_hdr *peth) { - struct ether_hdr *pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + prox_rte_ether_hdr *pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4); const struct mpls_hdr *mpls = (const struct mpls_hdr *)(peth + 1); - const struct ipv4_hdr *pip = (const struct ipv4_hdr *)(mpls + 1); + const prox_rte_ipv4_hdr *pip = (const prox_rte_ipv4_hdr *)(mpls + 1); PROX_ASSERT(pneweth); if (mpls->bos == 0) { // Double MPLS tag - pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4); + pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4); 
PROX_ASSERT(pneweth); } @@ -98,13 +98,13 @@ static uint8_t untag_qinq(struct rte_mbuf *mbuf, struct qinq_hdr *qinq) return OUT_DISCARD; } - rte_pktmbuf_adj(mbuf, sizeof(struct qinq_hdr) - sizeof(struct ether_hdr)); + rte_pktmbuf_adj(mbuf, sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr)); return 0; } static inline uint8_t handle_untag(struct task_untag *task, struct rte_mbuf *mbuf) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); const uint16_t etype = peth->ether_type; if (etype != task->etype) { diff --git a/VNFs/DPPD-PROX/hash_entry_types.h b/VNFs/DPPD-PROX/hash_entry_types.h index e2cbcb3c..6288d5a9 100644 --- a/VNFs/DPPD-PROX/hash_entry_types.h +++ b/VNFs/DPPD-PROX/hash_entry_types.h @@ -18,9 +18,10 @@ #define _HASH_ENTRY_TYPES_H_ #include +#include "prox_compat.h" struct ether_addr_port { - struct ether_addr mac; + prox_rte_ether_addr mac; uint8_t pad; uint8_t out_idx; }; diff --git a/VNFs/DPPD-PROX/log.c b/VNFs/DPPD-PROX/log.c index 56338f89..b07076de 100644 --- a/VNFs/DPPD-PROX/log.c +++ b/VNFs/DPPD-PROX/log.c @@ -144,8 +144,8 @@ static const char* lvl_to_str(int lvl, int always) static int dump_pkt(char *dst, size_t dst_size, const struct rte_mbuf *mbuf) { - const struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, const struct ether_hdr *); - const struct ipv4_hdr *dpip = (const struct ipv4_hdr *)(peth + 1); + const prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, const prox_rte_ether_hdr *); + const prox_rte_ipv4_hdr *dpip = (const prox_rte_ipv4_hdr *)(peth + 1); const uint8_t *pkt_bytes = (const uint8_t *)peth; const uint16_t len = rte_pktmbuf_pkt_len(mbuf); size_t str_len = 0; diff --git a/VNFs/DPPD-PROX/main.c b/VNFs/DPPD-PROX/main.c index e4663fee..d87561d0 100644 --- a/VNFs/DPPD-PROX/main.c +++ b/VNFs/DPPD-PROX/main.c @@ -322,7 +322,7 @@ static void configure_if_rx_queues(struct task_args *targ, uint8_t socket) // If the mbuf size (of the rx task) is 
not big enough, we might receive multiple segments // This is usually the case when setting a big mtu size i.e. enabling jumbo frames. // If the packets get transmitted, then multi segments will have to be enabled on the TX port - uint16_t max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE; + uint16_t max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE; if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) { targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS; } @@ -686,8 +686,8 @@ static void set_mbuf_size(struct task_args *targ) continue; } port = &prox_port_cfg[if_port]; - if (max_frame_size < port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE) - max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE; + if (max_frame_size < port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE) + max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE; if (min_buffer_size < port->min_rx_bufsize) min_buffer_size = port->min_rx_bufsize; diff --git a/VNFs/DPPD-PROX/mbuf_utils.h b/VNFs/DPPD-PROX/mbuf_utils.h index 22d57a39..d48b5098 100644 --- a/VNFs/DPPD-PROX/mbuf_utils.h +++ b/VNFs/DPPD-PROX/mbuf_utils.h @@ -22,6 +22,7 @@ #include #include #include +#include "prox_compat.h" static void init_mbuf_seg(struct rte_mbuf *mbuf) { @@ -35,7 +36,7 @@ static void init_mbuf_seg(struct rte_mbuf *mbuf) static uint16_t pkt_len_to_wire_size(uint16_t pkt_len) { - return (pkt_len < 60? 60 : pkt_len) + ETHER_CRC_LEN + 20; + return (pkt_len < 60? 
60 : pkt_len) + PROX_RTE_ETHER_CRC_LEN + 20; } static uint16_t mbuf_wire_size(const struct rte_mbuf *mbuf) @@ -45,7 +46,7 @@ static uint16_t mbuf_wire_size(const struct rte_mbuf *mbuf) return pkt_len_to_wire_size(pkt_len); } -static uint16_t mbuf_calc_padlen(const struct rte_mbuf *mbuf, void *pkt, struct ipv4_hdr *ipv4) +static uint16_t mbuf_calc_padlen(const struct rte_mbuf *mbuf, void *pkt, prox_rte_ipv4_hdr *ipv4) { uint16_t pkt_len = rte_pktmbuf_pkt_len(mbuf); uint16_t ip_offset = (uint8_t *)ipv4 - (uint8_t*)pkt; diff --git a/VNFs/DPPD-PROX/packet_utils.c b/VNFs/DPPD-PROX/packet_utils.c index 4ab7c9c9..43350134 100644 --- a/VNFs/DPPD-PROX/packet_utils.c +++ b/VNFs/DPPD-PROX/packet_utils.c @@ -26,15 +26,15 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_dst) { - struct vlan_hdr *vlan_hdr; - struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt; - struct ipv4_hdr *ip; + prox_rte_vlan_hdr *vlan_hdr; + prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt; + prox_rte_ipv4_hdr *ip; uint16_t ether_type = eth_hdr->ether_type; - uint16_t l2_len = sizeof(struct ether_hdr); + uint16_t l2_len = sizeof(prox_rte_ether_hdr); // Unstack VLAN tags - while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < len)) { - vlan_hdr = (struct vlan_hdr *)((uint8_t *)pkt + l2_len); + while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) { + vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len); l2_len +=4; ether_type = vlan_hdr->eth_proto; } @@ -58,8 +58,8 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_ break; } - if (l2_len && (l2_len + sizeof(struct ipv4_hdr) <= len)) { - struct ipv4_hdr *ip = (struct ipv4_hdr *)((uint8_t *)pkt + l2_len); + if (l2_len && (l2_len + sizeof(prox_rte_ipv4_hdr) <= len)) { + prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)((uint8_t *)pkt + l2_len); // TODO: implement LPM => 
replace ip_dst by next hop IP DST *ip_dst = ip->dst_addr; return 0; @@ -79,13 +79,13 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d { const uint64_t hz = rte_get_tsc_hz(); struct ether_hdr_arp *packet = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *); - struct ether_addr *mac = &packet->ether_hdr.d_addr; + prox_rte_ether_addr *mac = &packet->ether_hdr.d_addr; uint64_t tsc = rte_rdtsc(); struct l3_base *l3 = &(tbase->l3); if (l3->gw.ip) { if (likely((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_update_time) && (tsc < l3->gw.arp_timeout))) { - memcpy(mac, &l3->gw.mac, sizeof(struct ether_addr)); + memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr)); return SEND_MBUF; } else if (tsc > l3->gw.arp_update_time) { // long time since we have sent an arp, send arp @@ -93,7 +93,7 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d *ip_dst = l3->gw.ip; if ((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_timeout)){ // MAC is valid in the table => send also the mbuf - memcpy(mac, &l3->gw.mac, sizeof(struct ether_addr)); + memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr)); return SEND_MBUF_AND_ARP; } else { // MAC still unknown, or timed out => only send ARP @@ -116,14 +116,14 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d // IP address already in table if ((tsc < l3->optimized_arp_table[idx].arp_update_time) && (tsc < l3->optimized_arp_table[idx].arp_timeout)) { // MAC address was recently updated in table, use it - memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(struct ether_addr)); + memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(prox_rte_ether_addr)); return SEND_MBUF; } else if (tsc > l3->optimized_arp_table[idx].arp_update_time) { // ARP not sent since a long time, send ARP l3->optimized_arp_table[idx].arp_update_time = tsc + l3->arp_update_time * hz / 1000; if (tsc < l3->optimized_arp_table[idx].arp_timeout) { // MAC still valid => also 
send mbuf - memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(struct ether_addr)); + memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(prox_rte_ether_addr)); return SEND_MBUF_AND_ARP; } else { // MAC unvalid => only send ARP @@ -178,14 +178,14 @@ int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_d // IP has been found if (likely((tsc < l3->arp_table[ret].arp_update_time) && (tsc < l3->arp_table[ret].arp_timeout))) { // MAC still valid and ARP sent recently - memcpy(mac, &l3->arp_table[ret].mac, sizeof(struct ether_addr)); + memcpy(mac, &l3->arp_table[ret].mac, sizeof(prox_rte_ether_addr)); return SEND_MBUF; } else if (tsc > l3->arp_table[ret].arp_update_time) { // ARP not sent since a long time, send ARP l3->arp_table[ret].arp_update_time = tsc + l3->arp_update_time * hz / 1000; if (tsc < l3->arp_table[ret].arp_timeout) { // MAC still valid => send also MBUF - memcpy(mac, &l3->arp_table[ret].mac, sizeof(struct ether_addr)); + memcpy(mac, &l3->arp_table[ret].mac, sizeof(prox_rte_ether_addr)); return SEND_MBUF_AND_ARP; } else { return SEND_ARP; @@ -317,7 +317,7 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui } if (idx < l3->n_pkts) { // IP not found; this is a reply while we never asked for the request! 
- memcpy(&l3->optimized_arp_table[idx].mac, &(hdr->arp.data.sha), sizeof(struct ether_addr)); + memcpy(&l3->optimized_arp_table[idx].mac, &(hdr->arp.data.sha), sizeof(prox_rte_ether_addr)); l3->optimized_arp_table[idx].arp_timeout = tsc + l3->arp_timeout * hz / 1000; } } else { @@ -325,7 +325,7 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui if (ret < 0) { plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(ip)); } else { - memcpy(&l3->arp_table[ret].mac, &(hdr->arp.data.sha), sizeof(struct ether_addr)); + memcpy(&l3->arp_table[ret].mac, &(hdr->arp.data.sha), sizeof(prox_rte_ether_addr)); l3->arp_table[ret].arp_timeout = tsc + l3->arp_timeout * hz / 1000; } } diff --git a/VNFs/DPPD-PROX/packet_utils.h b/VNFs/DPPD-PROX/packet_utils.h index cb4dc913..d6e2dbb9 100644 --- a/VNFs/DPPD-PROX/packet_utils.h +++ b/VNFs/DPPD-PROX/packet_utils.h @@ -16,6 +16,7 @@ #ifndef _PACKET_UTILS_H_ #define _PACKET_UTILS_H_ +#include "prox_compat.h" #include "arp.h" #include "quit.h" #include "prox_malloc.h" @@ -42,7 +43,7 @@ struct arp_table { uint64_t arp_update_time; uint64_t arp_timeout; uint32_t ip; - struct ether_addr mac; + prox_rte_ether_addr mac; }; struct l3_base { struct rte_ring *ctrl_plane_ring; diff --git a/VNFs/DPPD-PROX/parse_utils.c b/VNFs/DPPD-PROX/parse_utils.c index 786c2141..887de803 100644 --- a/VNFs/DPPD-PROX/parse_utils.c +++ b/VNFs/DPPD-PROX/parse_utils.c @@ -422,7 +422,7 @@ int parse_ip6(struct ipv6_addr *addr, const char *str2) return 0; } -int parse_mac(struct ether_addr *ether_addr, const char *str2) +int parse_mac(prox_rte_ether_addr *ether_addr, const char *str2) { char str[MAX_STR_LEN_PROC]; char *addr_parts[7]; diff --git a/VNFs/DPPD-PROX/parse_utils.h b/VNFs/DPPD-PROX/parse_utils.h index 6e4bc770..32c95f4e 100644 --- a/VNFs/DPPD-PROX/parse_utils.h +++ b/VNFs/DPPD-PROX/parse_utils.h @@ -18,12 +18,12 @@ #define _PARSE_UTILS_H_ #include +#include "prox_compat.h" #include "ip_subnet.h" #define MAX_STR_LEN_PROC (3 * 
MAX_PKT_SIZE + 20) struct ipv6_addr; -struct ether_addr; enum ctrl_type {CTRL_TYPE_DP, CTRL_TYPE_MSG, CTRL_TYPE_PKT}; @@ -53,7 +53,7 @@ int parse_ip(uint32_t *paddr, const char *saddr); int parse_ip6(struct ipv6_addr *addr, const char *saddr); -int parse_mac(struct ether_addr *paddr, const char *saddr); +int parse_mac(prox_rte_ether_addr *paddr, const char *saddr); /* return error on overflow or invalid suffix*/ int parse_kmg(uint32_t* val, const char *str); diff --git a/VNFs/DPPD-PROX/pkt_parser.h b/VNFs/DPPD-PROX/pkt_parser.h index 285d42f9..746830bf 100644 --- a/VNFs/DPPD-PROX/pkt_parser.h +++ b/VNFs/DPPD-PROX/pkt_parser.h @@ -24,6 +24,7 @@ #include #include +#include "prox_compat.h" #include "log.h" #include "etypes.h" @@ -69,28 +70,28 @@ static void pkt_tuple_debug(const struct pkt_tuple *pt) /* Return 0 on success, i.e. packets parsed without any error. */ static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta *l4_meta) { - struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *); + prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *); size_t l2_types_count = 0; - struct ipv4_hdr* pip = 0; + prox_rte_ipv4_hdr* pip = 0; /* L2 */ pt->l2_types[l2_types_count++] = peth->ether_type; switch (peth->ether_type) { case ETYPE_IPv4: - pip = (struct ipv4_hdr *)(peth + 1); + pip = (prox_rte_ipv4_hdr *)(peth + 1); break; case ETYPE_VLAN: { - struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1); + prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1); pt->l2_types[l2_types_count++] = vlan->eth_proto; if (vlan->eth_proto == ETYPE_IPv4) { - pip = (struct ipv4_hdr *)(peth + 1); + pip = (prox_rte_ipv4_hdr *)(peth + 1); } else if (vlan->eth_proto == ETYPE_VLAN) { - struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1); + prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1); pt->l2_types[l2_types_count++] = vlan->eth_proto; if (vlan->eth_proto == ETYPE_IPv4) { - pip = (struct ipv4_hdr *)(peth + 1); + pip = 
(prox_rte_ipv4_hdr *)(peth + 1); } else if (vlan->eth_proto == ETYPE_IPv6) { return 1; @@ -103,13 +104,13 @@ static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta } break; case ETYPE_8021ad: { - struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1); + prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1); pt->l2_types[l2_types_count++] = vlan->eth_proto; if (vlan->eth_proto == ETYPE_VLAN) { - struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1); + prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1); pt->l2_types[l2_types_count++] = vlan->eth_proto; if (vlan->eth_proto == ETYPE_IPv4) { - pip = (struct ipv4_hdr *)(peth + 1); + pip = (prox_rte_ipv4_hdr *)(peth + 1); } else { return 1; @@ -148,21 +149,21 @@ static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta /* L4 parser */ if (pt->proto_id == IPPROTO_UDP) { - struct udp_hdr *udp = (struct udp_hdr*)(pip + 1); + prox_rte_udp_hdr *udp = (prox_rte_udp_hdr*)(pip + 1); l4_meta->l4_hdr = (uint8_t*)udp; pt->src_port = udp->src_port; pt->dst_port = udp->dst_port; - l4_meta->payload = ((uint8_t*)udp) + sizeof(struct udp_hdr); - l4_meta->len = rte_be_to_cpu_16(udp->dgram_len) - sizeof(struct udp_hdr); + l4_meta->payload = ((uint8_t*)udp) + sizeof(prox_rte_udp_hdr); + l4_meta->len = rte_be_to_cpu_16(udp->dgram_len) - sizeof(prox_rte_udp_hdr); } else if (pt->proto_id == IPPROTO_TCP) { - struct tcp_hdr *tcp = (struct tcp_hdr*)(pip + 1); + prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr*)(pip + 1); l4_meta->l4_hdr = (uint8_t*)tcp; pt->src_port = tcp->src_port; pt->dst_port = tcp->dst_port; l4_meta->payload = ((uint8_t*)tcp) + ((tcp->data_off >> 4)*4); - l4_meta->len = rte_be_to_cpu_16(pip->total_length) - sizeof(struct ipv4_hdr) - ((tcp->data_off >> 4)*4); + l4_meta->len = rte_be_to_cpu_16(pip->total_length) - sizeof(prox_rte_ipv4_hdr) - ((tcp->data_off >> 4)*4); } else { plog_err("unsupported protocol %d\n", pt->proto_id); diff --git a/VNFs/DPPD-PROX/pkt_prototypes.h 
b/VNFs/DPPD-PROX/pkt_prototypes.h index 5d55bacb..9acde34a 100644 --- a/VNFs/DPPD-PROX/pkt_prototypes.h +++ b/VNFs/DPPD-PROX/pkt_prototypes.h @@ -31,7 +31,7 @@ static const struct gre_hdr gre_hdr_proto = { .bits = GRE_KEY_PRESENT }; -static const struct ipv4_hdr tunnel_ip_proto = { +static const prox_rte_ipv4_hdr tunnel_ip_proto = { .version_ihl = 0x45, .type_of_service = 0, .packet_id = 0, diff --git a/VNFs/DPPD-PROX/prox_args.c b/VNFs/DPPD-PROX/prox_args.c index a5d10a60..f7706755 100644 --- a/VNFs/DPPD-PROX/prox_args.c +++ b/VNFs/DPPD-PROX/prox_args.c @@ -610,8 +610,8 @@ static int get_port_cfg(unsigned sindex, char *str, void *data) // A frame of 1526 bytes (1500 bytes mtu, 14 bytes hdr, 4 bytes crc and 8 bytes vlan) // should not be considered as a jumbo frame. However rte_ethdev.c considers that // the max_rx_pkt_len for a non jumbo frame is 1518 - cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; - if (cfg->port_conf.rxmode.max_rx_pkt_len > ETHER_MAX_LEN) { + cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN; + if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN) { cfg->requested_rx_offload |= DEV_RX_OFFLOAD_JUMBO_FRAME; } } diff --git a/VNFs/DPPD-PROX/prox_cksum.c b/VNFs/DPPD-PROX/prox_cksum.c index 58e05a6b..11d37a6c 100644 --- a/VNFs/DPPD-PROX/prox_cksum.c +++ b/VNFs/DPPD-PROX/prox_cksum.c @@ -21,9 +21,9 @@ /* compute IP 16 bit checksum */ /* The hdr_checksum field must be set to 0 by the caller. 
*/ -inline void prox_ip_cksum_sw(struct ipv4_hdr *buf) +inline void prox_ip_cksum_sw(prox_rte_ipv4_hdr *buf) { - const uint16_t size = sizeof(struct ipv4_hdr); + const uint16_t size = sizeof(prox_rte_ipv4_hdr); uint32_t cksum = 0; uint32_t nb_dwords; uint32_t tail, mask; @@ -73,7 +73,7 @@ static inline uint16_t calc_pseudo_checksum(uint8_t ipproto, uint16_t len, uint3 return csum; } -static inline void prox_write_udp_pseudo_hdr(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +static inline void prox_write_udp_pseudo_hdr(prox_rte_udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) { /* Note that the csum is not complemented, while the pseaudo header checksum is calculated as "... the 16-bit one's @@ -84,18 +84,18 @@ static inline void prox_write_udp_pseudo_hdr(struct udp_hdr *udp, uint16_t len, udp->dgram_cksum = calc_pseudo_checksum(IPPROTO_UDP, len, src_ip_addr, dst_ip_addr); } -static inline void prox_write_tcp_pseudo_hdr(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +static inline void prox_write_tcp_pseudo_hdr(prox_rte_tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) { tcp->cksum = calc_pseudo_checksum(IPPROTO_TCP, len, src_ip_addr, dst_ip_addr); } -inline void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload) +inline void prox_ip_udp_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload) { prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & DEV_TX_OFFLOAD_IPV4_CKSUM); uint32_t l4_len = rte_bswap16(pip->total_length) - l3_len; if (pip->next_proto_id == IPPROTO_UDP) { - struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t*)pip) + l3_len); + prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t*)pip) + l3_len); #ifndef SOFT_CRC if (cksum_offload & DEV_TX_OFFLOAD_UDP_CKSUM) { mbuf->ol_flags |= PKT_TX_UDP_CKSUM; @@ -104,7 +104,7 @@ inline 
void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *pip, uint1 #endif prox_udp_cksum_sw(udp, l4_len, pip->src_addr, pip->dst_addr); } else if (pip->next_proto_id == IPPROTO_TCP) { - struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t*)pip) + l3_len); + prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t*)pip) + l3_len); #ifndef SOFT_CRC if (cksum_offload & DEV_TX_OFFLOAD_TCP_CKSUM) { prox_write_tcp_pseudo_hdr(tcp, l4_len, pip->src_addr, pip->dst_addr); @@ -143,14 +143,14 @@ static inline uint16_t checksum_byte_seq(uint16_t *buf, uint16_t len) return ~csum; } -inline void prox_udp_cksum_sw(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +inline void prox_udp_cksum_sw(prox_rte_udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) { prox_write_udp_pseudo_hdr(udp, len, src_ip_addr, dst_ip_addr); uint16_t csum = checksum_byte_seq((uint16_t *)udp, len); udp->dgram_cksum = csum; } -inline void prox_tcp_cksum_sw(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) +inline void prox_tcp_cksum_sw(prox_rte_tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr) { prox_write_tcp_pseudo_hdr(tcp, len, src_ip_addr, dst_ip_addr); diff --git a/VNFs/DPPD-PROX/prox_cksum.h b/VNFs/DPPD-PROX/prox_cksum.h index 57489900..6ba50268 100644 --- a/VNFs/DPPD-PROX/prox_cksum.h +++ b/VNFs/DPPD-PROX/prox_cksum.h @@ -26,6 +26,7 @@ #include #include #include "igmp.h" +#include "prox_compat.h" #if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) #define CALC_TX_OL(l2_len, l3_len) ((uint64_t)(l2_len) | (uint64_t)(l3_len) << 7) @@ -43,9 +44,9 @@ static void prox_ip_cksum_hw(struct rte_mbuf *mbuf, uint16_t l2_len, uint16_t l3 mbuf->ol_flags |= PKT_TX_IP_CKSUM; } -void prox_ip_cksum_sw(struct ipv4_hdr *buf); +void prox_ip_cksum_sw(prox_rte_ipv4_hdr *buf); -static inline void prox_ip_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int offload) +static inline void 
prox_ip_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int offload) { buf->hdr_checksum = 0; #ifdef SOFT_CRC @@ -60,11 +61,11 @@ static inline void prox_ip_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, ui #endif } -void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int cksum_offload); +void prox_ip_udp_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int cksum_offload); /* src_ip_addr/dst_ip_addr are in network byte order */ -void prox_udp_cksum_sw(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr); -void prox_tcp_cksum_sw(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr); +void prox_udp_cksum_sw(prox_rte_udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr); +void prox_tcp_cksum_sw(prox_rte_tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr); void prox_igmp_cksum_sw(struct igmpv2_hdr *igmp, uint16_t len); #endif /* _PROX_CKSUM_H_ */ diff --git a/VNFs/DPPD-PROX/prox_compat.h b/VNFs/DPPD-PROX/prox_compat.h index 001caa6d..7c1c2e11 100644 --- a/VNFs/DPPD-PROX/prox_compat.h +++ b/VNFs/DPPD-PROX/prox_compat.h @@ -13,7 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
*/ - #ifndef _PROX_COMPAT_H_ #define _PROX_COMPAT_H_ #include @@ -150,6 +149,33 @@ static void *prox_rte_table_create(struct prox_rte_table_params *params, int soc #define prox_rte_sched_port_pkt_write(A,B,C,D,E,F,G) rte_sched_port_pkt_write(A,B,C,D,E,F,G); #endif +#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0) +#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0) +typedef struct vxlan_gpe_hdr prox_rte_vxlan_gpe_hdr; +#endif +#define PROX_RTE_ETHER_CRC_LEN ETHER_CRC_LEN +#define PROX_RTE_ETHER_MIN_LEN ETHER_MIN_LEN +#define PROX_RTE_ETHER_MAX_LEN ETHER_MAX_LEN +#define PROX_RTE_ETHER_HDR_LEN ETHER_HDR_LEN +#define PROX_RTE_TCP_SYN_FLAG TCP_SYN_FLAG +#define PROX_RTE_TCP_FIN_FLAG TCP_FIN_FLAG +#define PROX_RTE_TCP_RST_FLAG TCP_RST_FLAG +#define PROX_RTE_TCP_ACK_FLAG TCP_ACK_FLAG + +#define prox_rte_ether_addr_copy ether_addr_copy +#define prox_rte_eth_random_addr eth_random_addr + +typedef struct ipv6_hdr prox_rte_ipv6_hdr; +typedef struct ipv4_hdr prox_rte_ipv4_hdr; +typedef struct ether_addr prox_rte_ether_addr; +typedef struct ether_hdr prox_rte_ether_hdr; +typedef struct vlan_hdr prox_rte_vlan_hdr; +typedef struct udp_hdr prox_rte_udp_hdr; +typedef struct tcp_hdr prox_rte_tcp_hdr; + +#else +#endif + static inline char *prox_strncpy(char * dest, const char * src, size_t count) { #pragma GCC diagnostic push diff --git a/VNFs/DPPD-PROX/prox_lua_types.c b/VNFs/DPPD-PROX/prox_lua_types.c index f901b931..3ef3d472 100644 --- a/VNFs/DPPD-PROX/prox_lua_types.c +++ b/VNFs/DPPD-PROX/prox_lua_types.c @@ -183,7 +183,7 @@ int lua_to_ip6(struct lua_State *L, enum lua_place from, const char *name, uint8 return 0; } -int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, struct ether_addr *mac) +int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, prox_rte_ether_addr *mac) { uint32_t n_entries; uint32_t mac_array[4]; @@ -419,7 +419,7 @@ int lua_to_next_hop(struct lua_State *L, enum lua_place from, const char *name, uint32_t port_id; uint32_t ip; 
uint32_t mpls; - struct ether_addr mac; + prox_rte_ether_addr mac; int pop; if ((pop = lua_getfrom(L, from, name)) < 0) @@ -463,7 +463,7 @@ int lua_to_next_hop6(struct lua_State *L, enum lua_place from, const char *name, { struct next_hop6 *ret; uint32_t next_hop_index, port_id, mpls; - struct ether_addr mac; + prox_rte_ether_addr mac; uint8_t ip[16]; int pop; @@ -919,7 +919,7 @@ int lua_to_cpe_table_data(struct lua_State *L, enum lua_place from, const char * struct ip4_subnet cidr; uint32_t n_entries = 0; uint32_t port_idx, gre_id, svlan, cvlan, user; - struct ether_addr mac; + prox_rte_ether_addr mac; uint32_t idx = 0; lua_pushnil(L); diff --git a/VNFs/DPPD-PROX/prox_lua_types.h b/VNFs/DPPD-PROX/prox_lua_types.h index 182c9055..ce6bd9d2 100644 --- a/VNFs/DPPD-PROX/prox_lua_types.h +++ b/VNFs/DPPD-PROX/prox_lua_types.h @@ -21,10 +21,10 @@ #include #include +#include "prox_compat.h" #include "ip6_addr.h" struct lua_State; -struct ether_addr; struct ip4_subnet; struct ip6_subnet; struct next_hop; @@ -70,7 +70,7 @@ struct lpm6 { struct ipv6_tun_binding_entry { struct ipv6_addr endpoint_addr; // IPv6 local addr - struct ether_addr next_hop_mac; // mac addr of next hop towards lwB4 + prox_rte_ether_addr next_hop_mac; // mac addr of next hop towards lwB4 uint32_t public_ipv4; // Public IPv4 address uint16_t public_port; // Public base port (together with port mask, defines the Port Set) } __attribute__((__packed__)); @@ -86,7 +86,7 @@ struct cpe_table_entry { uint32_t svlan; uint32_t cvlan; uint32_t ip; - struct ether_addr eth_addr; + prox_rte_ether_addr eth_addr; uint32_t user; }; @@ -115,7 +115,7 @@ int lua_getfrom(struct lua_State *L, enum lua_place from, const char *name); int lua_to_port(struct lua_State *L, enum lua_place from, const char *name, uint16_t *port); int lua_to_ip(struct lua_State *L, enum lua_place from, const char *name, uint32_t *ip); int lua_to_ip6(struct lua_State *L, enum lua_place from, const char *name, uint8_t *ip); -int lua_to_mac(struct 
lua_State *L, enum lua_place from, const char *name, struct ether_addr *mac); +int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, prox_rte_ether_addr *mac); int lua_to_cidr(struct lua_State *L, enum lua_place from, const char *name, struct ip4_subnet *cidr); int lua_to_cidr6(struct lua_State *L, enum lua_place from, const char *name, struct ip6_subnet *cidr); int lua_to_int(struct lua_State *L, enum lua_place from, const char *name, uint32_t *val); diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c index 93341d3f..73afe571 100644 --- a/VNFs/DPPD-PROX/prox_port_cfg.c +++ b/VNFs/DPPD-PROX/prox_port_cfg.c @@ -104,10 +104,10 @@ void prox_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *_m, unsig struct rte_mbuf *mbuf = _m; #if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0) - mbuf->tx_offload = CALC_TX_OL(sizeof(struct ether_hdr), sizeof(struct ipv4_hdr)); + mbuf->tx_offload = CALC_TX_OL(sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr)); #else - mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr); - mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr); + mbuf->pkt.vlan_macip.f.l2_len = sizeof(prox_rte_ether_hdr); + mbuf->pkt.vlan_macip.f.l3_len = sizeof(prox_rte_ipv4_hdr); #endif rte_pktmbuf_init(mp, opaque_arg, mbuf, i); @@ -181,9 +181,9 @@ void init_rte_dev(int use_dummy_devices) char port_name[32] = "0dummy_dev"; for (uint32_t i = 0; i < nb_ports; ++i) { #if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,1)) - rte_vdev_init(port_name, "size=ETHER_MIN_LEN,copy=0"); + rte_vdev_init(port_name, "size=PROX_RTE_ETHER_MIN_LEN,copy=0"); #else - eth_dev_null_create(port_name, 0, ETHER_MIN_LEN, 0); + eth_dev_null_create(port_name, 0, PROX_RTE_ETHER_MIN_LEN, 0); #endif port_name[0]++; } @@ -505,7 +505,7 @@ static void init_port(struct prox_port_cfg *port_cfg) dummy_pool_name[0]++; } else { // Most pmd should now support setting mtu - if (port_cfg->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) { + if 
(port_cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) { plog_info("\t\tMTU is too big for the port, reducing MTU from %d to %d\n", port_cfg->mtu, port_cfg->max_rx_pkt_len); port_cfg->mtu = port_cfg->max_rx_pkt_len; } @@ -779,7 +779,7 @@ void init_port_addr(void) rte_eth_macaddr_get(port_id, &port_cfg->eth_addr); break; case PROX_PORT_MAC_RAND: - eth_random_addr(port_cfg->eth_addr.addr_bytes); + prox_rte_eth_random_addr(port_cfg->eth_addr.addr_bytes); break; case PROX_PORT_MAC_SET: if ((rc = rte_eth_dev_default_mac_addr_set(port_id, &port_cfg->eth_addr)) != 0) diff --git a/VNFs/DPPD-PROX/prox_port_cfg.h b/VNFs/DPPD-PROX/prox_port_cfg.h index 6a02cf0b..9a252c16 100644 --- a/VNFs/DPPD-PROX/prox_port_cfg.h +++ b/VNFs/DPPD-PROX/prox_port_cfg.h @@ -26,6 +26,7 @@ #endif #include +#include "prox_compat.h" #include "prox_globals.h" enum addr_type {PROX_PORT_MAC_HW, PROX_PORT_MAC_SET, PROX_PORT_MAC_RAND}; @@ -53,7 +54,7 @@ struct prox_port_cfg { uint32_t max_link_speed; uint32_t mtu; enum addr_type type; - struct ether_addr eth_addr; /* port MAC address */ + prox_rte_ether_addr eth_addr; /* port MAC address */ char name[MAX_NAME_SIZE]; char short_name[MAX_NAME_SIZE]; char driver_name[MAX_NAME_SIZE]; @@ -77,7 +78,7 @@ struct prox_port_cfg { uint16_t min_tx_desc; uint16_t max_tx_desc; uint32_t nb_mc_addr; - struct ether_addr mc_addr[NB_MCAST_ADDR]; + prox_rte_ether_addr mc_addr[NB_MCAST_ADDR]; }; extern rte_atomic32_t lsc; diff --git a/VNFs/DPPD-PROX/qinq.h b/VNFs/DPPD-PROX/qinq.h index 14da9753..1d11114d 100644 --- a/VNFs/DPPD-PROX/qinq.h +++ b/VNFs/DPPD-PROX/qinq.h @@ -18,6 +18,7 @@ #define _QINQ_H_ #include +#include "prox_compat.h" struct my_vlan_hdr { uint16_t eth_proto; @@ -30,8 +31,8 @@ struct vlans { }; struct qinq_hdr { - struct ether_addr d_addr; - struct ether_addr s_addr; + prox_rte_ether_addr d_addr; + prox_rte_ether_addr s_addr; struct my_vlan_hdr svlan; struct my_vlan_hdr cvlan; uint16_t ether_type; diff --git 
a/VNFs/DPPD-PROX/task_init.h b/VNFs/DPPD-PROX/task_init.h index 9fa9f92b..4108f54d 100644 --- a/VNFs/DPPD-PROX/task_init.h +++ b/VNFs/DPPD-PROX/task_init.h @@ -22,6 +22,7 @@ #include #include #include +#include "prox_compat.h" #include "task_base.h" #include "prox_globals.h" #include "ip6_addr.h" @@ -135,8 +136,8 @@ struct task_args { struct rte_ring *tx_rings[MAX_RINGS_PER_TASK]; struct rte_ring *ctrl_plane_ring; uint32_t tot_n_txrings_inited; - struct ether_addr edaddr; - struct ether_addr esaddr; + prox_rte_ether_addr edaddr; + prox_rte_ether_addr esaddr; struct port_queue tx_port_queue[PROX_MAX_PORTS]; struct port_queue rx_port_queue[PROX_MAX_PORTS]; /* Used to set up actual task at initialization time. */ diff --git a/VNFs/DPPD-PROX/vxlangpe_nsh.h b/VNFs/DPPD-PROX/vxlangpe_nsh.h index 5f83650c..7aebf380 100644 --- a/VNFs/DPPD-PROX/vxlangpe_nsh.h +++ b/VNFs/DPPD-PROX/vxlangpe_nsh.h @@ -36,12 +36,12 @@ struct nsh_hdr { } __attribute__((__packed__)); #if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0) -struct vxlan_gpe_hdr { +typedef struct prox_rte_vxlan_gpe_hdr { uint8_t flag_0; uint8_t flag_1; uint8_t reserved; uint8_t proto; uint32_t vni_res; -} __attribute__((__packed__)); +} __attribute__((__packed__)) prox_rte_vxlan_gpe_hdr; #endif #endif /* _VXLANGPE_NSH_H_ */ -- cgit 1.2.3-korg