Diffstat (limited to 'VNFs/DPPD-PROX/handle_qinq_encap4.c')
-rw-r--r--  VNFs/DPPD-PROX/handle_qinq_encap4.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.c b/VNFs/DPPD-PROX/handle_qinq_encap4.c
index 0b31660f..0b707b7a 100644
--- a/VNFs/DPPD-PROX/handle_qinq_encap4.c
+++ b/VNFs/DPPD-PROX/handle_qinq_encap4.c
@@ -152,7 +152,7 @@ static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *tar
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
/* TODO: check if it is not necessary to limit reverse mapping
@@ -163,6 +163,10 @@ static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *tar
}
/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
+ if (targ->runtime_flags & TASK_CLASSIFY) {
+ int rc = init_port_sched(&task->sched_port, targ);
+ PROX_PANIC(rc, "Did not find any QoS task to transmit to => undefined sched_port parameters\n");
+ }
}
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
@@ -440,14 +444,14 @@ static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf *
prefetch_pkts(mbufs, n_pkts);
for (uint16_t j = 0; j < n_pkts; ++j) {
- struct ipv4_hdr* ip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *) + 1);
+ prox_rte_ipv4_hdr* ip = (prox_rte_ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *) + 1);
task->keys[j] = (uint64_t)ip->dst_addr;
}
prox_rte_table_key8_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
if (likely(lookup_hit_mask == pkts_mask)) {
for (uint16_t j = 0; j < n_pkts; ++j) {
- struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
+ struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr));
uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);
if (padlen) {
@@ -463,7 +467,7 @@ static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf *
out[j] = OUT_DISCARD;
continue;
}
- struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
+ struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr));
uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);
if (padlen) {
@@ -541,13 +545,13 @@ static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct c
uint8_t queue = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] & 0x3;
uint8_t tc = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] >> 2;
- rte_sched_port_pkt_write(mbuf, 0, entry->user, tc, queue, 0);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbuf, 0, entry->user, tc, queue, 0);
}
#ifdef ENABLE_EXTRA_USER_STATISTICS
task->stats_per_user[entry->user]++;
#endif
if (task->runtime_flags & TASK_TX_CRC) {
- prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
}
return entry->mac_port.out_idx;
}
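
The renames in this diff track DPDK API changes: struct ether_hdr / struct ipv4_hdr became rte_ether_hdr / rte_ipv4_hdr (DPDK 19.08), rte_sched_port_pkt_write() gained a struct rte_sched_port * first argument in newer releases, and the DEV_TX_OFFLOAD_* flags became RTE_ETH_TX_OFFLOAD_* (DPDK 21.11). Below is a minimal sketch of the kind of compatibility aliases the prox_rte_* names imply, assuming a recent DPDK; the typedef and wrapper shown here are illustrative assumptions, not the actual definitions from the PROX compatibility headers.

/* Illustrative sketch only: assumes DPDK >= 21.11; the real prox_rte_*
 * aliases live in PROX's compatibility headers and may be defined differently. */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_sched.h>

typedef struct rte_ether_hdr prox_rte_ether_hdr; /* pre-19.08: struct ether_hdr */
typedef struct rte_ipv4_hdr  prox_rte_ipv4_hdr;  /* pre-19.08: struct ipv4_hdr  */

/* Newer rte_sched_port_pkt_write() needs the scheduler port handle, which is
 * why the call site in the diff now passes task->sched_port as the first argument. */
static inline void
prox_rte_sched_port_pkt_write(struct rte_sched_port *port, struct rte_mbuf *mbuf,
			      uint32_t subport, uint32_t pipe,
			      uint32_t traffic_class, uint32_t queue,
			      enum rte_color color)
{
	rte_sched_port_pkt_write(port, mbuf, subport, pipe, traffic_class,
				 queue, color);
}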