author     Xavier Simonart <xavier.simonart@intel.com>    2019-03-04 14:50:04 +0100
committer  Xavier Simonart <xavier.simonart@intel.com>    2019-03-06 17:17:16 +0100
commit     22d1e458389ae6c6eb98d9c14c4f1cd8e2009f3d (patch)
tree       a324bbaaa772d8e45d61476ce92b2d9ebaeb448a
parent     40c34eda9e2be84b9dcc98326d0609f1fa89007f (diff)
PROX generator: performance optimization (2/4)
Improve PROX generator performance by pre-calculating bytes_to_tsc. This improvement is only implemented for the non-pcap generator, where usually only a few different packet sizes are generated. The change results in higher memory usage and might have a negative performance impact in some cases where many different packet sizes are generated, for instance when random is applied to the packet size.

In addition, the rx path has been simplified: we now receive only MAX_PKT_BURST packets per handle loop. Before, we tried to empty the NIC by looping on RX, ending up with many rx packets per handle loop; this was used to determine a lower bound for the time the packets were received. We now set the lower bound only when fewer than MAX_PKT_BURST packets have been received.

Change-Id: I1ce813d7e4ac1577ea412c590add5d6f94b36ec7
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
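Before the per-file diffs, here is a minimal, self-contained sketch of the pre-calculated bytes_to_tsc table described above. It reuses the names that appear in the handle_gen.c changes below (bytes_per_hz, max_frame_size, MAX_PKT_BURST); the task structure, the allocation and the TSC frequency are simplified stand-ins, not the PROX code itself.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PKT_BURST 64 /* illustrative value */

    struct gen_sketch {
        uint64_t hz;            /* TSC frequency in cycles per second */
        uint64_t *bytes_to_tsc; /* pre-computed cycle count per byte count */
    };

    /* Fill the table once at init time. bytes_per_hz == UINT64_MAX means the
     * link speed is unknown; extrapolation is then disabled (0 cycles). */
    static int init_bytes_to_tsc(struct gen_sketch *g, uint32_t max_frame_size,
                                 uint64_t bytes_per_hz)
    {
        size_t n = (size_t)max_frame_size * MAX_PKT_BURST;

        g->bytes_to_tsc = calloc(n, sizeof(g->bytes_to_tsc[0]));
        if (g->bytes_to_tsc == NULL)
            return -1;
        for (size_t i = 0; i < n; i++)
            g->bytes_to_tsc[i] = (bytes_per_hz == UINT64_MAX) ?
                0 : (g->hz * i) / bytes_per_hz;
        return 0;
    }

    /* Fast path: the former per-packet multiply/divide becomes one array load. */
    static inline uint64_t bytes_to_tsc(const struct gen_sketch *g, uint32_t bytes)
    {
        return g->bytes_to_tsc[bytes];
    }

    int main(void)
    {
        struct gen_sketch g = { .hz = 2500000000ULL, .bytes_to_tsc = NULL }; /* assume a 2.5 GHz TSC */
        uint64_t bytes_per_hz = 10000UL * 125000UL; /* 10 Gbps reported as 10000 Mbps */

        if (init_bytes_to_tsc(&g, 1522, bytes_per_hz) == 0)
            printf("84 wire bytes ~ %lu cycles\n", (unsigned long)bytes_to_tsc(&g, 84));
        free(g.bytes_to_tsc);
        return 0;
    }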
-rw-r--r--  VNFs/DPPD-PROX/commands.c        1
-rw-r--r--  VNFs/DPPD-PROX/handle_gen.c     88
-rw-r--r--  VNFs/DPPD-PROX/handle_lat.c     91
-rw-r--r--  VNFs/DPPD-PROX/prox_port_cfg.c  42
-rw-r--r--  VNFs/DPPD-PROX/prox_port_cfg.h   1
-rw-r--r--  VNFs/DPPD-PROX/run.c             3
-rw-r--r--  VNFs/DPPD-PROX/rx_pkt.c         33
-rw-r--r--  VNFs/DPPD-PROX/task_init.c       4
8 files changed, 101 insertions(+), 162 deletions(-)
diff --git a/VNFs/DPPD-PROX/commands.c b/VNFs/DPPD-PROX/commands.c
index 22d158a7..50d04d21 100644
--- a/VNFs/DPPD-PROX/commands.c
+++ b/VNFs/DPPD-PROX/commands.c
@@ -845,6 +845,7 @@ void cmd_portinfo(int port_id, char *dst, size_t max_len)
dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
+ dst += snprintf(dst, end - dst, "\tLink max speed: %u Mbps\n", port_cfg->max_link_speed);
dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up? "up" : "down");
dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c
index 32a351f1..643c61c5 100644
--- a/VNFs/DPPD-PROX/handle_gen.c
+++ b/VNFs/DPPD-PROX/handle_gen.c
@@ -85,7 +85,6 @@ struct task_gen_pcap {
struct task_gen {
struct task_base base;
uint64_t hz;
- uint64_t link_speed;
struct token_time token_time;
struct local_mbuf local_mbuf;
struct pkt_template *pkt_template; /* packet templates used at runtime */
@@ -123,6 +122,7 @@ struct task_gen {
uint8_t flags;
uint8_t cksum_offload;
struct prox_port_cfg *port;
+ uint64_t *bytes_to_tsc;
} __rte_cache_aligned;
static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
@@ -261,15 +261,9 @@ static int handle_gen_pcap_bulk(struct task_base *tbase, struct rte_mbuf **mbuf,
return task->base.tx_pkt(&task->base, new_pkts, send_bulk, NULL);
}
-static uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
+static inline uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
{
- const uint64_t hz = task->hz;
- const uint64_t bytes_per_hz = task->link_speed;
-
- if (bytes_per_hz == UINT64_MAX)
- return 0;
-
- return hz * bytes / bytes_per_hz;
+ return task->bytes_to_tsc[bytes];
}
static uint32_t task_gen_next_pkt_idx(const struct task_gen *task, uint32_t pkt_idx)
@@ -439,8 +433,12 @@ static uint64_t task_gen_calc_bulk_duration(struct task_gen *task, uint32_t coun
uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - 1);
struct pkt_template *last_pkt_template = &task->pkt_template[pkt_idx];
uint32_t last_pkt_len = pkt_len_to_wire_size(last_pkt_template->len);
+#ifdef NO_EXTRAPOLATION
+ uint64_t bulk_duration = task->pkt_tsc_offset[count - 1];
+#else
uint64_t last_pkt_duration = bytes_to_tsc(task, last_pkt_len);
uint64_t bulk_duration = task->pkt_tsc_offset[count - 1] + last_pkt_duration;
+#endif
return bulk_duration;
}
@@ -475,6 +473,14 @@ static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr,
simply sleeping until delta_t is zero would leave a period
of silence on the line. The error has been introduced
earlier, but the packets have already been sent. */
+
+ /* This happens typically if previous bulk was delayed
+ by an interrupt e.g. (with Time in nsec)
+ Time x: sleep 4 microsec
+ Time x+4000: send 64 packets (64 packets as 4000 nsec, w/ 10Gbps 64 bytes)
+ Time x+5000: send 16 packets (16 packets as 1000 nsec)
+ When we send the 16 packets, the 64 earlier packets are not yet
+ fully sent */
if (tx_tsc < task->earliest_tsc_next_pkt)
delta_t = task->earliest_tsc_next_pkt - tx_tsc;
else
@@ -483,12 +489,10 @@ static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr,
for (uint16_t i = 0; i < count; ++i) {
uint32_t *pos = (uint32_t *)(pkt_hdr[i] + task->lat_pos);
const uint64_t pkt_tsc = tx_tsc + delta_t + task->pkt_tsc_offset[i];
-
*pos = pkt_tsc >> LATENCY_ACCURACY;
}
uint64_t bulk_duration = task_gen_calc_bulk_duration(task, count);
-
task->earliest_tsc_next_pkt = tx_tsc + delta_t + bulk_duration;
write_tsc_after = rte_rdtsc();
task->write_duration_estimate = write_tsc_after - write_tsc_before;
@@ -498,6 +502,7 @@ static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr,
do {
tsc_before_tx = rte_rdtsc();
} while (tsc_before_tx < tx_tsc);
+
return tsc_before_tx;
}
@@ -537,7 +542,11 @@ static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbuf
mbufs[i]->udata64 = task->pkt_idx & TEMPLATE_INDEX_MASK;
struct ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i];
if (task->lat_enabled) {
+#ifdef NO_EXTRAPOLATION
+ task->pkt_tsc_offset[i] = 0;
+#else
task->pkt_tsc_offset[i] = bytes_to_tsc(task, will_send_bytes);
+#endif
will_send_bytes += pkt_len_to_wire_size(pkt_template->len);
}
task->pkt_idx = task_gen_next_pkt_idx(task, task->pkt_idx);
@@ -633,19 +642,6 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
int i, j;
-#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
- // On more recent DPDK, we use the speed_capa of the port, and not the negotiated speed
- // If link is down, link_speed is 0
- if (unlikely(task->link_speed == 0)) {
- if (task->port && task->port->link_speed != 0) {
- task->link_speed = task->port->link_speed * 125000L;
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- } else
- return 0;
- }
-#endif
-
task_gen_update_config(task);
if (task->pkt_count == 0) {
@@ -1188,28 +1184,7 @@ static void start(struct task_base *tbase)
if (tbase->l3.tmaster) {
register_all_ip_to_ctrl_plane(task);
}
- if (task->port) {
- // task->port->link_speed reports the link speed in Mbps e.g. 40k for a 40 Gbps NIC.
- // task->link_speed reports link speed in Bytes per sec.
-#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
- // It can be 0 if link is down, and must hence be updated in fast path.
- task->link_speed = task->port->link_speed * 125000L;
- if (task->link_speed)
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- else
- plog_info("\tPort %u: link speed is %ld Mbps - link might be down\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
-#else
- if (task->port->link_speed == UINT32_MAX)
- task->link_speed = UINT64_MAX;
- else {
- task->link_speed = task->port->link_speed * 125000L;
- plog_info("\tPort %u: link max speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- }
-#endif
- }
+
/* TODO
Handle the case when two tasks transmit to the same port
and one of them is stopped. In that case ARP (requests or replies)
@@ -1295,7 +1270,26 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
task->generator_id = targ->generator_id;
plog_info("\tGenerator id = %d\n", task->generator_id);
- task->link_speed = UINT64_MAX;
+
+ // Allocate array holding bytes to tsc for supported frame sizes
+ task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->bytes_to_tsc == NULL,
+ "Failed to allocate %u bytes (in huge pages) for bytes_to_tsc\n", task->max_frame_size);
+
+ // task->port->max_link_speed reports the maximum, non-negotiated link speed in Mbps e.g. 40k for a 40 Gbps NIC.
+ // It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
+ uint64_t bytes_per_hz = UINT64_MAX;
+ if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) {
+ bytes_per_hz = task->port->max_link_speed * 125000L;
+ plog_info("\tPort %u: max link speed is %ld Mbps\n",
+ (uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
+ }
+ for (unsigned int i = 0; i < task->max_frame_size * MAX_PKT_BURST ; i++) {
+ if (bytes_per_hz == UINT64_MAX)
+ task->bytes_to_tsc[i] = 0;
+ else
+ task->bytes_to_tsc[i] = (task->hz * i) / bytes_per_hz;
+ }
if (!strcmp(targ->pcap_file, "")) {
plog_info("\tUsing inline definition of a packet\n");
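The interrupt-delay comment added to task_gen_write_latency above uses round numbers (64 packets in roughly 4000 nsec at 10 Gbps). A short worked example of that arithmetic and of the Mbps-to-bytes/sec conversion (* 125000L) used in this commit, assuming minimum-size 64-byte frames that occupy about 84 byte-times on the wire (frame plus preamble/SFD plus inter-frame gap):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Link speed is reported in Mbps; 1 Mbps = 125000 bytes per second. */
        uint64_t bytes_per_sec = 10000UL * 125000UL; /* 10 Gbps -> 1.25e9 B/s */

        /* 64 minimum-size frames, ~84 byte-times each on the wire. */
        uint64_t burst_wire_bytes = 64 * 84;

        double nsec = 1e9 * (double)burst_wire_bytes / (double)bytes_per_sec;
        printf("64 packets take ~%.0f nsec on the wire\n", nsec); /* ~4300 nsec */
        return 0;
    }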
diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c
index 0273230b..c7a45b60 100644
--- a/VNFs/DPPD-PROX/handle_lat.c
+++ b/VNFs/DPPD-PROX/handle_lat.c
@@ -109,12 +109,12 @@ struct task_lat {
uint16_t min_pkt_len;
struct early_loss_detect *eld;
struct rx_pkt_meta_data *rx_pkt_meta;
- uint64_t link_speed;
// Following fields are only used when starting or stopping, not in general runtime
uint64_t *prev_tx_packet_index;
FILE *fp_rx;
FILE *fp_tx;
struct prox_port_cfg *port;
+ uint64_t *bytes_to_tsc;
};
/* This function calculate the difference between rx and tx_time
* Both values are uint32_t (see handle_lat_bulk)
@@ -435,9 +435,13 @@ static uint32_t task_lat_early_loss_detect(struct task_lat *task, uint32_t packe
return early_loss_detect_add(eld, packet_id);
}
-static uint64_t tsc_extrapolate_backward(uint64_t link_speed, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
+static uint64_t tsc_extrapolate_backward(struct task_lat *task, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
{
- uint64_t tsc = tsc_from - (rte_get_tsc_hz()*bytes)/link_speed;
+#ifdef NO_EXTRAPOLATION
+ uint64_t tsc = tsc_from;
+#else
+ uint64_t tsc = tsc_from - task->bytes_to_tsc[bytes];
+#endif
if (likely(tsc > tsc_minimum))
return tsc;
else
@@ -507,21 +511,6 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
struct task_lat *task = (struct task_lat *)tbase;
int rc;
-#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
- // On more recent DPDK, we use the speed_capa of the port, and not the negotiated speed
- // If link is down, link_speed is 0
- if (unlikely(task->link_speed == 0)) {
- if (task->port && task->port->link_speed != 0) {
- task->link_speed = task->port->link_speed * 125000L;
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- } else if (n_pkts) {
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
- } else {
- return 0;
- }
- }
-#endif
if (n_pkts == 0) {
task->begin = tbase->aux->tsc_rx.before;
return 0;
@@ -531,10 +520,10 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
// Remember those packets with bad length or bad signature
uint32_t non_dp_count = 0;
- uint64_t pkt_bad_len_sig[(MAX_RX_PKT_ALL + 63) / 64];
-#define BIT64_SET(a64, bit) a64[bit / 64] |= (((uint64_t)1) << (bit & 63))
-#define BIT64_CLR(a64, bit) a64[bit / 64] &= ~(((uint64_t)1) << (bit & 63))
-#define BIT64_TEST(a64, bit) a64[bit / 64] & (((uint64_t)1) << (bit & 63))
+ uint64_t pkt_bad_len_sig = 0;
+#define BIT64_SET(a64, bit) a64 |= (((uint64_t)1) << (bit & 63))
+#define BIT64_CLR(a64, bit) a64 &= ~(((uint64_t)1) << (bit & 63))
+#define BIT64_TEST(a64, bit) a64 & (((uint64_t)1) << (bit & 63))
/* Go once through all received packets and read them. If
packet has just been modified by another core, the cost of
@@ -583,7 +572,7 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
const uint64_t rx_tsc = tbase->aux->tsc_rx.after;
uint64_t rx_time_err;
- uint64_t pkt_rx_time64 = tsc_extrapolate_backward(task->link_speed, rx_tsc, task->rx_pkt_meta[0].bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
+ uint64_t pkt_rx_time64 = tsc_extrapolate_backward(task, rx_tsc, task->rx_pkt_meta[0].bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
if (unlikely((task->begin >> LATENCY_ACCURACY) > pkt_rx_time64)) {
// Extrapolation went up to BEFORE begin => packets were stuck in the NIC but we were not seeing them
rx_time_err = pkt_rx_time64 - (task->last_pkts_tsc >> LATENCY_ACCURACY);
@@ -603,7 +592,7 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
struct rx_pkt_meta_data *rx_pkt_meta = &task->rx_pkt_meta[j];
uint8_t *hdr = rx_pkt_meta->hdr;
- uint32_t pkt_rx_time = tsc_extrapolate_backward(task->link_speed, rx_tsc, rx_pkt_meta->bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
+ uint32_t pkt_rx_time = tsc_extrapolate_backward(task, rx_tsc, rx_pkt_meta->bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
uint32_t pkt_tx_time = rx_pkt_meta->pkt_tx_time;
uint8_t generator_id;
@@ -662,7 +651,8 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
task->rx_packet_index++;
}
- task->begin = tbase->aux->tsc_rx.before;
+ if (n_pkts < MAX_PKT_BURST)
+ task->begin = tbase->aux->tsc_rx.before;
task->last_pkts_tsc = tbase->aux->tsc_rx.after;
rc = task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
@@ -728,28 +718,6 @@ static void lat_start(struct task_base *tbase)
{
struct task_lat *task = (struct task_lat *)tbase;
- if (task->port) {
- // task->port->link_speed reports the link speed in Mbps e.g. 40k for a 40 Gbps NIC.
- // task->link_speed reports link speed in Bytes per sec.
-#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
- // It can be 0 if link is down, and must hence be updated in fast path.
- task->link_speed = task->port->link_speed * 125000L;
- if (task->link_speed)
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- else
- plog_info("\tPort %u: link speed is %ld Mbps - link might be down\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
-#else
- if (task->port->link_speed == UINT32_MAX)
- task->link_speed = UINT64_MAX;
- else {
- task->link_speed = task->port->link_speed * 125000L;
- plog_info("\tPort %u: link max speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- }
-#endif
- }
}
static void init_task_lat(struct task_base *tbase, struct task_args *targ)
@@ -815,15 +783,32 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
task->lat_test = &task->lt[task->using_lt];
task_lat_set_accuracy_limit(task, targ->accuracy_limit_nsec);
- task->rx_pkt_meta = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(*task->rx_pkt_meta), socket_id);
+ task->rx_pkt_meta = prox_zmalloc(MAX_PKT_BURST * sizeof(*task->rx_pkt_meta), socket_id);
PROX_PANIC(task->rx_pkt_meta == NULL, "unable to allocate memory to store RX packet meta data");
- task->link_speed = UINT64_MAX;
+ uint32_t max_frame_size = MAX_PKT_SIZE;
+ uint64_t bytes_per_hz = UINT64_MAX;
if (targ->nb_rxports) {
- // task->port structure is only used while starting handle_lat to get the link_speed.
- // link_speed can not be quiried at init as the port has not been initialized yet.
struct prox_port_cfg *port = &prox_port_cfg[targ->rx_port_queue[0].port];
- task->port = port;
+ max_frame_size = port->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+
+ // port->max_link_speed reports the maximum, non-negotiated link speed in Mbps e.g. 40k for a 40 Gbps NIC.
+ // It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
+ if (port->max_link_speed != UINT32_MAX) {
+ bytes_per_hz = port->max_link_speed * 125000L;
+ plog_info("\tPort %u: max link speed is %ld Mbps\n",
+ (uint8_t)(port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
+ }
+ }
+ task->bytes_to_tsc = prox_zmalloc(max_frame_size * sizeof(task->bytes_to_tsc[0]) * MAX_PKT_BURST, rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->bytes_to_tsc == NULL,
+ "Failed to allocate %u bytes (in huge pages) for bytes_to_tsc\n", max_frame_size);
+
+ for (unsigned int i = 0; i < max_frame_size * MAX_PKT_BURST ; i++) {
+ if (bytes_per_hz == UINT64_MAX)
+ task->bytes_to_tsc[i] = 0;
+ else
+ task->bytes_to_tsc[i] = (rte_get_tsc_hz() * i) / bytes_per_hz;
}
}
@@ -833,7 +818,7 @@ static struct task_init task_init_lat = {
.handle = handle_lat_bulk,
.start = lat_start,
.stop = lat_stop,
- .flag_features = TASK_FEATURE_TSC_RX | TASK_FEATURE_RX_ALL | TASK_FEATURE_ZERO_RX | TASK_FEATURE_NEVER_DISCARDS,
+ .flag_features = TASK_FEATURE_TSC_RX | TASK_FEATURE_ZERO_RX | TASK_FEATURE_NEVER_DISCARDS,
.size = sizeof(struct task_lat)
};
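The handle_lat.c hunk above shrinks the bad-packet bookkeeping from a bitmap array sized for MAX_RX_PKT_ALL to a single uint64_t, which is enough now that a handle loop sees at most MAX_PKT_BURST (64 or fewer) packets. A minimal usage sketch of those BIT64 macros (the PROX definitions omit the extra parentheses added here for safety):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT64_SET(a64, bit)  ((a64) |=  (((uint64_t)1) << ((bit) & 63)))
    #define BIT64_CLR(a64, bit)  ((a64) &= ~(((uint64_t)1) << ((bit) & 63)))
    #define BIT64_TEST(a64, bit) ((a64) &   (((uint64_t)1) << ((bit) & 63)))

    int main(void)
    {
        uint64_t pkt_bad_len_sig = 0;

        /* First pass over the burst: remember packets with a bad length or
         * bad signature. */
        BIT64_SET(pkt_bad_len_sig, 3);
        BIT64_SET(pkt_bad_len_sig, 17);

        /* Second pass: skip the marked packets when computing latencies. */
        for (int j = 0; j < 32; j++)
            if (BIT64_TEST(pkt_bad_len_sig, j))
                printf("packet %d skipped (bad length/signature)\n", j);
        return 0;
    }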
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c
index 6dc023dc..a538be4f 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.c
+++ b/VNFs/DPPD-PROX/prox_port_cfg.c
@@ -320,10 +320,10 @@ static void print_port_capa(struct prox_port_cfg *port_cfg)
plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
- if (port_cfg->link_speed != UINT32_MAX) {
- plog_info("\t\tHighest link speed capa = %d Mbps\n", port_cfg->link_speed);
- }
#endif
+ if (port_cfg->max_link_speed != UINT32_MAX) {
+ plog_info("\t\tHighest link speed capa = %d Mbps\n", port_cfg->max_link_speed);
+ }
#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
@@ -410,12 +410,12 @@ static void print_port_capa(struct prox_port_cfg *port_cfg)
#endif
}
-static void get_link_speed(struct prox_port_cfg *port_cfg)
+static void get_max_link_speed(struct prox_port_cfg *port_cfg)
{
-#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
- port_cfg->link_speed = UINT32_MAX;
+ port_cfg->max_link_speed = UINT32_MAX;
- // virtio and vmxnet3 reports fake link_speed
+#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
+ // virtio and vmxnet3 report a fake max_link_speed
if (strcmp(port_cfg->short_name, "vmxnet3") && strcmp(port_cfg->short_name, "virtio")) {
// Get link_speed from highest capability from the port
// This will be used by gen and lat for extrapolation purposes
@@ -424,29 +424,29 @@ static void get_link_speed(struct prox_port_cfg *port_cfg)
// and might result in wrong exrapolation, and hence should not be used
// for extrapolation purposes
if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_100G)
- port_cfg->link_speed = ETH_SPEED_NUM_100G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_100G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_56G)
- port_cfg->link_speed = ETH_SPEED_NUM_56G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_56G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_50G)
- port_cfg->link_speed = ETH_SPEED_NUM_50G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_50G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_40G)
- port_cfg->link_speed = ETH_SPEED_NUM_40G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_40G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_25G)
- port_cfg->link_speed = ETH_SPEED_NUM_25G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_25G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_20G)
- port_cfg->link_speed = ETH_SPEED_NUM_20G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_20G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_10G)
- port_cfg->link_speed = ETH_SPEED_NUM_10G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_10G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_5G)
- port_cfg->link_speed = ETH_SPEED_NUM_5G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_5G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_2_5G)
- port_cfg->link_speed = ETH_SPEED_NUM_2_5G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_2_5G;
else if (port_cfg->dev_info.speed_capa & ETH_LINK_SPEED_1G)
- port_cfg->link_speed = ETH_SPEED_NUM_1G;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_1G;
else if (port_cfg->dev_info.speed_capa & (ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M))
- port_cfg->link_speed = ETH_SPEED_NUM_100M;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_100M;
else if (port_cfg->dev_info.speed_capa & (ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M))
- port_cfg->link_speed = ETH_SPEED_NUM_10M;
+ port_cfg->max_link_speed = ETH_SPEED_NUM_10M;
}
#endif
@@ -459,7 +459,7 @@ static void init_port(struct prox_port_cfg *port_cfg)
uint8_t port_id;
int ret;
- get_link_speed(port_cfg);
+ get_max_link_speed(port_cfg);
print_port_capa(port_cfg);
port_id = port_cfg - prox_port_cfg;
PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
@@ -652,9 +652,7 @@ static void init_port(struct prox_port_cfg *port_cfg)
rte_eth_link_get(port_id, &link);
port_cfg->link_up = link.link_status;
-#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
port_cfg->link_speed = link.link_speed;
-#endif
if (link.link_status) {
plog_info("Link Up - speed %'u Mbps - %s\n",
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.h b/VNFs/DPPD-PROX/prox_port_cfg.h
index 696beb77..ccf83d6c 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.h
+++ b/VNFs/DPPD-PROX/prox_port_cfg.h
@@ -49,6 +49,7 @@ struct prox_port_cfg {
uint32_t n_txd;
uint8_t link_up;
uint32_t link_speed;
+ uint32_t max_link_speed;
uint32_t mtu;
enum addr_type type;
struct ether_addr eth_addr; /* port MAC address */
diff --git a/VNFs/DPPD-PROX/run.c b/VNFs/DPPD-PROX/run.c
index 2ad8aca1..bed0c757 100644
--- a/VNFs/DPPD-PROX/run.c
+++ b/VNFs/DPPD-PROX/run.c
@@ -78,10 +78,7 @@ static void update_link_states(void)
port_cfg = &prox_port_cfg[portid];
rte_eth_link_get_nowait(portid, &link);
-#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
- // On more recent DPDK, we use the speed_capa of the port, and not the negotiated speed
port_cfg->link_speed = link.link_speed;
-#endif
if (port_cfg->link_up != link.link_status) {
port_cfg->link_up = link.link_status;
plog_info("port %d: Link speed now %d Mbps\n", portid, link.link_speed);
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c
index 2571b8a4..075069c8 100644
--- a/VNFs/DPPD-PROX/rx_pkt.c
+++ b/VNFs/DPPD-PROX/rx_pkt.c
@@ -508,36 +508,3 @@ uint16_t rx_pkt_tsc(struct task_base *tbase, struct rte_mbuf ***mbufs)
return ret;
}
-
-uint16_t rx_pkt_all(struct task_base *tbase, struct rte_mbuf ***mbufs)
-{
- uint16_t tot = 0;
- uint16_t ret = 0;
- struct rte_mbuf **new_mbufs;
- struct rte_mbuf **dst = tbase->aux->all_mbufs;
-
- /* In case we receive less than MAX_PKT_BURST packets in one
- iteration, do no perform any copying of mbuf pointers. Use
- the buffer itself instead. */
- ret = call_prev_rx_pkt(tbase, &new_mbufs);
- if (ret < MAX_PKT_BURST/2) {
- *mbufs = new_mbufs;
- return ret;
- }
-
- memcpy(dst + tot, new_mbufs, ret * sizeof(*dst));
- tot += ret;
- *mbufs = dst;
-
- do {
- ret = call_prev_rx_pkt(tbase, &new_mbufs);
- memcpy(dst + tot, new_mbufs, ret * sizeof(*dst));
- tot += ret;
- } while (ret == MAX_PKT_BURST/2 && tot < MAX_RX_PKT_ALL - MAX_PKT_BURST);
-
- if (tot >= MAX_RX_PKT_ALL - MAX_PKT_BURST) {
- plog_err("Could not receive all packets - buffer full\n");
- }
-
- return tot;
-}
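The removal of rx_pkt_all() above is the other half of the rx-path simplification from the commit message: a handle loop now receives at most MAX_PKT_BURST packets, and the begin timestamp, the lower bound for when packets arrived, is only refreshed when a burst comes back not full (see the handle_lat.c hunk earlier). A compile-only sketch with simplified stand-in types:

    #include <stdint.h>

    #define MAX_PKT_BURST 64 /* illustrative value */

    struct lat_sketch {
        uint64_t begin;         /* lower bound for packet arrival time */
        uint64_t last_pkts_tsc; /* tsc taken right after the previous burst */
    };

    static void update_rx_bounds(struct lat_sketch *t, uint16_t n_pkts,
                                 uint64_t tsc_rx_before, uint64_t tsc_rx_after)
    {
        /* A full burst means more packets may still be waiting in the NIC,
         * so tsc_rx_before would be too late to use as a lower bound. */
        if (n_pkts < MAX_PKT_BURST)
            t->begin = tsc_rx_before;
        t->last_pkts_tsc = tsc_rx_after;
    }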
diff --git a/VNFs/DPPD-PROX/task_init.c b/VNFs/DPPD-PROX/task_init.c
index 08ccaf97..8441561f 100644
--- a/VNFs/DPPD-PROX/task_init.c
+++ b/VNFs/DPPD-PROX/task_init.c
@@ -351,10 +351,6 @@ struct task_base *init_task_struct(struct task_args *targ)
offset = init_rx_tx_rings_ports(targ, tbase, offset);
tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);
- if (targ->task_init->flag_features & TASK_FEATURE_RX_ALL) {
- task_base_add_rx_pkt_function(tbase, rx_pkt_all);
- tbase->aux->all_mbufs = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(* tbase->aux->all_mbufs), task_socket);
- }
if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) {
task_base_add_rx_pkt_function(tbase, rx_pkt_tsc);
}