author    Xavier Simonart <xavier.simonart@intel.com>  2019-03-08 13:32:53 +0100
committer Xavier Simonart <xavier.simonart@intel.com>  2019-03-08 13:32:53 +0100
commit    3d17d9c7e0daae0b82973283a23f3e85d0decabd (patch)
tree      f857a5bd7783e1942b70802658173a38d18d9059 /VNFs/DPPD-PROX
parent    d813669721c37f0dcbb8f12b950cde7362265c50 (diff)
Fix PROX generator latency
When PROX fails to transmit packets (e.g. because the switch or NIC is overloaded), some cleanup needs to be done related to packet id and latency. In addition, in some cases the clock estimate is slightly over-estimated, which can result, in rare cases, in negative (hence very high) latencies due to the extrapolation of timestamps. This has been worked around by decreasing the extrapolation, tolerating up to 1% clock estimate error.

Change-Id: I1ba17dfe0d5e2f9f9167f4f087ed0d96da1293c2
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
Diffstat (limited to 'VNFs/DPPD-PROX')
-rw-r--r--  VNFs/DPPD-PROX/handle_gen.c | 20
-rw-r--r--  VNFs/DPPD-PROX/handle_lat.c |  7
2 files changed, 23 insertions(+), 4 deletions(-)
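Before the diff itself, a minimal standalone sketch of the arithmetic the extrapolation part of the fix targets. All frequencies and the 10 Gbps link rate below are hypothetical illustration values, not taken from PROX. bytes_to_tsc[i] is built as hz * i / bytes_per_hz and is later subtracted from a reference timestamp to extrapolate backwards, so an over-estimated hz makes that subtraction too large and can push the extrapolated timestamp before the matching transmit time, producing a negative latency; multiplying by 0.99 keeps the table on the conservative side for clocks with up to ~1% estimation error.

```c
/* Illustrative sketch, not PROX code: hypothetical numbers showing why an
 * over-estimated TSC frequency makes backward extrapolation overshoot the
 * real transmit time, and how the 0.99 factor keeps the error on the safe side. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t real_hz = 2000000000ULL;      /* true TSC frequency: 2.0 GHz         */
	uint64_t est_hz  = 2010000000ULL;      /* estimated frequency, 0.5% too high  */
	uint64_t bytes_per_hz = 1250000000ULL; /* 10 Gbps link = 1.25 GB per second   */
	uint64_t frame = 1518;                 /* bytes to extrapolate over           */

	/* Ticks the frame really occupies on the wire */
	uint64_t wire = (real_hz * frame) / bytes_per_hz;
	/* bytes_to_tsc[frame] as built before and after the fix */
	uint64_t old_extrap = (est_hz * frame) / bytes_per_hz;
	uint64_t new_extrap = (uint64_t)((est_hz * frame * 0.99) / bytes_per_hz);

	printf("wire time            : %" PRIu64 " ticks\n", wire);
	printf("extrapolation (old)  : %" PRIu64 " ticks -> overshoots by %" PRId64 "\n",
	       old_extrap, (int64_t)(old_extrap - wire));
	printf("extrapolation (0.99) : %" PRIu64 " ticks -> undershoots by %" PRId64 "\n",
	       new_extrap, (int64_t)(wire - new_extrap));
	return 0;
}
```

With these numbers the uncorrected table overshoots the real wire time by a handful of ticks per frame; extrapolating over a burst of frames multiplies that error, which is how the latency can end up below zero.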
diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c
index fcdbcd62..4bf2e6eb 100644
--- a/VNFs/DPPD-PROX/handle_gen.c
+++ b/VNFs/DPPD-PROX/handle_gen.c
@@ -13,7 +13,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
*/
-
#include <rte_mbuf.h>
#include <pcap.h>
#include <string.h>
@@ -669,6 +668,20 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk);
ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out);
task_gen_store_accuracy(task, send_bulk, tsc_before_tx);
+
+ // If we failed to send some packets, we need to do some clean-up:
+
+ if (unlikely(ret)) {
+ // Re-use the indexes of the packets that were not sent,
+ // so that the receiver does not count them as lost when it looks at
+ // packet ids. This should also increase the percentage of packets used for latency measurements
+ task->pkt_queue_index -= ret;
+
+ // In case of failures, the estimate of when we can send the next packet (earliest_tsc_next_pkt) is wrong;
+ // this would result in under-estimated latencies (down to 0 or even negative)
+ uint64_t bulk_duration = task_gen_calc_bulk_duration(task, ret);
+ task->earliest_tsc_next_pkt -= bulk_duration;
+ }
return ret;
}
@@ -1275,11 +1288,14 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
plog_info("\tPort %u: max link speed is %ld Mbps\n",
(uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
}
+ // In some cases the hz estimate may be slightly over-estimated,
+ // which results in too much extrapolation.
+ // Only account for 99% of the extrapolation to tolerate clocks with up to 1% estimation error
for (unsigned int i = 0; i < task->max_frame_size * MAX_PKT_BURST ; i++) {
if (bytes_per_hz == UINT64_MAX)
task->bytes_to_tsc[i] = 0;
else
- task->bytes_to_tsc[i] = (task->hz * i) / bytes_per_hz;
+ task->bytes_to_tsc[i] = (task->hz * i * 0.99) / bytes_per_hz;
}
if (!strcmp(targ->pcap_file, "")) {
diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c
index 93652d47..b4e016ec 100644
--- a/VNFs/DPPD-PROX/handle_lat.c
+++ b/VNFs/DPPD-PROX/handle_lat.c
@@ -437,7 +437,7 @@ static uint32_t task_lat_early_loss_detect(struct task_lat *task, uint32_t packe
static uint64_t tsc_extrapolate_backward(struct task_lat *task, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
{
-#ifdef NO_EXTRAPOLATION
+#ifdef NO_LAT_EXTRAPOLATION
uint64_t tsc = tsc_from;
#else
uint64_t tsc = tsc_from - task->bytes_to_tsc[bytes];
@@ -806,11 +806,14 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(task->bytes_to_tsc == NULL,
"Failed to allocate %u bytes (in huge pages) for bytes_to_tsc\n", max_frame_size);
+ // In some cases the hz estimate may be slightly over-estimated,
+ // which results in too much extrapolation.
+ // Only account for 99% of the extrapolation to tolerate clocks with up to 1% estimation error
for (unsigned int i = 0; i < max_frame_size * MAX_PKT_BURST ; i++) {
if (bytes_per_hz == UINT64_MAX)
task->bytes_to_tsc[i] = 0;
else
- task->bytes_to_tsc[i] = (rte_get_tsc_hz() * i) / bytes_per_hz;
+ task->bytes_to_tsc[i] = (rte_get_tsc_hz() * i * 0.99) / bytes_per_hz;
}
}
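As a reading aid, here is a simplified, self-contained restatement of the cleanup added to handle_gen_bulk() above. The struct gen_state, gen_tx_failure_cleanup() and the fixed tsc_per_pkt are stand-ins invented for this sketch; the real code keeps this state in the generator task and computes the duration of the unsent packets via task_gen_calc_bulk_duration().

```c
/* Sketch of the generator-side TX-failure cleanup; types and names are
 * stand-ins for the real PROX structures, not the actual implementation. */
#include <stdint.h>
#include <stdio.h>

struct gen_state {
	uint32_t pkt_queue_index;       /* next packet id to stamp into a packet            */
	uint64_t earliest_tsc_next_pkt; /* pacing: earliest TSC at which the next burst may go out */
	uint64_t tsc_per_pkt;           /* per-packet duration at the target rate (fixed size here) */
};

/* ret = number of packets the TX call failed to send */
static void gen_tx_failure_cleanup(struct gen_state *s, uint32_t ret)
{
	if (ret == 0)
		return;

	/* Re-use the ids of the unsent packets so the receiver does not count
	 * them as lost and more packets remain usable for latency statistics. */
	s->pkt_queue_index -= ret;

	/* The pacing estimate assumed the whole burst was sent; pull it back by
	 * the duration of the unsent packets so latencies are not under-estimated. */
	s->earliest_tsc_next_pkt -= (uint64_t)ret * s->tsc_per_pkt;
}

int main(void)
{
	struct gen_state s = { .pkt_queue_index = 100,
			       .earliest_tsc_next_pkt = 1000000,
			       .tsc_per_pkt = 2500 };

	gen_tx_failure_cleanup(&s, 4); /* pretend 4 packets of the burst were dropped */
	printf("next id %u, next tsc %lu\n", s.pkt_queue_index,
	       (unsigned long)s.earliest_tsc_next_pkt);
	return 0;
}
```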