path: root/VNFs/DPPD-PROX/handle_gen.h
blob: 5083fea9bd6393de3436bc69b77f90735f47280b
/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#ifndef _HANDLE_GEN_H_
#define _HANDLE_GEN_H_

/* Identifier stamped into generated packets: the id of the generator task
   that sent the packet plus a per-generator packet sequence number. Packed
   so it occupies exactly 5 bytes in the packet. */
struct unique_id {
	uint8_t  generator_id;
	uint32_t packet_id;
} __attribute__((packed));

static void unique_id_init(struct unique_id *unique_id, uint8_t generator_id, uint32_t packet_id)
{
	unique_id->generator_id = generator_id;
	unique_id->packet_id = packet_id;
}

static void unique_id_get(struct unique_id *unique_id, uint8_t *generator_id, uint32_t *packet_id)
{
	*generator_id = unique_id->generator_id;
	*packet_id = unique_id->packet_id;
}
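
/* Usage sketch (illustrative only: pkt_data, payload_off, task_id and
 * packet_count are made-up names; in PROX the offset into the packet comes
 * from the task configuration):
 *
 *	struct unique_id id;
 *	uint8_t gen_id;
 *	uint32_t seq;
 *
 *	// sender: stamp the 5-byte id into the outgoing packet
 *	unique_id_init(&id, task_id, packet_count++);
 *	memcpy(pkt_data + payload_off, &id, sizeof(id));
 *
 *	// receiver: read it back to identify the generator and the sequence
 *	memcpy(&id, pkt_data + payload_off, sizeof(id));
 *	unique_id_get(&id, &gen_id, &seq);
 */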

struct task_base;

/* Runtime control hooks into a generator task (implemented in handle_gen.c),
   used to reconfigure a running generator, e.g. from the PROX command line. */
void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count);	/* number of packets to send */
int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size);	/* non-zero on an invalid size */
void task_gen_set_rate(struct task_base *tbase, uint64_t bps);		/* transmit rate */
void task_gen_reset_randoms(struct task_base *tbase);			/* drop all randomized fields */
void task_gen_reset_values(struct task_base *tbase);			/* drop all fixed-value overrides */
int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len);	/* overwrite len bytes at offset */
int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id);	/* randomize bits at offset per rand_str */

uint32_t task_gen_get_n_randoms(struct task_base *tbase);	/* number of randomized fields configured */
uint32_t task_gen_get_n_values(struct task_base *tbase);	/* number of fixed-value overrides configured */
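
/* Usage sketch (illustrative only: how tbase is obtained and error handling
 * are omitted; the offsets assume a plain Ethernet/IPv4/UDP template):
 *
 *	task_gen_set_pkt_count(tbase, 1000000);			// stop after 1M packets
 *	task_gen_set_rate(tbase, rate);				// pace the generator
 *	task_gen_set_value(tbase, 0x0800, 12, 2);		// fix the 2-byte EtherType field
 *	task_gen_add_rand(tbase, "XXXXXXXXXXXXXXXX", 34, 0);	// random UDP source port
 */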

#endif /* _HANDLE_GEN_H_ */
path: root/VNFs/DPPD-PROX/token_time.h (fragment)

	   ...d with bpp. In addition, the token_time_tsc_until function will
	   multiply at most bytes_max with period so make sure that can't
	   overflow. */
	while (period < UINT64_MAX/2 && frac != floor(frac) &&
	       (frac < 2.0f || period < UINT64_MAX/4/(uint64_t)frac) &&
	       (bytes_max == UINT64_MAX || period < UINT64_MAX/2/bytes_max)) {
		period *= 2;
		frac *= 2;
	}

	ret.bpp = floor(frac + 0.5);
	ret.period = period;
	ret.bytes_max = bytes_max;

	return ret;
}

static void token_time_update(struct token_time *tt, uint64_t tsc)
{
	uint64_t new_bytes;
	uint64_t t_diff = tsc - tt->tsc_last;

	/* Since the rate is expressed in tt->bpp, i.e. bytes per period,
	   counters can only be incremented/decremented accurately every
	   period cycles. */
	/* If the last update was more than a period ago, the update can
	   be performed accurately. */
	if (t_diff > tt->cfg.period) {
		/* First add remaining tokens in the last period that was
		   added partially. */
		new_bytes = tt->cfg.bpp - tt->tsc_last_bytes;
		tt->tsc_last_bytes = 0;
		tt->bytes_now += new_bytes;

		t_diff -= tt->cfg.period;
		tt->tsc_last += tt->cfg.period;

		/* If now it turns out that more periods have elapsed, add
		   the bytes for those periods directly. */
		if (t_diff > tt->cfg.period) {
			uint64_t periods = t_diff/tt->cfg.period;

			tt->bytes_now += periods * tt->cfg.bpp;
			t_diff -= tt->cfg.period * periods;
			tt->tsc_last += tt->cfg.period * periods;
		}
	}

	/* At this point, t_diff will be guaranteed to be less than
	   tt->cfg.period. */
	new_bytes = t_diff * tt->cfg.bpp/tt->cfg.period - tt->tsc_last_bytes;
	tt->tsc_last_bytes += new_bytes;
	tt->bytes_now += new_bytes;
	if (tt->bytes_now > tt->cfg.bytes_max)
		tt->bytes_now = tt->cfg.bytes_max;
}

static void token_time_set_bpp(struct token_time *tt, uint64_t bpp)
{
	tt->cfg.bpp = bpp;
}

static void token_time_init(struct token_time *tt, const struct token_time_cfg *cfg)
{
	tt->cfg = *cfg;
}

static void token_time_reset(struct token_time *tt, uint64_t tsc, uint64_t bytes_now)
{
	tt->tsc_last = tsc;
	tt->bytes_now = bytes_now;
	tt->tsc_last_bytes = 0;
}

static void token_time_reset_full(struct token_time *tt, uint64_t tsc)
{
	token_time_reset(tt, tsc, tt->cfg.bytes_max);
}

static int token_time_take(struct token_time *tt, uint64_t bytes)
{
	if (bytes > tt->bytes_now)
		return -1;
	tt->bytes_now -= bytes;
	return 0;
}

static void token_time_take_clamp(struct token_time *tt, uint64_t bytes)
{
	if (bytes > tt->bytes_now)
		tt->bytes_now = 0;
	else
		tt->bytes_now -= bytes;
}

static uint64_t token_time_tsc_until(const struct token_time *tt, uint64_t bytes)
{
	if (tt->bytes_now >= bytes)
		return 0;

	return (bytes - tt->bytes_now) * tt->cfg.period / tt->cfg.bpp;
}

static uint64_t token_time_tsc_until_full(const struct token_time *tt)
{
	return token_time_tsc_until(tt, tt->cfg.bytes_max);
}

#endif /* _TOKEN_TIME_H_ */
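
/* Usage sketch for the token bucket above (illustrative only: rte_rdtsc() is
 * DPDK's TSC read; send_pkt(), wait_cycles() and the cfg values are
 * placeholders; struct token_time and struct token_time_cfg are defined in
 * the part of this header that is missing here):
 *
 *	struct token_time tt;
 *	struct token_time_cfg cfg = {
 *		.bpp = 1250,		// tokens (bytes) credited per period
 *		.period = 1000,		// period length in TSC cycles
 *		.bytes_max = 16384,	// bucket capacity, i.e. largest burst
 *	};
 *
 *	token_time_init(&tt, &cfg);
 *	token_time_reset_full(&tt, rte_rdtsc());	// start with a full bucket
 *
 *	for (;;) {
 *		token_time_update(&tt, rte_rdtsc());	// credit elapsed periods
 *		if (token_time_take(&tt, pkt_len) == 0)
 *			send_pkt();	// enough tokens: transmit pkt_len bytes
 *		else	// otherwise wait at least this many cycles and retry
 *			wait_cycles(token_time_tsc_until(&tt, pkt_len));
 *	}
 */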