diff options
Diffstat (limited to 'VNFs/DPPD-PROX/handle_gen.c')
-rw-r--r-- | VNFs/DPPD-PROX/handle_gen.c | 352 |
1 file changed, 334 insertions(+), 18 deletions(-)
diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c index adcabd79..2c8a65c7 100644 --- a/VNFs/DPPD-PROX/handle_gen.c +++ b/VNFs/DPPD-PROX/handle_gen.c @@ -13,6 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. */ + +#include <rte_common.h> +#ifndef __rte_cache_aligned +#include <rte_memory.h> +#endif + #include <rte_mbuf.h> #include <pcap.h> #include <string.h> @@ -21,6 +27,7 @@ #include <rte_version.h> #include <rte_byteorder.h> #include <rte_ether.h> +#include <rte_hash.h> #include <rte_hash_crc.h> #include <rte_malloc.h> @@ -49,6 +56,7 @@ #include "handle_master.h" #include "defines.h" #include "prox_ipv6.h" +#include "handle_lb_5tuple.h" struct pkt_template { uint16_t len; @@ -57,6 +65,13 @@ struct pkt_template { uint8_t *buf; }; +#define MAX_STORE_PKT_SIZE 2048 + +struct packet { + unsigned int len; + unsigned char buf[MAX_STORE_PKT_SIZE]; +}; + #define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24 #define DO_PANIC 1 @@ -65,6 +80,8 @@ struct pkt_template { #define FROM_PCAP 1 #define NOT_FROM_PCAP 0 +#define MAX_RANGES 64 + #define TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC 1 static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt) @@ -90,6 +107,10 @@ struct task_gen_pcap { uint32_t socket_id; }; +struct flows { + uint32_t packet_id; +}; + struct task_gen { struct task_base base; uint64_t hz; @@ -110,10 +131,13 @@ struct task_gen { uint16_t packet_id_pos; uint16_t accur_pos; uint16_t sig_pos; + uint16_t flow_id_pos; + uint16_t packet_id_in_flow_pos; uint32_t sig; uint32_t socket_id; uint8_t generator_id; uint8_t n_rands; /* number of randoms */ + uint8_t n_ranges; /* number of ranges */ uint8_t min_bulk_size; uint8_t max_bulk_size; uint8_t lat_enabled; @@ -125,6 +149,7 @@ struct task_gen { uint16_t rand_offset; /* each random has an offset*/ uint8_t rand_len; /* # bytes to take from random (no bias introduced) */ } 
rand[64]; + struct range ranges[MAX_RANGES]; uint64_t accur[ACCURACY_WINDOW]; uint64_t pkt_tsc_offset[64]; struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */ @@ -136,6 +161,12 @@ struct task_gen { uint32_t imix_pkt_sizes[MAX_IMIX_PKTS]; uint32_t imix_nb_pkts; uint32_t new_imix_nb_pkts; + uint32_t store_pkt_id; + uint32_t store_msk; + struct packet *store_buf; + FILE *fp; + struct rte_hash *flow_id_table; + struct flows*flows; } __rte_cache_aligned; static void task_gen_set_pkt_templates_len(struct task_gen *task, uint32_t *pkt_sizes); @@ -379,6 +410,146 @@ static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pk task_gen_apply_random_fields(task, pkt_hdr[i]); } +static void task_gen_apply_ranges(struct task_gen *task, uint8_t *pkt_hdr) +{ + uint32_t ret; + if (!task->n_ranges) + return; + + for (uint16_t j = 0; j < task->n_ranges; ++j) { + if (unlikely(task->ranges[j].value == task->ranges[j].max)) + task->ranges[j].value = task->ranges[j].min; + else + task->ranges[j].value++; + ret = rte_bswap32(task->ranges[j].value); + uint8_t *pret = (uint8_t*)&ret; + rte_memcpy(pkt_hdr + task->ranges[j].offset, pret + 4 - task->ranges[j].range_len, task->ranges[j].range_len); + } +} + +static void task_gen_apply_all_ranges(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count) +{ + uint32_t ret; + if (!task->n_ranges) + return; + + for (uint16_t i = 0; i < count; ++i) { + task_gen_apply_ranges(task, pkt_hdr[i]); + } +} + +static inline uint32_t gcd(uint32_t a, uint32_t b) +{ + // Euclidean algorithm + uint32_t t; + while (b != 0) { + t = b; + b = a % b; + a = t; + } + return a; +} + +static inline uint32_t lcm(uint32_t a, uint32_t b) +{ + return ((a / gcd(a, b)) * b); +} + +static uint32_t get_n_range_flows(struct task_gen *task) +{ + uint32_t t = 1; + for (int i = 0; i < task->n_ranges; i++) { + t = lcm((task->ranges[i].max - task->ranges[i].min) + 1, t); + } + return t; +} + +static uint32_t 
get_n_rand_flows(struct task_gen *task) +{ + uint32_t t = 0; + for (int i = 0; i < task->n_rands; i++) { + t += __builtin_popcount(task->rand[i].rand_mask); + } + PROX_PANIC(t > 31, "Too many random bits - maximum 31 supported\n"); + return 1 << t; +} + +//void add_to_hash_table(struct task_gen *task, uint32_t *buffer, uint32_t *idx, uint32_t mask, uint32_t bit_pos, uint32_t val, uint32_t fixed_bits, uint32_t rand_offset) { +// uint32_t ret_tmp = val | fixed_bits; +// ret_tmp = rte_bswap32(ret_tmp); +// uint8_t *pret_tmp = (uint8_t*)&ret_tmp; +// rte_memcpy(buf + rand_offset, pret_tmp + 4 - rand_len, rand_len); +// +// init idx +// alloc buffer +// init/alloc hash_table +//void build_buffer(struct task_gen *task, uint32_t *buffer, uint32_t *idx, uint32_t mask, uint32_t bit_pos, uint32_t val) +//{ +// if (mask == 0) { +// buffer[*idx] = val; +// *idx = (*idx) + 1; +// return; +// } +// build_buffer(task, but, mask >> 1, bit_pos + 1, val); +// if (mask & 1) { +// build_buffer(task, but, mask >> 1, bit_pos + 1, val | (1 << bit_pos)); +//} + +static void build_flow_table(struct task_gen *task) +{ + uint8_t buf[2048], *key_fields; + union ipv4_5tuple_host key; + struct pkt_template *pkt_template; + uint32_t n_range_flows = get_n_range_flows(task); + // uint32_t n_rand_flows = get_n_rand_flows(task); + // uint32_t n_flows= n_range_flows * n_rand_flows * task->orig_n_pkts; + // for (int i = 0; i < task->n_rands; i++) { + // build_buffer(task, task->values_buf[i], &task->values_idx[i], task->rand[i].rand_mask, 0, 0); + // } + + uint32_t n_flows = n_range_flows * task->orig_n_pkts; + + for (uint32_t k = 0; k < task->orig_n_pkts; k++) { + memcpy(buf, task->pkt_template[k].buf, task->pkt_template[k].len); + for (uint32_t j = 0; j < n_range_flows; j++) { + task_gen_apply_ranges(task, buf); + key_fields = buf + sizeof(prox_rte_ether_hdr) + offsetof(prox_rte_ipv4_hdr, time_to_live); + key.xmm = _mm_loadu_si128((__m128i*)(key_fields)); + key.pad0 = key.pad1 = 0; + int idx = 
rte_hash_add_key(task->flow_id_table, (const void *)&key); + PROX_PANIC(idx < 0, "Unable to add key in table\n"); + if (idx >= 0) + plog_dbg("Added key %d, %x, %x, %x, %x\n", key.proto, key.ip_src, key.ip_dst, key.port_src, key.port_dst); + } + } +} + +static int32_t task_gen_get_flow_id(struct task_gen *task, uint8_t *pkt_hdr) +{ + int ret = 0; + union ipv4_5tuple_host key; + uint8_t *hdr = pkt_hdr + sizeof(prox_rte_ether_hdr) + offsetof(prox_rte_ipv4_hdr, time_to_live); + // __m128i data = _mm_loadu_si128((__m128i*)(hdr)); + // key.xmm = _mm_and_si128(data, mask0); + key.xmm = _mm_loadu_si128((__m128i*)(hdr)); + key.pad0 = key.pad1 = 0; + ret = rte_hash_lookup(task->flow_id_table, (const void *)&key); + if (ret < 0) { + plog_err("Flow not found: %d, %x, %x, %x, %x\n", key.proto, key.ip_src, key.ip_dst, key.port_src, key.port_dst); + } + return ret; +} + +static void task_gen_apply_all_flow_id(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count, int32_t *flow_id) +{ + if (task->flow_id_pos) { + for (uint16_t j = 0; j < count; ++j) { + flow_id[j] = task_gen_get_flow_id(task, pkt_hdr[j]); + *(int32_t *)(pkt_hdr[j] + task->flow_id_pos) = flow_id[j]; + } + } +} + static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy) { *(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy; @@ -390,7 +561,7 @@ static void task_gen_apply_sig(struct task_gen *task, struct pkt_template *dst) *(uint32_t *)(dst->buf + task->sig_pos) = task->sig; } -static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +static void task_gen_apply_all_accur_pos(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count) { if (!task->accur_pos) return; @@ -411,7 +582,7 @@ static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, co *dst = *id; } -static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) +static 
void task_gen_apply_all_unique_id(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count) { if (!task->packet_id_pos) return; @@ -423,6 +594,26 @@ static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf } } +static void task_gen_apply_id_in_flows(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id) +{ + struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_in_flow_pos); + *dst = *id; +} + +static void task_gen_apply_all_id_in_flows(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count, int32_t *idx) +{ + if (!task->packet_id_in_flow_pos) + return; + + for (uint16_t i = 0; i < count; ++i) { + struct unique_id id; + if (idx[i] >= 0 ) { + unique_id_init(&id, task->generator_id, task->flows[idx[i]].packet_id++); + task_gen_apply_id_in_flows(task, pkt_hdr[i], &id); + } + } +} + static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count) { if (!(task->runtime_flags & TASK_TX_CRC)) @@ -712,7 +903,7 @@ static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int return 0; } -static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t n_orig_pkts, uint32_t nb_pkt_sizes, uint32_t *pkt_sizes) +static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t orig_n_pkts, uint32_t nb_pkt_sizes, uint32_t *pkt_sizes) { size_t k; uint32_t l4_len; @@ -720,8 +911,8 @@ static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t n_orig_ struct pkt_template *template; for (size_t j = 0; j < nb_pkt_sizes; ++j) { - for (size_t i = 0; i < n_orig_pkts; ++i) { - k = j * n_orig_pkts + i; + for (size_t i = 0; i < orig_n_pkts; ++i) { + k = j * orig_n_pkts + i; template = &task->pkt_template[k]; if (template->l2_len == 0) continue; @@ -949,17 +1140,31 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin if (new_pkts == NULL) return 0; uint8_t *pkt_hdr[MAX_RING_BURST]; - + int32_t 
flow_id[MAX_RING_BURST]; task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk); task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk); task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk); - task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk); - task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk); + task_gen_apply_all_ranges(task, pkt_hdr, send_bulk); + task_gen_apply_all_accur_pos(task, pkt_hdr, send_bulk); + task_gen_apply_all_flow_id(task, pkt_hdr, send_bulk, flow_id); + task_gen_apply_all_unique_id(task, pkt_hdr, send_bulk); + task_gen_apply_all_id_in_flows(task, pkt_hdr, send_bulk, flow_id); uint64_t tsc_before_tx; tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk); task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk); + if (task->store_msk) { + for (uint32_t i = 0; i < send_bulk; i++) { + if (out[i] != OUT_DISCARD) { + uint8_t *hdr; + hdr = (uint8_t *)rte_pktmbuf_mtod(new_pkts[i], prox_rte_ether_hdr *); + memcpy(&task->store_buf[task->store_pkt_id & task->store_msk].buf, hdr, rte_pktmbuf_pkt_len(new_pkts[i])); + task->store_buf[task->store_pkt_id & task->store_msk].len = rte_pktmbuf_pkt_len(new_pkts[i]); + task->store_pkt_id++; + } + } + } ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out); task_gen_store_accuracy(task, send_bulk, tsc_before_tx); @@ -1184,7 +1389,10 @@ static void task_gen_reset_pkt_templates_content(struct task_gen *task) static void task_gen_reset_pkt_templates(struct task_gen *task) { - task_gen_reset_pkt_templates_len(task); + if (task->imix_nb_pkts) + task_gen_set_pkt_templates_len(task, task->imix_pkt_sizes); + else + task_gen_reset_pkt_templates_len(task); task_gen_reset_pkt_templates_content(task); task_gen_pkt_template_recalc_all(task); } @@ -1254,7 +1462,7 @@ static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint1 uint32_t mbuf_size = TX_MBUF_SIZE; if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > 
mbuf_size) mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM; - plog_info("\tCreating mempool with name '%s'\n", name); + plog_info("\t\tCreating mempool with name '%s'\n", name); ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0, @@ -1262,8 +1470,8 @@ static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint1 PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n", sock_id, targ->nb_mbuf - 1); - plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", ret, - targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id); + plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret, + targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id); return ret; } @@ -1328,6 +1536,14 @@ void task_gen_reset_randoms(struct task_base *tbase) task->n_rands = 0; } +void task_gen_reset_ranges(struct task_base *tbase) +{ + struct task_gen *task = (struct task_gen *)tbase; + + memset(task->ranges, 0, task->n_ranges * sizeof(struct range)); + task->n_ranges = 0; +} + int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len) { struct task_gen *task = (struct task_gen *)tbase; @@ -1351,6 +1567,11 @@ void task_gen_reset_values(struct task_base *tbase) struct task_gen *task = (struct task_gen *)tbase; task_gen_reset_pkt_templates_content(task); + task_gen_pkt_template_recalc_metadata(task); + check_all_pkt_size(task, DO_NOT_PANIC); + check_all_fields_in_bounds(task, DO_NOT_PANIC); + task_gen_set_eth_ip_udp_sizes(task, task->orig_n_pkts, task->imix_nb_pkts, task->imix_pkt_sizes); + if (task->flags & TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC) { for (uint32_t i = 0; i < task->n_pkts; ++i) { rte_memcpy(&task->pkt_template[i].buf[sizeof(prox_rte_ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr)); @@ -1365,6 +1586,13 @@ 
uint32_t task_gen_get_n_randoms(struct task_base *tbase) return task->n_rands; } +uint32_t task_gen_get_n_ranges(struct task_base *tbase) +{ + struct task_gen *task = (struct task_gen *)tbase; + + return task->n_ranges; +} + static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ) { struct task_gen_pcap *task = (struct task_gen_pcap *)tbase; @@ -1420,6 +1648,26 @@ static int task_gen_find_random_with_offset(struct task_gen *task, uint32_t offs return UINT32_MAX; } +int task_gen_add_range(struct task_base *tbase, struct range *range) +{ + struct task_gen *task = (struct task_gen *)tbase; + if (task->n_ranges == MAX_RANGES) { + plog_err("Too many ranges\n"); + return -1; + } + task->ranges[task->n_ranges].min = range->min; + task->ranges[task->n_ranges].value = range->min; + uint32_t m = range->max; + task->ranges[task->n_ranges].range_len = 0; + while (m != 0) { + m >>= 8; + task->ranges[task->n_ranges].range_len++; + } + task->ranges[task->n_ranges].offset = range->offset; + task->ranges[task->n_ranges++].max = range->max; + return 0; +} + int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id) { struct task_gen *task = (struct task_gen *)tbase; @@ -1477,6 +1725,31 @@ static void start(struct task_base *tbase) */ } +static void stop_gen(struct task_base *tbase) +{ + uint32_t i, j; + struct task_gen *task = (struct task_gen *)tbase; + if (task->store_msk) { + for (i = task->store_pkt_id & task->store_msk; i < task->store_msk + 1; i++) { + if (task->store_buf[i].len) { + fprintf(task->fp, "%06d: ", i); + for (j = 0; j < task->store_buf[i].len; j++) { + fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]); + } + fprintf(task->fp, "\n"); + } + } + for (i = 0; i < (task->store_pkt_id & task->store_msk); i++) { + if (task->store_buf[i].len) { + fprintf(task->fp, "%06d: ", i); + for (j = 0; j < task->store_buf[i].len; j++) { + fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]); + } + 
fprintf(task->fp, "\n"); + } + } + } +} static void start_pcap(struct task_base *tbase) { struct task_gen_pcap *task = (struct task_gen_pcap *)tbase; @@ -1508,7 +1781,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) struct prox_port_cfg *port = find_reachable_port(targ); // TODO: check that all reachable ports have the same mtu... if (port) { - task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM); + task->cksum_offload = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM); task->port = port; task->max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE; } else { @@ -1522,6 +1795,8 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) task->lat_pos = targ->lat_pos; task->accur_pos = targ->accur_pos; task->sig_pos = targ->sig_pos; + task->flow_id_pos = targ->flow_id_pos; + task->packet_id_in_flow_pos = targ->packet_id_in_flow_pos; task->sig = targ->sig; task->new_rate_bps = targ->rate_bps; @@ -1552,7 +1827,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat not enabled by lat pos or accur pos configured\n"); task->generator_id = targ->generator_id; - plog_info("\tGenerator id = %d\n", task->generator_id); + plog_info("\t\tGenerator id = %d\n", task->generator_id); // Allocate array holding bytes to tsc for supported frame sizes task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), task->socket_id); @@ -1564,7 +1839,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) uint64_t bytes_per_hz = UINT64_MAX; if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) { bytes_per_hz = task->port->max_link_speed * 125000L; - plog_info("\tPort %u: max link speed is %ld Mbps\n", + plog_info("\t\tPort %u: max link speed is %ld 
Mbps\n", (uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000); } // There are cases where hz estimate might be slighly over-estimated @@ -1582,10 +1857,10 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) task->imix_pkt_sizes[i] = targ->imix_pkt_sizes[i]; } if (!strcmp(targ->pcap_file, "")) { - plog_info("\tUsing inline definition of a packet\n"); + plog_info("\t\tUsing inline definition of a packet\n"); task_init_gen_load_pkt_inline(task, targ); } else { - plog_info("Loading from pcap %s\n", targ->pcap_file); + plog_info("\t\tLoading from pcap %s\n", targ->pcap_file); task_init_gen_load_pcap(task, targ); } @@ -1601,6 +1876,46 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ) PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX), "Failed to add random\n"); } + for (uint32_t i = 0; i < targ->n_ranges; ++i) { + PROX_PANIC(task_gen_add_range(tbase, &targ->range[i]), "Failed to add range\n"); + } + if (targ->store_max) { + char filename[256]; + sprintf(filename, "gen_buf_%02d_%02d", targ->lconf->id, targ->task); + + task->store_msk = targ->store_max - 1; + task->store_buf = (struct packet *)malloc(sizeof(struct packet) * targ->store_max); + task->fp = fopen(filename, "w+"); + PROX_PANIC(task->fp == NULL, "Unable to open %s\n", filename); + } else { + task->store_msk = 0; + } + uint32_t n_entries = get_n_range_flows(task) * task->orig_n_pkts * 4; +#ifndef RTE_HASH_BUCKET_ENTRIES +#define RTE_HASH_BUCKET_ENTRIES 8 +#endif + // cuckoo hash requires at least RTE_HASH_BUCKET_ENTRIES (8) entries + if (n_entries < RTE_HASH_BUCKET_ENTRIES) + n_entries = RTE_HASH_BUCKET_ENTRIES; + + static char hash_name[30]; + sprintf(hash_name, "A%03d_hash_gen_table", targ->lconf->id); + struct rte_hash_parameters hash_params = { + .name = hash_name, + .entries = n_entries, + .key_len = sizeof(union ipv4_5tuple_host), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = 
task->socket_id, + }; + plog_info("\t\thash table name = %s\n", hash_params.name); + task->flow_id_table = rte_hash_create(&hash_params); + PROX_PANIC(task->flow_id_table == NULL, "Failed to set up flow_id hash table for gen\n"); + plog_info("\t\tflow_id hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len); + build_flow_table(task); + task->flows = (struct flows *)prox_zmalloc(n_entries * sizeof(struct flows), task->socket_id); + PROX_PANIC(task->flows == NULL, "Failed to allocate flows\n"); + plog_info("\t\t%d flows allocated\n", n_entries); } static struct task_init task_init_gen = { @@ -1616,7 +1931,8 @@ static struct task_init task_init_gen = { #else .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX, #endif - .size = sizeof(struct task_gen) + .size = sizeof(struct task_gen), + .stop_last = stop_gen }; static struct task_init task_init_gen_l3 = { |