/*
// Copyright (c) 2010-2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

/* DPDK system headers; the original header names were lost, so these are
   reconstructed from the APIs used below (rte_hash, rte_lcore, rte_cycles,
   ether_addr). */
#include <rte_hash.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_ether.h>

#include "prox_malloc.h"
#include "task_init.h"
#include "lconf.h"
#include "defines.h"
#include "stats.h"
#include "tx_pkt.h"
#include "hash_entry_types.h"
#include "prefetch.h"
#include "prox_cksum.h"
#include "gre.h"
#include "etypes.h"
#include "log.h"
#include "quit.h"
#include "prox_assert.h"
#include "pkt_prototypes.h"

/* Hash key: the client (CPE) MAC address, padded to 8 bytes. */
struct cpe_gre_key {
	struct ether_addr clt_mac;
	uint16_t pad;
} __attribute__((__packed__));

/* Per-CPE state stored alongside each hash entry. */
struct cpe_gre_data {
	uint32_t gre_id;
	uint32_t cpe_ip;
	uint64_t tsc;
#ifdef GRE_TP
	uint64_t tp_tsc;
	double tp_tbsize;
#endif
} __attribute__((__packed__));

struct task_gre_decap {
	struct task_base base;
	struct rte_hash *cpe_gre_hash;
	struct cpe_gre_data *cpe_gre_data;
	struct lcore_cfg *lconf;
	uint8_t runtime_flags;
	uint8_t mapping[PROX_MAX_PORTS];
	uint32_t bucket_index;
	int offload_crc;
	const void *key_ptr[16];
	struct cpe_gre_key key[16];
	uint64_t cpe_timeout;
#ifdef GRE_TP
	double cycles_per_byte;
	uint32_t tb_size;
#endif
};

static void handle_gre_decap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static void handle_gre_encap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);

static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_mbuf *mbuf, struct cpe_gre_data *table);
static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
static inline uint8_t handle_gre_decap(struct task_gre_decap *tbase, struct rte_mbuf *mbuf);

void update_arp_entries_gre(void *data);

/* Create the CPE/GRE lookup table once per core and share it between all
   GRE decap/encap tasks running on that core. */
static void init_cpe_gre_hash(struct task_args *targ)
{
	char name[64];
	uint8_t socket_id;
	uint8_t lcore_id;
	uint8_t table_part;

	/* Already set up by other task */
	if (targ->cpe_gre_hash) {
		return;
	}

	lcore_id = targ->lconf->id;
	socket_id = rte_lcore_to_socket_id(lcore_id);
	sprintf(name, "core_%u_CPE_GRE_Table", targ->lconf->id);

	/* The table is partitioned across slave threads; round the partition
	   count down to the previous power of two. */
	table_part = targ->nb_slave_threads;
	if (table_part == 0)
		table_part = 1;
	if (!rte_is_power_of_2(table_part)) {
		table_part = rte_align32pow2(table_part) >> 1;
	}

	struct rte_hash_parameters hash_params = {
		.name = name,
		.entries = MAX_GRE / table_part,
		.bucket_entries = GRE_BUCKET_ENTRIES,
		.key_len = sizeof(struct cpe_gre_key),
		.hash_func_init_val = 0,
		.socket_id = socket_id
	};

	struct rte_hash *phash = rte_hash_create(&hash_params);
	/* One cpe_gre_data entry per possible hash entry. */
	struct cpe_gre_data *cpe_gre_data = prox_zmalloc((MAX_GRE / table_part) * sizeof(struct cpe_gre_data), socket_id);

	PROX_PANIC(phash == NULL, "Unable to create CPE GRE hash table on core %u\n", lcore_id);
	PROX_PANIC(cpe_gre_data == NULL, "Unable to allocate CPE GRE data on core %u\n", lcore_id);

	for (uint8_t task_id = 0; task_id < targ->lconf->n_tasks_all; ++task_id) {
		enum task_mode smode = targ->lconf->targs[task_id].mode;
		if (smode == GRE_DECAP || smode == GRE_ENCAP) {
			targ->lconf->targs[task_id].cpe_gre_hash = phash;
			targ->lconf->targs[task_id].cpe_gre_data = cpe_gre_data;
		}
	}
}

static void init_task_gre_decap(struct task_base *tbase, struct task_args *targ)
{
	struct task_gre_decap *task = (struct task_gre_decap *)tbase;
	init_cpe_gre_hash(targ);
	task->cpe_gre_hash = targ->cpe_gre_hash;
	task->cpe_gre_data = targ->cpe_gre_data;
	task->runtime_flags = targ->runtime_flags;
	task->lconf = targ->lconf;
	task->cpe_timeout = msec_to_tsc(targ->cpe_table_timeout_ms);

	targ->lconf->period_func = update_arp_entries_gre;
	targ->lconf->period_data = tbase;
	targ->lconf->period_timeout = msec_to_tsc(500) / NUM_VCPES;

	for (uint8_t i = 0; i < 16; ++i) {
		task->key_ptr[i] = &task->key[i];
	}
}

static void init_task_gre_encap(struct task_base *tbase, struct task_args *targ)
{
	struct task_gre_decap *task = (struct task_gre_decap *)tbase;

	init_cpe_gre_hash(targ);
	task->cpe_gre_hash = targ->cpe_gre_hash;
	task->cpe_gre_data = targ->cpe_gre_data;
	task->runtime_flags = targ->runtime_flags;
	task->lconf = targ->lconf;

	struct port_cfg *port = find_reachable_task_sending_to_port(targ);
	if (port) {
		task->offload_crc = port->capabilities.tx_offload_cksum;
	}

#ifdef GRE_TP
	if (targ->tb_rate) {
		task->cycles_per_byte = ((double)rte_get_tsc_hz()) / ((double)targ->tb_rate);
		task->tb_size = targ->tb_size != 0 ? targ->tb_size : 1520;
	}
	else {
		/* traffic policing disabled */
		task->cycles_per_byte = 0;
	}
#endif
}

static struct task_init task_init_gre_decap = {
	.mode = GRE_DECAP,
	.mode_str = "gredecap",
	.init = init_task_gre_decap,
	.handle = handle_gre_decap_bulk,
	.size = sizeof(struct task_gre_decap)
};

static struct task_init task_init_gre_encap = {
	.mode = GRE_ENCAP,
	.mode_str = "greencap",
	.init = init_task_gre_encap,
	.handle = handle_gre_encap_bulk,
	.size = sizeof(struct task_gre_decap)
};

__attribute__((constructor)) static void reg_task_gre(void)
{
	reg_task(&task_init_gre_decap);
	reg_task(&task_init_gre_encap);
}

void handle_gre_decap_bulk(struct task_base *tbase, struct rte_mbuf