path: root/common
Diffstat (limited to 'common')
-rw-r--r--  common/VIL/acl/lib_acl.c  1203
-rw-r--r--  common/VIL/acl/lib_acl.h  127
-rw-r--r--  common/VIL/alg/lib_ftp_alg.c  917
-rw-r--r--  common/VIL/alg/lib_ftp_alg.h  102
-rw-r--r--  common/VIL/alg/lib_sip_alg.c  2257
-rw-r--r--  common/VIL/alg/lib_sip_alg.h  156
-rw-r--r--  common/VIL/conntrack/rte_cnxn_tracking.c  1804
-rw-r--r--  common/VIL/conntrack/rte_cnxn_tracking.h  413
-rw-r--r--  common/VIL/conntrack/rte_ct_synproxy.c  873
-rw-r--r--  common/VIL/conntrack/rte_ct_tcp.c  1116
-rw-r--r--  common/VIL/conntrack/rte_ct_tcp.h  484
-rw-r--r--  common/VIL/conntrack/rte_ct_udp.c  49
-rw-r--r--  common/VIL/l2l3_stack/Makefile  35
-rw-r--r--  common/VIL/l2l3_stack/bond.c  1595
-rw-r--r--  common/VIL/l2l3_stack/build/.interface.o.d  180
-rw-r--r--  common/VIL/l2l3_stack/build/.l2_proto.o.d  175
-rw-r--r--  common/VIL/l2l3_stack/build/.main.o.d  209
-rw-r--r--  common/VIL/l2l3_stack/hle.c  43
-rw-r--r--  common/VIL/l2l3_stack/hle.h  40
-rw-r--r--  common/VIL/l2l3_stack/interface.c  1478
-rw-r--r--  common/VIL/l2l3_stack/interface.h  873
-rw-r--r--  common/VIL/l2l3_stack/l2_proto.c  239
-rw-r--r--  common/VIL/l2l3_stack/l2_proto.h  150
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_common.h  111
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm4.c  1119
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm4.h  374
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm6.c  1058
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_lpm6.h  315
-rw-r--r--  common/VIL/l2l3_stack/l3fwd_main.c  145
-rw-r--r--  common/VIL/l2l3_stack/lib_arp.c  2655
-rw-r--r--  common/VIL/l2l3_stack/lib_arp.h  506
-rw-r--r--  common/VIL/l2l3_stack/lib_icmpv6.c  410
-rw-r--r--  common/VIL/l2l3_stack/lib_icmpv6.h  113
-rw-r--r--  common/VIL/l2l3_stack/main_l2l3.c  304
-rw-r--r--  common/VIL/l2l3_stack/tsx.c  167
-rw-r--r--  common/VIL/l2l3_stack/tsx.h  38
-rw-r--r--  common/VIL/pipeline_arpicmp/pipeline_arpicmp.c  2118
-rw-r--r--  common/VIL/pipeline_arpicmp/pipeline_arpicmp.h  122
-rw-r--r--  common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.c  3484
-rw-r--r--  common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.h  343
-rw-r--r--  common/VIL/pipeline_common/pipeline_common_be.c  189
-rw-r--r--  common/VIL/pipeline_common/pipeline_common_be.h  146
-rw-r--r--  common/VIL/pipeline_common/pipeline_common_fe.c  1429
-rw-r--r--  common/VIL/pipeline_common/pipeline_common_fe.h  231
-rw-r--r--  common/VIL/pipeline_loadb/pipeline_loadb.c  493
-rw-r--r--  common/VIL/pipeline_loadb/pipeline_loadb.h  29
-rw-r--r--  common/VIL/pipeline_loadb/pipeline_loadb_be.c  1417
-rw-r--r--  common/VIL/pipeline_loadb/pipeline_loadb_be.h  149
-rw-r--r--  common/VIL/pipeline_master/pipeline_master.c  30
-rw-r--r--  common/VIL/pipeline_master/pipeline_master.h  24
-rw-r--r--  common/VIL/pipeline_master/pipeline_master_be.c  134
-rw-r--r--  common/VIL/pipeline_master/pipeline_master_be.h  24
-rw-r--r--  common/VIL/pipeline_passthrough/pipeline_passthrough.c  30
-rw-r--r--  common/VIL/pipeline_passthrough/pipeline_passthrough.h  24
-rw-r--r--  common/VIL/pipeline_passthrough/pipeline_passthrough_be.c  787
-rw-r--r--  common/VIL/pipeline_passthrough/pipeline_passthrough_be.h  42
-rw-r--r--  common/VIL/pipeline_txrx/pipeline_txrx.c  151
-rw-r--r--  common/VIL/pipeline_txrx/pipeline_txrx.h  28
-rw-r--r--  common/VIL/pipeline_txrx/pipeline_txrx_be.c  915
-rw-r--r--  common/VIL/pipeline_txrx/pipeline_txrx_be.h  76
-rw-r--r--  common/vnf_common/app.h  972
-rw-r--r--  common/vnf_common/config_check.c  443
-rw-r--r--  common/vnf_common/config_parse.c  3434
-rw-r--r--  common/vnf_common/config_parse_tm.c  431
-rw-r--r--  common/vnf_common/cpu_core_map.c  475
-rw-r--r--  common/vnf_common/cpu_core_map.h  52
-rw-r--r--  common/vnf_common/hash_func.h  334
-rw-r--r--  common/vnf_common/parser.h  32
-rw-r--r--  common/vnf_common/pipeline.h  76
-rw-r--r--  common/vnf_common/pipeline_actions_common.h  214
-rw-r--r--  common/vnf_common/pipeline_be.h  288
-rw-r--r--  common/vnf_common/thread.c  305
-rw-r--r--  common/vnf_common/thread.h  81
-rw-r--r--  common/vnf_common/thread_fe.c  480
-rw-r--r--  common/vnf_common/thread_fe.h  84
-rw-r--r--  common/vnf_common/vnf_common.c  146
-rw-r--r--  common/vnf_common/vnf_common.h  200
-rw-r--r--  common/vnf_common/vnf_define.h  29
78 files changed, 42324 insertions, 0 deletions
diff --git a/common/VIL/acl/lib_acl.c b/common/VIL/acl/lib_acl.c
new file mode 100644
index 00000000..279727ef
--- /dev/null
+++ b/common/VIL/acl/lib_acl.c
@@ -0,0 +1,1203 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include "lib_acl.h"
+#include "vnf_common.h"
+#include <rte_port.h>
+#define ACL_LIB_DEBUG 0
+static struct rte_acl_field_def field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+#define SIZEOF_VLAN_HDR 4
+
+static struct rte_acl_field_def field_format_vlan_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR + offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+#define SIZEOF_QINQ_HEADER 8
+
+static struct rte_acl_field_def field_format_qinq_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER + offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ sizeof(struct ipv4_hdr) + offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+static struct rte_acl_field_def field_format_ipv6[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, proto),
+ },
+
+ /* Source IP address (IPv6) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, src_addr),
+ },
+
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, src_addr) + sizeof(uint32_t),
+ },
+
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, src_addr) + 2 * sizeof(uint32_t),
+ },
+
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, src_addr) + 3 * sizeof(uint32_t),
+ },
+
+ /* Destination IP address (IPv6) */
+ [5] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 5,
+ .input_index = 5,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, dst_addr),
+ },
+
+ [6] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 6,
+ .input_index = 6,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, dst_addr) + sizeof(uint32_t),
+ },
+
+ [7] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 7,
+ .input_index = 7,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, dst_addr) + 2 * sizeof(uint32_t),
+ },
+
+ [8] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 8,
+ .input_index = 8,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv6_hdr, dst_addr) + 3 * sizeof(uint32_t),
+ },
+
+ /* Source Port */
+ [9] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 9,
+ .input_index = 9,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [10] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 10,
+ .input_index = 9,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv6_hdr) + offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+void *lib_acl_create_active_standby_table_ipv4(uint8_t table_num,
+ uint32_t *libacl_n_rules)
+{
+ printf("Create LIBACL active IPV4 Tables rte_socket_id(): %i\n",
+ rte_socket_id());
+
+ /* Create IPV4 LIBACL Rule Tables */
+ struct rte_table_acl_params common_ipv4_table_libacl_params = {
+ .name = "LIBACLIPV4A",
+ .n_rules = *libacl_n_rules,
+ .n_rule_fields = RTE_DIM(field_format_ipv4),
+ };
+
+ memcpy(common_ipv4_table_libacl_params.field_format,
+ field_format_ipv4, sizeof(field_format_ipv4));
+
+ uint32_t ipv4_entry_size = sizeof(struct lib_acl_table_entry);
+ /* Create second IPV4 Table */
+ if (table_num == 2)
+ common_ipv4_table_libacl_params.name = "LIBACLIPV4B";
+ return rte_table_acl_ops.f_create(&common_ipv4_table_libacl_params,
+ rte_socket_id(),
+ ipv4_entry_size);
+
+
+}
+
+void *lib_acl_create_active_standby_table_ipv6(uint8_t table_num,
+ uint32_t *libacl_n_rules)
+{
+ printf("Create LIBACL active IPV6 Tables rte_socket_id(): %i\n",
+ rte_socket_id());
+ /* Create IPV6 LIBACL Rule Tables */
+ struct rte_table_acl_params common_ipv6_table_libacl_params = {
+ .name = "LIBACLIPV6A",
+ .n_rules = *libacl_n_rules,
+ .n_rule_fields = RTE_DIM(field_format_ipv6),
+ };
+
+ memcpy(common_ipv6_table_libacl_params.field_format,
+ field_format_ipv6, sizeof(field_format_ipv6));
+
+ uint32_t ipv6_entry_size = sizeof(struct lib_acl_table_entry);
+ /* Create second IPV6 table */
+ if (table_num == 2)
+ common_ipv6_table_libacl_params.name = "LIBACLIPV6B";
+ return rte_table_acl_ops.f_create(&common_ipv6_table_libacl_params,
+ rte_socket_id(),
+ ipv6_entry_size);
+
+
+}
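
The two creation helpers above are written for an active/standby pair of ACL rule tables: table_num 1 names the table "LIBACLIPV4A"/"LIBACLIPV6A" and table_num 2 names the "...B" variant, so a pipeline can rebuild the standby table and then swap it in without disturbing lookups. A minimal usage sketch, assuming a hypothetical pipeline init routine (the variable names and the use of rte_panic() are illustrative, not part of this file):

	/* inside a hypothetical pipeline init routine */
	uint32_t n_rules = DEFULT_NUM_RULE;
	void *acl_ipv4_active =
		lib_acl_create_active_standby_table_ipv4(1, &n_rules);
	void *acl_ipv4_standby =
		lib_acl_create_active_standby_table_ipv4(2, &n_rules);

	if (acl_ipv4_active == NULL || acl_ipv4_standby == NULL)
		rte_panic("LIBACL IPv4 table creation failed\n");
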
+int lib_acl_parse_config(struct lib_acl *plib_acl,
+ char *arg_name, char *arg_value,
+ uint32_t *libacl_n_rules)
+{
+ uint32_t n_rules_present = 0;
+ uint32_t pkt_type_present = 0;
+ /* defaults */
+ plib_acl->n_rules = DEFULT_NUM_RULE;
+ *libacl_n_rules = DEFULT_NUM_RULE;
+ plib_acl->n_rule_fields = RTE_DIM(field_format_ipv4);
+ plib_acl->field_format = field_format_ipv4;
+ plib_acl->field_format_size = sizeof(field_format_ipv4);
+ if (strcmp(arg_name, "n_rules") == 0) {
+ if (n_rules_present) {
+ printf("n_rules_present");
+ return -1;
+ }
+ n_rules_present = 1;
+
+ plib_acl->n_rules = atoi(arg_value);
+ *libacl_n_rules = atoi(arg_value);
+ return 0;
+ }
+ if (strcmp(arg_name, "pkt_type") == 0) {
+ if (pkt_type_present) {
+ printf("pkt_type");
+ return -1;
+ }
+ pkt_type_present = 1;
+
+ /* ipv4 */
+ if (strcmp(arg_value, "ipv4") == 0) {
+ plib_acl->n_rule_fields =
+ RTE_DIM(field_format_ipv4);
+ plib_acl->field_format = field_format_ipv4;
+ plib_acl->field_format_size =
+ sizeof(field_format_ipv4);
+ return 0;
+ }
+
+ /* vlan_ipv4 */
+ if (strcmp(arg_value, "vlan_ipv4") == 0) {
+ plib_acl->n_rule_fields =
+ RTE_DIM(field_format_vlan_ipv4);
+ plib_acl->field_format =
+ field_format_vlan_ipv4;
+ plib_acl->field_format_size =
+ sizeof(field_format_vlan_ipv4);
+ return 0;
+ }
+
+ /* qinq_ipv4 */
+ if (strcmp(arg_value, "qinq_ipv4") == 0) {
+ plib_acl->n_rule_fields =
+ RTE_DIM(field_format_qinq_ipv4);
+ plib_acl->field_format =
+ field_format_qinq_ipv4;
+ plib_acl->field_format_size =
+ sizeof(field_format_qinq_ipv4);
+ return 0;
+ }
+
+ /* ipv6 */
+ if (strcmp(arg_value, "ipv6") == 0) {
+ plib_acl->n_rule_fields =
+ RTE_DIM(field_format_ipv6);
+ plib_acl->field_format = field_format_ipv6;
+ plib_acl->field_format_size =
+ sizeof(field_format_ipv6);
+ return 0;
+ }
+ /* other */
+ printf("other");
+ return -1;
+ }
+ /* Parameter not processed in this parse function */
+ return 1;
+}
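
lib_acl_parse_config() is intended to be called once per name/value pair taken from the pipeline configuration: it returns 0 when the argument was consumed, 1 when the argument is not an ACL parameter, and -1 on error. A hedged sketch of the calling convention (the surrounding parse loop and plib_acl allocation are hypothetical):

	/* inside a hypothetical argument-parsing loop */
	uint32_t libacl_n_rules;
	int ret;

	ret = lib_acl_parse_config(plib_acl, "pkt_type", "ipv4",
			&libacl_n_rules);
	if (ret < 0)
		return -1;	/* duplicate or unsupported value */
	if (ret == 1)
		printf("not an ACL parameter, leave it to other parsers\n");
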
+/**
+ * Main packet processing function.
+ * A 64-bit packet mask identifies which packets of the burst to forward.
+ * Performs the following:
+ * - Burst lookup of the packets in the IPv4 ACL rule table.
+ * - Burst lookup of the packets in the IPv6 ACL rule table.
+ * - Lookup of the action table and execution of the configured actions.
+ * - Marking of packets for connection tracking, if enabled.
+ * - Update of the packet bit mask; only packets whose bit remains set
+ *   are forwarded.
+ *
+ * @param plib_acl
+ *  A pointer to the ACL library context.
+ * @param pkts
+ *  A pointer to a burst of packets.
+ * @param pkts_mask
+ *  Bit mask of valid packets in the burst.
+ * @param pkts_drop_without_rule
+ *  Counter of packets dropped for lack of a matching rule.
+ *
+ * @return
+ *  Updated packet bit mask; only packets whose bit is set are forwarded.
+ */
+ uint64_t
+lib_acl_pkt_work_key(struct lib_acl *plib_acl,
+ struct rte_mbuf **pkts, uint64_t pkts_mask,
+ uint64_t *pkts_drop_without_rule,
+ void *plib_acl_rule_table_ipv4_active,
+ void *plib_acl_rule_table_ipv6_active,
+ struct pipeline_action_key *action_array_active,
+ struct action_counter_block (*p_action_counter_table)[action_array_max],
+ uint64_t *conntrack_mask,
+ uint64_t *connexist_mask,
+ int lib_acl_ipv4_enabled, int lib_acl_ipv6_enabled)
+{
+
+ uint64_t lookup_hit_mask = 0;
+ uint64_t lookup_hit_mask_ipv4 = 0;
+ uint64_t lookup_hit_mask_ipv6 = 0;
+ uint64_t lookup_miss_mask = 0;
+ int status;
+
+
+ if (lib_acl_ipv4_enabled) {
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV4 Lookup Mask Before = 0x%"PRIx64"\n",
+ pkts_mask);
+ status = rte_table_acl_ops.f_lookup(
+ plib_acl_rule_table_ipv4_active,
+ pkts, pkts_mask, &lookup_hit_mask_ipv4,
+ (void **) plib_acl->plib_acl_entries_ipv4);
+ if (status < 0)
+ printf("Lookup failed\n");
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV4 Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask_ipv4);
+ }
+
+ if (lib_acl_ipv6_enabled) {
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV6 Lookup Mask Before = 0x%"PRIx64"\n",
+ pkts_mask);
+ status = rte_table_acl_ops.f_lookup(
+ plib_acl_rule_table_ipv6_active,
+ pkts, pkts_mask, &lookup_hit_mask_ipv6,
+ (void **) plib_acl->plib_acl_entries_ipv6);
+ if (status < 0)
+ printf("Lookup Failed\n");
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV6 Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask_ipv6);
+ }
+
+ /* Merge lookup results since we process both IPv4 and IPv6 below */
+ lookup_hit_mask = lookup_hit_mask_ipv4 | lookup_hit_mask_ipv6;
+ if (ACL_LIB_DEBUG)
+ printf("ACL Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask);
+
+ lookup_miss_mask = pkts_mask & (~lookup_hit_mask);
+ pkts_mask = lookup_hit_mask;
+ *pkts_drop_without_rule += __builtin_popcountll(lookup_miss_mask);
+ if (ACL_LIB_DEBUG)
+ printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
+ *pkts_drop_without_rule,
+ __builtin_popcountll(lookup_miss_mask));
+	/* bitmap of packets left to process */
+ uint64_t pkts_to_process = lookup_hit_mask;
+
+ for (; pkts_to_process;) {
+ uint8_t pos = (uint8_t)__builtin_ctzll(pkts_to_process);
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pos;
+ /* remove this packet from remaining list */
+ pkts_to_process &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts[pos];
+
+ uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt, IP_START);
+
+ hdr_chk = hdr_chk >> IP_VERSION_CHECK;
+
+ if (hdr_chk == IPv4_HDR_VERSION) {
+
+ struct lib_acl_table_entry *entry =
+ (struct lib_acl_table_entry *)
+ plib_acl->plib_acl_entries_ipv4[pos];
+ uint16_t phy_port = entry->head.port_id;
+ uint32_t action_id = entry->action_id;
+
+ if (ACL_LIB_DEBUG)
+ printf("action_id = %u\n", action_id);
+
+ uint32_t dscp_offset = IP_START + IP_HDR_DSCP_OFST;
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_count) {
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount++;
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount +=
+ rte_pktmbuf_pkt_len(pkt);
+ if (ACL_LIB_DEBUG)
+ printf("Action Count Packet Count: %"
+ PRIu64 " Byte Count: %"
+ PRIu64 "\n"
+ , p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount,
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_packet_drop) {
+
+ /* Drop packet by changing the mask */
+ if (ACL_LIB_DEBUG)
+ printf("ACL before drop pkt_mask %"
+ PRIx64", pkt_num %d\n",
+ pkts_mask, pos);
+ pkts_mask &= ~(1LLU << pos);
+ (*pkts_drop_without_rule)++;
+ if (ACL_LIB_DEBUG)
+ printf("ACL after drop pkt_mask %"PRIx64
+ ", pkt_num %d, packet_drop%"
+ PRIu64"\n", pkts_mask, pos,
+ *pkts_drop_without_rule);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_fwd) {
+ phy_port = action_array_active[action_id].
+ fwd_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action FWD Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_nat) {
+ phy_port = action_array_active[action_id].
+ nat_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action NAT Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_dscp) {
+
+ /* Set DSCP priority */
+ uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ dscp_offset);
+ *dscp = action_array_active[action_id].
+ dscp_priority << 2;
+ if (ACL_LIB_DEBUG)
+ printf("Action DSCP DSCP Priority: %"
+ PRIu16 "\n", *dscp);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_packet_accept) {
+ if (ACL_LIB_DEBUG)
+ printf("Action Accept\n");
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_conntrack) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+ if (ACL_LIB_DEBUG)
+ printf("ACL CT enabled: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ pkt_mask);
+ }
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_connexist) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+
+ /* Set connexist bit for this pkt for
+ * public -> private */
+ /* Private -> public packet will open
+ * the connection */
+ if (action_array_active[action_id].
+ private_public ==
+ lib_acl_public_private)
+ *connexist_mask |= pkt_mask;
+
+ if (ACL_LIB_DEBUG)
+ printf("Connexist ENB CT:0x%"
+ PRIx64" connexist: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ *connexist_mask,
+ pkt_mask);
+ }
+ }
+ }
+
+ if (hdr_chk == IPv6_HDR_VERSION) {
+
+ struct lib_acl_table_entry *entry =
+ (struct lib_acl_table_entry *)
+ plib_acl->plib_acl_entries_ipv6[pos];
+ uint16_t phy_port = entry->head.port_id;
+ uint32_t action_id = entry->action_id;
+
+ if (ACL_LIB_DEBUG)
+ printf("action_id = %u\n", action_id);
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_count) {
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount++;
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount +=
+ rte_pktmbuf_pkt_len(pkt);
+ if (ACL_LIB_DEBUG)
+ printf("Action Count Packet Count: %"
+ PRIu64 " Byte Count: %"
+ PRIu64 "\n",
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount,
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_packet_drop) {
+ /* Drop packet by changing the mask */
+ if (ACL_LIB_DEBUG)
+ printf("ACL before drop pkt_mask %"
+ PRIx64", pkt_num %d\n",
+ pkts_mask, pos);
+ pkts_mask &= ~(1LLU << pos);
+ (*pkts_drop_without_rule)++;
+ if (ACL_LIB_DEBUG)
+ printf("ACL after drop pkt_mask %"PRIx64
+ ", pkt_num %d, packet_drop %"
+ PRIu64 "\n", pkts_mask, pos,
+ *pkts_drop_without_rule);
+
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_fwd) {
+ phy_port = action_array_active[action_id].
+ fwd_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action FWD Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_nat) {
+ phy_port = action_array_active[action_id].
+ nat_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action NAT Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_dscp) {
+
+ /* Set DSCP priority */
+ uint32_t dscp_offset = IP_START +
+ IP_HDR_DSCP_OFST_IPV6;
+ uint16_t *dscp = RTE_MBUF_METADATA_UINT16_PTR(
+ pkt, dscp_offset);
+ uint16_t temp = *dscp;
+ uint16_t dscp_value = (rte_bswap16(temp) &
+ 0XF00F);
+ uint8_t dscp_store =
+ action_array_active
+ [action_id].dscp_priority << 2;
+ uint16_t dscp_temp = dscp_store;
+
+ dscp_temp = dscp_temp << 4;
+ *dscp = rte_bswap16(dscp_temp | dscp_value);
+ if (ACL_LIB_DEBUG)
+ printf("Action DSCP DSCP Priority: %"
+ PRIu16"\n", *dscp);
+ }
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_packet_accept) {
+ if (ACL_LIB_DEBUG)
+ printf("Action Accept\n");
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_conntrack) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+ if (ACL_LIB_DEBUG)
+ printf("ACL CT enabled: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ pkt_mask);
+ }
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_connexist) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+
+ /* Set connexist bit for this pkt for
+ * public -> private */
+ /* Private -> public packet will open
+ * the connection */
+ if (action_array_active[action_id].
+ private_public ==
+ lib_acl_public_private)
+ *connexist_mask |= pkt_mask;
+
+ if (ACL_LIB_DEBUG)
+ printf("Connexist ENB CT:0x%"
+ PRIx64" connexist: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ *connexist_mask,
+ pkt_mask);
+ }
+ }
+ }
+ }
+ return pkts_mask;
+}
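
A rough sketch of how a pipeline port-in handler might drive this routine. The wrapper below and its parameters are illustrative only; the ACL context, rule tables, action array and counter storage are assumed to have been created elsewhere by the caller:

	#include <rte_common.h>	/* RTE_LEN2MASK */
	#include "lib_acl.h"

	/* Illustrative wrapper, not part of this library. */
	static uint64_t
	acl_classify_burst(struct lib_acl *plib_acl,
		struct rte_mbuf **pkts, uint32_t n_pkts,
		void *acl_ipv4_active, void *acl_ipv6_active,
		struct pipeline_action_key *action_array_active,
		struct action_counter_block (*counters)[action_array_max],
		uint64_t *drop_cnt)
	{
		uint64_t ct_mask = 0, conn_mask = 0;
		uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);

		/* Returned mask has a bit set only for packets that matched
		 * a rule and were not dropped by an action; ct_mask marks
		 * packets that still need a conntrack lookup (unused here).
		 */
		return lib_acl_pkt_work_key(plib_acl, pkts, pkts_mask,
				drop_cnt,
				acl_ipv4_active, acl_ipv6_active,
				action_array_active, counters,
				&ct_mask, &conn_mask,
				1 /* IPv4 enabled */, 1 /* IPv6 enabled */);
	}
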
+/**
+ * Main packet processing function (IPv4 only).
+ * A 64-bit packet mask identifies which packets of the burst to forward.
+ * Performs the following:
+ * - Burst lookup of the packets in the IPv4 ACL rule table.
+ * - Lookup of the action table and execution of the configured actions.
+ * - Marking of packets for connection tracking, if enabled.
+ * - Update of the packet bit mask; only packets whose bit remains set
+ *   are forwarded.
+ *
+ * @param plib_acl
+ *  A pointer to the ACL library context.
+ * @param pkts
+ *  A pointer to a burst of packets.
+ * @param pkts_mask
+ *  Bit mask of valid packets in the burst.
+ * @param pkts_drop_without_rule
+ *  Counter of packets dropped for lack of a matching rule.
+ *
+ * @return
+ *  Updated packet bit mask; only packets whose bit is set are forwarded.
+ */
+ uint64_t
+lib_acl_ipv4_pkt_work_key(struct lib_acl *plib_acl,
+ struct rte_mbuf **pkts, uint64_t pkts_mask,
+ uint64_t *pkts_drop_without_rule,
+ void *plib_acl_rule_table_ipv4_active,
+ struct pipeline_action_key *action_array_active,
+ struct action_counter_block (*p_action_counter_table)[action_array_max],
+ uint64_t *conntrack_mask,
+ uint64_t *connexist_mask)
+{
+
+ uint64_t lookup_hit_mask_ipv4 = 0;
+ uint64_t lookup_miss_mask_ipv4 = 0;
+ int status;
+
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV4 Lookup Mask Before = 0x%"PRIx64"\n",
+ pkts_mask);
+ status = rte_table_acl_ops.f_lookup(
+ plib_acl_rule_table_ipv4_active,
+ pkts, pkts_mask, &lookup_hit_mask_ipv4,
+ (void **) plib_acl->plib_acl_entries_ipv4);
+ if (status < 0)
+ printf("Lookup Failed\n");
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV4 Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask_ipv4);
+ if (ACL_LIB_DEBUG)
+ printf("ACL Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask_ipv4);
+
+ lookup_miss_mask_ipv4 = pkts_mask & (~lookup_hit_mask_ipv4);
+ pkts_mask = lookup_hit_mask_ipv4;
+ *pkts_drop_without_rule += __builtin_popcountll(lookup_miss_mask_ipv4);
+ if (ACL_LIB_DEBUG)
+ printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
+ *pkts_drop_without_rule,
+ __builtin_popcountll(lookup_miss_mask_ipv4));
+	/* bitmap of packets left to process */
+ uint64_t pkts_to_process = lookup_hit_mask_ipv4;
+
+ for (; pkts_to_process;) {
+ uint8_t pos = (uint8_t)__builtin_ctzll(pkts_to_process);
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pos;
+ /* remove this packet from remaining list */
+ pkts_to_process &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts[pos];
+
+
+
+ struct lib_acl_table_entry *entry =
+ (struct lib_acl_table_entry *)
+ plib_acl->plib_acl_entries_ipv4[pos];
+ uint16_t phy_port = entry->head.port_id;
+ uint32_t action_id = entry->action_id;
+
+ if (ACL_LIB_DEBUG)
+ printf("action_id = %u\n", action_id);
+
+ uint32_t dscp_offset = IP_START + IP_HDR_DSCP_OFST;
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_count) {
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount++;
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount +=
+ rte_pktmbuf_pkt_len(pkt);
+ if (ACL_LIB_DEBUG)
+ printf("Action Count Packet Count: %"
+ PRIu64 " Byte Count: %"
+ PRIu64 "\n"
+ , p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount,
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_packet_drop) {
+
+ /* Drop packet by changing the mask */
+ if (ACL_LIB_DEBUG)
+ printf("ACL before drop pkt_mask %"
+ PRIx64", pkt_num %d\n",
+ pkts_mask, pos);
+ pkts_mask &= ~(1LLU << pos);
+ (*pkts_drop_without_rule)++;
+ if (ACL_LIB_DEBUG)
+ printf("ACL after drop pkt_mask %" PRIx64
+ ", pkt_num %d, action_packet_drop %"
+ PRIu64 "\n", pkts_mask, pos,
+ *pkts_drop_without_rule);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_fwd) {
+ phy_port = action_array_active[action_id].
+ fwd_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action FWD Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_nat) {
+ phy_port = action_array_active[action_id].
+ nat_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action NAT Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_dscp) {
+
+ /* Set DSCP priority */
+ uint8_t *dscp = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ dscp_offset);
+ *dscp = action_array_active[action_id].
+ dscp_priority << 2;
+ if (ACL_LIB_DEBUG)
+ printf("Action DSCP DSCP Priority: %"
+ PRIu16 "\n", *dscp);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_packet_accept) {
+ if (ACL_LIB_DEBUG)
+ printf("Action Accept\n");
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_conntrack) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+ if (ACL_LIB_DEBUG)
+ printf("ACL CT enabled: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ pkt_mask);
+ }
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_connexist) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+
+ /* Set connexist bit for this pkt for
+ * public -> private */
+ /* Private -> public packet will open
+ * the connection */
+ if (action_array_active[action_id].
+ private_public ==
+ lib_acl_public_private)
+ *connexist_mask |= pkt_mask;
+
+ if (ACL_LIB_DEBUG)
+ printf("ACL Connexist ENB CT:0x%"
+ PRIx64" connexist: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ *connexist_mask,
+ pkt_mask);
+ }
+ }
+
+ }
+ return pkts_mask;
+}
+/**
+ * Main packet processing function (IPv6 only).
+ * A 64-bit packet mask identifies which packets of the burst to forward.
+ * Performs the following:
+ * - Burst lookup of the packets in the IPv6 ACL rule table.
+ * - Lookup of the action table and execution of the configured actions.
+ * - Marking of packets for connection tracking, if enabled.
+ * - Update of the packet bit mask; only packets whose bit remains set
+ *   are forwarded.
+ *
+ * @param plib_acl
+ *  A pointer to the ACL library context.
+ * @param pkts
+ *  A pointer to a burst of packets.
+ * @param pkts_mask
+ *  Bit mask of valid packets in the burst.
+ * @param pkts_drop_without_rule
+ *  Counter of packets dropped for lack of a matching rule.
+ *
+ * @return
+ *  Updated packet bit mask; only packets whose bit is set are forwarded.
+ */
+ uint64_t
+lib_acl_ipv6_pkt_work_key(struct lib_acl *plib_acl,
+ struct rte_mbuf **pkts, uint64_t pkts_mask,
+ uint64_t *pkts_drop_without_rule,
+ void *plib_acl_rule_table_ipv6_active,
+ struct pipeline_action_key *action_array_active,
+ struct action_counter_block (*p_action_counter_table)[action_array_max],
+ uint64_t *conntrack_mask,
+ uint64_t *connexist_mask)
+{
+
+ uint64_t lookup_hit_mask_ipv6 = 0;
+ uint64_t lookup_miss_mask_ipv6 = 0;
+ int status;
+
+
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV6 Lookup Mask Before = 0x%"PRIx64"\n",
+ pkts_mask);
+ status = rte_table_acl_ops.f_lookup(
+ plib_acl_rule_table_ipv6_active,
+ pkts, pkts_mask, &lookup_hit_mask_ipv6,
+ (void **) plib_acl->plib_acl_entries_ipv6);
+ if (status < 0)
+ printf("Lookup Failed\n");
+ if (ACL_LIB_DEBUG)
+ printf("ACL IPV6 Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask_ipv6);
+
+ if (ACL_LIB_DEBUG)
+ printf("ACL Lookup Mask After = 0x%"PRIx64"\n",
+ lookup_hit_mask_ipv6);
+
+ lookup_miss_mask_ipv6 = pkts_mask & (~lookup_hit_mask_ipv6);
+ pkts_mask = lookup_hit_mask_ipv6;
+ *pkts_drop_without_rule += __builtin_popcountll(lookup_miss_mask_ipv6);
+ if (ACL_LIB_DEBUG)
+ printf("pkt_work_acl_key pkts_drop: %" PRIu64 " n_pkts: %u\n",
+ *pkts_drop_without_rule,
+ __builtin_popcountll(lookup_miss_mask_ipv6));
+	/* bitmap of packets left to process */
+ uint64_t pkts_to_process = lookup_hit_mask_ipv6;
+
+ for (; pkts_to_process;) {
+ uint8_t pos = (uint8_t)__builtin_ctzll(pkts_to_process);
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pos;
+ /* remove this packet from remaining list */
+ pkts_to_process &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts[pos];
+
+
+ struct lib_acl_table_entry *entry =
+ (struct lib_acl_table_entry *)
+ plib_acl->plib_acl_entries_ipv6[pos];
+ uint16_t phy_port = entry->head.port_id;
+ uint32_t action_id = entry->action_id;
+
+ if (ACL_LIB_DEBUG)
+ printf("action_id = %u\n", action_id);
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_count) {
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount++;
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount +=
+ rte_pktmbuf_pkt_len(pkt);
+ if (ACL_LIB_DEBUG)
+ printf("Action Count Packet Count: %"
+ PRIu64 " Byte Count: %"
+ PRIu64 "\n",
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].packetCount,
+ p_action_counter_table
+ [plib_acl->action_counter_index]
+ [action_id].byteCount);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_packet_drop) {
+ /* Drop packet by changing the mask */
+ if (ACL_LIB_DEBUG)
+ printf("ACL before drop pkt_mask %"
+ PRIx64", pkt_num %d\n",
+ pkts_mask, pos);
+ pkts_mask &= ~(1LLU << pos);
+ (*pkts_drop_without_rule)++;
+ if (ACL_LIB_DEBUG)
+ printf("ACL after drop pkt_mask %" PRIx64
+ ", pkt_num %d, action_packet_drop %"
+ PRIu64 "\n", pkts_mask, pos,
+ *pkts_drop_without_rule);
+
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_fwd) {
+ phy_port = action_array_active[action_id].
+ fwd_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action FWD Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_nat) {
+ phy_port = action_array_active[action_id].
+ nat_port;
+ entry->head.port_id = phy_port;
+ if (ACL_LIB_DEBUG)
+ printf("Action NAT Port ID: %"
+ PRIu16"\n", phy_port);
+ }
+
+ if (action_array_active[action_id].action_bitmap &
+ lib_acl_action_dscp) {
+
+ /* Set DSCP priority */
+ uint32_t dscp_offset = IP_START +
+ IP_HDR_DSCP_OFST_IPV6;
+ uint16_t *dscp = RTE_MBUF_METADATA_UINT16_PTR(
+ pkt, dscp_offset);
+ uint16_t temp = *dscp;
+ uint16_t dscp_value = (rte_bswap16(temp) &
+ 0XF00F);
+ uint8_t dscp_store =
+ action_array_active
+ [action_id].dscp_priority << 2;
+ uint16_t dscp_temp = dscp_store;
+
+ dscp_temp = dscp_temp << 4;
+ *dscp = rte_bswap16(dscp_temp | dscp_value);
+ if (ACL_LIB_DEBUG)
+ printf("Action DSCP DSCP Priority: %"
+ PRIu16"\n", *dscp);
+ }
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_packet_accept) {
+ if (ACL_LIB_DEBUG)
+ printf("Action Accept\n");
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_conntrack) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+ if (ACL_LIB_DEBUG)
+ printf("ACL CT enabled: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ pkt_mask);
+ }
+
+ if (action_array_active[action_id].action_bitmap
+ & lib_acl_action_connexist) {
+
+ /* Set conntrack bit for this pkt */
+ *conntrack_mask |= pkt_mask;
+
+ /* Set connexist bit for this pkt for
+ * public -> private */
+ /* Private -> public packet will open
+ * the connection */
+ if (action_array_active[action_id].
+ private_public ==
+ lib_acl_public_private)
+ *connexist_mask |= pkt_mask;
+
+ if (ACL_LIB_DEBUG)
+ printf("ACL Connexist ENB CT:0x%"
+ PRIx64" connexist: 0x%"
+ PRIx64" pkt_mask: 0x%"
+ PRIx64"\n",
+ *conntrack_mask,
+ *connexist_mask,
+ pkt_mask);
+ }
+ }
+ }
+ return pkts_mask;
+}
diff --git a/common/VIL/acl/lib_acl.h b/common/VIL/acl/lib_acl.h
new file mode 100644
index 00000000..6eaaf55f
--- /dev/null
+++ b/common/VIL/acl/lib_acl.h
@@ -0,0 +1,127 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_ACL_LIB_H__
+#define __INCLUDE_ACL_LIB_H__
+
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <rte_pipeline.h>
+#include <rte_table_acl.h>
+#include "rte_cnxn_tracking.h"
+#include "rte_ct_tcp.h"
+/* Define VNF actions for bitmap */
+#define lib_acl_action_packet_drop 1
+#define lib_acl_action_packet_accept 2
+#define lib_acl_action_nat 4
+#define lib_acl_action_fwd 8
+#define lib_acl_action_count 16
+#define lib_acl_action_dscp 32
+#define lib_acl_action_conntrack 64
+#define lib_acl_action_connexist 128
+#define action_array_max 10000
+#define lib_acl_private_public 0
+#define lib_acl_public_private 1
+#define IP_HDR_DSCP_OFST 1
+#define IPv4_HDR_VERSION 4
+#define IPv6_HDR_VERSION 6
+#define IP_HDR_DSCP_OFST_IPV6 0
+#define IP_VERSION_CHECK 4
+#define IP_START (MBUF_HDR_ROOM + ETH_HDR_SIZE)
+#define DEFULT_NUM_RULE (4*1024)
+/**
+ * A structure defining the key to store an VNF action.
+ */
+struct pipeline_action_key {
+ uint32_t action_id;
+ uint32_t action_bitmap;
+ uint32_t nat_port;
+ uint32_t fwd_port;
+ uint8_t dscp_priority;
+ uint8_t private_public;
+} __rte_cache_aligned;
+
+/**
+ * A structure defining the Action counters.
+ * One Action Counter Block per VNF thread.
+ */
+struct action_counter_block {
+ uint64_t byteCount;
+ uint64_t packetCount;
+} __rte_cache_aligned;
+
+/**
+ * A structure defining the ACL library table.
+ */
+struct lib_acl_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint32_t action_id;
+};
+
+
+struct lib_acl {
+ uint32_t n_rules;
+ uint32_t n_rule_fields;
+ struct rte_acl_field_def *field_format;
+ uint32_t field_format_size;
+ int action_counter_index;
+ struct lib_acl_table_entry
+ *plib_acl_entries_ipv4[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct lib_acl_table_entry
+ *plib_acl_entries_ipv6[RTE_PORT_IN_BURST_SIZE_MAX];
+} __rte_cache_aligned;
+
+void *lib_acl_create_active_standby_table_ipv4(uint8_t table_num,
+ uint32_t *libacl_n_rules);
+
+void *lib_acl_create_active_standby_table_ipv6(uint8_t table_num,
+ uint32_t *libacl_n_rules);
+int lib_acl_parse_config(struct lib_acl *plib_acl,
+ char *arg_name, char *arg_value,
+ uint32_t *libacl_n_rules);
+uint64_t
+lib_acl_pkt_work_key(struct lib_acl *plib_acl,
+ struct rte_mbuf **pkts, uint64_t pkts_mask,
+ uint64_t *pkts_drop_without_rule,
+ void *plib_acl_rule_table_ipv4_active,
+ void *plib_acl_rule_table_ipv6_active,
+ struct pipeline_action_key *action_array_active,
+ struct action_counter_block (*p_action_counter_table)[action_array_max],
+ uint64_t *conntrack_mask,
+ uint64_t *connexist_mask,
+ int lib_acl_ipv4_enabled, int lib_acl_ipv6_enabled);
+uint64_t
+lib_acl_ipv4_pkt_work_key(struct lib_acl *plib_acl,
+ struct rte_mbuf **pkts, uint64_t pkts_mask,
+ uint64_t *pkts_drop_without_rule,
+ void *plib_acl_rule_table_ipv4_active,
+ struct pipeline_action_key *action_array_active,
+ struct action_counter_block (*p_action_counter_table)[action_array_max],
+ uint64_t *conntrack_mask,
+ uint64_t *connexist_mask);
+uint64_t
+lib_acl_ipv6_pkt_work_key(struct lib_acl *plib_acl,
+ struct rte_mbuf **pkts, uint64_t pkts_mask,
+ uint64_t *pkts_drop_without_rule,
+ void *plib_acl_rule_table_ipv6_active,
+ struct pipeline_action_key *action_array_active,
+ struct action_counter_block (*p_action_counter_table)[action_array_max],
+ uint64_t *conntrack_mask,
+ uint64_t *connexist_mask);
+
+
+#endif
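
The action_bitmap in struct pipeline_action_key is an OR of the lib_acl_action_* flags defined above, and action_array_active is indexed by the action_id stored in each ACL table entry. A small illustrative initializer (the concrete values are made up for the example, not taken from this code):

	struct pipeline_action_key key = {
		.action_id = 7,
		.action_bitmap = lib_acl_action_packet_accept |
				 lib_acl_action_count |
				 lib_acl_action_fwd |
				 lib_acl_action_conntrack,
		.fwd_port = 1,
		.nat_port = 0,
		.dscp_priority = 0,
		.private_public = lib_acl_private_public,
	};
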
diff --git a/common/VIL/alg/lib_ftp_alg.c b/common/VIL/alg/lib_ftp_alg.c
new file mode 100644
index 00000000..0a8a0e6d
--- /dev/null
+++ b/common/VIL/alg/lib_ftp_alg.c
@@ -0,0 +1,917 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <app.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include "pipeline_cgnapt_common.h"
+#include "pipeline_actions_common.h"
+#include "pipeline_cgnapt_be.h"
+#include "hash_func.h"
+#include "lib_ftp_alg.h"
+#include "vnf_common.h"
+#include "pipeline_common_be.h"
+#include "rte_ct_tcp.h"
+#include "rte_cnxn_tracking.h"
+#define ALG_DEBUG 1
+
+#if 1
+extern uint8_t
+rte_ct_create_cnxn_hashkey(
+ uint32_t *src_addr,
+ uint32_t *dst_addr,
+ uint16_t src_port,
+ uint16_t dst_port,
+ uint8_t proto,
+ uint32_t *key,
+ uint8_t type);
+#endif
+
+struct rte_mbuf *lib_alg_pkt;
+enum {PRIVATE, PUBLIC};
+struct rte_hash_parameters ftp_alg_hash_params = {
+ .name = "FTP ALG",
+ .entries = 1024,
+ .reserved = 0,
+ .key_len = sizeof(struct ftp_alg_key),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+struct rte_hash *ftp_alg_hash_handle;
+
+/**
+ * ftp_alg Init function
+ */
+void lib_ftp_alg_init(void)
+{
+	printf("NAT FTP ALG initialization ...\n");
+
+ /* FTP ALG hash table initialization */
+
+ ftp_alg_hash_handle = rte_hash_create(&ftp_alg_hash_params);
+
+ #ifdef ALGDBG
+ if (ftp_alg_hash_handle == NULL)
+ printf("FTP ALG rte_hash_create failed ...\n");
+ else
+ printf("ftp_alg_hash_table %p\n\n",
+ (void *)ftp_alg_hash_handle);
+
+ #endif
+}
+
+/*
+ * ftp_alg table retrieve function
+ * Input - alg key
+ * Output - Entry
+ */
+
+struct ftp_alg_table_entry *retrieve_ftp_alg_entry(struct ftp_alg_key alg_key)
+{
+ struct ftp_alg_table_entry *ret_alg_data = NULL;
+ alg_key.filler1 = 0;
+ alg_key.filler2 = 0;
+
+ int ret = rte_hash_lookup_data(ftp_alg_hash_handle, &alg_key,
+ (void **)&ret_alg_data);
+ if (ret < 0) {
+ #ifdef ALGDBG
+ printf("alg-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ #endif
+ } else {
+ return ret_alg_data;
+ }
+
+ return NULL;
+}
+
+/*
+ * ftp_alg table entry delete
+ * Input - ipaddress, portid
+ * Output - success or failure
+ */
+static int remove_ftp_alg_entry(uint32_t ipaddr, uint8_t portid)
+{
+
+ /* need to lock here if multi-threaded... */
+ /* rte_hash_del_key is not thread safe */
+ struct ftp_alg_key alg_key;
+ alg_key.l4port = rte_bswap16(portid);
+ alg_key.ip_address = rte_bswap32(ipaddr);
+ alg_key.filler1 = 0;
+ alg_key.filler2 = 0;
+
+ #ifdef ALGDBG
+ printf("remove_alg_entry ip %x, port %d\n", alg_key.ip_address,
+ alg_key.l4port);
+ #endif
+ return rte_hash_del_key(ftp_alg_hash_handle, &alg_key);
+}
+/*
+ * ftp_alg table entry add
+ * Input - ipaddress, portid
+ * Output - success or failure
+ */
+void
+populate_ftp_alg_entry(uint32_t ipaddr, uint8_t portid)
+{
+ /* need to lock here if multi-threaded */
+ /* rte_hash_add_key_data is not thread safe */
+ struct ftp_alg_key alg_key;
+ alg_key.l4port = rte_bswap16(portid);
+ //arp_key.ip = rte_bswap32(ipaddr);
+ alg_key.ip_address = rte_bswap32(ipaddr);
+ alg_key.filler1 = 0;
+ alg_key.filler2 = 0;
+
+
+ //lib_arp_populate_called++;
+
+ #ifdef ALGDBG
+ printf("populate_ftp_alg_entry ip %x, port %d\n", alg_key.ip_address,
+ alg_key.l4port);
+ #endif
+
+ struct ftp_alg_table_entry *new_alg_data =
+ retrieve_ftp_alg_entry(alg_key);
+ if (new_alg_data) {
+ #ifdef ALGDBG
+ printf("alg_entry exists ip%x, port %d\n", alg_key.ip_address,
+ alg_key.l4port);
+ #endif
+ //lib_arp_duplicate_found++;
+ return;
+ }
+	new_alg_data = (struct ftp_alg_table_entry *)
+			malloc(sizeof(*new_alg_data));
+	if (new_alg_data == NULL)
+		return;
+ //new_alg_data->status = INCOMPLETE;
+ new_alg_data->l4port = rte_bswap16(portid);
+ new_alg_data->ip_address = rte_bswap32(ipaddr);
+ rte_hash_add_key_data(ftp_alg_hash_handle, &alg_key, new_alg_data);
+
+ #ifdef ALGDBG
+ // print entire hash table
+ printf
+ ("\tALG: table update - ip=%d.%d.%d.%d on port=%d\n",
+ (alg_key.ip_address >> 24),
+ ((alg_key.ip_address & 0x00ff0000) >> 16),
+ ((alg_key.ip_address & 0x0000ff00) >> 8),
+ ((alg_key.ip_address & 0x000000ff)), portid);
+ /* print_arp_table(); */
+ puts("");
+ #endif
+}
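
The ALG table is keyed on the (IP address, L4 port) pair carried in struct ftp_alg_key, with both values stored byte-swapped, so lookups must build the key the same way. A brief sketch of the expected populate/lookup/cleanup sequence within this file (the address and port values are illustrative):

	struct ftp_alg_key key;
	struct ftp_alg_table_entry *entry;

	populate_ftp_alg_entry(0xC0A8010A /* 192.168.1.10 */, 21);

	key.ip_address = rte_bswap32(0xC0A8010A);
	key.l4port = rte_bswap16(21);
	key.filler1 = 0;
	key.filler2 = 0;
	entry = retrieve_ftp_alg_entry(key);
	if (entry != NULL)
		remove_ftp_alg_entry(0xC0A8010A, 21);
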
+
+/*
+ * ftp_alg payload modification for PORT and PASV command
+ * Input - cgnapt table entry - for taking the public /translated ip/port ,
+ * incoming PORT/PASV string, Session type - PORT or PASV
+ * Output - Translated string
+ */
+int ftp_alg_modify_payload(
+ struct cgnapt_table_entry *egress_entry,
+ char *port_string,
+ char *port_string_translated, int ftp_session_type)
+{
+ uint32_t transport_ip;
+ uint16_t transport_port;
+ uint16_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
+ uint16_t new_port_string_length;
+
+ uint8_t *bptr_public_address;
+
+ transport_ip = egress_entry->data.pub_ip;
+ transport_port = egress_entry->data.pub_port;
+ tmp5 = (uint16_t) (transport_port/0x100);
+ tmp6 = (uint16_t) (transport_port % 0x100);
+
+ transport_ip = rte_bswap32(transport_ip);
+
+ bptr_public_address = (uint8_t *) &transport_ip;
+
+ tmp4 = bptr_public_address[3];
+ tmp3 = bptr_public_address[2];
+ tmp2 = bptr_public_address[1];
+ tmp1 = bptr_public_address[0];
+
+ if (ftp_session_type == 1)
+ sprintf(port_string_translated, FTP_PASV_PARAMETER_STRING,
+ FTP_PASV_RETURN_CODE, tmp1, tmp2, tmp3, tmp4,
+ tmp5, tmp6);
+ else
+ sprintf(port_string_translated, FTP_PORT_PARAMETER_STRING,
+ tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
+ #ifdef ALGDBG
+ printf("FTP ALG: FTP new string: Len:%d %s\n",
+ (uint16_t) strlen(port_string_translated)-2,
+ port_string_translated);
+
+ printf("FTP non translated PASV string: Len:%d, %s\n",
+ (uint16_t)strlen(port_string)-2, port_string);
+ printf("old strlen:%d new strlen:%d\n",
+ (int)strlen(port_string),
+ (int)strlen(port_string_translated));
+ #endif
+
+ return(new_port_string_length =
+ (uint16_t) strlen(port_string_translated));
+}
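
For reference, FTP encodes the address as six decimal numbers "a,b,c,d,p1,p2" where the port equals p1 * 256 + p2; the function above simply splits the translated public address and port into those components before formatting the new payload. A worked example with illustrative values:

	/* public address 203.0.113.5, public port 50000 */
	uint16_t p1 = 50000 / 0x100;	/* 195 */
	uint16_t p2 = 50000 % 0x100;	/* 80, since 195 * 256 + 80 = 50000 */
	/* payload becomes "... (203,0,113,5,195,80)" */
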
+
+/*
+ * ftp_alg: modify the packet length (due to a change in the length of the
+ * FTP payload)
+ * Input - pkt
+ * Output - packet appended to or trimmed to the new length
+**/
+static inline void ftp_alg_modify_pkt_len(struct rte_mbuf *pkt)
+{
+ uint16_t pkt_length = 0;
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(pkt);
+ void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+
+ if (ip_hdr_size_bytes == IPv4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)iphdr;
+ pkt_length = rte_bswap16(ihdr4->total_length) + ETH_HDR_SIZE;
+ } else if (ip_hdr_size_bytes == IPv6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr6 = (struct ipv6_hdr *)iphdr;
+ pkt_length = rte_bswap16(ihdr6->payload_len) +
+ IPv6_HEADER_SIZE + ETH_HDR_SIZE;
+ }
+
+ uint16_t mbuf_pkt_length = rte_pktmbuf_pkt_len(pkt);
+
+ if (pkt_length == mbuf_pkt_length)
+ return;
+
+ if (pkt_length < mbuf_pkt_length) {
+ rte_pktmbuf_trim(pkt, mbuf_pkt_length - pkt_length);
+ return;
+ }
+
+ /* pkt_length > mbuf_pkt_length */
+ rte_pktmbuf_append(pkt, pkt_length - mbuf_pkt_length);
+}
+
+/*
+ * ftp_alg IP HDR size calculation
+ * Input - pkt
+ * Output - Length of IP HDR
+ */
+
+/* same as rte_ct_get_IP_hdr_size()*/
+uint16_t ftp_alg_get_IP_hdr_size(struct rte_mbuf *pkt)
+{
+ /* NOTE: Only supporting IP headers with no options at this time
+ * so header is fixed size
+ */
+
+ uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt, IP_START);
+ hdr_chk = hdr_chk >> 4;
+
+ if (hdr_chk == IP_VERSION_4)
+ return IPv4_HEADER_SIZE;
+ else if (hdr_chk == IP_VERSION_6)
+ return IPv6_HEADER_SIZE;
+ else /* Not IPv4 header with no options, return negative. */
+ return -1;
+
+}
+
+/*
+ * ftp_alg checksum recomputation due to the change in payload; uses rte
+ * functions. If HW checksum is supported, the s/w checksum can be disabled.
+ * Input - IP HDR and TCP HDR
+ * Output - none; checksum fields are updated in place
+ */
+static void ftp_alg_compute_checksums(
+ void *i_hdr,
+ struct tcp_hdr *t_hdr)
+/* same as rte_synproxy_compute_checksums*/
+{
+ /*
+ * calculate IP and TCP checksums.
+ * Note that both checksum routines require
+ * checksum fields to be set to zero, and that the checksum is in the
+ * correct byte order, so no rte_bswap16 is required.
+ */
+
+ int8_t hdr_chk = rte_ct_ipversion(i_hdr);
+ t_hdr->cksum = 0;
+
+ if (hdr_chk == IP_VERSION_4) {
+ struct ipv4_hdr *i4_hdr = (struct ipv4_hdr *)i_hdr;
+ i4_hdr->hdr_checksum = 0;
+ t_hdr->cksum = 0;
+ t_hdr->cksum = rte_ipv4_udptcp_cksum(i4_hdr, t_hdr);
+
+ #ifdef ALGDBG
+ printf("cksum %x\n", rte_bswap32(t_hdr->cksum));
+ #endif
+
+ i4_hdr->hdr_checksum = rte_ipv4_cksum(i4_hdr);
+ } else if (hdr_chk == IP_VERSION_6) {
+ struct ipv6_hdr *i6_hdr = (struct ipv6_hdr *)i_hdr;
+ t_hdr->cksum = 0;
+ t_hdr->cksum = rte_ipv6_udptcp_cksum(i6_hdr, t_hdr);
+ }
+}
+
+/*
+ * ftp_alg: adjust the ACK coming from the other end; the ACK field of the
+ * return packet is shifted by the same amount by which the payload length
+ * was changed
+ * Input - pkt, ack diff - delta
+ * Output - None(void)
+ */
+static void ftp_alg_adjust_tcp_ack(struct rte_mbuf *pkt, int16_t ackSeqdiff)
+{
+ /*Since v6 is not supported now*/
+ uint16_t ip_hdr_size_bytes = IPv4_HEADER_SIZE;
+ struct ipv4_hdr *iphdr = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ IP_START + ip_hdr_size_bytes);
+ /*
+	 * recv_ack and total length first to be changed to host byte order
+ * and then do the addition and then set back to network byte order
+ */
+ uint32_t temp;
+ temp = rte_bswap32(thdr->recv_ack);
+ //printf("%s: ackSeqdiff :%d %u\n", __FUNCTION__, ackSeqdiff, temp);
+ if (ackSeqdiff < 0)
+ temp += abs(ackSeqdiff);
+ else
+ temp -= abs(ackSeqdiff);
+
+ thdr->recv_ack = rte_bswap32(temp);
+}
+/*
+ * ftp_alg: adjust the SEQ of the onward/egress packet; the SEQ field is
+ * shifted by the same amount by which the payload length was changed
+ * Input - pkt, ack diff - delta
+ * Output - None(void)
+ */
+
+static void ftp_alg_adjust_tcp_seq(struct rte_mbuf *pkt, int16_t ackSeqdiff)
+{
+ /*Since v6 is not supported now*/
+ uint16_t ip_hdr_size_bytes = IPv4_HEADER_SIZE;
+ struct ipv4_hdr *iphdr = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ IP_START + ip_hdr_size_bytes);
+ uint32_t temp;
+
+ temp = rte_bswap32(thdr->sent_seq);
+ if (ackSeqdiff < 0)
+ temp -= abs(ackSeqdiff);
+ else
+ temp += abs(ackSeqdiff);
+
+ thdr->sent_seq = rte_bswap32(temp);
+}
+/*
+ * ftp_alg: adjust the SEQ of the onward/egress packet by the same amount
+ * by which the payload length was changed; this function computes the
+ * delta and calls the seq-adjust helper to update the packet
+ * Input - pkt, existing seq delta, lengths of the original and
+ * translated strings
+ * Output - cumulative seq delta between original and translated strings
+ */
+
+static int ftp_alg_delta_tcp_sequence(
+ struct rte_mbuf *pkt,
+ char *port_string,
+ int16_t existing_tcpSeqdiff,
+ uint16_t old_port_string_length,
+ uint16_t new_port_string_length)
+{
+	int16_t current_sequence_number_delta = 0;
+ int16_t final_sequence_number_delta;
+ /*Since v6 is not supported now*/
+ uint16_t ip_hdr_size_bytes = IPv4_HEADER_SIZE;
+ struct ipv4_hdr *iphdr = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ IP_START + ip_hdr_size_bytes);
+ /*
+	 * recv_ack and total length first to be changed to host byte order
+ * and then do the addition and then set back to network byte order
+ */
+ current_sequence_number_delta = (int16_t) (new_port_string_length -
+ old_port_string_length);
+ iphdr->total_length = rte_bswap16(iphdr->total_length);
+
+ #ifdef ALGDBG
+ printf("total_length :%u\n", iphdr->total_length);
+ #endif
+	if (current_sequence_number_delta < 0)
+ iphdr->total_length -= abs(current_sequence_number_delta);
+ else
+ iphdr->total_length += current_sequence_number_delta;
+
+ iphdr->total_length = rte_bswap16(iphdr->total_length);
+	if (existing_tcpSeqdiff != 0)
+		ftp_alg_adjust_tcp_seq(pkt, existing_tcpSeqdiff);
+	final_sequence_number_delta = current_sequence_number_delta +
+					existing_tcpSeqdiff;
+	return final_sequence_number_delta;
+}
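
To make the bookkeeping above concrete, here is a hedged walk-through of the arithmetic, assuming an illustrative PORT rewrite and no earlier delta (existing_tcpSeqdiff == 0):

	/* old PORT payload: "PORT 10,0,0,5,19,137\r\n"     (length 22)
	 * new PORT payload: "PORT 198,51,100,9,19,137\r\n" (length 26)
	 * current_sequence_number_delta = 26 - 22 = +4
	 *   - the IPv4 total_length of this packet grows by 4
	 *   - later packets in the same direction: seq shifted by +4
	 *     (ftp_alg_adjust_tcp_seq)
	 *   - ACKs coming back from the peer: recv_ack reduced by 4
	 *     (ftp_alg_adjust_tcp_ack)
	 * final_sequence_number_delta = +4 + existing_tcpSeqdiff = +4
	 */
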
+
+
+/*
+ * ftp_alg_dpi - parses the packet and performs the appropriate action
+ * for PORT or PASV commands, depending on the direction of the packet
+ * (private or public); called from the CGNAPT pipeline
+ * Input - cgnapt pipeline struct, cgnapt key, pkt, connection tracker,
+ * position of the packet assigned by CT, direction of the packet
+ * Output - none; the respective action functions are invoked
+ */
+void ftp_alg_dpi(
+ struct pipeline_cgnapt *p_nat,
+ struct pipeline_cgnapt_entry_key *nat_entry_key,
+ struct rte_mbuf *pkt,
+ struct rte_ct_cnxn_tracker *cgnat_cnxn_tracker,
+ int32_t ct_position,
+ uint8_t direction)
+{
+ /*
+	 * recv_ack and total length first to be changed to host byte order
+ * and then do the addition and then set back to network byte order
+ */
+
+ /*entry key to be framed in cgnat and pass it over here*/
+ char *port_cmd_string;
+ char *port_cmd_end_string;
+ char *tcp_header_end;
+ char *tcp_start;
+
+
+ uint16_t private_port_number;
+ uint16_t public_port_number;
+ uint16_t ip1, ip2, ip3, ip4, port1, port2;
+ int16_t tcpSeqdiff;
+ int16_t ackSeqdiff, ackAdjust;
+ uint32_t private_address;
+ uint32_t public_address;
+ uint8_t *bptr_private_address;
+ /* also for PASV string */
+ char port_string[FTP_MAXIMUM_PORT_STRING_LENGTH];
+ char port_string_translated[FTP_MAXIMUM_PORT_STRING_LENGTH];
+ int16_t new_port_string_length;
+ int16_t old_port_string_length;
+ int dummy_value;
+ struct cgnapt_table_entry *egress_entry, *ingress_entry;
+ uint32_t ct_key[10];
+ uint8_t key_direction;
+ /*Since v6 is not supported now*/
+ uint16_t ip_hdr_size_bytes = IPv4_HEADER_SIZE;
+
+ struct ipv4_hdr *ip_hdr = rte_pktmbuf_mtod_offset(pkt,
+ struct ipv4_hdr *, sizeof(struct ether_hdr));
+ /* TCP and UDP ports at same offset,
+ * just use TCP for offset calculation
+ */
+ struct tcp_hdr *thdr = rte_pktmbuf_mtod_offset(pkt, struct tcp_hdr *,
+ (sizeof(struct ether_hdr)+sizeof(struct ipv4_hdr)));
+ uint16_t src_port = rte_bswap16(thdr->src_port);
+ uint16_t dst_port = rte_bswap16(thdr->dst_port);
+ uint8_t proto = ip_hdr->next_proto_id;
+ uint32_t src_addr = rte_bswap32(ip_hdr->src_addr);
+ uint32_t dst_addr = rte_bswap32(ip_hdr->dst_addr);
+ uint32_t tmp_tcp_paylod_size;
+
+ #if 0
+ - src_port & dst_port checking to be moved from cgnat to dpi
+ - For control channel
+ first validation of tcpSeqdiff to be checked
+ IF < > 0
+ ftp_alg_tcp_ack() to be called(this includes PORT
+ response and PASV response ack as well)
+ Return
+ ELSE
+				the port/pasv parameter checking to be done
+ - For data channel
+		- retrieve ALG entry
+ IF found
+ - remove the ALG entry
+ even if not found(found cases too)
+ - set the bypass flag in the CT session table
+
+ #endif
+
+ #ifdef ALGDBG
+ {
+ printf("ftp port number: %d, %d\n", src_port, dst_port);
+ printf("ftp TCP seq num diff: %d\n",
+ cgnat_cnxn_tracker->hash_table_entries[
+ ct_position].tcpSeqdiff);
+ printf("tcp data offset: %d\n",
+ ((thdr->data_off & 0xf0) >> 2));
+ printf("ct position in dpi:%d\n", ct_position);
+ }
+ #endif
+
+	if (src_port == 21 || dst_port == 21) { /* Control Channel */
+ /* Control Channel Start */
+ /*
+ * need to look for the port or pasv command. Then have to look for
+ * the IP address and the port address. Then must create a TCP control
+ * block and spoof the port number, and change the ip address, and do
+ * the sequence number setting.
+ */
+ /* Handle TCP headers.*/
+ tcp_start = (char *)thdr;
+
+ /* start of TCP payload */
+ port_cmd_string = (char * )(tcp_start+((thdr->data_off & 0xf0) >> 2));
+ tcp_header_end = port_cmd_string;
+
+ if (direction == PRIVATE) {
+
+ #ifdef ALGDBG
+ printf("In PRIVATE ");
+ #endif
+
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].seq_client
+ = rte_bswap32(thdr->sent_seq);
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].ack_client
+ = rte_bswap32(thdr->recv_ack);
+ #ifdef ALGDBG
+ printf("-->Seq_cli:%u, Ack_cli:%u, Len:%4d\n",
+ rte_bswap32(thdr->sent_seq),
+ rte_bswap32(thdr->recv_ack),
+ (rte_bswap16(ip_hdr->total_length) -
+ (((thdr->data_off & 0xf0) >> 4) * 4)) - 20);
+ #endif
+ } else {
+
+ #ifdef ALGDBG
+ printf("In PUBLIC ");
+ #endif
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].seq_server
+ = rte_bswap32(thdr->sent_seq);
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].ack_server
+ = rte_bswap32(thdr->recv_ack);
+ #ifdef ALGDBG
+ printf("<--Seq_cli:%4d, Ack_cli%4d, Len:%4d\n",
+ rte_bswap32(thdr->sent_seq), rte_bswap32(thdr->recv_ack),
+ (ip_hdr->total_length - (((thdr->data_off & 0xf0) >> 2))
+ - 20));
+ #endif
+ }
+
+ if (sscanf(port_cmd_string, FTP_PASV_PARAMETER_STRING, &dummy_value,
+ &ip1, &ip2, &ip3, &ip4, &port1, &port2) ==
+ FTP_PASV_PARAMETER_COUNT){
+
+ sprintf (port_string, FTP_PASV_PARAMETER_STRING, FTP_PASV_RETURN_CODE,
+ ip1, ip2, ip3, ip4, port1, port2);
+
+ int i = 0;
+ while (port_cmd_string[i] != '\r' && port_cmd_string[i+1] != '\n')
+ i++;
+
+ i += 2; // now it points to end of port cmd string.
+
+ old_port_string_length = i;
+
+ private_port_number = (uint16_t) (port1 * 0x100 + port2);
+ bptr_private_address = (uint8_t *) &private_address;
+
+ bptr_private_address[3] = (uint8_t) (ip4 & 0x00FF);
+ bptr_private_address[2] = (uint8_t) (ip3 & 0x00FF);
+ bptr_private_address[1] = (uint8_t) (ip2 & 0x00FF);
+ bptr_private_address[0] = (uint8_t) (ip1 & 0x00FF);
+
+		/* Not needed, as the position was obtained from CT */
+
+ if (direction == PUBLIC) {
+			/* Client in Private, Server in Public */
+			/* Do not translate the payload for a PASV response from
+			 * the public side: only table creation, no payload
+			 * modification. The data channel also need not be created
+			 * here, as NAT will create it when the client initiates
+			 * the connection later.
+			 */
+ populate_ftp_alg_entry(private_address, private_port_number);
+			/*
+			 * The bypass-ALG flag is to be set; the sequence-number
+			 * delta is either 0 or not needed; direction checking
+			 * applies to all scenarios.
+			 */
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ server_direction = SERVER_IN_PUBLIC;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ ftp_session_type= 1; // Passive session type
+ } else if (direction == PRIVATE) {
+ /*Client in Public , Server in Private*/
+
+ struct pipeline_cgnapt_entry_key data_channel_key;
+ private_address = rte_bswap32(private_address);
+ data_channel_key.ip = private_address;
+ data_channel_key.port = private_port_number;
+ /* to be checked if it can be passed as param from NAT*/
+ data_channel_key.pid = pkt->port;
+
+			/* add_dynamic_cgnat_entry() for the data channel */
+			/* The private IP and port are obtained from the server;
+			 * with those, the egress and ingress NAPT entries can be
+			 * added for further data-channel communication.
+			 */
+
+ if (add_dynamic_cgnapt_entry_alg((struct pipeline *)p_nat,
+ &data_channel_key, &egress_entry, &ingress_entry) == 0){
+
+ #ifdef ALGDBG
+ printf("Wrong FTP ALG packet\n");
+ #endif
+ //p_nat->invalid_packets |= pkt_mask;
+
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+
+ tmp_tcp_paylod_size = rte_bswap16(ip_hdr->total_length) -
+ ((thdr->data_off & 0xf0) >> 2) - ip_hdr_size_bytes;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ tcp_payload_size = tmp_tcp_paylod_size;
+
+ /*Adding ALG entry , params to be derived from egress entry*/
+ populate_ftp_alg_entry(egress_entry->data.pub_ip,
+ egress_entry->data.pub_port);
+ /* payload modification */
+ new_port_string_length = ftp_alg_modify_payload(egress_entry,
+ port_string,
+ port_string_translated, 1);
+ strncpy(tcp_header_end, port_string_translated,
+ strlen(port_string_translated));
+ tcpSeqdiff = ftp_alg_delta_tcp_sequence( pkt, port_string,
+ cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].tcpSeqdiff,
+ old_port_string_length,
+ new_port_string_length);
+
+ /* same as rte_synproxy_adjust_pkt_length() in ct */
+ ftp_alg_modify_pkt_len(pkt);
+			/*
+			 * Store sequence_number_delta in the session-data
+			 * structure and set the bypass flag to NO (a TCP ack is
+			 * expected from the other end, at which point the bypass
+			 * is set accordingly; that is handled earlier in this
+			 * function).
+			 */
+
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ alg_bypass_flag = NO_BYPASS;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ tcpSeqdiff = tcpSeqdiff;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ server_direction = SERVER_IN_PRIVATE;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ ftp_session_type = 1; // Passive session type
+ return;
+
+ } /* PRIVATE dir */
+ } else if (sscanf(port_cmd_string, FTP_PORT_PARAMETER_STRING,
+ &ip1, &ip2, &ip3, &ip4, &port1, &port2) ==
+ FTP_PORT_PARAMETER_COUNT){
+
+ int i = 0;
+ static uint8_t port_hit;
+ while (port_cmd_string[i] != '\r' &&
+ port_cmd_string[i+1] != '\n')
+ i++;
+
+ i += 2; // now it points to end of port cmd string.
+
+ old_port_string_length = i;
+
+ #ifdef ALGDBG
+ printf( " Existing Seq Diff = %d", cgnat_cnxn_tracker->
+ hash_table_entries[ct_position].tcpSeqdiff);
+ printf("FTP ALG: FTP PORT command length: %d\n",
+ old_port_string_length);
+ #endif
+
+ private_port_number = (uint16_t) (port1 * 0x100 + port2);
+
+ #ifdef ALGDBG
+ printf("FTP ALG: private port number before swap: %u\n",
+ private_port_number);
+ #endif
+
+ bptr_private_address = (uint8_t *) &private_address;
+ bptr_private_address[3] = (uint8_t) (ip4 & 0x00FF);
+ bptr_private_address[2] = (uint8_t) (ip3 & 0x00FF);
+ bptr_private_address[1] = (uint8_t) (ip2 & 0x00FF);
+ bptr_private_address[0] = (uint8_t) (ip1 & 0x00FF);
+
+ sprintf(port_string, FTP_PORT_PARAMETER_STRING, ip1, ip2,
+ ip3, ip4, port1, port2);
+
+ #ifdef ALGDBG
+ printf("FTP ALG: FTP original PORT string: %d,%s\n",
+ (int) strlen(port_string)-2, port_string);
+ printf("prv addr: %x\n", private_address);
+ #endif
+
+
+ if (direction == PUBLIC) {
+			/* Client in Public */
+			/* retrieve_cgnat_entry() for the data channel */
+			/* The public port and IP address are used for framing the
+			 * key; the "private" phrase is a misnomer here.
+			 */
+ struct pipeline_cgnapt_entry_key data_channel_key;
+ data_channel_key.ip = private_address;
+ data_channel_key.port = private_port_number;
+ data_channel_key.pid = 0xffff;
+
+
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ server_direction = SERVER_IN_PRIVATE;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ ftp_session_type= 0; // Active session type
+
+			/* No payload modification */
+ #ifdef ALGDBG
+ printf("<--Seq_cli:%4d, Ack_cli%4d, Len:%4d\n",
+ rte_bswap32(thdr->sent_seq),
+ rte_bswap32(thdr->recv_ack),
+ (ip_hdr->total_length -
+ (((thdr->data_off & 0xf0) >> 2)) - 20));
+ #endif
+ populate_ftp_alg_entry(private_address, private_port_number);
+ } else if (direction == PRIVATE) {
+
+			/* Client in Private, Server in Public */
+			/* populate_ftp_alg_entry() */
+			/* add_dynamic_cgnapt_entry() */
+			/* payload modification */
+ struct pipeline_cgnapt_entry_key data_channel_key;
+ private_address = rte_bswap32(private_address);
+ data_channel_key.ip = private_address;
+ data_channel_key.port = private_port_number;
+ /* to be checked if it can be passed as param from NAT*/
+ data_channel_key.pid = pkt->port;
+
+			/* add_dynamic_cgnat_entry() for the data channel */
+			/*
+			 * The private IP and port are obtained from the client;
+			 * with those, the egress and ingress NAPT entries can be
+			 * added for further data-channel communication.
+			 */
+
+ if (add_dynamic_cgnapt_entry_alg((struct pipeline *)
+ p_nat, &data_channel_key, &egress_entry,
+ &ingress_entry) == 0){
+
+ #ifdef ALGDBG
+ printf("Wrong FTP ALG packet\n");
+ #endif
+ //p_nat->invalid_packets |= pkt_mask;
+ p_nat->naptDroppedPktCount++;
+
+ #ifdef CGNAPT_DEBUGGING
+ p_nat->naptDroppedPktCount4++;
+ #endif
+ return;
+ }
+
+ tmp_tcp_paylod_size = rte_bswap16(ip_hdr->total_length) -
+ ((thdr->data_off & 0xf0) >> 2) -
+ ip_hdr_size_bytes;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ tcp_payload_size = tmp_tcp_paylod_size;
+ /*ALG entry add, params to be derived from egress entry*/
+
+ populate_ftp_alg_entry(egress_entry->data.pub_ip,
+ egress_entry->data.pub_port);
+ /* payload modification */
+ new_port_string_length = ftp_alg_modify_payload(egress_entry,
+ port_string,
+ port_string_translated, 0);
+ strncpy(tcp_header_end, port_string_translated,
+ strlen(port_string_translated));
+ tcpSeqdiff = ftp_alg_delta_tcp_sequence( pkt, port_string,
+ cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].tcpSeqdiff,
+ old_port_string_length,
+ new_port_string_length);
+ /* same as rte_synproxy_adjust_pkt_length() in ct */
+ ftp_alg_modify_pkt_len(pkt);
+
+			/*
+			 * Store sequence_number_delta in the session-data
+			 * structure and set the bypass flag to NO.
+			 * When the response from the other end is received,
+			 * modify the ack number using the reverse sign of the
+			 * sequence delta.
+			 */
+
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ alg_bypass_flag = NO_BYPASS;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ tcpSeqdiff = tcpSeqdiff;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ server_direction = SERVER_IN_PUBLIC;
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].
+ ftp_session_type = 0; // Active session type
+
+ #ifdef ALGDBG
+ printf("<--Seq_cli:%4d, Ack_cli%4d, Len:%4d\n",
+ rte_bswap32(thdr->sent_seq),
+ rte_bswap32(thdr->recv_ack),
+ (ip_hdr->total_length -
+ (((thdr->data_off & 0xf0) >> 2)) - 20));
+
+ #endif
+ return;
+ } /* PRIVATE dir */
+ } /* PORT cmd message */
+
+ if ((ackAdjust=cgnat_cnxn_tracker->hash_table_entries[
+ ct_position].tcpSeqdiff) != 0) {
+ if (direction == PRIVATE) {
+ if (
+ cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].seq_client !=
+ cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].ack_server) {
+ static int Seqhits;
+ ftp_alg_adjust_tcp_seq( pkt,ackAdjust);
+ tmp_tcp_paylod_size = rte_bswap16(
+ ip_hdr->total_length) -
+ ((thdr->data_off & 0xf0) >> 2) -
+ ip_hdr_size_bytes;
+ cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].tcp_payload_size = tmp_tcp_paylod_size;
+ #ifdef ALGDBG
+ printf("<--Seq_cli:%4d, Ack_cli%4d, Len:%4d\n",
+ rte_bswap32(thdr->sent_seq),
+ rte_bswap32(thdr->recv_ack),
+ (ip_hdr->total_length -(((thdr->data_off & 0xf0) >> 2))- 20));
+ #endif
+ }
+ } else {
+ if (cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].ack_server !=
+ (cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].seq_client +
+ cgnat_cnxn_tracker->hash_table_entries
+ [ct_position].tcp_payload_size)) {
+ static int Ackhits;
+ ftp_alg_adjust_tcp_ack( pkt,ackAdjust);
+ #ifdef ALGDBG
+ printf("<--Seq_cli:%4d, Ack_cli%4d, Len:%4d\n",
+ rte_bswap32(thdr->sent_seq),
+ rte_bswap32(thdr->recv_ack),
+ (ip_hdr->total_length -(((thdr->data_off & 0xf0) >> 2))- 20));
+ #endif
+ }
+ }
+ return;
+	} /* expected_ack and sequence number update for the PUBLIC dir TCP window */
+ } /* Control Channel End */
+ else {
+		/* remove the ALG entry; retrieval is taken care of by the rte function */
+ #ifdef ALGDBG
+ printf("In Data Channel \n");
+ #endif
+ remove_ftp_alg_entry (dst_addr,dst_port);/* remove the ALG entry */
+ cgnat_cnxn_tracker->hash_table_entries[ct_position].alg_bypass_flag = BYPASS;
+ } /* Data Channel End */
+}
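+
+#if 0
+/*
+ * Minimal sketch (disabled, for illustration only) of the sequence-delta
+ * bookkeeping used above. When the rewritten PORT/PASV string changes the
+ * TCP payload length by "delta", later segments in the rewritten direction
+ * need their sequence numbers shifted by +delta and the peer's
+ * acknowledgements shifted by -delta; in this library the real work is done
+ * by ftp_alg_adjust_tcp_seq()/ftp_alg_adjust_tcp_ack(). The example_* names
+ * below are assumptions made up for the sketch, not part of the ALG.
+ */
+static void ftp_alg_example_shift_seq(struct tcp_hdr *thdr, int16_t delta)
+{
+	/* packets flowing in the direction whose payload was rewritten */
+	thdr->sent_seq = rte_bswap32(rte_bswap32(thdr->sent_seq) + delta);
+}
+
+static void ftp_alg_example_shift_ack(struct tcp_hdr *thdr, int16_t delta)
+{
+	/* packets coming back from the peer: undo the shift on the ack */
+	thdr->recv_ack = rte_bswap32(rte_bswap32(thdr->recv_ack) - delta);
+}
+#endif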
diff --git a/common/VIL/alg/lib_ftp_alg.h b/common/VIL/alg/lib_ftp_alg.h
new file mode 100644
index 00000000..875d6276
--- /dev/null
+++ b/common/VIL/alg/lib_ftp_alg.h
@@ -0,0 +1,102 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef __INCLUDE_LIB_FTP_ALG_H__
+#define __INCLUDE_LIB_FTP_ALG_H__
+#include "rte_ether.h"
+#include "rte_ct_tcp.h"
+/* CT & CGNAT integration to be resolved for these definitions */
+#define META_DATA_OFFSET 128
+#define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
+#define ETH_HDR_SIZE 14
+#define IP_START (ETHERNET_START + ETH_HDR_SIZE)
+#define PROTOCOL_START (IP_START + 9)
+#define TCP_START (IP_START + IP_V4_HEADER_SIZE)
+#define TCP_MIN_HDR_SIZE 20
+
+#define RTE_TCP_PROTO_ID 6
+#define RTE_SP_DEFAULT_TTL 64
+
+#define RTE_SYNPROXY_MAX_SPOOFED_PKTS 64
+
+#define RTE_TCP_SYN 0x02
+#define RTE_TCP_ACK 0x10
+#define RTE_TCP_SYN_ACK (RTE_TCP_SYN | RTE_TCP_ACK)
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+#define IPv4_HEADER_SIZE 20
+#define IPv6_HEADER_SIZE 40
+
+//#define IPV4 4
+//#define IPV6 6
+enum ftp_alg_bypass {
+ NO_BYPASS,
+ BYPASS
+};
+
+enum ftp_alg_mode {
+ FTP_ALG_PORT,
+ FTP_ALG_PASV
+};
+enum ftp_alg_direction {
+ SERVER_IN_PRIVATE,
+ SERVER_IN_PUBLIC
+};
+enum phy_port {
+ PRIVATE_PORT,
+ PUBLIC_PORT
+};
+
+struct ftp_alg_key {
+ uint32_t ip_address;
+ uint16_t l4port;
+ uint8_t filler1;
+ uint8_t filler2;
+};
+struct ftp_alg_table_entry {
+ uint32_t ip_address;
+ uint16_t l4port;
+ uint8_t ftp_alg_mode;
+ uint8_t ftp_alg_direction;
+ uint32_t session_id; /*to be checked */
+ uint8_t alg_bypass_flag;
+ uint8_t dummy;
+ uint8_t dummy1;
+ //uint32_t napt_entry;/* to be checked*/
+} __rte_cache_aligned;
+
+#define FTP_SERVER_PORT 21
+#define FTP_PORT_STRING "PORT"
+#define FTP_PORT_PARAMETER_STRING "PORT %hu,%hu,%hu,%hu,%hu,%hu\r\n"
+#define FTP_PORT_PARAMETER_COUNT 6
+#define FTP_PORT_RESPONSE_STRING "200 PORT command successful.\r\n"
+#define FTP_PORT_STRING_END_MARKER '\n'
+#define FTP_MAXIMUM_PORT_STRING_LENGTH 60
+#define FTP_PASV_STRING "PASV"
+#define FTP_PASV_PARAMETER_STRING "%d Entering Passive Mode (%hu,%hu,%hu,%hu,%hu,%hu)\r\n"
+#define FTP_PASV_PARAMETER_COUNT 7
+#define FTP_PASV_STRING_END_MARKER '\n' /* not ')' */
+#define FTP_PASV_RETURN_CODE 227
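+
+/*
+ * Illustrative sketch only (not part of the ALG API): PORT commands and PASV
+ * replies carry the data-channel endpoint as six decimal octets
+ * h1,h2,h3,h4,p1,p2 matching the parameter strings above; h1..h4 are the
+ * IPv4 address bytes and the port is p1 * 256 + p2, which is exactly the
+ * arithmetic ftp_alg_dpi() applies after sscanf(). The helper name below is
+ * an assumption used only for illustration.
+ */
+static inline uint16_t ftp_alg_example_decode_port(uint16_t p1, uint16_t p2)
+{
+	return (uint16_t)(p1 * 0x100 + p2);
+}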
+
+void ftp_alg_dpi(
+ struct pipeline_cgnapt *p_nat,
+ struct pipeline_cgnapt_entry_key *nat_entry_key,
+ struct rte_mbuf *pkt,
+ struct rte_ct_cnxn_tracker *cgnat_cnxn_tracker,
+ int32_t ct_position,
+ uint8_t direction);
+void lib_ftp_alg_init(void);
+extern int8_t rte_ct_ipversion(void *i_hdr);
+#endif
diff --git a/common/VIL/alg/lib_sip_alg.c b/common/VIL/alg/lib_sip_alg.c
new file mode 100644
index 00000000..9940d59a
--- /dev/null
+++ b/common/VIL/alg/lib_sip_alg.c
@@ -0,0 +1,2257 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+/*Sriramajeyam*/
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <math.h>
+
+#include <app.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+
+#include "pipeline_actions_common.h"
+#include "hash_func.h"
+#include "lib_sip_alg.h"
+#include "vnf_common.h"
+#include "pipeline_common_be.h"
+
+#define SIP_ALG_SIP "SIP"
+#define SIP_ALG_200_OK "200 OK"
+#define SIP_ALG_INVITE "INVITE"
+#define SIP_ALG_BYE "BYE"
+#define SIP_ALG_TRYING "100 Trying"
+#define SIP_ALG_RINGING "180 Ringing"
+#define SIP_ALG_ACK "ACK"
+#define SIP_ALG_CONTACT "Contact"
+#define SIP_ALG_CONTENT_LEN "Content-Length"
+#define SIP_ALG_VIA "Via"
+#define SIP_ALG_FROM "From"
+#define SIP_ALG_TO "To"
+#define SIP_ALG_CALLID "Call-ID"
+#define SIP_ALG_RTP "RTP"
+#define SIP_ALG_RTCP "a=RTCP"
+#define SIP_ALG_CANCEL "CANCEL"
+#define SIP_ALG_CONTYPE "Content-Type"
+#define SIP_ALG_APPSDP "application/sdp"
+#define SIP_ALG_CSEQ "CSeq"
+#define SIP_ALG_AUDIO "m=audio"
+#define SIP_ALG_DOUBLE_CRLF "\r\n\r\n"
+#define SIP_ALG_CRLF "\r\n"
+#define SIP_ALG_AT "@"
+#define SIP_ALG_GREAT ">"
+#define SIP_ALG_OWNER "o="
+#define SIP_ALG_IPV4 "IP4"
+#define SIP_ALG_CONN "c="
+#define SIP_ALG_REMOTE_PARTY_ID "Remote-Party-ID"
+#define SIP_ALG_SPACE " "
+#define SIP_ALG_SEMICOLON ";"
+
+#define SIP_DEFAULT_L4PORT 5060
+
+#define SIP_ALG_INVITE_MSGTYPE 1
+#define SIP_ALG_BYE_MSGTYPE 2
+#define SIP_ALG_200_OK_INVITE_MSGTYPE 3
+#define SIP_ALG_200_OK_BYE_MSGTYPE 4
+#define SIP_ALG_TRYING_RINGING_MSGTYPE 5
+#define SIP_ALG_ACK_MSGTYPE 6
+
+#define MAX_NUM_SIP_ALG_ENTRIES 16384
+
+#define SIP_ALG_VIA_FIELD_IPADDR 14
+#define SIP_ALG_CTAC_FIELD_IPADDR 7
+
+#define ADDRESS_PORT_STRING 1
+#define PORT_STRING 2
+
+#define MAX_ADDR_PORT_SIZE 30
+#define MAX_ADDR_SIZE 20
+#define MAX_PORT_SIZE 10
+#define MAX_SIP_UDP_MSG_SIZE 2000
+
+#define ALG_DEBUG 0
+
+enum { FALSE, TRUE };
+
+struct rte_mempool *lib_alg_pktmbuf_tx_pool;
+
+struct rte_mbuf *lib_alg_pkt;
+
+static struct rte_hash_parameters sip_alg_hash_params = {
+ .name = NULL,
+ .entries = MAX_NUM_SIP_ALG_ENTRIES,
+ .reserved = 0,
+ .key_len = sizeof(struct sip_alg_key),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ .extra_flag = 1,
+};
+
+struct rte_hash *sip_alg_hash_table;
+
+struct sip_alg_table_entry *sip_alg_table[MAX_NUM_SIP_ALG_ENTRIES];
+
+char *sip_alg_process(struct rte_mbuf *pkt,
+ uint16_t pkt_direction, uint16_t call_direction,
+ uint16_t msgType, uint32_t modIp,
+ uint16_t modL4Port, uint32_t pubIp,
+ uint16_t pubL4Port, uint16_t modRtpPort,
+ uint16_t modRtcpPort, uint16_t *diffModSipLen);
+char *getSipCallIdStr(char *pMsg);
+char *natSipAlgModifyPayloadAddrPort(char *pSipMsg, char **pSipMsgEnd,
+ uint32_t oldStrLen, uint32_t *diffLen,
+ uint32_t pub_ip, uint16_t pub_port,
+ uint32_t type);
+char *natSipAlgAdjustMsg(char *pSipMsg, char **pSipMsgEnd,
+ uint32_t newStrLen, uint32_t oldStrLen);
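+
+#if 0
+/*
+ * Minimal sketch (disabled, for illustration only) of the field-rewrite
+ * pattern repeated throughout sip_alg_process(): locate the field tag,
+ * bound the addr:port substring, then rewrite it in place. It assumes that
+ * natSipAlgMsgFieldPos() is declared in lib_sip_alg.h, as the calls later
+ * in this file do; the example_* name is not part of the library.
+ */
+static char *sip_alg_example_rewrite_via(char *pSipMsg, char **pSipMsgEnd,
+			uint32_t modIp, uint16_t modL4Port, uint32_t *diffLen)
+{
+	char *pEnd;
+	int pos;
+
+	if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0) != TRUE)
+		return NULL;
+
+	/* skip "Via" plus the fixed offset to the address, as sip_alg_process() does below */
+	pSipMsg += pos + strlen(SIP_ALG_VIA) + SIP_ALG_VIA_FIELD_IPADDR;
+	pEnd = pSipMsg;
+	/* the addr:port substring ends at the next ";" */
+	natSipAlgMsgFieldPos(pEnd, SIP_ALG_SEMICOLON, &pos, 0);
+	pEnd += pos;
+
+	return natSipAlgModifyPayloadAddrPort(pSipMsg, pSipMsgEnd,
+			(uint32_t)(pEnd - pSipMsg), diffLen,
+			modIp, modL4Port, ADDRESS_PORT_STRING);
+}
+#endif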
+
+// This method will be called from other VNFs to initialize the SIP lib.
+// Make an API out of it.
+void lib_sip_alg_init(void)
+{
+	char *s = rte_zmalloc(NULL, 64, RTE_CACHE_LINE_SIZE);
+ int socketid = 0;
+ /* create ipv4 hash */
+ if(!s){
+ printf("NAT SIP ALG Init failed\n");
+ return;
+ }
+	/* 64 bytes were allocated for s above */
+	snprintf(s, 64, "ipv4_sip_alg_hash_%d", socketid);
+ printf("NAT SIP ALG initialization ...\n");
+
+ /* SIP ALG hash table initialization */
+ sip_alg_hash_params.socket_id = SOCKET_ID_ANY;
+ sip_alg_hash_params.name = s;
+ sip_alg_hash_table = rte_hash_create(&sip_alg_hash_params);
+
+ if (sip_alg_hash_table == NULL) {
+ printf("SIP ALG rte_hash_create failed. socket %d ...\n",
+ sip_alg_hash_params.socket_id);
+ rte_exit(0, "SIP ALG rte_hash_create failed");
+ } else {
+ printf("sip_alg_hash_table %p\n\n", (void *)sip_alg_hash_table);
+ }
+
+}
+
+char *itoa(long n);
+char *itoa(long n)
+{
+ int len = n == 0 ? 1 : floor(log10l(labs(n))) + 1;
+
+ if (n < 0)
+ len++; /* room for negative sign '-' */
+
+ char *buf = calloc(sizeof(char), len + 1); // +1 for null
+ if(buf != NULL)
+ snprintf(buf, len + 1, "%ld", n);
+ return buf;
+}
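+
+#if 0
+/*
+ * Disabled usage sketch: itoa() returns a calloc()'d buffer, so the caller
+ * owns it and must free() it. The example name is an assumption for the
+ * sketch only.
+ */
+static void itoa_example_usage(void)
+{
+	char *port_str = itoa(SIP_DEFAULT_L4PORT);
+
+	if (port_str != NULL)
+		free(port_str);
+}
+#endif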
+
+struct sip_alg_table_entry *retrieve_sip_alg_entry(
+ struct sip_alg_key *alg_key);
+
+struct sip_alg_table_entry *retrieve_sip_alg_entry(
+ struct sip_alg_key *alg_key)
+{
+ struct sip_alg_table_entry *sip_alg_data = NULL;
+
+ int ret = rte_hash_lookup(sip_alg_hash_table, alg_key);
+
+ if (ret < 0) {
+ #ifdef ALGDBG
+ printf("alg-hash lookup failed ret %d, "
+ "EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ #endif
+ } else {
+ sip_alg_data = sip_alg_table[ret];
+ return sip_alg_data;
+ }
+
+ return NULL;
+}
+
+//int remove_sip_alg_entry(uint32_t ipaddr, uint16_t portid);
+int remove_sip_alg_entry(uint32_t ipaddr, uint16_t portid)
+{
+ struct sip_alg_key alg_key;
+ void *sip_alg_entry_data;
+ int ret;
+
+ alg_key.l4port = portid;
+ alg_key.ip_address = ipaddr;
+ alg_key.filler1 = 0;
+ alg_key.filler2 = 0;
+
+ if (ALG_DEBUG)
+ printf("remove_sip_entry ip %x, port %d\n", alg_key.ip_address,
+ alg_key.l4port);
+
+ ret = rte_hash_lookup(sip_alg_hash_table, &alg_key);
+ if (ret < 0) {
+ if (ALG_DEBUG)
+ printf("removesipalgentry: "
+ "rtehashlookup failed with error %d",
+ ret);
+ return -1;
+ }
+
+ sip_alg_entry_data = sip_alg_table[ret];
+
+ free(sip_alg_entry_data);
+ rte_hash_del_key(sip_alg_hash_table, &alg_key);
+
+ return 0;
+}
+
+/*
+ * Function for populating a SIP ALG entry.
+ * Returns 0 on success and -1 on failure.
+ */
+int populate_sip_alg_entry(uint32_t ipaddr, uint16_t portid,
+ char *sip_call_id, uint8_t call_direction,
+ enum sip_alg_port_type port_type);
+int populate_sip_alg_entry(uint32_t ipaddr, uint16_t portid,
+ char *sip_call_id, uint8_t call_direction,
+ enum sip_alg_port_type port_type)
+{
+ struct sip_alg_key alg_key;
+
+ alg_key.l4port = portid;
+ alg_key.ip_address = ipaddr;
+ alg_key.filler1 = 0;
+ alg_key.filler2 = 0;
+ int ret;
+
+ if (ALG_DEBUG)
+ printf("populate_sip_alg_entry port %d, ip %x\n",
+ alg_key.l4port, alg_key.ip_address);
+
+ struct sip_alg_table_entry *new_alg_data =
+ retrieve_sip_alg_entry(&alg_key);
+
+ if (new_alg_data) {
+ if (ALG_DEBUG)
+ printf("sip_alg_entry exists ip%x, port %d\n",
+ alg_key.ip_address, alg_key.l4port);
+ return 0;
+ }
+
+ new_alg_data = NULL;
+ new_alg_data = (struct sip_alg_table_entry *)
+ malloc(sizeof(struct sip_alg_table_entry));
+ if (new_alg_data == NULL) {
+ printf("populate sip alg entry: allocation failed\n");
+ return -1;
+ }
+
+ new_alg_data->l4port = portid;
+ new_alg_data->ip_address = ipaddr;
+ new_alg_data->l4port_type = port_type;
+ new_alg_data->sip_alg_call_direction = call_direction;
+ strcpy((char *)new_alg_data->sip_alg_call_id, (char *)sip_call_id);
+ new_alg_data->filler1 = 0;
+ new_alg_data->filler2 = 0;
+ new_alg_data->filler3 = 0;
+
+ ret = rte_hash_add_key(sip_alg_hash_table, &alg_key);
+ if (ret < 0) {
+		printf("populate sip - rte_hash_add_key ERROR %d\n", ret);
+ free(new_alg_data);
+ return -1;
+ }
+
+ sip_alg_table[ret] = new_alg_data;
+
+ if (ALG_DEBUG) {
+ printf("SIP_ALG: table update - ip=%x on port=%d ret=%d\n",
+ alg_key.ip_address, portid, ret);
+ }
+ return 0;
+}
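+
+#if 0
+/*
+ * Minimal usage sketch (disabled, for illustration only) of the table
+ * helpers above, mirroring what sip_alg_dpi() does for an outgoing INVITE:
+ * an entry is added for the SIP control port and looked up again on later
+ * messages of the same call. The literal address, port and call-id values
+ * are made up for the example; SIP_CALL_OUTGOING and SIP_UDP are assumed to
+ * come from lib_sip_alg.h, as used elsewhere in this file.
+ */
+static void sip_alg_example_table_usage(void)
+{
+	char call_id[] = "example-call-id@10.0.0.1";
+	struct sip_alg_table_entry *entry;
+	struct sip_alg_key key;
+
+	if (populate_sip_alg_entry(0x0a000001, SIP_DEFAULT_L4PORT,
+			call_id, SIP_CALL_OUTGOING, SIP_UDP) < 0)
+		return;
+
+	key.ip_address = 0x0a000001;
+	key.l4port = SIP_DEFAULT_L4PORT;
+	key.filler1 = 0;
+	key.filler2 = 0;
+
+	entry = retrieve_sip_alg_entry(&key);
+	if (entry != NULL)
+		remove_sip_alg_entry(key.ip_address, key.l4port);
+}
+#endif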
+
+int sip_alg_dpi(struct rte_mbuf *pkt, enum pkt_dir pkt_direction,
+ uint32_t modIp, uint16_t modL4Port,
+ uint32_t pubIp, uint16_t pubL4Port,
+ uint16_t modRtpPort, uint16_t modRtcpPort)
+{
+ uint16_t msgType = 0;
+ enum sip_alg_call_direction call_direction = 0;
+ uint32_t ip_address = 0;
+ uint16_t port = 0;
+ int ret = 0;
+ struct ipv4_hdr *ip_h;
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ struct udp_hdr *udp_h;
+ char *pSipMsg = NULL;
+ struct sip_alg_table_entry *sip_alg_entry;
+ char *sip_call_id = NULL;
+ int pos = 0;
+ struct sip_alg_key alg_key;
+ uint16_t diffModSipLen = 0;
+
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ udp_h = (struct udp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+ pSipMsg = ((char *)udp_h + sizeof(struct udp_hdr));
+
+ if (ALG_DEBUG) {
+ printf("%s: packet length(%u), buffer length(%u)\n", __func__,
+ rte_pktmbuf_pkt_len(pkt), pkt->buf_len);
+ printf("%s: last segment addr(%p %p)\n", __func__,
+ rte_pktmbuf_lastseg(pkt), pkt);
+ printf("%s: data len(%u, %u)\n", __func__, rte_pktmbuf_data_len(pkt),
+ rte_pktmbuf_data_len(rte_pktmbuf_lastseg(pkt)));
+ printf("%s: buffer addr(%p), data_off(%u), nb_segs(%u)\n", __func__,
+ pkt->buf_addr, pkt->data_off, pkt->nb_segs);
+ }
+
+ if (IS_STRING_SAME(pSipMsg, SIP_ALG_INVITE)) {
+ /* find the call id position in the message */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_CALLID, &pos, 0) ==
+ TRUE)
+ sip_call_id =
+ getSipCallIdStr(pSipMsg + pos +
+ TAG_TO_DATAPOS(SIP_ALG_CALLID));
+
+ if (ALG_DEBUG)
+ printf("sipalgdpi: %d call id %s\n", __LINE__,
+ sip_call_id);
+
+ if (pkt_direction == PRIVATE) {
+ call_direction = SIP_CALL_OUTGOING;
+ ip_address = rte_bswap32(ip_h->src_addr);
+ port = rte_bswap16(udp_h->src_port);
+ } else if (pkt_direction == PUBLIC) {
+ call_direction = SIP_CALL_INCOMING;
+ ip_address = pubIp;
+ port = pubL4Port;
+ }
+
+ if (ALG_DEBUG)
+ printf("0=>sip_alg_dpi: pkt_dir(%d), call_dir(%d), "
+ "ipaddr(%x) port(%x)\n",
+ pkt_direction, call_direction, ip_address, port);
+
+ /* add 3 entries in ALG table for SIP, RTP, RTCP */
+ ret = populate_sip_alg_entry(ip_address, port,
+ sip_call_id, call_direction,
+ SIP_UDP);
+ if (ret < 0) {
+ printf("sipalgdpi:populate SIP alg UDP entry failed\n");
+ return 0;
+ }
+ if (modRtpPort != 0) {
+ ret = populate_sip_alg_entry(ip_address, modRtpPort,
+ sip_call_id,
+ call_direction, SIP_RTP);
+ if (ret < 0) {
+ printf("sipalgdpi: "
+ "populate SIP alg entry RTP failed\n");
+ return 0;
+ }
+ }
+ if (modRtcpPort != 0) {
+ ret = populate_sip_alg_entry(ip_address, modRtcpPort,
+ sip_call_id,
+ call_direction, SIP_RTCP);
+ if (ret < 0) {
+ printf("sipalgdpi: "
+ "populate SIP alg entry RTCP failed\n");
+ return 0;
+ }
+ }
+
+/* Call ALG packet process function for checking & payload modification */
+ pSipMsg =
+ sip_alg_process(pkt, pkt_direction, call_direction,
+ SIP_ALG_INVITE_MSGTYPE, modIp, modL4Port, 0,
+ 0, modRtpPort, modRtcpPort, &diffModSipLen);
+ } else {
+		/*
+		 * not a SIP INVITE; could be a SIP response (200 OK INVITE,
+		 * 100 Trying, 180 Ringing), a BYE, or a 200 OK BYE
+		 */
+ /* retrieve ALG entry from SIP ALG table */
+ if (pkt_direction == PRIVATE) {
+ alg_key.ip_address = rte_bswap32(ip_h->src_addr);
+ alg_key.l4port = rte_bswap16(udp_h->src_port);
+ } else {
+ alg_key.ip_address = pubIp;
+ alg_key.l4port = pubL4Port;
+ }
+
+ alg_key.filler1 = 0;
+ alg_key.filler2 = 0;
+ sip_alg_entry = retrieve_sip_alg_entry(&alg_key);
+
+ if (ALG_DEBUG) {
+ printf("%s: sip_alg_entry_ptr(%p)\n", __func__,
+ sip_alg_entry);
+ printf("1=>%s: pkt_dir(%d), modIp(%x),modL4Port(%x), "
+ "modRtpPort(%x), modRtcpPort(%x), pubIp(%x), pubL4Port(%x)\n",
+ __func__, pkt_direction, modIp, modL4Port,
+ modRtpPort, modRtcpPort, pubIp, pubL4Port);
+ }
+
+ if (sip_alg_entry) {
+ call_direction = sip_alg_entry->sip_alg_call_direction;
+ if (IS_STRING_SAME(pSipMsg, SIP_ALG_BYE) ||
+ IS_STRING_SAME(pSipMsg, SIP_ALG_CANCEL)) {
+ msgType = SIP_ALG_BYE_MSGTYPE;
+
+ goto sipAlgProcess;
+ } else if (IS_STRING_SAME(pSipMsg, SIP_ALG_ACK)) {
+ msgType = SIP_ALG_ACK_MSGTYPE;
+
+ goto sipAlgProcess;
+ }
+
+ pSipMsg += 8;
+ /* checking if its OK or Trying or Ringing */
+ if (IS_STRING_SAME(pSipMsg, SIP_ALG_200_OK)) {
+ /* check CSEQ. Based on that update the msg type */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CSEQ, &pos, 0) == TRUE) {
+ char *pBye;
+
+ pBye =
+ pSipMsg + pos +
+ TAG_TO_DATAPOS(SIP_ALG_CSEQ);
+ SKIP_SPACES(pBye);
+ /* skip the number field */
+ while (*pBye != ' ')
+ pBye++;
+ SKIP_SPACES(pBye);
+ if (IS_STRING_SAME(pBye, SIP_ALG_BYE)
+ ||
+ (IS_STRING_SAME
+ (pBye, SIP_ALG_CANCEL)))
+ msgType =
+ SIP_ALG_200_OK_BYE_MSGTYPE;
+
+ else
+ msgType =
+ SIP_ALG_200_OK_INVITE_MSGTYPE;
+ }
+ } else if (IS_STRING_SAME(pSipMsg, SIP_ALG_TRYING) ||
+ IS_STRING_SAME(pSipMsg, SIP_ALG_RINGING)) {
+ msgType = SIP_ALG_TRYING_RINGING_MSGTYPE;
+ }
+
+ sipAlgProcess:
+ if (ALG_DEBUG)
+ printf("2=>%s: pkt_dir(%d), call_dir(%d), "
+ "msgType(%d), modIp(%x), modL4Port(%x), "
+ " modRtpPort(%x), modRtcpPort(%x)\n",
+ __func__, pkt_direction, call_direction,
+ msgType, modIp, modL4Port, modRtpPort,
+ modRtcpPort);
+ /* Call SIP alg processing for further processing. */
+ pSipMsg =
+ sip_alg_process(pkt, pkt_direction, call_direction,
+ msgType, modIp, modL4Port, pubIp,
+ pubL4Port, modRtpPort, modRtcpPort,
+ &diffModSipLen);
+ } else
+ pSipMsg = NULL;
+ }
+
+ if (ALG_DEBUG)
+ printf("%s: Before IP total length(%u), udp length(%u)\n", __func__,
+ rte_bswap16(ip_h->total_length), rte_bswap16(udp_h->dgram_len));
+ /*
+ * need to modify mbuf & modified length of payload in the IP/UDP
+ * header length fields and return to CGNAT for transmitting
+ */
+ uint16_t len = 0;
+ if (diffModSipLen > 0) {
+ len = rte_bswap16(udp_h->dgram_len);
+ len += diffModSipLen;
+ udp_h->dgram_len = rte_bswap16(len);
+
+ len = rte_bswap16(ip_h->total_length);
+ len += diffModSipLen;
+ ip_h->total_length = rte_bswap16(len);
+
+ if (rte_pktmbuf_append(pkt, diffModSipLen) == NULL)
+ printf("%s: pktmbuf_append returns NULL", __func__);
+
+ }
+
+ if (ALG_DEBUG)
+ printf("%s: After IP total length(%u), udp length(%u), "
+ "diffModSipLen(%u)\n", __func__,
+ rte_bswap16(ip_h->total_length),
+ rte_bswap16(udp_h->dgram_len),
+ diffModSipLen);
+
+ if (pSipMsg != NULL)
+ return 1;
+ else
+ return 0;
+}
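+
+#if 0
+/*
+ * Disabled sketch of the CSeq-based classification used in sip_alg_dpi()
+ * above: a "200 OK" alone does not say which request it answers, so the
+ * method name at the end of the CSeq header ("CSeq: 2 BYE" vs
+ * "CSeq: 1 INVITE") decides between the BYE and INVITE message types.
+ * SKIP_SPACES/IS_STRING_SAME/TAG_TO_DATAPOS are the helper macros this file
+ * already relies on (assumed to come from the lib_sip_alg.h includes); the
+ * example_* name is not part of the library.
+ */
+static uint16_t sip_alg_example_classify_200_ok(char *pSipMsg)
+{
+	char *pMethod;
+	int pos;
+
+	if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_CSEQ, &pos, 0) != TRUE)
+		return 0;
+
+	pMethod = pSipMsg + pos + TAG_TO_DATAPOS(SIP_ALG_CSEQ);
+	SKIP_SPACES(pMethod);
+	while (*pMethod != ' ')	/* skip the CSeq sequence number */
+		pMethod++;
+	SKIP_SPACES(pMethod);
+
+	if (IS_STRING_SAME(pMethod, SIP_ALG_BYE) ||
+			IS_STRING_SAME(pMethod, SIP_ALG_CANCEL))
+		return SIP_ALG_200_OK_BYE_MSGTYPE;
+
+	return SIP_ALG_200_OK_INVITE_MSGTYPE;
+}
+#endif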
+
+char *sip_alg_process(struct rte_mbuf *pkt, uint16_t pkt_direction,
+ uint16_t call_direction, uint16_t msgType, uint32_t modIp,
+ uint16_t modL4Port, uint32_t pubIp, uint16_t pubL4Port,
+ uint16_t modRtpPort, uint16_t modRtcpPort,
+ uint16_t *diffModSipLen)
+{
+ struct ipv4_hdr *ip_h;
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ struct udp_hdr *udp_h;
+ char *pSipMsg, *pStr, *pEndPtr;
+ int pos;
+ /* diff between old & new modified field len */
+ uint32_t diffLen, addrPortLen;
+ int sdpMsgLen = 0;
+ int sip_msg_len = 0;
+
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ udp_h = (struct udp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+ pSipMsg = ((char *)udp_h + sizeof(struct udp_hdr));
+ char *pTmpSipMsg = pSipMsg;
+ char *pStartSipMsg = pSipMsg;
+
+ sip_msg_len =
+ rte_bswap16(ip_h->total_length) - sizeof(struct ipv4_hdr) -
+ sizeof(struct udp_hdr);
+
+ if (natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_CONTENT_LEN, &pos, 0) ==
+ TRUE)
+ pTmpSipMsg += (pos + TAG_TO_DATAPOS(SIP_ALG_CONTENT_LEN));
+ else {
+ printf("sip_alg_process: Invalid Content Length\n");
+ return NULL;
+ }
+
+ SKIP_SPACES(pTmpSipMsg);
+ int sdpDataLen = strtol(pTmpSipMsg, &pStr, 10);
+
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg, SIP_ALG_DOUBLE_CRLF, &pos, 0);
+ pTmpSipMsg += (pos + strlen(SIP_ALG_DOUBLE_CRLF));
+
+ if (sdpDataLen != 0)
+ if (natSipAlgMsgFieldPos
+ (pTmpSipMsg, SIP_ALG_REMOTE_PARTY_ID, &pos, 0) == TRUE) {
+ pTmpSipMsg += pos + strlen(SIP_ALG_REMOTE_PARTY_ID);
+ /* move further to CRLF which is the end of SIP msg */
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_DOUBLE_CRLF, &pos,
+ 0);
+ pTmpSipMsg += (pos + strlen(SIP_ALG_DOUBLE_CRLF));
+ }
+
+ int sipMsgLen = (pTmpSipMsg - pSipMsg);
+
+ char *pSipMsgEnd = pSipMsg + sipMsgLen + sdpDataLen;
+
+ if (ALG_DEBUG)
+ printf("%s: pSipMsg: %p, pSipMsgEnd: %p, sipMsgLen: %d, "
+ "sdpDataLen: %d totalSipMsgLen: %d\n",
+ __func__, pSipMsg, pSipMsgEnd, sipMsgLen, sdpDataLen,
+ sip_msg_len);
+
+ if (call_direction == SIP_CALL_OUTGOING) {
+ if ((msgType == SIP_ALG_INVITE_MSGTYPE)
+ || (msgType == SIP_ALG_ACK_MSGTYPE)) {
+ /* Get to Via field IP address/Port to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0)
+ == TRUE) {
+ /* advance to IP/Port string */
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_VIA) +
+ SIP_ALG_VIA_FIELD_IPADDR);
+ pTmpSipMsg = pSipMsg;
+ /* move pTmp to next field */
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; No valid VIA field\n");
+ return NULL;
+ }
+ /* Modify VIA field IP addr:port in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to "From" field IP addr in payload */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_FROM, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to the ">" string, which is the end of the addr:port string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now its pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; No valid From field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to Call id field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CALLID, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+/* moving it to start of string "Call-ID" */
+ pTmpSipMsg = pSipMsg;
+ /* move tmpSipMsg to next field */
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+ /* Move pSipMsg to start of Call id "IP addr" string */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+ " No valid Call Id field\n");
+ return NULL;
+ }
+ /* Modify "Call-id" field "addr:port" in payload */
+/* L4 port input is passed as 0, as this is only an addr string modification */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+
+ /* Advance to "Contact" field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONTACT, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+ /* move tmpMsg to CRLF */
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ /* move sipMsg to addr:port string */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+
+ } else {
+ printf("sip_alg_process; "
+					"No valid Contact field\n");
+ return NULL;
+ }
+ /* Modify "Contact" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+
+ if (msgType == SIP_ALG_INVITE_MSGTYPE) {
+/* Advance to check content type & get content length (SDP length) */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONTYPE, &pos,
+ 0) == TRUE) {
+ pSipMsg +=
+ (pos +
+ TAG_TO_DATAPOS(SIP_ALG_CONTYPE));
+ SKIP_SPACES(pSipMsg);
+ /*check the application/sdp type, if not, exit */
+ if (!IS_STRING_SAME
+ (pSipMsg, SIP_ALG_APPSDP)) {
+ printf("sip_alg_process "
+ "Invalid Content type\n");
+ return NULL;
+ }
+ } else {
+ printf("sip_alg_process; "
+ "No valid Content field\n");
+ return NULL;
+ }
+
+ /* get the SDP content length */
+ natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_CONTENT_LEN, &pos,
+ 0);
+ pSipMsg +=
+ (pos + TAG_TO_DATAPOS(SIP_ALG_CONTENT_LEN));
+ SKIP_SPACES(pSipMsg);
+ sdpMsgLen = strtol(pSipMsg, &pEndPtr, 10);
+ if (!sdpMsgLen) {
+/* if it is an ACK message, SDP content won't be there; go to ALG process complete */
+ if (msgType == SIP_ALG_ACK_MSGTYPE)
+ goto sipAlgProcessExit;
+
+ printf("sip_alg_process - "
+ "sdpMsgLen is 0\n");
+ return NULL;
+ }
+
+ /* Advance to SDP data message Owner address */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_OWNER, &pos,
+ 0) == TRUE) {
+ pSipMsg += pos;
+ /* at start of owner string "o=" */
+ pTmpSipMsg = pSipMsg;
+ /* move tmmsg to CRLF of owner field */
+ natSipAlgMsgFieldPosFindCrlf(pSipMsg,
+ SIP_ALG_CRLF,
+ &pos,
+ 0);
+ pTmpSipMsg += pos;
+/* start of CRLF "\r\n" */
+/* move pSipMsg to IP address string in owner field */
+ natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_IPV4, &pos,
+ 0);
+ pSipMsg += (pos + strlen(SIP_ALG_IPV4));
+ SKIP_SPACES(pSipMsg);
+/* after skipping spaces, pSip at start of addr */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_processing: "
+ "Invalid Owner field\n");
+ return NULL;
+ }
+/* Modify "Owner" field "addr" in payload. Input L4 port as 0 */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+/* need to adjust the SDP msg len as modification done. */
+
+/* Advance to Connection information to modify IP address */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONN, &pos,
+ 0) == TRUE) {
+ pSipMsg += pos;
+ pTmpSipMsg = pSipMsg;
+ /* move tmmsg to CRLF of owner field */
+ natSipAlgMsgFieldPosFindCrlf(pSipMsg,
+ SIP_ALG_CRLF,
+ &pos,
+ 0);
+ pTmpSipMsg += pos;
+					/* start of CRLF "\r\n" */
+ /* move pSipMsg to IP address string in owner field */
+ natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_IPV4, &pos,
+ 0);
+ pSipMsg += (pos + strlen(SIP_ALG_IPV4));
+ SKIP_SPACES(pSipMsg);
+/* after skipping spaces, pSip at start of addr */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_processing: "
+					"Invalid Connection field\n");
+ return NULL;
+ }
+/* Modify "Connection" field "addr" in payload. Input L4 port as 0 */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+/* need to adjust the SDP msg len as modification done. */
+
+ /* Advance to RTP audio port */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_AUDIO, &pos,
+ 0) == TRUE) {
+ pSipMsg +=
+ (pos +
+ TAG_TO_DATAPOS(SIP_ALG_AUDIO));
+ SKIP_SPACES(pSipMsg);
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindSpace
+ (pTmpSipMsg, SIP_ALG_SPACE, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ }
+
+/* Modify "RTP Audio" port in payload. pass pub_ip as 0. */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, 0,
+ modRtpPort,
+ PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+/* need to adjust the SDP msg len as modification done. */
+
+ /* Advance to RTCP control port, if its there */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_RTCP, &pos,
+ 0) == TRUE) {
+ pSipMsg +=
+ (pos +
+ TAG_TO_DATAPOS(SIP_ALG_RTCP));
+ SKIP_SPACES(pSipMsg);
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindSpace
+ (pTmpSipMsg, SIP_ALG_SPACE, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+
+/* Modify the RTCP control port in the payload; pass pub_ip as 0. */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort
+ (pSipMsg, &pSipMsgEnd, addrPortLen,
+ &diffLen, 0, modRtcpPort,
+ PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+/* need to adjust the SDP msg len as modification done. */
+ }
+ }
+/* with this SIP payload modification is complete for outbound invite message */
+ } else if ((msgType == SIP_ALG_TRYING_RINGING_MSGTYPE)
+ || (msgType == SIP_ALG_200_OK_INVITE_MSGTYPE)) {
+ /* Get to Via field IP address/Port to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0)
+ == TRUE) {
+ /* advance to IP/Port string */
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_VIA) +
+ SIP_ALG_VIA_FIELD_IPADDR);
+ pTmpSipMsg = pSipMsg;
+ /* move pTmp to next field */
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; No valid VIA field\n");
+ return NULL;
+ }
+ /* Modify VIA field IP addr:port in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen = diffLen;
+
+ /* Advance to "From" field IP addr in payload */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_FROM, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to the ">" string, which is the end of the addr:port string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ //diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now its pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; No valid From field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to Call id field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CALLID, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+/* moving it to start of string "Call-ID" */
+ pTmpSipMsg = pSipMsg;
+ /* move tmpSipMsg to next field */
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ //diffLen = pTmpSipMsg - pSipMsg;
+ /* Move pSipMsg to start of Call id "IP addr" string */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+ "No valid Call Id field\n");
+ return NULL;
+ }
+ /* Modify "Call-id" field "addr" in payload */
+/* L4 port input is passed as 0, as this is only an addr string modification */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+
+ } else if (pkt_direction == PRIVATE
+ && msgType == SIP_ALG_BYE_MSGTYPE) {
+ /* change via, from, call-id and contact field */
+
+ /* Get to Via field IP address to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0)
+ == TRUE) {
+ /* advance to IP/Port string */
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_VIA) +
+ SIP_ALG_VIA_FIELD_IPADDR);
+ pTmpSipMsg = pSipMsg;
+ /* move pTmp to next field */
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; No valid VIA field\n");
+ return NULL;
+ }
+ /* Modify VIA field IP addr in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen = diffLen;
+
+ /* Advance to "From" field IP addr in payload */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_FROM, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to the ">" string, which is the end of the addr:port string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now its pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; No valid From field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to Call id field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CALLID, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+/* moving it to start of string "Call-ID" */
+ pTmpSipMsg = pSipMsg;
+ /* move tmpSipMsg to next field */
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+ /* Move pSipMsg to start of Call id "IP addr" string */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+ "No valid Call Id field\n");
+ return NULL;
+ }
+ /* Modify "Call-id" field "addr:port" in payload */
+			/* L4 port input is passed as 0, as this is only an addr string modification */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+
+ /* Advance to "Contact" field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONTACT, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+				/* move tmpMsg to CRLF */
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos, 0);
+ pTmpSipMsg += pos;
+ /* move sipMsg to addr:port string */
+ int flag = 0;
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT,
+ &pos, 0) == FALSE)
+ flag = 1;
+
+ if (flag)
+ goto SipMsgAdvance2;
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+
+ } else {
+ printf("sip_alg_process; "
+				"No valid Contact field\n");
+ return NULL;
+ }
+ /* Modify "Contact" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ } else if (pkt_direction == PUBLIC
+ && msgType == SIP_ALG_BYE_MSGTYPE) {
+ /*
+ * Modify Bye URL (if its BYE), To field,
+ * Call-Id if call triggered from private, then modify
+ */
+
+ /* need to modify address:Port in Bye message string. */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos, 0);
+ pSipMsg += pos + 1;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindSpace(pTmpSipMsg, SIP_ALG_SPACE,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ /* modify the "addr:port" in Bye message line */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to 'To" field */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_TO, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ }
+
+ /* check for Call-Id. */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CALLID, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+/* moving it to start of string "Call-ID" */
+ pTmpSipMsg = pSipMsg;
+ /* move tmpSipMsg to next field */
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ //diffLen = pTmpSipMsg - pSipMsg;
+ /* Move pSipMsg to start of Call id "IP addr" string */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+ "No valid Call Id field\n");
+ return NULL;
+ }
+ /* Modify "Call-id" field "addr" in payload */
+			/* L4 port input is passed as 0, as this is only an addr string modification */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ } else if (pkt_direction == PRIVATE
+ && (msgType == SIP_ALG_200_OK_BYE_MSGTYPE)) {
+ /*
+ * Need to modify To field, Call-Id,
+ * Contact if call triggered from private, then modify
+ */
+ /* Get to To field IP address to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_TO, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to the ">" string, which is the end of the addr:port string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now its pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; no valid from field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen = diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to "Contact" field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONTACT, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+ /* move tmpMsg to CRLF */
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ /* move sipMsg to addr:port string */
+ int flag = 0;
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT,
+ &pos, 0) == FALSE)
+ flag = 1;
+
+ if (flag)
+ goto SipMsgAdvance2;
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+				"No valid Contact field\n");
+ return NULL;
+ }
+ /* Modify "Contact" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ } else if (pkt_direction == PUBLIC
+ && (msgType == SIP_ALG_200_OK_BYE_MSGTYPE)) {
+ /* change via and from field, call-id field */
+
+ /* Get to Via field IP address to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0)
+ == TRUE) {
+ /* advance to IP/Port string */
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_VIA) +
+ SIP_ALG_VIA_FIELD_IPADDR);
+ pTmpSipMsg = pSipMsg;
+ /* move pTmp to next field */
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; No valid VIA field\n");
+ return NULL;
+ }
+ /* Modify VIA field IP addr in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen = diffLen;
+
+ /* Advance to "From" field IP addr in payload */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_FROM, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to the ">" string, which is the end of the addr:port string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now its pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; No valid From field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+
+ /* check for Call-Id. */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CALLID, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+ /* Call id 'addr" need to modified. */
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ /* modify call id "addr" */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ } else {
+ printf("sip_alg_process; "
+ "no valid Call-id field\n");
+ return NULL;
+ }
+/* increase the overall diff between old & mod sip msg */
+ }
+ } else if (call_direction == SIP_CALL_INCOMING) {
+ if ((msgType == SIP_ALG_INVITE_MSGTYPE)
+ || (msgType == SIP_ALG_ACK_MSGTYPE)) {
+ /* need to modify Invite RL, TO field */
+ /* move to Invite RL IP address string */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos + 1;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_SIP,
+ &pos, 0);
+ pTmpSipMsg += (pos - 1);
+/* pointing to space before SIP/2.0 */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process: %d Invalid Invite RL\n",
+ __LINE__);
+ return NULL;
+ }
+ /* modify Invite RL URI in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to 'To" field */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_TO, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_processing; "
+ "%d Invalid To field\n",
+ __LINE__);
+ return NULL;
+ }
+ /* Modify TO field IP addr:port in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ } else if ((msgType == SIP_ALG_TRYING_RINGING_MSGTYPE)
+ || (msgType == SIP_ALG_200_OK_INVITE_MSGTYPE)) {
+ /* Need to modify TO field */
+ /* Advance to 'To" field */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_TO, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ }
+ if (msgType == SIP_ALG_200_OK_INVITE_MSGTYPE) {
+/* need to modify Contact, Remote-Party Id, SDP O=IN, C=IN, Audio Port */
+ /* Advance to "Contact" field */
+
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONTACT, &pos,
+ 0) == TRUE) {
+ pSipMsg += pos;
+					/* move tmpMsg to the semicolon */
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ /* move sipMsg to addr:port string */
+ int flag = 0;
+ if (natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_AT, &pos,
+ 30) == FALSE)
+ flag = 1;
+
+ if (flag)
+ goto SipMsgAdvance;
+
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+					"No valid Contact field\n");
+ return NULL;
+ }
+ /* Modify "Contact" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+SipMsgAdvance:
+ /* advance to Remote-Party Id */
+ pTmpSipMsg = pSipMsg;
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_REMOTE_PARTY_ID, &pos,
+ 0) == TRUE) {
+ pSipMsg += pos +
+ strlen(SIP_ALG_REMOTE_PARTY_ID);
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ /* modify the field */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort
+ (pSipMsg, &pSipMsgEnd, addrPortLen,
+ &diffLen, modIp, modL4Port,
+ ADDRESS_PORT_STRING);
+					*diffModSipLen += diffLen;
+ } else {
+ printf("sip_alg_process: "
+ "Remote-party-id is not in the msg\n");
+ pSipMsg = pTmpSipMsg;
+ }
+
+ /* Advance to SDP data message Owner address */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_OWNER, &pos,
+ 0) == TRUE) {
+ pSipMsg += pos;
+ /* at start of owner string "o=" */
+ pTmpSipMsg = pSipMsg;
+				/* move pTmpSipMsg to CRLF of owner field */
+				natSipAlgMsgFieldPosFindCrlf(pSipMsg,
+							SIP_ALG_CRLF,
+							&pos,
+							0);
+				pTmpSipMsg += pos;
+				/* start of CRLF "\r\n" */
+/* move pSipMsg to IP address string in owner field */
+ natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_IPV4, &pos,
+ 0);
+ pSipMsg += (pos + strlen(SIP_ALG_IPV4));
+ SKIP_SPACES(pSipMsg);
+/* after skipping spaces, pSip at start of addr */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_processing: "
+ "Invalid Owner field\n");
+ return NULL;
+ }
+/* Modify "Owner" field "addr" in payload. Input L4 port as 0 */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+ /* update the sdpMsgLen after modification */
+
+ /* Advance to Connection information to modify IP address */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONN, &pos,
+ 0) == TRUE) {
+ pSipMsg += pos;
+ pTmpSipMsg = pSipMsg;
+				/* move pTmpSipMsg to CRLF of connection field */
+				natSipAlgMsgFieldPosFindCrlf(pSipMsg,
+							SIP_ALG_CRLF,
+							&pos,
+							0);
+				pTmpSipMsg += pos;
+				/* start of CRLF "\r\n" */
+				/* move pSipMsg to IP address string in connection field */
+ natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_IPV4, &pos,
+ 0);
+ pSipMsg += (pos + strlen(SIP_ALG_IPV4));
+ SKIP_SPACES(pSipMsg);
+/* after skipping spaces, pSip at start of addr */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_processing: "
+ "Invalid Connection field\n");
+ return NULL;
+ }
+/* Modify "Connection" field "addr" in payload. Input L4 port as 0 */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp, 0,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+/* update the sdpMsgLen after modification */
+
+ /* Advance to RTP audio port */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_AUDIO, &pos,
+ 0) == TRUE) {
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_AUDIO));
+ SKIP_SPACES(pSipMsg);
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindSpace
+ (pTmpSipMsg, SIP_ALG_SPACE, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ }
+
+/* Modify "RTP Audio" port in payload. pass pub_ip as 0. */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, 0,
+ modRtpPort,
+ PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ sdpMsgLen += diffLen;
+/* update the sdpMsgLen after modification */
+ }
+ } else if (pkt_direction == PUBLIC
+ && msgType == SIP_ALG_BYE_MSGTYPE) {
+		/* Modify the BYE URL (if it's a BYE) and the To field */
+
+ /* need to modify address:Port in Bye message string. */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos, 0);
+ pSipMsg += pos + 1;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindSpace(pTmpSipMsg, SIP_ALG_SPACE,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ /* modify the "addr:port" in Bye message line */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+		/* Advance to "To" field */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_TO, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos;
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen,
+ modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ } else {
+ printf
+ ("sip_alg_processing: Invalid TO field\n");
+ return NULL;
+ }
+ } else if (pkt_direction == PRIVATE
+ && msgType == SIP_ALG_BYE_MSGTYPE) {
+ /* change via and from field */
+
+ /* Get to Via field IP address to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0)
+ == TRUE) {
+ /* advance to IP/Port string */
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_VIA) +
+ SIP_ALG_VIA_FIELD_IPADDR);
+ pTmpSipMsg = pSipMsg;
+ /* move pTmp to next field */
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; No valid VIA field\n");
+ return NULL;
+ }
+ /* Modify VIA field IP addr in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen = diffLen;
+
+ /* Advance to "From" field IP addr in payload */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_FROM, &pos, 0)
+ == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to str ">" which is end of "addr:port" string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now it's pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; No valid From field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ } else if (pkt_direction == PRIVATE
+ && msgType == SIP_ALG_200_OK_BYE_MSGTYPE) {
+ /* change via and from field */
+
+ /* Get to Via field IP address to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_VIA, &pos, 0)
+ == TRUE) {
+ /* advance to IP/Port string */
+ pSipMsg +=
+ (pos + strlen(SIP_ALG_VIA) +
+ SIP_ALG_VIA_FIELD_IPADDR);
+ pTmpSipMsg = pSipMsg;
+ /* move pTmp to next field */
+ natSipAlgMsgFieldPos(pTmpSipMsg,
+ SIP_ALG_SEMICOLON, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; No valid VIA field\n");
+ return NULL;
+ }
+ /* Modify VIA field IP addr in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp, 0,
+ ADDRESS_PORT_STRING);
+ *diffModSipLen = diffLen;
+
+ /* Advance to "From" field IP addr in payload */
+ if (natSipAlgMsgFieldPos(pSipMsg,
+ SIP_ALG_FROM, &pos, 0) == TRUE) {
+ pSipMsg += pos; /* Moving to "From" */
+ /* advance to IP/Port string */
+ pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to str ">" which is end of "addr:port" string */
+ natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+ &pos, 0);
+ pTmpSipMsg += pos;
+ diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "From" string to ">" string which is start of "addr:port" */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+/* now it's pointing to start of From field "address:port" */
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf
+ ("sip_alg_process; No valid From field\n");
+ return NULL;
+ }
+ /* Modify "From" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+/* increase the overall diff between old & mod sip msg */
+ } else if (pkt_direction == PUBLIC
+ && msgType == SIP_ALG_200_OK_BYE_MSGTYPE) {
+ /* Get to To field IP address to modify */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_TO, &pos, 0)
+ == TRUE) {
+			pSipMsg += pos;	/* Moving to "To" */
+			/* advance to IP/Port string */
+			pTmpSipMsg = pSipMsg;
+/* move pTmpSipMsg to str ">" which is end of "addr:port" string */
+			natSipAlgMsgFieldPos(pTmpSipMsg, SIP_ALG_GREAT,
+						&pos, 0);
+			pTmpSipMsg += pos;
+			diffLen = pTmpSipMsg - pSipMsg;
+/* find "@" from "To" string to ">" string which is start of "addr:port" */
+			natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+						0);
+			pSipMsg += pos + 1;
+/* now it's pointing to start of To field "address:port" */
+			addrPortLen = pTmpSipMsg - pSipMsg;
+		} else {
+			printf
+			    ("sip_alg_process; No valid To field\n");
+			return NULL;
+		}
+		/* Modify "To" field "addr:port" in payload */
+ pSipMsg = natSipAlgModifyPayloadAddrPort(pSipMsg,
+ &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen = diffLen;
+/* increase the overall diff between old & mod sip msg */
+
+ /* Advance to "Contact" field */
+ if (natSipAlgMsgFieldPos
+ (pSipMsg, SIP_ALG_CONTACT, &pos, 0) == TRUE) {
+ pSipMsg += pos;
+ /* move tmpMsg to CRLF */
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ /* move sipMsg to addr:port string */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AT, &pos,
+ 0);
+ pSipMsg += pos + 1;
+ addrPortLen = pTmpSipMsg - pSipMsg;
+ } else {
+ printf("sip_alg_process; "
+				"No valid Contact field\n");
+ return NULL;
+ }
+ /* Modify "Contact" field "addr:port" in payload */
+ pSipMsg =
+ natSipAlgModifyPayloadAddrPort(pSipMsg, &pSipMsgEnd,
+ addrPortLen,
+ &diffLen, modIp,
+ modL4Port,
+ ADDRESS_PORT_STRING);
+
+ *diffModSipLen += diffLen;
+ }
+ }
+
+SipMsgAdvance2:
+/* need to remove the SIP ALG entry if msg is 200 OK BYE response */
+ if (call_direction == SIP_CALL_OUTGOING) {
+ /* call remove sip alg entry here */
+ if (pkt_direction == PRIVATE) {
+ if (msgType == SIP_ALG_200_OK_BYE_MSGTYPE) {
+ if (remove_sip_alg_entry
+ (rte_bswap32(ip_h->src_addr),
+ rte_bswap16(udp_h->src_port)) < 0)
+ printf("removesipalgentry failed: "
+ "ipaddr %d, portid %d\n",
+ ip_h->src_addr, udp_h->src_port);
+ }
+ }
+ } else {
+ if (pkt_direction == PUBLIC) {
+ if (msgType == SIP_ALG_200_OK_BYE_MSGTYPE) {
+ if (remove_sip_alg_entry(pubIp, pubL4Port) < 0)
+ printf("removesipalgentry failed: "
+ " ipaddr %d, portid %d\n",
+ pubIp, pubL4Port);
+ }
+ }
+ }
+
+/* adjust SDP msg len (sdpMsgLen) in the content length field of SIP msg */
+ if ((sdpMsgLen > 0) && (sdpDataLen > 0)) {
+ pSipMsg = pStartSipMsg;
+ char *tmpSdpLen = NULL;
+
+ sdpMsgLen += sdpDataLen;
+ tmpSdpLen = itoa(sdpMsgLen);
+ int tmpStrLen = strlen(tmpSdpLen);
+
+	/* move to the Content-Length field & update it to the new SDP length */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_CONTENT_LEN, &pos, 0)
+ == TRUE) {
+ pSipMsg += (pos + TAG_TO_DATAPOS(SIP_ALG_CONTENT_LEN));
+ SKIP_SPACES(pSipMsg);
+ pTmpSipMsg = pSipMsg;
+ natSipAlgMsgFieldPosFindCrlf(pTmpSipMsg,
+ SIP_ALG_DOUBLE_CRLF, &pos,
+ 0);
+ pTmpSipMsg += pos;
+ SKIP_SPACES(pSipMsg);
+ diffLen = pTmpSipMsg - pSipMsg;
+ natSipAlgAdjustMsg(pSipMsg, &pSipMsgEnd, tmpStrLen,
+ diffLen);
+ strncpy(pSipMsg, tmpSdpLen, tmpStrLen);
+ } else {
+ printf("sip_alg_process: Invalid Content Length\n");
+ return NULL;
+ }
+ }
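+
+	/*
+	 * Worked example (hypothetical lengths): if the original SDP body was
+	 * 228 bytes (sdpDataLen) and the Owner/Connection/audio-port rewrites
+	 * above grew it by 4 bytes (sdpMsgLen), the digits "228" in the
+	 * Content-Length header are replaced with "232"; when the new value
+	 * is shorter than the old one, natSipAlgAdjustMsg() pads the leftover
+	 * characters with spaces.
+	 */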
+
+sipAlgProcessExit:
+	/* need to return to the start of the SIP msg */
+ return pStartSipMsg;
+}
+
+/*
+ * Function to Fetch RTP & RTCP port & return. Invoked by CGNAT
+ * while adding NAPT entry for RTP & RTCP
+ */
+int natSipAlgGetAudioPorts(struct rte_mbuf *pkt, uint16_t *rtpPort,
+ uint16_t *rtcpPort)
+{
+ struct ipv4_hdr *ip_h;
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ struct udp_hdr *udp_h;
+ char *pSipMsg, *pEndPtr;
+ int pos, sdpMsgLen;
+
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ udp_h = (struct udp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+ pSipMsg = ((char *)udp_h + sizeof(struct udp_hdr));
+
+ /* Advance to check content type & get content length (SDP length) */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_CONTYPE, &pos, 0) == FALSE)
+ return -1;
+
+ pSipMsg += (pos + TAG_TO_DATAPOS(SIP_ALG_CONTYPE));
+ SKIP_SPACES(pSipMsg);
+
+ /*check the application/sdp type, if not, exit */
+ if (!IS_STRING_SAME(pSipMsg, SIP_ALG_APPSDP)) {
+ printf("sip_alg_getAudioPort Invalid Content type\n");
+ return -1;
+ }
+
+ /* get the SDP content length */
+ natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_CONTENT_LEN, &pos, 0);
+ pSipMsg += (pos + TAG_TO_DATAPOS(SIP_ALG_CONTENT_LEN));
+ SKIP_SPACES(pSipMsg);
+ sdpMsgLen = strtol(pSipMsg, &pEndPtr, 10);
+ if (!sdpMsgLen) {
+ printf("sipAlggetAudioport - sdpMsgLen is 0\n");
+ return -1;
+ }
+
+ /* advance to RTP audio port */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_AUDIO, &pos, 0) ==
+ TRUE) {
+ pSipMsg += (pos + TAG_TO_DATAPOS(SIP_ALG_AUDIO));
+ SKIP_SPACES(pSipMsg);
+ *rtpPort = strtol(pSipMsg, &pEndPtr, 10);
+ } else
+ *rtpPort = 0;
+
+ /* advance to RTCP audio control port */
+ if (natSipAlgMsgFieldPos(pSipMsg, SIP_ALG_RTCP, &pos, 0) ==
+ TRUE) {
+ pSipMsg += (pos + TAG_TO_DATAPOS(SIP_ALG_RTCP));
+ SKIP_SPACES(pSipMsg);
+ *rtcpPort = strtol(pSipMsg, &pEndPtr, 10);
+ } else
+ *rtcpPort = 0;
+
+ if (ALG_DEBUG)
+ printf(" sipAlgGetAudioPort; rtpPort %d, rtcpPort %d\n",
+ *rtpPort, *rtcpPort);
+ return 0;
+}
+
+/* function to find SPACES in ALG message */
+int
+natSipAlgMsgFieldPosFindSpace(char *pData, const char *pIdStr, int *pPos,
+ int searchLen)
+{
+ char *pStart = pData;
+ int i = 0;
+
+ if (!pIdStr)
+ return FALSE;
+
+ if (!searchLen)
+ searchLen = 1500; /* max default search length */
+
+ while (TRUE) {
+		while (*pData != ' ' && i < searchLen) {
+			pData++;
+			i++;
+		}
+
+		if (i >= searchLen) {
+ printf("SIP ALG Find Field Pos: "
+ "Single message exceeds max len: %d\n",
+ searchLen);
+ *pPos = searchLen; /* reaches the end */
+ return FALSE;
+ }
+
+ if (bcmp(pData, pIdStr, strlen(pIdStr)) == 0)
+ break;
+ }
+
+ *pPos = pData - pStart;
+ return TRUE;
+}
+
+/* function to find CRLF in ALG message */
+int natSipAlgMsgFieldPosFindCrlf(
+ char *pData,
+ const char *pIdStr,
+ int *pPos,
+ int searchLen)
+{
+ char *pStart = pData;
+ int i = 0;
+
+ if (!pIdStr)
+ return FALSE;
+
+ if (!searchLen)
+ searchLen = 1500; /* max default search length */
+
+	while (TRUE) {
+		/* advance to the next "\r\n" pair, staying within searchLen */
+		while (!(*pData == '\r' && *(pData + 1) == '\n') &&
+				i < searchLen) {
+			pData++;
+			i++;
+		}
+		if (i >= searchLen) {
+			printf("SIP ALG Find Field Pos: "
+				" Single message exceeds max len: %d\n",
+				searchLen);
+			*pPos = searchLen; /* reaches the end */
+			return FALSE;
+		}
+
+		if (bcmp(pData, pIdStr, strlen(pIdStr)) == 0)
+			break;
+
+		/* not the marker being searched for; skip past this CRLF */
+		pData += 2;
+		i += 2;
+	}
+
+ *pPos = pData - pStart;
+ return TRUE;
+}
+
+/* function to find field position in ALG message */
+int natSipAlgMsgFieldPos(char *pData,
+ const char *pIdStr,
+ int *pPos,
+ int searchLen)
+{
+ char *pStart = pData;
+ int i = 0, j = 0;
+
+ if (!pIdStr)
+ return FALSE;
+
+ if (!searchLen)
+ searchLen = 1500; /* max default search length */
+
+ while (TRUE) {
+ while (*pData != '\r' && *(pData + 1) != '\n') {
+ /* skip all space */
+
+ while (*pData == ' ') {
+ pData++;
+ j++;
+ }
+
+ if (*pData == '\r' && *(pData + 1) == '\n')
+ break;
+
+ if (bcmp(pData, pIdStr, strlen(pIdStr)) == 0) {
+ *pPos = pData - pStart;
+ return TRUE;
+ }
+
+ pData++;
+ j++;
+
+ if (j >= searchLen) {
+ *pPos = pData - pStart;
+ return FALSE;
+ }
+
+ }
+
+ /* advance to next line */
+
+ for (i = 0; i < (searchLen - 1); i++) {
+ if (pData[i] == '\r')
+ if (pData[i + 1] == '\n')
+ break;
+ }
+
+		if (i >= (searchLen - 1)) {
+ printf("SIP ALG Find Field Pos: "
+ "Single message exceeds max len: %d\n",
+ searchLen);
+ *pPos = searchLen; /* reaches the end */
+ return FALSE;
+ }
+
+ pData += i + 2;
+ searchLen -= (i + 2);
+
+ if ((pData[0] == '\r' && pData[1] == '\n') ||
+ (searchLen <= 0)) {
+ /* reach the end mark \r\n\r\n */
+
+ if (searchLen > 0) {
+ pData += 2;
+ continue;
+ }
+
+ *pPos = pData - pStart;
+
+ return FALSE;
+ }
+ }
+
+ *pPos = pData - pStart;
+ return TRUE;
+}
+
+/* get SIP Call id string */
+char *getSipCallIdStr(char *pMsg)
+{
+ char *pStart;
+ char *pCallId = NULL;
+ int i;
+
+ pStart = pMsg;
+ for (i = 0; i < 200; i++) {
+ if (*pMsg != '\r')
+ pMsg++;
+ else
+ break;
+ }
+ if (i >= 200) {
+ printf("SIP_ALG: getCallid wrong string format\n");
+ return NULL;
+ }
+
+ size_t size = RTE_CACHE_LINE_ROUNDUP(pMsg - pStart + 1);
+
+ pCallId = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (!pCallId)
+ return NULL;
+
+ bcopy(pStart, pCallId, pMsg - pStart);
+ *(pCallId + (pMsg - pStart)) = 0;
+
+ if (ALG_DEBUG)
+ printf("%s: %s\n", __func__, pCallId);
+
+ return pCallId;
+}
+
+char *natSipAlgModifyPayloadAddrPort(
+ char *pSipMsg, char **pSipMsgEnd,
+ uint32_t oldStrLen, uint32_t *diffLen,
+ uint32_t modIp, uint16_t modPort, uint32_t type)
+{
+ char addrport[MAX_ADDR_PORT_SIZE];
+ struct in_addr ipAddr;
+ uint32_t newStrLen = 0;
+ char *tmpPort = NULL;
+
+ if (modPort != 0)
+ tmpPort = itoa(modPort);
+
+ *diffLen = 0;
+ if (type == ADDRESS_PORT_STRING) {
+ ipAddr.s_addr = htonl(modIp);
+ char *tmpAddr = inet_ntoa(ipAddr);
+
+ if (modPort != 0) /* for addr:port combo modification */
+ sprintf(addrport, "%s:%s", tmpAddr, tmpPort);
+ else /* if only address modification */
+ sprintf(addrport, "%s", tmpAddr);
+
+ newStrLen = strlen(addrport);
+
+		if (newStrLen != oldStrLen) {
+ /*
+ * Call the function moving the SIP Msg pointer
+ * to modify the field
+ */
+ natSipAlgAdjustMsg(pSipMsg, pSipMsgEnd,
+ newStrLen, oldStrLen);
+ }
+
+ /* replace the old addr:port with new addr:port */
+ strncpy(pSipMsg, addrport, strlen(addrport));
+ } else if (type == PORT_STRING) { /* only port modification */
+ if(tmpPort)
+ newStrLen = strlen(tmpPort);
+
+		if (newStrLen != oldStrLen) {
+ /*
+ * Call the function moving the SIP msg pointer
+ * to modify the field
+ */
+ natSipAlgAdjustMsg(pSipMsg, pSipMsgEnd,
+ newStrLen, oldStrLen);
+ }
+
+ /* replace the old port with new port */
+ if(tmpPort)
+ strncpy(pSipMsg, tmpPort, strlen(tmpPort));
+ }
+ /* output difflen between old str len & modified new str length */
+ if (newStrLen > oldStrLen)
+ *diffLen = newStrLen - oldStrLen;
+
+ return pSipMsg; /* modified SIP Msg */
+}
+
+char *natSipAlgAdjustMsg(char *pSipMsg, char **pSipMsgEnd,
+ uint32_t newStrLen, uint32_t oldStrLen)
+{
+ char MsgBuffer[MAX_SIP_UDP_MSG_SIZE];
+
+ if (newStrLen > oldStrLen) {
+ pSipMsg += oldStrLen;
+ int msgLen = *pSipMsgEnd - pSipMsg;
+
+ strncpy(MsgBuffer, pSipMsg, msgLen);
+ pSipMsg += (newStrLen - oldStrLen);
+ strncpy(pSipMsg, MsgBuffer, msgLen);
+
+ if (ALG_DEBUG)
+			printf("natSipAlgAdjustMsg: %d\n", msgLen);
+
+ /* moving output end of SIP MSG by difflen like pSipMsg */
+ *pSipMsgEnd += (newStrLen - oldStrLen);
+ } else {
+ /* Setting space on the oldStr position */
+ memset(pSipMsg, ' ', oldStrLen);
+ }
+
+ return pSipMsg;
+}
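+
+/*
+ * Illustrative sketch (hypothetical values): replacing "192.168.1.10:5060"
+ * (17 characters) with "203.0.113.5:6000" (16 characters) is shorter, so the
+ * old field is blanked with spaces and the replacement copied over it,
+ * leaving one trailing space and *diffLen == 0; replacing it with
+ * "203.0.113.50:60000" (18 characters) is longer, so the tail of the message
+ * is shifted right by one byte, *pSipMsgEnd advances by one, and
+ * *diffLen == 1, which the caller accumulates in diffModSipLen.
+ */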
+
+/* end of file */
diff --git a/common/VIL/alg/lib_sip_alg.h b/common/VIL/alg/lib_sip_alg.h
new file mode 100644
index 00000000..b320a4f4
--- /dev/null
+++ b/common/VIL/alg/lib_sip_alg.h
@@ -0,0 +1,156 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_LIB_ALG_H__
+#define __INCLUDE_LIB_ALG_H__
+
+#include "rte_ether.h"
+
+uint16_t sip_session_number;/* SIP session count */
+#define IS_STRING_SAME(pStr, strId) (bcmp((pStr), strId, strlen(strId)) == 0)
+#define TAG_TO_DATAPOS(str) (strlen(str) + 1)
+#define SKIP_SPACES(pStr) \
+{ \
+ while (*(char *)(pStr) == ' ') \
+ (char *)(pStr)++; \
+}
+
+enum pkt_dir {PRIVATE, PUBLIC};
+
+/* enum for SIP Call direction - NAT ALG */
+enum sip_alg_call_direction {
+ SIP_CALL_INCOMING, /* Incoming call public to private */
+ SIP_CALL_OUTGOING /* Outgoing call private to public */
+};
+
+/* enum of SIP port type - NAT ALG */
+enum sip_alg_port_type {
+	SIP_UDP, /* SIP SDP port 5060 */
+ SIP_RTP, /* RTP port number */
+ SIP_RTCP /* RTCP port number */
+};
+
+/*
+ * Data structure for NAT SIP ALG table key
+ * Key - IP address & L4 port number.
+ */
+struct sip_alg_key {
+ /*
+ * IP address based on direction.
+	 * outgoing - public IP, incoming - destination IP of pkt
+ */
+ uint32_t ip_address;
+ uint16_t l4port; /* SIP SDP, RTP, RTCP port number */
+ uint8_t filler1;
+ uint8_t filler2;
+};
+
+/*
+ * Data structure for NAT SIP ALG table entry.
+ * Placeholder for storing SIP ALG entries.
+ */
+struct sip_alg_table_entry {
+ uint32_t ip_address;
+ /*
+ * IP address based on direction.
+	 * outgoing - public IP, incoming - destination IP of pkt
+ */
+	uint16_t l4port; /* SIP UDP (5060), RTP, RTCP port number */
+ uint8_t sip_alg_call_direction;
+ /* Call incoming (pub to prv) or outgoing (prv to pub) */
+	uint8_t sip_alg_call_id[100];/* unique identifier for a SIP call */
+ uint8_t l4port_type;/* SIP_UDP or RTP or RTCP */
+ uint8_t filler1;
+ uint16_t filler2;
+ uint32_t filler3;
+} __rte_cache_aligned;
+
+
+/* Function declarations */
+
+/**
+ * Initialize the SIP ALG library. Must be called before any other
+ * SIP ALG library function is used.
+ * @return
+ *  void return
+ */
+void lib_sip_alg_init(void);
+
+/**
+ * Main SIP ALG DPI function for processing SIP ALG functionality
+ * @param pkt
+ * mbuf packet pointer
+ * @param pkt_direction
+ * Indicates whether pkt is from PRIVATE or PUBLIC direction
+ * @param modIp
+ *  NAPT translated IP address based on direction
+ * @param modL4Port
+ * NAPT translated L4 port based on direction
+ * @param pubIp
+ * Original IP address before translation
+ * @param pubL4Port
+ * Original L4 port before translation
+ * @param modRtpPort
+ * RTP port
+ * @param modRtcpPort
+ * RTCP port
+ * @return
+ * 0 means success, -1 means failure
+ */
+int sip_alg_dpi(struct rte_mbuf *pkt, enum pkt_dir pkt_direction,
+ uint32_t modIp, uint16_t modL4Port,
+ uint32_t pubIp, uint16_t pubL4Port,
+ uint16_t modRtpPort, uint16_t modRtcpPort);
+
+/**
+ * To get audio ports from SIP Packet
+ * @param pkt
+ * mbuf packet pointer
+ * @param rtpPort
+ * rtp port in parameter
+ * @param rtcpPort
+ * rtcp port in parameter
+ * @return
+ *  0 means success, -1 means failure
+ */
+int natSipAlgGetAudioPorts(
+ struct rte_mbuf *pkt,
+ uint16_t *rtpPort,
+ uint16_t *rtcp_port);
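+
+/*
+ * Minimal usage sketch (hypothetical variable names; the CGNAT pipeline
+ * supplies the real translated addresses and ports):
+ *
+ *	uint16_t rtp_port = 0, rtcp_port = 0;
+ *
+ *	if (natSipAlgGetAudioPorts(pkt, &rtp_port, &rtcp_port) == 0 &&
+ *			rtp_port != 0) {
+ *		... reserve NAPT entries for rtp_port / rtcp_port ...
+ *	}
+ *	sip_alg_dpi(pkt, PRIVATE, napt_ip, napt_l4_port,
+ *			pub_ip, pub_l4_port, napt_rtp_port, napt_rtcp_port);
+ */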
+int natSipAlgMsgFieldPos(
+ char *pData,
+ const char *pIdStr,
+ int *pos,
+ int searchLen);
+int natSipAlgMsgFieldPosFindCrlf(
+ char *pData,
+ const char *pIdStr,
+ int *pPos,
+ int searchLen);
+int natSipAlgMsgFieldPosFindSpace(
+ char *pData,
+ const char *pIdStr,
+ int *pPos,
+ int searchLen);
+int remove_sip_alg_entry(
+ uint32_t ipaddr,
+ uint16_t portid);
+
+#endif
diff --git a/common/VIL/conntrack/rte_cnxn_tracking.c b/common/VIL/conntrack/rte_cnxn_tracking.c
new file mode 100644
index 00000000..461ed422
--- /dev/null
+++ b/common/VIL/conntrack/rte_cnxn_tracking.c
@@ -0,0 +1,1804 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <rte_ether.h>
+#include <rte_prefetch.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_timer.h>
+#include <rte_spinlock.h>
+#include "rte_cnxn_tracking.h"
+#include "rte_ct_tcp.h"
+
+#define CNXN_TRX_DEBUG 0
+#define TESTING_TIMERS 0
+#define RTE_CT_TIMER_EXPIRED_DUMP 0
+
+#define META_DATA_OFFSET 128
+#define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
+#define ETH_HDR_SIZE 14
+#define IP_START (ETHERNET_START + ETH_HDR_SIZE)
+#define PROTOCOL_START (IP_START + 9)
+#define SRC_ADDR_START (IP_START + 12)
+#define TCP_START (IP_START + 20)
+
+/* IPV6 changes */
+#define PROTOCOL_START_IPV6 (IP_START + 6)
+#define SRC_ADDR_START_IPV6 (IP_START + 8)
+#define TCP_START_IPV6 (IP_START + 40)
+
+#define TCP_PROTOCOL 6
+#define UDP_PROTOCOL 17
+#define TCP_FW_IPV4_KEY_SIZE 16
+
+#define TCP_FW_IPV6_KEY_SIZE 40
+
+#define IPv4_HEADER_SIZE 20
+#define IPv6_HEADER_SIZE 40
+
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+
+static void
+rte_ct_cnxn_tracker_batch_lookup_basic_type(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ uint64_t no_new_cnxn_mask,
+ uint64_t *reply_pkt_mask,
+ uint64_t *hijack_mask,
+ uint8_t ip_hdr_size_bytes);
+
+/*
+ * Check if the packet is valid for the given connection. "original_direction"
+ * is false if the address order needs to be "flipped"; see create_cnxn_hashkey().
+ * True otherwise. Returns 0 if the packet is valid, or a negative value otherwise.
+ */
+
+/* IP/TCP header print for debugging */
+static void
+rte_ct_cnxn_print_pkt(struct rte_mbuf *pkt, uint8_t type)
+{
+ int i;
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
+
+ printf("\n");
+ printf("IP and TCP/UDP headers:\n");
+
+ if (type == IP_VERSION_4) {
+ for (i = 0; i < 40; i++) {
+ printf("%02x ", rd[i]);
+ if ((i & 3) == 3)
+ printf("\n");
+ }
+ printf("\n");
+ }
+
+ if (type == IP_VERSION_6) {
+ for (i = 0; i < 60; i++) {
+ printf("%02x ", rd[i]);
+ if ((i & 3) == 3)
+ printf("\n");
+ }
+ printf("\n");
+ }
+
+}
+
+static void
+rte_cnxn_ip_type(uint8_t *type, struct rte_mbuf *pkt)
+{
+
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(pkt);
+
+ if (ip_hdr_size_bytes == IPv4_HEADER_SIZE)
+ *type = IP_VERSION_4;
+
+ if (ip_hdr_size_bytes == IPv6_HEADER_SIZE)
+ *type = IP_VERSION_6;
+}
+
+static void
+rte_ct_print_hashkey(uint32_t *key)
+{
+	printf("Key: %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ key[0], key[1], key[2], key[3],
+ key[4], key[5], key[6], key[7], key[8], key[9]);
+}
+
+/*
+ * Create a hash key consisting of the source address/port, the destination
+ * address/ports, and the tcp protocol number. The address/port combos are
+ * treated as two 48 bit numbers and sorted. Thus the key is always the
+ * same regardless of the direction of the packet. Remembering if the numbers
+ * were "flipped" from the order in the packet, and comparing that to whether
+ * the original hash key was flipped, tells if this packet is from the same
+ * direction as the original sender or the response direction. Returns 1 (true)
+ * if the key was left in the original direction.
+ */
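+/*
+ * Example (hypothetical IPv4 values): a TCP packet from 10.1.1.2:40000 to
+ * 10.1.1.1:80 sorts the numerically lower address/port pair first, giving
+ * key[0] = 10.1.1.1, key[1] = 10.1.1.2, key[2] = (80 << 16) | 40000 and
+ * key[9] = 6, with a return value of 0 ("flipped"). The reply from
+ * 10.1.1.1:80 produces the identical key but returns 1, which is how the
+ * caller tells the client and server directions apart with one hash entry.
+ */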
+uint8_t
+rte_ct_create_cnxn_hashkey(
+ uint32_t *src_addr,
+ uint32_t *dst_addr,
+ uint16_t src_port,
+ uint16_t dst_port,
+ uint8_t proto,
+ uint32_t *key,
+ uint8_t type)
+{
+ uint8_t hash_order_original_direction = 1;
+
+ key[9] = proto;
+
+ if (type == IP_VERSION_4) {
+ uint32_t source = *src_addr;
+ uint32_t dest = *dst_addr;
+
+ key[3] = key[4] = key[5] = key[6] = key[7] = key[8] = 0;
+
+ if ((source < dest)
+ || ((source == dest) && (src_port < dst_port))) {
+ key[0] = source;
+ key[1] = dest;
+ key[2] = (src_port << 16) | dst_port;
+ } else {
+ key[0] = dest;
+ key[1] = source;
+ key[2] = (dst_port << 16) | src_port;
+ hash_order_original_direction = 0;
+ }
+ }
+
+ if (type == IP_VERSION_6) {
+ int ip_cmp = memcmp(src_addr, dst_addr, 16);
+ uint32_t *lo_addr;
+ uint32_t *hi_addr;
+
+ if ((ip_cmp < 0) || ((ip_cmp == 0) && (src_port < dst_port))) {
+ lo_addr = src_addr;
+ hi_addr = dst_addr;
+ key[8] = (src_port << 16) | dst_port;
+ } else {
+ lo_addr = dst_addr;
+ hi_addr = src_addr;
+ key[8] = (dst_port << 16) | src_port;
+ hash_order_original_direction = 0;
+ }
+ key[0] = lo_addr[0];
+ key[1] = lo_addr[1];
+ key[2] = lo_addr[2];
+ key[3] = lo_addr[3];
+ key[4] = hi_addr[0];
+ key[5] = hi_addr[1];
+ key[6] = hi_addr[2];
+ key[7] = hi_addr[3];
+
+ }
+#ifdef ALGDBG
+ rte_ct_print_hashkey(key);
+#endif
+ return hash_order_original_direction;
+}
+
+
+int
+rte_ct_get_IP_hdr_size(struct rte_mbuf *pkt)
+{
+ /* NOTE: Only supporting IP headers with no options at this time, so
+ * header is fixed size
+ */
+	/* TODO: Need to find defined constants for start of Ether and
+ * IP headers.
+ */
+ uint8_t hdr_chk = RTE_MBUF_METADATA_UINT8(pkt, IP_START);
+
+ hdr_chk = hdr_chk >> 4;
+
+ if (hdr_chk == IP_VERSION_4)
+ return IPv4_HEADER_SIZE;
+
+ else if (hdr_chk == IP_VERSION_6)
+ return IPv6_HEADER_SIZE;
+
+	else /* Not an IPv4 or IPv6 header, return negative. */
+ return -1;
+ /*
+ * int ip_hdr_size_bytes = (ihdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ * IPV4_IHL_MULTIPLIER;
+ * return ip_hdr_size_bytes;
+ */
+}
+
+static void
+rte_ct_set_timer_for_new_cnxn(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd)
+{
+ cd->state_used_for_timer = RTE_CT_TCP_NONE;
+ rte_ct_set_cnxn_timer_for_tcp(ct, cd, RTE_CT_TCP_SYN_SENT);
+}
+
+/*
+ * The connection data is stored in a hash table which makes use of the bulk
+ * lookup optimization provided in DPDK. All of the packets seen in one call
+ * to rte_ct_cnxn_tracker_batch_lookup are done in one hash table lookup. The
+ * number of packets is the number being processed by the pipeline (default
+ * max 32, absolute max 64). For any TCP or UDP packet that does not have
+ * an existing (pseudo-)connection in the table (i.e. was a miss on the hash
+ * lookup), a new connection must be added.
+ *
+ * It is possible, for UDP, that the first packet for a (pseudo-)connection and
+ * a subsequent packet are in the same batch. This means that when looking for
+ * new connections in a batch the first one must add the connection, the
+ * second and subsequent (in that batch) that are part of the same connection
+ * must use that newly created one, not create another table entry.
+ *
+ * Any newly created entries are "remembered" in a linear table, which is
+ * searched when processing hash table misses. All the entries in that table
+ * are "forgotten" at the start of a new batch.
+ *
+ * A linear table may seem slow, but consider:
+ * - out of millions of packets/second, this involves at most 64.
+ * - this affects only UDP. TCP connections are set up using an acknowledgement
+ * protocl, so would not have multiple packets for new connection in
+ *   protocol, so would not have multiple packets for a new connection in
+ * - the number of new connections in a batch would usually be zero, or a low
+ * number like 1
+ * - all the data to search through should still be in cache
+ */
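+/*
+ * Example: if two UDP packets of the same brand-new flow arrive in one
+ * batch, the bulk hash lookup misses for both. The first packet adds the
+ * hash entry and records it via rte_ct_remember_new_connection(); the
+ * second packet, whose bulk-lookup result is stale, finds that entry through
+ * rte_ct_search_new_connections() instead of creating a duplicate.
+ */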
+
+static inline void
+rte_ct_remember_new_connection(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *entry)
+{
+ ct->latest_connection++;
+ ct->new_connections[ct->latest_connection] = entry;
+}
+
+static struct rte_ct_cnxn_data *
+rte_ct_search_new_connections(struct rte_ct_cnxn_tracker *ct, uint32_t *key)
+{
+ int i;
+
+ for (i = 0; i <= ct->latest_connection; i++) {
+ uint32_t *cnxn_key = ct->new_connections[i]->key;
+ int key_cmp = memcmp(cnxn_key, key,
+ sizeof(ct->new_connections[i]->key));
+
+ if (key_cmp == 0)
+ return ct->new_connections[i];
+ }
+ return NULL;
+}
+
+static inline void rte_ct_forget_new_connections(struct rte_ct_cnxn_tracker *ct)
+{
+ ct->latest_connection = -1;
+}
+
+
+
+
+static enum rte_ct_packet_action
+rte_ct_handle_tcp_lookup(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf *packet,
+ uint8_t pkt_num,
+ uint8_t key_is_client_order,
+ uint32_t *key,
+ int hash_table_entry,
+ int no_new_cnxn,
+ uint8_t ip_hdr_size_bytes)
+{
+ struct rte_ct_cnxn_data new_cnxn_data;
+
+ memset(&new_cnxn_data, 0, sizeof(struct rte_ct_cnxn_data));
+ enum rte_ct_packet_action packet_action;
+
+ #ifdef CT_CGNAT
+ int32_t position = hash_table_entry;
+ ct->positions[pkt_num] = position;
+ #endif
+
+ /* rte_ct_cnxn_print_pkt(packet); */
+ if (hash_table_entry >= 0) {
+ /*
+ * connection found for this packet.
+ * Check that this is a valid packet for connection
+ */
+
+ struct rte_ct_cnxn_data *entry =
+ &ct->hash_table_entries[hash_table_entry];
+
+ packet_action = rte_ct_verify_tcp_packet(ct, entry, packet,
+ key_is_client_order, ip_hdr_size_bytes);
+
+ switch (packet_action) {
+
+ case RTE_CT_FORWARD_PACKET:
+ entry->counters.packets_forwarded++;
+ break;
+
+ case RTE_CT_DROP_PACKET:
+ entry->counters.packets_dropped++;
+ return RTE_CT_DROP_PACKET;
+
+ case RTE_CT_REOPEN_CNXN_AND_FORWARD_PACKET:
+ /* Entry already in hash table, just re-initialize */
+
+			/* Don't use synproxy on re-init, since it
+ * is a valid connection
+ */
+
+ if (rte_ct_tcp_new_connection(ct, &new_cnxn_data,
+ packet, 0, ip_hdr_size_bytes) !=
+ RTE_CT_DROP_PACKET) {
+ rte_memcpy(&entry->ct_protocol.tcp_ct_data,
+ &new_cnxn_data.ct_protocol.tcp_ct_data,
+ sizeof(new_cnxn_data.ct_protocol.tcp_ct_data));
+ rte_ct_set_timer_for_new_cnxn(ct, entry);
+ if (ct->counters->sessions_reactivated > 0)
+ ct->counters->sessions_reactivated--;
+ }
+
+ break;
+
+ case RTE_CT_SEND_SERVER_SYN:
+ ct->counters->pkts_forwarded++;
+ /* packet modified, send back to original source */
+ return RTE_CT_SEND_SERVER_SYN;
+
+ case RTE_CT_SEND_SERVER_ACK:
+ ct->counters->pkts_forwarded++;
+ /* packet modified, send back to original source */
+ return RTE_CT_SEND_SERVER_ACK;
+
+ case RTE_CT_HIJACK:
+ ct->counters->pkts_forwarded++;
+ /* packet saved with connection, notify VNF
+ * to hijack it
+ */
+ return RTE_CT_HIJACK;
+
+ case RTE_CT_DESTROY_CNXN_AND_FORWARD_PACKET:
+
+ /*
+ * Forward the packet because it is "legal", but destroy
+ * the connection by removing it from the hash table and
+ * cancelling any timer. There is a remote possibility
+ * (perhaps impossible?) that a later packet in the same
+ * batch is for this connection. Due to the batch
+ * lookup, which has already happened, the later packet
+ * thinks that the connection is valid. This might cause
+ * a timer to be set. Eventually, it would time out so
+ * the only bug case occurs if the hash table also, in
+ * the same batch, allocates this entry for a new
+ * connection before the above packet is received. The
+ * chances of this happening seem impossibly small but
+ * this case should perhaps be investigated further.
+ */
+
+ if (rte_hash_del_key(ct->rhash, entry->key) >= 0) {
+ /*
+ * if rte_hash_del_key >= 0, then the connection
+ * was found in the hash table and removed.
+ * Counters must be updated, and the timer
+ * cancelled. If the result was < 0, then the
+ * connection must have already been deleted,
+ * and it must have been deleted in this batch
+ * of packets processed. Do nothing.
+ */
+
+ ct->counters->sessions_closed++;
+ if (ct->counters->current_active_sessions > 0)
+ ct->counters->current_active_sessions--;
+ rte_ct_cancel_cnxn_timer(entry);
+ }
+ entry->counters.packets_forwarded++;
+ break;
+
+ default:
+ break;
+ }
+ } else {
+ /* try to add new connection */
+ struct rte_ct_cnxn_data *new_hash_entry;
+
+ if (no_new_cnxn) {
+ ct->counters->pkts_drop_invalid_conn++;
+ return RTE_CT_DROP_PACKET;
+ }
+
+ packet_action = rte_ct_tcp_new_connection(ct, &new_cnxn_data,
+ packet, ct->misc_options.synproxy_enabled,
+ ip_hdr_size_bytes);
+
+ if (unlikely(packet_action == RTE_CT_DROP_PACKET)) {
+ ct->counters->pkts_drop_invalid_conn++;
+ return RTE_CT_DROP_PACKET;
+ }
+
+		/* This packet creates a connection. */
+ int32_t position = rte_hash_add_key(ct->rhash, key);
+ if (position < 0) {
+ printf
+ ("Failed to add new connection to hash table %d, pkt_num:%d\n",
+ position, pkt_num);
+ return RTE_CT_DROP_PACKET;
+ }
+ #ifdef CT_CGNAT
+ ct->positions[pkt_num] = position;
+ #endif
+ new_hash_entry = &ct->hash_table_entries[position];
+
+ /* update fields in new_cnxn_data not set by new_connection */
+
+ memcpy(new_cnxn_data.key, key, sizeof(new_cnxn_data.key));
+ new_cnxn_data.key_is_client_order = key_is_client_order;
+ new_cnxn_data.protocol = TCP_PROTOCOL;
+ rte_cnxn_ip_type(&new_cnxn_data.type, packet);
+ rte_memcpy(new_hash_entry, &new_cnxn_data,
+ sizeof(struct rte_ct_cnxn_data));
+ new_hash_entry->counters.packets_forwarded = 1;
+ new_hash_entry->counters.packets_dropped = 0;
+ ct->counters->current_active_sessions++;
+ ct->counters->sessions_activated++;
+
+ if (packet_action == RTE_CT_SEND_CLIENT_SYNACK) {
+			/* this is a synproxied connection */
+ /* must remember mss, window scaling etc. from client */
+
+ rte_sp_parse_options(packet, new_hash_entry);
+
+ /*
+ * update packet to a SYN/ACK directed to the client,
+ * including default header options
+ */
+
+ rte_sp_cvt_to_spoofed_client_synack(new_hash_entry,
+ packet);
+
+ /*
+ * run updated packet through connection tracking so
+ * cnxn data updated appropriately and timer set for syn
+ * received state, not syn sent.
+ */
+ packet_action = rte_ct_verify_tcp_packet(ct,
+ new_hash_entry, packet,
+ !key_is_client_order,
+ ip_hdr_size_bytes);
+
+ if (unlikely(packet_action != RTE_CT_FORWARD_PACKET)) {
+ /* should never get here */
+ printf("Serious error in synproxy generating ");
+ printf("SYN/ACK\n");
+ return RTE_CT_DROP_PACKET;
+ }
+ ct->counters->pkts_forwarded++;
+ /* spoofed packet good to go */
+ return RTE_CT_SEND_CLIENT_SYNACK;
+ }
+ rte_ct_set_timer_for_new_cnxn(ct, new_hash_entry);
+
+ }
+
+ /* TODO: is it possible that earlier packet in this batch caused new
+ * entry to be added for the connection? Seems unlikely, since it
+ * would require multiple packets from the same side of the connection
+ * one after another immediately, and the TCP connection OPEN requires
+ * acknowledgement before further packets. What about simultaneous
+ * OPEN? Only if both sides are on same input port. Is that possible?
+ */
+ /* if made it here, packet will be forwarded */
+ ct->counters->pkts_forwarded++;
+ return RTE_CT_FORWARD_PACKET;
+}
+
+static uint64_t
+rte_ct_cnxn_tracker_batch_lookup_basic(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t no_new_cnxn_mask,
+ uint64_t *reply_pkt_mask,
+ uint64_t *hijack_mask)
+{
+ /* bitmap of packets left to process */
+ uint64_t pkts_to_process = pkts_mask;
+ /* bitmap of valid packets to return */
+ uint64_t valid_packets = pkts_mask;
+ uint8_t compacting_map[RTE_HASH_LOOKUP_BULK_MAX];
+ /* for pkt, key in originators direction? */
+ uint8_t key_orig_dir[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t packets_for_lookup = 0;
+ int32_t positions[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t i;
+ struct rte_ct_cnxn_data new_cnxn_data;
+
+ if (CNXN_TRX_DEBUG > 1) {
+ printf("Enter cnxn tracker %p", ct);
+ printf(" synproxy batch lookup with packet mask %p\n",
+ (void *)pkts_mask);
+ }
+
+ rte_ct_forget_new_connections(ct);
+ *reply_pkt_mask = 0;
+ *hijack_mask = 0;
+
+ /*
+ * Use bulk lookup into hash table for performance reasons. Cannot have
+ * "empty slots" in the bulk lookup, so need to create a compacted table.
+ */
+
+ for (; pkts_to_process;) {
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_to_process);
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pos;
+ /* remove this packet from remaining list */
+ pkts_to_process &= ~pkt_mask;
+
+ struct rte_mbuf *pkt = pkts[pos];
+
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(pkt);
+
+ if (unlikely(ip_hdr_size_bytes < 0)) {
+			/* Not IPv4 or IPv6, ignore. */
+ continue;
+ }
+
+ void *ip_hdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+
+ /* TCP and UDP ports at same offset, just use TCP for
+ * offset calculation
+ */
+ struct tcp_hdr *thdr =
+ (struct tcp_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START + ip_hdr_size_bytes));
+ uint16_t src_port = rte_bswap16(thdr->src_port);
+ uint16_t dst_port = rte_bswap16(thdr->dst_port);
+
+ if (ip_hdr_size_bytes == IPv4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr = (struct ipv4_hdr *)ip_hdr;
+ uint8_t proto = ihdr->next_proto_id;
+
+ if (!(proto == TCP_PROTOCOL || proto == UDP_PROTOCOL)) {
+ /* only tracking TCP and UDP at this time */
+ continue;
+ }
+
+			 * Load the addresses and ports, and convert them to
+			 * host byte order. Strictly speaking, it is not
+ * to network byte order. Strictly speaking, it is not
+ * necessary to do this conversion, as this data is only
+ * used to create a hash key.
+ */
+ uint32_t src_addr = rte_bswap32(ihdr->src_addr);
+ uint32_t dst_addr = rte_bswap32(ihdr->dst_addr);
+
+ if (CNXN_TRX_DEBUG > 2) {
+ if (CNXN_TRX_DEBUG > 4)
+ rte_ct_cnxn_print_pkt(pkt,
+ IP_VERSION_4);
+ }
+ /* need to create compacted table of pointers to pass
+ * to bulk lookup
+ */
+
+ compacting_map[packets_for_lookup] = pos;
+ key_orig_dir[packets_for_lookup] =
+ rte_ct_create_cnxn_hashkey(&src_addr, &dst_addr,
+ src_port, dst_port,
+ proto,
+ &ct->hash_keys
+ [packets_for_lookup][0],
+ IP_VERSION_4);
+ packets_for_lookup++;
+ }
+
+ if (ip_hdr_size_bytes == IPv6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr = (struct ipv6_hdr *)ip_hdr;
+ uint8_t proto = ihdr->proto;
+
+ if (!(proto == TCP_PROTOCOL || proto == UDP_PROTOCOL)) {
+ /* only tracking TCP and UDP at this time */
+ continue;
+ }
+
+ if (CNXN_TRX_DEBUG > 2) {
+ if (CNXN_TRX_DEBUG > 4)
+ rte_ct_cnxn_print_pkt(pkt,
+ IP_VERSION_6);
+ }
+
+ /* need to create compacted table of pointers to pass
+ * to bulk lookup
+ */
+
+ compacting_map[packets_for_lookup] = pos;
+ key_orig_dir[packets_for_lookup] =
+ rte_ct_create_cnxn_hashkey(
+ (uint32_t *) ihdr->src_addr,
+ (uint32_t *) ihdr->dst_addr,
+ src_port, dst_port,
+ proto,
+ &ct->hash_keys
+ [packets_for_lookup][0],
+ IP_VERSION_6);
+ packets_for_lookup++;
+ }
+
+ }
+
+ if (unlikely(packets_for_lookup == 0))
+ return valid_packets; /* no suitable packet for lookup */
+
+ /* Clear all the data to make sure no stack garbage is in it */
+ memset(&new_cnxn_data, 0, sizeof(struct rte_ct_cnxn_data));
+
+ /* lookup all tcp & udp packets in the connection table */
+
+ int lookup_result =
+ rte_hash_lookup_bulk(ct->rhash, (const void **)&ct->hash_key_ptrs,
+ packets_for_lookup, &positions[0]);
+
+ if (unlikely(lookup_result < 0)) {
+		/* TODO: change to a log */
+		printf("Unexpected hash table problem, discarding all packets\n");
+ return 0; /* unknown error, just discard all packets */
+ }
+#ifdef ALGDBG
+ for (i = 0; i < packets_for_lookup; i++) {
+ if (positions[i] >= 0)
+ printf("@CT positions[i]= %d, compacting_map[i]= %d\n",
+ positions[i], compacting_map[i]);
+ }
+#endif
+ for (i = 0; i < packets_for_lookup; i++) {
+ /* index into hash table entries */
+ int hash_table_entry = positions[i];
+ /* index into packet table of this packet */
+ uint8_t pkt_index = compacting_map[i];
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ uint8_t key_is_client_order = key_orig_dir[i];
+ uint32_t *key = ct->hash_key_ptrs[pkt_index];
+ uint8_t protocol = *(key + 9);
+ struct rte_mbuf *packet = pkts[pkt_index];
+ int no_new_cnxn = (pkt_mask & no_new_cnxn_mask) != 0;
+
+ /* rte_ct_print_hashkey(key); */
+
+ if (protocol == TCP_PROTOCOL) {
+ enum rte_ct_packet_action tcp_pkt_action;
+
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(packet);
+ tcp_pkt_action = rte_ct_handle_tcp_lookup(ct, packet,
+ pkt_index, key_is_client_order,
+ key, hash_table_entry, no_new_cnxn,
+ ip_hdr_size_bytes);
+
+ switch (tcp_pkt_action) {
+
+ case RTE_CT_SEND_CLIENT_SYNACK:
+ case RTE_CT_SEND_SERVER_ACK:
+ /* altered packet or copy must be returned
+ * to originator
+ */
+ *reply_pkt_mask |= pkt_mask;
+ /* FALL-THROUGH */
+
+ case RTE_CT_SEND_SERVER_SYN:
+ case RTE_CT_FORWARD_PACKET:
+ break;
+
+ case RTE_CT_HIJACK:
+ *hijack_mask |= pkt_mask;
+ break;
+
+ default:
+ /* bad packet, clear mask to drop */
+ valid_packets ^= pkt_mask;
+ ct->counters->pkts_drop++;
+ break;
+ }
+
+ /* rte_ct_cnxn_print_pkt(pkts[pkt_index]); */
+ } else { /* UDP entry */
+
+ if (hash_table_entry >= 0) {
+ /*
+ * connection found for this packet. Check that
+ * this is a valid packet for connection
+ */
+
+ struct rte_ct_cnxn_data *entry =
+ &ct->hash_table_entries[hash_table_entry];
+
+ if (rte_ct_udp_packet
+ (ct, entry, pkts[pkt_index],
+ key_is_client_order)) {
+ entry->counters.packets_forwarded++;
+ ct->counters->pkts_forwarded++;
+ }
+ } else {
+ /*
+ * connection not found in bulk hash lookup,
+ * but might have been added in this batch
+ */
+
+ struct rte_ct_cnxn_data *recent_entry =
+ rte_ct_search_new_connections(ct, key);
+
+ if (recent_entry != NULL) {
+ if (rte_ct_udp_packet(ct, recent_entry,
+ pkts[pkt_index],
+ key_is_client_order)) {
+ recent_entry->counters.
+ packets_forwarded++;
+ ct->counters->pkts_forwarded++;
+ }
+ } else {
+ /* no existing connection, try to add
+ * new one
+ */
+
+ if (no_new_cnxn) {
+ /* new cnxn not allowed, clear
+ * mask to drop
+ */
+ valid_packets ^= pkt_mask;
+ ct->counters->pkts_drop++;
+ ct->counters->
+ pkts_drop_invalid_conn++;
+ continue;
+ }
+
+ if (rte_ct_udp_new_connection(ct,
+ &new_cnxn_data,
+ pkts[pkt_index])) {
+ /* This packet creates a
+ * connection .
+ */
+ int32_t position =
+ rte_hash_add_key(
+ ct->rhash, key);
+
+ if (position < 0)
+ continue;
+
+ struct rte_ct_cnxn_data
+ *new_hash_entry = &ct->
+ hash_table_entries[position];
+
+ /*
+ *update fields in new_cnxn_data
+ * not set by "new_connection"
+ */
+
+ memcpy(new_cnxn_data.key, key,
+ sizeof(new_cnxn_data.key));
+
+ new_cnxn_data.
+ key_is_client_order
+ = key_is_client_order;
+ new_cnxn_data.protocol =
+ UDP_PROTOCOL;
+ rte_cnxn_ip_type(
+ &new_cnxn_data.type,
+ packet);
+ rte_memcpy(new_hash_entry,
+ &new_cnxn_data,
+ sizeof(struct
+ rte_ct_cnxn_data));
+
+ new_hash_entry->counters.
+ packets_forwarded = 1;
+ ct->counters->pkts_forwarded++;
+ new_hash_entry->counters.
+ packets_dropped = 0;
+ ct->counters->pkts_drop = 0;
+ ct->counters->
+ current_active_sessions++;
+ ct->counters->
+ sessions_activated++;
+
+ new_hash_entry->
+ state_used_for_timer
+ = RTE_CT_UDP_NONE;
+ rte_ct_set_cnxn_timer_for_udp(
+ ct,
+ new_hash_entry,
+ RTE_CT_UDP_UNREPLIED);
+
+ rte_ct_remember_new_connection(
+ ct,
+ new_hash_entry);
+ }
+ }
+
+ }
+
+ } /* UDP */
+ } /* packets_for_lookup */
+
+ if (CNXN_TRX_DEBUG > 1) {
+ printf("Exit cnxn tracker synproxy batch lookup with");
+ printf(" packet mask %p\n", (void *)valid_packets);
+ }
+
+ return valid_packets;
+}
+
+uint64_t
+rte_ct_cnxn_tracker_batch_lookup_with_synproxy(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_synproxy_helper *sp_helper)
+{
+ return rte_ct_cnxn_tracker_batch_lookup_basic(ct, pkts, pkts_mask, 0,
+ &sp_helper->reply_pkt_mask, &sp_helper->hijack_mask);
+}
+#ifdef CT_CGNAT
+uint64_t cgnapt_ct_process(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_CT_helper *ct_helper)
+{
+/* to disable SynProxy for CGNAT */
+ rte_ct_disable_synproxy(ct);
+ return rte_ct_cnxn_tracker_batch_lookup_basic(ct, pkts, pkts_mask,
+ ct_helper->no_new_cnxn_mask,
+ &ct_helper->reply_pkt_mask,
+ &ct_helper->hijack_mask);
+}
+#endif/*CT-CGNAT*/
+uint64_t
+rte_ct_cnxn_tracker_batch_lookup(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_CT_helper *ct_helper)
+{
+
+ return rte_ct_cnxn_tracker_batch_lookup_basic(ct, pkts, pkts_mask,
+ ct_helper->no_new_cnxn_mask,
+ &ct_helper->reply_pkt_mask, &ct_helper->hijack_mask);
+}
+
+
+void rte_ct_cnxn_tracker_batch_lookup_type(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ struct rte_CT_helper *ct_helper,
+ uint8_t ip_hdr_size_bytes)
+{
+
+ rte_ct_cnxn_tracker_batch_lookup_basic_type(ct, pkts, pkts_mask,
+ ct_helper->no_new_cnxn_mask,
+ &ct_helper->reply_pkt_mask, &ct_helper->hijack_mask,
+ ip_hdr_size_bytes);
+}
+
+
+
+uint64_t
+rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t no_new_cnxn_mask)
+{
+ uint64_t dont_care;
+
+ return rte_ct_cnxn_tracker_batch_lookup_basic(ct, pkts, pkts_mask,
+ no_new_cnxn_mask,
+ &dont_care, &dont_care);
+}
+
+
+int
+rte_ct_initialize_default_timeouts(struct rte_ct_cnxn_tracker *new_cnxn_tracker)
+{
+
+ /* timer system init */
+
+ uint64_t hertz = rte_get_tsc_hz();
+
+ new_cnxn_tracker->hertz = hertz;
+ new_cnxn_tracker->timing_cycles_per_timing_step = hertz / 10;
+ new_cnxn_tracker->timing_100ms_steps_previous = 0;
+ new_cnxn_tracker->timing_100ms_steps = 0;
+ new_cnxn_tracker->timing_last_time = rte_get_tsc_cycles();
+
+ /* timeouts in seconds */
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_SYN_SENT] = 120 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_SYN_RECV] = 60 * hertz;
+ /* 5 * DAYS */
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_ESTABLISHED] = 60 * 60 * 24 * 5 * hertz;
+
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_FIN_WAIT] = 120 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_CLOSE_WAIT] = 60 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_LAST_ACK] = 30 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_TIME_WAIT] = 120 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_CLOSE] = 10 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_SYN_SENT_2] = 120 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_RETRANS] = 300 * hertz;
+ new_cnxn_tracker->ct_timeout.tcptimeout.tcp_timeouts
+ [RTE_CT_TCP_UNACK] = 300 * hertz;
+
+ new_cnxn_tracker->ct_timeout.udptimeout.udp_timeouts
+ [RTE_CT_UDP_UNREPLIED] = 30 * hertz;
+ new_cnxn_tracker->ct_timeout.udptimeout.udp_timeouts
+ [RTE_CT_UDP_REPLIED] = 180 * hertz;
+ /* miscellaneous init */
+ new_cnxn_tracker->misc_options.tcp_max_retrans =
+ RTE_CT_TCP_MAX_RETRANS;
+ new_cnxn_tracker->misc_options.tcp_loose = 0;
+ new_cnxn_tracker->misc_options.tcp_be_liberal = 0;
+#ifdef CT_CGNAT
+ int i;
+	for (i = 0; i < RTE_HASH_LOOKUP_BULK_MAX; i++)
+ new_cnxn_tracker->positions[i] = -1;
+#endif
+
+ return 0;
+}
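+
+/*
+ * The defaults above can be overridden per tracker through
+ * rte_ct_set_configuration_options(); a sketch with hypothetical values:
+ *
+ *	rte_ct_set_configuration_options(ct, "tcp_established", "7200");
+ *	rte_ct_set_configuration_options(ct, "tcp_max_retrans", "5");
+ */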
+
+struct rte_CT_counter_block rte_CT_counter_table[MAX_CT_INSTANCES]
+__rte_cache_aligned;
+int rte_CT_hi_counter_block_in_use = -1;
+
+int
+rte_ct_initialize_cnxn_tracker_with_synproxy(
+ struct rte_ct_cnxn_tracker *new_cnxn_tracker,
+ uint32_t max_connection_count,
+ char *name,
+ uint16_t pointer_offset)
+{
+ uint32_t i;
+ uint32_t size;
+ struct rte_CT_counter_block *counter_ptr;
+ /*
+ * TODO: Should number of entries be something like
+ * max_connection_count * 1.1 to allow for unused space
+ * and thus increased performance of hash table, at a cost of memory???
+ */
+
+ new_cnxn_tracker->pointer_offset = pointer_offset;
+
+ memset(new_cnxn_tracker->name, '\0', sizeof(new_cnxn_tracker->name));
+	strncpy(new_cnxn_tracker->name, name, sizeof(new_cnxn_tracker->name) - 1);
+ //strcpy(new_cnxn_tracker->name, name);
+ /* + (max_connection_count >> 3); */
+ uint32_t number_of_entries = max_connection_count;
+
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_ct_cnxn_data) *
+ number_of_entries);
+ new_cnxn_tracker->hash_table_entries =
+ rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (new_cnxn_tracker->hash_table_entries == NULL) {
+ printf(" Not enough memory, or invalid arguments\n");
+ return -1;
+ }
+ new_cnxn_tracker->num_cnxn_entries = number_of_entries;
+
+ /* initialize all timers */
+
+ for (i = 0; i < number_of_entries; i++)
+ rte_timer_init(&new_cnxn_tracker->hash_table_entries[i].timer);
+
+ /* pointers for temp storage used during bulk hash */
+ for (i = 0; i < RTE_HASH_LOOKUP_BULK_MAX; i++)
+ new_cnxn_tracker->hash_key_ptrs[i] =
+ &new_cnxn_tracker->hash_keys[i][0];
+
+ /*
+	 * Now allocate a counter block entry. It appears that the initialization
+ * of these threads is serialized on core 0 so no lock is necessary
+ */
+
+	if (rte_CT_hi_counter_block_in_use >= MAX_CT_INSTANCES - 1)
+ return -1;
+
+ rte_CT_hi_counter_block_in_use++;
+ counter_ptr = &rte_CT_counter_table[rte_CT_hi_counter_block_in_use];
+
+ new_cnxn_tracker->counters = counter_ptr;
+
+ /* set up hash table parameters, then create hash table */
+ struct rte_hash_parameters rhash_parms = {
+ .name = name,
+ .entries = number_of_entries,
+ .hash_func = NULL, /* use default hash */
+ .key_len = 40,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ .extra_flag = 1 /*This is needed for TSX memory*/
+ };
+
+ new_cnxn_tracker->rhash = rte_hash_create(&rhash_parms);
+
+ return 0;
+}
+
+int
+rte_ct_initialize_cnxn_tracker(
+ struct rte_ct_cnxn_tracker *new_cnxn_tracker,
+ uint32_t max_connection_count,
+ char *name)
+{
+ return rte_ct_initialize_cnxn_tracker_with_synproxy(new_cnxn_tracker,
+ max_connection_count, name, 0);
+}
+
+int
+rte_ct_free_cnxn_tracker_resources(struct rte_ct_cnxn_tracker *old_cnxn_tracker)
+{
+ rte_free(old_cnxn_tracker->hash_table_entries);
+ rte_hash_free(old_cnxn_tracker->rhash);
+ return 0;
+}
+
+int
+rte_ct_get_cnxn_tracker_size(void)
+{
+ return sizeof(struct rte_ct_cnxn_tracker);
+}
+
+void
+rte_ct_cnxn_timer_expired(struct rte_timer *rt, void *arg);
+
+static void
+rte_ct_set_cnxn_timer(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ uint64_t ticks_until_timeout)
+{
+ /*
+ * pointer to cnxn_data will be stored in timer system as pointer to
+ * rte_timer for later cast back to cnxn_data during timeout handling
+ */
+
+ struct rte_timer *rt = (struct rte_timer *)cd;
+ #ifdef CT_CGNAT
+ /* execute timeout on timer core */
+ uint32_t core_id = get_timer_core_id();
+ #else
+ /* execute timeout on current core */
+ uint32_t core_id = rte_lcore_id();
+ #endif
+ /* safe to reset since timeouts handled synchronously
+ * by rte_timer_manage
+ */
+ int success = rte_timer_reset(rt, ticks_until_timeout, SINGLE, core_id,
+ rte_ct_cnxn_timer_expired, ct);
+
+ if (success < 0) {
+ /* TODO: Change to log, perhaps something else?
+ * This should not happen
+ */
+ printf("CNXN_TRACKER: Failed to set connection timer.\n");
+ }
+}
+
+/*
+ * For the given connection, set a timeout based on the given state. If the
+ * timer is already set, this call will reset the timer with a new value.
+ */
+
+void
+rte_ct_set_cnxn_timer_for_tcp(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ uint8_t tcp_state)
+{
+
+ cd->expected_timeout =
+ (ct->timing_100ms_steps * ct->timing_cycles_per_timing_step) +
+ ct->ct_timeout.tcptimeout.tcp_timeouts[tcp_state];
+
+ if (tcp_state == cd->state_used_for_timer) {
+		/*
+		 * Don't reset the timer, too expensive. Instead, record the
+		 * new expected timeout (done above). When the timer expires,
+		 * it is re-armed for the time remaining until that expected
+		 * timeout. So a connection in a state with a 5 minute timer
+		 * that last saw a packet 4 minutes in is re-armed for another
+		 * 4 minutes, i.e. it effectively expires 5 minutes after the
+		 * last packet.
+		 */
+ return;
+ }
+
+ if (TESTING_TIMERS)
+ printf("Set Timer for connection %p and state %s\n", cd,
+ rte_ct_tcp_names[tcp_state]);
+
+ rte_ct_set_cnxn_timer(ct, cd,
+ ct->ct_timeout.
+ tcptimeout.tcp_timeouts[tcp_state]);
+ cd->state_used_for_timer = tcp_state;
+}
+
+/*
+ * For the given connection, set a timeout based on the given state.
+ * If the timer is already set,
+ * this call will reset the timer with a new value.
+ */
+
+void
+rte_ct_set_cnxn_timer_for_udp(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ uint8_t udp_state)
+{
+
+	cd->expected_timeout =
+		(ct->timing_100ms_steps * ct->timing_cycles_per_timing_step) +
+		ct->ct_timeout.udptimeout.udp_timeouts[udp_state];
+
+ if (udp_state == cd->state_used_for_timer) {
+		/*
+		 * Don't reset the timer, too expensive. Instead, record the
+		 * new expected timeout (done above). When the timer expires,
+		 * it is re-armed for the time remaining until that expected
+		 * timeout. So a connection in a state with a 5 minute timer
+		 * that last saw a packet 4 minutes in is re-armed for another
+		 * 4 minutes, i.e. it effectively expires 5 minutes after the
+		 * last packet.
+		 */
+ return;
+ }
+
+ if (TESTING_TIMERS)
+ printf("Set Timer for connection %p and state %s\n", cd,
+ rte_ct_udp_names[udp_state]);
+ rte_ct_set_cnxn_timer(ct, cd,
+ ct->ct_timeout.
+ udptimeout.udp_timeouts[udp_state]);
+ cd->state_used_for_timer = udp_state;
+}
+
+/* Cancel the timer associated with the connection.
+ * Safe to call if no timer set.
+ */
+void
+rte_ct_cancel_cnxn_timer(struct rte_ct_cnxn_data *cd)
+{
+ if (TESTING_TIMERS)
+ printf("Cancel Timer\n");
+
+ rte_timer_stop(&cd->timer);
+}
+
+void
+rte_ct_handle_expired_timers(struct rte_ct_cnxn_tracker *ct)
+{
+ /*
+ * If current time (in 100 ms increments) is different from the
+ * time it was last viewed, then check for and process expired timers.
+ */
+
+ uint64_t new_time = rte_get_tsc_cycles();
+ uint64_t time_diff = new_time - ct->timing_last_time;
+
+ if (time_diff >= ct->timing_cycles_per_timing_step) {
+ ct->timing_last_time = new_time;
+ ct->timing_100ms_steps++;
+ }
+
+ if (ct->timing_100ms_steps != ct->timing_100ms_steps_previous) {
+ rte_timer_manage();
+ ct->timing_100ms_steps_previous = ct->timing_100ms_steps;
+ }
+}
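+
+/*
+ * A pipeline thread is expected to drive the tracker roughly as follows
+ * (hypothetical sketch; pkts and pkts_mask come from the owning pipeline):
+ *
+ *	struct rte_CT_helper ct_helper;
+ *
+ *	memset(&ct_helper, 0, sizeof(ct_helper));
+ *	pkts_mask = rte_ct_cnxn_tracker_batch_lookup(ct, pkts, pkts_mask,
+ *			&ct_helper);
+ *	rte_ct_handle_expired_timers(ct);
+ */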
+
+/* timer has expired. Need to delete connection entry */
+
+void
+rte_ct_cnxn_timer_expired(struct rte_timer *rt, void *arg)
+{
+ /* the pointer to the rte_timer was actually a pointer
+ * to the cnxn data
+ */
+ struct rte_ct_cnxn_data *cd = (struct rte_ct_cnxn_data *)rt;
+ struct rte_ct_cnxn_tracker *ct = (struct rte_ct_cnxn_tracker *)arg;
+ int success = 0;
+
+ /*
+ * Check whether the timer has "really" expired. If traffic occurred
+ * since the timer was set, the timer needs to be extended, so that it
+ * expires the appropriate amount of time after that last packet.
+ */
+
+ uint64_t current_time = ct->timing_100ms_steps *
+ ct->timing_cycles_per_timing_step;
+
+ if (cd->expected_timeout >= current_time) {
+ uint64_t time_diff = cd->expected_timeout - current_time;
+
+ rte_ct_set_cnxn_timer(ct, cd, time_diff);
+ return;
+ }
+
+ if (cd->protocol == TCP_PROTOCOL) {
+ if (cd->state_used_for_timer == RTE_CT_TCP_TIME_WAIT ||
+ cd->state_used_for_timer == RTE_CT_TCP_CLOSE)
+ ct->counters->sessions_closed++;
+ else
+ ct->counters->sessions_timedout++;
+ /* if synproxied connection, free list of buffered
+ * packets if any
+ */
+
+ if (cd->ct_protocol.synproxy_data.synproxied)
+ rte_ct_release_buffered_packets(ct, cd);
+
+ } else if (cd->protocol == UDP_PROTOCOL)
+ ct->counters->sessions_closed++;
+ if (ct->counters->current_active_sessions > 0)
+ ct->counters->current_active_sessions--;
+
+ if (RTE_CT_TIMER_EXPIRED_DUMP) {
+ uint64_t percent = (cd->counters.packets_dropped * 10000) /
+ (cd->counters.packets_forwarded +
+ cd->counters.packets_dropped);
+
+ if (cd->protocol == TCP_PROTOCOL) {
+ printf("CnxnTrkr %s, timed-out TCP Connection: %p,",
+ ct->name, cd);
+ printf(" %s, pkts forwarded %"
+ PRIu64 ", pkts dropped %" PRIu64
+ ", drop%% %u.%u\n",
+ rte_ct_tcp_names[cd->state_used_for_timer],
+ cd->counters.packets_forwarded,
+ cd->counters.packets_dropped,
+ (uint32_t) (percent / 100),
+ (uint32_t) (percent % 100));
+ } else if (cd->protocol == UDP_PROTOCOL) {
+ printf("CnxnTrkr %s, Timed-out UDP Connection: %p,",
+ ct->name, cd);
+ printf(" %s, pkts forwarded %" PRIu64
+ ", pkts dropped %" PRIu64 ", drop%% %u.%u\n",
+ rte_ct_udp_names[cd->state_used_for_timer],
+ cd->counters.packets_forwarded,
+ cd->counters.packets_dropped,
+ (uint32_t) (percent / 100),
+ (uint32_t) (percent % 100));
+ }
+ }
+
+ success = rte_hash_del_key(ct->rhash, &cd->key);
+
+ if (success < 0) {
+ /* TODO: change to a log */
+ rte_ct_print_hashkey(cd->key);
+ }
+
+}
+
+struct rte_CT_counter_block *
+rte_ct_get_counter_address(struct rte_ct_cnxn_tracker *ct)
+{
+ return ct->counters;
+}
+
+int
+rte_ct_set_configuration_options(struct rte_ct_cnxn_tracker *ct,
+ char *name, char *value)
+{
+ /* check non-time values first */
+ int ival = atoi(value);
+
+ /* tcp_loose */
+ if (strcmp(name, "tcp_loose") == 0) {
+ ct->misc_options.tcp_loose = ival;
+ return 0;
+ }
+
+ /* tcp_be_liberal */
+ if (strcmp(name, "tcp_be_liberal") == 0) {
+ ct->misc_options.tcp_be_liberal = ival;
+ return 0;
+ }
+
+ /* tcp_max_retrans */
+ if (strcmp(name, "tcp_max_retrans") == 0) {
+ ct->misc_options.tcp_max_retrans = ival;
+ return 0;
+ }
+
+ uint64_t time_value = ival * ct->hertz;
+
+
+ /* configuration of timer values */
+
+ /* tcp_syn_sent */
+ if (strcmp(name, "tcp_syn_sent") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_SYN_SENT] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_syn_recv */
+ if (strcmp(name, "tcp_syn_recv") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_SYN_RECV] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_established */
+ if (strcmp(name, "tcp_established") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_ESTABLISHED] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_fin_wait */
+ if (strcmp(name, "tcp_fin_wait") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_FIN_WAIT] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_close_wait */
+ if (strcmp(name, "tcp_close_wait") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_CLOSE_WAIT] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_last_ack */
+ if (strcmp(name, "tcp_last_ack") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_LAST_ACK] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_time_wait */
+ if (strcmp(name, "tcp_time_wait") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_TIME_WAIT] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_close */
+ if (strcmp(name, "tcp_close") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_CLOSE] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_syn_sent_2 */
+ if (strcmp(name, "tcp_syn_sent_2") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_SYN_SENT_2] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_retrans */
+ if (strcmp(name, "tcp_retrans") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_RETRANS] =
+ time_value;
+ return 0;
+ }
+
+ /* tcp_unack */
+ if (strcmp(name, "tcp_unack") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.tcptimeout.tcp_timeouts[RTE_CT_TCP_UNACK] =
+ time_value;
+ return 0;
+ }
+
+ /* udp_unreplied */
+ if (strcmp(name, "udp_unreplied") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.udptimeout.udp_timeouts[RTE_CT_UDP_UNREPLIED] =
+ time_value;
+ return 0;
+ }
+
+ /* udp_replied */
+ if (strcmp(name, "udp_replied") == 0) {
+ if (time_value == 0)
+ return -1;
+ ct->ct_timeout.udptimeout.udp_timeouts[RTE_CT_UDP_REPLIED] =
+ time_value;
+ return 0;
+ }
+ return 1;
+}
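+
+/*
+ * Illustrative usage sketch (the keys below are just examples of the names
+ * handled above; the surrounding application normally feeds these
+ * name/value pairs in from its configuration-file parser):
+ *
+ * if (rte_ct_set_configuration_options(ct, "tcp_established", "120") != 0)
+ * printf("unknown or invalid connection tracking option\n");
+ *
+ * Timer values are given in seconds and converted to cycles using
+ * ct->hertz; a timeout value of 0 is rejected.
+ */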
+
+static void
+rte_ct_cnxn_tracker_batch_lookup_basic_type(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ uint64_t no_new_cnxn_mask,
+ uint64_t *reply_pkt_mask,
+ uint64_t *hijack_mask,
+ uint8_t ip_hdr_size_bytes)
+{
+ /* bitmap of packets left to process */
+ uint64_t pkts_to_process = *pkts_mask;
+ /* bitmap of valid packets to return */
+ uint8_t compacting_map[RTE_HASH_LOOKUP_BULK_MAX];
+ /* for pkt, key in originators direction? */
+ uint8_t key_orig_dir[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t packets_for_lookup = 0;
+ int32_t positions[RTE_HASH_LOOKUP_BULK_MAX];
+ uint32_t i;
+ struct rte_ct_cnxn_data new_cnxn_data;
+
+ if (CNXN_TRX_DEBUG > 1) {
+ printf("Enter cnxn tracker %p", ct);
+ printf(" synproxy batch lookup with packet mask %p\n",
+ (void *)*pkts_mask);
+ }
+
+ rte_ct_forget_new_connections(ct);
+ *reply_pkt_mask = 0;
+ *hijack_mask = 0;
+
+ /*
+ * Use bulk lookup into hash table for performance reasons. Cannot have
+ * "empty slots" in the bulk lookup,so need to create a compacted table.
+ */
+
+ switch (ip_hdr_size_bytes) {
+ case IPv4_HEADER_SIZE:
+ for (; pkts_to_process;) {
+ uint8_t pos = (uint8_t) __builtin_ctzll(
+ pkts_to_process);
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pos;
+ /* remove this packet from remaining list */
+ pkts_to_process &= ~pkt_mask;
+
+ struct rte_mbuf *pkt = pkts[pos];
+
+
+ /* TCP and UDP ports at same offset, just use TCP for
+ * offset calculation
+ */
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START + ip_hdr_size_bytes));
+ uint16_t src_port = rte_bswap16(thdr->src_port);
+ uint16_t dst_port = rte_bswap16(thdr->dst_port);
+
+ struct ipv4_hdr *ihdr = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+ uint8_t proto = ihdr->next_proto_id;
+
+ if (!(proto == TCP_PROTOCOL || proto == UDP_PROTOCOL)) {
+ /* only tracking TCP and UDP at this time */
+ continue;
+ }
+
+ /*
+ * Load the addresses and ports, converting from network to
+ * host (Intel) byte order. Strictly speaking, this conversion
+ * is not necessary, as the data is only used to create a
+ * hash key.
+ */
+ uint32_t src_addr = rte_bswap32(ihdr->src_addr);
+ uint32_t dst_addr = rte_bswap32(ihdr->dst_addr);
+
+ if (CNXN_TRX_DEBUG > 2) {
+ if (CNXN_TRX_DEBUG > 4)
+ rte_ct_cnxn_print_pkt(pkt,
+ IP_VERSION_4);
+ }
+ /* need to create compacted table of pointers to pass
+ * to bulk lookup
+ */
+
+ compacting_map[packets_for_lookup] = pos;
+ key_orig_dir[packets_for_lookup] =
+ rte_ct_create_cnxn_hashkey(&src_addr, &dst_addr,
+ src_port, dst_port,
+ proto,
+ &ct->hash_keys
+ [packets_for_lookup][0],
+ IP_VERSION_4);
+ packets_for_lookup++;
+ }
+ break;
+ case IPv6_HEADER_SIZE:
+ for (; pkts_to_process;) {
+ uint8_t pos = (uint8_t) __builtin_ctzll(
+ pkts_to_process);
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pos;
+ /* remove this packet from remaining list */
+ pkts_to_process &= ~pkt_mask;
+
+ struct rte_mbuf *pkt = pkts[pos];
+
+
+ void *ip_hdr = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ IP_START);
+
+ /* TCP and UDP ports at same offset, just use TCP for
+ * offset calculation
+ */
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START + ip_hdr_size_bytes));
+ uint16_t src_port = rte_bswap16(thdr->src_port);
+ uint16_t dst_port = rte_bswap16(thdr->dst_port);
+
+ struct ipv6_hdr *ihdr = (struct ipv6_hdr *)ip_hdr;
+ uint8_t proto = ihdr->proto;
+
+ if (!(proto == TCP_PROTOCOL || proto == UDP_PROTOCOL)) {
+ /* only tracking TCP and UDP at this time */
+ continue;
+ }
+
+ if (CNXN_TRX_DEBUG > 2) {
+ if (CNXN_TRX_DEBUG > 4)
+ rte_ct_cnxn_print_pkt(pkt,
+ IP_VERSION_6);
+ }
+
+ /* need to create compacted table of pointers to pass
+ * to bulk lookup
+ */
+
+ compacting_map[packets_for_lookup] = pos;
+ key_orig_dir[packets_for_lookup] =
+ rte_ct_create_cnxn_hashkey(
+ (uint32_t *) ihdr->src_addr,
+ (uint32_t *) ihdr->dst_addr,
+ src_port, dst_port,
+ proto,
+ &ct->hash_keys
+ [packets_for_lookup][0],
+ IP_VERSION_6);
+ packets_for_lookup++;
+ }
+ break;
+ default:
+ break;
+ }
+ if (unlikely(packets_for_lookup == 0))
+ return; /* no suitable packet for lookup */
+
+ /* Clear all the data to make sure no stack garbage is in it */
+ memset(&new_cnxn_data, 0, sizeof(struct rte_ct_cnxn_data));
+
+ /* lookup all tcp & udp packets in the connection table */
+
+ int lookup_result = rte_hash_lookup_bulk(ct->rhash,
+ (const void **)&ct->hash_key_ptrs,
+ packets_for_lookup, &positions[0]);
+
+ if (unlikely(lookup_result < 0)) {
+ /* TODO: change to a log */
+ printf("Unexpected hash table problem, discarding all packets\n");
+ *pkts_mask = 0;
+ return; /* unknown error, just discard all packets */
+ }
+ for (i = 0; i < packets_for_lookup; i++) {
+ /* index into hash table entries */
+ int hash_table_entry = positions[i];
+ /* index into packet table of this packet */
+ uint8_t pkt_index = compacting_map[i];
+ /* bitmask representing only this packet */
+ uint64_t pkt_mask = 1LLU << pkt_index;
+ uint8_t key_is_client_order = key_orig_dir[i];
+ uint32_t *key = ct->hash_key_ptrs[pkt_index];
+ uint8_t protocol = *(key + 9);
+ struct rte_mbuf *packet = pkts[pkt_index];
+ int no_new_cnxn = (pkt_mask & no_new_cnxn_mask) != 0;
+
+ /* rte_ct_print_hashkey(key); */
+
+ if (protocol == TCP_PROTOCOL) {
+ enum rte_ct_packet_action tcp_pkt_action;
+
+ tcp_pkt_action = rte_ct_handle_tcp_lookup(ct, packet,
+ pkt_index, key_is_client_order,
+ key, hash_table_entry, no_new_cnxn,
+ ip_hdr_size_bytes);
+
+ switch (tcp_pkt_action) {
+
+ case RTE_CT_SEND_CLIENT_SYNACK:
+ case RTE_CT_SEND_SERVER_ACK:
+ /* altered packet or copy must be returned
+ * to originator
+ */
+ *reply_pkt_mask |= pkt_mask;
+ /* FALL-THROUGH */
+
+ case RTE_CT_SEND_SERVER_SYN:
+ case RTE_CT_FORWARD_PACKET:
+ break;
+
+ case RTE_CT_HIJACK:
+ *hijack_mask |= pkt_mask;
+ break;
+
+ default:
+ /* bad packet, clear mask to drop */
+ *pkts_mask ^= pkt_mask;
+ ct->counters->pkts_drop++;
+ break;
+ }
+ /* rte_ct_cnxn_print_pkt(pkts[pkt_index]); */
+
+ } else { /* UDP entry */
+
+ if (hash_table_entry >= 0) {
+ /*
+ * connection found for this packet. Check that
+ * this is a valid packet for connection
+ */
+
+ struct rte_ct_cnxn_data *entry =
+ &ct->hash_table_entries[hash_table_entry];
+
+ if (rte_ct_udp_packet
+ (ct, entry, pkts[pkt_index],
+ key_is_client_order)) {
+ entry->counters.packets_forwarded++;
+ ct->counters->pkts_forwarded++;
+ }
+ } else {
+ /*
+ * connection not found in bulk hash lookup,
+ * but might have been added in this batch
+ */
+
+ struct rte_ct_cnxn_data *recent_entry =
+ rte_ct_search_new_connections(ct, key);
+
+ if (recent_entry != NULL) {
+ if (rte_ct_udp_packet(ct, recent_entry,
+ pkts[pkt_index],
+ key_is_client_order)) {
+ recent_entry->counters.
+ packets_forwarded++;
+ ct->counters->pkts_forwarded++;
+ }
+ } else {
+ /* no existing connection, try to add
+ * new one
+ */
+
+ if (no_new_cnxn) {
+ /* new cnxn not allowed, clear
+ * mask to drop
+ */
+ *pkts_mask ^= pkt_mask;
+ ct->counters->pkts_drop++;
+ ct->counters->
+ pkts_drop_invalid_conn++;
+ continue;
+ }
+
+ if (rte_ct_udp_new_connection(ct,
+ &new_cnxn_data, pkts[pkt_index])) {
+ /* This packet creates a
+ * connection
+ */
+ int32_t position =
+ rte_hash_add_key(ct->
+ rhash, key);
+
+ if (position < 0)
+ continue;
+
+ struct rte_ct_cnxn_data
+ *new_hash_entry = &ct->
+ hash_table_entries[position];
+
+ /*
+ * update fields in new_cnxn_data
+ * not set by "new_connection"
+ */
+
+ memcpy(new_cnxn_data.key, key,
+ sizeof(new_cnxn_data.key));
+
+ new_cnxn_data.
+ key_is_client_order
+ = key_is_client_order;
+ new_cnxn_data.protocol =
+ UDP_PROTOCOL;
+ rte_cnxn_ip_type(
+ &new_cnxn_data.type,
+ packet);
+ rte_memcpy(new_hash_entry,
+ &new_cnxn_data,
+ sizeof(struct
+ rte_ct_cnxn_data));
+
+ new_hash_entry->counters.
+ packets_forwarded = 1;
+ ct->counters->pkts_forwarded++;
+ new_hash_entry->counters.
+ packets_dropped = 0;
+ ct->counters->pkts_drop = 0;
+ ct->counters->
+ current_active_sessions++;
+ ct->counters->
+ sessions_activated++;
+
+ new_hash_entry->
+ state_used_for_timer
+ = RTE_CT_UDP_NONE;
+ rte_ct_set_cnxn_timer_for_udp(
+ ct,
+ new_hash_entry,
+ RTE_CT_UDP_UNREPLIED);
+
+ rte_ct_remember_new_connection(
+ ct,
+ new_hash_entry);
+ }
+ }
+
+ }
+
+ } /* UDP */
+ } /* packets_for_lookup */
+
+ if (CNXN_TRX_DEBUG > 1) {
+ printf("Exit cnxn tracker synproxy batch lookup with");
+ printf(" packet mask %p\n", (void *)*pkts_mask);
+ }
+}
diff --git a/common/VIL/conntrack/rte_cnxn_tracking.h b/common/VIL/conntrack/rte_cnxn_tracking.h
new file mode 100644
index 00000000..1efb60ef
--- /dev/null
+++ b/common/VIL/conntrack/rte_cnxn_tracking.h
@@ -0,0 +1,413 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef _CNXN_TRACKING_H
+#define _CNXN_TRACKING_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
+
+#include <rte_hash.h>
+#include <rte_ether.h>
+
+#include "rte_ct_tcp.h"
+
+
+/**
+ * @file
+ * Connection Tracker
+ *
+ * A Connection Tracker tracks the status of TCP connections. By remembering
+ * key pieces of data, such as connection state, sequence numbers seen, and
+ * transmission window size, it can determine if a given packet is valid or
+ * invalid and should be discarded.
+ *
+ * The current interface is designed for use with ip_pipeline code.
+ */
+
+/*
+ * Opaque type definition for an instance of the connection tracker. It is
+ * possible to have multiple instances of connection tracking running, on one
+ * or more cores. All traffic for a TCP connection must be run through the same
+ * rte_ct_cnxn_tracker.
+ */
+
+/*
+ * The rte_ct_cnxn_tracker is an instance of a connection tracker.
+ */
+struct rte_ct_cnxn_tracker __rte_cache_aligned;
+
+extern int rte_CT_hi_counter_block_in_use;
+
+struct rte_CT_counter_block {
+ /* as long as a counter doesn't cross cache line, writes are atomic */
+ uint64_t current_active_sessions;
+ uint64_t sessions_activated; /* a SYN packet seen, or UDP */
+ /* a SYN packet re-opening a connection */
+ uint64_t sessions_reactivated;
+ /* SYN, SYN/ACK, ACK established a connection */
+ uint64_t sessions_established;
+ uint64_t sessions_closed;
+ uint64_t sessions_timedout;
+ uint64_t pkts_forwarded;
+ uint64_t pkts_drop;
+ uint64_t pkts_drop_invalid_conn;
+ uint64_t pkts_drop_invalid_state;
+ uint64_t pkts_drop_invalid_rst;
+ uint64_t pkts_drop_outof_window;
+} __rte_cache_aligned;
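+
+/*
+ * Illustrative read of these counters (a sketch; "ct" is assumed to be an
+ * already-initialized tracker, and the block is updated on the tracker's
+ * packet-processing core):
+ *
+ * struct rte_CT_counter_block *cb = rte_ct_get_counter_address(ct);
+ * printf("active %" PRIu64 ", timed out %" PRIu64 "\n",
+ * cb->current_active_sessions, cb->sessions_timedout);
+ */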
+
+struct rte_synproxy_helper {
+ uint64_t reply_pkt_mask;
+ uint64_t hijack_mask;
+ struct rte_mbuf **buffered_pkts_to_forward;
+ uint8_t num_buffered_pkts_to_forward;
+};
+
+struct rte_CT_helper {
+ uint64_t no_new_cnxn_mask;
+ uint64_t reply_pkt_mask;
+ uint64_t hijack_mask;
+ struct rte_mbuf **buffered_pkts_to_forward;
+ uint8_t num_buffered_pkts_to_forward;
+};
+
+#define MAX_CT_INSTANCES 24 /* max number of fw threads; actual is usually less */
+
+extern struct rte_CT_counter_block rte_CT_counter_table[MAX_CT_INSTANCES]
+__rte_cache_aligned;
+
+/**
+ * Run the connection tracking for 1 to 64 packets.
+ *
+ * @param ct
+ * Instance of cnxn tracker to use.
+ * @param pkts
+ * Table of pointers to mbufs containing packets for connection tracking.
+ * Any packets which are not TCP/IP will be ignored. A maximum of 64
+ * packets may be processed in a call.
+ * @param pkts_mask
+ * Bit map representing which table elements of "pkts" are valid mbuf
+ * pointers, where the least-significant bit of the map represents table
+ * element 0. There must be at least as many elements in the table as the
+ * highest order bit in the map. Valid table entries with a corresponding
+ * 0 in the bitmap will be ignored.
+ * @param ct_helper
+ * Pointer to the rte_CT_helper structure which holds the connection
+ * tracking helper information for this call.
+ *
+ * @return
+ * Returns an updated bitmap that reflects which packets are valid and should
+ * be forwarded.
+ * Any bits representing invalid TCP packets are cleared.
+ * Any packets which are not TCP/IP are considered valid for this purpose.
+ */
+
+uint64_t
+rte_ct_cnxn_tracker_batch_lookup(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_CT_helper *ct_helper);
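+
+/*
+ * Illustrative call sequence (a sketch; "ct", "pkts" and "n_pkts" are
+ * assumed to be supplied by the caller, with n_pkts <= 64, and the helper
+ * is simply zeroed when synproxy features are not used):
+ *
+ * struct rte_CT_helper helper;
+ * memset(&helper, 0, sizeof(helper));
+ * uint64_t in_mask = (n_pkts == 64) ? ~0LLU : ((1LLU << n_pkts) - 1);
+ * uint64_t keep_mask = rte_ct_cnxn_tracker_batch_lookup(ct, pkts,
+ * in_mask, &helper);
+ * // drop any packet whose bit is cleared in keep_mask
+ * rte_ct_handle_expired_timers(ct);
+ */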
+
+void
+rte_ct_cnxn_tracker_batch_lookup_type(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t *pkts_mask,
+ struct rte_CT_helper *ct_helper,
+ uint8_t ip_hdr_size_bytes);
+
+
+/**
+ * Run the connection tracking for 1 to 64 packets.
+ *
+ * @param ct
+ * Instance of cnxn tracker to use.
+ * @param pkts
+ * Table of pointers to mbufs containing packets for connection tracking.
+ * Any packets which are not TCP/IP will be ignored. A maximum of 64
+ * packets may be processed in a call.
+ * @param pkts_mask
+ * Bit map representing which table elements of "pkts" are valid mbuf
+ * pointers, where the least-significant bit of the map represents table
+ * element 0. There must be at least as many elements in the table as the
+ * highest order bit in the map. Valid table entries with a corresponding
+ * 0 in the bitmap will be ignored.
+ * @param no_new_cnxn_mask
+ * Bit map representing which table elements of "pkts" are should be
+ * considered valid packets only if there is already an existing connection
+ * for this packet (i.e. same ip addresses, tcp/udp ports, and protocol).
+ * This mask must be a subset of "pkts_mask" (including all or none), and
+ * follows the same format. A 1 means must be existing connection, a 0 means
+ * a new connection setup (e.g. TCP SYN packet) is allowed, or this entry
+ * corresponds to a 0 in pkts_mask.
+ *
+ * @return
+ * Returns an updated bitmap that reflects which packets are valid and should
+ * be forwarded.
+ * Any bits representing invalid TCP packets are cleared.
+ * Any packets which are not TCP/IP are considered valid for this purpose.
+ */
+
+uint64_t
+rte_ct_cnxn_tracker_batch_lookup_with_new_cnxn_control(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ uint64_t no_new_cnxn_mask);
+
+
+/**
+* Run the connection tracking for 1 to 64 packets, with support for
+* synproxy.
+*
+* @param ct
+* Instance of cnxn tracker to use.
+* @param pkts
+* Table of pointers to mbufs containing packets for connection tracking.
+* Any packets which are not TCP/IP will be ignored. A maximum of 64
+* packets may be processed in a call.
+* @param pkts_mask
+* Bit map representing which table elements of "pkts" are valid mbuf pointers,
+* where the least-significant bit of the map represents table element 0. There
+* must be at least as many elements in the table as the highest order bit in
+* the map. Valid table entries with a corresponding 0 in the bitmap will be
+* ignored.
+* @param reply_pkt_mask
+* Bit map representing which table elements of "pkts" have been altered to
+* reply messages for synproxy. These packets, or copies of them, must be
+* sent back to the originator. The IP and TCP headers have been altered;
+* the ethernet header has not.
+* @return
+* Returns an updated bitmap that reflects which packets are valid and should
+* be forwarded. Any bits representing invalid TCP packets are cleared.
+* Any packets which are not TCP/IP are considered valid for this purpose.
+*/
+
+
+uint64_t
+rte_ct_cnxn_tracker_batch_lookup_with_synproxy(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_synproxy_helper *sp_helper);
+
+
+
+
+
+/**
+ * Synproxy might need to buffer client-side packets while the
+ * server-side of the proxy is still being set up. The packets
+ * are released when the server-side connection is complete.
+ * This routine is used to retrieve those packets. Packets are
+ * also released in a similar manner if there is a timeout
+ * during a synproxy setup. This routine should be called
+ * immediately before any timeout handling, to get the list of
+ * packets (if any) to forward, and again immediately after timeout
+ * handling to get the list of packets (if any) to delete.
+ * Calling this routine removes the packets from synproxy.
+ *
+ * @param ct
+ * The connection tracker from which to retrieve the packets
+ *
+ * @return
+ * a linked list of packets to process, in order. The list is
+ * connected via a pointer stored in the mbuf at the offset
+ * given in the "pointer_offset" parameter to the routine
+ * "rte_ct_initialize_cnxn_tracker_with_synproxy".
+ * If no packets are currently available, returns NULL.
+ */
+
+struct rte_mbuf *
+rte_ct_get_buffered_synproxy_packets(struct rte_ct_cnxn_tracker *ct);
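+
+/*
+ * Illustrative walk of the returned list (a sketch; "ct" and "offset" are
+ * placeholders, and "offset" must be the same pointer_offset that was
+ * passed to rte_ct_initialize_cnxn_tracker_with_synproxy()):
+ *
+ * struct rte_mbuf *pkt = rte_ct_get_buffered_synproxy_packets(ct);
+ * while (pkt != NULL) {
+ * struct rte_mbuf **next = (struct rte_mbuf **)
+ * RTE_MBUF_METADATA_UINT64_PTR(pkt, offset);
+ * struct rte_mbuf *following = *next;
+ * // forward or free pkt here
+ * pkt = following;
+ * }
+ */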
+
+
+/**
+ * Initialize a connection tracker instance before use, with synproxy support.
+ *
+ * @param new_cnxn_tracker
+ * The connection tracker to initialize, allocated by the user.
+ * @param max_connection_count
+ * Maximum number of simultaneous connections supported.
+ * @param name
+ * A name to give to this connection tracker, for debug purposes
+ * @param pointer_offset
+ * An offset into the mbuf where the connection tracker can store two pointers.
+ *
+ * @return
+ * - 0 if successful
+ * - negative if unsuccessful
+ */
+
+int
+rte_ct_initialize_cnxn_tracker_with_synproxy(
+ struct rte_ct_cnxn_tracker *new_cnxn_tracker,
+ uint32_t max_connection_count,
+ char *name,
+ uint16_t pointer_offset);
+
+/**
+ * Initialize a connection tracker instance before use.
+ *
+ * @param new_cnxn_tracker
+ * The connection tracker to initialize, allocated by the user.
+ * @param max_connection_count
+ * Maximum number of simultaneous connections supported.
+ * @param name
+ * A name to give to this connection tracker, for debug purposes
+ *
+ * @return
+ * - 0 if successful
+ * - negative if unsuccessful
+ */
+
+int
+rte_ct_initialize_cnxn_tracker(
+ struct rte_ct_cnxn_tracker *new_cnxn_tracker,
+ uint32_t max_connection_count,
+ char *name);
+
+
+/**
+ * Free resources allocated by earlier call to rte_ct_initialize_cnxn_tracker()
+ *
+ * @param old_cnxn_tracker
+ * The connection tracker previously initialized.
+ *
+ * @return
+ * - 0 if successful
+ * - < 0 if unsuccessful
+ */
+
+int
+rte_ct_free_cnxn_tracker_resources(
+ struct rte_ct_cnxn_tracker *old_cnxn_tracker);
+
+
+/**
+ * Get size of opaque type rte_ct_cnxn_tracker in order to allocate an instance.
+ *
+ * @return
+ * Size in bytes of rte_ct_cnxn_tracker type
+ */
+
+int
+rte_ct_get_cnxn_tracker_size(void);
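+
+/*
+ * Illustrative allocation sketch (assumes DPDK's rte_zmalloc; the
+ * connection count and name below are example values only):
+ *
+ * struct rte_ct_cnxn_tracker *ct = rte_zmalloc(NULL,
+ * rte_ct_get_cnxn_tracker_size(), RTE_CACHE_LINE_SIZE);
+ * if (ct == NULL || rte_ct_initialize_cnxn_tracker(ct, 4096, "ct0") < 0)
+ * rte_free(ct);
+ */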
+
+/**
+ * Get address of counters kept by this instance.
+ *
+ * @param ct
+ * Instance of cnxn tracker.
+ *
+ */
+
+struct rte_CT_counter_block*
+rte_ct_get_counter_address(struct rte_ct_cnxn_tracker *ct);
+
+
+/**
+ * Process a configuration option supported in the config file.
+ * If a valid name/value pair, update the cnxn tracker.
+ *
+ * @param ct
+ * Instance of cnxn tracker.
+ *
+ * @param name
+ * Name of configuration option.
+ *
+ * @param value
+ * Value of configuration option.
+ *
+ * @return
+ * - 0 if successful
+ * - < 0 if unsuccessful
+ */
+
+int
+rte_ct_set_configuration_options(
+ struct rte_ct_cnxn_tracker *ct,
+ char *name,
+ char *value);
+
+/**
+ * Check for expired connection tracking timers, and delete any expired
+ * connections. This routine must be called in the loop that processes packets,
+ * to ensure that timeouts are handled synchronously with packet processing.
+ * More frequent calls mean more accurate timing but more overhead.
+ *
+ * @param ct
+ * Instance of cnxn tracker to check timers.
+ *
+ */
+
+void
+rte_ct_handle_expired_timers(struct rte_ct_cnxn_tracker *ct);
+
+
+int
+rte_ct_get_IP_hdr_size(struct rte_mbuf *pkt);
+
+/**
+* Enable synproxy for this connection tracker.
+*
+* @param ct
+* Instance of cnxn tracker to enable.
+*
+*/
+
+void
+rte_ct_enable_synproxy(struct rte_ct_cnxn_tracker *ct);
+
+/**
+* Disable synproxy for this connection tracker.
+*
+* @param ct
+* Instance of cnxn tracker to disable.
+*
+*/
+
+void
+rte_ct_disable_synproxy(struct rte_ct_cnxn_tracker *ct);
+int
+rte_ct_initialize_default_timeouts(
+ struct rte_ct_cnxn_tracker *new_cnxn_tracker);
+
+uint8_t
+rte_ct_create_cnxn_hashkey(
+ uint32_t *src_addr,
+ uint32_t *dst_addr,
+ uint16_t src_port,
+ uint16_t dst_port,
+ uint8_t proto,
+ uint32_t *key,
+ uint8_t type);
+
+/* To get timer core id from CGNAPT timer thread*/
+#ifdef CT_CGNAT
+extern uint32_t get_timer_core_id(void);
+uint64_t cgnapt_ct_process(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_CT_helper *ct_helper);
+#endif
+#endif
diff --git a/common/VIL/conntrack/rte_ct_synproxy.c b/common/VIL/conntrack/rte_ct_synproxy.c
new file mode 100644
index 00000000..967596d1
--- /dev/null
+++ b/common/VIL/conntrack/rte_ct_synproxy.c
@@ -0,0 +1,873 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_icmp.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+
+#include "rte_ct_tcp.h"
+
+
+/*
+ * OVERVIEW:
+ * This module will behave as a proxy between an initiator (external client)
+ * and listener (internal server).
+ * (1) Proxy receives SYN from initiator, replies with spoofed SYN-ACK message
+ * No packet is sent to the listener at this time.
+ * (2) Proxy receives ACK from the initiator, so the connection request is
+ * considered valid. Proxy sends a spoofed SYN message to the listener.
+ * (3) Proxy receives SYN-ACK message from listener. Proxy replies to listener
+ * with a spoofed ACK message. The connection is considered established.
+ * (4) Traffic is exchanged between initiator and listener. Sequence and
+ * ack numbers are translated appropriately by the proxy.
+ */
+
+/*
+ * DETAILS, when SynProxy on:
+ * (1) receive initial SYN from client
+ * call CT, all new connections assigned spoofed (random) SEQ number
+ * packet re-purposed as SYN-ACK back to client with spoofed SEQ
+ * -> change ethernet, IP, and TCP headers, put on appropriate output ring
+ * (2) receive ACK packet from client
+ * connection request now considered valid
+ * packet re-purposed as SYN to server, using SEQ from original SYN
+ * -> change TCP header, put on output ring originally targeted
+ * (3) receive SYN-ACK packet from server
+ * connection now ESTABLISHED
+ * compute SEQ difference between spoofed SEQ and real server SEQ
+ * packet re-purposed as ACK to server
+ * -> change ethernet, IP, and TCP headers, put on appropriate output ring
+ * (4) all further packets flow normally, except SEQ and ACK numbers must be
+ * modified by SEQ diff (SEQ in server->client direction, ACK and SACK in
+ * client->server direction)
+ *
+ */
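+
+/*
+ * Illustrative arithmetic for step (4), a sketch of the adjustment applied
+ * further below once both halves of the proxied connection exist:
+ *
+ * seq_diff = real_server_isn - spoofed_isn;
+ * // client -> server: recv_ack += seq_diff (before the window check)
+ * // server -> client: sent_seq -= seq_diff (after the window check)
+ */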
+
+#define META_DATA_OFFSET 128
+#define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
+#define ETH_HDR_SIZE 14
+#define IP_START (ETHERNET_START + ETH_HDR_SIZE)
+#define PROTOCOL_START (IP_START + 9)
+#define IP_V4_HEADER_SIZE 20
+#define IP_V6_HEADER_SIZE 40
+#define TCP_START (IP_START + IP_V4_HEADER_SIZE)
+#define TCP_MIN_HDR_SIZE 20
+
+#define RTE_TCP_PROTO_ID 6
+#define RTE_SP_DEFAULT_TTL 64
+
+#define RTE_SYNPROXY_MAX_SPOOFED_PKTS 64
+
+#define RTE_TCP_SYN 0x02
+#define RTE_TCP_ACK 0x10
+#define RTE_TCP_SYN_ACK (RTE_TCP_SYN | RTE_TCP_ACK)
+
+#define RTE_SP_DEFAULT_WINDOW 29200
+#define RTE_CT_DEBUG_SPOOFED_SEQ 0
+#define RTE_DPDK_IS_16_4 0
+
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+
+
+/* default TCP options */
+/* TODO: need to set in config file */
+
+struct rte_synproxy_options default_ipv4_synproxy_options = {
+ .options = RTE_SP_OPTIONS_MSS |
+ RTE_SP_OPTIONS_SACK_PERM |
+ RTE_SP_OPTIONS_WINDOW_SCALE,
+ .mss = 1460,
+ .window_scale = 7,
+ .initial_window = RTE_SP_DEFAULT_WINDOW
+};
+
+
+struct rte_synproxy_options default_ipv6_synproxy_options = {
+ .options = RTE_SP_OPTIONS_MSS |
+ RTE_SP_OPTIONS_SACK_PERM |
+ RTE_SP_OPTIONS_WINDOW_SCALE,
+ .mss = 1440,
+ .window_scale = 7,
+ .initial_window = RTE_SP_DEFAULT_WINDOW
+};
+
+/* IP/TCP header print for debugging */
+static __rte_unused void
+rte_ct_synproxy_print_pkt_info(struct rte_mbuf *pkt)
+{
+ struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+ __rte_unused struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, TCP_START);
+ uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
+
+ printf("\npacket length %u, ip length %u\n", packet_length,
+ rte_bswap16(ihdr4->total_length));
+ rte_pktmbuf_dump(stdout, pkt, 80);
+}
+
+static inline void
+rte_sp_incremental_tcp_chksum_update_32(
+ uint32_t num_before, /* in Intel order, not network order */
+ uint32_t num_after, /* in Intel order, not network order */
+
+ uint16_t *chksum) /* network order, e.g. pointer into header */
+{
+ uint32_t sum;
+
+ sum = ~rte_bswap16(*chksum) & 0xffff;
+ num_before = ~num_before;
+ sum += (num_before >> 16) + (num_before & 0xffff);
+ sum += (num_after >> 16) + (num_after & 0xffff);
+ sum = (sum >> 16) + (sum & 0xffff);
+ sum += (sum >> 16);
+ *chksum = rte_bswap16(~sum & 0xffff);
+}
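+
+/*
+ * Illustrative use (a sketch mirroring the callers later in this file):
+ * after rewriting a 32-bit field in place, pass the old and new host-order
+ * values so the TCP checksum is patched incrementally instead of being
+ * recomputed over the whole segment:
+ *
+ * uint32_t before = rte_bswap32(thdr->recv_ack);
+ * uint32_t after = before + seq_diff;
+ * thdr->recv_ack = rte_bswap32(after);
+ * rte_sp_incremental_tcp_chksum_update_32(before, after, &thdr->cksum);
+ */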
+
+
+
+static inline uint32_t
+rte_sp_get_random_seq_number(void)
+{
+ return rte_get_tsc_cycles(); /* low 32 bits of timestamp*/
+}
+
+
+static int8_t rte_ct_ipversion(void *i_hdr)
+{
+ uint8_t *ihdr = (uint8_t *)i_hdr;
+ int8_t hdr_chk = *ihdr;
+
+ hdr_chk = hdr_chk >> 4;
+ if (hdr_chk == IP_VERSION_4 || hdr_chk == IP_VERSION_6)
+ return hdr_chk;
+ else
+ return -1;
+}
+
+static inline void
+rte_synproxy_adjust_pkt_length(struct rte_mbuf *pkt)
+{
+ uint16_t pkt_length = 0;
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(pkt);
+ void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+
+ if (ip_hdr_size_bytes == IP_V4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)iphdr;
+
+ pkt_length = rte_bswap16(ihdr4->total_length) + ETH_HDR_SIZE;
+ } else if (ip_hdr_size_bytes == IP_V6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr6 = (struct ipv6_hdr *)iphdr;
+
+ pkt_length = rte_bswap16(ihdr6->payload_len) +
+ IP_V6_HEADER_SIZE + ETH_HDR_SIZE;
+ }
+ uint16_t mbuf_pkt_length = rte_pktmbuf_pkt_len(pkt);
+
+ if (pkt_length == mbuf_pkt_length)
+ return;
+
+ if (pkt_length < mbuf_pkt_length) {
+ rte_pktmbuf_trim(pkt, mbuf_pkt_length - pkt_length);
+ return;
+ }
+
+ /* pkt_length > mbuf_pkt_length */
+ rte_pktmbuf_append(pkt, pkt_length - mbuf_pkt_length);
+}
+
+static void
+rte_synproxy_build_ipv4_header(
+ struct ipv4_hdr *hdr4,
+ uint32_t src_addr,
+ uint32_t dst_addr,
+ uint16_t tcp_length)
+{
+ /* TODO: consider interface re-work, too many rte_bswapxx */
+ /* options are not supported, so header size is fixed */
+ hdr4->version_ihl = 0x45;
+ hdr4->type_of_service = 0;
+ hdr4->total_length = rte_bswap16(tcp_length + IP_V4_HEADER_SIZE);
+ hdr4->packet_id = 0;
+ /* set Don't fragment bit, Intel order */
+ hdr4->fragment_offset = 0x0040;
+ hdr4->time_to_live = RTE_SP_DEFAULT_TTL;
+ hdr4->next_proto_id = RTE_TCP_PROTO_ID;
+ /* checksum calculated later */
+ hdr4->src_addr = rte_bswap32(src_addr);
+ hdr4->dst_addr = rte_bswap32(dst_addr);
+}
+
+
+static void
+rte_synproxy_build_ipv6_header(
+ struct ipv6_hdr *hdr6,
+ uint8_t *src_addr,
+ uint8_t *dst_addr,
+ uint16_t tcp_length)
+{
+ /* TODO: consider interface re-work, too many rte_bswapxx */
+ /* options are not supported, so header size is fixed */
+ uint8_t temp_src[16];
+ uint8_t temp_dst[16];
+
+ hdr6->vtc_flow = 0x60; /* Intel Order */
+ hdr6->payload_len = rte_bswap16(tcp_length);
+ hdr6->proto = RTE_TCP_PROTO_ID;
+ hdr6->hop_limits = RTE_SP_DEFAULT_TTL;
+ /* checksum calculated later */
+
+ /* must copy to temps to avoid overwriting */
+ rte_mov16(temp_src, src_addr);
+ rte_mov16(temp_dst, dst_addr);
+ rte_mov16(hdr6->src_addr, temp_src);
+ rte_mov16(hdr6->dst_addr, temp_dst);
+}
+
+/* add options specified in t_opts to TCP header in packet. */
+
+static uint16_t
+rte_sp_add_tcp_options(struct tcp_hdr *thdr,
+ const struct rte_synproxy_options *t_opts)
+{
+ uint32_t *options_ptr = (uint32_t *)(thdr + 1);
+ uint32_t *saved_ptr = options_ptr;
+ uint8_t options = t_opts->options;
+ uint32_t option_bytes; /* options built in groups of 4 bytes */
+
+ if (options & RTE_SP_OPTIONS_MSS) {
+ option_bytes = (RTE_CT_TCPOPT_MSS << 24) |
+ (RTE_CT_TCPOLEN_MSS << 16) | t_opts->mss;
+ *options_ptr++ = rte_bswap32(option_bytes);
+ }
+
+ if (options & RTE_SP_OPTIONS_TIMESTAMP) {
+ /* if both timestamp and sack permitted options,
+ * pack together
+ */
+ if (options & RTE_SP_OPTIONS_SACK_PERM)
+ option_bytes = (RTE_CT_TCPOPT_SACK_PERM << 24) |
+ (RTE_CT_TCPOLEN_SACK_PERM << 16);
+ else
+ option_bytes = (RTE_CT_TCPOPT_NOP << 24) |
+ (RTE_CT_TCPOPT_NOP << 16);
+
+ option_bytes |= (RTE_CT_TCPOPT_TIMESTAMP << 8) |
+ RTE_CT_TCPOLEN_TIMESTAMP;
+ *options_ptr++ = rte_bswap32(option_bytes);
+ *options_ptr++ = rte_bswap32(t_opts->ts_val);
+ *options_ptr++ = rte_bswap32(t_opts->ts_echo_reply);
+ } else if (options & RTE_SP_OPTIONS_SACK_PERM) {
+ option_bytes = (RTE_CT_TCPOPT_NOP << 24) |
+ (RTE_CT_TCPOPT_NOP << 16) |
+ (RTE_CT_TCPOPT_SACK_PERM << 8) |
+ RTE_CT_TCPOLEN_SACK_PERM;
+ *options_ptr++ = rte_bswap32(option_bytes);
+ }
+
+ if (options & RTE_SP_OPTIONS_WINDOW_SCALE) {
+ option_bytes = (RTE_CT_TCPOPT_NOP << 24) |
+ (RTE_CT_TCPOPT_WINDOW << 16) |
+ (RTE_CT_TCPOLEN_WINDOW << 8) |
+ t_opts->window_scale;
+ *options_ptr++ = rte_bswap32(option_bytes);
+ }
+
+ /* compute the data offset field, which is size of total
+ * TCP header in 32 bit words
+ */
+ /* TODO: diff from options ptr to thdr */
+ uint16_t data_offset_bytes = (uint16_t)RTE_PTR_DIFF(options_ptr,
+ saved_ptr) + sizeof(struct tcp_hdr);
+ thdr->data_off = (data_offset_bytes >> 2) << 4;
+
+ return data_offset_bytes;
+}
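+
+/*
+ * Worked example (illustrative): with MSS, SACK-permitted and window-scale
+ * enabled and no timestamp, three 4-byte option words are written, so
+ * data_offset_bytes is 20 + 12 = 32 and thdr->data_off becomes
+ * (32 >> 2) << 4 = 0x80, i.e. a 32-byte TCP header.
+ */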
+
+/* Build a TCP header.
+ * Note that the tcp_hdr must be in the appropriate location
+ * in an mbuf
+ * TODO: consider interface re-work, too many rte_bswapxx
+ */
+static inline uint16_t
+rte_synproxy_build_tcp_header(
+ __rte_unused struct rte_mbuf *old_pkt,
+ struct tcp_hdr *t_hdr,
+ uint16_t src_port,
+ uint16_t dst_port,
+ uint32_t seq,
+ uint32_t ack,
+ uint8_t flags,
+ const struct rte_synproxy_options *t_opts,
+ uint8_t add_options)
+{
+ t_hdr->src_port = rte_bswap16(src_port);
+ t_hdr->dst_port = rte_bswap16(dst_port);
+ t_hdr->sent_seq = rte_bswap32(seq);
+ t_hdr->recv_ack = rte_bswap32(ack);
+
+ t_hdr->tcp_flags = flags;
+ t_hdr->rx_win = t_opts->initial_window;
+ /* checksum calculated later */
+ t_hdr->tcp_urp = 0;
+
+ /* add tcp header options, if applicable */
+
+ uint16_t new_tcp_hdr_size = TCP_MIN_HDR_SIZE;
+
+ if (add_options)
+ new_tcp_hdr_size = rte_sp_add_tcp_options(t_hdr, t_opts);
+ else
+ t_hdr->data_off = (TCP_MIN_HDR_SIZE >> 2) << 4;
+
+ return new_tcp_hdr_size;
+}
+
+static void
+rte_synproxy_compute_checksums(void *i_hdr, struct tcp_hdr *t_hdr)
+{
+ /*
+ * Calculate IP and TCP checksums. Note that both checksum
+ * routines require the checksum fields to be set to zero,
+ * and that the returned checksum is already in the correct
+ * byte order, so no rte_bswap16 is required.
+ */
+
+ /* TODO: look into h/w computation of checksums */
+
+ int8_t hdr_chk = rte_ct_ipversion(i_hdr);
+
+ t_hdr->cksum = 0;
+
+ if (hdr_chk == IP_VERSION_4) {
+ struct ipv4_hdr *i4_hdr = (struct ipv4_hdr *)i_hdr;
+
+ i4_hdr->hdr_checksum = 0;
+ t_hdr->cksum = rte_ipv4_udptcp_cksum(i4_hdr, t_hdr);
+ i4_hdr->hdr_checksum = rte_ipv4_cksum(i4_hdr);
+ } else if (hdr_chk == IP_VERSION_6) {
+ struct ipv6_hdr *i6_hdr = (struct ipv6_hdr *)i_hdr;
+
+ t_hdr->cksum = rte_ipv6_udptcp_cksum(i6_hdr, t_hdr);
+ }
+}
+
+
+
+/*
+ * Building new packet headers:
+ * For IPv4 and IPv6 headers, no options and no fragmentation are supported.
+ * Header size is fixed.
+ * TCP header will (likely) have options, so header size is not fixed.
+ * TCP header will be built first, and size used in IP packet size calculation.
+ */
+void
+rte_sp_cvt_to_spoofed_client_synack(struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *old_pkt)
+{
+ /* old packet is syn from client. Change to a (spoofed)
+ * SYN-ACK to send back
+ */
+
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(old_pkt);
+ void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(old_pkt, IP_START);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(old_pkt, IP_START +
+ ip_hdr_size_bytes);
+ uint16_t tcp_header_size;
+
+ /* get a spoofed sequence number and save in the connection data */
+ uint32_t new_seq = rte_sp_get_random_seq_number();
+
+ if (RTE_CT_DEBUG_SPOOFED_SEQ)
+ new_seq = 10; /* something simple to aid debugging */
+
+ cd->ct_protocol.synproxy_data.original_spoofed_seq = new_seq;
+
+ /* build the TCP header, including reversing the port numbers. */
+ tcp_header_size = rte_synproxy_build_tcp_header(old_pkt, thdr,
+ rte_bswap16(thdr->dst_port),
+ rte_bswap16(thdr->src_port),
+ new_seq, rte_bswap32(thdr->sent_seq) + 1,
+ RTE_TCP_SYN_ACK,
+ ip_hdr_size_bytes == IP_V4_HEADER_SIZE ?
+ &default_ipv4_synproxy_options :
+ &default_ipv6_synproxy_options, 1);
+
+ /* reverse the source and destination addresses in the IP hdr */
+ if (ip_hdr_size_bytes == IP_V4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)iphdr;
+
+ rte_synproxy_build_ipv4_header(ihdr4,
+ rte_bswap32(ihdr4->dst_addr),
+ rte_bswap32(ihdr4->src_addr), tcp_header_size);
+
+ } else if (ip_hdr_size_bytes == IP_V6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr6 = (struct ipv6_hdr *)iphdr;
+
+ rte_synproxy_build_ipv6_header(ihdr6,
+ (uint8_t *)ihdr6->dst_addr,
+ (uint8_t *)ihdr6->src_addr, tcp_header_size);
+ }
+ rte_synproxy_adjust_pkt_length(old_pkt);
+ /* compute checksums */
+ rte_synproxy_compute_checksums(iphdr, thdr);
+
+}
+
+
+void
+rte_sp_cvt_to_spoofed_server_syn(struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *old_pkt)
+{
+ /* old packet is ACK from client. Change to (spoofed)
+ * SYN to send to server
+ */
+
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(old_pkt);
+ void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(old_pkt, IP_START);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(old_pkt, IP_START
+ + ip_hdr_size_bytes);
+ uint16_t tcp_header_size;
+
+ tcp_header_size = rte_synproxy_build_tcp_header(old_pkt, thdr,
+ rte_bswap16(thdr->src_port),
+ rte_bswap16(thdr->dst_port),
+ rte_bswap32(thdr->sent_seq) - 1, 0,
+ RTE_TCP_SYN,
+ &cd->ct_protocol.synproxy_data.cnxn_options, 1);
+
+ if (ip_hdr_size_bytes == IP_V4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)iphdr;
+
+ rte_synproxy_build_ipv4_header(ihdr4,
+ rte_bswap32(ihdr4->src_addr),
+ rte_bswap32(ihdr4->dst_addr), tcp_header_size);
+ } else if (ip_hdr_size_bytes == IP_V6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr6 = (struct ipv6_hdr *)iphdr;
+
+ rte_synproxy_build_ipv6_header(ihdr6,
+ (uint8_t *)ihdr6->src_addr,
+ (uint8_t *)ihdr6->dst_addr, tcp_header_size);
+ }
+
+ rte_synproxy_adjust_pkt_length(old_pkt);
+ /* compute checksums */
+ rte_synproxy_compute_checksums(iphdr, thdr);
+
+}
+
+void
+rte_sp_cvt_to_spoofed_server_ack(struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *old_pkt)
+{
+ /* old packet is SYN-ACK from server. Change to spoofed ACK and
+ * send back to server
+ */
+
+ int ip_hdr_size_bytes = rte_ct_get_IP_hdr_size(old_pkt);
+ void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(old_pkt, IP_START);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(old_pkt, IP_START +
+ ip_hdr_size_bytes);
+
+ /* read real seq out of SYN-ACK from server, and save the delta from
+ * the spoofed one
+ */
+ uint32_t real_seq = rte_bswap32(thdr->sent_seq);
+ uint16_t tcp_header_size;
+
+ cd->ct_protocol.synproxy_data.seq_diff =
+ real_seq - cd->ct_protocol.synproxy_data.original_spoofed_seq;
+
+ /* reverse the source and destination addresses */
+ tcp_header_size = rte_synproxy_build_tcp_header(old_pkt, thdr,
+ rte_bswap16(thdr->dst_port),
+ rte_bswap16(thdr->src_port),
+ rte_bswap32(thdr->recv_ack),
+ rte_bswap32(thdr->sent_seq) + 1, RTE_TCP_ACK,
+ &cd->ct_protocol.synproxy_data.cnxn_options, 0);
+
+ /* reverse the source and destination addresses in the IP hdr */
+ if (ip_hdr_size_bytes == IP_V4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr4 = (struct ipv4_hdr *)iphdr;
+
+ rte_synproxy_build_ipv4_header(ihdr4,
+ rte_bswap32(ihdr4->dst_addr),
+ rte_bswap32(ihdr4->src_addr), tcp_header_size);
+
+ } else if (ip_hdr_size_bytes == IP_V6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr6 = (struct ipv6_hdr *)iphdr;
+
+ rte_synproxy_build_ipv6_header(ihdr6,
+ (uint8_t *)ihdr6->dst_addr,
+ (uint8_t *)ihdr6->src_addr, tcp_header_size);
+ }
+ rte_synproxy_adjust_pkt_length(old_pkt);
+ /* compute checksums */
+ rte_synproxy_compute_checksums(iphdr, thdr);
+}
+
+/*
+ * If running synproxy and both halves of the proxied connection have been
+ * established, the seq or ack value of the packet needs to be adjusted.
+ * The value is adjusted by the difference between the spoofed server
+ * initial sequence number and the real server sequence number.
+ * In the client -> server direction, the ack must be increased by the
+ * difference before the window check.
+ * In the server -> client direction, the seq must be decreased by the
+ * difference after the window check.
+ */
+
+
+void
+rte_sp_adjust_server_seq_after_window_check(
+ struct rte_ct_cnxn_data *cd,
+ __rte_unused void *i_hdr,
+ struct tcp_hdr *thdr,
+ enum rte_ct_pkt_direction dir)
+{
+ uint32_t num_before, num_after;
+
+ if (!cd->ct_protocol.synproxy_data.cnxn_established)
+ return;
+
+ if (dir == RTE_CT_DIR_ORIGINAL)
+ return; /*wrong direction */
+
+
+ /* update appropriate number (seq or ack) in header */
+ num_before = rte_bswap32(thdr->sent_seq);
+ num_after = num_before - cd->ct_protocol.synproxy_data.seq_diff;
+ thdr->sent_seq = rte_bswap32(num_after);
+
+ rte_sp_incremental_tcp_chksum_update_32(num_before, num_after,
+ &thdr->cksum);
+}
+
+
+static void
+rte_sp_adjust_client_sack_entries(
+ struct tcp_hdr *thdr,
+ uint32_t diff)
+{
+ uint32_t num_before, num_after;
+ uint32_t *sack_ptr;
+ uint8_t sack_blk_size;
+ uint16_t dataoff_in_bytes = (thdr->data_off & 0xf0) >> 2;
+ uint16_t length = dataoff_in_bytes - sizeof(struct tcp_hdr);
+
+ if (!length)
+ return;
+
+ uint8_t *options_ptr = (uint8_t *)(thdr + 1);
+
+ while (length > 0) {
+ uint8_t opcode = *options_ptr;
+ uint8_t opsize = options_ptr[1];
+ int i;
+
+ switch (opcode) {
+
+ case RTE_CT_TCPOPT_EOL:
+ return; /* end of options */
+
+ case RTE_CT_TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
+ length--;
+ options_ptr++;
+ continue;
+
+ case RTE_CT_TCPOPT_SACK:
+ /*
+ * SACK (selective ACK) contains a block of 1 to 4
+ * entries of 8 bytes each. Each entry is a pair of
+ * 32 bit numbers. This block follows the usual 2
+ * bytes for opcode and opsize. Thus, the entire SACK
+ * option must be 10, 18, 26 or 34 bytes long.
+ */
+
+ sack_blk_size = opsize - 2;
+ /* start of entries */
+ sack_ptr = (uint32_t *)(options_ptr + 2);
+ /* count of 32 bit elements */
+ int num_acks = sack_blk_size >> 2;
+
+ if (unlikely(sack_blk_size > 32 ||
+ ((sack_blk_size & 0x3) != 0))) {
+ printf("Sack block parsing failure\n");
+ return;
+ }
+
+ for (i = 0; i < num_acks; i++) {
+ num_before = rte_bswap32(*sack_ptr);
+ num_after = num_before + diff;
+ *sack_ptr = rte_bswap32(num_after);
+ sack_ptr++;
+ rte_sp_incremental_tcp_chksum_update_32(
+ num_before,
+ num_after,
+ &thdr->cksum);
+ }
+
+ return;
+ default:
+ break;
+ }
+ if ((opsize < 2) || (opsize > length)) {
+ printf("ERROR!, opsize %i, length %i\n",
+ opsize, length);
+ return;
+ }
+
+ options_ptr += opsize;
+ length -= opsize;
+ }
+}
+
+void
+rte_sp_adjust_client_ack_before_window_check(
+ struct rte_ct_cnxn_data *cd,
+ __rte_unused void *i_hdr,
+ struct tcp_hdr *thdr,
+ enum rte_ct_pkt_direction dir)
+{
+ uint32_t num_before, num_after;
+
+ if (!cd->ct_protocol.synproxy_data.cnxn_established)
+ return;
+
+ if (dir != RTE_CT_DIR_ORIGINAL)
+ return; /*wrong direction */
+
+
+ /* first update appropriate number (seq or ack) in header */
+ num_before = rte_bswap32(thdr->recv_ack);
+ num_after = num_before + cd->ct_protocol.synproxy_data.seq_diff;
+ thdr->recv_ack = rte_bswap32(num_after);
+ rte_sp_incremental_tcp_chksum_update_32(num_before,
+ num_after, &thdr->cksum);
+
+ /* update SACK entries in header if any */
+
+ if (1) { /* TODO: check if sack permitted before calling */
+ rte_sp_adjust_client_sack_entries(thdr,
+ cd->ct_protocol.synproxy_data.seq_diff);
+ /* note that tcp hdr checksum adjusted in above sack
+ * entries routine call
+ */
+ }
+}
+
+
+
+
+/* parse the tcp header options, if any, and save interesting ones */
+static void
+rte_sp_parse_tcp_options(
+ uint8_t *options_ptr,
+ uint16_t length,
+ struct rte_synproxy_options *t_opts)
+{
+ int opsize;
+
+ t_opts->options = 0;
+
+ while (length > 0) {
+ uint8_t opcode = *options_ptr++;
+
+ if (opcode == RTE_CT_TCPOPT_EOL)
+ return;
+
+ if (opcode == RTE_CT_TCPOPT_NOP) {
+ length--;
+ continue; /* skip adjustments at loop bottom */
+ }
+
+ opsize = *options_ptr++;
+
+ if (unlikely(opsize < 2 || opsize > length)) {
+ /* TODO: Change printf to log */
+ printf("parsing error, opsize: %i, length: %i\n",
+ opsize, length);
+ return;
+ }
+
+ switch (opcode) {
+
+ case RTE_CT_TCPOPT_MSS:
+ if (opsize == RTE_CT_TCPOLEN_MSS) {
+ uint16_t *mss_ptr = (uint16_t *)options_ptr;
+
+ t_opts->mss = rte_bswap16(*mss_ptr);
+ t_opts->options |= RTE_SP_OPTIONS_MSS;
+ }
+ break;
+
+ case RTE_CT_TCPOPT_WINDOW:
+ if (opsize == RTE_CT_TCPOLEN_WINDOW) {
+ t_opts->window_scale = RTE_MIN(*options_ptr,
+ RTE_CT_MAX_TCP_WINDOW_SCALE);
+ t_opts->options |= RTE_SP_OPTIONS_WINDOW_SCALE;
+ }
+ break;
+
+ case RTE_CT_TCPOPT_TIMESTAMP:
+ if (opsize == RTE_CT_TCPOLEN_TIMESTAMP) {
+ uint32_t *ts_val_ptr = (uint32_t *)options_ptr;
+ uint32_t *ts_ecr_ptr =
+ (uint32_t *)(options_ptr + 4);
+ t_opts->ts_val = rte_bswap32(*ts_val_ptr);
+ t_opts->ts_echo_reply =
+ rte_bswap32(*ts_ecr_ptr);
+ t_opts->options |= RTE_SP_OPTIONS_TIMESTAMP;
+ }
+ break;
+
+ case RTE_CT_TCPOPT_SACK_PERM:
+ if (opsize == RTE_CT_TCPOLEN_SACK_PERM)
+ t_opts->options |= RTE_SP_OPTIONS_SACK_PERM;
+ break;
+
+ default:
+ break;
+ }
+
+ options_ptr += opsize - 2;
+ length -= opsize;
+
+ }
+}
+
+/* parse the tcp header options, if any, and save interesting ones in t_opts */
+void
+rte_sp_parse_options(struct rte_mbuf *pkt, struct rte_ct_cnxn_data *cd)
+{
+ /* skip over IPv4 or IPv6 header */
+ int ip_hdr_length = rte_ct_get_IP_hdr_size(pkt);
+ struct tcp_hdr *thdr = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START + ip_hdr_length);
+ uint8_t *opt_ptr = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ (IP_START + ip_hdr_length + sizeof(struct tcp_hdr)));
+
+ struct rte_synproxy_options *t_opts =
+ &cd->ct_protocol.synproxy_data.cnxn_options;
+ int length_in_bytes =
+ ((thdr->data_off & 0xf0) >> 2) - sizeof(struct tcp_hdr);
+
+ rte_sp_parse_tcp_options(opt_ptr, length_in_bytes, t_opts);
+ t_opts->initial_window = thdr->rx_win;
+}
+
+
+
+
+struct rte_mbuf *
+rte_ct_get_buffered_synproxy_packets(
+ struct rte_ct_cnxn_tracker *ct)
+{
+ struct rte_mbuf *trkr_list = ct->buffered_pkt_list;
+
+ ct->buffered_pkt_list = NULL;
+ return trkr_list;
+}
+
+
+
+void rte_ct_enable_synproxy(struct rte_ct_cnxn_tracker *ct)
+{
+ ct->misc_options.synproxy_enabled = 1;
+ printf("rte_ct_enable_synproxy = %d\n",
+ ct->misc_options.synproxy_enabled);
+}
+
+void rte_ct_disable_synproxy(struct rte_ct_cnxn_tracker *ct)
+{
+ ct->misc_options.synproxy_enabled = 0;
+ //printf("rte_ct_disable_synproxy = %d\n",
+ // ct->misc_options.synproxy_enabled);
+}
+
+void
+rte_ct_buffer_packet(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *pkt)
+{
+ /*
+ * Add packet to list of buffered packets for the connection.
+ * List is built in reverse of order received by adding to front.
+ * List will later be reversed to maintain order of arrival.
+ */
+
+ struct rte_mbuf **next = (struct rte_mbuf **)
+ RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ ct->pointer_offset);
+ *next = cd->ct_protocol.synproxy_data.buffered_pkt_list;
+ cd->ct_protocol.synproxy_data.buffered_pkt_list = pkt;
+}
+
+void
+rte_ct_release_buffered_packets(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd)
+{
+ struct rte_mbuf *cnxn_list =
+ cd->ct_protocol.synproxy_data.buffered_pkt_list;
+
+ if (cnxn_list == NULL)
+ return;
+
+ cd->ct_protocol.synproxy_data.buffered_pkt_list = NULL;
+
+ struct rte_mbuf *trkr_list = ct->buffered_pkt_list;
+
+ /*
+ * walk the cnxn_list, and add to front of trkr_list, reversing order
+ * and thus restoring original order. Order between different
+ * connections is irrelevant.
+ */
+ while (cnxn_list != NULL) {
+ struct rte_mbuf *old_next;
+
+ struct rte_mbuf **next = (struct rte_mbuf **)
+ RTE_MBUF_METADATA_UINT64_PTR(cnxn_list,
+ ct->pointer_offset);
+
+ old_next = *next; /* save next cd packet */
+ *next = trkr_list;/* make this cd packet point to ct list */
+ trkr_list = cnxn_list;/* make the cd packet head of ct list */
+ cnxn_list = old_next; /* advance along cd list */
+ }
+ ct->buffered_pkt_list = trkr_list;
+}
diff --git a/common/VIL/conntrack/rte_ct_tcp.c b/common/VIL/conntrack/rte_ct_tcp.c
new file mode 100644
index 00000000..073c63ed
--- /dev/null
+++ b/common/VIL/conntrack/rte_ct_tcp.c
@@ -0,0 +1,1116 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include <immintrin.h>
+#include <inttypes.h>
+#include "rte_ct_tcp.h"
+#include "rte_cnxn_tracking.h"
+
+/* uint32_t CT_DEBUG = 1; */ /* Can be used to conditionally turn off debug */
+#define CT_DEBUG 0
+#define STATE_TRACKING 0
+#define RTE_CT_ASSERT 0
+
+/* constants for mbuff manipulation */
+#define META_DATA_OFFSET 128
+#define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
+#define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
+#define ETH_HDR_SIZE 14
+#define IP_START (ETHERNET_START + ETH_HDR_SIZE)
+
+#define IPv4_HEADER_SIZE 20
+#define IPv6_HEADER_SIZE 40
+
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+
+#define rte_after(seq2, seq1) rte_before(seq1, seq2)
+static inline uint8_t rte_before(uint32_t seq1, uint32_t seq2)
+{
+ return (int32_t) (seq1 - seq2) < 0;
+}
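+
+/*
+ * Example (illustrative): the comparison is wraparound-safe, so
+ * rte_before(0xfffffff0, 0x10) is true even though 0xfffffff0 > 0x10 as
+ * plain unsigned values, because (int32_t)(0xfffffff0 - 0x10) is negative.
+ */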
+
+/* short state names for defining state table */
+
+#define ctNO RTE_CT_TCP_NONE
+#define ctSS RTE_CT_TCP_SYN_SENT
+#define ctSR RTE_CT_TCP_SYN_RECV
+#define ctES RTE_CT_TCP_ESTABLISHED
+#define ctFW RTE_CT_TCP_FIN_WAIT
+#define ctCW RTE_CT_TCP_CLOSE_WAIT
+#define ctLA RTE_CT_TCP_LAST_ACK
+#define ctTW RTE_CT_TCP_TIME_WAIT
+#define ctCL RTE_CT_TCP_CLOSE
+#define ctS2 RTE_CT_TCP_SYN_SENT_2
+#define ctIV RTE_CT_TCP_MAX
+#define ctIG RTE_CT_TCP_IGNORE
+
+static const uint8_t rte_ct_tcp_state_table[2][6][RTE_CT_TCP_MAX] = {
+ { /* "client" direction, i.e. first SYN sent */
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* syn */ {ctSS, ctSS, ctIG, ctIG, ctIG, ctIG, ctIG, ctSS, ctSS,
+ ctS2},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* synack */ {ctIV, ctIV, ctSR, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV,
+ ctSR},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* fin */ {ctIV, ctIV, ctFW, ctFW, ctLA, ctLA, ctLA, ctTW, ctCL,
+ ctIV},
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* ack */ {ctES, ctIV, ctES, ctES, ctCW, ctCW, ctTW, ctTW, ctCL,
+ ctIV},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* rst */ {ctIV, ctCL, ctCL, ctCL, ctCL, ctCL, ctCL, ctCL, ctCL,
+ ctCL},
+ /* ill */ {ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV}
+ },
+
+ { /* "server" direction */
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* syn */ {ctIV, ctS2, ctIV, ctIV, ctIV, ctIV, ctIV, ctSS, ctIV,
+ ctS2},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* synack */ {ctIV, ctSR, ctIG, ctIG, ctIG, ctIG, ctIG, ctIG, ctIG,
+ ctSR},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* fin */ {ctIV, ctIV, ctFW, ctFW, ctLA, ctLA, ctLA, ctTW, ctCL,
+ ctIV},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* ack */ {ctIV, ctIG, ctSR, ctES, ctCW, ctCW, ctTW, ctTW, ctCL,
+ ctIG},
+
+ /* ctNO, ctSS, ctSR, ctES, ctFW, ctCW, ctLA, ctTW, ctCL, ctS2 */
+ /* rst */ {ctIV, ctCL, ctCL, ctCL, ctCL, ctCL, ctCL, ctCL, ctCL,
+ ctCL},
+ /* ill */ {ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV, ctIV}
+ }
+};
+
+/* What TCP flags are set from RST/SYN/FIN/ACK. */
+enum rte_tcp_flag {
+ RTE_CT_TCP_SYN_FLAG,
+ RTE_CT_TCP_SAK_FLAG, /* SYN ACK */
+ RTE_CT_TCP_FIN_FLAG,
+ RTE_CT_TCP_ACK_FLAG,
+ RTE_CT_TCP_RST_FLAG,
+ RTE_CT_TCP_ILL_FLAG,
+};
+
+static uint8_t rte_ct_tcp_flags_to_state_table_index[16] = {
+ /* A R S F */
+ RTE_CT_TCP_ILL_FLAG, /* 0 0 0 0 */
+ RTE_CT_TCP_FIN_FLAG, /* 0 0 0 1 */
+ RTE_CT_TCP_SYN_FLAG, /* 0 0 1 0 */
+ RTE_CT_TCP_ILL_FLAG, /* 0 0 1 1 */
+ RTE_CT_TCP_RST_FLAG, /* 0 1 0 0 */
+ RTE_CT_TCP_RST_FLAG, /* 0 1 0 1 */
+ RTE_CT_TCP_RST_FLAG, /* 0 1 1 0 */
+ RTE_CT_TCP_ILL_FLAG, /* 0 1 1 1 */
+
+ RTE_CT_TCP_ACK_FLAG, /* 1 0 0 0 */
+ RTE_CT_TCP_FIN_FLAG, /* 1 0 0 1 */
+ RTE_CT_TCP_SAK_FLAG, /* 1 0 1 0 */
+ RTE_CT_TCP_ILL_FLAG, /* 1 0 1 1 */
+ RTE_CT_TCP_RST_FLAG, /* 1 1 0 0 */
+ RTE_CT_TCP_ILL_FLAG, /* 1 1 0 1 */
+ RTE_CT_TCP_RST_FLAG, /* 1 1 1 0 */
+ RTE_CT_TCP_ILL_FLAG, /* 1 1 1 1 */
+};
+
+static inline uint8_t
+rte_ct_get_index(uint8_t tcp_flags)
+{
+ uint8_t important_flags;
+
+ tcp_flags &= 0x3f; /* clear off optional flags */
+ important_flags = ((tcp_flags & 0x10) >> 1) | (tcp_flags & 7);
+ /* should be _pext_u32(tcp_flags, 0x17) */
+
+ if (unlikely((tcp_flags == 0) || (tcp_flags == 0x3f)))
+ /* these known as null and christmas tree respectively */
+ return RTE_CT_TCP_ILL_FLAG;
+
+ return rte_ct_tcp_flags_to_state_table_index[important_flags];
+
+}
+
+static inline int
+rte_ct_either_direction_has_flags(struct rte_ct_cnxn_data *cd, uint8_t flags)
+{
+ return ((cd->ct_protocol.tcp_ct_data.seen[0].flags | cd->
+ ct_protocol.tcp_ct_data.seen[1].flags) & flags) != 0;
+}
+
+static inline uint32_t rte_ct_seq_plus_length(struct rte_mbuf *pkt,
+ uint8_t ip_hdr_size)
+{
+ uint16_t pkt_length = 0;
+ struct tcp_hdr *tcpheader =
+ (struct tcp_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START +
+ ip_hdr_size));
+ uint32_t tcp_hdr_size = (tcpheader->data_off & 0xf0) >> 2;
+
+ void *ip_hdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+
+ if (ip_hdr_size == IPv4_HEADER_SIZE) {
+ struct ipv4_hdr *ihdr = (struct ipv4_hdr *)ip_hdr;
+
+ pkt_length = rte_bswap16(ihdr->total_length);
+ }
+ if (ip_hdr_size == IPv6_HEADER_SIZE) {
+ struct ipv6_hdr *ihdr = (struct ipv6_hdr *)ip_hdr;
+
+ pkt_length = rte_bswap16(ihdr->payload_len) + IPv6_HEADER_SIZE;
+ }
+
+ /*
+ * Return sequence number plus the length of TCP segment (payload).
+ * SYN & FIN are each considered one byte, but it is illegal
+ * to have them together in one header (checked elsewhere)
+ */
+
+
+ return rte_bswap32(tcpheader->sent_seq) +
+ pkt_length - ip_hdr_size - tcp_hdr_size +
+ ((tcpheader->tcp_flags & (RTE_CT_TCPHDR_SYN | RTE_CT_TCPHDR_FIN)) !=
+ 0 ? 1 : 0);
+
+}
+
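+/*
+ * Parse the TCP options of a packet and record the window scale factor and
+ * the SACK-permitted flag in the given per-direction state.
+ */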
+static void
+rte_ct_check_for_scaling_and_sack_perm(
+ struct rte_mbuf *pkt,
+ struct rte_ct_tcp_state *state,
+ uint8_t ip_hdr_size)
+{
+
+ struct tcp_hdr *tcpheader =
+ (struct tcp_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START +
+ ip_hdr_size));
+ uint32_t dataoff_in_bytes = (tcpheader->data_off & 0xf0) >> 2;
+ uint32_t length = dataoff_in_bytes - sizeof(struct tcp_hdr);
+
+ state->scale = 0;
+ state->flags = 0;
+
+ if (length == 0)
+ /* no header options */
+ return;
+ uint8_t *options_ptr =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ (IP_START + ip_hdr_size +
+ sizeof(struct tcp_hdr)));
+
+ while (length > 0) {
+ uint8_t option = *options_ptr;
+ uint8_t opsize = options_ptr[1];
+ /* opsize reset for NOPs below */
+
+ switch (option) {
+
+ case RTE_CT_TCPOPT_EOL:
+ /* end of options */
+ return;
+
+ case RTE_CT_TCPOPT_NOP:
+ options_ptr++;
+ length--;
+ continue;
+
+ case RTE_CT_TCPOPT_SACK_PERM:
+ if (opsize == RTE_CT_TCPOLEN_SACK_PERM)
+ state->flags |= RTE_CT_TCP_FLAG_SACK_PERM;
+ break;
+
+ case RTE_CT_TCPOPT_WINDOW:
+ if (opsize == RTE_CT_TCPOLEN_WINDOW) {
+ state->scale =
+ RTE_MIN(options_ptr[2],
+ RTE_CT_MAX_TCP_WINDOW_SCALE);
+ state->flags |= RTE_CT_TCP_FLAG_WINDOW_SCALE;
+ }
+ break;
+
+ default:
+ break;
+
+ }
+
+ if ((opsize < 2) || (opsize > length)) {
+ /* something wrong */
+ printf("scaling_and_sack_perm:something wrong\n");
+ return;
+ }
+ options_ptr += opsize;
+ length -= opsize;
+
+ }
+}
+
+static void
+rte_ct_tcpdisplay_hdr(struct tcp_hdr *tcpheader)
+{
+ printf("Tcp header: src_port=%d", rte_bswap16(tcpheader->src_port));
+ printf(", dst_port=%d", rte_bswap16(tcpheader->dst_port));
+ printf(", sent_seq=%u", rte_bswap32(tcpheader->sent_seq));
+ printf(", recv_ack=%u", rte_bswap32(tcpheader->recv_ack));
+ printf(",data_off=%d", tcpheader->data_off / 16);
+ printf(",tcp_flags=%02x", tcpheader->tcp_flags);
+ printf(", rx_win=%d\n", rte_bswap16(tcpheader->rx_win));
+
+}
+
+static inline void
+rte_ct_clear_cnxn_data(__rte_unused struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ __rte_unused struct rte_mbuf *pkt)
+{
+ /* clear all tcp connection data, then set up individual fields */
+
+ memset(&cd->ct_protocol.tcp_ct_data, 0,
+ sizeof(cd->ct_protocol.tcp_ct_data));
+ cd->ct_protocol.tcp_ct_data.last_index = RTE_CT_TCP_ILL_FLAG;
+
+}
+
+enum rte_ct_packet_action
+rte_ct_tcp_new_connection(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *pkt,
+ int use_synproxy,
+ uint8_t ip_hdr_size)
+{
+ struct tcp_hdr *tcpheader =
+ (struct tcp_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START + ip_hdr_size));
+
+ enum rte_ct_tcp_states new_state;
+ uint8_t index;
+ struct rte_ct_tcp_state *sender =
+ &cd->ct_protocol.tcp_ct_data.seen[RTE_CT_DIR_ORIGINAL];
+ struct rte_ct_tcp_state *receiver =
+ &cd->ct_protocol.tcp_ct_data.seen[RTE_CT_DIR_REPLY];
+ uint16_t win;
+
+ if (CT_DEBUG)
+ rte_ct_tcpdisplay_hdr(tcpheader);
+
+ index = rte_ct_get_index(tcpheader->tcp_flags);
+ new_state = rte_ct_tcp_state_table[0][index][RTE_CT_TCP_NONE];
+
+ if (unlikely(new_state >= RTE_CT_TCP_MAX)) {
+ if (CT_DEBUG)
+ printf("invalid new state with flags %02x\n",
+ tcpheader->tcp_flags);
+ return RTE_CT_DROP_PACKET;
+ }
+ /*
+ * A normal connection starts with a SYN packet. However, it is possible
+ * that an ongoing connection has been routed here somehow. Support
+ * for these connections is optional.
+ */
+
+ if (unlikely((new_state != RTE_CT_TCP_SYN_SENT
+ && ct->misc_options.tcp_loose == 0))) {
+ /* Not a standard connection start and not supporting
+ * ongoing connections. */
+ return RTE_CT_DROP_PACKET;
+ }
+
+ if (CT_DEBUG)
+ printf(" new connection with state %s\n",
+ rte_ct_tcp_names[new_state]);
+
+ /* clear all tcp connection data, then set up individual fields */
+ rte_ct_clear_cnxn_data(ct, cd, pkt);
+ cd->ct_protocol.tcp_ct_data.state = new_state;
+
+ sender->end = sender->maxend = rte_ct_seq_plus_length(pkt, ip_hdr_size);
+ win = rte_bswap16(tcpheader->rx_win);
+ sender->maxwin = RTE_MAX(win, (uint32_t)1);
+
+ if (likely(new_state == RTE_CT_TCP_SYN_SENT)) {
+ /* check for window scaling and selective ACK */
+ rte_ct_check_for_scaling_and_sack_perm(pkt, sender,
+ ip_hdr_size);
+
+ cd->ct_protocol.synproxy_data.synproxied = use_synproxy;
+
+ if (use_synproxy) {
+ /*
+ * new connection from client using synproxy. The proxy
+ * must send back a SYN-ACK
+ */
+
+
+ if (CT_DEBUG > 2)
+ printf("synproxy sending SYN-ACK to client\n");
+
+ return RTE_CT_SEND_CLIENT_SYNACK;
+ }
+ } else {
+ /*
+ * An ongoing connection. Be very liberal, since all the
+ * original setup data has been lost. Assume SACK and
+ * liberal window checking to handle unknown window scaling.
+ */
+
+ sender->maxend += sender->maxwin;
+ sender->flags = receiver->flags =
+ (RTE_CT_TCP_FLAG_SACK_PERM | RTE_CT_TCP_FLAG_BE_LIBERAL);
+ }
+
+ if (CT_DEBUG > 0) {
+ printf("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i",
+ sender->end, sender->maxend, sender->maxwin,
+ sender->scale);
+ printf(" receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+ receiver->end, receiver->maxend,
+ receiver->maxwin,
+ receiver->scale);
+ }
+
+ return RTE_CT_OPEN_CONNECTION;
+}
+
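+/*
+ * Walk the TCP options and return the highest SACK right edge seen, falling
+ * back to the plain acknowledgment number when no SACK block is present.
+ */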
+static uint32_t
+rte_ct_tcp_sack(struct rte_mbuf *pkt, uint8_t ip_hdr_size)
+{
+ struct tcp_hdr *tcpheader =
+ (struct tcp_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START +
+ ip_hdr_size));
+ uint16_t dataoff_in_bytes = (tcpheader->data_off & 0xf0) >> 2;
+ uint16_t length = dataoff_in_bytes - sizeof(struct tcp_hdr);
+ uint32_t sack = rte_bswap32(tcpheader->recv_ack);
+
+ if (unlikely(!length))
+ return sack;
+
+ uint8_t *options_ptr = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ (IP_START + ip_hdr_size + sizeof(struct tcp_hdr)));
+
+ while (length > 0) {
+ uint8_t opcode = *options_ptr;
+ uint8_t opsize = options_ptr[1];
+ int i;
+ uint32_t *sack_ptr;
+
+ switch (opcode) {
+ case RTE_CT_TCPOPT_TIMESTAMP:
+ /* common "solo" option, check first */
+ break;
+
+ case RTE_CT_TCPOPT_EOL:
+ return sack; /* end of options */
+
+ case RTE_CT_TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
+ length--;
+ options_ptr++;
+ continue;
+
+ case RTE_CT_TCPOPT_SACK:
+ /*
+ * SACK (selective ACK) contains a block of
+ * 1 to 4 entries of 8 bytes each.
+ * Each entry is a pair of 32 bit numbers.
+ * This block follows the usual 2
+ * bytes for opcode and opsize. Thus,
+ * the entire SACK option must be 10, 18,
+ * 26 or 34 bytes long.
+ */
+ if ((opsize >= (RTE_CT_TCPOLEN_PER_SACK_ENTRY + 2)) &&
+ ((opsize - 2) %
+ RTE_CT_TCPOLEN_PER_SACK_ENTRY) == 0) {
+ /* skip over opcode and size, and point to
+ * 2nd 32 bits in entry */
+ options_ptr += 6;
+ for (i = 0; i < (opsize - 2); i +=
+ RTE_CT_TCPOLEN_PER_SACK_ENTRY) {
+ sack_ptr =
+ (uint32_t *) &options_ptr[i];
+ uint32_t ack = rte_bswap32(*sack_ptr);
+
+ if (rte_after(ack, sack))
+ sack = ack;
+ }
+ return sack;
+ }
+ break;
+ default:
+ break;
+ }
+ if ((opsize < 2) || (opsize > length)) {
+ printf("rte_ct_tcp_sack: something wrong, opsize %i,",
+ opsize);
+ printf(" length %i\n", length);
+ return sack;
+ }
+ options_ptr += opsize;
+ length -= opsize;
+ }
+ return sack;
+}
+
+/*
+ * if this is a retransmission of last packet, increment retransmission count,
+ * otherwise record this as last packet.
+ */
+static inline void
+rte_ct_check_for_retransmissions(
+ struct rte_ct_tcp *state,
+ uint8_t dir,
+ uint32_t seq,
+ uint32_t ack,
+ uint32_t end,
+ uint16_t win)
+{
+ if (state->last_dir == dir
+ && state->last_seq == seq
+ && state->last_ack == ack
+ && state->last_end == end && state->last_win == win)
+ state->retrans++;
+ else {
+ state->last_dir = dir;
+ state->last_seq = seq;
+ state->last_ack = ack;
+ state->last_end = end;
+ state->last_win = win;
+ state->retrans = 0;
+ }
+}
+
+/*
+ * Verify that the sequence number in the given packet is within the valid
+ * range at this point in the connection
+ */
+static uint8_t
+rte_ct_tcp_in_window(
+ struct rte_ct_cnxn_data *cd,
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_tcp *state,
+ enum rte_ct_pkt_direction dir,
+ uint8_t index,
+ struct rte_mbuf *pkt,
+ uint8_t ip_hdr_size)
+{
+ struct rte_ct_tcp_state *sender = &state->seen[dir];
+ struct rte_ct_tcp_state *receiver = &state->seen[!dir];
+ uint32_t seq, ack, sack, end, win, swin;
+ uint8_t in_recv_win, tcp_flags;
+ enum rte_ct_packet_action res;
+
+ void *iphdr = RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
+ struct tcp_hdr *tcpheader =
+ (struct tcp_hdr *)RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ (IP_START + ip_hdr_size));
+
+ if (cd->ct_protocol.synproxy_data.synproxied)
+ rte_sp_adjust_client_ack_before_window_check(cd, iphdr,
+ tcpheader, dir);
+
+
+ seq = rte_bswap32(tcpheader->sent_seq);
+ ack = sack = rte_bswap32(tcpheader->recv_ack);
+ win = rte_bswap16(tcpheader->rx_win);
+ end = rte_ct_seq_plus_length(pkt, ip_hdr_size);
+ tcp_flags = tcpheader->tcp_flags;
+
+ if (receiver->flags & RTE_CT_TCP_FLAG_SACK_PERM)
+ sack = rte_ct_tcp_sack(pkt, ip_hdr_size);
+
+ if (unlikely(sender->maxwin == 0)) {
+ /* First packet for sender, initialize data. */
+ if (tcp_flags & RTE_CT_TCPHDR_SYN) {
+ /*
+ * SYN-ACK in reply to a SYN
+ * or SYN from reply direction in simultaneous open.
+ */
+ sender->end = sender->maxend = end;
+ sender->maxwin = RTE_MAX(win, (uint32_t)1);
+
+ rte_ct_check_for_scaling_and_sack_perm(pkt, sender,
+ ip_hdr_size);
+
+ /*
+ * RFC 1323: Both sides must send Window Scale option
+ * to enable scaling in either direction.
+ */
+ if ((sender->
+ flags & receiver->flags &
+ RTE_CT_TCP_FLAG_WINDOW_SCALE) == 0)
+ sender->scale = receiver->scale = 0;
+
+ if (!(tcp_flags & RTE_CT_TCPHDR_ACK))
+ /* Simultaneous open */
+ return 1;
+ } else {
+ /*
+ * In the middle of a connection with no setup data.
+ * Use available data from the packet.
+ */
+ sender->end = end;
+ swin = win << sender->scale;
+ sender->maxwin = (swin == 0 ? 1 : swin);
+ sender->maxend = end + sender->maxwin;
+ /*
+ * We haven't seen traffic in the other direction yet
+ * but we have to tweak window tracking to pass III
+ * and IV until that happens.
+ */
+ if (receiver->maxwin == 0)
+ receiver->end = receiver->maxend = sack;
+ }
+ }
+ /* if sender uninitialized */
+ else if (((cd->ct_protocol.tcp_ct_data.state == RTE_CT_TCP_SYN_SENT &&
+ dir == RTE_CT_DIR_ORIGINAL) ||
+ (cd->ct_protocol.tcp_ct_data.state == RTE_CT_TCP_SYN_RECV &&
+ dir == RTE_CT_DIR_REPLY)) && rte_after(end, sender->end)) {
+ /*
+ * RFC 793: "if a TCP is reinitialized ... then it need
+ * not wait at all; it must only be sure to use sequence
+ * numbers larger than those recently used."
+ */
+ sender->end = sender->maxend = end;
+ sender->maxwin = RTE_MAX(win, (uint32_t)1);
+
+ rte_ct_check_for_scaling_and_sack_perm(pkt, sender,
+ ip_hdr_size);
+ }
+ /* If no ACK, just pretend there was one. */
+ if (!(tcp_flags & RTE_CT_TCPHDR_ACK) ||
+ (((tcp_flags & RTE_CT_TCPHDR_RST_ACK) ==
+ RTE_CT_TCPHDR_RST_ACK) && (ack == 0))) {
+ /* Bad TCP Stacks */
+ ack = sack = receiver->end;
+ }
+
+ if ((tcp_flags & RTE_CT_TCPHDR_RST) && seq == 0 &&
+ cd->ct_protocol.tcp_ct_data.state == RTE_CT_TCP_SYN_SENT)
+ /* RST sent answering SYN. */
+ seq = end = sender->end;
+
+ /* Is the ending sequence in the receive window (if available)? */
+ in_recv_win = !receiver->maxwin ||
+ rte_after(end, sender->end - receiver->maxwin - 1);
+
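+ /*
+ * The four in-window checks, numbered here so the debug output and
+ * the "pass III and IV" note above have a reference:
+ * I: seq is not after sender->maxend
+ * II: end lies within the receiver's window (in_recv_win)
+ * III: sack is not after receiver->end
+ * IV: sack is not before receiver->end minus
+ * max(sender->maxwin, RTE_MAX_ACKWIN_CONST)
+ */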
+ if (rte_before(seq, sender->maxend + 1) && in_recv_win &&
+ rte_before(sack, receiver->end + 1) &&
+ rte_after(sack,
+ receiver->end - RTE_MAX(sender->maxwin,
+ (uint32_t)RTE_MAX_ACKWIN_CONST) - 1)) {
+ /*
+ * Apply window scaling (RFC 1323). Only valid if both
+ * directions sent this option in a SYN packet,
+ * so ignore until not a SYN packet. Scale will be
+ * set to zero if connection set up but no valid scale is there.
+ */
+ if (!(tcp_flags & RTE_CT_TCPHDR_SYN))
+ win <<= sender->scale;
+
+ /* Update sender data. */
+ swin = win + (sack - ack);
+ sender->maxwin = RTE_MAX(sender->maxwin, swin);
+
+ if (rte_after(end, sender->end)) {
+ sender->end = end;
+ sender->flags |= RTE_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
+ }
+
+ if (tcp_flags & RTE_CT_TCPHDR_ACK) {
+ if (!(sender->flags & RTE_CT_TCP_FLAG_MAXACK_SET)) {
+ sender->maxack = ack;
+ sender->flags |= RTE_CT_TCP_FLAG_MAXACK_SET;
+ } else if (rte_after(ack, sender->maxack))
+ sender->maxack = ack;
+ }
+
+ /* Update receiver data. */
+ if (receiver->maxwin != 0 && rte_after(end, sender->maxend))
+ receiver->maxwin += end - sender->maxend;
+
+ if (rte_after(sack + win, receiver->maxend - 1))
+ receiver->maxend = sack + RTE_MAX(win, (uint32_t)1);
+
+ if (ack == receiver->end)
+ receiver->flags &= ~RTE_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
+
+ /* If this packet has an ACK, it may be a retransmission. */
+ if (index == RTE_CT_TCP_ACK_FLAG)
+ rte_ct_check_for_retransmissions(state, dir, seq, ack,
+ end, win);
+ res = 1;
+ } else {
+ res = (sender->flags & RTE_CT_TCP_FLAG_BE_LIBERAL ||
+ ct->misc_options.tcp_be_liberal);
+ }
+
+ if (CT_DEBUG) {
+ if (!res) {
+ /* CT_DEBUG = 0; */
+ printf("tcp_in_window FAILED for %p\n", cd);
+ printf("rte_before(%u, %u + 1) is %d\n",
+ seq, sender->maxend + 1,
+ rte_before(seq, sender->maxend + 1));
+ printf("!%u || rte_after(%u, %u - %u - 1) is %d\n",
+ receiver->maxwin, end, sender->end,
+ receiver->maxwin, in_recv_win);
+ printf("rte_before(%u, %u + 1) is %d\n", sack,
+ receiver->end, rte_before(sack,
+ receiver->end + 1));
+ printf
+ ("rte_after(%u,(%u - RTE_MAX(%u, %u) - 1))) is%d\n",
+ sack, receiver->end, sender->maxwin,
+ RTE_MAX_ACKWIN_CONST, rte_after(sack,
+ receiver->end - RTE_MAX(sender->maxwin,
+ (uint32_t)RTE_MAX_ACKWIN_CONST)
+ - 1));
+
+ }
+ }
+ if (cd->ct_protocol.synproxy_data.synproxied)
+ rte_sp_adjust_server_seq_after_window_check(cd, iphdr,
+ tcpheader, dir);
+ return res;
+}
+
+/*for the given two FSM states,return the one with the smallest timeout value*/
+static inline uint8_t
+rte_ct_choose_min_timeout_state(
+ struct rte_ct_cnxn_tracker *ct,
+ uint8_t state1,
+ uint8_t state2)
+{
+ if (ct->ct_timeout.tcptimeout.tcp_timeouts[state1] <
+ ct->ct_timeout.tcptimeout.tcp_timeouts[state2])
+ return state1;
+ else
+ return state2;
+}
+
+
+/* Returns verdict for packet */
+enum rte_ct_packet_action
+rte_ct_verify_tcp_packet(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *pkt,
+ uint8_t key_was_flipped,
+ uint8_t ip_hdr_size)
+{
+ struct tcp_hdr *tcpheader = (struct tcp_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, (IP_START + ip_hdr_size));
+
+ enum rte_ct_tcp_states new_state, old_state;
+ enum rte_ct_pkt_direction dir;
+ uint8_t index;
+
+ /* state whose timeout value will be used. In odd cases,
+ * not always current state */
+ uint8_t timeout_state;
+
+ dir = (cd->key_is_client_order == !key_was_flipped);
+
+ if (cd->ct_protocol.synproxy_data.synproxied &&
+ cd->ct_protocol.synproxy_data.half_established &&
+ !cd->ct_protocol.synproxy_data.cnxn_established &&
+ dir == RTE_CT_DIR_ORIGINAL) {
+ /*
+ * Packet from client, but only client side of this connection
+ * has been set up. Buffer packet until server side of
+ * connection complete.
+ */
+ rte_ct_buffer_packet(ct, cd, pkt);
+ return RTE_CT_HIJACK;
+ }
+
+ uint32_t recv_ack = rte_bswap32(tcpheader->recv_ack);
+ uint32_t sent_seq = rte_bswap32(tcpheader->sent_seq);
+
+ int check_window = 1;
+ enum rte_ct_packet_action return_action = RTE_CT_FORWARD_PACKET;
+
+ /* rte_ct_tcpdisplay_hdr(tcpheader); */
+
+ old_state = cd->ct_protocol.tcp_ct_data.state;
+ index = rte_ct_get_index(tcpheader->tcp_flags);
+ new_state = rte_ct_tcp_state_table[dir][index][old_state];
+
+ if (new_state == RTE_CT_TCP_MAX) {
+ if (CT_DEBUG) {
+ printf("!!!!invalid state transition from %s ",
+ rte_ct_tcp_names[old_state]);
+ printf("with flags 0x%02x\n",
+ tcpheader->tcp_flags);
+ }
+
+ ct->counters->pkts_drop_invalid_state++;
+ return RTE_CT_DROP_PACKET;
+ }
+
+ if (STATE_TRACKING && new_state != old_state)
+ printf(" new state %s\n", rte_ct_tcp_names[new_state]);
+
+ switch (new_state) {
+
+ case RTE_CT_TCP_ESTABLISHED:
+
+ if (cd->ct_protocol.synproxy_data.synproxied &&
+ !cd->ct_protocol.synproxy_data.half_established &&
+ (old_state == RTE_CT_TCP_SYN_RECV)) {
+ /*
+ * During synproxy setup, ESTABLISHED state entered by
+ * ACK arriving from client. The proxy must now send a
+ * spoofed SYN to the server.
+ * Reset the state to RTE_CT_TCP_SYN_SENT.
+ */
+
+ if (STATE_TRACKING) {
+ printf(" synproxy first half-cnxn complete,");
+ printf(" new state %s\n",
+ rte_ct_tcp_names[RTE_CT_TCP_SYN_SENT]);
+ }
+ cd->ct_protocol.synproxy_data.half_established = true;
+
+ rte_sp_cvt_to_spoofed_server_syn(cd, pkt);
+ rte_ct_clear_cnxn_data(ct, cd, pkt);
+ cd->ct_protocol.tcp_ct_data.state = RTE_CT_TCP_SYN_SENT;
+
+ struct rte_ct_tcp_state *sender =
+ &cd->ct_protocol.tcp_ct_data.
+ seen[RTE_CT_DIR_ORIGINAL];
+ uint16_t win = rte_bswap16(tcpheader->rx_win);
+
+ sender->end = sender->maxend =
+ rte_ct_seq_plus_length(pkt, ip_hdr_size);
+ sender->maxwin = RTE_MAX(win, (uint32_t)1);
+ rte_ct_check_for_scaling_and_sack_perm(pkt, sender,
+ ip_hdr_size);
+ /* TODO seq number code */
+ rte_ct_set_cnxn_timer_for_tcp(ct, cd,
+ RTE_CT_TCP_SYN_SENT);
+ return RTE_CT_SEND_SERVER_SYN;
+ }
+
+
+ case RTE_CT_TCP_SYN_RECV:
+
+ if (cd->ct_protocol.synproxy_data.synproxied &&
+ cd->ct_protocol.synproxy_data.half_established &&
+ !cd->ct_protocol.synproxy_data.cnxn_established) {
+ /*
+ * The reply SYN/ACK has been received from the server.
+ * The connection can now be considered established,
+ * even though an ACK still needs to be sent to
+ * the server.
+ */
+
+ if (!rte_ct_tcp_in_window(cd, ct,
+ &cd->ct_protocol.tcp_ct_data,
+ dir, index, pkt, ip_hdr_size)) {
+ ct->counters->pkts_drop_outof_window++;
+ return RTE_CT_DROP_PACKET;
+ }
+
+ if (STATE_TRACKING) {
+ printf("synproxy full cnxn complete,");
+ printf(" new state %s\n", rte_ct_tcp_names
+ [RTE_CT_TCP_ESTABLISHED]);
+ }
+
+ /*
+ * Convert the packet to an ack to return to the server.
+ * This routine also saves the real sequence number
+ * from the server.
+ */
+
+ rte_sp_cvt_to_spoofed_server_ack(cd, pkt);
+
+ index = rte_ct_get_index(tcpheader->tcp_flags);
+
+ if (!rte_ct_tcp_in_window(cd, ct,
+ &cd->ct_protocol.tcp_ct_data,
+ !dir, index, pkt, ip_hdr_size)) {
+ ct->counters->pkts_drop_outof_window++;
+ return RTE_CT_DROP_PACKET;
+ }
+
+ /* good packets, OK to update state */
+
+ cd->ct_protocol.tcp_ct_data.state =
+ RTE_CT_TCP_ESTABLISHED;
+ ct->counters->sessions_established++;
+ cd->ct_protocol.synproxy_data.cnxn_established = true;
+ cd->ct_protocol.tcp_ct_data.last_index = index;
+ cd->ct_protocol.tcp_ct_data.last_dir = !dir;
+
+ rte_ct_set_cnxn_timer_for_tcp(ct, cd,
+ RTE_CT_TCP_ESTABLISHED);
+ rte_ct_release_buffered_packets(ct, cd);
+
+ return RTE_CT_SEND_SERVER_ACK;
+ }
+
+ case RTE_CT_TCP_SYN_SENT:
+
+ /*
+ * A connection that is actively closed goes to TIME-WAIT state.
+ * It can be re-opened (before it times out) by a SYN packet.
+ */
+
+ if (old_state < RTE_CT_TCP_TIME_WAIT)
+ break;
+ /*
+ * Due to the previous check and the state-machine transitions,
+ * the old state must be RTE_CT_TCP_TIME_WAIT or RTE_CT_TCP_CLOSE.
+ * Need to re-open connection.
+ */
+
+ return RTE_CT_REOPEN_CNXN_AND_FORWARD_PACKET;
+
+ case RTE_CT_TCP_IGNORE:
+
+ /*
+ * Ignored packets usually mean the connection data is
+ * out of sync with client/server. Ignore, but forward
+ * these packets since they may be valid for the connection.
+ * If the ignored packet is invalid, the receiver will send
+ * an RST which should get the connection entry back in sync.
+ */
+
+ /*
+ * However, if the connection is running synproxy and the full
+ * connection is not yet established, there is nowhere
+ * for these packets to go, so drop them.
+ */
+
+ if (cd->ct_protocol.synproxy_data.synproxied &&
+ !cd->ct_protocol.synproxy_data.cnxn_established)
+ return RTE_CT_DROP_PACKET;
+
+ if (index == RTE_CT_TCP_SAK_FLAG &&
+ cd->ct_protocol.tcp_ct_data.last_index ==
+ RTE_CT_TCP_SYN_FLAG
+ && cd->ct_protocol.tcp_ct_data.last_dir != dir
+ && recv_ack == cd->ct_protocol.tcp_ct_data.last_end) {
+ /*
+ * SYN/ACK in reply direction acknowledging a SYN
+ * earlier ignored as invalid.Client and server in sync,
+ * but connection tracker is not. Use previous values
+ * to get back in sync.
+ */
+
+ struct rte_ct_tcp_state *last_seen =
+ &cd->ct_protocol.tcp_ct_data.seen[cd->ct_protocol.
+ tcp_ct_data.
+ last_dir];
+
+ /* reset new and old states to what they should
+ * have been */
+ old_state = RTE_CT_TCP_SYN_SENT;
+ new_state = RTE_CT_TCP_SYN_RECV;
+
+ last_seen->end = cd->ct_protocol.tcp_ct_data.last_end;
+ last_seen->maxend =
+ cd->ct_protocol.tcp_ct_data.last_end;
+ last_seen->maxwin =
+ RTE_MAX(cd->ct_protocol.tcp_ct_data.last_win,
+ (uint32_t)1);
+ last_seen->scale =
+ cd->ct_protocol.tcp_ct_data.last_wscale;
+ cd->ct_protocol.tcp_ct_data.last_flags &=
+ ~RTE_CT_EXP_CHALLENGE_ACK;
+ last_seen->flags =
+ cd->ct_protocol.tcp_ct_data.last_flags;
+ memset(&cd->ct_protocol.tcp_ct_data.seen[dir], 0,
+ sizeof(struct rte_ct_tcp_state));
+ break;
+ }
+
+ cd->ct_protocol.tcp_ct_data.last_index = index;
+ cd->ct_protocol.tcp_ct_data.last_dir = dir;
+ cd->ct_protocol.tcp_ct_data.last_seq = sent_seq;
+ cd->ct_protocol.tcp_ct_data.last_end =
+ rte_ct_seq_plus_length(pkt, ip_hdr_size);
+ cd->ct_protocol.tcp_ct_data.last_win =
+ rte_bswap16(tcpheader->rx_win);
+
+ /*
+ * An original SYN. The client and the server may be in sync,
+ * but the tracker is not. Annotate
+ * the TCP options and let the packet go through. If it is a
+ * valid SYN packet, the server will reply with a SYN/ACK, and
+ * then we'll get in sync. Otherwise, the server potentially
+ * responds with a challenge ACK if implementing RFC5961.
+ */
+ if (index == RTE_CT_TCP_SYN_FLAG &&
+ dir == RTE_CT_DIR_ORIGINAL) {
+ struct rte_ct_tcp_state seen;
+
+ /* call following to set "flag" and "scale" fields */
+ rte_ct_check_for_scaling_and_sack_perm(pkt, &seen,
+ ip_hdr_size);
+
+ /* the only possible flags set are for scaling and SACK */
+ cd->ct_protocol.tcp_ct_data.last_flags = seen.flags;
+ cd->ct_protocol.tcp_ct_data.last_wscale =
+ (seen.flags & RTE_CT_TCP_FLAG_WINDOW_SCALE) == 0 ?
+ 0 : seen.scale;
+
+ /*
+ * Mark the potential for RFC5961 challenge ACK,
+ * as this poses a special problem for the LAST_ACK state,
+ * where an ACK is interpreted as ACKing the last FIN.
+ */
+ if (old_state == RTE_CT_TCP_LAST_ACK)
+ cd->ct_protocol.tcp_ct_data.last_flags |=
+ RTE_CT_EXP_CHALLENGE_ACK;
+ }
+ return RTE_CT_FORWARD_PACKET;
+
+ case RTE_CT_TCP_TIME_WAIT:
+ /*
+ * RFC5961 compliance causes the stack to send a "challenge ACK"
+ * in response to unneeded SYNs. Do not treat this as acking
+ * the last FIN.
+ */
+ if (old_state == RTE_CT_TCP_LAST_ACK &&
+ index == RTE_CT_TCP_ACK_FLAG &&
+ cd->ct_protocol.tcp_ct_data.last_dir != dir &&
+ cd->ct_protocol.tcp_ct_data.last_index ==
+ RTE_CT_TCP_SYN_FLAG
+ && (cd->ct_protocol.tcp_ct_data.
+ last_flags & RTE_CT_EXP_CHALLENGE_ACK)) {
+ /* Detected RFC5961 challenge ACK */
+ cd->ct_protocol.tcp_ct_data.last_flags &=
+ ~RTE_CT_EXP_CHALLENGE_ACK;
+ return RTE_CT_FORWARD_PACKET; /* Don't change state */
+ }
+ break;
+
+ case RTE_CT_TCP_CLOSE:
+
+ if (index == RTE_CT_TCP_RST_FLAG) {
+ /*
+ * Can only transition to CLOSE state with an RST,
+ * but can remain in
+ * CLOSE state with ACK, FIN, or RST. Do special checks.
+ */
+
+ if ((cd->ct_protocol.tcp_ct_data.seen[!dir].flags &
+ RTE_CT_TCP_FLAG_MAXACK_SET) &&
+ rte_before(sent_seq, cd->ct_protocol.
+ tcp_ct_data.seen[!dir].maxack)) {
+
+ ct->counters->pkts_drop_invalid_rst++;
+ /* Invalid RST */
+ return RTE_CT_DROP_PACKET;
+ }
+
+ if (((cd->connstatus == RTE_SEEN_REPLY_CONN &&
+ cd->ct_protocol.tcp_ct_data.last_index ==
+ RTE_CT_TCP_SYN_FLAG) ||
+ (cd->connstatus != RTE_ASSURED_CONN &&
+ cd->ct_protocol.tcp_ct_data.last_index ==
+ RTE_CT_TCP_ACK_FLAG)) &&
+ recv_ack ==
+ cd->ct_protocol.tcp_ct_data.last_end) {
+ /* RST sent to invalid SYN or ACK previously
+ * let through */
+ check_window = 0;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (likely(check_window)) {
+ if (unlikely(!rte_ct_tcp_in_window(cd, ct,
+ &cd->ct_protocol.tcp_ct_data,
+ dir, index,
+ pkt, ip_hdr_size))) {
+ ct->counters->pkts_drop_outof_window++;
+ return RTE_CT_DROP_PACKET;
+ }
+ }
+
+ if (new_state == RTE_CT_TCP_ESTABLISHED &&
+ old_state != RTE_CT_TCP_ESTABLISHED)
+ /* only increment for first state transition to established */
+ /* synproxy established count handled elsewhere */
+ ct->counters->sessions_established++;
+ /* From this point on, all packets are in-window */
+ cd->ct_protocol.tcp_ct_data.last_index = index;
+ cd->ct_protocol.tcp_ct_data.last_dir = dir;
+
+ if (index == RTE_CT_TCP_SAK_FLAG)
+ cd->connstatus = RTE_SEEN_REPLY_CONN;
+
+ timeout_state = new_state;
+
+ if (cd->ct_protocol.tcp_ct_data.retrans >=
+ ct->misc_options.tcp_max_retrans)
+ timeout_state =
+ rte_ct_choose_min_timeout_state(ct, timeout_state,
+ RTE_CT_TCP_RETRANS);
+ else if (rte_ct_either_direction_has_flags(cd,
+ RTE_CT_TCP_FLAG_DATA_UNACKNOWLEDGED))
+ timeout_state =
+ rte_ct_choose_min_timeout_state(ct, timeout_state,
+ RTE_CT_TCP_UNACK);
+
+ if (cd->connstatus != RTE_SEEN_REPLY_CONN) {
+ if (tcpheader->tcp_flags & RTE_CT_TCPHDR_RST) {
+ /*
+ * if only reply seen is RST, there is not an
+ * established connection, so just destroy
+ * connection now.
+ */
+
+ return RTE_CT_DESTROY_CNXN_AND_FORWARD_PACKET;
+ }
+ /* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
+ pickup with loose=1. Avoid large ESTABLISHED timeout. */
+ if (new_state == RTE_CT_TCP_ESTABLISHED)
+ timeout_state = rte_ct_choose_min_timeout_state(ct,
+ timeout_state,
+ RTE_CT_TCP_UNACK);
+
+ } else if (cd->connstatus != RTE_ASSURED_CONN &&
+ (old_state == RTE_CT_TCP_SYN_RECV
+ || old_state == RTE_CT_TCP_ESTABLISHED)
+ && new_state == RTE_CT_TCP_ESTABLISHED)
+ cd->connstatus = RTE_ASSURED_CONN;
+
+ cd->ct_protocol.tcp_ct_data.state = new_state;
+ rte_ct_set_cnxn_timer_for_tcp(ct, cd, timeout_state);
+
+ return return_action;
+}
diff --git a/common/VIL/conntrack/rte_ct_tcp.h b/common/VIL/conntrack/rte_ct_tcp.h
new file mode 100644
index 00000000..391200c6
--- /dev/null
+++ b/common/VIL/conntrack/rte_ct_tcp.h
@@ -0,0 +1,484 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_RTE_CT_TCP_H__
+#define __INCLUDE_RTE_CT_TCP_H__
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <rte_tcp.h>
+#include <rte_port.h>
+#include <rte_timer.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_byteorder.h>
+#include "rte_cnxn_tracking.h"
+
+/* AN INNER, PRIVATE INTERFACE FOR RTE_CNXN_TRACKING */
+
+/* constants for TCP options */
+
+#define RTE_CT_TCPOPT_EOL 0 /* End of options */
+#define RTE_CT_TCPOPT_NOP 1 /* Padding */
+#define RTE_CT_TCPOPT_MSS 2 /* Segment size negotiating */
+#define RTE_CT_TCPOPT_WINDOW 3 /* Window scaling */
+#define RTE_CT_TCPOPT_SACK_PERM 4 /* SACK Permitted */
+#define RTE_CT_TCPOPT_SACK 5 /* SACK Block */
+#define RTE_CT_TCPOPT_TIMESTAMP 8 /* RTT estimations */
+
+#define RTE_CT_TCPOLEN_MSS 4
+#define RTE_CT_TCPOLEN_WINDOW 3
+#define RTE_CT_TCPOLEN_SACK_PERM 2
+#define RTE_CT_TCPOLEN_TIMESTAMP 10
+#define RTE_CT_TCPOLEN_PER_SACK_ENTRY 8
+
+#define RTE_CT_TCPOLEN_MSS_ALIGNED 4
+#define RTE_CT_TCPOLEN_WINDOW_ALIGNED 4
+#define RTE_CT_TCPOLEN_SACK_PERM_ALIGNED 4
+#define RTE_CT_TCPOLEN_TIMESTAMP_ALIGNED 12
+
+#define RTE_CT_MAX_TCP_WINDOW_SCALE 14
+
+#define RTE_SP_OPTIONS_MSS 1
+#define RTE_SP_OPTIONS_WINDOW_SCALE 2
+#define RTE_SP_OPTIONS_TIMESTAMP 4
+#define RTE_SP_OPTIONS_SACK_PERM 8
+
+
+enum rte_ct_packet_action {
+ RTE_CT_OPEN_CONNECTION,
+ RTE_CT_DROP_PACKET,
+ RTE_CT_FORWARD_PACKET,
+ RTE_CT_DESTROY_CNXN_AND_FORWARD_PACKET,
+ RTE_CT_REOPEN_CNXN_AND_FORWARD_PACKET,
+ RTE_CT_SEND_CLIENT_SYNACK,
+ RTE_CT_SEND_SERVER_SYN,
+ RTE_CT_SEND_SERVER_ACK,
+ RTE_CT_HIJACK
+};
+
+enum rte_ct_connstatus {
+ RTE_INIT_CONN,
+ RTE_SEEN_REPLY_CONN,
+ RTE_ASSURED_CONN
+};
+
+/* TCP tracking. */
+
+static const char *const rte_ct_tcp_names[] = {
+ "NONE",
+ "SYN_SENT",
+ "SYN_RECV",
+ "ESTABLISHED",
+ "FIN_WAIT",
+ "CLOSE_WAIT",
+ "LAST_ACK",
+ "TIME_WAIT",
+ "CLOSE",
+ "SYN_SENT2",
+ "RETRANS",
+ "UNACK",
+ "IGNORE"
+};
+
+static const char *const rte_ct_udp_names[] = {
+ "NONE_UDP",
+ "UNREPLIED",
+ "REPLIED"
+};
+
+/* Fixme: what about big packets? */
+#define RTE_MAX_ACKWIN_CONST 66000
+
+/* Window scaling is advertised by the sender */
+#define RTE_CT_TCP_FLAG_WINDOW_SCALE 0x01
+
+/* SACK is permitted by the sender */
+#define RTE_CT_TCP_FLAG_SACK_PERM 0x02
+
+/* This sender sent FIN first */
+#define RTE_CT_TCP_FLAG_CLOSE_INIT 0x04
+
+/* Be liberal in window checking */
+#define RTE_CT_TCP_FLAG_BE_LIBERAL 0x08
+
+/* Has unacknowledged data */
+#define RTE_CT_TCP_FLAG_DATA_UNACKNOWLEDGED 0x10
+
+/* The field td_maxack has been set */
+#define RTE_CT_TCP_FLAG_MAXACK_SET 0x20
+/* Marks possibility for expected RFC5961 challenge ACK */
+#define RTE_CT_EXP_CHALLENGE_ACK 0x40
+
+
+
+/* TCP header flags of interest */
+#define RTE_CT_TCPHDR_FIN 0x01
+#define RTE_CT_TCPHDR_SYN 0x02
+#define RTE_CT_TCPHDR_RST 0x04
+#define RTE_CT_TCPHDR_ACK 0x10
+
+#define RTE_CT_TCPHDR_RST_ACK (RTE_CT_TCPHDR_RST | RTE_CT_TCPHDR_ACK)
+
+
+
+/* State machine values. Note that order is important, as relative checks are made. */
+enum rte_ct_tcp_states {
+ RTE_CT_TCP_NONE,
+ RTE_CT_TCP_SYN_SENT,
+ RTE_CT_TCP_SYN_RECV,
+ RTE_CT_TCP_ESTABLISHED,
+ RTE_CT_TCP_FIN_WAIT,
+ RTE_CT_TCP_CLOSE_WAIT,
+ RTE_CT_TCP_LAST_ACK,
+ RTE_CT_TCP_TIME_WAIT,
+ RTE_CT_TCP_CLOSE,
+ RTE_CT_TCP_SYN_SENT_2,
+ RTE_CT_TCP_RETRANS,
+ RTE_CT_TCP_UNACK,
+ RTE_CT_TCP_IGNORE
+};
+
+enum rte_ct_udp_states {
+ RTE_CT_UDP_NONE,
+ RTE_CT_UDP_UNREPLIED,
+ RTE_CT_UDP_REPLIED,
+ RTE_CT_UDP_MAX
+};
+
+
+
+#define RTE_CT_TCP_MAX RTE_CT_TCP_UNACK
+
+enum rte_ct_pkt_direction {
+ RTE_CT_DIR_ORIGINAL,
+ RTE_CT_DIR_REPLY
+};
+
+struct rte_ct_tcp_state {
+ uint32_t end; /* max of seq + len */
+ uint32_t maxend; /* max of ack + max(win, 1) */
+ uint32_t maxwin; /* max(win) */
+ uint32_t maxack; /* max of ack */
+ uint8_t scale; /* window scale factor */
+ uint8_t flags; /* per direction options */
+};
+
+struct rte_synproxy_options {
+ uint8_t options;
+ uint8_t window_scale;
+ uint16_t mss;
+ uint32_t ts_val;
+ uint32_t ts_echo_reply;
+ uint16_t initial_window;
+};
+
+struct ct_sp_cnxn_data {
+ /* buffer client pkt while waiting on server setup,
+ * store in reverse order
+ */
+ struct rte_mbuf *buffered_pkt_list;
+ uint32_t original_spoofed_seq;
+ /* difference between spoofed and real seq from server */
+ uint32_t seq_diff;
+ struct rte_synproxy_options cnxn_options;
+ /* non-zero if this connection was created using synproxy */
+ uint8_t synproxied;
+ bool half_established;
+ /* non-zero after both half-connections established */
+ bool cnxn_established;
+};
+
+struct rte_ct_tcp {
+ struct rte_ct_tcp_state seen[2]; /* connection parms per direction */
+ uint8_t state;
+ uint8_t last_dir; /* Direction of the last packet
+ * (TODO: enum ip_conntrack_dir)
+ */
+ uint8_t retrans; /* Number of retransmitted packets */
+ uint8_t last_index; /* Index of the last packet */
+ uint32_t last_seq; /* Last seq number seen in dir */
+ uint32_t last_ack; /* Last seq number seen opposite dir */
+ uint32_t last_end; /* Last seq + len */
+ uint16_t last_win; /* Last window seen in dir */
+ /* For SYN packets while we may be out-of-sync */
+ uint8_t last_wscale; /* Last window scaling factor seen */
+ uint8_t last_flags; /* Last flags set */
+};
+
+/*
+ * rte_ct_cnxn_counters holds all the connection-specific counters.
+ * TODO: Make available in public interface
+ */
+
+struct rte_ct_cnxn_counters {
+ uint64_t packets_received;//Added for CT-NAT
+ uint64_t packets_forwarded;
+ uint64_t packets_dropped;
+};
+
+struct rte_ct_proto {
+ struct rte_ct_tcp tcp_ct_data; /* TCP specific data fields*/
+ struct ct_sp_cnxn_data synproxy_data;
+};
+
+
+/*
+ * rte_ct_cnxn_data contains all the data for a TCP connection. This includes
+ * state data as necessary for verifying the validity of TCP packets. In
+ * addition, it holds data necessary for implementing the TCP timers.
+ */
+
+struct rte_ct_cnxn_data {
+ /* The timer will be kept as part of the cnxn_data. When it fires, the
+ * pointer to the timer can be cast as the pointer to the cnxn_data
+ */
+ struct rte_timer timer; /* !!!!! IMPORTANT: Keep as first field !!!!! */
+
+ struct rte_ct_cnxn_counters counters;
+
+ /* full key stored here to allow the timer to remove the connection */
+ /* TODO: Consider storing key signature as well to speed up deletions.*/
+ uint32_t key[10];
+
+ struct rte_ct_proto ct_protocol;
+
+ /* the 100 ms timing step that a packet was seen for connection */
+ uint64_t expected_timeout;
+
+ /* Abstract states also used for timer values, e.g. RTE_CT_TCP_UNACK*/
+ uint8_t state_used_for_timer;
+
+ /* used to compute the "direction" of the packet */
+ uint8_t key_is_client_order;
+ uint8_t connstatus;
+ uint8_t protocol;
+ /* used to store the type of packet ipv4 or ipv6 */
+ uint8_t type;
+ //#ifdef FTP_ALG
+ // Bypass flag to indicate that ALG checking is no longer needed
+ uint8_t alg_bypass_flag;
+ // Can we use key_is_client_order for direction checking
+ uint8_t server_direction;
+ int16_t tcpSeqdiff;
+ // PORT = 0, PASV = 1
+ uint8_t ftp_session_type;
+ uint32_t tcp_payload_size;
+ int16_t seq_client;
+ int16_t ack_client;
+ int16_t seq_server;
+ int16_t ack_server;
+ //#endif
+} __rte_cache_aligned;
+
+
+#define RTE_CT_TCP_MAX_RETRANS 3
+
+struct rte_ct_tcptimeout {
+ /* a table of timeouts for each state of TCP */
+ uint64_t tcp_timeouts[RTE_CT_TCP_MAX + 1];
+};
+
+
+struct rte_ct_misc_options {
+ uint8_t synproxy_enabled;
+ uint32_t tcp_loose;
+ uint32_t tcp_be_liberal;
+ uint32_t tcp_max_retrans;
+};
+
+struct rte_ct_udptimeout {
+ uint64_t udp_timeouts[RTE_CT_UDP_MAX + 1];
+};
+
+struct rte_ct_timeout {
+ struct rte_ct_tcptimeout tcptimeout;
+ struct rte_ct_udptimeout udptimeout;
+};
+
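+/*
+ * A connection tracker instance: the connection hash table and its entries,
+ * scratch space for bulk lookups, timer bookkeeping, and the configured
+ * timeout and miscellaneous options.
+ */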
+struct rte_ct_cnxn_tracker {
+ struct rte_hash *rhash;
+
+ /*
+ * Data for bulk hash lookup. Use this memory as temporary space.
+ * Too big for stack (64*16 bytes)
+ */
+ uint32_t hash_keys[RTE_HASH_LOOKUP_BULK_MAX][10];
+
+ /* table of pointers to above, for bulk hash lookup */
+ void *hash_key_ptrs[RTE_HASH_LOOKUP_BULK_MAX];
+ #ifdef CT_CGNAT
+ uint32_t positions[RTE_HASH_LOOKUP_BULK_MAX];/*added for ALG*/
+ #endif
+ /* hash table and timer storage */
+ uint32_t num_cnxn_entries;
+
+ /*
+ * pointer to data space used for hash table, "num_cnxn_entries" long.
+ * Memory allocated during initialization.
+ */
+ struct rte_ct_cnxn_data *hash_table_entries;
+ struct rte_CT_counter_block *counters;
+
+ uint64_t hertz;
+ uint64_t timing_cycles_per_timing_step;
+ uint64_t timing_100ms_steps;
+ uint64_t timing_100ms_steps_previous;
+ uint64_t timing_last_time;
+ struct rte_ct_timeout ct_timeout;
+ struct rte_ct_misc_options misc_options;
+
+ char name[16];
+ struct rte_ct_cnxn_data *new_connections[64];
+ struct rte_mbuf *buffered_pkt_list;
+ int latest_connection;
+ /* offset into mbuf where synproxy can store a pointer */
+ uint16_t pointer_offset;
+} __rte_cache_aligned;
+
+/*
+ * Returns a value stating if this is a valid TCP open connection attempt.
+ * If valid, updates cnxn with any data fields it need to save.
+ */
+
+enum rte_ct_packet_action
+rte_ct_tcp_new_connection(
+ struct rte_ct_cnxn_tracker *inst,
+ struct rte_ct_cnxn_data *cnxn,
+ struct rte_mbuf *pkt,
+ int use_synproxy,
+ uint8_t ip_hdr_size);
+
+/*
+* Returns a value stating if this is a valid TCP packet for the given connection.
+* If valid, updates cnxn with any data fields it needs to save.
+*/
+
+enum rte_ct_packet_action
+rte_ct_verify_tcp_packet(
+ struct rte_ct_cnxn_tracker *inst,
+ struct rte_ct_cnxn_data *cnxn,
+ struct rte_mbuf *pkt,
+ uint8_t key_was_flipped,
+ uint8_t ip_hdr_size);
+
+/*
+* Returns a value stating if this is a valid UDP open connection attempt.
+* If valid, updates cnxn with any data fields it needs to save.
+*/
+
+uint8_t
+rte_ct_udp_new_connection(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *pkt);
+
+/*
+* Returns a value stating if this is a valid UDP packet for the given connection.
+* If valid, updates cnxn with any data fields it needs to save.
+*/
+
+enum rte_ct_packet_action
+rte_ct_udp_packet(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *pkt,
+ uint8_t key_was_flipped);
+
+
+/*
+ * For the given connection, set a timeout based on the given state. If the
+ * timer is already set, this call will reset the timer with a new value.
+ */
+
+void
+rte_ct_set_cnxn_timer_for_tcp(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ uint8_t tcp_state);
+
+void
+rte_ct_set_cnxn_timer_for_udp(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ uint8_t tcp_state);
+
+/* Cancel timer associated with the connection. Safe to call if no timer set.*/
+void rte_ct_cancel_cnxn_timer(struct rte_ct_cnxn_data *cd);
+
+
+/*
+ * SYNPROXY related routines. Detailed comments are available in
+ * rte_ct_synproxy.c where they are implemented.
+ */
+
+
+/* these 3 routines convert a received packet to a different one */
+
+void
+rte_sp_cvt_to_spoofed_client_synack(struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *old_pkt);
+
+void
+rte_sp_cvt_to_spoofed_server_syn(struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *old_pkt);
+
+void
+rte_sp_cvt_to_spoofed_server_ack(struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *old_pkt);
+
+/* These two routines adjust seq or ack numbers,
+ * as part of the proxy mechanism
+ */
+
+void
+rte_sp_adjust_client_ack_before_window_check(
+ struct rte_ct_cnxn_data *cd,
+ void *i_hdr,
+ struct tcp_hdr *thdr,
+ enum rte_ct_pkt_direction dir);
+
+void
+rte_sp_adjust_server_seq_after_window_check(
+ struct rte_ct_cnxn_data *cd,
+ void *i_hdr,
+ struct tcp_hdr *thdr,
+ enum rte_ct_pkt_direction dir);
+
+
+
+/* parse tcp options and save in t_opts */
+void
+rte_sp_parse_options(struct rte_mbuf *pkt, struct rte_ct_cnxn_data *cd);
+
+
+/* these two routines deal with packet buffering */
+
+void
+rte_ct_buffer_packet(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ struct rte_mbuf *pkt);
+
+void
+ rte_ct_release_buffered_packets(
+ struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd);
+
+#endif /* __INCLUDE_RTE_CT_TCP_H__ */
diff --git a/common/VIL/conntrack/rte_ct_udp.c b/common/VIL/conntrack/rte_ct_udp.c
new file mode 100644
index 00000000..88f3a9a4
--- /dev/null
+++ b/common/VIL/conntrack/rte_ct_udp.c
@@ -0,0 +1,49 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdlib.h>
+#include <string.h>
+#include "rte_ct_tcp.h"
+#include "rte_cnxn_tracking.h"
+
+uint8_t rte_ct_udp_new_connection(__rte_unused struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ __rte_unused struct rte_mbuf *pkt)
+{
+ /* printf("New connection UDP packet received\n"); */
+ cd->connstatus = RTE_INIT_CONN;
+ return 1;
+}
+enum rte_ct_packet_action rte_ct_udp_packet(struct rte_ct_cnxn_tracker *ct,
+ struct rte_ct_cnxn_data *cd,
+ __rte_unused struct rte_mbuf *pkt,
+ uint8_t key_was_flipped)
+{
+ enum rte_ct_pkt_direction dir;
+
+ dir = (cd->key_is_client_order == !key_was_flipped);
+ /* printf("packet received verify"); */
+ if (dir == RTE_CT_DIR_REPLY &&
+ cd->connstatus == RTE_INIT_CONN) {
+ rte_ct_set_cnxn_timer_for_udp(ct, cd, RTE_CT_UDP_REPLIED);
+ cd->connstatus = RTE_ASSURED_CONN;
+ } else if (dir == RTE_CT_DIR_REPLY &&
+ cd->connstatus == RTE_ASSURED_CONN)
+ rte_ct_set_cnxn_timer_for_udp(ct, cd, RTE_CT_UDP_REPLIED);
+ else
+ rte_ct_set_cnxn_timer_for_udp(ct, cd, RTE_CT_UDP_UNREPLIED);
+ return RTE_CT_FORWARD_PACKET;
+}
diff --git a/common/VIL/l2l3_stack/Makefile b/common/VIL/l2l3_stack/Makefile
new file mode 100644
index 00000000..b85bf1d4
--- /dev/null
+++ b/common/VIL/l2l3_stack/Makefile
@@ -0,0 +1,35 @@
+# Copyright (c) 2017 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = Protocol
+
+# all source are stored in SRCS-y
+SRCS-y := main.c l2_proto.c interface.c lib_arp.c lib_icmpv6.c l3fwd_main.c l3fwd_lpm4.c l3fwd_lpm6.c bond.c tsx.c hle.c
+
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3 $(USER_FLAGS)
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O0 -g
+CFLAGS += -mrtm -mhle
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/common/VIL/l2l3_stack/bond.c b/common/VIL/l2l3_stack/bond.c
new file mode 100644
index 00000000..8fd11712
--- /dev/null
+++ b/common/VIL/l2l3_stack/bond.c
@@ -0,0 +1,1595 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <interface.h>
+#include "tsx.h"
+extern interface_main_t ifm;
+extern uint8_t ifm_debug;
+extern int USE_RTM_LOCKS;
+extern rte_rwlock_t rwlock;
+
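+/*
+ * Create a bonded port with the given name and mode (or update the mode of
+ * an existing one) and add it to the interface manager's port list.
+ * Returns the bonded port id on success, IFM_FAILURE otherwise.
+ */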
+int ifm_bond_port_create(const char *name, int mode, port_config_t * portconf)
+{
+ int port_id;
+ l2_phy_interface_t *bond_port;
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: i/p name %p, mode %d\n\r", __FUNCTION__,
+ name, mode);
+ if (name == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Param name cannot be NULL\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ if (mode < 0 || mode > 6) {
+ RTE_LOG(ERR, IFM, "%s: Param mode should be withing 0 to 6\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ if (portconf == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Param portconf cannot be NULL\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ bond_port = ifm_get_port_by_name(name);
+ if (bond_port == NULL) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "Call ifm_port_setup %s\n\r", name);
+ port_id = rte_eth_bond_create(name, mode, 0);
+ if (port_id < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to create bond port %s with mode %u\n\r",
+ __FUNCTION__, name, mode);
+ return IFM_FAILURE;
+ }
+ RTE_LOG(INFO, IFM,
+ "%s: Created bond port %s(%u) on socket %u with "
+ "mode %u.\n\r", __FUNCTION__, name, port_id,
+ rte_eth_dev_socket_id(port_id), mode);
+
+ bond_port = (l2_phy_interface_t *) rte_zmalloc(NULL,
+ sizeof
+ (l2_phy_interface_t),
+ RTE_CACHE_LINE_SIZE);
+ bond_port->pmdid = port_id;
+ strncpy(bond_port->ifname, name, IFM_IFNAME_LEN);
+ memcpy(&bond_port->port_config, portconf,
+ sizeof(port_config_t));
+ bond_port->flags |= IFM_MASTER;
+ struct bond_port *bond_info;
+ bond_info = (struct bond_port *)rte_zmalloc(NULL,
+ sizeof(struct
+ bond_port),
+ RTE_CACHE_LINE_SIZE);
+ bond_info->socket_id = rte_eth_dev_socket_id(port_id);
+ bond_info->mode = mode;
+ bond_info->bond_portid = port_id;
+ bond_port->bond_config = bond_info;
+ if (mode == IFM_BONDING_MODE_8023AD)
+ bond_port->tx_buf_len =
+ (2 * RTE_ETH_TX_BUFFER_SIZE(IFM_BURST_SIZE)) *
+ RTE_MAX_ETHPORTS;
+ //ifm_add_port_to_port_list(bond_port);
+ ifm.port_list[port_id] = bond_port;
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "%s: Added bond port %s(%u) to port list\n\r",
+ __FUNCTION__, name, port_id);
+ } else {
+ RTE_LOG(INFO, IFM, "%s: Port %s already exists in the"
+ " port list\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ if (!(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Previously port %s was not "
+ "configured as Bond port\n\r", __FUNCTION__,
+ name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->mode != mode) {
+ if (rte_eth_bond_mode_set(bond_port->pmdid, mode) < 0) {
+ RTE_LOG(ERR, IFM, "%s: rte_eth_bond_mode_set "
+ "failed\n\r", __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+
+ bond_port->bond_config->mode =
+ rte_eth_bond_mode_get(bond_port->pmdid);
+ /* xmit policy may change for based on mode */
+ bond_port->bond_config->xmit_policy =
+ rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "%s: Bond port %u mode is updated. Mode %u xmit_policy %u."
+ "\n\r", __FUNCTION__, bond_port->pmdid,
+ bond_port->bond_config->mode,
+ bond_port->bond_config->xmit_policy);
+ }
+ port_id = bond_port->pmdid;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return port_id;
+}
+
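+/*
+ * Delete a bonded port previously created with ifm_bond_port_create(). All
+ * slave ports must be unbound first. Returns IFM_SUCCESS or IFM_FAILURE.
+ */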
+int ifm_bond_port_delete(const char *name)
+{
+ l2_phy_interface_t *bond_port;
+ if (name == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Param name cannot be NULL\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ bond_port = ifm_get_port_by_name(name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port with name %s not"
+ " found in the list\n\r", __FUNCTION__, name);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (!(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %s is not "
+ "configured is not bond port\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config && bond_port->bond_config->slave_count > 0) {
+ RTE_LOG(ERR, IFM, "%s: First unbind all slave "
+ "ports from the bond port %s\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ int ret;
+ ret = rte_eth_bond_free(name);
+ if (ret < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to delete "
+ "bond port %s\n\r", __FUNCTION__, name);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: Bond port %s deleted successfully\n\r",
+ __FUNCTION__, name);
+
+ if (bond_port && bond_port->bond_config != NULL) {
+ rte_free(bond_port->bond_config);
+ bond_port->bond_config = NULL;
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ //ifm.port_list[bond_port->pmdid] = NULL;
+ return IFM_SUCCESS;
+}
+
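+/*
+ * Bind a slave port to a bonded port and refresh the cached bonding
+ * configuration; the first slave added also triggers setup of the bonded
+ * port. Returns IFM_SUCCESS or IFM_FAILURE.
+ */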
+int ifm_add_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ l2_phy_interface_t *bond_port, *slave_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+ slave_port = ifm_get_port(slave_port_id);
+ // slave_port = ifm.port_list[slave_port_id];
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: i/p bond id %u, slave id %u\n\r",
+ __FUNCTION__, bonded_port_id, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (slave_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not available in "
+ "port list.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port && !(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not configured "
+ "as Master port. %u\n\r", __FUNCTION__, bonded_port_id,
+ bond_port->flags & IFM_MASTER);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port && bond_port->bond_config
+ && bond_port->bond_config->slave_count == RTE_MAX_ETHPORTS) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to bind.Already %u ports are bonded to master port...\n\r ",
+ __FUNCTION__, RTE_MAX_ETHPORTS);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (slave_port && slave_port->flags & IFM_SLAVE) {
+ /* Have to check whether the port is already part of some other bond port */
+ if (slave_port->bond_config != NULL) {
+ if (bonded_port_id !=
+ slave_port->bond_config->bond_portid) {
+ RTE_LOG(ERR, IFM,
+ "%s: Slave port %u is already part"
+ " of other bond port %u.\n\r",
+ __FUNCTION__, slave_port_id,
+ slave_port->bond_config->bond_portid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ } else {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Slave port %u is already bounded to %u\n\r",
+ __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ }
+ }
+ if (bond_port->bond_config && bond_port->bond_config->slave_count &&
+ bond_port->link_speed != slave_port->link_speed
+ && bond_port->link_duplex != slave_port->link_duplex) {
+ RTE_LOG(ERR, IFM,
+ "%s: Error in adding slave port to bond port. Reason speed mismatch\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Slave port %u Master port %u\n\r",
+ __FUNCTION__, slave_port_id, bonded_port_id);
+ int ret;
+ ret = rte_eth_bond_slave_add(bond_port->pmdid, slave_port->pmdid);
+ if (ret < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to add slave port %u to bond "
+ "port %u.\n\r", __FUNCTION__, slave_port->pmdid,
+ bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ slave_port->flags |= IFM_SLAVE;
+ /* Populate bond config information */
+ if (bond_port->bond_config) {
+ bond_port->bond_config->xmit_policy =
+ rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ bond_port->bond_config->internal_ms =
+ rte_eth_bond_link_monitoring_get(bond_port->pmdid);
+ bond_port->bond_config->link_up_delay_ms =
+ rte_eth_bond_link_up_prop_delay_get(bond_port->pmdid);
+ bond_port->bond_config->link_down_delay_ms =
+ rte_eth_bond_link_down_prop_delay_get(bond_port->pmdid);
+ bond_port->bond_config->primary =
+ rte_eth_bond_primary_get(bond_port->pmdid);
+ bond_port->bond_config->slave_count =
+ rte_eth_bond_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->slaves,
+ RTE_MAX_ETHPORTS);
+ bond_port->bond_config->active_slave_count =
+ rte_eth_bond_active_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->
+ active_slaves,
+ RTE_MAX_ETHPORTS);
+ slave_port->bond_config = bond_port->bond_config;
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Slave count is %u\n\r",
+ __FUNCTION__,
+ bond_port->bond_config->slave_count);
+ if (bond_port->bond_config->slave_count == 1) {
+ ret =
+ ifm_port_setup(bond_port->pmdid,
+ &(bond_port->port_config));
+ if (ret < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to start bond port %u.\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ } else {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Skipping"
+ " port setup\n\r", __FUNCTION__);
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int ifm_remove_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ l2_phy_interface_t *bond_port, *slave_port;
+
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+ slave_port = ifm_get_port(slave_port_id);
+ //slave_port = ifm.port_list[slave_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available "
+ "in port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (slave_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not available "
+ "in port list.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port && !(bond_port->flags & IFM_MASTER)) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not configured "
+ "as Master port.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (slave_port && !(slave_port->flags & IFM_SLAVE)) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not configured"
+ " as slave port.\n\r", __FUNCTION__, slave_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ int i;
+ int found = 0;
+ for (i = 0; i < bond_port->bond_config->slave_count; i++) {
+ if (slave_port_id == bond_port->bond_config->slaves[i]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not binded "
+ "with bond port %u\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (rte_eth_bond_slave_remove(bonded_port_id, slave_port_id) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to unbind slave port %u"
+ " from bond port %u\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ slave_port->flags &= ~IFM_SLAVE;
+ slave_port->bond_config = NULL;
+ bond_port->bond_config->primary =
+ rte_eth_bond_primary_get(bond_port->pmdid);
+ bond_port->bond_config->slave_count =
+ rte_eth_bond_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->slaves,
+ RTE_MAX_ETHPORTS);
+ bond_port->bond_config->active_slave_count =
+ rte_eth_bond_active_slaves_get(bond_port->pmdid,
+ bond_port->bond_config->
+ active_slaves, RTE_MAX_ETHPORTS);
+
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM, "%s: Unbound slave port %u from the bond "
+ "port %u, primary is now %d\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id,
+ rte_eth_bond_primary_get(bond_port->pmdid));
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+}
+
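+/*
+ * Illustrative usage sketch (assumption, not part of this module's code path):
+ * a control-plane caller removing a slave and checking the remaining slave
+ * count. Port ids 0 (bond) and 1 (slave) are placeholders.
+ *
+ * if (ifm_remove_slave_port(0, 1) == IFM_SUCCESS) {
+ *         int count = get_bond_slave_count(0);
+ *         if (count != IFM_FAILURE)
+ *                 RTE_LOG(INFO, IFM, "bond 0 now has %d slave(s)\n\r", count);
+ * }
+ */
+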
+int set_bond_mode(uint8_t bonded_port_id, uint8_t mode)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ if (bond_port)
+ ifm_remove_port_details(bond_port->pmdid);
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port && bond_port->bond_config->mode == mode) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Already bond port is set with the given"
+ " mode %u\n\r.", __FUNCTION__, mode);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ return IFM_SUCCESS;
+
+ }
+ if (rte_eth_bond_mode_set(bond_port->pmdid, mode) < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to set bond mode %u for port id %u\n\r.",
+ __FUNCTION__, mode, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ return IFM_FAILURE;
+ }
+
+ bond_port->bond_config->mode = rte_eth_bond_mode_get(bond_port->pmdid);
+ /* xmit policy may change based on mode */
+ bond_port->bond_config->xmit_policy =
+ rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Bond port %u mode is updated. Mode %u xmit_policy %u."
+ "\n\r.", __FUNCTION__, bond_port->pmdid,
+ bond_port->bond_config->mode,
+ bond_port->bond_config->xmit_policy);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ ifm_remove_port_details(bond_port->pmdid);
+ return IFM_SUCCESS;
+}
+
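+/*
+ * Sketch: switching a bond to active-backup and verifying the change.
+ * BONDING_MODE_ACTIVE_BACKUP comes from DPDK's rte_eth_bond.h; bond port
+ * id 0 is a placeholder.
+ *
+ * if (set_bond_mode(0, BONDING_MODE_ACTIVE_BACKUP) == IFM_SUCCESS &&
+ *     get_bond_mode(0) == BONDING_MODE_ACTIVE_BACKUP)
+ *         RTE_LOG(INFO, IFM, "bond 0 switched to active-backup\n\r");
+ */
+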
+int get_bond_mode(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint8_t mode = bond_port->bond_config->mode;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return mode;
+}
+
+int set_bond_primary(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ l2_phy_interface_t *slave_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+ slave_port = ifm_get_port(slave_port_id);
+ // slave_port = ifm.port_list[slave_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (slave_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given slave port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ int i;
+ int found = 0;
+ for (i = 0; i < bond_port->bond_config->slave_count; i++) {
+ if (slave_port_id == bond_port->bond_config->slaves[i]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ RTE_LOG(ERR, IFM, "%s: Slave port %u is not binded "
+ "with bond port %u. Slave port should be binded first\n\r",
+ __FUNCTION__, slave_port_id, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+
+ if (bond_port->bond_config->primary == slave_port_id) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Slave port %u is already the primary for bond port"
+ " %u\n\r", __FUNCTION__, slave_port_id,
+ bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+
+ }
+ if (rte_eth_bond_primary_set(bond_port->pmdid, slave_port->pmdid) < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s:Failed to set slave %u as primary for bond port %u\n\r.",
+ __FUNCTION__, slave_port->pmdid, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+
+ bond_port->bond_config->primary =
+ rte_eth_bond_primary_get(bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: Primary port is updated as %u for bond port %u",
+ __FUNCTION__, bond_port->bond_config->primary,
+ bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+}
+
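+/*
+ * Sketch: promoting a slave to primary and reading it back. Port ids 0 and 1
+ * are placeholders; the slave must already be bound to the bond.
+ *
+ * if (set_bond_primary(0, 1) == IFM_SUCCESS)
+ *         RTE_LOG(INFO, IFM, "primary of bond 0 is now port %d\n\r",
+ *                 get_bond_primary_port(0));
+ */
+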
+int get_bond_primary_port(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint8_t primary = bond_port->bond_config->primary;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return primary;
+}
+
+int get_bond_slave_count(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t slave_count = bond_port->bond_config->slave_count;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return slave_count;
+}
+
+int get_bond_active_slave_count(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t slave_count = bond_port->bond_config->active_slave_count;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return slave_count;
+}
+
+int get_bond_slaves(uint8_t bonded_port_id, uint8_t slaves[RTE_MAX_ETHPORTS])
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ memcpy(slaves, bond_port->bond_config->slaves,
+ bond_port->bond_config->slave_count);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_active_slaves(uint8_t bonded_port_id,
+ uint8_t active_slaves[RTE_MAX_ETHPORTS])
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ memcpy(active_slaves, bond_port->bond_config->active_slaves,
+ bond_port->bond_config->active_slave_count);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int set_bond_mac_address(uint8_t bonded_port_id, struct ether_addr *mac_addr)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (mac_addr == NULL) {
+ RTE_LOG(ERR, IFM, "%s: MAC address cannot be NULL.\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (rte_eth_bond_mac_address_set(bond_port->pmdid, mac_addr) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set MAC addr for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ rte_eth_macaddr_get(bond_port->pmdid,
+ (struct ether_addr *)bond_port->macaddr);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
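+/*
+ * Sketch: overriding and later restoring the bond MAC address. The address
+ * below is a locally administered placeholder; reset_bond_mac_addr() falls
+ * back to the primary slave's MAC.
+ *
+ * struct ether_addr mac = {
+ *         .addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01} };
+ * if (set_bond_mac_address(0, &mac) != IFM_SUCCESS)
+ *         RTE_LOG(ERR, IFM, "could not override bond 0 MAC\n\r");
+ * ...
+ * reset_bond_mac_addr(0);
+ */
+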
+int reset_bond_mac_addr(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ // bond_port = ifm.port_list[bonded_port_id];
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (rte_eth_bond_mac_address_reset(bond_port->pmdid) < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to reset MAC addr for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ rte_eth_macaddr_get(bond_port->pmdid,
+ (struct ether_addr *)bond_port->macaddr);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int set_bond_xmitpolicy(uint8_t bonded_port_id, uint8_t policy)
+{
+
+ l2_phy_interface_t *bond_port;
+ bond_port = ifm_get_port(bonded_port_id);
+ //bond_port = ifm.port_list[bonded_port_id];
+ int ret = 0;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->xmit_policy == policy) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old policy value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_xmit_policy_set(bond_port->pmdid, policy) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set policy for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ ret = rte_eth_bond_xmit_policy_get(bond_port->pmdid);
+ if (ret < 0) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_xmit_policy_set failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->xmit_policy = policy;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_xmitpolicy(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: RD Acquiring lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s:Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint8_t policy = bond_port->bond_config->xmit_policy;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s:Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return policy;
+}
+
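+/*
+ * Sketch: selecting a layer-2/3 transmit hash for the balance/802.3ad modes.
+ * BALANCE_XMIT_POLICY_LAYER23 is DPDK's constant; bond id 0 is a placeholder.
+ *
+ * if (set_bond_xmitpolicy(0, BALANCE_XMIT_POLICY_LAYER23) == IFM_SUCCESS)
+ *         RTE_LOG(INFO, IFM, "xmit policy is now %d\n\r",
+ *                 get_bond_xmitpolicy(0));
+ */
+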
+int set_bond_link_montitor_frequency(uint8_t bonded_port_id,
+ uint32_t internal_ms)
+{
+ l2_phy_interface_t *bond_port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+// bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+ int ret = 0;
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->internal_ms == internal_ms) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old frequency value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_link_monitoring_set(bond_port->pmdid, internal_ms) < 0) {
+ RTE_LOG(ERR, IFM,
+ "%s: Failed to set link monitor frequency for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ ret = rte_eth_bond_link_monitoring_get(bond_port->pmdid);
+ if (ret < 0) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_link_monitoring_get failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->internal_ms = internal_ms;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_link_monitor_frequency(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+// bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t internal_ms = bond_port->bond_config->internal_ms;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return internal_ms;
+}
+
+int set_bond_linkdown_delay(uint8_t bonded_port_id, uint32_t delay_ms)
+{
+ l2_phy_interface_t *bond_port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+// bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+ int delay = 0;
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->link_down_delay_ms == delay_ms) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old delay value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_link_down_prop_delay_set(bond_port->pmdid, delay_ms) <
+ 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set delay for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ delay = rte_eth_bond_link_down_prop_delay_get(bond_port->pmdid);
+ if (delay < 0) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_link_down_prop_delay_get failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->link_down_delay_ms = delay;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return IFM_SUCCESS;
+}
+
+int get_bond_link_down_delay(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ //bond_port = ifm.port_list[bonded_port_id];
+ bond_port = ifm_get_port(bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return IFM_FAILURE;
+ }
+ uint32_t delay_ms = bond_port->bond_config->link_down_delay_ms;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return delay_ms;
+
+}
+
+int set_bond_linkup_delay(uint8_t bonded_port_id, uint32_t delay_ms)
+{
+ l2_phy_interface_t *bond_port;
+ int delay = 0;
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ if (bond_port == NULL) {
+ RTE_LOG(ERR, IFM, "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if (bond_port->bond_config->link_up_delay_ms == delay_ms) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(INFO, IFM,
+ "%s: For port %u, old delay value and new value are same\n\r",
+ __FUNCTION__, bonded_port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+ }
+ if (rte_eth_bond_link_up_prop_delay_set(bond_port->pmdid, delay_ms) < 0) {
+ RTE_LOG(ERR, IFM, "%s: Failed to set delay for port %u\n\r",
+ __FUNCTION__, bond_port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ delay = rte_eth_bond_link_up_prop_delay_get(bond_port->pmdid);
+ if (delay < 0) {
+ RTE_LOG(INFO, IFM,
+ "%s: rte_eth_bond_link_up_prop_delay_get failed\n\r",
+ __FUNCTION__);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ bond_port->bond_config->link_up_delay_ms = delay;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_SUCCESS;
+}
+
+int get_bond_link_up_delay(uint8_t bonded_port_id)
+{
+ l2_phy_interface_t *bond_port;
+ uint32_t delay_ms;
+
+ bond_port = ifm_get_port(bonded_port_id);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ if (bond_port == NULL) {
+ if (ifm_debug & IFM_DEBUG) {
+ RTE_LOG(ERR, IFM,
+ "%s: Given bond port %u is not available in"
+ " port list.\n\r", __FUNCTION__,
+ bonded_port_id);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ delay_ms = bond_port->bond_config->link_up_delay_ms;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return delay_ms;
+}
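+/*
+ * Sketch: tuning link supervision on a bond. The 100 ms monitor interval and
+ * 500 ms propagation delays are illustrative values only.
+ *
+ * set_bond_link_montitor_frequency(0, 100);
+ * set_bond_linkdown_delay(0, 500);
+ * set_bond_linkup_delay(0, 500);
+ * RTE_LOG(INFO, IFM, "bond 0 monitor interval: %d ms\n\r",
+ *         get_bond_link_monitor_frequency(0));
+ */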
diff --git a/common/VIL/l2l3_stack/build/.interface.o.d b/common/VIL/l2l3_stack/build/.interface.o.d
new file mode 100644
index 00000000..582958f4
--- /dev/null
+++ b/common/VIL/l2l3_stack/build/.interface.o.d
@@ -0,0 +1,180 @@
+dep_interface.o = \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.c \
+ /usr/include/stdc-predef.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_config.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /usr/include/stdio.h /usr/include/features.h \
+ /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+ /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h \
+ /usr/include/x86_64-linux-gnu/bits/types.h \
+ /usr/include/x86_64-linux-gnu/bits/typesizes.h /usr/include/libio.h \
+ /usr/include/_G_config.h /usr/include/wchar.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdarg.h \
+ /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \
+ /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+ /usr/include/x86_64-linux-gnu/bits/waitstatus.h /usr/include/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap-16.h \
+ /usr/include/x86_64-linux-gnu/sys/types.h /usr/include/time.h \
+ /usr/include/x86_64-linux-gnu/sys/select.h \
+ /usr/include/x86_64-linux-gnu/bits/select.h \
+ /usr/include/x86_64-linux-gnu/bits/sigset.h \
+ /usr/include/x86_64-linux-gnu/bits/time.h \
+ /usr/include/x86_64-linux-gnu/sys/sysmacros.h \
+ /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \
+ /usr/include/x86_64-linux-gnu/bits/stdlib-float.h /usr/include/string.h \
+ /usr/include/xlocale.h /usr/lib/gcc/x86_64-linux-gnu/5/include/stdint.h \
+ /usr/include/stdint.h /usr/include/x86_64-linux-gnu/bits/wchar.h \
+ /usr/include/inttypes.h /usr/include/x86_64-linux-gnu/sys/queue.h \
+ /usr/include/netinet/in.h /usr/include/x86_64-linux-gnu/sys/socket.h \
+ /usr/include/x86_64-linux-gnu/sys/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/socket.h \
+ /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+ /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+ /usr/include/x86_64-linux-gnu/asm/socket.h \
+ /usr/include/asm-generic/socket.h \
+ /usr/include/x86_64-linux-gnu/asm/sockios.h \
+ /usr/include/asm-generic/sockios.h \
+ /usr/include/x86_64-linux-gnu/bits/in.h /usr/include/setjmp.h \
+ /usr/include/x86_64-linux-gnu/bits/setjmp.h /usr/include/ctype.h \
+ /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \
+ /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \
+ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \
+ /usr/include/getopt.h /usr/include/signal.h \
+ /usr/include/x86_64-linux-gnu/bits/signum.h \
+ /usr/include/x86_64-linux-gnu/bits/siginfo.h \
+ /usr/include/x86_64-linux-gnu/bits/sigaction.h \
+ /usr/include/x86_64-linux-gnu/bits/sigcontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigstack.h \
+ /usr/include/x86_64-linux-gnu/sys/ucontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigthread.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdbool.h \
+ /usr/include/arpa/inet.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h \
+ /usr/include/limits.h /usr/include/x86_64-linux-gnu/bits/posix1_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/x86_64-linux-gnu/bits/posix2_lim.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/emmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memory.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_dom0_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memcpy.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_vect.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/x86intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ia32intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ammintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/smmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/popcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/wmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/immintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512fintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512erintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512pfintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512cdintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512bwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512dqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlbwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vldqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmavlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmivlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/shaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lzcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmi2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/f16cintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rtmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xtestintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm3dnow.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/prfchwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fma4intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xopintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lwpintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tbmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rdseedintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fxsrintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/adxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clwbintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pcommitintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clflushoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavesintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavecintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mwaitxintrin.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memzone.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eal.h \
+ /usr/include/sched.h /usr/include/x86_64-linux-gnu/bits/sched.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_per_lcore.h \
+ /usr/include/pthread.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_launch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_debug.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lcore.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pci.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_random.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rtm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ring.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_devargs.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev_info.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_errno.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_bond.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/lib_arp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pipeline.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ip.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/tsx.h
diff --git a/common/VIL/l2l3_stack/build/.l2_proto.o.d b/common/VIL/l2l3_stack/build/.l2_proto.o.d
new file mode 100644
index 00000000..13bcf504
--- /dev/null
+++ b/common/VIL/l2l3_stack/build/.l2_proto.o.d
@@ -0,0 +1,175 @@
+dep_l2_proto.o = \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.c \
+ /usr/include/stdc-predef.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_config.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.h \
+ /usr/include/stdio.h /usr/include/features.h \
+ /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+ /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h \
+ /usr/include/x86_64-linux-gnu/bits/types.h \
+ /usr/include/x86_64-linux-gnu/bits/typesizes.h /usr/include/libio.h \
+ /usr/include/_G_config.h /usr/include/wchar.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdarg.h \
+ /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \
+ /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+ /usr/include/x86_64-linux-gnu/bits/waitstatus.h /usr/include/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap-16.h \
+ /usr/include/x86_64-linux-gnu/sys/types.h /usr/include/time.h \
+ /usr/include/x86_64-linux-gnu/sys/select.h \
+ /usr/include/x86_64-linux-gnu/bits/select.h \
+ /usr/include/x86_64-linux-gnu/bits/sigset.h \
+ /usr/include/x86_64-linux-gnu/bits/time.h \
+ /usr/include/x86_64-linux-gnu/sys/sysmacros.h \
+ /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \
+ /usr/include/x86_64-linux-gnu/bits/stdlib-float.h /usr/include/string.h \
+ /usr/include/xlocale.h /usr/lib/gcc/x86_64-linux-gnu/5/include/stdint.h \
+ /usr/include/stdint.h /usr/include/x86_64-linux-gnu/bits/wchar.h \
+ /usr/include/inttypes.h /usr/include/x86_64-linux-gnu/sys/queue.h \
+ /usr/include/netinet/in.h /usr/include/x86_64-linux-gnu/sys/socket.h \
+ /usr/include/x86_64-linux-gnu/sys/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/socket.h \
+ /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+ /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+ /usr/include/x86_64-linux-gnu/asm/socket.h \
+ /usr/include/asm-generic/socket.h \
+ /usr/include/x86_64-linux-gnu/asm/sockios.h \
+ /usr/include/asm-generic/sockios.h \
+ /usr/include/x86_64-linux-gnu/bits/in.h /usr/include/setjmp.h \
+ /usr/include/x86_64-linux-gnu/bits/setjmp.h /usr/include/ctype.h \
+ /usr/include/errno.h /usr/include/x86_64-linux-gnu/bits/errno.h \
+ /usr/include/linux/errno.h /usr/include/x86_64-linux-gnu/asm/errno.h \
+ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \
+ /usr/include/getopt.h /usr/include/signal.h \
+ /usr/include/x86_64-linux-gnu/bits/signum.h \
+ /usr/include/x86_64-linux-gnu/bits/siginfo.h \
+ /usr/include/x86_64-linux-gnu/bits/sigaction.h \
+ /usr/include/x86_64-linux-gnu/bits/sigcontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigstack.h \
+ /usr/include/x86_64-linux-gnu/sys/ucontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigthread.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdbool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h \
+ /usr/include/limits.h /usr/include/x86_64-linux-gnu/bits/posix1_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/x86_64-linux-gnu/bits/posix2_lim.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/emmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memory.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_dom0_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memcpy.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_vect.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/x86intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ia32intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ammintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/smmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/popcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/wmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/immintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512fintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512erintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512pfintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512cdintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512bwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512dqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlbwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vldqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmavlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmivlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/shaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lzcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmi2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/f16cintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rtmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xtestintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm3dnow.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/prfchwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fma4intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xopintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lwpintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tbmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rdseedintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fxsrintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/adxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clwbintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pcommitintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clflushoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavesintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavecintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mwaitxintrin.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memzone.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eal.h \
+ /usr/include/sched.h /usr/include/x86_64-linux-gnu/bits/sched.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_per_lcore.h \
+ /usr/include/pthread.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_launch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_debug.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lcore.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pci.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_random.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rtm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ring.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_devargs.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev_info.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ip.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /usr/include/arpa/inet.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_errno.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_bond.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h
diff --git a/common/VIL/l2l3_stack/build/.main.o.d b/common/VIL/l2l3_stack/build/.main.o.d
new file mode 100644
index 00000000..9d27accd
--- /dev/null
+++ b/common/VIL/l2l3_stack/build/.main.o.d
@@ -0,0 +1,209 @@
+dep_main.o = \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/main.c \
+ /usr/include/stdc-predef.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_config.h \
+ /usr/include/stdio.h /usr/include/features.h \
+ /usr/include/x86_64-linux-gnu/sys/cdefs.h \
+ /usr/include/x86_64-linux-gnu/bits/wordsize.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs.h \
+ /usr/include/x86_64-linux-gnu/gnu/stubs-64.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stddef.h \
+ /usr/include/x86_64-linux-gnu/bits/types.h \
+ /usr/include/x86_64-linux-gnu/bits/typesizes.h /usr/include/libio.h \
+ /usr/include/_G_config.h /usr/include/wchar.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdarg.h \
+ /usr/include/x86_64-linux-gnu/bits/stdio_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/sys_errlist.h /usr/include/stdlib.h \
+ /usr/include/x86_64-linux-gnu/bits/waitflags.h \
+ /usr/include/x86_64-linux-gnu/bits/waitstatus.h /usr/include/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/endian.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap.h \
+ /usr/include/x86_64-linux-gnu/bits/byteswap-16.h \
+ /usr/include/x86_64-linux-gnu/sys/types.h /usr/include/time.h \
+ /usr/include/x86_64-linux-gnu/sys/select.h \
+ /usr/include/x86_64-linux-gnu/bits/select.h \
+ /usr/include/x86_64-linux-gnu/bits/sigset.h \
+ /usr/include/x86_64-linux-gnu/bits/time.h \
+ /usr/include/x86_64-linux-gnu/sys/sysmacros.h \
+ /usr/include/x86_64-linux-gnu/bits/pthreadtypes.h /usr/include/alloca.h \
+ /usr/include/x86_64-linux-gnu/bits/stdlib-float.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdint.h /usr/include/stdint.h \
+ /usr/include/x86_64-linux-gnu/bits/wchar.h /usr/include/inttypes.h \
+ /usr/include/string.h /usr/include/xlocale.h \
+ /usr/include/x86_64-linux-gnu/sys/queue.h /usr/include/errno.h \
+ /usr/include/x86_64-linux-gnu/bits/errno.h /usr/include/linux/errno.h \
+ /usr/include/x86_64-linux-gnu/asm/errno.h \
+ /usr/include/asm-generic/errno.h /usr/include/asm-generic/errno-base.h \
+ /usr/include/getopt.h /usr/include/signal.h \
+ /usr/include/x86_64-linux-gnu/bits/signum.h \
+ /usr/include/x86_64-linux-gnu/bits/siginfo.h \
+ /usr/include/x86_64-linux-gnu/bits/sigaction.h \
+ /usr/include/x86_64-linux-gnu/bits/sigcontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigstack.h \
+ /usr/include/x86_64-linux-gnu/sys/ucontext.h \
+ /usr/include/x86_64-linux-gnu/bits/sigthread.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/stdbool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /usr/include/ctype.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/limits.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include-fixed/syslimits.h \
+ /usr/include/limits.h /usr/include/x86_64-linux-gnu/bits/posix1_lim.h \
+ /usr/include/x86_64-linux-gnu/bits/local_lim.h \
+ /usr/include/linux/limits.h \
+ /usr/include/x86_64-linux-gnu/bits/posix2_lim.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/emmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_vect.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/x86intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ia32intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/ammintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/smmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/popcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/wmmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/immintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512fintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512erintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512pfintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512cdintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512bwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512dqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vlbwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vldqintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512ifmavlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/avx512vbmivlintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/shaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lzcntintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmiintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/bmi2intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fmaintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/f16cintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rtmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xtestintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mm3dnow.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/prfchwintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fma4intrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xopintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/lwpintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/tbmintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/rdseedintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/fxsrintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsaveoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/adxintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clwbintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/pcommitintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/clflushoptintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavesintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/xsavecintrin.h \
+ /usr/lib/gcc/x86_64-linux-gnu/5/include/mwaitxintrin.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_byteorder.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_byteorder_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memory.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_dom0_common.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memcpy.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_memzone.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eal.h \
+ /usr/include/sched.h /usr/include/x86_64-linux-gnu/bits/sched.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_per_lcore.h \
+ /usr/include/pthread.h /usr/include/x86_64-linux-gnu/bits/setjmp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_launch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_atomic.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_atomic_64.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cycles.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_debug.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_log.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_branch_prediction.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_prefetch.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lcore.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/exec-env/rte_interrupts.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pci.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_random.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mbuf.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_mempool.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_spinlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rtm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ring.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_devargs.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ether.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_dev_info.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_ip.h \
+ /usr/include/netinet/in.h /usr/include/x86_64-linux-gnu/sys/socket.h \
+ /usr/include/x86_64-linux-gnu/sys/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/uio.h \
+ /usr/include/x86_64-linux-gnu/bits/socket.h \
+ /usr/include/x86_64-linux-gnu/bits/socket_type.h \
+ /usr/include/x86_64-linux-gnu/bits/sockaddr.h \
+ /usr/include/x86_64-linux-gnu/asm/socket.h \
+ /usr/include/asm-generic/socket.h \
+ /usr/include/x86_64-linux-gnu/asm/sockios.h \
+ /usr/include/asm-generic/sockios.h \
+ /usr/include/x86_64-linux-gnu/bits/in.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_tcp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_udp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_string_fns.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_cpuflags.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_timer.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/lib_arp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_pipeline.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l2_proto.h \
+ /usr/include/setjmp.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_malloc.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_ctrl.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /usr/include/arpa/inet.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_errno.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port_ethdev.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_port.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_eth_bond.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/generic/rte_rwlock.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_spinlock.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/interface.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_common.h \
+ /usr/include/x86_64-linux-gnu/sys/param.h \
+ /usr/include/x86_64-linux-gnu/bits/param.h /usr/include/linux/param.h \
+ /usr/include/x86_64-linux-gnu/asm/param.h \
+ /usr/include/asm-generic/param.h /usr/include/unistd.h \
+ /usr/include/x86_64-linux-gnu/bits/posix_opt.h \
+ /usr/include/x86_64-linux-gnu/bits/environments.h \
+ /usr/include/x86_64-linux-gnu/bits/confname.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_hash.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_jhash.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table_hash.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table_lpm.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm4.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lpm.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_compat.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lpm_sse.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_lpm6.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm6.h \
+ /home/ubuntu/dpdk-16.07/x86_64-native-linuxapp-gcc/include/rte_table_lpm_ipv6.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm4.h \
+ /home/ubuntu/feb_cgnat/csig_sdnd_benchmarking-industrial_standard_benchmarking/common/VIL/l2l3_stack/l3fwd_lpm6.h
diff --git a/common/VIL/l2l3_stack/hle.c b/common/VIL/l2l3_stack/hle.c
new file mode 100644
index 00000000..a0661b32
--- /dev/null
+++ b/common/VIL/l2l3_stack/hle.c
@@ -0,0 +1,43 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include "tsx.h"
+//#include "hle.h"
+#include <xmmintrin.h>
+
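+/*
+ * Hardware Lock Elision (HLE) based spin lock built on GCC __atomic
+ * builtins. mutex_val toggles between 0 (free) and 1 (held); the HLE
+ * acquire/release hints let supporting CPUs elide the lock and run the
+ * critical section transactionally, falling back to a plain spin lock
+ * on abort.
+ *
+ * Illustrative usage (sketch only):
+ *   hle_init();
+ *   hle_lock();
+ *   ... critical section ...
+ *   hle_release();
+ */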
+void hle_init(void)
+{
+ mutex_val = 0;
+}
+
+int hle_lock(void)
+{
+ while (__atomic_exchange_n
+ (&mutex_val, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
+ _mm_pause();
+ return TRUE;
+}
+
+int hle_release(void)
+{
+ __atomic_store_n(&mutex_val, 0,
+ __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
+ return TRUE;
+}
+
+int is_hle_locked(void)
+{
+ return (mutex_val == 0) ? FALSE : TRUE;
+}
diff --git a/common/VIL/l2l3_stack/hle.h b/common/VIL/l2l3_stack/hle.h
new file mode 100644
index 00000000..21da710d
--- /dev/null
+++ b/common/VIL/l2l3_stack/hle.h
@@ -0,0 +1,40 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#define HLE_TRUE 1
+#define HLE_FALSE 0
+
+volatile int mutex_val;
+/*
+ * Initialize the HLE lock variable
+ * @param void
+ */
+void hle_init(void);
+/*
+ * Acquire the HLE lock
+ * @param void
+ */
+int hle_lock(void);
+/*
+ * Release the held lock
+ * @param void
+ */
+int hle_release(void);
+/*
+ * Check whether the lock is held
+ * @param void
+ */
+int is_hle_locked(void);
diff --git a/common/VIL/l2l3_stack/interface.c b/common/VIL/l2l3_stack/interface.c
new file mode 100644
index 00000000..84c390da
--- /dev/null
+++ b/common/VIL/l2l3_stack/interface.c
@@ -0,0 +1,1478 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include <interface.h>
+#include <rte_byteorder.h>
+#include <lib_arp.h>
+#include <tsx.h>
+
+interface_main_t ifm;
+int USE_RTM_LOCKS = 0;
+rte_rwlock_t rwlock;
+uint8_t ifm_debug;
+static int prev_state;
+
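+/*
+ * Enable or disable one of the IFM_DEBUG_* trace categories at run time.
+ */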
+void config_ifm_debug(int dbg, int flag)
+{
+ switch (dbg) {
+ case IFM_DEBUG_CONFIG:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG_CONFIG;
+ } else {
+ ifm_debug &= ~IFM_DEBUG_CONFIG;
+ }
+ break;
+ case IFM_DEBUG_RXTX:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG_RXTX;
+ } else {
+ ifm_debug &= ~IFM_DEBUG_RXTX;
+ }
+ break;
+ case IFM_DEBUG_LOCKS:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG_LOCKS;
+ } else {
+ ifm_debug &= ~IFM_DEBUG_LOCKS;
+ }
+ break;
+ case IFM_DEBUG:
+ if (flag) {
+ ifm_debug |= IFM_DEBUG;
+ } else {
+ ifm_debug &= ~IFM_DEBUG;
+ }
+ break;
+ }
+}
+
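+/*
+ * Initialize the interface manager: select RTM or rwlock based locking,
+ * clear the port list and record how many ports the EAL discovered
+ * during PCI probing.
+ */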
+void ifm_init(void)
+{
+ int i = 0;
+ config_ifm_debug(IFM_DEBUG_CONFIG, 1);
+ if (can_use_intel_core_4th_gen_features()) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+			RTE_LOG(INFO, IFM, "TSX supported but not currently used...\n\r");
+ USE_RTM_LOCKS = 0;
+ } else {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "TSX not supported\n\r");
+ USE_RTM_LOCKS = 0;
+ }
+ if (USE_RTM_LOCKS)
+ rtm_init();
+ else
+ rte_rwlock_init(&rwlock);
+
+ for (i = 0; i < IFM_MAX_PORTARR_SZ; i++) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ ifm.port_list[i] = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ ifm.nport_intialized = rte_eth_dev_count();
+ ifm.nport_configured = 0;
+ RTE_LOG(INFO, IFM, "IFM_INIT: Number of ports initialized during "
+ "PCI probing %u.\n\r", ifm.nport_intialized);
+}
+
+void ifm_remove_port_details(uint8_t portid)
+{
+ if (ifm.port_list[portid] != NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ l2_phy_interface_t *port = ifm.port_list[portid];
+ ifm.port_list[portid] = NULL;
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: NULL set for port %u\n\r",
+ __FUNCTION__, portid);
+ rte_free(port);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+				"%s: Failed to remove port details. Port %u info"
+				" is already NULL.\n\r", __FUNCTION__, portid);
+ }
+}
+
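+/*
+ * Look up the l2_phy_interface_t entry for a port id under the read lock.
+ * Returns NULL if the port has not been configured yet.
+ */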
+l2_phy_interface_t *ifm_get_port(uint8_t port_id)
+{
+ l2_phy_interface_t *port = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ port = ifm.port_list[port_id];
+
+ if (port == NULL) {
+ /*RTE_LOG(ERR, IFM, "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+ }
+ if (port->pmdid == port_id) {
+ /*RTE_LOG(INFO, IFM, "%s: Port %u found....\n\r",
+ __FUNCTION__, port_id); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+ } else {
+
+/* RTE_LOG(INFO, IFM,"%s: Mismatch given port %u port in loc %u\n\r",__FUNCTION__,port_id,
+ ifm.port_list[port_id]->pmdid);
+*/
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+}
+
+l2_phy_interface_t *ifm_get_first_port(void)
+{
+ l2_phy_interface_t *port = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ port = ifm.port_list[0];
+ if (port == NULL) {
+ /*RTE_LOG(ERR, IFM, "%s: Port info not found... configure it first.\n\r",
+ __FUNCTION__); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+ }
+ /*RTE_LOG(ERR, IFM, "%s: Port %u info is found...%p\n\r",
+ __FUNCTION__, port->pmdid, port); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+}
+
+l2_phy_interface_t *ifm_get_next_port(uint8_t port_id)
+{
+ l2_phy_interface_t *port = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ port = ifm.port_list[port_id + 1];
+ if (port == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+ }
+ /*RTE_LOG(ERR, IFM, "%s: Port %u info is found...\n\r",
+ __FUNCTION__, port_id); */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+}
+
+l2_phy_interface_t *ifm_get_port_by_name(const char *name)
+{
+ l2_phy_interface_t *port = NULL;
+ int i;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ for (i = 0; i < RTE_MAX_ETHPORTS && ifm.port_list[i]; i++) {
+ port = ifm.port_list[i];
+ if (strcmp(name, port->ifname) == 0) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "FOUND! port %u %s\n\r",
+ port->pmdid, port->ifname);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return port;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return NULL;
+}
+
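+/*
+ * Link State Change callback registered via rte_eth_dev_callback_register.
+ * It refreshes the cached link state, notifies registered clients, updates
+ * bonding master/slave bookkeeping and sends a gratuitous ARP once when an
+ * IPv4-enabled port comes back up.
+ */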
+void lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+ void *param)
+{
+ struct rte_eth_link link;
+ l2_phy_interface_t *port;
+ int nclients = ifm.nclient;
+ int i;
+
+ RTE_SET_USED(param);
+ RTE_SET_USED(type);
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ rte_eth_link_get(port_id, &link);
+ for (i = 0; i < nclients; i++)
+ ifm.if_client[i].cb_linkupdate(port_id, link.link_status);
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (link.link_status) {
+ port->link_status = IFM_ETH_LINK_UP;
+ port->link_speed = link.link_speed;
+ port->link_duplex = link.link_duplex;
+ RTE_LOG(INFO, IFM,
+ "EVENT-- PORT %u Link UP - Speed %u Mbps - %s.\n",
+ port_id, (unsigned)link.link_speed,
+ (link.link_duplex ==
+ ETH_LINK_FULL_DUPLEX) ? ("full-duplex")
+ : ("half-duplex"));
+ if (port->flags & IFM_MASTER) {
+ port->flags |= IFM_BONDED;
+ port->bond_config->active_slave_count =
+ rte_eth_bond_active_slaves_get(port->pmdid,
+ port->
+ bond_config->
+ active_slaves,
+ RTE_MAX_ETHPORTS);
+ struct ether_addr new_mac;
+ rte_eth_macaddr_get(port->pmdid,
+ (struct ether_addr *)
+ &new_mac);
+ if (memcmp
+ (&new_mac, port->macaddr,
+ sizeof(struct ether_addr))) {
+ RTE_LOG(INFO, IFM,
+ "Bond port %u MAC has changed.\n\r",
+ port->pmdid);
+ } else {
+ RTE_LOG(INFO, IFM,
+ "Bond port %u MAC remains same\n\r",
+ port->pmdid);
+ }
+ }
+ if (port->flags & IFM_SLAVE) {
+ uint8_t master_portid =
+ port->bond_config->bond_portid;
+ struct rte_eth_link linkstatus;
+ rte_eth_link_get(master_portid, &linkstatus);
+				RTE_LOG(INFO, IFM, "Port %u's master (%u) link status is %u\n\r", port_id,
+ master_portid, linkstatus.link_status);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ if (port->ipv4_list != NULL) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "Sending garp on port %u\n\r",
+ port->pmdid);
+ if (!prev_state) {
+ send_gratuitous_arp(port);
+ prev_state = 1;
+ }
+ }
+#if 0
+ else {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "IP is not enabled on port %u, not sending GARP\n\r",
+ port->pmdid);
+ }
+#endif
+ } else {
+ if (port->flags & IFM_MASTER) {
+ port->flags &= ~IFM_BONDED;
+ //RTE_LOG(INFO, IFM, "IFM_MASTER port, resetting IFM_BONDED. %u\n\r", port->flags);
+ }
+ port->link_status = IFM_ETH_LINK_DOWN;
+ RTE_LOG(INFO, IFM, "EVENT-- PORT %u is Link DOWN.\n",
+ port_id);
+ if (port->flags & IFM_SLAVE) {
+ struct rte_eth_link linkstatus;
+ uint8_t master_portid =
+ port->bond_config->bond_portid;
+ rte_eth_link_get_nowait(master_portid,
+ &linkstatus);
+ RTE_LOG(INFO, IFM,
+					"Port %u's master (%u) link status is %u\n\r",
+ port_id, master_portid,
+ linkstatus.link_status);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ prev_state = 0;
+ }
+ }
+ //print_interface_details();
+}
+
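+/*
+ * Administratively bring a port up or down. On admin up the PMD link is
+ * raised (if currently down) and a gratuitous ARP is sent; on admin down
+ * the PMD link is forced down.
+ */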
+void ifm_update_linkstatus(uint8_t port_id, uint16_t linkstatus)
+{
+ struct rte_eth_link link;
+ l2_phy_interface_t *port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ rte_eth_link_get(port_id, &link);
+ if (linkstatus == IFM_ETH_LINK_UP) {
+ port->admin_status = IFM_ETH_LINK_UP;
+ if(!link.link_status) {
+ if (rte_eth_dev_set_link_up(port_id) < 0) {
+ RTE_LOG(INFO, IFM,
+						"%s: Port %u admin up failed\n\r",
+ __FUNCTION__, port->pmdid);
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM,
+ "%s:Port %u admin up...\n\r",
+ __FUNCTION__, port->pmdid);
+ send_gratuitous_arp(port);
+ return;
+ }
+ }
+ } else if (linkstatus == IFM_ETH_LINK_DOWN)
+ {
+ int status;
+ port->admin_status = IFM_ETH_LINK_DOWN;
+ /* need to check the following if */
+ if(link.link_status) {
+ status = rte_eth_dev_set_link_down(port_id);
+ if (status < 0)
+ {
+ rte_panic("(%" PRIu32 "): PMD set link down error %"
+ PRId32 "\n", port_id, status);
+ }
+ }
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+}
+
+void ifm_set_l2_interface_mtu(uint8_t port_id, uint16_t mtu)
+{
+ int ret;
+ l2_phy_interface_t *port;
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+
+ if (port != NULL && port->pmdid == port_id) {
+ ret = rte_eth_dev_set_mtu(port_id, mtu);
+ if (ret != 0)
+ RTE_LOG(INFO, IFM,
+ "set_l2_interface_mtu: Set MTU failed. ret=%d\n",
+ ret);
+ else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Acquiring lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port->mtu = mtu;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return;
+ }
+ }
+}
+
+void ifm_set_port_promisc(uint8_t port_id, uint8_t enable)
+{
+ l2_phy_interface_t *port;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (enable == 1) {
+ rte_eth_promiscuous_enable(port_id);
+ port->promisc = 1;
+ } else {
+ rte_eth_promiscuous_disable(port_id);
+ port->promisc = 0;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+}
+
+int32_t ifm_get_nactive_ports(void)
+{
+ return ifm.nport_configured;
+}
+
+int32_t ifm_get_nports_initialized(void)
+{
+ return ifm.nport_intialized;
+}
+
+uint16_t ifm_receive_bulk_pkts(uint8_t port_id, uint16_t qid,
+ struct rte_mbuf **rx_pkts)
+{
+ uint64_t no_of_rcvd_pkt;
+ no_of_rcvd_pkt =
+ rte_eth_rx_burst(port_id, qid, rx_pkts, IFM_BURST_SIZE);
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_receive_bulk_pkts: port_id %u no_of_rcvd_pkt %lu\n\r",
+ port_id, no_of_rcvd_pkt);
+ return no_of_rcvd_pkt;
+}
+
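+/*
+ * Transmit a burst of packets on the port's default TX queue. If the PMD
+ * accepts fewer packets than requested, the remaining tail is retried a
+ * bounded number of times with a short delay between attempts.
+ */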
+uint16_t ifm_transmit_bulk_pkts(l2_phy_interface_t *port,
+ struct rte_mbuf **tx_pkts, uint64_t npkts)
+{
+ uint32_t burst_tx_delay_time = IFM_BURST_TX_WAIT_US;
+ uint32_t burst_tx_retry_num = IFM_BURST_TX_RETRIES;
+ uint32_t retry;
+ uint32_t no_of_tx_pkt;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ no_of_tx_pkt = rte_eth_tx_burst(port->pmdid, IFM_TX_DEFAULT_Q, tx_pkts,
+ npkts);
+	if (unlikely(no_of_tx_pkt < npkts)) {
+		retry = 0;
+		while (no_of_tx_pkt < npkts
+					 && retry++ < burst_tx_retry_num) {
+			rte_delay_us(burst_tx_delay_time);
+			no_of_tx_pkt +=
+					rte_eth_tx_burst(port->pmdid, IFM_TX_DEFAULT_Q,
+							 &tx_pkts[no_of_tx_pkt],
+							 npkts - no_of_tx_pkt);
+		}
+	}
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_bulk_pkts: no_of_tx_pkt %u\n\r",
+ no_of_tx_pkt);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ return no_of_tx_pkt;
+}
+
+int ifm_transmit_single_pkt(l2_phy_interface_t *port, struct rte_mbuf *tx_pkts)
+{
+ uint64_t tx_npkts = 0;
+ if (tx_pkts == NULL || port == NULL) {
+ RTE_LOG(INFO, IFM,
+			"ifm_transmit_single_pkt: tx_pkts or port is NULL ");
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_single_pkt: port->pmdid %u\n\r",
+ port->pmdid);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_read_lock(&rwlock);
+ }
+ tx_npkts =
+ rte_eth_tx_buffer(port->pmdid, IFM_TX_DEFAULT_Q, port->tx_buffer,
+ tx_pkts);
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_single_pkt: port->pmdid %u No of packets buffered %lu\n\r",
+ port->pmdid, tx_npkts);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RW lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_read_unlock(&rwlock);
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port->n_txpkts +=
+ rte_eth_tx_buffer_flush(port->pmdid, IFM_TX_DEFAULT_Q,
+ port->tx_buffer);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ if (ifm_debug & IFM_DEBUG_RXTX)
+ RTE_LOG(INFO, IFM,
+ "ifm_transmit_single_pkt: no of pkts flushed %lu\n\r",
+ port->n_txpkts);
+ return tx_npkts;
+}
+
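+/*
+ * Attach an IPv4 address to a configured port. The address is pushed onto
+ * the head of the port's ipv4_list and IFM_IPV4_ENABLED is set when the
+ * first address is added. Returns 0 on success, -1 on failure.
+ */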
+int16_t ifm_add_ipv4_port(uint8_t port_id, uint32_t ipaddr, uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv4list_t *ipconf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ ipconf = (ipv4list_t *) rte_zmalloc(NULL, sizeof(ipv4list_t),
+ RTE_CACHE_LINE_SIZE);
+ if (ipconf != NULL) {
+ ipconf->next = NULL;
+ //ipconf->ipaddr = rte_bswap32(ipaddr);
+ ipconf->ipaddr = ipaddr;
+ ipconf->port = port;
+ ipconf->addrlen = addrlen;
+ if (port->ipv4_list == NULL)
+ port->flags |= IFM_IPV4_ENABLED;
+ ipconf->next = (ipv4list_t *) port->ipv4_list;
+ port->ipv4_list = (ipv4list_t *) ipconf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+}
+
+int16_t ifm_remove_ipv4_port(uint8_t port_id, uint32_t ipaddr,
+ uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv4list_t *iplist, *previplist = NULL;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (port->ipv4_list == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+ }
+ iplist = (ipv4list_t *) port->ipv4_list;
+ while (iplist != NULL) {
+ if (addrlen == iplist->addrlen &&
+					memcmp(&iplist->ipaddr, &ipaddr, addrlen) == 0) {
+ if (iplist == port->ipv4_list) {
+ port->ipv4_list = iplist->next;
+ } else {
+ if (previplist != NULL)
+ previplist->next = iplist->next;
+ }
+ port->flags &= ~IFM_IPV4_ENABLED;
+ rte_free(iplist);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ } else {
+ previplist = iplist;
+ iplist = iplist->next;
+ }
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+}
+
+int8_t ifm_add_ipv6_port(uint8_t port_id, uint8_t ip6addr[], uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv6list_t *ip6conf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_lock();
+ } else {
+ rte_rwlock_write_lock(&rwlock);
+ }
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ ip6conf = (ipv6list_t *) rte_zmalloc(NULL, sizeof(ipv6list_t),
+ RTE_CACHE_LINE_SIZE);
+ if (ip6conf != NULL) {
+ ip6conf->next = NULL;
+ memcpy(ip6conf->ipaddr, ip6addr, IFM_IPV6_ADDR_SIZE);
+ ip6conf->port = port;
+ ip6conf->addrlen = addrlen;
+
+ if (port->ipv6_list == NULL) {
+ port->flags |= IFM_IPV6_ENABLED;
+ }
+ ip6conf->next = (ipv6list_t *) port->ipv6_list;
+ port->ipv6_list = (ipv6list_t *) ip6conf;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+}
+
+int16_t ifm_remove_ipv6_port(uint8_t port_id, uint32_t ip6addr,
+ uint32_t addrlen)
+{
+ l2_phy_interface_t *port;
+ ipv6list_t *ip6list, *previp6list = NULL;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ }
+ if (port != NULL && port->pmdid == port_id) {
+ if (port->ipv6_list == NULL) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return -1;
+ }
+ ip6list = (ipv6list_t *) port->ipv6_list;
+ while (ip6list != NULL) {
+ if (addrlen == ip6list->addrlen &&
+					memcmp(&ip6list->ipaddr, &ip6addr, addrlen) == 0) {
+ if (ip6list == port->ipv6_list) {
+ port->ipv6_list = ip6list->next;
+ } else {
+ if (previp6list != NULL)
+ previp6list->next =
+ ip6list->next;
+ }
+ port->flags &= ~IFM_IPV6_ENABLED;
+ rte_free(ip6list);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+
+ if (USE_RTM_LOCKS) {
+ rtm_unlock();
+ } else {
+ rte_rwlock_write_unlock(&rwlock);
+ }
+ return 0;
+ } else {
+ previp6list = ip6list;
+ ip6list = ip6list->next;
+ }
+ }
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return -1;
+}
+
+int32_t ifm_chk_port_ipv4_enabled(uint8_t port_id)
+{
+ l2_phy_interface_t *port;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ RTE_LOG(ERR, IFM,
+ "%s: Port %u info not found... configure it first.\n\r",
+ __FUNCTION__, port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if ((port->flags & IFM_IPV4_ENABLED) == 0) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 0;
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 1;
+ }
+}
+
+int32_t ifm_chk_port_ipv6_enabled(uint8_t port_id)
+{
+ l2_phy_interface_t *port;
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ port = ifm.port_list[port_id];
+ if (port == NULL) {
+ if (ifm_debug & IFM_DEBUG)
+ RTE_LOG(ERR, IFM, "%s: Port %u info not found..."
+ " configure it first.\n\r",
+ __FUNCTION__, port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ if ((port->flags & IFM_IPV6_ENABLED) == 0) {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 0;
+ } else {
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RD lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+ return 1;
+ }
+}
+
+void ifm_register_for_linkupdate(uint32_t clientid,
+ void (*cb_linkupdate) (uint8_t, unsigned int))
+{
+ ifm.if_client[ifm.nclient].cb_linkupdate = cb_linkupdate;
+ ifm.if_client[ifm.nclient].clientid = clientid;
+ ifm.nclient++;
+}
+
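+/*
+ * Bring up a single port: allocate its l2_phy_interface_t entry, configure
+ * the device and its RX/TX queues, register the LSC callback, create the
+ * per-port mempool and TX buffer, start the device and finally cache the
+ * MAC address, link state, MTU and burst RX/TX handlers in the port entry.
+ */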
+int ifm_port_setup(uint8_t port_id, port_config_t *pconfig)
+{
+ int status, sock;
+ char buf[12];
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_link linkstatus;
+ l2_phy_interface_t *port = NULL;
+
+ if (!ifm.nport_intialized) {
+		RTE_LOG(ERR, IFM, "%s: Failed to configure port %u. No ports"
+				" were initialized during PCI probe...\n\r",
+ __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "%s: Configuring port %u with "
+ "nrxq: %u, ntxq: %u\n\r", __FUNCTION__,
+ port_id, pconfig->nrx_queue, pconfig->ntx_queue);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock1 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ if (ifm.port_list[port_id] == NULL) {
+ ifm.port_list[port_id] =
+ (l2_phy_interface_t *) rte_zmalloc(NULL,
+ sizeof
+ (l2_phy_interface_t),
+ RTE_CACHE_LINE_SIZE);
+ ifm.port_list[port_id]->pmdid = port_id;
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock1 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+
+ rte_eth_link_get(port_id, &linkstatus);
+ if (linkstatus.link_status) {
+ if (ifm_debug & IFM_DEBUG_CONFIG) {
+			RTE_LOG(INFO, IFM, "%s: Port %u is up. Stopping it before"
+ " reconfiguring.\n\r", __FUNCTION__, port_id);
+ }
+ rte_eth_dev_stop(port_id);
+ }
+ /*Configure an Ethernet device. rets 0 on success queue */
+ status = rte_eth_dev_configure(port_id, pconfig->nrx_queue,
+ pconfig->ntx_queue, &pconfig->port_conf);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+		RTE_LOG(ERR, IFM, "%s: rte_eth_dev_configure failed"
+			" for port %u.\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ status = rte_eth_dev_callback_register(port_id,
+ RTE_ETH_EVENT_INTR_LSC,
+ lsi_event_callback, NULL);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM, "%s: rte_eth_dev_callback_register()"
+ " failed for port %u.\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ /*promiscuous mode is enabled set it */
+ if (pconfig->promisc)
+ rte_eth_promiscuous_enable(port_id);
+
+ sock = rte_eth_dev_socket_id(port_id);
+ if (sock == -1)
+ RTE_LOG(ERR, IFM, "%s: Warning: rte_eth_dev_socket_id,"
+ " port_id value is"
+			" out of range %u\n\r", __FUNCTION__, port_id);
+ /*Port initialization */
+ int ntxqs;
+ for (ntxqs = 0; ntxqs < pconfig->ntx_queue; ntxqs++) {
+ status = rte_eth_tx_queue_setup(port_id, ntxqs,
+ IFM_TX_DESC_DEFAULT, sock,
+ &(pconfig->tx_conf));
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM, "%s: rte_eth_tx_queue_setup failed"
+ " for port %u\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ }
+ port = ifm_get_port(port_id);
+ if (port == NULL) {
+ RTE_LOG(INFO, IFM, "%s: Port is NULL @ %d\n\r", __FUNCTION__,
+ __LINE__);
+ return IFM_FAILURE;
+ }
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock 2 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+
+ if (port->tx_buf_len == 0) {
+ port->tx_buf_len = RTE_ETH_TX_BUFFER_SIZE(IFM_BURST_SIZE);
+ }
+ port->tx_buffer = rte_zmalloc_socket("tx_buffer", port->tx_buf_len, 0,
+ rte_eth_dev_socket_id(port_id));
+
+ if (port->tx_buffer == NULL) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM, "%s: Failed to allocate tx buffers for"
+ " port %u\n\r", __FUNCTION__, port_id);
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock2 %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ rte_eth_tx_buffer_init(port->tx_buffer, IFM_BURST_SIZE);
+
+ sprintf(buf, "MEMPOOL%d", port_id);
+ port->mempool = rte_mempool_create(buf,
+ pconfig->mempool.pool_size,
+ pconfig->mempool.buffer_size,
+ pconfig->mempool.cache_size,
+ sizeof(struct
+ rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ rte_pktmbuf_init, NULL, sock, 0);
+ if (port->mempool == NULL) {
+ ifm_remove_port_details(port_id);
+		RTE_LOG(ERR, IFM, "%s: rte_mempool_create failed for port"
+ " %u. Error: %s\n\r",
+ __FUNCTION__, port_id, rte_strerror(rte_errno));
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock2 %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ int nrxqs;
+ for (nrxqs = 0; nrxqs < pconfig->nrx_queue; nrxqs++) {
+ status = rte_eth_rx_queue_setup(port_id, nrxqs,
+ IFM_RX_DESC_DEFAULT, sock,
+ &(pconfig->rx_conf),
+ port->mempool);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+ RTE_LOG(ERR, IFM,
+				"%s: rte_eth_rx_queue_setup failed "
+ "for port %u queue %u. Error: %s\n\r",
+ __FUNCTION__, port_id, nrxqs,
+ rte_strerror(rte_errno));
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM,
+ "%s: Releasing WR lock2 %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ return IFM_FAILURE;
+ }
+ }
+ /*Start link */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR lock2 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ status = rte_eth_dev_start(port_id);
+ if (status < 0) {
+ ifm_remove_port_details(port_id);
+		RTE_LOG(ERR, IFM, "%s: rte_eth_dev_start failed for"
+ " port %u.\n\r", __FUNCTION__, port_id);
+ return IFM_FAILURE;
+ }
+ rte_delay_ms(5000);
+ /*Get device info and populate interface structure */
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring WR lock3 @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_write_lock(&rwlock);
+ rte_eth_macaddr_get(port_id, (struct ether_addr *)port->macaddr);
+ if (pconfig->promisc)
+ port->promisc = 1;
+ rte_eth_link_get(port_id, &linkstatus);
+ /*Link status */
+ port->link_duplex = linkstatus.link_duplex;
+ port->link_autoneg = linkstatus.link_autoneg;
+ port->link_speed = linkstatus.link_speed;
+ port->admin_status = pconfig->state;
+
+ /*Get dev_info */
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(port_id, &dev_info);
+ port->min_rx_bufsize = dev_info.min_rx_bufsize;
+ port->max_rx_pktlen = dev_info.max_rx_pktlen;
+ port->max_rx_queues = dev_info.max_rx_queues;
+ port->max_tx_queues = dev_info.max_tx_queues;
+ rte_eth_dev_get_mtu(port_id, &(port->mtu));
+
+ /*Add rx and tx packet function ptrs */
+ port->retrieve_bulk_pkts = &ifm_receive_bulk_pkts;
+ port->transmit_bulk_pkts = &ifm_transmit_bulk_pkts;
+ port->transmit_single_pkt = &ifm_transmit_single_pkt;
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing WR3 lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_write_unlock(&rwlock);
+ RTE_LOG(INFO, IFM, "%s: Port %u is successfully configured.\n\r",
+ __FUNCTION__, port_id);
+ return IFM_SUCCESS;
+}
+
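+/*
+ * Configure every port that the EAL initialized during PCI probing using
+ * the caller supplied per-port configuration array.
+ *
+ * Illustrative call sequence (sketch only; the port_config_t array is
+ * assumed to be filled in by the application):
+ *   ifm_init();
+ *   ifm_register_for_linkupdate(client_id, link_cb);
+ *   if (ifm_configure_ports(pconfig) != IFM_SUCCESS)
+ *           rte_exit(EXIT_FAILURE, "port setup failed\n");
+ */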
+int ifm_configure_ports(port_config_t *pconfig)
+{
+ uint8_t port_id;
+ int status = 0;
+ if (!ifm.nport_intialized) {
+ RTE_LOG(ERR, IFM, "%s, Configuring ports failed. Zero ports "
+			"were initialized during PCI probe", __FUNCTION__);
+ return IFM_FAILURE;
+ }
+ if (pconfig == NULL) {
+ RTE_LOG(ERR, IFM, "%s, Configuring ports failed. "
+ "Param pconfig is NULL\n\r", __FUNCTION__);
+ return IFM_FAILURE;
+ }
+
+ /*Initialize all ports */
+ for (port_id = 0; port_id < ifm.nport_intialized; port_id++) {
+ if (ifm_debug & IFM_DEBUG_CONFIG)
+ RTE_LOG(INFO, IFM, "Call ifm_port_setup %u\n\r",
+ port_id);
+ status =
+ ifm_port_setup(pconfig[port_id].port_id, &pconfig[port_id]);
+ if (status == IFM_SUCCESS)
+ ifm.nport_configured++;
+ }
+ if (!ifm.nport_configured) {
+ RTE_LOG(ERR, IFM, "%s: Zero ports are configured\n\r",
+ __FUNCTION__);
+ return IFM_FAILURE;
+ }
+	RTE_LOG(INFO, IFM, "%s: Number of ports successfully configured:"
+ " %d\n\r", __FUNCTION__, ifm.nport_configured);
+ return IFM_SUCCESS;
+}
+
+void print_interface_details(void)
+{
+ l2_phy_interface_t *port;
+ int i = 0;
+ struct sockaddr_in ip;
+ printf("\n\r");
+
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Acquiring RW lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_lock();
+ else
+ rte_rwlock_read_lock(&rwlock);
+
+ for (i = 0; i < RTE_MAX_ETHPORTS && ifm.port_list[i]; i++) {
+ port = ifm.port_list[i];
+ printf(" %u", port->pmdid);
+ if (port->ifname && strlen(port->ifname)) {
+ printf(" (%s)\t", port->ifname);
+ } else
+ printf("\t\t");
+		printf("MAC:%02x:%02x:%02x:%02x:%02x:%02x Admin state:%s"
+ " Operstate:%s \n\r",
+ port->macaddr[0], port->macaddr[1],
+ port->macaddr[2], port->macaddr[3],
+ port->macaddr[4], port->macaddr[5],
+ port->admin_status ? "UP" : "DOWN",
+ port->link_status ? "UP" : "DOWN");
+ printf("\t\t");
+ printf("Speed: %u, %s-duplex\n\r", port->link_speed,
+ port->link_duplex ? "full" : "half");
+ printf("\t\t");
+
+ if (port->ipv4_list != NULL) {
+ ip.sin_addr.s_addr =
+ (unsigned long)((ipv4list_t *) (port->ipv4_list))->
+ ipaddr;
+ printf("IP: %s/%d", inet_ntoa(ip.sin_addr),
+ ((ipv4list_t *) (port->ipv4_list))->addrlen);
+ } else {
+ printf("IP: NA");
+ }
+
+ printf("\r\n");
+ printf("\t\t");
+ if (port->ipv6_list != NULL) {
+ uint8_t *addr =
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr;
+ printf
+ ("IPv6: %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x",
+ addr[0], addr[1], addr[2], addr[3], addr[4],
+ addr[5], addr[6], addr[7], addr[8], addr[9],
+ addr[10], addr[11], addr[12], addr[13], addr[14],
+ addr[15]);
+ } else {
+ printf("IPv6: NA");
+ }
+
+ if (port->flags & IFM_SLAVE) {
+ printf(" IFM_SLAVE ");
+ printf(" MasterPort: %u",
+ port->bond_config->bond_portid);
+ }
+ if (port->flags & IFM_MASTER) {
+ printf(" IFM_MASTER ");
+ printf(" Mode: %u", port->bond_config->mode);
+ printf(" PrimaryPort: %u", port->bond_config->primary);
+ printf("\n\r");
+ printf("\t\tSlavePortCount: %u",
+ port->bond_config->slave_count);
+ printf(" SlavePorts:");
+ int i;
+ for (i = 0; i < port->bond_config->slave_count; i++) {
+ printf(" %u ", port->bond_config->slaves[i]);
+ }
+ printf(" ActivePortCount: %u",
+ port->bond_config->active_slave_count);
+ printf(" ActivePorts:");
+ for (i = 0; i < port->bond_config->active_slave_count;
+ i++) {
+ printf(" %u ",
+ port->bond_config->active_slaves[i]);
+ }
+ printf("\n\r");
+ printf("\t\t");
+ printf("Link_monitor_freq: %u ms ",
+ port->bond_config->internal_ms);
+ printf(" Link_up_prop_delay: %u ms ",
+ port->bond_config->link_up_delay_ms);
+ printf(" Link_down_prop_delay: %u ms ",
+ port->bond_config->link_down_delay_ms);
+ printf("\n\r");
+ printf("\t\t");
+ printf("Xmit_policy: %u",
+ port->bond_config->xmit_policy);
+ }
+ printf("\n\r");
+ printf("\t\t");
+ printf("n_rxpkts: %" PRIu64 " ,n_txpkts: %" PRIu64 " ,",
+ port->n_rxpkts, port->n_txpkts);
+ struct rte_eth_stats eth_stats;
+ rte_eth_stats_get(port->pmdid, &eth_stats);
+ printf("pkts_in: %" PRIu64 " ,", eth_stats.ipackets);
+ printf("pkts_out: %" PRIu64 " ", eth_stats.opackets);
+ printf("\n\r");
+ printf("\t\t");
+ printf("in_errs: %" PRIu64 " ,", eth_stats.ierrors);
+ printf("in_missed: %" PRIu64 " ,", eth_stats.imissed);
+ printf("out_errs: %" PRIu64 " ,", eth_stats.oerrors);
+ printf("mbuf_errs: %" PRIu64 " ", eth_stats.rx_nombuf);
+ printf("\n\r");
+ printf("\n\r");
+ }
+ if (ifm_debug & IFM_DEBUG_LOCKS)
+ RTE_LOG(INFO, IFM, "%s: Releasing RW lock @ %d\n\r",
+ __FUNCTION__, __LINE__);
+ if (USE_RTM_LOCKS)
+ rtm_unlock();
+ else
+ rte_rwlock_read_unlock(&rwlock);
+}
diff --git a/common/VIL/l2l3_stack/interface.h b/common/VIL/l2l3_stack/interface.h
new file mode 100644
index 00000000..0f654fa1
--- /dev/null
+++ b/common/VIL/l2l3_stack/interface.h
@@ -0,0 +1,873 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef INTERFACE_H
+#define INTERFACE_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <sys/socket.h>
+#include <arpa/inet.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_eth_ctrl.h>
+#include <rte_errno.h>
+#include <rte_port_ethdev.h>
+#include <rte_eth_bond.h>
+#include <rte_rwlock.h>
+
+#define RTE_LOGTYPE_IFM RTE_LOGTYPE_USER1
+#define IFM_SUCCESS 0
+#define IFM_FAILURE -1
+/*
+ * IFM Ether link related macros
+ */
+#define IFM_ETH_LINK_HALF_DUPLEX 0
+#define IFM_ETH_LINK_FULL_DUPLEX 1
+#define IFM_ETH_LINK_DOWN 0
+#define IFM_ETH_LINK_UP 1
+#define IFM_ETH_LINK_FIXED 0
+
+/*
+ * Bonding
+ */
+#define IFM_SLAVE (1<<0)
+#define IFM_MASTER (1<<1)
+#define IFM_BONDED (1<<2)
+#define IFM_IPV4_ENABLED (1<<3)
+#define IFM_IPV6_ENABLED (1<<4)
+
+#define IFM_BONDING_MODE_ROUND_ROBIN 0
+#define IFM_BONDING_MODE_ACTIVE_BACKUP 1
+#define IFM_BONDING_MODE_BALANCE 2
+#define IFM_BONDING_MODE_BROADCAST 3
+#define IFM_BONDING_MODE_8023AD 4
+#define IFM_BONDING_MODE_TLB 5
+#define IFM_BONDING_MODE_ALB 6
+
+#define IFM_BALANCE_XMIT_POLICY_LAYER2 0
+#define IFM_BALANCE_XMIT_POLICY_LAYER23 1
+#define IFM_BALANCE_XMIT_POLICY_LAYER34 2
+/*
+ * Queue related macros
+ */
+#define IFM_QUEUE_STAT_CNTRS 16
+#define IFM_TX_DEFAULT_Q 0
+#define IFM_RX_DEFAULT_Q 0
+#define IFM_RX_DESC_DEFAULT 128
+#define IFM_TX_DESC_DEFAULT 512
+#define IFM_BURST_SIZE 32
+#define IFM_BURST_TX_WAIT_US 1
+#define IFM_BURST_TX_RETRIES 64
+#define BURST_TX_DRAIN_US 100
+
+/*
+ * Misc
+ */
+#define IFM_IFNAME_LEN 16
+#define IFM_CLIENT_NAME 20
+#define IFM_MAX_CLIENT 10
+
+#define IFM_ETHER_ADDR_SIZE 6
+#define IFM_IPV6_ADDR_SIZE 16
+
+#define IFM_DEBUG_CONFIG (1<<0)
+#define IFM_DEBUG_RXTX (1<<1)
+#define IFM_DEBUG_LOCKS (1<<2)
+#define IFM_DEBUG (1<<4)
+#define IFM_MAX_PORTARR_SZ 64
+/**
+ * Mempool configuration details:
+ * Stores the mempool configuration information for the port.
+ */
+struct mempool_config {
+ uint32_t pool_size;/**< The number of elements in the mempool.*/
+ uint32_t buffer_size;
+ /**< The size of an element*/
+ uint32_t cache_size;
+ /**< Cache size */
+ uint32_t cpu_socket_id;
+ /**< The socket identifier in the case of NUMA.*/
+} __rte_cache_aligned;
+
+/**
+ * Port configuration:
+ * Stores the configuration information for the port.
+ * This structure is used during port and tx/rx queue setup.
+ */
+typedef struct _port_config_ {
+ uint8_t port_id; /**< port id or pmd id to be configured */
+ int nrx_queue; /**< no of rx queues */
+ int ntx_queue; /**< no of tx queues */
+ uint32_t tx_buf_size;
+ uint32_t state; /**< noshut/shut the admin state of the port*/
+	uint32_t promisc;	/**< enable/disable promisc mode*/
+ struct mempool_config mempool;
+ /**< Mempool configurations */
+ struct rte_eth_conf port_conf;
+ /**< port configuration */
+ struct rte_eth_rxconf rx_conf;
+ /**< rx queue configurations */
+ struct rte_eth_txconf tx_conf;
+ /**< tx queue configurations */
+} port_config_t;
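+
+/*
+ * Illustrative only: a minimal sketch of how a caller might fill a
+ * port_config_t before handing it to ifm_port_setup(). The concrete values
+ * below (queue counts, mempool sizing, admin state) are assumptions made for
+ * the example, not recommended defaults.
+ *
+ *	port_config_t pconf;
+ *	memset(&pconf, 0, sizeof(pconf));
+ *	pconf.port_id = 0;
+ *	pconf.nrx_queue = 1;
+ *	pconf.ntx_queue = 1;
+ *	pconf.state = 1;                      // assumed: admin up
+ *	pconf.promisc = 1;                    // enable promiscuous mode
+ *	pconf.mempool.pool_size = 32 * 1024;
+ *	pconf.mempool.buffer_size = 2048 + RTE_PKTMBUF_HEADROOM;
+ *	pconf.mempool.cache_size = 256;
+ *	pconf.mempool.cpu_socket_id = rte_socket_id();
+ *	if (ifm_port_setup(pconf.port_id, &pconf) != IFM_SUCCESS)
+ *		rte_panic("port setup failed\n");
+ */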
+
+/**
+ * Port statistics:
+ * if_stats structure is a member variable of structure l2_phy_interface_t.
+ * Used to maintain stats retrieved from rte_eth_stats structure.
+ */
+typedef struct _if_stats_ {
+ uint64_t rx_npkts;/**< Total number of successfully received packets.*/
+	uint64_t tx_npkts;/**< Total number of successfully transmitted packets. */
+ uint64_t rx_bytes;/**< Total number of successfully received bytes.*/
+ uint64_t tx_bytes;/**< Total number of successfully transmitted bytes.*/
+ uint64_t rx_missed_pkts;
+	/**< number of packets dropped by HW because the RX queues are full*/
+ uint64_t rx_err_pkts;/**< Total number of erroneous received packets. */
+ uint64_t rx_nobuf_fail;/**< Total number of RX mbuf allocation failures. */
+ uint64_t tx_failed_pkts;/**< Total number of failed transmitted packets.*/
+ uint64_t q_rxpkts[IFM_QUEUE_STAT_CNTRS];/**< Total number of queue RX packets.*/
+ uint64_t q_txpkts[IFM_QUEUE_STAT_CNTRS];/**< Total number of queue TX packets.*/
+ uint64_t q_rx_bytes[IFM_QUEUE_STAT_CNTRS];
+ /**< Total number of successfully received queue bytes.*/
+ uint64_t q_tx_bytes[IFM_QUEUE_STAT_CNTRS];
+ /**< Total number of successfully transmitted queue bytes.*/
+ uint64_t q_rx_pkt_drop[IFM_QUEUE_STAT_CNTRS];
+ /**<Total number of queue packets received that are dropped.*/
+} __rte_cache_aligned if_stats;
+/**
+ * structure to store bond port information
+ */
+struct bond_port {
+ uint8_t bond_portid;
+ /**<portid of the bond port.*/
+ uint8_t socket_id;
+ /**<socketid of the port.*/
+ uint8_t mode;
+ /**<mode config.*/
+ uint8_t xmit_policy;
+ /**<xmit policy for this port.*/
+	uint32_t internal_ms;
+	/**<link monitoring frequency in ms.*/
+	uint32_t link_up_delay_ms;
+	/**<delay, in ms, before a link up is propagated.*/
+	uint32_t link_down_delay_ms;
+	/**<delay, in ms, before a link down is propagated.*/
+ uint8_t primary;
+ /**<primary port of this bond.*/
+ uint8_t slaves[RTE_MAX_ETHPORTS];
+ /**<list of slaves*/
+ int slave_count;
+ /**<slave count.*/
+ uint8_t active_slaves[RTE_MAX_ETHPORTS];
+ /**<list of active slaves.*/
+ int active_slave_count;
+ /**<cnt of active slave.*/
+} __rte_cache_aligned;
+
+/**
+ * Physical port details:
+ * Used to store information about configured port.
+ * Most of the member variables in this structure are populated
+ * from struct rte_eth_dev_info
+ */
+typedef struct _l2_phy_interface_ {
+ struct _l2_phy_interface_ *next; /**< pointer to physical interface list */
+	uint8_t pmdid;		/**< populated from rte_eth_dev_info */
+	unsigned int if_index;	/**< populated from rte_eth_dev_info */
+	char ifname[IFM_IFNAME_LEN];	/**< populated from rte_eth_dev_info */
+ uint16_t mtu; /**< mtu value - configurable */
+ uint8_t macaddr[IFM_ETHER_ADDR_SIZE]; /**< Ether addr*/
+ uint32_t promisc; /**< promisc mode - configurable*/
+ uint32_t flags; /**< Used for link bonding */
+ /* Link status */
+ uint32_t link_speed; /**< line speed */
+ uint16_t link_duplex:1; /**< duplex mode */
+ uint16_t link_autoneg:1; /**< auto negotiation*/
+ uint16_t link_status:1; /**< operational status */
+ uint16_t admin_status:1; /**< Admin status of a port*/
+ /* queue details */
+	struct rte_mempool *mempool;	/**< mempool used for the port's RX queues */
+ uint32_t min_rx_bufsize; /**< rx buffer size supported */
+ uint32_t max_rx_pktlen; /**< max size of packet*/
+ uint16_t max_rx_queues; /**< max number of rx queues supported */
+	uint16_t max_tx_queues;	/**< max number of tx queues supported*/
+ uint64_t n_rxpkts; /**< number of packets received */
+ uint64_t n_txpkts; /**< number of packets transmitted */
+	if_stats stats;	 /**< port stats - populated from rte_eth_stats */
+ uint16_t(*retrieve_bulk_pkts) (uint8_t, uint16_t, struct rte_mbuf **);
+ /**< pointer to read packets*/
+ uint16_t(*transmit_bulk_pkts) (struct _l2_phy_interface_ *, struct rte_mbuf **, uint64_t);
+ /**< pointer to transmit the bulk of packets */
+ int (*transmit_single_pkt) (struct _l2_phy_interface_ *, struct rte_mbuf *);
+	/**< pointer to transmit a single packet*/
+ struct rte_eth_dev_tx_buffer *tx_buffer;
+ uint64_t tx_buf_len; /**< number of packets in tx_buf */
+ void *ipv4_list; /**< pointer to ip list */
+ void *ipv6_list; /**< pointer to ipv6 list */
+ struct bond_port *bond_config; /**< pointer to bond info*/
+ port_config_t port_config;
+} __rte_cache_aligned l2_phy_interface_t;
+
+/**
+ * Port IPv4 address details:
+ * Used to maintain IPv4 information of a port.
+ */
+typedef struct _ipv4list_ {
+ struct _ipv4list_ *next;/**< pointer to IPv4 list */
+ uint32_t ipaddr; /**< Configured ipv4 address */
+ unsigned int addrlen; /**< subnet mask or addrlen */
+	unsigned int mtu;	/**< IPv4 mtu*/
+ l2_phy_interface_t *port;
+ /**< pointer to a port on which this ipaddr is configured*/
+} ipv4list_t;
+
+/**
+ * Port IPv6 address details:
+ * Used to maintain IPv6 information of a port.
+ */
+typedef struct _ipv6list_ {
+ struct _ipv6list_ *next; /**< Ptr IPv6 list */
+ uint8_t ipaddr[IFM_IPV6_ADDR_SIZE]; /**< Configured ipv6 address */
+ unsigned int addrlen; /**< subnet mask or addrlen*/
+ unsigned int mtu; /**< IPv6 mtu*/
+	l2_phy_interface_t *port;	/**< ptr to a port on which ipv6 addr is configured*/
+} ipv6list_t;
+
+/**
+ * Interface Manager client details:
+ * Maintains information about clients who registered for link status update.
+ * Stores callback function to be called in case of link state change.
+ */
+typedef struct _ifm_client_ {
+ uint32_t clientid; /**< unique client id identifies the client used for indexing*/
+ void (*cb_linkupdate) (uint8_t, unsigned int);
+ /**< callback function to be triggered during an event*/
+} __rte_cache_aligned ifm_client;
+
+/**
+ * Interface manager global structure:
+ * IFM main structure has pointer configured port list.
+ */
+typedef struct _interface_main_ {
+ l2_phy_interface_t *port_list[IFM_MAX_PORTARR_SZ];
+	uint32_t nport_configured;	/**< no of ports successfully configured during PCI probe*/
+	uint32_t nport_intialized;	/**< no of ports successfully initialized through ifm_init*/
+ uint8_t nclient; /**< no of clients registered for Interface manager events*/
+ ifm_client if_client[IFM_MAX_CLIENT]; /**< Array of interface manager client details*/
+} __rte_cache_aligned interface_main_t;
+
+/**
+ * Init function of Interface manager. Calls port_setup function for every port.
+ *
+ * @param *pconfig
+ * A pointer to port_config_t contains port configuration.
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int ifm_configure_ports(port_config_t *pconfig);
+
+/**
+ * Returns first port from port list.
+ *
+ * @param
+ * None
+ *
+ * @returns
+ * On success - Returns a pointer to first port in the list of
+ * type l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_first_port(void);
+
+/**
+ * Get a port from the physical port list which is next node to
+ * the given portid in the list.
+ *
+ * @param portid
+ * A pmdid of port.
+ *
+ * @returns
+ * On success - Returns a pointer to next port in the list of
+ * type l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_next_port(uint8_t port_id);
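+
+/*
+ * Illustrative only: walking the configured port list with the two accessors
+ * above, assuming the ports have already been set up through
+ * ifm_configure_ports().
+ *
+ *	l2_phy_interface_t *port = ifm_get_first_port();
+ *	while (port != NULL) {
+ *		printf("port %d (%s)\n", port->pmdid, port->ifname);
+ *		port = ifm_get_next_port(port->pmdid);
+ *	}
+ */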
+
+/**
+ * Get a pointer to port for the given portid from the physical port list.
+ *
+ * @param portid
+ * A pmd id of the port.
+ *
+ * @returns
+ * On success - returns pointer to l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_port(uint8_t);
+
+/**
+ * Get a pointer to port for the given port name from the physical port list.
+ *
+ * @param name
+ * Name of the port
+ *
+ * @returns
+ * On success - returns pointer to l2_phy_interface_t.
+ * NULL - On Failure.
+ */
+l2_phy_interface_t *ifm_get_port_by_name(const char *name);
+/**
+ * Removes given port from the physical interface list.
+ *
+ * @params
+ * portid - pmd_id of port.
+ * @returns
+ * none
+ */
+void ifm_remove_port_details(uint8_t portid);
+
+/**
+ * Adds the given port to the beginning of the physical interface list.
+ *
+ * @param l2_phy_interface_t *
+ * pointer to l2_phy_interface_t.
+ * @returns
+ * none
+ */
+void ifm_add_port_to_port_list(l2_phy_interface_t *);
+
+/**
+ * Checks whether the global physical port list is NULL.
+ *
+ * @returns
+ * 0 - On success.
+ * 1 - On Failure.
+ */
+int is_port_list_null(void);
+
+/**
+ * Configures the device port. Also sets tx and rx queue.
+ * Populates port structure and adds it to the physical interface list.
+ *
+ * @param portconfig
+ * Contains configuration about rx queue, tx queue.
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int ifm_port_setup(uint8_t port_id, port_config_t *);
+
+/**
+ * Initializes interface manager main structure
+ * @params
+ * none
+ * @returns
+ * none
+ */
+void ifm_init(void);
+
+/**
+ * Returns number of ports initialized during pci probe.
+ *
+ * @params
+ * void
+ *
+ * @returns
+ * number of ports initialized - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int32_t ifm_get_nports_initialized(void);
+
+/**
+ * Returns number of ports initialized through ifm_init.
+ *
+ * @params
+ * void
+ *
+ * @returns
+ * number of ports initialized - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int32_t ifm_get_nactive_ports(void);
+
+/**
+ * Checks whether port is ipv4 enabled.
+ *
+ * @param portid
+ * A pmd id of the port.
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int32_t ifm_chk_port_ipv4_enabled(uint8_t port_id);
+
+/**
+ * Checks whether port is ipv6 enabled.
+ *
+ * @param portid
+ * A pmd id of the port.
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int32_t ifm_chk_port_ipv6_enabled(uint8_t port_id);
+
+/**
+ * Remove ipv4 address from the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ipaddr
+ * ipv4 address to be removed
+ * @param addrlen
+ * ipv4 address length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int16_t ifm_remove_ipv4_port(uint8_t port_id, uint32_t ipaddr,
+ uint32_t addrlen);
+
+/**
+ * Remove ipv6 address from the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ip6addr
+ *  ipv6 address to be removed
+ * @param addrlen
+ *  ipv6 prefix length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int16_t ifm_remove_ipv6_port(uint8_t port_id, uint32_t ip6addr,
+ uint32_t addrlen);
+
+/**
+ * Add ipv4 address to the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ipaddr
+ * ipv4 address to be configured
+ * @param addrlen
+ * ipv4 address length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int16_t ifm_add_ipv4_port(uint8_t port_id, uint32_t ipaddr, uint32_t addrlen);
+
+/**
+ * Add ipv6 address to the given port.
+ *
+ * @param portid
+ * A pmd id of the port.
+ * @param ip6addr
+ * ipv6 address to be configured
+ * @param addrlen
+ *  ipv6 prefix length
+ *
+ * @returns
+ * IFM_SUCCESS - On success.
+ * IFM_FAILURE - On Failure.
+ */
+int8_t ifm_add_ipv6_port(uint8_t port_id, uint8_t ip6addr[], uint32_t addrlen);
+
+/**
+ * Buffers the packet in the TX queue.
+ *
+ * @param *port
+ * pointer to the port.
+ * @param *tx_pkts
+ * packet to be transmitted
+ *
+ * @returns
+ * number of packets transmitted
+ */
+int ifm_transmit_single_pkt(l2_phy_interface_t *port,
+ struct rte_mbuf *tx_pkts);
+
+/**
+ * Transmit the packet
+ *
+ * @param *port
+ * pointer to the port.
+ * @param *tx_pkts
+ * packets to be transmitted
+ * @param npkts
+ * number of packets to be transmitted
+ *
+ * @returns
+ * number of packets transmitted
+ */
+uint16_t ifm_transmit_bulk_pkts(l2_phy_interface_t *, struct rte_mbuf **tx_pkts,
+ uint64_t npkts);
+
+/**
+ * Receive burst of 32 packets
+ *
+ * @param portid
+ * From which port we need to read packets
+ * @param qid
+ *  Queue id from which packets are read
+ * @param rx_pkts
+ *  mbuf array in which the read packets are placed
+ *
+ * @returns
+ * number of packets read
+ */
+uint16_t ifm_receive_bulk_pkts(uint8_t port_id, uint16_t qid,
+ struct rte_mbuf **rx_pkts);
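+
+/*
+ * Illustrative only: a minimal RX/TX forwarding sketch built on the two bulk
+ * APIs above. The port id, queue id and the echo-back forwarding decision are
+ * assumptions made purely for the example.
+ *
+ *	struct rte_mbuf *pkts[IFM_BURST_SIZE];
+ *	l2_phy_interface_t *port = ifm_get_port(0);
+ *	uint16_t nb_rx = ifm_receive_bulk_pkts(0, IFM_RX_DEFAULT_Q, pkts);
+ *	if (port != NULL && nb_rx > 0)
+ *		ifm_transmit_bulk_pkts(port, pkts, nb_rx);
+ */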
+
+/**
+ * Enable or disable promiscuous mode
+ *
+ * @param portid
+ * pmd id of the port
+ * @param enable
+ *  1 - enable, 0 - disable
+ *
+ * @returns
+ * none
+ */
+void ifm_set_port_promisc(uint8_t port_id, uint8_t enable);
+
+/**
+ * Set MTU value for the port
+ *
+ * @param portid
+ *  pmd id of the port
+ * @param mtu
+ *  MTU value
+ *
+ * @returns
+ * none
+ */
+void ifm_set_l2_interface_mtu(uint8_t port_id, uint16_t mtu);
+
+/**
+ * Update the operational link status of the port
+ *
+ * @param portid
+ *  pmd id of the port
+ * @param linkstatus
+ *  Link status - IFM_ETH_LINK_UP or IFM_ETH_LINK_DOWN
+ *
+ * @returns
+ * none
+ */
+void ifm_update_linkstatus(uint8_t port_id, uint16_t linkstatus);
+
+/**
+ * Register for link state event
+ *
+ * @param clientid
+ * Unique number identifies client.
+ * @param cb_linkupdate
+ * Callback function which has to be called at time of event
+ *
+ * @returns
+ * none
+ */
+void ifm_register_for_linkupdate(uint32_t clientid,
+ void (*cb_linkupdate) (uint8_t, unsigned int));
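+
+/*
+ * Illustrative only: a client registering for link updates. The callback name
+ * and client id below are assumptions made for the example.
+ *
+ *	static void my_link_cb(uint8_t port_id, unsigned int status)
+ *	{
+ *		printf("port %u link status changed to %u\n", port_id, status);
+ *	}
+ *	...
+ *	ifm_register_for_linkupdate(1, my_link_cb);
+ */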
+
+/**
+ * Callback which is triggered at the time of link state change which in turn triggers registered
+ * clients callback
+ *
+ * @param portid
+ * pmd id of the port
+ * @param type
+ * lsi event type
+ * @param param
+ * Currently not used
+ *
+ * @returns
+ * none
+ */
+void lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+ void *param);
+/*
+ * Prints list of interfaces
+ * @param void
+ */
+void print_interface_details(void);
+/*
+ * Creates bond interface
+ * @Param name
+ * name of bond port
+ * @Param mode
+ * mode
+ * @Param portconf
+ * port configuration to be applied
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_bond_port_create(const char *name, int mode, port_config_t *portconf);
+/*
+ * Deletes bond interface
+ * @Param name
+ * name of bond port
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_bond_port_delete(const char *name);
+/*
+ * Adds a port as a slave to the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slave_port_id
+ * slave port's port id
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_add_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id);
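+/*
+ * Illustrative only: creating a bond port and enslaving two physical ports.
+ * The bond name, mode, slave port ids and the assumption that the bond's
+ * pmdid is the bonded port id are made purely for the example.
+ *
+ *	port_config_t bond_conf;   // filled in as for a physical port
+ *	if (ifm_bond_port_create("net_bonding0", IFM_BONDING_MODE_ACTIVE_BACKUP, &bond_conf) == 0) {
+ *		l2_phy_interface_t *bond = ifm_get_port_by_name("net_bonding0");
+ *		if (bond != NULL) {
+ *			ifm_add_slave_port(bond->pmdid, 0);
+ *			ifm_add_slave_port(bond->pmdid, 1);
+ *		}
+ *	}
+ */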
+/*
+ * Removes a slave port from the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slave_port_id
+ * slave port's port id
+ * @returns 0 on success and 1 on failure
+ */
+int ifm_remove_slave_port(uint8_t bonded_port_id, uint8_t slave_port_id);
+/*
+ * Sets the bond port's mode
+ * @Param bonded_port_id
+ * bond port id
+ * @Param mode
+ * mode 0 ... 5
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_mode(uint8_t bonded_port_id, uint8_t mode);
+/*
+ * Gets the bond port's mode
+ * @Param bonded_port_id
+ * bond port id
+ * @returns mode value or -1 on failure
+ */
+int get_bond_mode(uint8_t bonded_port_id);
+/*
+ * Sets the primary slave port of the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slave_port_id
+ * slave port's port id
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_primary(uint8_t bonded_port_id, uint8_t slave_port_id);
+/*
+ * Get primary port of the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @returns port id of primary on success and 1 on failure
+ */
+int get_bond_primary_port(uint8_t bonded_port_id);
+/*
+ * Get slave count for the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @returns slave count on success and 1 on failure
+ */
+int get_bond_slave_count(uint8_t bonded_port_id);
+/*
+ * Get active slave count for the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @returns active slaves count on success and 1 on failure
+ */
+int get_bond_active_slave_count(uint8_t bonded_port_id);
+/*
+ * Get slaves in the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slaves
+ * array to save slave port
+ * @returns 0 on success and 1 on failure
+ */
+int get_bond_slaves(uint8_t bonded_port_id, uint8_t slaves[RTE_MAX_ETHPORTS]);
+/*
+ * Get active slaves in the bond
+ * @Param bonded_port_id
+ * bond port id
+ * @Param slaves
+ * array to save slave port
+ * @returns 0 on success and 1 on failure
+ */
+int get_bond_active_slaves(uint8_t bonded_port_id,
+ uint8_t slaves[RTE_MAX_ETHPORTS]);
+/*
+ * Sets the bond port's MAC address
+ * @Param bonded_port_id
+ * bond port id
+ * @Param mac_addr
+ * MAC address to be set
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_mac_address(uint8_t bonded_port_id, struct ether_addr *mac_addr);
+/*
+ * Resets the bond port's MAC address
+ * @Param bonded_port_id
+ * bond port id
+ * @returns 0 on success and 1 on failure
+ */
+int reset_bond_mac_addr(uint8_t bonded_port_id);
+int get_bond_mac(uint8_t bonded_port_id, struct ether_addr *macaddr);
+/*
+ * Sets the bond port's xmit policy
+ * @Param bonded_port_id
+ * bond port id
+ * @Param policy
+ * xmit policy
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_xmitpolicy(uint8_t bonded_port_id, uint8_t policy);
+/*
+ * Gets the bond port's xmit policy
+ * @Param bonded_port_id
+ * bond port id
+ * @returns xmit policy value or -1 on failure
+ */
+int get_bond_xmitpolicy(uint8_t bonded_port_id);
+/*
+ * Sets the bond port's link monitor frequency
+ * @Param bonded_port_id
+ * bond port id
+ * @Param internal_ms
+ * frequency in ms
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_link_montitor_frequency(uint8_t bonded_port_id,
+ uint32_t internal_ms);
+/*
+ * Gets the bond port's link monitor frequency
+ * @Param bonded_port_id
+ * bond port id
+ * @returns frequency value or -1 on failure
+ */
+int get_bond_link_monitor_frequency(uint8_t bonded_port_id);
+/*
+ * Sets the bond port's link down delay
+ * @Param bonded_port_id
+ * bond port id
+ * @Param delay_ms
+ * delay time in ms
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_linkdown_delay(uint8_t bonded_port_id, uint32_t delay_ms);
+/*
+ * Gets the bond port's link down delay
+ * @Param bonded_port_id
+ * bond port id
+ * @returns delay ms value or -1 on failure
+ */
+int get_bond_link_down_delay(uint8_t bonded_port_id);
+/*
+ * Sets the bond port's link up delay
+ * @Param bonded_port_id
+ * bond port id
+ * @Param delay_ms
+ * delay time in ms
+ * @returns 0 on success and 1 on failure
+ */
+int set_bond_linkup_delay(uint8_t bonded_port_id, uint32_t delay_ms);
+/*
+ * Gets the bond port's link up delay
+ * @Param bonded_port_id
+ * bond port id
+ * @returns delay ms value or -1 on failure
+ */
+int get_bond_link_up_delay(uint8_t bonded_port_id);
+/*
+ * Prints port statistics
+ * @Param void
+ * @returns void
+ */
+void print_stats(void);
+/*
+ * Gets information about port
+ * @Param port_id
+ * portid of the port
+ * @param port_info
+ * pointer to memory to which the port info is copied
+ * @returns 0 on success otherwise -1
+ */
+int ifm_get_port_info(uint8_t port_id, l2_phy_interface_t *port_info);
+/*
+ * Gets information about next port of given portid
+ * @Param port_id
+ * portid of the port
+ * @param port_info
+ * pointer to memory to which the port info is copied
+ * @returns 0 on success otherwise -1
+ */
+int ifm_get_next_port_info(uint8_t port_id, l2_phy_interface_t *port_info);
+/*
+ * Enable ifm debug
+ * @Param dbg value
+ * Debug flag - IFM_DEBUG_CONFIG (port config), IFM_DEBUG_RXTX (port RX/TX), IFM_DEBUG_LOCKS (locks), IFM_DEBUG (general debug)
+ * @param flag
+ * Enable 1, disable 0
+ * @returns 0 on success otherwise -1
+ */
+void config_ifm_debug(int dbg, int flag);
+#endif
diff --git a/common/VIL/l2l3_stack/l2_proto.c b/common/VIL/l2l3_stack/l2_proto.c
new file mode 100644
index 00000000..44c50b08
--- /dev/null
+++ b/common/VIL/l2l3_stack/l2_proto.c
@@ -0,0 +1,239 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/*
+ * Filename - l2_proto.c
+ * L2 Protocol Handler
+ */
+
+#include "l2_proto.h"
+
+static struct proto_packet_type *proto_list[3];
+/*
+ * Function to register the rx functions for different ethertypes. This is maintained in a list.
+ */
+void
+list_add_type(uint16_t type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *))
+{
+ if (type == ETHER_TYPE_IPv4) {
+ proto_list[IPv4_VAL] =
+ rte_malloc(NULL, sizeof(struct proto_packet_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_list[IPv4_VAL]->type = type;
+ proto_list[IPv4_VAL]->func = func;
+ }
+
+ else if (type == ETHER_TYPE_ARP) {
+ proto_list[ARP_VAL] =
+ rte_malloc(NULL, sizeof(struct proto_packet_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_list[ARP_VAL]->type = type;
+ proto_list[ARP_VAL]->func = func;
+ } else if (type == ETHER_TYPE_IPv6) {
+ proto_list[IPv6_VAL] =
+ rte_malloc(NULL, sizeof(struct proto_packet_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_list[IPv6_VAL]->type = type;
+ proto_list[IPv6_VAL]->func = func;
+ }
+
+}
+
+/*
+ * Check the mac address to see whether it is destined to this host or not.
+ * Call relevant functions registered by other modules when the ethertype matches,
+ * if it is destined to this host. Drop the packet otherwise.
+ */
+
+void
+l2_check_mac(struct rte_mbuf *m[IFM_BURST_SIZE], l2_phy_interface_t *port,
+ uint8_t i, uint64_t *pkts_mask, uint64_t *arp_pkts_mask,
+ uint64_t *ipv4_pkts_mask, uint64_t *ipv6_pkts_mask)
+{
+ struct ether_hdr *eth=NULL;
+ uint16_t same_mac=0;
+ uint16_t ethtype = 0;
+
+ if (m[i] != NULL) {
+ eth = rte_pktmbuf_mtod(m[i], struct ether_hdr *);
+		if (eth == NULL) {
+			/* No Ethernet header at the start of the packet */
+			printf("l2_check_mac: Ethernet header is NULL !!!\n");
+			return;
+		}
+		ethtype = rte_be_to_cpu_16(eth->ether_type);
+#if L2_PROTO_DBG
+ printf("%s => mbuf pkt dest mac addr: %x:%x:%x:%x:%x:%x\n",
+ __FUNCTION__, eth->d_addr.addr_bytes[0],
+ eth->d_addr.addr_bytes[1], eth->d_addr.addr_bytes[2],
+ eth->d_addr.addr_bytes[3], eth->d_addr.addr_bytes[4],
+ eth->d_addr.addr_bytes[5]);
+ printf("%s => port mac addr: %x:%x:%x:%x:%x:%x\n", __FUNCTION__,
+ port->macaddr[0], port->macaddr[1], port->macaddr[2],
+ port->macaddr[3], port->macaddr[4], port->macaddr[5]);
+
+#endif
+ /* Compare the mac addresses */
+ same_mac =
+ (is_same_ether_addr
+ (&eth->d_addr, (struct ether_addr *)port->macaddr)
+ ||
+ ((is_broadcast_ether_addr
+ ((struct ether_addr *)&eth->d_addr)
+ && (ethtype == ETHER_TYPE_ARP)))
+ || (ethtype == ETHER_TYPE_IPv6
+ && eth->d_addr.addr_bytes[0] == 0x33
+ && eth->d_addr.addr_bytes[1] == 0x33));
+
+ if (!same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *pkts_mask ^= temp_mask;
+ rte_pktmbuf_free(m[i]);
+ m[i] = NULL;
+ } else if ((ethtype == ETHER_TYPE_IPv4) && same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *ipv4_pkts_mask ^= temp_mask;
+ } else if ((ethtype == ETHER_TYPE_ARP) && same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *arp_pkts_mask ^= temp_mask;
+ } else if ((ethtype == ETHER_TYPE_IPv6) && same_mac) {
+ uint64_t temp_mask = 1LLU << i;
+ *ipv6_pkts_mask ^= temp_mask;
+ }
+ }
+ printf("\n%s: arp_pkts_mask = %" PRIu64 ", ipv4_pkts_mask = %" PRIu64
+	       ", ipv6_pkts_mask =%" PRIu64 ", pkt-type = %x, same_mac = %d\n",
+ __FUNCTION__, *arp_pkts_mask, *ipv4_pkts_mask, *ipv6_pkts_mask,
+ ethtype, same_mac);
+}
+
+void
+protocol_handler_recv(struct rte_mbuf **pkts_burst, uint16_t nb_rx,
+ l2_phy_interface_t *port)
+{
+ uint8_t i;
+ uint64_t pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+ uint64_t arp_pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+ uint64_t ipv4_pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+ uint64_t ipv6_pkts_mask = 0; //RTE_LEN2MASK(nb_rx, uint64_t);
+
+ /*Check the mac address of every single packet and unset the bits in the packet mask
+ *for those packets which are not destined to this host
+ */
+ for (i = 0; i < nb_rx; i++) {
+ l2_check_mac(pkts_burst, port, i, &pkts_mask, &arp_pkts_mask,
+ &ipv4_pkts_mask, &ipv6_pkts_mask);
+ }
+ if (nb_rx) {
+ if (arp_pkts_mask) {
+ proto_list[ARP_VAL]->func(pkts_burst, nb_rx,
+ arp_pkts_mask, port);
+ printf
+ ("=================After ARP ==================\n");
+ }
+ if (ipv4_pkts_mask) {
+ printf
+ ("=================Calling IPV4 L3 RX ==================\n");
+ printf("====nb_rx:%u, ipv4_pkts_mask: %lu\n\n", nb_rx,
+ ipv4_pkts_mask);
+ proto_list[IPv4_VAL]->func(pkts_burst, nb_rx,
+ ipv4_pkts_mask, port);
+ }
+ if (ipv6_pkts_mask) {
+ printf
+ ("=================Calling IPV6 L3 RX ==================\n");
+ printf("====nb_rx:%u, ipv6_pkts_mask: %lu\n\n", nb_rx,
+ ipv6_pkts_mask);
+ proto_list[IPv6_VAL]->func(pkts_burst, nb_rx,
+ ipv6_pkts_mask, port);
+ }
+ }
+}
+
+#if 0
+switch (qid) {
+case 1:
+ {
+#if 0
+ printf
+ ("=====================ENTERED ARP CASE================\n");
+ while (cur->type != ETHER_TYPE_ARP && cur != NULL) {
+ cur = cur->next;
+ }
+ if (cur != NULL) {
+ //printf("L2 PROTO TEST-14=================================\n");
+ printf
+ ("==============\nARPARPARPARP \n=======================\n");
+ cur->func(pkts_burst, nb_rx, pkts_mask, portid);
+ }
+#endif
+ proto_list[ARP_VAL]->func(pkts_burst, nb_rx, arp_pkts_mask,
+ portid);
+ break;
+ }
+case 0:
+ {
+#if 0
+ while (cur->type != ETHER_TYPE_IPv4 && cur != NULL) {
+ cur = cur->next;
+ }
+ if (cur != NULL) {
+ //printf("L2 PROTO TEST-15=================================\n");
+ //printf("==============\nPkts mask in while calling IPv4 %d \n=======================\n",ipv4_pkts_mask);
+ cur->func(pkts_burst, nb_rx, ipv4_pkts_mask, portid);
+ }
+ break;
+#endif
+ // printf("=========Inside switch==============\n");
+ proto_list[IPv4_VAL]->func(pkts_burst, nb_rx, ipv4_pkts_mask,
+ portid);
+ break;
+ }
+ /* case 2:
+ {
+ while(cur->type != ETHER_TYPE_IPv6 && cur != NULL)
+ {
+ cur = cur->next;
+ }
+ if(cur != NULL)
+ {
+ cur->func(pkts_burst, nb_rx, ipv6_pkts_mask, portid);
+ }
+ break;
+ } */
+default:
+ {
+ rte_exit(EXIT_FAILURE, "Ethertype not found \n");
+ break;
+ }
+}
+#endif
+
+/*
+ * L2 Stack Init for future
+
+
+ void
+l2_stack_init(void)
+{
+
+}
+
+*/
diff --git a/common/VIL/l2l3_stack/l2_proto.h b/common/VIL/l2l3_stack/l2_proto.h
new file mode 100644
index 00000000..05466070
--- /dev/null
+++ b/common/VIL/l2l3_stack/l2_proto.h
@@ -0,0 +1,150 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+ * @file
+ * L2 Protocol Handler
+ * Reads the packet from the interface and sets the
+ * masks for a burst of packets based on ethertype and
+ * calls the relevant function registered for that ethertype
+ *
+ */
+
+#ifndef L2_PROTO_H
+#define L2_PROTO_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_eth_ctrl.h>
+#include <interface.h>
+
+/* Array indexes of proto_packet_type structure */
+#define IPv4_VAL 0 /**< Array index for IPv4 */
+#define ARP_VAL 1 /**< Array index for ARP */
+#define IPv6_VAL 2 /**< Array index for IPv6 */
+
+/* Enable to print L2_Proto debugs */
+#define L2_PROTO_DBG 1 /**< Enable to print L2 Proto debugs */
+
+/**
+ * A structure used to call the function handlers for a certain ethertype
+ */
+struct proto_packet_type {
+ uint16_t type; /**< Ethertype */
+ void (*func) (struct rte_mbuf **m, uint16_t nb_pkts, uint64_t pkt_mask, l2_phy_interface_t *port); /**< Function pointer to the registered callback function */
+} __rte_cache_aligned;/**< RTE Cache alignment */
+
+/**
+ * Function called from other modules to register rx functions for particular ethertypes
+ *
+ * @param type
+ * Ethertype
+ * @param (*func)()
+ * Function pointer to the function being registered by different modules
+ */
+void
+list_add_type(uint16_t type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *));
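+
+/*
+ * Illustrative only: how an L3 module might register its receive handler for
+ * IPv4 traffic. The handler name below is an assumption made for the example;
+ * the real stack registers its own handlers at init time.
+ *
+ *	static void my_ipv4_rx(struct rte_mbuf **pkts, uint16_t nb_pkts,
+ *			       uint64_t pkts_mask, l2_phy_interface_t *port)
+ *	{
+ *		// process the packets whose bit is set in pkts_mask
+ *	}
+ *	...
+ *	list_add_type(ETHER_TYPE_IPv4, my_ipv4_rx);
+ */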
+
+/**
+ * Function to check whether the destination mac address of the packet is the mac address of the received port.
+ * Drop the packet if it is not destined to the host.
+ * If it is destined to this host, then set the packet masks for IPv4, IPv6 and ARP packet types for a burst of packets.
+ *
+ * @param m
+ * rte_mbuf packet
+ *
+ * @param port
+ *  Pointer to the port on which the packet was received
+ *
+ * @param pos
+ * Index of the packet in the burst
+ *
+ * @param pkts_mask
+ * Packet mask where bits are set at positions for the packets in the burst which were destined to the host
+ *
+ * @param arp_pkts_mask
+ * Packet mask for ARP where bits are set for valid ARP packets
+ *
+ * @param ipv4_pkts_mask
+ * Packet mask for IPv4 where bits are set for valid IPv4 packets
+ *
+ * @param ipv6_pkts_mask
+ * Packet mask for IPv6 where bits are set for valid IPv6 packets
+ *
+ */
+void
+l2_check_mac(struct rte_mbuf *m[IFM_BURST_SIZE], l2_phy_interface_t *port,
+ uint8_t pos, uint64_t *pkts_mask, uint64_t *arp_pkts_mask,
+ uint64_t *ipv4_pkts_mask, uint64_t *ipv6_pkts_mask);
+
+/**
+ * Entry function to L2 Protocol Handler where appropriate functions are called for particular ethertypes
+ *
+ * @param m
+ *  Burst of rte_mbuf packets read from the port
+ *
+ * @param nb_rx
+ * Number of packets read
+ *
+ * @param port
+ *  Pointer to the port on which the packets were received
+ */
+void
+protocol_handler_recv(struct rte_mbuf *m[IFM_BURST_SIZE], uint16_t nb_rx,
+ l2_phy_interface_t *port);
+
+#endif
diff --git a/common/VIL/l2l3_stack/l3fwd_common.h b/common/VIL/l2l3_stack/l3fwd_common.h
new file mode 100644
index 00000000..cece57c0
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_common.h
@@ -0,0 +1,111 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+* @file
+* L3fwd common header file for LPM IPv4 and IPv6 stack initialization
+*/
+
+#ifndef L3FWD_COMMON_H
+#define L3FWD_COMMON_H
+
+/* Standard Libraries */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/param.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <getopt.h>
+#include <unistd.h>
+
+/* DPDK RTE Libraries */
+#include <rte_common.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_port.h>
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_table_hash.h>
+#include <rte_table.h>
+#include <rte_table_lpm.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <l3fwd_lpm4.h>
+#include <l3fwd_lpm6.h>
+#include <rte_table_lpm_ipv6.h>
+
+/**
+* Define the Macros
+*/
+#define MAX_ROUTES 4 /**< MAX route that can be added*/
+#define L3FWD_DEBUG 1 /**< if set, enables the fast path logs */
+#define MULTIPATH_FEAT 1	/**< if set, enables the ECMP multipath feature */
+
+//#define IPPROTO_ICMPV6 58 /**< Protocol ID for ICMPv6 */
+
+/**
+* L3fwd initialization for creating IPv4 and IPv6 LPM tables.
+*/
+void l3fwd_init(void);
+
+/**
+* L3fwd IPv4 LPM table population; it calls the IPv4 route add function which stores all the routes in the LPM table
+*/
+void populate_lpm4_table_routes(void);
+
+/**
+* L3fwd IPv6 LPM table population; it calls the IPv6 route add function which stores all the routes in the LPM6 table
+*/
+void populate_lpm6_table_routes(void);
+
+/**
+* L3fwd LPM table population for both IPv4 and IPv6.
+*/
+void populate_lpm_routes(void);
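+
+/*
+ * Illustrative only: the expected initialization order, assuming the LPM
+ * tables must exist before any static routes are installed.
+ *
+ *	l3fwd_init();            // create the IPv4/IPv6 LPM and hash tables
+ *	populate_lpm_routes();   // install the configured IPv4 and IPv6 routes
+ */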
+
+#endif
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm4.c b/common/VIL/l2l3_stack/l3fwd_lpm4.c
new file mode 100644
index 00000000..081038b6
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm4.c
@@ -0,0 +1,1119 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "l3fwd_common.h"
+#include "interface.h"
+#include "l2_proto.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+#include <inttypes.h>
+
+/* Declare Global variables */
+
+/* Global for IPV6 */
+void *lpm4_table; /**< lpm4_table handler */
+
+/*Hash table for L2 adjacency */
+struct rte_hash *l2_adj_hash_handle; /**< l2 adjacency hash table handler */
+struct rte_hash *fib_path_hash_handle; /**< fib path hash table handler */
+
+l3_stats_t stats; /**< L3 statistics */
+
+/* Global load balancing hash table for ECMP*/
+uint8_t nh_links[MAX_SUPPORTED_FIB_PATHS][HASH_BUCKET_SIZE] = /**< Round Robin Hash entries for ECMP only*/
+{
+ /* 1 path, No Load balancing is required */
+ {0},
+
+ /* 2 path */
+ {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
+
+ /* 3 path */
+ {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0,
+ 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1,
+ 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2,
+ 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
+
+ /* 4 path */
+ {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
+ 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
+
+ /* 5 path */
+ {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0,
+ 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1,
+ 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2,
+ 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3},
+
+ /* 6 path */
+ {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
+ 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1,
+ 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
+ 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
+
+ /* 7 path */
+ {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1,
+ 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3,
+ 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5,
+ 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0},
+
+ /* 8 path */
+ {0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7}
+};
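+
+/*
+ * The table above is indexed as nh_links[fib_nh_size - 1][hash - 1] by the
+ * lookup path below. Worked example (values chosen only for illustration): a
+ * route with 3 next hops and a load-balancing hash value of 8 selects
+ * nh_links[2][7] == 1, i.e. the second configured next hop.
+ */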
+
+#if 0
+#define META_DATA_OFFSET 128
+
+#define RTE_PKTMBUF_HEADROOM 128 /* where is this defined ? */
+#define ETHERNET_START (META_DATA_OFFSET + RTE_PKTMBUF_HEADROOM)
+#define ETH_HDR_SIZE 14
+#define IP_START (ETHERNET_START + ETH_HDR_SIZE)
+#define TCP_START (IP_START + 20)
+
+static void print_pkt(struct rte_mbuf *pkt)
+{
+ int i;
+ int size = 14;
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, ETHERNET_START);
+
+ printf("Meta-data:\n");
+ for (i = 0; i < size; i++) {
+ printf("%02x ", rd[i]);
+ if ((i & 3) == 3)
+ printf("\n");
+ }
+ printf("\n");
+ printf("IP and TCP/UDP headers:\n");
+ rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, IP_START);
+ for (i = 0; i < 40; i++) {
+ printf("%02x ", rd[i]);
+ if ((i & 3) == 3)
+ printf("\n");
+ }
+
+}
+#endif
+static struct ip_protocol_type *proto_type[2];
+int lpm_init(void)
+{
+
+	/* Initialize LPMv4 params */
+ struct rte_table_lpm_params lpm_params = {
+ .name = "LPMv4",
+ .n_rules = IPV4_L3FWD_LPM_MAX_RULES,
+ .number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S,
+ .flags = 0,
+ .entry_unique_size = sizeof(struct fib_info),
+ .offset = 128,
+ };
+
+ /* Create LPMv4 tables */
+ lpm4_table =
+ rte_table_lpm_ops.f_create(&lpm_params, rte_socket_id(),
+ sizeof(struct fib_info));
+ if (lpm4_table == NULL) {
+ printf("Failed to create LPM IPV4 table\n");
+ return 0;
+ }
+
+ /*Initialize L2 ADJ hash params */
+ struct rte_hash_parameters l2_adj_ipv4_params = {
+ .name = "l2_ADJ_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct l2_adj_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ };
+
+ /* Create IPv4 L2 Adj Hash tables */
+ l2_adj_hash_handle = rte_hash_create(&l2_adj_ipv4_params);
+
+ if (l2_adj_hash_handle == NULL) {
+ printf("L2 ADJ rte_hash_create failed\n");
+ return 0;
+ } else {
+ printf("l2_adj_hash_handle %p\n\n", (void *)l2_adj_hash_handle);
+ }
+
+	/* Initialize FIB path hash params */
+ struct rte_hash_parameters fib_path_ipv4_params = {
+ .name = "FIB_PATH_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct fib_path_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ };
+
+ /* Create FIB PATH Hash tables */
+ fib_path_hash_handle = rte_hash_create(&fib_path_ipv4_params);
+
+ if (fib_path_hash_handle == NULL) {
+ printf("FIB path rte_hash_create failed\n");
+ return 0;
+ }
+ return 1;
+}
+
+int lpm4_table_route_add(struct routing_info *data)
+{
+
+ struct routing_info *fib = data;
+ struct rte_table_lpm_key lpm_key = {
+ .ip = fib->dst_ip_addr,
+ .depth = fib->depth,
+ };
+ uint8_t i;
+ static int Total_route_count;
+ struct fib_info entry;
+ entry.dst_ip_addr = rte_bswap32(fib->dst_ip_addr);
+ entry.depth = fib->depth;
+	entry.fib_nh_size = fib->fib_nh_size;	/**< 1 for single path, greater than 1 for multipath (ECMP) */
+
+#if MULTIPATH_FEAT
+ if (entry.fib_nh_size == 0 || entry.fib_nh_size > MAX_FIB_PATHS)
+#else
+ if (entry.fib_nh_size != 1) /**< For Single FIB_PATH */
+#endif
+ {
+ printf("Route can't be configured!!, entry.fib_nh_size = %d\n",
+ entry.fib_nh_size);
+ return 0;
+ }
+ /* Populate L2 adj and precomputes l2 encap string */
+#if MULTIPATH_FEAT
+ for (i = 0; i < entry.fib_nh_size; i++)
+#else
+ for (i = 0; i < 1; i++)
+#endif
+ {
+ struct fib_path *fib_path_addr = NULL;
+
+ fib_path_addr =
+ populate_fib_path(fib->nh_ip_addr[i], fib->out_port[i]);
+ if (fib_path_addr) {
+
+ entry.path[i] = fib_path_addr;
+ printf("Fib info for the Dest IP");
+ printf(" : %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ "/%" PRIu8
+ " => fib_path Addr: %p, l2_adj Addr: %p\n",
+ (fib->dst_ip_addr & 0xFF000000) >> 24,
+ (fib->dst_ip_addr & 0x00FF0000) >> 16,
+ (fib->dst_ip_addr & 0x0000FF00) >> 8,
+ (fib->dst_ip_addr & 0x000000FF), fib->depth,
+ fib_path_addr,
+ (void *)entry.path[i]->l2_adj_ptr);
+ } else {
+ printf("Fib info for the Dest IP :\
+ %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 "/%" PRIu8 " => fib_path Addr: NULL \n", (fib->dst_ip_addr & 0xFF000000) >> 24, (fib->dst_ip_addr & 0x00FF0000) >> 16, (fib->dst_ip_addr & 0x0000FF00) >> 8, (fib->dst_ip_addr & 0x000000FF), fib->depth);
+ entry.path[i] = NULL; /**< setting all other fib_paths to NULL */
+ }
+ }
+
+ int key_found, ret;
+ void *entry_ptr;
+ ret =
+ rte_table_lpm_ops.f_add(lpm4_table, (void *)&lpm_key, &entry,
+ &key_found, &entry_ptr);
+
+ if (ret != 0) {
+ printf("Failed to Add IP route\n");
+ return 0;
+ }
+ Total_route_count++;
+	printf("Total Routes Added : %u, Key_found: %d\n", Total_route_count,
+ key_found);
+ printf("Adding Route to LPM table...\n");
+
+ printf("Iterate with Cuckoo Hash table\n");
+ iterate_cuckoo_hash_table();
+ return 1;
+}
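+
+/*
+ * Illustrative only: adding a single-path /24 route through next hop
+ * 192.168.1.1 on port 0. The addresses, port id and the IPv4() helper are
+ * assumptions made for the example; struct routing_info is expected to carry
+ * the fields used below, and the byte order of the addresses must match what
+ * the callers of lpm4_table_route_add() use elsewhere in the stack.
+ *
+ *	struct routing_info route;
+ *	memset(&route, 0, sizeof(route));
+ *	route.dst_ip_addr = IPv4(10, 0, 0, 0);
+ *	route.depth = 24;
+ *	route.fib_nh_size = 1;
+ *	route.nh_ip_addr[0] = IPv4(192, 168, 1, 1);
+ *	route.out_port[0] = 0;
+ *	lpm4_table_route_add(&route);
+ */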
+
+int lpm4_table_route_delete(uint32_t dst_ip, uint8_t depth)
+{
+
+ struct rte_table_lpm_key lpm_key = {
+ .ip = dst_ip,
+ .depth = depth,
+ };
+
+ int key_found, ret;
+ void *entry = NULL;
+
+ entry = rte_zmalloc(NULL, 512, RTE_CACHE_LINE_SIZE);
+
+ /* Deleting a IP route from LPMv4 table */
+ ret =
+ rte_table_lpm_ops.f_delete(lpm4_table, &lpm_key, &key_found, entry);
+
+ if (ret) {
+ printf("Failed to Delete IP route from LPMv4 table\n");
+ return 0;
+ }
+
+ printf("Deleted route from LPM table (IPv4 Address = %"
+ PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ "/%u , key_found = %d\n", (lpm_key.ip & 0xFF000000) >> 24,
+ (lpm_key.ip & 0x00FF0000) >> 16, (lpm_key.ip & 0x0000FF00) >> 8,
+ (lpm_key.ip & 0x000000FF), lpm_key.depth, key_found);
+
+ /* Deleting a L2 Adj entry if refcount is 1, Else decrement Refcount */
+ remove_fib_l2_adj_entry(entry);
+ rte_free(entry);
+ printf("Iterate with Cuckoo Hash table\n");
+ iterate_cuckoo_hash_table();
+ return 1;
+}
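+
+/*
+ * Illustrative only: removing the example /24 route added above, assuming the
+ * destination address is passed in the same byte order used when it was added.
+ *
+ *	lpm4_table_route_delete(IPv4(10, 0, 0, 0), 24);
+ */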
+
+int
+lpm4_table_lookup(struct rte_mbuf **pkts_burst, uint16_t nb_pkts,
+ uint64_t pkts_mask,
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask)
+{
+
+ struct routing_table_entry *ipv4_entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t lookup_hit_mask_ipv4 = 0;
+ int status;
+ uint64_t pkts_key_mask = pkts_mask;
+ uint64_t lookup_miss_mask_ipv4 = pkts_mask;
+
+ static uint64_t sent_count;
+ static uint64_t rcvd_count;
+ rcvd_count += nb_pkts;
+ if (L3FWD_DEBUG) {
+ printf
+ (" Received IPv4 nb_pkts: %u, Rcvd_count: %lu\n, pkts_mask: %p\n",
+ nb_pkts, rcvd_count, (void *)pkts_mask);
+ }
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+
+ for (; pkts_key_mask;) {
+/**< Populate key offset in META DATA for all valid pkts */
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_key_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ pkts_key_mask &= ~pkt_mask;
+ struct rte_mbuf *mbuf = pkts_burst[pos];
+ uint32_t *lpm_key = NULL;
+ uint32_t *dst_addr = NULL;
+ lpm_key = (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, 128);
+ dst_addr =
+ (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ dst_addr_offset);
+ *lpm_key = *dst_addr;
+ if (L3FWD_DEBUG) {
+
+			printf("Rcvd Pkt (IPv4 Address = %"
+ PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ")\n",
+ (rte_cpu_to_be_32(*lpm_key) & 0xFF000000) >> 24,
+ (rte_cpu_to_be_32(*lpm_key) & 0x00FF0000) >> 16,
+ (rte_cpu_to_be_32(*lpm_key) & 0x0000FF00) >> 8,
+ (rte_cpu_to_be_32(*lpm_key) & 0x000000FF));
+ }
+ }
+
+ /* Lookup for IP route in LPM table */
+ if (L3FWD_DEBUG)
+ printf("\nIPV4 Lookup Mask Before = %p\n",
+ (void *)lookup_hit_mask_ipv4);
+ status =
+ rte_table_lpm_ops.f_lookup(lpm4_table, pkts_burst, pkts_mask,
+ &lookup_hit_mask_ipv4,
+ (void **)ipv4_entries);
+
+ if (status) {
+ printf("LPM Lookup failed for IP route\n");
+ return 0;
+ }
+
+ lookup_miss_mask_ipv4 = lookup_miss_mask_ipv4 & (~lookup_hit_mask_ipv4);
+ if (L3FWD_DEBUG) {
+ printf
+ ("AFTER lookup_hit_mask_ipv4 = %p, lookup_miss_mask_ipv4 =%p\n",
+ (void *)lookup_hit_mask_ipv4,
+ (void *)lookup_miss_mask_ipv4);
+ }
+
+ for (; lookup_miss_mask_ipv4;) {
+/**< Drop packets for lookup_miss_mask */
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_miss_mask_ipv4);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_miss_mask_ipv4 &= ~pkt_mask;
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ stats.nb_l3_drop_pkt++; /**< Peg the L3 Drop counter */
+ if (L3FWD_DEBUG)
+ printf("\n DROP PKT IPV4 Lookup_miss_Mask = %p\n",
+ (void *)lookup_miss_mask_ipv4);
+ }
+
+ *hit_mask = lookup_hit_mask_ipv4;
+ for (; lookup_hit_mask_ipv4;) {
+/**< Process the packets for lookup_hit_mask*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_hit_mask_ipv4);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_hit_mask_ipv4 &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts_burst[pos];
+
+ struct fib_info *entry = (struct fib_info *)ipv4_entries[pos];
+
+#if MULTIPATH_FEAT
+
+ uint8_t ecmp_path = 0;
+ ecmp_path = ip_hash_load_balance(pkts_burst[pos]);
+ uint8_t selected_path = 0;
+ struct fib_path *fib_path = NULL;
+ if (((entry->fib_nh_size != 0)
+ && (entry->fib_nh_size - 1) < MAX_SUPPORTED_FIB_PATHS)
+ && ((ecmp_path != 0) && (ecmp_path - 1) < HASH_BUCKET_SIZE))
+ selected_path =
+ nh_links[entry->fib_nh_size - 1][ecmp_path - 1];
+ if (selected_path < MAX_FIB_PATHS)
+ fib_path = entry->path[selected_path];
+ if (L3FWD_DEBUG) {
+ printf
+ ("Total supported Path :%u, Hashed ECMP Key : %u, selected Fib_path: %u\n",
+ entry->fib_nh_size, ecmp_path, selected_path);
+ }
+#else
+ struct fib_path *fib_path = entry->path[0];
+#endif
+
+ if (fib_path == NULL) {
+ rte_pktmbuf_free(pkt);
+ pkts_burst[pos] = NULL;
+ stats.nb_l3_drop_pkt++; /**< Peg the L3 Drop counter */
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ if (L3FWD_DEBUG)
+ printf
+ ("Fib_path is NULL, ARP has not resolved, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ if (fib_path->l2_adj_ptr->flags == L2_ADJ_UNRESOLVED) {
+ if (fib_path->l2_adj_ptr->phy_port->ipv4_list != NULL)
+ request_arp(fib_path->l2_adj_ptr->phy_port->
+ pmdid, fib_path->nh_ip);
+
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ if (L3FWD_DEBUG)
+ printf
+ ("L2_ADJ_UNRESOLVED, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ /* extract ip headers and MAC */
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+ if (L3FWD_DEBUG) {
+ printf
+ ("MAC BEFORE- DST MAC %02x:%02x:%02x:%02x:%02x:%02x, \
+ SRC MAC %02x:%02x:%02x:%02x:%02x:%02x \n",
+ eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3], eth_dest[4], eth_dest[5], eth_src[0], eth_src[1],
+ eth_src[2], eth_src[3], eth_src[4], eth_src[5]);
+ }
+ /* Rewrite the packet with L2 string */
+ memcpy(eth_dest, fib_path->l2_adj_ptr->l2_string, sizeof(struct ether_addr) * 2); // For MAC
+ if (L3FWD_DEBUG) {
+ int k = 0;
+ for (k = 0; k < 14; k++) {
+ printf("%02x ",
+ fib_path->l2_adj_ptr->l2_string[k]);
+ printf("\n");
+ }
+ printf
+ ("MAC AFTER DST MAC %02x:%02x:%02x:%02x:%02x:%02x, \
+ SRC MAC %02x:%02x:%02x:%02x:%02x:%02x\n", eth_dest[0], eth_dest[1], eth_dest[2], eth_dest[3], eth_dest[4], eth_dest[5], eth_src[0], eth_src[1], eth_src[2], eth_src[3], eth_src[4], eth_src[5]);
+ }
+ port_ptr[pos] = fib_path->l2_adj_ptr->phy_port;
+ if (L3FWD_DEBUG) {
+ printf("l3fwd_lookup API!!!!\n");
+ //print_pkt(pkt);
+ }
+
+ sent_count++;
+ stats.nb_tx_l3_pkt++;
+ if (L3FWD_DEBUG)
+ printf
+ ("Successfully sent to port %u, sent_count : %lu\n\r",
+ fib_path->out_port, sent_count);
+ }
+ return 1;
+}
+
+int is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+{
+ if (link_len < sizeof(struct ipv4_hdr))
+ return -1;
+ if (((pkt->version_ihl) >> 4) != 4)
+ return -1;
+ if ((pkt->version_ihl & 0xf) < 5)
+ return -1;
+ if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+ return -1;
+ return 0;
+}
+
+int
+get_dest_mac_for_nexthop(uint32_t next_hop_ip,
+ uint8_t out_phy_port, struct ether_addr *hw_addr)
+{
+ struct arp_entry_data *arp_data = NULL;
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = out_phy_port;
+ arp_key.ip = next_hop_ip;
+
+ arp_data = retrieve_arp_entry(arp_key);
+ if (arp_data == NULL) {
+ printf("ARP entry is not found for ip %x, port %d\n",
+ next_hop_ip, out_phy_port);
+ return 0;
+ }
+ ether_addr_copy(&arp_data->eth_addr, hw_addr);
+ return 1;
+}
+
+struct l2_adj_entry *retrieve_l2_adj_entry(struct l2_adj_key_ipv4 l2_adj_key)
+{
+ struct l2_adj_entry *ret_l2_adj_data = NULL;
+ l2_adj_key.filler1 = 0;
+ l2_adj_key.filler2 = 0;
+ l2_adj_key.filler3 = 0;
+
+ int ret =
+ rte_hash_lookup_data(l2_adj_hash_handle, &l2_adj_key,
+ (void **)&ret_l2_adj_data);
+ if (ret < 0) {
+ #ifdef L2L3_DEBUG
+ printf
+ ("L2 Adj hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ #endif
+ return NULL;
+ } else {
+ #ifdef L2L3_DEBUG
+ printf
+ ("L2 Adj hash lookup Success, Entry Already Exist ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ #endif
+ return ret_l2_adj_data;
+ }
+}
+
+void remove_fib_l2_adj_entry(void *entry)
+{
+ struct fib_info entry1;
+ memcpy(&entry1, entry, sizeof(struct fib_info));
+
+ struct fib_path *fib_path_addr = entry1.path[0]; /**< For Single path */
+ if (fib_path_addr->refcount > 1) {
+ printf
+ (" BEFORE fib_path entry, nh_ip %x, port %d, refcount %d\n",
+ fib_path_addr->nh_ip, fib_path_addr->out_port,
+ fib_path_addr->refcount);
+		fib_path_addr->refcount--;	/**< Just decrement the refcount; this entry is still referenced */
+ printf("AFTER fib_path entry, nh_ip %x, port %d, refcount %d\n",
+ fib_path_addr->nh_ip, fib_path_addr->out_port,
+ fib_path_addr->refcount);
+ } else {
+/**< Refcount is 1 so delete both fib_path and l2_adj_entry */
+
+ struct l2_adj_entry *adj_addr = NULL;
+ adj_addr = fib_path_addr->l2_adj_ptr;
+
+ if (adj_addr != NULL) {
+/**< l2_adj_entry has an entry in the hash table */
+ struct l2_adj_key_ipv4 l2_adj_key = {
+ .Next_hop_ip = fib_path_addr->nh_ip,
+ .out_port_id = fib_path_addr->out_port,
+ };
+ #ifdef L3FWD_DEBUG
+ printf
+ (" l2_adj_entry is removed for ip %x, port %d, refcount %d\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_addr->refcount);
+ #endif
+
+ rte_hash_del_key(l2_adj_hash_handle, &l2_adj_key);
+ rte_free(adj_addr); /**< free the memory which was allocated for Hash entry */
+ adj_addr = NULL;
+ }
+
+ struct fib_path_key_ipv4 path_key = {
+ .nh_ip = fib_path_addr->nh_ip,
+ .out_port = fib_path_addr->out_port,
+ };
+
+ printf
+ ("fib_path entry is removed for ip %x, port %d, refcount %d\n",
+ fib_path_addr->nh_ip, fib_path_addr->out_port,
+ fib_path_addr->refcount);
+ rte_hash_del_key(fib_path_hash_handle, &path_key);
+ rte_free(fib_path_addr); /**< Free the memory which was allocated for Hash entry*/
+ fib_path_addr = NULL;
+ }
+}
+
+struct l2_adj_entry *populate_l2_adj(uint32_t ipaddr, uint8_t portid)
+{
+
+ struct l2_adj_key_ipv4 l2_adj_key;
+ l2_adj_key.out_port_id = portid;
+ l2_adj_key.Next_hop_ip = ipaddr;
+ l2_adj_key.filler1 = 0;
+ l2_adj_key.filler2 = 0;
+ l2_adj_key.filler3 = 0;
+
+ struct ether_addr eth_dst;
+ struct l2_adj_entry *adj_data = NULL;
+
+	/* Reuse the existing L2 adj entry if it is already present in the L2 adj hash table */
+ adj_data = retrieve_l2_adj_entry(l2_adj_key);
+
+ if (adj_data) { /**< L2 Adj Entry Exists*/
+
+ printf
+ ("l2_adj_entry exists ip%x, port %d, Refcnt :%u Address :%p\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_data->refcount, adj_data);
+ ether_addr_copy(&adj_data->eth_addr, &eth_dst);
+ adj_data->refcount++;
+ printf
+ ("l2_adj_entry UPDATED Refcount for NH ip%x, port %d, Refcnt :%u Address :%p\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_data->refcount, adj_data);
+ return adj_data;
+ }
+
+ struct ether_addr eth_src;
+ l2_phy_interface_t *port;
+ //uint16_t ether_type = 0x0800;
+ port = ifm_get_port(portid);
+
+ if (port != NULL) {
+ memcpy(&eth_src, &port->macaddr, sizeof(struct ether_addr));
+ unsigned char *p = (unsigned char *)eth_src.addr_bytes;
+ printf("S-MAC %x:%x:%x:%x:%x:%x\n\r", p[0], p[1], p[2], p[3],
+ p[4], p[5]);
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return NULL;
+ }
+
+ adj_data->out_port_id = portid;
+ adj_data->Next_hop_ip = ipaddr;
+ adj_data->refcount++;
+
+ adj_data->phy_port = port;
+ memset(&adj_data->eth_addr, 0, sizeof(struct ether_addr));
+ memset(&adj_data->l2_string, 0, 256);
+
+		/**< Store the L2 adj entry in the L2 adj hash table */
+ rte_hash_add_key_data(l2_adj_hash_handle, &l2_adj_key,
+ adj_data);
+ #ifdef L2L3_DEBUG
+ printf
+ ("L2 adj data stored in l2_adj_entry hash table,Addr:%p\n",
+ adj_data);
+ #endif
+ } else {
+ #ifdef L2L3_DEBUG
+ printf("\n PORT %u IS DOWN...\n", portid);
+ #endif
+ return NULL;
+ }
+ /* Query ARP to get L2 Adj */
+ if (get_dest_mac_for_nexthop(ipaddr, portid, &eth_dst)) {
+ unsigned char *p = (unsigned char *)eth_dst.addr_bytes;
+ printf
+ ("ARP resolution success and stored in l2_adj_entry hash table:D-MAC %x:%x:%x:%x:%x:%x\n\r",
+ p[0], p[1], p[2], p[3], p[4], p[5]);
+
+		memcpy(adj_data->l2_string, &eth_dst, sizeof(struct ether_addr));	/* Precompute the L2 string encap */
+ memcpy(&adj_data->l2_string[6], &eth_src,
+ sizeof(struct ether_addr));
+ //memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ ether_addr_copy(&eth_dst, &adj_data->eth_addr);
+ adj_data->flags = L2_ADJ_RESOLVED;
+ } else {
+ adj_data->flags = L2_ADJ_UNRESOLVED;
+ printf
+ (" ARP resolution Failed !! , unable to write in l2_adj_entry\n");
+ }
+ return adj_data;
+}
+
+struct fib_path *populate_fib_path(uint32_t nh_ip, uint8_t portid)
+{
+
+ struct fib_path_key_ipv4 path_key;
+ path_key.out_port = portid;
+ path_key.nh_ip = nh_ip;
+ path_key.filler1 = 0;
+ path_key.filler2 = 0;
+ path_key.filler3 = 0;
+
+ struct fib_path *fib_data = NULL;
+
+ /* Populate fib_path */
+ fib_data = retrieve_fib_path_entry(path_key);
+
+ if (fib_data) {/**< fib_path entry already exists */
+
+		/* Already present in the FIB_PATH cuckoo hash table */
+ printf
+ ("fib_path_entry already exists for NextHop ip: %x, port %d\n, Refcount %u Addr:%p\n",
+ fib_data->nh_ip, fib_data->out_port, fib_data->refcount,
+ fib_data);
+ fib_data->refcount++;
+ fib_data->l2_adj_ptr->refcount++;
+ printf
+ ("fib_path Refcount Updated NextHop :%x , port %u, Refcount %u\n\r",
+ fib_data->nh_ip, fib_data->out_port, fib_data->refcount);
+ return fib_data;
+ } else {
+ printf("fib_path entry Doesn't Exists.......\n");
+ }
+
+ fib_data = NULL;
+ struct l2_adj_entry *l2_adj_ptr = NULL;
+ l2_adj_ptr = populate_l2_adj(nh_ip, portid);
+
+ if (l2_adj_ptr) {
+
+ uint32_t size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct fib_path));
+ fib_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (fib_data == NULL) {
+ printf("fib_path memory allocation failed !\n");
+ return NULL;
+ }
+
+ fib_data->out_port = portid;
+ fib_data->nh_ip = nh_ip;
+ fib_data->refcount++;
+ fib_data->l2_adj_ptr = l2_adj_ptr;
+
+ printf("%s: get port details %u %d\n\r", __FUNCTION__, portid,
+ __LINE__);
+ /* Store the fib_path entry in the FIB PATH Hash Table */
+ int status;
+ status =
+ rte_hash_add_key_data(fib_path_hash_handle, &path_key,
+ fib_data);
+ if (status) {
+ printf
+ ("fib_path entry addition to hash table FAILED!! NextHop :%x , port %u, Refcount %u\n\r",
+ fib_data->nh_ip, fib_data->out_port,
+ fib_data->refcount);
+
+ rte_free(fib_data);
+ } else {
+ printf
+ ("fib_path entry Added into hash table for the NextHop :%x , port %u, Refcount %u\n\r",
+ fib_data->nh_ip, fib_data->out_port,
+ fib_data->refcount);
+ printf
+ (" l2_adj_entry Addr: %p, Fib_path Addr: %p, FibPath->l2ADJ Addr:%p \n",
+ l2_adj_ptr, fib_data, fib_data->l2_adj_ptr);
+ printf
+ (" ARP resolution success l2_adj_entry Addr: %p, Fib_path Addr: %p \n",
+ l2_adj_ptr, fib_data);
+ return fib_data;
+ }
+ } else {
+ printf
+ (" ARP resolution failed and unable to write fib path in fib_path cuckoo hash\n");
+ }
+ return NULL;
+}
+
+struct fib_path *retrieve_fib_path_entry(struct fib_path_key_ipv4 path_key)
+{
+ printf("FIB PATH lookup for NextHop IP : %x, port :%u\n", path_key.nh_ip,
+ path_key.out_port);
+
+ struct fib_path *ret_fib_path_data = NULL;
+ int ret =
+ rte_hash_lookup_data(fib_path_hash_handle, &path_key,
+ (void **)&ret_fib_path_data);
+ if (ret < 0) {
+ printf
+ ("FIB PATH hash lookup Failed!! ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ return NULL;
+ } else {
+ printf("FIB PATH ALREADY Exists for NextHop IP: %x, port: %u\n",
+ path_key.nh_ip, path_key.out_port);
+ return ret_fib_path_data;
+ }
+}
+
+void iterate_cuckoo_hash_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ printf("\n\t\t\t FIB_path Cache table....");
+ printf
+ ("\n----------------------------------------------------------------");
+ printf("\n\tNextHop IP Port Refcount l2_adj_ptr_address\n");
+ printf
+ ("\n----------------------------------------------------------------\n");
+
+ while (rte_hash_iterate
+ (fib_path_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct fib_path *tmp_data = (struct fib_path *)next_data;
+ struct fib_path_key_ipv4 tmp_key;
+ memcpy(&tmp_key, next_key, sizeof(tmp_key));
+ printf("\t %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ " \t %u \t %u \t %p\n",
+ (tmp_data->nh_ip & 0xFF000000) >> 24,
+ (tmp_data->nh_ip & 0x00FF0000) >> 16,
+ (tmp_data->nh_ip & 0x0000FF00) >> 8,
+ (tmp_data->nh_ip & 0x000000FF), tmp_data->out_port,
+ tmp_data->refcount, tmp_data->l2_adj_ptr);
+
+ }
+ iter = 0;
+
+ printf("\n\t\t\t L2 ADJ Cache table.....");
+ printf
+ ("\n------------------------------------------------------------------------------------");
+ printf
+ ("\n\tNextHop IP Port \t l2 Encap string \t l2_Phy_interface\n");
+ printf
+ ("\n------------------------------------------------------------------------------------\n");
+
+ while (rte_hash_iterate
+ (l2_adj_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct l2_adj_entry *l2_data = (struct l2_adj_entry *)next_data;
+ struct l2_adj_key_ipv4 l2_key;
+ memcpy(&l2_key, next_key, sizeof(l2_key));
+ printf("\t %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32
+ "\t %u \t%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x\t%p\n",
+ (l2_data->Next_hop_ip & 0xFF000000) >> 24,
+ (l2_data->Next_hop_ip & 0x00FF0000) >> 16,
+ (l2_data->Next_hop_ip & 0x0000FF00) >> 8,
+ (l2_data->Next_hop_ip & 0x000000FF),
+ l2_data->out_port_id, l2_data->l2_string[0],
+ l2_data->l2_string[1], l2_data->l2_string[2],
+ l2_data->l2_string[3], l2_data->l2_string[4],
+ l2_data->l2_string[5], l2_data->l2_string[6],
+ l2_data->l2_string[7], l2_data->l2_string[8],
+ l2_data->l2_string[9], l2_data->l2_string[10],
+ l2_data->l2_string[11], l2_data->phy_port);
+ }
+}
+
+void print_l3_stats(void)
+{
+ printf("==============================================\n");
+ printf("\t\t L3 STATISTICS \t\n");
+ printf("==============================================\n");
+ printf(" Num of Received L3 Pkts : %lu\n", stats.nb_rx_l3_pkt);
+ printf(" Num of Dropped L3 Pkts : %lu\n", stats.nb_l3_drop_pkt);
+ printf(" Num of Transmitted L3 Pkts : %lu\n", stats.nb_tx_l3_pkt);
+ printf(" Num of ICMP Pkts Rcvd at L3 : %lu\n", stats.nb_rx_l3_icmp_pkt);
+ printf(" Num of ICMP Pkts Tx to ICMP : %lu\n", stats.nb_tx_l3_icmp_pkt);
+ stats.total_nb_rx_l3_pkt = stats.nb_rx_l3_icmp_pkt + stats.nb_rx_l3_pkt;
+ stats.total_nb_tx_l3_pkt = stats.nb_tx_l3_icmp_pkt + stats.nb_tx_l3_pkt;
+ printf(" Total Num of Rcvd pkts at L3: %lu\n",
+ stats.total_nb_rx_l3_pkt);
+ printf(" Total Num of Sent pkts at L3: %lu\n",
+ stats.total_nb_tx_l3_pkt);
+}
+
+void
+ip_local_packets_process(struct rte_mbuf **pkt_burst, uint16_t nb_rx,
+ uint64_t icmp_pkt_mask, l2_phy_interface_t *port)
+{
+ process_arpicmp_pkt_parse(pkt_burst, nb_rx, icmp_pkt_mask, port);
+}
+
+void
+ip_forward_deliver(struct rte_mbuf **pkt_burst, uint16_t nb_pkts,
+ uint64_t ipv4_forward_pkts_mask, l2_phy_interface_t *port)
+{
+ if (L3FWD_DEBUG) {
+ printf
+ ("ip_forward_deliver BEFORE DROP: nb_pkts: %u from in_port %u\n",
+ nb_pkts, port->pmdid);
+ }
+ uint64_t pkts_for_process = ipv4_forward_pkts_mask;
+
+ struct ipv4_hdr *ipv4_hdr;
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t hit_mask = 0;
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(pkt_burst[pos], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ /* Make sure the IPv4 packet is valid */
+ if (is_valid_ipv4_pkt(ipv4_hdr, pkt_burst[pos]->pkt_len) < 0) {
+ rte_pktmbuf_free(pkt_burst[pos]); /**< Drop the Unknown IPv4 Packet */
+ pkt_burst[pos] = NULL;
+ ipv4_forward_pkts_mask &= ~(1LLU << pos); /**< That will clear bit of that position*/
+ nb_pkts--;
+ stats.nb_l3_drop_pkt++;
+ }
+ }
+
+ if (L3FWD_DEBUG) {
+ printf
+ ("\nl3fwd_rx_ipv4_packets_received AFTER DROP: nb_pkts: %u, valid_Pkts_mask :%lu\n",
+ nb_pkts, ipv4_forward_pkts_mask);
+ }
+
+ /* Lookup for IP destination in LPMv4 table */
+ lpm4_table_lookup(pkt_burst, nb_pkts, ipv4_forward_pkts_mask, port_ptr,
+ &hit_mask);
+
+ for (; hit_mask;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(hit_mask);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ hit_mask &= ~pkt_mask; /**< remove this packet from the mask */
+
+ port_ptr[pos]->transmit_single_pkt(port_ptr[pos],
+ pkt_burst[pos]);
+ }
+
+}
+
+void
+l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *port))
+{
+ switch (protocol_type) {
+ case IPPROTO_ICMP:
+ proto_type[IP_LOCAL] =
+ rte_malloc(NULL, sizeof(struct ip_protocol_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_type[IP_LOCAL]->protocol_type = protocol_type;
+ proto_type[IP_LOCAL]->func = func;
+ break;
+
+ case IPPROTO_TCP: // For the time being treated as Remote forwarding
+ case IPPROTO_UDP:
+ proto_type[IP_REMOTE] =
+ rte_malloc(NULL, sizeof(struct ip_protocol_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_type[IP_REMOTE]->protocol_type = protocol_type;
+ proto_type[IP_REMOTE]->func = func;
+ break;
+
+ }
+
+}
+
+void l3fwd_rx_ipv4_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask, l2_phy_interface_t *port)
+{
+ if (L3FWD_DEBUG) {
+ printf
+ ("l3fwd_rx_ipv4_packets_received BEFORE DROP: nb_pkts: %u from in_port %u\n",
+ nb_pkts, port->pmdid);
+ }
+ uint64_t pkts_for_process = valid_pkts_mask;
+
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t configure_port_ip = 0;
+ uint64_t icmp_pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+ uint64_t ipv4_forward_pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
+ uint16_t nb_icmp_pkt = 0;
+ uint16_t nb_l3_pkt = 0;
+
+ if (port->ipv4_list != NULL)
+ configure_port_ip =
+ (uint32_t) (((ipv4list_t *) (port->ipv4_list))->ipaddr);
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ ipv4_hdr =
+ rte_pktmbuf_mtod_offset(m[pos], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+
+ if ((ipv4_hdr->next_proto_id == IPPROTO_ICMP)
+ && (ipv4_hdr->dst_addr == configure_port_ip)) {
+ ipv4_forward_pkts_mask &= ~pkt_mask; /**< Its ICMP, remove this packet from the ipv4_forward_pkts_mask*/
+ stats.nb_rx_l3_icmp_pkt++; /**< Increment stats for ICMP PKT */
+ nb_icmp_pkt++;
+ } else{ // Forward the packet
+ icmp_pkts_mask &= ~pkt_mask; /**< Not ICMP, remove this packet from the icmp_pkts_mask*/
+ stats.nb_rx_l3_pkt++;
+ nb_l3_pkt++; /**< Increment stats for L3 PKT */
+ }
+ }
+
+ if (icmp_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+ ("\n RECEIVED LOCAL ICMP PKT at L3...\n PROCESSING ICMP LOCAL PKT...\n");
+ proto_type[IP_LOCAL]->func(m, nb_icmp_pkt, icmp_pkts_mask,
+ port);
+ }
+
+ if (ipv4_forward_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+ ("\n RECEIVED L3 PKT, \n\n FORWARDING L3 PKT....\n");
+ proto_type[IP_REMOTE]->func(m, nb_l3_pkt,
+ ipv4_forward_pkts_mask, port);
+ }
+}
+
+void
+resolve_l2_adj(uint32_t nexthop_ip, uint8_t out_port_id,
+ const struct ether_addr *hw_addr)
+{
+ struct l2_adj_key_ipv4 l2_adj_key = {
+ .Next_hop_ip = nexthop_ip,
+ .out_port_id = out_port_id,
+ };
+ //uint16_t ether_type = 0x0800;
+
+ struct l2_adj_entry *adj_data = retrieve_l2_adj_entry(l2_adj_key);
+
+ if (adj_data) { /**< L2 Adj Entry Exists*/
+
+ printf
+ ("l2_adj_entry exists ip%x, port %d, Refcnt :%u Address :%p\n",
+ l2_adj_key.Next_hop_ip, l2_adj_key.out_port_id,
+ adj_data->refcount, adj_data);
+
+ if (adj_data->flags == L2_ADJ_UNRESOLVED
+ || memcmp(hw_addr, &adj_data->eth_addr,
+ sizeof(struct ether_addr))) {
+ memcpy(adj_data->l2_string, hw_addr, sizeof(struct ether_addr)); /**< Precompute the L2 string encap */
+ memcpy(&adj_data->l2_string[6],
+ &adj_data->phy_port->macaddr,
+ sizeof(struct ether_addr));
+ //memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+ adj_data->flags = L2_ADJ_RESOLVED;
+ }
+
+ return;
+ }
+
+ l2_phy_interface_t *port;
+ port = ifm_get_port(out_port_id);
+ if (port != NULL) {
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return;
+ }
+
+ adj_data->out_port_id = out_port_id;
+ adj_data->Next_hop_ip = nexthop_ip;
+ adj_data->phy_port = port;
+
+ memcpy(adj_data->l2_string, hw_addr, sizeof(struct ether_addr)); /**< Precompute the L2 string encap */
+ memcpy(&adj_data->l2_string[6], &adj_data->phy_port->macaddr,
+ sizeof(struct ether_addr));
+ //memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+ adj_data->flags = L2_ADJ_RESOLVED;
+
+ rte_hash_add_key_data(l2_adj_hash_handle, &l2_adj_key,
+ adj_data);
+ printf
+ ("L2 adj data stored in l2_adj_entry hash table,Addr:%p\n",
+ adj_data);
+ } else
+ printf("PORT:%u IS DOWN...\n", out_port_id);
+
+ return;
+}
+
+uint8_t ip_hash_load_balance(struct rte_mbuf *mbuf)
+{
+ uint32_t src_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST;
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr = NULL;
+ uint32_t *src_addr = NULL;
+ src_addr =
+ (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, src_addr_offset);
+ dst_addr =
+ (uint32_t *) RTE_MBUF_METADATA_UINT8_PTR(mbuf, dst_addr_offset);
+
+ uint32_t hash_key1 = *src_addr; /* STORE SRC IP in key1 variable */
+ uint32_t hash_key2 = *dst_addr; /* STORE DST IP in key2 variable */
+
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR SRC and DST IP, result is hash_key1 */
+ hash_key2 = hash_key1; /* MOVE the result to hash_key2 */
+
+ hash_key1 = rotr32(hash_key1, 16); /* Circular rotate by 16 bits */
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR Key1 with Key2 */
+
+ hash_key2 = hash_key1; /* MOVE the result to hash_key2 */
+
+ hash_key1 = rotr32(hash_key1, 8); /* Circular rotate by 8 bits */
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR Key1 with Key2 */
+
+ hash_key1 = hash_key1 & (HASH_BUCKET_SIZE - 1); /* MASK the key with BUCKET SIZE */
+ if (L3FWD_DEBUG)
+ printf("Hash Result_key: %d, \n", hash_key1);
+ return hash_key1;
+}
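+
+/*
+ * Sketch of how the bucket index above is consumed (mirroring the
+ * MULTIPATH_FEAT branch of the LPM lookup code):
+ *
+ *	uint8_t ecmp_path = ip_hash_load_balance(pkt);
+ *	uint8_t selected_path = nh_links[entry->fib_nh_size - 1][ecmp_path - 1];
+ *	struct fib_path *path = entry->path[selected_path];
+ *
+ * The nh_links[][] table and the bounds checks around it live in the lookup
+ * path; this is only an illustration of the indexing.
+ */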
+
+uint32_t rotr32(uint32_t value, unsigned int count)
+{
+ const unsigned int mask = (CHAR_BIT * sizeof(value) - 1);
+ count &= mask;
+ return (value >> count) | (value << ((-count) & mask));
+}
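+
+/*
+ * Worked example: rotr32(0x12345678, 8) == 0x78123456 -- the low byte wraps
+ * around to the most significant position. A count of 0 or 32 returns the
+ * value unchanged because of the (count & mask) step above.
+ */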
+
+void
+ip_local_out_deliver(struct rte_mbuf **pkt_burst, uint16_t nb_rx,
+ uint64_t ipv4_pkts_mask, l2_phy_interface_t *port)
+{
+ ip_forward_deliver(pkt_burst, nb_rx, ipv4_pkts_mask, port);
+}
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm4.h b/common/VIL/l2l3_stack/l3fwd_lpm4.h
new file mode 100644
index 00000000..69e62368
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm4.h
@@ -0,0 +1,374 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+* @file
+* L3fwd lpm4 header file is for IPv4 specific declarations
+*/
+#ifndef L3FWD_LPM_H
+#define L3FWD_LPM_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <stdbool.h>
+
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+#include "l3fwd_common.h"
+#include "l3fwd_lpm6.h"
+#include "interface.h"
+
+/**
+* Define all RTE MBUF offset size
+*/
+
+#define MBUF_HDR_ROOM 256 /**< MBUF HEADER ROOM OFFSET */
+
+/* IPv4 */
+#define ETH_HDR_SIZE 14 /**< ETHER HEADER OFFSET */
+#define IP_HDR_SIZE 20 /**< IP HEADER OFFSET */
+#define IP_HDR_DST_ADR_OFST 16 /**< IP HEADER DST IP ADDRESS OFFSET */
+#define IP_HDR_SRC_ADR_OFST 12 /**< IP HEADER SRC IP ADDRESS OFFSET */
+
+/* Rules and Table8s */
+#define IPV4_L3FWD_LPM_MAX_RULES 256 /**< Number of LPM RULES */
+#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8) /**< Number of TABLE 8s for LPM */
+#define MAX_FIB_PATHS 8 /**< MAX FIB PATH, If ECMP feature is enabled */
+#define IP_LOCAL 0 /**< for ICMP Packet destined to Local */
+#define IP_REMOTE 1 /**< for IP Packets to be forwarded (Remote) */
+
+/* ECMP MACROS */
+#define MAX_SUPPORTED_FIB_PATHS 8 /**< for ECMP max supported FIB Paths */
+#define HASH_BUCKET_SIZE 64 /**< size of HASH bucket for ECMP */
+
+/* L2 Adjacency Macro */
+#define L2_ADJ_RESOLVED 0x00 /**< Macro to define a flag as Resolved */
+#define L2_ADJ_UNRESOLVED 0x01 /**< Macro to define a flag as Unresolved */
+/**
+* A structure used to define the routing information for IPv4
+* This structure is used as input parameters for route ADD
+*/
+struct routing_info {
+ uint32_t dst_ip_addr; /**< DST IP Address */
+ uint8_t depth; /**< Depth */
+ uint32_t metric; /**< Metrics */
+ uint32_t fib_nh_size; /**< num of fib paths, greater than 1 if Multipath (ECMP) feature is supported */
+ uint32_t nh_ip_addr[MAX_FIB_PATHS]; /**< NextHop IP Address */
+ uint8_t out_port[MAX_FIB_PATHS]; /**< OUTGOING PORT */
+} __rte_cache_aligned;
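+
+/*
+ * Illustrative sketch (not part of the build): how a routing_info entry
+ * might be filled in for a /24 route with two ECMP next hops and handed to
+ * lpm4_table_route_add(). Addresses and port IDs are made-up example values.
+ *
+ *	struct routing_info route = {
+ *		.dst_ip_addr = 0x0a000000,	// 10.0.0.0
+ *		.depth = 24,
+ *		.metric = 0,
+ *		.fib_nh_size = 2,
+ *		.nh_ip_addr = { 0xc0a80101, 0xc0a80201 },	// 192.168.1.1, 192.168.2.1
+ *		.out_port = { 0, 1 },
+ *	};
+ *	if (!lpm4_table_route_add(&route))
+ *		printf("route add failed\n");
+ */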
+
+/**
+* A structure used to define the fib path for Destination IP Address
+* This fib path is shared across different fib_info entries.
+*/
+struct fib_path {
+ uint32_t nh_ip; /**< Next hop IP address (only valid for remote routes) */
+ uint8_t out_port; /**< Output port */
+ uint32_t refcount; /**< Refcount, greater than 1 if multiple fib_info entries share the same fib_path */
+ struct l2_adj_entry *l2_adj_ptr; /**< Address of the L2 ADJ table entry */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the fib info (Route info)
+* This fib info structure can have multiple fib paths.
+*/
+struct fib_info {
+ uint32_t dst_ip_addr; /**< DST IP Address */
+ uint32_t metric; /**< Metrics */
+ uint32_t fib_nh_size; /**< num of fib paths, greater than 1 if Multipath (ECMP) feature is supported */
+ uint8_t depth; /**< Depth */
+ struct fib_path *path[MAX_FIB_PATHS]; /**< Array of pointers to the fib_path */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L2 Adjacency table
+*/
+struct l2_adj_entry {
+ struct ether_addr eth_addr; /**< Ether address */
+ uint32_t Next_hop_ip; /**< Next hop IP address (only valid for remote routes) */
+ uint8_t out_port_id; /**< Output port */
+ uint32_t refcount; /**< Refcount, greater than 1 if multiple fib_path entries share the same l2_adj_entry */
+ uint8_t l2_string[256]; /**< L2 string, to rewrite the packet before transmission */
+ l2_phy_interface_t *phy_port; /**< Address of the L2 physical interface structure */
+ uint8_t flags; /**< Set to unresolved, when ARP entry not available. Set to resolved, when ARP is available */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
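+
+/*
+ * Layout of l2_string as precomputed by the adjacency code in this patch:
+ * bytes 0-5 carry the next hop destination MAC and bytes 6-11 the source MAC
+ * of the outgoing port, so a single memcpy of two ether_addr's rewrites the
+ * Ethernet header before transmit; the IPv6 path additionally writes the
+ * EtherType at bytes 12-13, while the IPv4 path leaves it untouched.
+ */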
+
+/**
+* A structure used to define the fib path key for hash table
+*/
+struct fib_path_key_ipv4 {
+ uint32_t nh_ip; /**< Next hop IP address */
+ uint8_t out_port; /**< Output port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+/**
+* A structure used to define the fib path key for hash table
+*/
+struct l2_adj_key_ipv4 {
+ uint32_t Next_hop_ip; /**< Next hop IP address */
+ uint8_t out_port_id; /**< Output port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
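+
+/*
+ * Usage note (sketch): both key structures above are hashed in full, so
+ * callers are expected to zero the filler fields (as populate_fib_path()
+ * does) to keep lookups deterministic.
+ */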
+
+/**
+* A structure used to hold the fib info after LPM Lookup
+*/
+struct routing_table_entry {
+ uint32_t ip; /**< Next hop IP address (only valid for remote routes) */
+ uint8_t port_id; /**< Output port ID */
+ struct l2_adj_entry *l2_adj_ptr; /**< Address of L2 Adjacency table entry */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L3 counter statistics
+*/
+typedef struct l3fwd_stats {
+ uint64_t nb_rx_l3_pkt; /**< Num of L3 pkts Received */
+ uint64_t nb_tx_l3_pkt; /**< Num of L3 pkts Transmitted */
+ uint64_t nb_rx_l3_icmp_pkt;
+ /**< Num of ICMP pkts Received at L3*/
+ uint64_t nb_tx_l3_icmp_pkt;
+ /**< Num of ICMP pkts Transmitted at L3*/
+ uint64_t nb_l3_drop_pkt; /**< Num of L3 Packets Dropped*/
+ uint64_t total_nb_rx_l3_pkt;
+ /**< Total Num of L3 Packets received, includes ICMP Pkt*/
+ uint64_t total_nb_tx_l3_pkt;
+ /**< Total Num of L3 Packets Transmitted, includes ICMP Pkt*/
+} l3_stats_t;
+
+struct ip_protocol_type {
+ uint8_t protocol_type; /**< Protocol Type */
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+} __rte_cache_aligned;
+
+/* Function Declarations */
+
+/**
+ * To create the LPM table and the Cuckoo hash tables for fib_path and l2_adj_entry
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm_init(void);
+
+/**
+ * To add a route in LPM table by populating fib_path and L2 Adjacency.
+ * @param input_array
+ * To add the route based on the routing_info structure.
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm4_table_route_add(struct routing_info *input_array);
+
+/**
+ * To Delete the IP route and corresponding fib_path and L2 Adjacency entries.
+ * @param ip
+ * Destination IP for which the route needs to be deleted
+ * @param depth
+ * netmask for the Destination IP
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm4_table_route_delete(uint32_t ip, uint8_t depth);
+
+/**
+ * To perform a LPM table lookup
+ * @param pkts_burst
+ * Burst of packets that need to be looked up in the LPM table
+ * @param nb_pkts
+ * number of packets that need to be looked up in the LPM table
+ * @param valid_pkts_mask
+ * lookup of the valid IPv4 Pkt mask
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm4_table_lookup(struct rte_mbuf **pkts_burst, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask,
+ l2_phy_interface_t *port[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask);
+
+/**
+ * To Verify whether the received IPv4 Packet is valid or not
+ * @param pkt
+ * packet pointing to the IPv4 header that needs to be verified
+ * @param link_len
+ * length of the IPv4 Pkt
+ * @return
+ * 0 for failure, 1 for success
+*/
+int is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
+
+/**
+ * To forward the valid L3 packets for LPM table lookup and forward ICMP Pkts to the ICMP module
+ * @param m
+ * packet burst of type rte_mbuf
+ * @param nb_pkts
+ * Number of valid L3 packets
+ * @param pkt_mask
+ * Valid IPv4 packets mask that needs to be processed
+ * @param port
+ * IPv4 Pkt received from the input port structure.
+ * @return
+ * 0 for failure, 1 for success
+*/
+void l3fwd_rx_ipv4_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t pkt_mask, l2_phy_interface_t *port);
+
+/**
+ * To get the destination MAC Address for the nexthop IP and outgoing port
+ * @param next_hop_ip
+ * Next HOP IP Address for which MAC address is needed
+ * @param out_phy_port
+ * Outgoing physical port
+ * @param hw_addr
+ * pointer to the ether_addr; this gets updated with a valid MAC address based on nh_ip and out port
+ * @return
+ * 0 if failure, 1 if success
+ */
+int get_dest_mac_for_nexthop(uint32_t next_hop_ip,
+ uint8_t out_phy_port, struct ether_addr *hw_addr);
+/**
+ * To retrieve the l2_adj_entry for the nexthop IP and outgoing port
+ * This queries the cuckoo hash table using the l2_adj_key_ipv4 key
+ * @param l2_adj_key
+ * Key which is required for the Cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of the L2_adj_entry if lookup success
+*/
+
+struct l2_adj_entry *retrieve_l2_adj_entry(struct l2_adj_key_ipv4 l2_adj_key);
+
+/**
+ * To populate the l2_adj_entry for the nexthop IP and outgoing port
+ * @param ipaddr
+ * NextHop IP Address for which the L2_adj_entry needs to be populated
+ * @param portid
+ * outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the L2_adj_entry if lookup success
+*/
+
+struct l2_adj_entry *populate_l2_adj(uint32_t ipaddr, uint8_t portid);
+
+/**
+ * To populate the fib_path for the nexthop IP and outgoing port
+ * @param nh_ip
+ * NextHop IP Address for which the fib_path needs to be populated
+ * @param portid
+ * outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the type fib_path if lookup success
+*/
+struct fib_path *populate_fib_path(uint32_t nh_ip, uint8_t portid);
+
+/**
+ * To retrieve the fib_path entry for the nexthop IP and outgoing port
+ * This queries the cuckoo hash table using the fib_path_key_ipv4 key
+ * @param path_key
+ * Key which is required for the Cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of type fib_path if lookup success
+*/
+
+struct fib_path *retrieve_fib_path_entry(struct fib_path_key_ipv4 path_key);
+
+/**
+ * To delete the fib path and l2 adjacency entry from the cuckoo hash table
+ * @return
+ * None
+*/
+void remove_fib_l2_adj_entry(void *);
+
+/**
+ * To iterate the cuckoo hash table for fib_path and l2_adj_entry and print the table contents
+ * @return
+ * None
+*/
+void iterate_cuckoo_hash_table(void);
+
+/**
+ * To print the L3 counter statistics
+ * @return
+ * None
+*/
+void print_l3_stats(void);
+
+/**
+ * To get the hash resultant value based on SRC IP and DST IP
+ * @param mbuf
+ * packet of type rte_mbuf
+ * @return
+ * It returns a result of type uint8_t
+ */
+
+uint8_t ip_hash_load_balance(struct rte_mbuf *mbuf);
+
+/**
+ * Rotates the 32-bit value right by count bits
+ * @param value
+ * an integer value
+ * @param count
+ * number of bits to rotate the value by
+ * @return
+ * It returns a result.
+ */
+
+uint32_t rotr32(uint32_t value, unsigned int count);
+
+void
+resolve_l2_adj(uint32_t nexthop_ip, uint8_t out_port_id,
+ const struct ether_addr *hw_addr);
+
+void
+l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *));
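+
+/*
+ * Illustrative registration sketch (assumed init-time wiring, not shown in
+ * this file): local delivery and forwarding handlers are plugged in via
+ * l3_protocol_type_add(), e.g.
+ *
+ *	l3_protocol_type_add(IPPROTO_ICMP, ip_local_packets_process);
+ *	l3_protocol_type_add(IPPROTO_TCP, ip_forward_deliver);
+ *	l3_protocol_type_add(IPPROTO_UDP, ip_forward_deliver);
+ */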
+
+void
+ip_local_packets_process(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+void ip_local_out_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+void
+ip_forward_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+#endif /* L3FWD_LPM_H */
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm6.c b/common/VIL/l2l3_stack/l3fwd_lpm6.c
new file mode 100644
index 00000000..7aa7fb6a
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm6.c
@@ -0,0 +1,1058 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "interface.h"
+#include "l2_proto.h"
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+
+/* Declare Global variables */
+
+/* Global for IPV6 */
+void *lpm6_table; /**< lpm6 table handler */
+struct rte_hash *l2_adj_ipv6_hash_handle; /**< IPv6 l2 adjacency table handler */
+struct rte_hash *fib_path_ipv6_hash_handle; /**< IPv6 fib path hash table handler */
+extern uint8_t nh_links[MAX_SUPPORTED_FIB_PATHS][HASH_BUCKET_SIZE];
+extern l3_stats_t stats; /**< L3 statistics */
+
+static struct ipv6_protocol_type *proto_type[2];
+
+int lpm6_init(void)
+{
+
+ /* Initialize LPMv6 params */
+
+ struct rte_table_lpm_ipv6_params lpm6_params = {
+ .name = "LPMv6",
+ .n_rules = IPV6_L3FWD_LPM_MAX_RULES,
+ .number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S,
+ .entry_unique_size = sizeof(struct ipv6_fib_info),
+ .offset = 128,
+ };
+
+ /* Create LPMv6 tables */
+ lpm6_table =
+ rte_table_lpm_ipv6_ops.f_create(&lpm6_params, rte_socket_id(),
+ sizeof(struct ipv6_fib_info));
+ if (lpm6_table == NULL) {
+ printf("Failed to create LPM IPV6 table\n");
+ return 0;
+ }
+
+ /*Initialize IPv6 params for l2 Adj */
+ struct rte_hash_parameters l2_adj_ipv6_params = {
+ .name = "l2_ADJ_IPV6_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct l2_adj_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ };
+
+ l2_adj_ipv6_hash_handle = rte_hash_create(&l2_adj_ipv6_params);
+ if (l2_adj_ipv6_hash_handle == NULL) {
+ printf("ND for IPV6 rte_hash_create failed.\n");
+ return 0;
+ } else {
+ printf("ND IPV6_hash_handle %p\n\n",
+ (void *)l2_adj_ipv6_hash_handle);
+ }
+
+ /* Initialize FIB path hash params */
+ struct rte_hash_parameters fib_path_ipv6_params = {
+ .name = "FIB_PATH_IPV6_HASH",
+ .entries = 64,
+ .key_len = sizeof(struct fib_path_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+ .extra_flag = 1,
+ };
+
+ /* Create FIB PATH Hash tables */
+ fib_path_ipv6_hash_handle = rte_hash_create(&fib_path_ipv6_params);
+
+ if (fib_path_ipv6_hash_handle == NULL) {
+ printf("FIB path rte_hash_create failed\n");
+ return 0;
+ }
+ return 1;
+}
+
+int lpm6_table_route_add(struct ipv6_routing_info *data)
+{
+
+ struct ipv6_routing_info *fib = data;
+ /* Populate the Key */
+ struct rte_table_lpm_ipv6_key lpm6_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ lpm6_key.ip[i] = fib->dst_ipv6[i];
+ }
+ lpm6_key.depth = fib->depth;
+
+ static int Total_route_count;
+ struct ipv6_fib_info entry;
+ for (i = 0; i < 16; i++) {
+ entry.dst_ipv6[i] = fib->dst_ipv6[i];
+ }
+ entry.depth = fib->depth;
+ entry.fib_nh_size = fib->fib_nh_size;
+
+#if MULTIPATH_FEAT
+ if (entry.fib_nh_size == 0 || entry.fib_nh_size > MAX_FIB_PATHS)
+#else
+ if (entry.fib_nh_size != 1) /**< For Single FIB_PATH */
+#endif
+ {
+ printf
+ ("Route can't be configured!!, entry.fib_nh_size = %d\n",
+ entry.fib_nh_size);
+ return 0;
+ }
+
+ /* Populate L2 adj and precomputes l2 encap string */
+#if MULTIPATH_FEAT
+ for (i = 0; i < entry.fib_nh_size; i++)
+#else
+ for (i = 0; i < 1; i++)
+#endif
+ {
+ struct ipv6_fib_path *ipv6_fib_path_addr = NULL;
+ ipv6_fib_path_addr =
+ populate_ipv6_fib_path(fib->nh_ipv6[i], fib->out_port[i]);
+
+ if (ipv6_fib_path_addr) {
+ entry.path[i] = ipv6_fib_path_addr;
+ printf("Fib path for IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u) ==> fib_path Addr :%p, L2_adj Addr ;%p\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2],
+ lpm6_key.ip[3], lpm6_key.ip[4], lpm6_key.ip[5],
+ lpm6_key.ip[6], lpm6_key.ip[7], lpm6_key.ip[8],
+ lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13],
+ lpm6_key.ip[14], lpm6_key.ip[15], fib->depth,
+ ipv6_fib_path_addr,
+ (void *)entry.path[i]->l2_adj_ipv6_ptr);
+ } else {
+ printf("Fib path for IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u) ==> fib_path Addr : NULL\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2],
+ lpm6_key.ip[3], lpm6_key.ip[4], lpm6_key.ip[5],
+ lpm6_key.ip[6], lpm6_key.ip[7], lpm6_key.ip[8],
+ lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13],
+ lpm6_key.ip[14], lpm6_key.ip[15], fib->depth);
+ entry.path[i] = NULL; /**< setting all other fib_paths to NULL */
+ }
+ }
+
+ int key_found, ret;
+ void *entry_ptr;
+
+ /* Adding a IP route in LPMv6 table */
+ printf("%s, Line %u \n", __FUNCTION__, __LINE__);
+
+ ret =
+ rte_table_lpm_ipv6_ops.f_add(lpm6_table, (void *)&lpm6_key, &entry,
+ &key_found, &entry_ptr);
+ printf("%s, Line %u \n", __FUNCTION__, __LINE__);
+
+ if (ret) {
+ printf("Failed to Add IP route in LPMv6\n");
+ return 0;
+ }
+ printf("Added route to IPv6 LPM table (IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u)\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2], lpm6_key.ip[3],
+ lpm6_key.ip[4], lpm6_key.ip[5], lpm6_key.ip[6], lpm6_key.ip[7],
+ lpm6_key.ip[8], lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13], lpm6_key.ip[14],
+ lpm6_key.ip[15], fib->depth);
+
+ Total_route_count++;
+ printf("Total Routes Added : %u, Key_found: %d\n", Total_route_count,
+ key_found);
+
+ if (Total_route_count == 2)
+ ipv6_iterate__hash_table();
+
+ return 1;
+}
+
+int
+lpm6_table_route_delete(uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE], uint8_t depth)
+{
+
+ /* Populate the Key */
+ struct rte_table_lpm_ipv6_key lpm6_key;
+ memcpy(lpm6_key.ip, dst_ipv6, RTE_LPM_IPV6_ADDR_SIZE); /* copy all 16 address bytes into the LPM key */
+ lpm6_key.depth = depth;
+ int key_found, ret;
+ char *entry = NULL;
+ entry = rte_zmalloc(NULL, 512, RTE_CACHE_LINE_SIZE);
+ if (entry == NULL) {
+ printf("LPMv6 route delete: memory allocation failed !\n");
+ return 0;
+ }
+ /* Delete a IP route in LPMv6 table */
+ ret =
+ rte_table_lpm_ipv6_ops.f_delete(lpm6_table, &lpm6_key, &key_found,
+ entry);
+
+ if (ret) {
+ printf("Failed to Delete IP route from LPMv6 table\n");
+ return 0;
+ }
+
+ printf("Deleted route from IPv6 LPM table (IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x/%u, key_found = %d\n",
+ lpm6_key.ip[0], lpm6_key.ip[1], lpm6_key.ip[2], lpm6_key.ip[3],
+ lpm6_key.ip[4], lpm6_key.ip[5], lpm6_key.ip[6], lpm6_key.ip[7],
+ lpm6_key.ip[8], lpm6_key.ip[9], lpm6_key.ip[10], lpm6_key.ip[11],
+ lpm6_key.ip[12], lpm6_key.ip[13], lpm6_key.ip[14],
+ lpm6_key.ip[15], lpm6_key.depth, key_found);
+
+ /* Deleting a L2 Adj entry if refcount is 1, Else decrement Refcount */
+ remove_ipv6_fib_l2_adj_entry(entry);
+ rte_free(entry); // free memory
+ return 1;
+}
+
+int
+lpm6_table_lookup(struct rte_mbuf **pkts_burst,
+ uint16_t nb_pkts,
+ uint64_t pkts_mask,
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask)
+{
+ struct ipv6_routing_table_entry
+ *ipv6_entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t lookup_hit_mask_ipv6 = 0;
+ int status;
+ uint64_t lookup_miss_mask = pkts_mask;
+ /*Populate the key offset in META DATA */
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST_IPV6;
+ uint64_t pkts_key_mask = pkts_mask;
+
+ //for(i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++)
+ for (; pkts_key_mask;) {
+/**< Populate key offset in META DATA for all valid pkts */
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_key_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ pkts_key_mask &= ~pkt_mask;
+
+ uint8_t *lpm6_key;
+ uint8_t dst_addr[RTE_LPM_IPV6_ADDR_SIZE];
+ memcpy(dst_addr,
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(pkts_burst[pos],
+ dst_addr_offset),
+ RTE_LPM_IPV6_ADDR_SIZE);
+ lpm6_key =
+ (uint8_t *) RTE_MBUF_METADATA_UINT8_PTR(pkts_burst[pos],
+ 128);
+ memcpy(lpm6_key, dst_addr, RTE_LPM_IPV6_ADDR_SIZE);
+ }
+ /* Lookup for IP route in LPM6 table */
+ printf(" IPV6 Lookup Mask Before = %p, nb_pkts :%u\n",
+ (void *)pkts_mask, nb_pkts);
+ status =
+ rte_table_lpm_ipv6_ops.f_lookup(lpm6_table, pkts_burst, pkts_mask,
+ &lookup_hit_mask_ipv6,
+ (void **)ipv6_entries);
+ if (status) {
+ printf("LPM Lookup failed for IP route\n");
+ return 0;
+ }
+ printf(" IPV6 Lookup Mask After = %p\n", (void *)lookup_hit_mask_ipv6);
+ lookup_miss_mask = lookup_miss_mask & (~lookup_hit_mask_ipv6);
+ if (L3FWD_DEBUG) {
+ printf("AFTER lookup_hit_mask = %p, lookup_miss_mask =%p\n",
+ (void *)lookup_hit_mask_ipv6, (void *)lookup_miss_mask);
+ }
+
+ for (; lookup_miss_mask;) {
+/**< Drop packets for lookup_miss_mask */
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_miss_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_miss_mask &= ~pkt_mask;
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ if (L3FWD_DEBUG)
+ printf("\n DROP PKT IPV6 Lookup_miss_Mask = %p\n",
+ (void *)lookup_miss_mask);
+
+ }
+ *hit_mask = lookup_hit_mask_ipv6;
+ for (; lookup_hit_mask_ipv6;) {
+ uint8_t pos = (uint8_t) __builtin_ctzll(lookup_hit_mask_ipv6);
+ uint64_t pkt_mask = 1LLU << pos;
+ lookup_hit_mask_ipv6 &= ~pkt_mask;
+ struct rte_mbuf *pkt = pkts_burst[pos];
+
+ struct ipv6_fib_info *entry =
+ (struct ipv6_fib_info *)ipv6_entries[pos];
+
+#if MULTIPATH_FEAT
+
+ uint8_t ecmp_path = ipv6_hash_load_balance(pkts_burst[pos]);
+ uint8_t selected_path = 0;
+ struct ipv6_fib_path *fib_path = NULL;
+ if (((entry->fib_nh_size != 0)
+ && (entry->fib_nh_size - 1) < MAX_SUPPORTED_FIB_PATHS)
+ && ((ecmp_path != 0) && (ecmp_path - 1) < HASH_BUCKET_SIZE))
+ selected_path =
+ nh_links[entry->fib_nh_size - 1][ecmp_path - 1];
+ if (selected_path < MAX_FIB_PATHS)
+ fib_path = entry->path[selected_path];
+ printf
+ ("Total supported Path :%u, Hashed ECMP Key : %u, selected Fib_path: %u\n",
+ entry->fib_nh_size, ecmp_path, selected_path);
+#else
+ struct ipv6_fib_path *fib_path = entry->path[0];
+#endif
+ if (fib_path == NULL) {
+ printf("Fib_path is NULL, ND has not resolved\n");
+ rte_pktmbuf_free(pkt);
+ pkts_burst[pos] = NULL;
+ stats.nb_l3_drop_pkt++; /**< Peg the L3 Drop counter */
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ printf
+ ("Fib_path is NULL, ND has not resolved, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ if (fib_path->l2_adj_ipv6_ptr->flags == L2_ADJ_UNRESOLVED) {
+ rte_pktmbuf_free(pkts_burst[pos]);
+ pkts_burst[pos] = NULL;
+ *hit_mask &= ~pkt_mask; /**< Remove this pkt from port Mask */
+ if (L3FWD_DEBUG)
+ printf
+ ("L2_ADJ_UNRESOLVED, DROPPED UNKNOWN PKT\n");
+ continue;
+ }
+
+ uint8_t *eth_dest =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+ uint8_t *eth_src =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM + 6);
+ if (L3FWD_DEBUG) {
+ printf
+ ("MAC BEFORE- DST MAC %02x:%02x:%02x:%02x"
+ ":%02x:%02x, "
+ "SRC MAC %02x:%02x:%02x:%02x:"
+ "%02x:%02x \n",
+ eth_dest[0], eth_dest[1], eth_dest[2],
+ eth_dest[3],
+ eth_dest[4], eth_dest[5], eth_src[0],
+ eth_src[1],
+ eth_src[2], eth_src[3],
+ eth_src[4], eth_src[5]);
+ }
+
+ /* Rewrite the packet with L2 string */
+ memcpy(eth_dest, fib_path->l2_adj_ipv6_ptr->l2_string,
+ sizeof(struct ether_addr) * 2 + 2);
+
+ if (L3FWD_DEBUG) {
+ printf
+ ("MAC AFTER DST MAC %02x:%02x:%02x:%02x:%02x:%02x,"
+ "SRC MAC %02x:%02x:%02x:%02x:"
+ "%02x:%02x\n", eth_dest[0],
+ eth_dest[1], eth_dest[2], eth_dest[3],
+ eth_dest[4],
+ eth_dest[5], eth_src[0], eth_src[1],
+ eth_src[2],
+ eth_src[3], eth_src[4], eth_src[5]);
+ }
+ port_ptr[pos] = fib_path->l2_adj_ipv6_ptr->phy_port;
+
+ //fib_path->l2_adj_ipv6_ptr->phy_port->transmit_single_pkt(fib_path->l2_adj_ipv6_ptr->phy_port, pkt);
+ if (L3FWD_DEBUG)
+ printf("Successfully sent to port %u \n\r",
+ fib_path->out_port);
+ }
+ return 1;
+}
+
+void l3fwd_rx_ipv6_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask, l2_phy_interface_t *port)
+{
+ if (!port)
+ return;
+ if (L3FWD_DEBUG) {
+ printf
+ ("l3fwd_rx_ipv6_packets_received BEFORE DROP: nb_pkts: %u, from in_port %u, valid_pkts_mask:%"
+ PRIu64 "\n", nb_pkts, port->pmdid, valid_pkts_mask);
+ }
+ uint64_t pkts_for_process = valid_pkts_mask;
+
+ struct ipv6_hdr *ipv6_hdr;
+ //struct ether_hdr *eth_h;
+ uint64_t icmp_pkts_mask = valid_pkts_mask;
+ uint64_t ipv6_forward_pkts_mask = valid_pkts_mask;
+ uint16_t nb_icmpv6_pkt = 0;
+ uint16_t nb_l3_pkt = 0;
+
+ uint8_t configured_port_ipv6[RTE_LPM_IPV6_ADDR_SIZE] = { 0 };
+ uint8_t solicited_node_multicast_addr[RTE_LPM_IPV6_ADDR_SIZE] = {
+ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xff, 0x00, 0x00, 0x00 };
+ uint8_t dest_ipv6_addr[RTE_LPM_IPV6_ADDR_SIZE];
+
+ memset(dest_ipv6_addr, 0, RTE_LPM_IPV6_ADDR_SIZE);
+
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ int ii;
+ if (port->ipv6_list != NULL) {
+ for (ii = 0; ii < 16; ii += 1) {
+ configured_port_ipv6[ii] =
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr[ii];
+ }
+ }
+ // memcpy(&configured_port_ipv6, &(((ipv6list_t*)(port->ipv6_list))->ipaddr), RTE_LPM_IPV6_ADDR_SIZE);
+
+ for (ii = 0; ii < 16; ii += 2) {
+ if (port && port->ipv6_list)
+ printf("%02X%02X ",
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr[ii],
+ ((ipv6list_t *) (port->ipv6_list))->ipaddr[ii +
+ 1]);
+ }
+
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", configured_port_ipv6[ii],
+ configured_port_ipv6[ii + 1]);
+ }
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ //printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ //eth_h = rte_pktmbuf_mtod(m[pos], struct ether_hdr *);
+ printf("\n%s : LINE #%u, POS%u\n", __FUNCTION__, __LINE__,
+ pos);
+ //ipv6_hdr = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ if (m[pos] == NULL) {
+ printf("\n%s : m[pos] is NULL, LINE: %u\n",
+ __FUNCTION__, __LINE__);
+ return;
+ }
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(m[pos], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 13; ii += 1) {
+ dest_ipv6_addr[ii] = ipv6_hdr->dst_addr[ii];
+ }
+
+ printf("\n");
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", ipv6_hdr->dst_addr[ii],
+ ipv6_hdr->dst_addr[ii + 1]);
+ }
+ printf("\n");
+ printf("\n%s : LINE # %u\n", __FUNCTION__, __LINE__);
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", dest_ipv6_addr[ii],
+ dest_ipv6_addr[ii + 1]);
+ }
+
+ printf("\n%s : LINE # %u", __FUNCTION__, __LINE__);
+ if ((ipv6_hdr->proto == IPPROTO_ICMPV6) &&
+ (!memcmp
+ (&ipv6_hdr->dst_addr, &configured_port_ipv6[0],
+ RTE_LPM_IPV6_ADDR_SIZE)
+ || !memcmp(&dest_ipv6_addr[0],
+ &solicited_node_multicast_addr[0],
+ RTE_LPM_IPV6_ADDR_SIZE))) {
+ ipv6_forward_pkts_mask &= ~pkt_mask; /**< Its ICMP, remove this packet from the ipv6_forward_pkts_mask*/
+ stats.nb_rx_l3_icmp_pkt++; /**< Increment stats for ICMP PKT */
+ nb_icmpv6_pkt++;
+ } else{ // Forward the packet
+ icmp_pkts_mask &= ~pkt_mask; /**< Not ICMP, remove this packet from the icmp_pkts_mask*/
+ stats.nb_rx_l3_pkt++;
+ nb_l3_pkt++; /**< Increment stats for L3 PKT */
+ }
+ }
+
+ if (icmp_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+ ("\n RECEIVED LOCAL ICMP PKT at L3...\n PROCESSING ICMP LOCAL PKT...\n");
+ proto_type[IP_LOCAL]->func(m, nb_icmpv6_pkt, icmp_pkts_mask,
+ port);
+ }
+
+ if (ipv6_forward_pkts_mask) {
+ if (L3FWD_DEBUG)
+ printf
+ ("\n RECEIVED L3 PKT, \n\n FORWARDING L3 PKT....\n");
+ proto_type[IP_REMOTE]->func(m, nb_l3_pkt,
+ ipv6_forward_pkts_mask, port);
+ }
+}
+
+struct ipv6_fib_path *populate_ipv6_fib_path(uint8_t
+ nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t portid)
+{
+
+ struct fib_path_key_ipv6 path_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ path_key.nh_ipv6[i] = nh_ipv6[i];
+ }
+ path_key.out_port = portid;
+ path_key.filler1 = 0;
+ path_key.filler2 = 0;
+ path_key.filler3 = 0;
+
+ struct ipv6_fib_path *fib_data = NULL;
+ /* Populate fib_path if it is present in the FIB_PATH cuckoo Hash Table */
+ fib_data = retrieve_ipv6_fib_path_entry(path_key);
+
+ if (fib_data) {
+
+ printf(" Fib path entry exists for IPv6 destination = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u\n",
+ nh_ipv6[0], nh_ipv6[1], nh_ipv6[2], nh_ipv6[3],
+ nh_ipv6[4], nh_ipv6[5], nh_ipv6[6], nh_ipv6[7],
+ nh_ipv6[8], nh_ipv6[9], nh_ipv6[10], nh_ipv6[11],
+ nh_ipv6[12], nh_ipv6[13], nh_ipv6[14], nh_ipv6[15],
+ portid);
+
+ fib_data->refcount++;
+ return fib_data; // Entry Exists. Return True (1)
+ } else {
+ printf("IPv6 fib_path entry doesn't exist.......\n");
+ }
+
+ /* populate L2 Adj */
+ fib_data = NULL;
+ struct l2_adj_ipv6_entry *l2_adj_ptr = NULL;
+ l2_adj_ptr = populate_ipv6_l2_adj(nh_ipv6, portid);
+
+ if (l2_adj_ptr) {
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct ipv6_fib_path));
+ fib_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ for (i = 0; i < 16; i++) {
+ fib_data->nh_ipv6[i] = nh_ipv6[i];
+ }
+ fib_data->out_port = portid;
+ //memcpy(fib_data->nh_ipv6, &nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+
+ fib_data->refcount++;
+ fib_data->l2_adj_ipv6_ptr = l2_adj_ptr;
+
+ /* Store the fib_path entry in the FIB PATH Hash Table */
+ rte_hash_add_key_data(fib_path_ipv6_hash_handle, &path_key,
+ fib_data);
+ printf
+ (" ND resolution success l2_adj_entry %p\n, ipv6_fib_path_addr %p",
+ l2_adj_ptr, fib_data);
+ return fib_data;
+ } else {
+ printf
+ ("ND resolution failed and unable to write fib path in fib_path cuckoo hash\n");
+ }
+ return NULL;
+
+}
+
+struct l2_adj_ipv6_entry *populate_ipv6_l2_adj(uint8_t
+ nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t portid)
+{
+
+ struct l2_adj_key_ipv6 l2_adj_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ l2_adj_key.nh_ipv6[i] = nh_ipv6[i];
+ }
+ l2_adj_key.out_port_id = portid;
+ l2_adj_key.filler1 = 0;
+ l2_adj_key.filler2 = 0;
+ l2_adj_key.filler3 = 0;
+
+ struct l2_adj_ipv6_entry *adj_data = NULL;
+ struct ether_addr eth_dst;
+ /* Populate L2 adj if the MAC Address is present in the L2 Adj Hash Table */
+ adj_data = retrieve_ipv6_l2_adj_entry(l2_adj_key);
+
+ if (adj_data) {
+
+ printf("ipv6_l2_adj_entry exists for Next Hop IPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u\n",
+ nh_ipv6[0], nh_ipv6[1], nh_ipv6[2], nh_ipv6[3],
+ nh_ipv6[4], nh_ipv6[5], nh_ipv6[6], nh_ipv6[7],
+ nh_ipv6[8], nh_ipv6[9], nh_ipv6[10], nh_ipv6[11],
+ nh_ipv6[12], nh_ipv6[13], nh_ipv6[14], nh_ipv6[15],
+ portid);
+
+ ether_addr_copy(&adj_data->eth_addr, &eth_dst);
+ adj_data->refcount++;
+ return adj_data; // Entry Exists. Return True (1)
+ }
+
+ struct ether_addr eth_src;
+ uint16_t ether_type = 0x086DD;
+ l2_phy_interface_t *port;
+ port = ifm_get_port(portid);
+ if (port == NULL) {
+ printf("PORT %u IS DOWN.. Unable to process !\n", portid);
+ return NULL;
+ }
+
+ memcpy(&eth_src, &port->macaddr, sizeof(struct ether_addr));
+ uint32_t size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_ipv6_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return NULL;
+ }
+
+ adj_data->out_port_id = portid;
+ //memcpy(adj_data->nh_ipv6, &nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+ for (i = 0; i < 16; i++) {
+ adj_data->nh_ipv6[i] = nh_ipv6[i];
+ }
+ adj_data->refcount++;
+ adj_data->phy_port = port;
+
+ rte_hash_add_key_data(l2_adj_ipv6_hash_handle, &l2_adj_key, adj_data);
+
+ /* Query ND to get L2 Adj */
+ if (get_dest_mac_for_nexthop_ipv6(nh_ipv6, portid, &eth_dst)) {
+ /* Store the received MAC Address in the L2 Adj Hash Table */
+ ether_addr_copy(&eth_dst, &adj_data->eth_addr);
+
+ /* Precompute the L2 string encapsulation */
+ memcpy(&adj_data->l2_string, &eth_dst,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[6], &eth_src,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ adj_data->flags = L2_ADJ_RESOLVED;
+ printf
+ (" ND resolution successful and stored in ipv6_l2_adj_entry %p\n",
+ adj_data);
+
+ return adj_data;
+ } else {
+ adj_data->flags = L2_ADJ_UNRESOLVED;
+ printf
+ ("ND resolution failed and unable to write in ipv6_l2_adj_entry\n");
+ }
+ return NULL;
+}
+
+struct l2_adj_ipv6_entry *retrieve_ipv6_l2_adj_entry(struct l2_adj_key_ipv6
+ l2_adj_key)
+{
+ struct l2_adj_ipv6_entry *ret_l2_adj_data = NULL;
+
+ int ret =
+ rte_hash_lookup_data(l2_adj_ipv6_hash_handle, &l2_adj_key,
+ (void **)&ret_l2_adj_data);
+ if (ret < 0) {
+ printf
+ ("L2 Adj hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ } else {
+ printf("L2 Adj hash lookup Successful..!!!\n");
+ return ret_l2_adj_data;
+ }
+ return NULL;
+}
+
+int get_dest_mac_for_nexthop_ipv6(uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint32_t out_phy_port,
+ struct ether_addr *hw_addr)
+{
+ struct nd_entry_data *nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ uint8_t i;
+ for (i = 0; i < 16; i++) {
+ tmp_nd_key.ipv6[i] = nh_ipv6[i];
+ }
+ tmp_nd_key.port_id = out_phy_port;
+
+ nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (nd_data == NULL) {
+ printf("ND entry is not found\n");
+ return 0;
+ }
+ ether_addr_copy(&nd_data->eth_addr, hw_addr);
+
+ return 1;
+}
+
+struct ipv6_fib_path *retrieve_ipv6_fib_path_entry(struct fib_path_key_ipv6
+ path_key)
+{
+
+ struct ipv6_fib_path *ret_fib_path_data = NULL;
+ int ret =
+ rte_hash_lookup_data(fib_path_ipv6_hash_handle, &path_key,
+ (void **)&ret_fib_path_data);
+ if (ret < 0) {
+ printf
+ ("FIB Path Adj hash lookup failed ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ return NULL;
+ } else {
+ return ret_fib_path_data;
+ }
+}
+
+void remove_ipv6_fib_l2_adj_entry(void *entry)
+{
+ struct ipv6_fib_info entry1;
+ memcpy(&entry1, entry, sizeof(struct ipv6_fib_info));
+
+ struct ipv6_fib_path *fib_path_addr = entry1.path[0]; //fib_info->path[0];
+ if (fib_path_addr->refcount > 1) {
+ printf("BEFORE fib_path entry is not Removed! nh_iPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u, refcount :%d\n",
+ fib_path_addr->nh_ipv6[0], fib_path_addr->nh_ipv6[1],
+ fib_path_addr->nh_ipv6[2], fib_path_addr->nh_ipv6[3],
+ fib_path_addr->nh_ipv6[4], fib_path_addr->nh_ipv6[5],
+ fib_path_addr->nh_ipv6[6], fib_path_addr->nh_ipv6[7],
+ fib_path_addr->nh_ipv6[8], fib_path_addr->nh_ipv6[9],
+ fib_path_addr->nh_ipv6[10], fib_path_addr->nh_ipv6[11],
+ fib_path_addr->nh_ipv6[12], fib_path_addr->nh_ipv6[13],
+ fib_path_addr->nh_ipv6[14], fib_path_addr->nh_ipv6[15],
+ fib_path_addr->out_port, fib_path_addr->refcount);
+ fib_path_addr->refcount--; // Just decrement the refcount this entry is still referred
+ printf("AFTER fib_path entry is not Removed! nh_iPv6 = "
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x:"
+ "%.2x%.2x:%.2x%.2x:%.2x%.2x:%.2x%.2x and out port :%u, refcount :%d\n",
+ fib_path_addr->nh_ipv6[0], fib_path_addr->nh_ipv6[1],
+ fib_path_addr->nh_ipv6[2], fib_path_addr->nh_ipv6[3],
+ fib_path_addr->nh_ipv6[4], fib_path_addr->nh_ipv6[5],
+ fib_path_addr->nh_ipv6[6], fib_path_addr->nh_ipv6[7],
+ fib_path_addr->nh_ipv6[8], fib_path_addr->nh_ipv6[9],
+ fib_path_addr->nh_ipv6[10], fib_path_addr->nh_ipv6[11],
+ fib_path_addr->nh_ipv6[12], fib_path_addr->nh_ipv6[13],
+ fib_path_addr->nh_ipv6[14], fib_path_addr->nh_ipv6[15],
+ fib_path_addr->out_port, fib_path_addr->refcount);
+ } else { // Refcount is 1 so delete both fib_path and l2_adj_entry
+
+ struct l2_adj_ipv6_entry *adj_addr = NULL;
+ adj_addr = fib_path_addr->l2_adj_ipv6_ptr;
+
+ if (adj_addr != NULL) { //l2_adj_entry is has some entry in hash table
+ printf("%s: CHECK %d\n\r", __FUNCTION__, __LINE__);
+ struct l2_adj_key_ipv6 l2_adj_key;
+ memcpy(&l2_adj_key.nh_ipv6, fib_path_addr->nh_ipv6,
+ RTE_LPM_IPV6_ADDR_SIZE);
+ l2_adj_key.out_port_id =
+ fib_path_addr->out_port,
+ rte_hash_del_key(l2_adj_ipv6_hash_handle,
+ &l2_adj_key);
+ rte_free(adj_addr); // free memory
+ adj_addr = NULL;
+ }
+
+ struct fib_path_key_ipv6 path_key;
+ memcpy(&path_key.nh_ipv6, fib_path_addr->nh_ipv6,
+ RTE_LPM_IPV6_ADDR_SIZE);
+ path_key.out_port = fib_path_addr->out_port;
+ rte_hash_del_key(fib_path_ipv6_hash_handle, &path_key);
+ rte_free(fib_path_addr); //Free the memory
+ fib_path_addr = NULL;
+ }
+}
+
+int is_valid_ipv6_pkt(struct ipv6_hdr *pkt, uint32_t link_len)
+{
+ if (link_len < sizeof(struct ipv6_hdr))
+ return -1;
+ if (rte_cpu_to_be_16(pkt->payload_len) < sizeof(struct ipv6_hdr))
+ return -1;
+
+ return 0;
+}
+
+void
+ipv6_l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *))
+{
+ switch (protocol_type) {
+ case IPPROTO_ICMPV6:
+ proto_type[IP_LOCAL] =
+ rte_malloc(NULL, sizeof(struct ip_protocol_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_type[IP_LOCAL]->protocol_type = protocol_type;
+ proto_type[IP_LOCAL]->func = func;
+ break;
+
+ case IPPROTO_TCP: // For the time being treated as Remote forwarding
+ case IPPROTO_UDP:
+ proto_type[IP_REMOTE] =
+ rte_malloc(NULL, sizeof(struct ip_protocol_type),
+ RTE_CACHE_LINE_SIZE);
+ proto_type[IP_REMOTE]->protocol_type = protocol_type;
+ proto_type[IP_REMOTE]->func = func;
+ break;
+ }
+}
+
+void
+ipv6_local_deliver(struct rte_mbuf **pkt_burst, __rte_unused uint16_t nb_rx,
+ uint64_t icmp_pkt_mask, l2_phy_interface_t *port)
+{
+ for (; icmp_pkt_mask;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(icmp_pkt_mask);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ icmp_pkt_mask &= ~pkt_mask; /**< remove this packet from the mask */
+
+ process_icmpv6_pkt(pkt_burst[pos], port);
+ }
+}
+
+void
+ipv6_forward_deliver(struct rte_mbuf **pkt_burst, uint16_t nb_pkts,
+ uint64_t ipv6_forward_pkts_mask, l2_phy_interface_t *port)
+{
+ if (L3FWD_DEBUG) {
+ printf
+ ("ipv6_forward_deliver BEFORE DROP: nb_pkts: %u from in_port %u\n",
+ nb_pkts, port->pmdid);
+ }
+ uint64_t pkts_for_process = ipv6_forward_pkts_mask;
+
+ struct ipv6_hdr *ipv6_hdr;
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t hit_mask = 0;
+
+ for (; pkts_for_process;) {
+/**< process only valid packets.*/
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkt_mask = 1LLU << pos; /**< bitmask representing only this packet */
+ pkts_for_process &= ~pkt_mask; /**< remove this packet from the mask */
+ ipv6_hdr =
+ rte_pktmbuf_mtod_offset(pkt_burst[pos], struct ipv6_hdr *,
+ sizeof(struct ether_hdr));
+ /* Make sure the IPv6 packet is valid */
+
+ if (is_valid_ipv6_pkt(ipv6_hdr, pkt_burst[pos]->pkt_len) < 0) {
+ rte_pktmbuf_free(pkt_burst[pos]); /**< Drop the Unknown IPv6 Packet */
+ pkt_burst[pos] = NULL;
+ ipv6_forward_pkts_mask &= ~(1LLU << pos); /**< That will clear bit of that position*/
+ nb_pkts--;
+ stats.nb_l3_drop_pkt++;
+ }
+ }
+
+ if (L3FWD_DEBUG) {
+ printf
+ ("\nl3fwd_rx_ipv6_packets_received AFTER DROP: nb_pkts: %u, valid_Pkts_mask :%lu\n",
+ nb_pkts, ipv6_forward_pkts_mask);
+ }
+
+ /* Lookup for IP destination in LPMv6 table */
+ lpm6_table_lookup(pkt_burst, nb_pkts, ipv6_forward_pkts_mask, port_ptr,
+ &hit_mask);
+}
+
+uint8_t ipv6_hash_load_balance(struct rte_mbuf *mbuf)
+{
+ uint32_t src_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_SRC_ADR_OFST_IPV6;
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST_IPV6;
+ uint8_t src_addr[RTE_LPM_IPV6_ADDR_SIZE];
+ uint8_t dst_addr[RTE_LPM_IPV6_ADDR_SIZE];
+
+ memcpy(&src_addr,
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(mbuf, src_addr_offset),
+ RTE_LPM_IPV6_ADDR_SIZE);
+ memcpy(&dst_addr,
+ (uint8_t *) RTE_MBUF_METADATA_UINT32_PTR(mbuf, dst_addr_offset),
+ RTE_LPM_IPV6_ADDR_SIZE);
+ uint32_t hash_key1 = 0; /* STORE Accumulated value of SRC IP in key1 variable */
+ uint32_t hash_key2 = 0; /* STORE Accumulated value of DST IP in key2 variable */
+ uint8_t i;
+ for (i = 0; i < RTE_LPM_IPV6_ADDR_SIZE; i++) {
+ hash_key1 += src_addr[i]; /* Accumulate */
+ hash_key2 += dst_addr[i]; /* Accumulate */
+ }
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR SRC and DST IP, result is hash_key1 */
+ hash_key2 = hash_key1; /* MOVE the result to hash_key2 */
+ hash_key1 = rotr32(hash_key1, RTE_LPM_IPV6_ADDR_SIZE); /* Circular rotate by 16 bits */
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR Key1 with Key2 */
+
+ hash_key2 = hash_key1; /* MOVE the result to hash_key2 */
+
+ hash_key1 = rotr32(hash_key1, 8); /* Circular rotate by 8 bits */
+ hash_key1 = hash_key1 ^ hash_key2; /* XOR Key1 with Key2 */
+
+ hash_key1 = hash_key1 & (HASH_BUCKET_SIZE - 1); /* MASK the key with BUCKET SIZE */
+ if (L3FWD_DEBUG)
+ printf("Hash Result_key: %d, \n", hash_key1);
+ return hash_key1;
+}
+
+void
+resolve_ipv6_l2_adj(uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE], uint8_t portid,
+ struct ether_addr *hw_addr)
+{
+ struct l2_adj_ipv6_entry *adj_data = NULL;
+ uint16_t ether_type = 0x086DD;
+
+ struct l2_adj_key_ipv6 l2_adj_key;
+ memcpy(&l2_adj_key.nh_ipv6, &nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+ l2_adj_key.out_port_id = portid;
+
+ adj_data = retrieve_ipv6_l2_adj_entry(l2_adj_key);
+ if (adj_data) {
+ if (adj_data->flags == L2_ADJ_UNRESOLVED
+ || memcmp(&adj_data->eth_addr, hw_addr, 6)) {
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+
+ /* Precompute the L2 string encapsulation */
+ memcpy(&adj_data->l2_string, hw_addr,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[6],
+ &adj_data->phy_port->macaddr,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ adj_data->flags = L2_ADJ_RESOLVED;
+ }
+
+ return;
+ }
+
+ l2_phy_interface_t *port;
+ port = ifm_get_port(portid);
+ if (port == NULL) {
+ printf("PORT %u IS DOWN..! Unable to Process\n", portid);
+ return;
+ }
+ uint32_t size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct l2_adj_ipv6_entry));
+ adj_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (adj_data == NULL) {
+ printf("L2 Adjacency memory allocation failed !\n");
+ return;
+ }
+
+ adj_data->out_port_id = portid;
+ memcpy(adj_data->nh_ipv6, &nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+
+ adj_data->phy_port = port;
+
+ ether_addr_copy(hw_addr, &adj_data->eth_addr);
+
+ /* Precompute the L2 string encapsulation */
+ memcpy(&adj_data->l2_string, hw_addr, sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[6], &port->macaddr,
+ sizeof(struct ether_addr));
+ memcpy(&adj_data->l2_string[12], &ether_type, 2);
+
+ adj_data->flags = L2_ADJ_RESOLVED;
+
+ /* Store the received MAC Address in L2 Adj HAsh Table */
+ rte_hash_add_key_data(l2_adj_ipv6_hash_handle, &l2_adj_key, adj_data);
+
+ printf(" ND resolution successful and stored in ipv6_l2_adj_entry %p\n",
+ adj_data);
+}
+
+void ipv6_iterate__hash_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ uint8_t ii;
+ printf("\n\t\t\t IPv6 FIB_path Cache table....");
+ printf
+ ("\n------------------------------------------------------------------------------");
+ printf
+ ("\n\tNextHop IP \t\t\t\t Port Refcount l2_adj_ptr_addrress\n\n");
+ printf
+ ("--------------------------------------------------------------------------------\n");
+
+ while (rte_hash_iterate
+ (fib_path_ipv6_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct ipv6_fib_path *tmp_data =
+ (struct ipv6_fib_path *)next_data;
+ struct fib_path_key_ipv6 tmp_key;
+ memcpy(&tmp_key, next_key, sizeof(tmp_key));
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", tmp_data->nh_ipv6[ii],
+ tmp_data->nh_ipv6[ii + 1]);
+ }
+ printf(" \t %u \t %u \t %p\n", tmp_data->out_port,
+ tmp_data->refcount, tmp_data->l2_adj_ipv6_ptr);
+
+ }
+
+ iter = 0;
+
+ printf("\n\t\t\t L2 ADJ Cache table.....");
+ printf
+ ("\n----------------------------------------------------------------------------------\n");
+ printf
+ ("\tNextHop IP \t\t\t\t Port \t l2 Encap string \t l2_Phy_interface\n");
+ printf
+ ("\n------------------------------------------------------------------------------------\n");
+ while (rte_hash_iterate
+ (l2_adj_ipv6_hash_handle, &next_key, &next_data, &iter) >= 0) {
+ struct l2_adj_ipv6_entry *l2_data =
+ (struct l2_adj_ipv6_entry *)next_data;
+ struct l2_adj_key_ipv6 l2_key;
+ memcpy(&l2_key, next_key, sizeof(l2_key));
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", l2_data->nh_ipv6[ii],
+ l2_data->nh_ipv6[ii + 1]);
+ }
+ printf(" \t%u\t%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x:%x\t%p\n",
+ l2_data->out_port_id,
+ l2_data->l2_string[0],
+ l2_data->l2_string[1],
+ l2_data->l2_string[2],
+ l2_data->l2_string[3],
+ l2_data->l2_string[4],
+ l2_data->l2_string[5],
+ l2_data->l2_string[6],
+ l2_data->l2_string[7],
+ l2_data->l2_string[8],
+ l2_data->l2_string[9],
+ l2_data->l2_string[10],
+ l2_data->l2_string[11], l2_data->phy_port);
+ }
+}
diff --git a/common/VIL/l2l3_stack/l3fwd_lpm6.h b/common/VIL/l2l3_stack/l3fwd_lpm6.h
new file mode 100644
index 00000000..fdd8287a
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_lpm6.h
@@ -0,0 +1,315 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/**
+* @file
+* L3fwd lpm6 header file is for IPv6 specific declarations
+*/
+
+#ifndef L3FWD_LPM6_H
+#define L3FWD_LPM6_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <stdbool.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_lpm.h>
+#include <rte_lpm6.h>
+#include <rte_table_lpm_ipv6.h>
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "interface.h"
+
+/**
+* Define all RTE MBUF offset size
+*/
+
+#define MBUF_HDR_ROOM 256 /**< MBUF HEADER ROOM OFFSET */
+/* IPv6 */
+#define IP_HDR_SIZE_IPV6 40 /**< IPv6 HEADER OFFSET */
+#define IP_HDR_SRC_ADR_OFST_IPV6 8 /**< IPv6 HEADER SRC IP ADDRESS OFFSET */
+#define IP_HDR_DST_ADR_OFST_IPV6 24 /**< IPv6 HEADER DST IP ADDRESS OFFSET */
+
+/* IPV6 Rules and Table8s */
+#define IPV6_L3FWD_LPM_MAX_RULES 1024 /**< Number of LPM6 Rules*/
+#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16) /**< Number of Table 8 for LPM6 */
+
+#define MAX_FIB_PATHS 8 /**< MAX FIB PATH, If ECMP feature is enabled */
+
+/**
+* A structure used to define the routing information for IPv6
+* This structure is used as input parameters for route ADD
+*/
+struct ipv6_routing_info {
+ uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< DST IPv6 Address */
+ uint8_t depth; /**< Depth */
+ uint32_t metric; /**< Metrics */
+ uint32_t fib_nh_size; /**< Number of fib paths; greater than 1 if the Multipath (ECMP) feature is supported */
+ uint8_t nh_ipv6[MAX_FIB_PATHS][RTE_LPM_IPV6_ADDR_SIZE]; /**< NextHop IP Address */
+ uint8_t out_port[MAX_FIB_PATHS]; /**< OUTGOING PORT */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the fib path for Destination IPv6 Address
+* This fib path is shared across different fib_info entries.
+*/
+struct ipv6_fib_path {
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IP address (only valid for remote routes) */
+ uint32_t refcount; /**< Refcount; greater than 1 if multiple fib_info entries share the same fib_path */
+ uint8_t out_port; /**< Output port */
+ struct l2_adj_ipv6_entry *l2_adj_ipv6_ptr;/**< Address of the L2 ADJ table entry */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the fib info (Route info)
+* This fib info structure can have multiple fib paths.
+*/
+struct ipv6_fib_info {
+ uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< DST IPv6 Address */
+ uint8_t depth; /**< Depth */
+ uint32_t metric; /**< Metric */
+ uint32_t fib_nh_size; /**< Number of fib paths; greater than 1 if the Multipath (ECMP) feature is supported */
+ struct ipv6_fib_path *path[MAX_FIB_PATHS]; /**< Array of pointers to the fib_path */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L2 Adjacency table
+*/
+struct l2_adj_ipv6_entry {
+ struct ether_addr eth_addr; /**< Ether address */
+ uint8_t out_port_id; /**< Outgoing port */
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IP address (only valid for remote routes) */
+ uint32_t refcount; /**< Refcount; greater than 1 if multiple fib_path entries share the same l2_adj_entry */
+ uint8_t l2_string[256]; /**< L2 string, to rewrite the packet before transmission */
+ l2_phy_interface_t *phy_port; /**< Address of the L2 physical interface structure */
+ uint8_t flags; /**< flags for marking this entry as resolved or unresolved. */
+} __rte_cache_aligned; /**< RTE CACHE ALIGNED */
+
+/**
+* A structure used to define the L2 Adjacency table
+*/
+struct l2_adj_key_ipv6 {
+ /*128 Bit of IPv6 Address */
+ /*<48bit Network> <16bit Subnet> <64bit Interface> */
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IPv6 address */
+ uint8_t out_port_id; /**< Outgoing port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+/**
+* A structure used to define the fib path key for hash table
+*/
+struct fib_path_key_ipv6 {
+ /*128 Bit of IPv6 Address */
+ /*<48bit Network> <16bit Subnet> <64bit Interface> */
+ uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE]; /**< Next hop IPv6 address */
+ uint8_t out_port; /**< Outgoing port */
+ uint8_t filler1; /**< Filler 1, for better hash key */
+ uint8_t filler2; /**< Filler2, for better hash key*/
+ uint8_t filler3; /**< Filler3, for better hash Key */
+};
+
+struct ipv6_protocol_type {
+ uint8_t protocol_type; /**< Protocol Type */
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+} __rte_cache_aligned;
+
+/* Function Declarations */
+/**
+ * To create the LPM6 table and the Cuckoo hash tables for fib_path and l2_adj_entry
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_init(void);
+
+/**
+ * To add a route in LPM6 table by populating fib_path and L2 Adjacency.
+ * @param data
+ *  To add the route based on the ipv6_routing_info structure.
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_table_route_add(struct ipv6_routing_info *data);
+
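+/*
+ * Usage sketch (prefix, nexthop and port below are hypothetical; the return
+ * convention is as documented above):
+ *
+ *	struct ipv6_routing_info r = {
+ *		.dst_ipv6 = {0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
+ *			     0, 0, 0, 0, 0, 0, 0, 0},
+ *		.depth = 48,
+ *		.fib_nh_size = 1,
+ *		.nh_ipv6 = {{0xfe, 0x80, 0, 0, 0, 0, 0, 0,
+ *			     0, 0, 0, 0, 0, 0, 0, 0x01}},
+ *		.out_port = {1},
+ *	};
+ *
+ *	if (lpm6_table_route_add(&r) == 0)
+ *		printf("route add failed\n");
+ */
+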
+/**
+ * To Delete the IP route and corresponding fib_path and L2 Adjacency entries.
+ * @param dst_ipv6
+ *  Destination IPv6 for which the route needs to be deleted
+ * @param depth
+ * netmask for the Destination IP
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_table_route_delete(uint8_t dst_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t depth);
+
+/**
+ * To perform a LPM6 table lookup
+ * @param pkts_burst
+ *  Burst of packets to be looked up in the LPM6 table
+ * @param nb_pkts
+ * Number of valid L3 packets
+ * @param pkts_mask
+ *  Mask of valid packets to be looked up in the LPM6 table
+ * @return
+ * 0 for failure, 1 for success
+ */
+int lpm6_table_lookup(struct rte_mbuf **pkts_burst, uint16_t nb_pkts,
+ uint64_t pkts_mask,
+ l2_phy_interface_t *port_ptr[RTE_PORT_IN_BURST_SIZE_MAX],
+ uint64_t *hit_mask);
+
+/**
+ * To forward valid L3 packets via LPM6 table lookup and hand ICMPv6 packets to the ICMP module
+ * @param m
+ * packet burst of type rte_mbuf
+ * @param nb_pkts
+ * Number of valid L3 packets
+ * @param valid_pkts_mask
+ * Valid IPv6 packets mask that needs to be processed
+ * @param in_port
+ *  Input port on which the IPv6 packets were received.
+ * @return
+ * None
+ */
+void l3fwd_rx_ipv6_packets(struct rte_mbuf **m, uint16_t nb_pkts,
+ uint64_t valid_pkts_mask,
+ l2_phy_interface_t *in_port);
+
+/**
+ * To populate the fib_path for the nexthop IPv6 and outgoing port
+ * @param nh_ipv6
+ *  NextHop IPv6 address for which the fib_path needs to be populated
+ * @param out_port
+ *  Outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the type ipv6_fib_path if lookup success
+*/
+struct ipv6_fib_path *populate_ipv6_fib_path(uint8_t
+ nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t out_port);
+
+/**
+ * To retrieve the fib_path entry for the nexthop IP and outgoing port
+ * This queries the Cuckoo hash table based on the fib_path_key_ipv6
+ * @param path_key
+ *  Key which is required for the Cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of type ipv6_fib_path if lookup success
+*/
+struct ipv6_fib_path *retrieve_ipv6_fib_path_entry(struct fib_path_key_ipv6
+ path_key);
+
+/**
+ * To retrieve the l2_adj_entry for the nexthop IP and outgoing port
+ * This queries the Cuckoo hash table based on the l2_adj_key_ipv6
+ * @param l2_adj_key
+ *  Key which is required for the Cuckoo hash table lookup
+ * @return
+ * NULL if lookup fails, Address of type l2_adj_ipv6_entry if lookup success
+*/
+struct l2_adj_ipv6_entry *retrieve_ipv6_l2_adj_entry(struct l2_adj_key_ipv6
+ l2_adj_key);
+
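+/*
+ * Lookup sketch (nexthop and port below are hypothetical). The filler bytes
+ * are zeroed so that identical keys always hash identically:
+ *
+ *	struct l2_adj_key_ipv6 key;
+ *	memset(&key, 0, sizeof(key));
+ *	memcpy(key.nh_ipv6, nh_ipv6, RTE_LPM_IPV6_ADDR_SIZE);
+ *	key.out_port_id = 1;
+ *
+ *	struct l2_adj_ipv6_entry *adj = retrieve_ipv6_l2_adj_entry(key);
+ *	if (adj == NULL || adj->flags == L2_ADJ_UNRESOLVED)
+ *		;	// not resolved yet; trigger ND before forwarding
+ */
+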
+/**
+ * To populate the l2_adj_entry for the nexthop IP and outgoing port
+ * @param nh_ip
+ *  NextHop IPv6 address for which the l2_adj_entry needs to be populated
+ * @param portid
+ *  Outgoing port ID
+ * @return
+ * NULL if lookup fails, Address of the L2_adj_ipv6_entry if lookup success
+*/
+struct l2_adj_ipv6_entry *populate_ipv6_l2_adj(uint8_t
+ nh_ip[RTE_LPM_IPV6_ADDR_SIZE],
+ uint8_t portid);
+
+/**
+ * To get the destination MAC Address for the nexthop IP and outgoing port
+ * @param nh_ipv6
+ *  NextHop IPv6 address for which the MAC address is needed
+ * @param out_phy_port
+ *  Outgoing physical port
+ * @param hw_addr
+ *  Pointer to an ether_addr; this gets updated with the valid MAC address based on nh_ipv6 and the outgoing port
+ * @return
+ * 0 if failure, 1 if success
+ */
+int get_dest_mac_for_nexthop_ipv6(uint8_t nh_ipv6[RTE_LPM_IPV6_ADDR_SIZE],
+ uint32_t out_phy_port,
+ struct ether_addr *hw_addr);
+
+/**
+ * To delete the ipv6 fib path and l2 adjacency entry from the cuckoo hash table
+ * @return
+ * None
+*/
+void remove_ipv6_fib_l2_adj_entry(void *entry);
+
+void
+ipv6_l3_protocol_type_add(uint8_t protocol_type,
+ void (*func) (struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *));
+
+void
+ipv6_local_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+void
+ipv6_forward_deliver(struct rte_mbuf **, uint16_t, uint64_t,
+ l2_phy_interface_t *);
+
+int is_valid_ipv6_pkt(struct ipv6_hdr *pkt, uint32_t link_len);
+uint8_t ipv6_hash_load_balance(struct rte_mbuf *mbuf);
+
+/**
+ * To resolve l2_adj_entry based on nexthop IP, outgoing port and ether hw address.
+ * @param nh_ip
+ *  NextHop IPv6 address for which the l2_adj_entry needs to be resolved
+ * @param portid
+ *  Outgoing port ID
+ * @param hw_addr
+ *  Ethernet hardware address for the above nexthop IP and outgoing port ID.
+ * @return
+ *  None
+*/
+
+void resolve_ipv6_l2_adj(uint8_t nh_ip[RTE_LPM_IPV6_ADDR_SIZE], uint8_t portid,
+ struct ether_addr *hw_addr);
+
+void ipv6_iterate__hash_table(void);
+#endif /* L3FWD_LPM6_H */
diff --git a/common/VIL/l2l3_stack/l3fwd_main.c b/common/VIL/l2l3_stack/l3fwd_main.c
new file mode 100644
index 00000000..247d8876
--- /dev/null
+++ b/common/VIL/l2l3_stack/l3fwd_main.c
@@ -0,0 +1,145 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+/****************************************************************************
+*
+* filename : l3fwd_main.c
+*
+*
+******************************************************************************/
+
+#include "l3fwd_common.h"
+#include "l2_proto.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#include "interface.h"
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+
+struct routing_info input_array[] = {
+#if MULTIPATH_FEAT
+ {IPv4(30, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(40, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(50, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(60, 12, 0, 1), 24, 0, 4,
+ {IPv4(192, 168, 0, 2), IPv4(1, 1, 1, 7), IPv4(120, 0, 0, 2),
+ IPv4(30, 40, 50, 60)}, {1, 1, 1, 1} },
+
+ {IPv4(100, 100, 100, 100), 24, 0, 2,
+ {IPv4(120, 0, 0, 2), IPv4(120, 0, 0, 2)}, {1, 1} }, // FIB path available
+
+ {IPv4(200, 100, 100, 100), 24, 0, 2,
+ {IPv4(80, 0, 0, 2), IPv4(80, 40, 50, 60)}, {1, 1} }, // FIB path not available
+#else
+ {IPv4(30, 12, 0, 1), 24, 0, 1,
+ {IPv4(192, 168, 0, 2)}, {1} },
+
+ {IPv4(20, 12, 0, 1), 24, 0, 1,
+ {IPv4(120, 0, 0, 2)}, {1} },
+#endif
+};
+
+struct ipv6_routing_info ipv6_input_array[] = {
+
+ {{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, 48, 0, 2,
+ {{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20}
+ },
+ {1, 1}
+ },
+
+ {{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}, 48, 0, 2,
+ {{10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10},
+ {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20}
+ },
+ {1, 1}
+ },
+};
+
+void l3fwd_init(void)
+{
+ printf(" *********** L3 Initialization START ************\n");
+ if (lpm_init() == 0) {
+ rte_exit(EXIT_FAILURE, "L3 Initialization IPv4 Failed\n");
+ }
+ if (lpm6_init() == 0) {
+ rte_exit(EXIT_FAILURE, "L3 Initialization for IPV6 Failed\n");
+ }
+
+ list_add_type(ETHER_TYPE_IPv4, l3fwd_rx_ipv4_packets);
+ list_add_type(ETHER_TYPE_IPv6, l3fwd_rx_ipv6_packets);
+
+ l3_protocol_type_add(IPPROTO_ICMP, ip_local_packets_process);
+ l3_protocol_type_add(IPPROTO_TCP, ip_forward_deliver);
+ l3_protocol_type_add(IPPROTO_UDP, ip_forward_deliver);
+
+ ipv6_l3_protocol_type_add(IPPROTO_ICMPV6, ipv6_local_deliver);
+ ipv6_l3_protocol_type_add(IPPROTO_TCP, ipv6_forward_deliver);
+ ipv6_l3_protocol_type_add(IPPROTO_UDP, ipv6_forward_deliver);
+
+}
+
+void populate_lpm_routes(void)
+{
+ populate_lpm4_table_routes();
+ //populate_lpm6_table_routes();
+}
+
+void populate_lpm4_table_routes(void)
+{
+ uint8_t i;
+ printf
+ (" *********** L3 IPV4 Route Initialization START ************\n");
+ for (i = 0; i < MAX_ROUTES; i++) {
+ if (lpm4_table_route_add(&input_array[i])) {
+
+ printf("Total routes Added# %d\n", i + 1);
+ } else {
+ rte_exit(EXIT_FAILURE,
+ "L3 route addition failed for the route# %d\n",
+ i);
+ }
+ }
+ printf
+ (" *********** L3 IPV4 Route Initialization END ************\n\n");
+}
+
+void populate_lpm6_table_routes(void)
+{
+ uint8_t i;
+ printf
+ (" *********** L3 IPV6 Route Initialization START ************\n");
+ for (i = 0; i < 2; i++) {
+ if (lpm6_table_route_add(&ipv6_input_array[i])) {
+
+ printf("Added route # %d\n", i);
+ } else {
+ rte_exit(EXIT_FAILURE,
+ "L3 route addition failed for the route# %d\n",
+ i);
+ }
+ }
+ printf(" *********** L3 IPV6 Route Initialization END ************\n");
+}
diff --git a/common/VIL/l2l3_stack/lib_arp.c b/common/VIL/l2l3_stack/lib_arp.c
new file mode 100644
index 00000000..042dd39c
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_arp.c
@@ -0,0 +1,2655 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <execinfo.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include <rte_timer.h>
+#include "interface.h"
+#include "l2_proto.h"
+#include "lib_arp.h"
+#include "l3fwd_lpm4.h"
+#include "vnf_common.h"
+
+#if (RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN)
+#define CHECK_ENDIAN_16(x) rte_be_to_cpu_16(x)
+#define CHECK_ENDIAN_32(x) rte_be_to_cpu_32(x)
+#else
+#define CHECK_ENDIAN_16(x) (x)
+#define CHECK_ENDIAN_32(x) (x)
+#endif
+
+#define NB_ARPICMP_MBUF 64
+#define NB_NDICMP_MBUF 64
+#define IP_VERSION_4 0x40
+#define IP_HDRLEN 0x05 /**< default IP header length == five 32-bit words. */
+#define IP_VHL_DEF (IP_VERSION_4 | IP_HDRLEN)
+
+#define is_multicast_ipv4_addr(ipv4_addr) \
+ (((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0)
+
+extern uint8_t prv_in_port_a[16];
+extern uint32_t timer_lcore;
+uint32_t arp_timeout = ARP_TIMER_EXPIRY;
+
+/*ND IPV6 */
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+static int my_inet_pton_ipv6(int af, const char *src, void *dst);
+static int inet_pton_ipv6(const char *src, unsigned char *dst);
+static int inet_pton_ipv4(const char *src, unsigned char *dst);
+extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
+ uint8_t netmask_ipv6[]);
+
+uint8_t vnf_common_arp_lib_init;
+uint8_t vnf_common_nd_lib_init;
+uint8_t loadb_pipeline_count;
+
+uint32_t ARPICMP_DEBUG;
+uint32_t NDIPV6_DEBUG;
+
+uint32_t arp_route_tbl_index;
+uint32_t nd_route_tbl_index;
+uint32_t link_hw_addr_array_idx;
+
+uint32_t lib_arp_get_mac_req;
+uint32_t lib_arp_nh_found;
+uint32_t lib_arp_no_nh_found;
+uint32_t lib_arp_arp_entry_found;
+uint32_t lib_arp_no_arp_entry_found;
+uint32_t lib_arp_populate_called;
+uint32_t lib_arp_delete_called;
+uint32_t lib_arp_duplicate_found;
+
+uint32_t lib_nd_get_mac_req;
+uint32_t lib_nd_nh_found;
+uint32_t lib_nd_no_nh_found;
+uint32_t lib_nd_nd_entry_found;
+uint32_t lib_nd_no_arp_entry_found;
+uint32_t lib_nd_populate_called;
+uint32_t lib_nd_delete_called;
+uint32_t lib_nd_duplicate_found;
+struct rte_mempool *lib_arp_pktmbuf_tx_pool;
+struct rte_mempool *lib_nd_pktmbuf_tx_pool;
+
+struct rte_mbuf *lib_arp_pkt;
+struct rte_mbuf *lib_nd_pkt;
+
+uint8_t default_ether_addr[6] = { 0, 0, 0, 0, 1, 1 };
+uint8_t default_ip[4] = { 0, 0, 1, 1 };
+
+static struct rte_hash_parameters arp_hash_params = {
+ .name = "ARP",
+ .entries = 64,
+ .reserved = 0,
+ .key_len = sizeof(struct arp_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+static struct rte_hash_parameters nd_hash_params = {
+ .name = "ND",
+ .entries = 64,
+ .reserved = 0,
+ .key_len = sizeof(struct nd_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+struct rte_hash *arp_hash_handle;
+struct rte_hash *nd_hash_handle;
+
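+/*
+ * The two handles above are presumably created from the parameter blocks
+ * during library initialisation, along these lines (illustrative sketch
+ * only):
+ *
+ *	arp_hash_params.socket_id = rte_socket_id();
+ *	arp_hash_handle = rte_hash_create(&arp_hash_params);
+ *	if (arp_hash_handle == NULL)
+ *		rte_panic("ARP hash table creation failed\n");
+ */
+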
+void print_pkt1(struct rte_mbuf *pkt);
+
+struct app_params *myApp;
+struct rte_pipeline *myP;
+uint8_t num_vnf_threads;
+
+/**
+* A structure for Arp port address
+*/
+struct arp_port_address {
+ uint32_t ip; /**< Ip address */
+ uint8_t mac_addr[6]; /**< Mac address */
+};
+
+struct arp_port_address arp_port_addresses[RTE_MAX_ETHPORTS];
+struct rte_mempool *timer_mempool_arp;
+
+int timer_objs_mempool_count = 70000;
+
+#define MAX_NUM_ARP_ENTRIES 64
+#define MAX_NUM_ND_ENTRIES 64
+
+uint32_t get_nh(uint32_t, uint32_t *);
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[]);
+
+#define MAX_ARP_DATA_ENTRY_TABLE 7
+
+struct table_arp_entry_data arp_entry_data_table[MAX_ARP_DATA_ENTRY_TABLE] = {
+ {{0, 0, 0, 0, 0, 1}, 1, INCOMPLETE, IPv4(192, 168, 0, 2)},
+ {{0, 0, 0, 0, 0, 2}, 0, INCOMPLETE, IPv4(192, 168, 0, 3)},
+ {{0, 0, 0, 0, 0, 1}, 1, INCOMPLETE, IPv4(30, 40, 50, 60)},
+ {{0, 0, 0, 0, 0, 1}, 1, INCOMPLETE, IPv4(120, 0, 0, 2)},
+ {{0, 0, 0, 0, 0, 4}, 3, INCOMPLETE, IPv4(1, 1, 1, 4)},
+ {{0, 0, 0, 0, 0, 5}, 4, INCOMPLETE, IPv4(1, 1, 1, 5)},
+ {{0, 0, 0, 0, 0, 6}, 1, INCOMPLETE, IPv4(1, 1, 1, 7)},
+};
+
+#define MAX_ND_DATA_ENTRY_TABLE 7
+struct table_nd_entry_data nd_entry_data_table[MAX_ND_DATA_ENTRY_TABLE] = {
+ {{0, 0, 0, 0, 0, 8}, 1, INCOMPLETE,
+ {10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, 0},
+
+ {{0, 0, 0, 0, 0, 9}, 1, INCOMPLETE,
+ {20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20}, 0},
+ {{0, 0, 0, 0, 0, 10}, 2, INCOMPLETE,
+ {3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 11}, 3, INCOMPLETE,
+ {4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 12}, 4, INCOMPLETE,
+ {5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 13}, 5, INCOMPLETE,
+ {6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+ {{0, 0, 0, 0, 0, 14}, 6, INCOMPLETE,
+ {7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1}, 0},
+};
+
+struct lib_nd_route_table_entry lib_nd_route_table[MAX_ND_RT_ENTRY] = {
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
+};
+
+struct lib_arp_route_table_entry lib_arp_route_table[MAX_ARP_RT_ENTRY] = {
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+void print_trace(void);
+
+/* Obtain a backtrace and print it to stdout. */
+void print_trace(void)
+{
+ void *array[10];
+ size_t size;
+ char **strings;
+ size_t i;
+
+ size = backtrace(array, 10);
+ strings = backtrace_symbols(array, size);
+
+ RTE_LOG(INFO, LIBARP, "Obtained %zd stack frames.\n", size);
+
+ for (i = 0; i < size; i++)
+ RTE_LOG(INFO, LIBARP, "%s\n", strings[i]);
+
+ free(strings);
+}
+
+uint32_t get_nh(uint32_t ip, uint32_t *port)
+{
+ int i = 0;
+ for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
+ if (((lib_arp_route_table[i].
+ ip & lib_arp_route_table[i].mask) ==
+ (ip & lib_arp_route_table[i].mask))) {
+
+ *port = lib_arp_route_table[i].port;
+ lib_arp_nh_found++;
+ return lib_arp_route_table[i].nh;
+ }
+ if (ARPICMP_DEBUG)
+ printf("No nh match ip 0x%x, port %u, t_ip "
+ "0x%x, t_port %u, mask 0x%x, r1 %x, r2 %x\n",
+ ip, *port, lib_arp_route_table[i].ip,
+ lib_arp_route_table[i].port,
+ lib_arp_route_table[i].mask,
+ (lib_arp_route_table[i].ip &
+ lib_arp_route_table[i].mask),
+ (ip & lib_arp_route_table[i].mask));
+ }
+ if (ARPICMP_DEBUG)
+ printf("No NH - ip 0x%x, port %u\n", ip, *port);
+ lib_arp_no_nh_found++;
+ return 0;
+}
+
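+/*
+ * Route-match sketch (addresses below are hypothetical): how a
+ * lib_arp_route_table entry resolves a destination to a nexthop and port
+ * via get_nh().
+ *
+ *	lib_arp_route_table[0] = (struct lib_arp_route_table_entry){
+ *		.ip = IPv4(192, 168, 0, 0), .mask = 0xffffff00,
+ *		.port = 1, .nh = IPv4(192, 168, 0, 2) };
+ *
+ *	uint32_t port;
+ *	uint32_t nh = get_nh(IPv4(192, 168, 0, 77), &port);
+ *	// nh == IPv4(192, 168, 0, 2), port == 1; nh == 0 means no route
+ */
+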
+/*ND IPv6 */
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[])
+{
+ int i = 0;
+ uint8_t netmask_ipv6[16], netip_nd[16], netip_in[16];
+ uint8_t k = 0, l = 0, depthflags = 0, depthflags1 = 0;
+ memset(netmask_ipv6, 0, sizeof(netmask_ipv6));
+ memset(netip_nd, 0, sizeof(netip_nd));
+ memset(netip_in, 0, sizeof(netip_in));
+ if (!ipv6)
+ return;
+ for (i = 0; i < MAX_ND_RT_ENTRY; i++) {
+
+ convert_prefixlen_to_netmask_ipv6(lib_nd_route_table[i].depth,
+ netmask_ipv6);
+
+ for (k = 0; k < 16; k++) {
+ if (lib_nd_route_table[i].ipv6[k] & netmask_ipv6[k]) {
+ depthflags++;
+ netip_nd[k] = lib_nd_route_table[i].ipv6[k];
+ }
+ }
+
+ for (l = 0; l < 16; l++) {
+ if (ipv6[l] & netmask_ipv6[l]) {
+ depthflags1++;
+ netip_in[l] = ipv6[l];
+ }
+ }
+ int j = 0;
+ if ((depthflags == depthflags1)
+ && (memcmp(netip_nd, netip_in, sizeof(netip_nd)) == 0)) {
+ //&& (lib_nd_route_table[i].port == port))
+ *port = lib_nd_route_table[i].port;
+ lib_nd_nh_found++;
+
+ for (j = 0; j < 16; j++)
+ nhipv6[j] = lib_nd_route_table[i].nhipv6[j];
+
+ return;
+ }
+
+ if (NDIPV6_DEBUG)
+ printf("No nh match\n");
+ depthflags = 0;
+ depthflags1 = 0;
+ }
+ if (NDIPV6_DEBUG)
+ printf("No NH - ip 0x%x, port %u\n", ipv6[0], *port);
+ lib_nd_no_nh_found++;
+}
+
+/* Added for Multiport changes*/
+int get_dest_mac_addr_port(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr)
+{
+ lib_arp_get_mac_req++;
+ uint32_t nhip = 0;
+
+ nhip = get_nh(ipaddr, phy_port);
+ if (nhip == 0) {
+ if (ARPICMP_DEBUG)
+ printf("ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ //return 0;
+ return NH_NOT_FOUND;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port; /* Changed for Multi Port */
+ tmp_arp_key.ip = nhip;
+
+ if (ARPICMP_DEBUG)
+ printf("%s: nhip: %x, phyport: %d\n", __FUNCTION__, nhip,
+ *phy_port);
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG) {
+ printf
+ ("ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+ if (nhip != 0) {
+ if (ARPICMP_DEBUG)
+ printf("CG-NAPT requesting ARP for ip %x, "
+ "port %d\n", nhip, *phy_port);
+ request_arp(*phy_port, nhip); //Changed for Multiport
+
+ }
+ lib_arp_no_arp_entry_found++;
+ return ARP_NOT_FOUND;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ if (ARPICMP_DEBUG)
+ printf("%s: ARPICMP hwaddr found\n", __FUNCTION__);
+ return ARP_FOUND;
+}
+
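+/*
+ * Caller sketch: a forwarding path using the tri-state result above. dst_ip
+ * and the drop policy are hypothetical; on ARP_NOT_FOUND the lookup has
+ * already issued an ARP request, so the packet is typically dropped or held
+ * until the entry completes.
+ */
+#if 0
+static int example_resolve(uint32_t dst_ip, struct ether_addr *dmac)
+{
+	uint32_t out_port = 0;
+
+	switch (get_dest_mac_addr_port(dst_ip, &out_port, dmac)) {
+	case ARP_FOUND:
+		return out_port;	/* dmac is valid; transmit on out_port */
+	case ARP_NOT_FOUND:	/* request_arp() already sent for the nexthop */
+	case NH_NOT_FOUND:	/* no matching route in lib_arp_route_table */
+	default:
+		return -1;
+	}
+}
+#endif
+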
+int get_dest_mac_address(const uint32_t ipaddr, uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint32_t *nhip)
+{
+ lib_arp_get_mac_req++;
+
+ *nhip = get_nh(ipaddr, phy_port);
+ if (*nhip == 0) {
+ if (ARPICMP_DEBUG && ipaddr)
+ RTE_LOG(INFO, LIBARP,
+ "ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ return 0;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port;
+ tmp_arp_key.ip = *nhip;
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG && ipaddr) {
+ RTE_LOG(INFO, LIBARP,
+ "ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+ lib_arp_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ return 1;
+
+}
+
+int get_dest_mac_addr(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr)
+{
+ lib_arp_get_mac_req++;
+ uint32_t nhip = 0;
+
+ nhip = get_nh(ipaddr, phy_port);
+ if (nhip == 0) {
+ if (ARPICMP_DEBUG && ipaddr)
+ RTE_LOG(INFO, LIBARP,
+ "ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ return 0;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port;
+ tmp_arp_key.ip = nhip;
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG && ipaddr) {
+ printf
+ ("ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+
+ if (nhip != 0) {
+ if (ARPICMP_DEBUG > 4)
+ printf
+ ("CG-NAPT requesting ARP for ip %x, port %d\n",
+ nhip, *phy_port);
+ if (ifm_chk_port_ipv4_enabled(*phy_port)) {
+ request_arp(*phy_port, nhip);
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s: IP is not enabled on port %u, not sending ARP REQ\n\r",
+ __FUNCTION__, *phy_port);
+ }
+
+ }
+ lib_arp_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ return 1;
+}
+
+int get_dest_mac_address_ipv6_port(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[])
+{
+ int i = 0, j = 0, flag = 0;
+ lib_nd_get_mac_req++;
+
+ get_nh_ipv6(ipv6addr, phy_port, nhipv6);
+ for (j = 0; j < 16; j++) {
+ if (nhipv6[j])
+ flag++;
+ }
+ if (flag == 0) {
+ if (NDIPV6_DEBUG)
+ printf("NDIPV6 no nh found for ipv6 "
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x, port %d\n",
+ ipv6addr[0], ipv6addr[1], ipv6addr[2],
+ ipv6addr[3], ipv6addr[4], ipv6addr[5],
+ ipv6addr[6], ipv6addr[7], ipv6addr[8],
+ ipv6addr[9], ipv6addr[10], ipv6addr[11],
+ ipv6addr[12], ipv6addr[13], ipv6addr[14],
+ ipv6addr[15], *phy_port);
+ return 0;
+ }
+
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ tmp_nd_key.port_id = *phy_port;
+
+ for (i = 0; i < 16; i++)
+ tmp_nd_key.ipv6[i] = nhipv6[i];
+
+ ret_nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (ret_nd_data == NULL) {
+ if (NDIPV6_DEBUG) {
+ printf("NDIPV6 no nd entry found for ip %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ }
+ lib_nd_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_nd_data->eth_addr, hw_addr);
+ lib_nd_nd_entry_found++;
+ return 1;
+}
+
+int get_dest_mac_address_ipv6(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[])
+{
+ int i = 0, j = 0, flag = 0;
+ lib_nd_get_mac_req++;
+
+ get_nh_ipv6(ipv6addr, phy_port, nhipv6);
+ for (j = 0; j < 16; j++) {
+ if (nhipv6[j]) {
+ flag++;
+ }
+ }
+ if (flag == 0) {
+ if (NDIPV6_DEBUG && ipv6addr)
+ RTE_LOG(INFO, LIBARP,
+ "NDIPV6 no nh found for ipv6 %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ return 0;
+ }
+
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ tmp_nd_key.port_id = *phy_port;
+
+ for (i = 0; i < 16; i++) {
+ tmp_nd_key.ipv6[i] = nhipv6[i];
+ }
+
+ ret_nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (ret_nd_data == NULL) {
+ if (NDIPV6_DEBUG && ipv6addr) {
+ RTE_LOG(INFO, LIBARP,
+ "NDIPV6 no nd entry found for ip %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ }
+ if (flag != 0) {
+ if (ARPICMP_DEBUG > 4)
+ printf
+ ("Requesting ARP for ipv6 addr and port %d\n",
+ *phy_port);
+ request_nd(&nhipv6[0], ifm_get_port(*phy_port));
+
+ }
+
+ lib_nd_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_nd_data->eth_addr, hw_addr);
+ lib_nd_nd_entry_found++;
+ return 1;
+}
+
+/**
+* A structure for arp entries in Arp table
+*
+*/
+struct lib_arp_arp_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint64_t macaddr; /**< Mac address */
+};
+
+static const char *arp_op_name(uint16_t arp_op)
+{
+ switch (CHECK_ENDIAN_16(arp_op)) {
+ case (ARP_OP_REQUEST):
+ return "ARP Request";
+ case (ARP_OP_REPLY):
+ return "ARP Reply";
+ case (ARP_OP_REVREQUEST):
+ return "Reverse ARP Request";
+ case (ARP_OP_REVREPLY):
+ return "Reverse ARP Reply";
+ case (ARP_OP_INVREQUEST):
+ return "Peer Identify Request";
+ case (ARP_OP_INVREPLY):
+ return "Peer Identify Reply";
+ default:
+ break;
+ }
+ return "Unkwown ARP op";
+}
+
+static void print_icmp_packet(struct icmp_hdr *icmp_h)
+{
+ RTE_LOG(INFO, LIBARP, " ICMP: type=%d (%s) code=%d id=%d seqnum=%d\n",
+ icmp_h->icmp_type,
+ (icmp_h->icmp_type == IP_ICMP_ECHO_REPLY ? "Reply" :
+ (icmp_h->icmp_type ==
+ IP_ICMP_ECHO_REQUEST ? "Request" : "Undef")),
+ icmp_h->icmp_code, CHECK_ENDIAN_16(icmp_h->icmp_ident),
+ CHECK_ENDIAN_16(icmp_h->icmp_seq_nb));
+}
+
+static void print_ipv4_h(struct ipv4_hdr *ip_h)
+{
+ struct icmp_hdr *icmp_h =
+ (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+ RTE_LOG(INFO, LIBARP, " IPv4: Version=%d HLEN=%d Type=%d Length=%d\n",
+ (ip_h->version_ihl & 0xf0) >> 4, (ip_h->version_ihl & 0x0f),
+ ip_h->type_of_service, rte_cpu_to_be_16(ip_h->total_length));
+ if (ip_h->next_proto_id == IPPROTO_ICMP) {
+ print_icmp_packet(icmp_h);
+ }
+}
+
+static void print_arp_packet(struct arp_hdr *arp_h)
+{
+ RTE_LOG(INFO, LIBARP, " ARP: hrd=%d proto=0x%04x hln=%d "
+ "pln=%d op=%u (%s)\n",
+ CHECK_ENDIAN_16(arp_h->arp_hrd),
+ CHECK_ENDIAN_16(arp_h->arp_pro), arp_h->arp_hln,
+ arp_h->arp_pln, CHECK_ENDIAN_16(arp_h->arp_op),
+ arp_op_name(arp_h->arp_op));
+
+ if (CHECK_ENDIAN_16(arp_h->arp_hrd) != ARP_HRD_ETHER) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_hrd format for IPv4 ARP (%d)\n",
+ (arp_h->arp_hrd));
+ } else if (CHECK_ENDIAN_16(arp_h->arp_pro) != ETHER_TYPE_IPv4) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_pro format for IPv4 ARP (%d)\n",
+ (arp_h->arp_pro));
+ } else if (arp_h->arp_hln != 6) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_hln format for IPv4 ARP (%d)\n",
+ arp_h->arp_hln);
+ } else if (arp_h->arp_pln != 4) {
+ RTE_LOG(INFO, LIBARP,
+ "incorrect arp_pln format for IPv4 ARP (%d)\n",
+ arp_h->arp_pln);
+ } else {
+ RTE_LOG(INFO, LIBARP,
+ " sha=%02X:%02X:%02X:%02X:%02X:%02X",
+ arp_h->arp_data.arp_sha.addr_bytes[0],
+ arp_h->arp_data.arp_sha.addr_bytes[1],
+ arp_h->arp_data.arp_sha.addr_bytes[2],
+ arp_h->arp_data.arp_sha.addr_bytes[3],
+ arp_h->arp_data.arp_sha.addr_bytes[4],
+ arp_h->arp_data.arp_sha.addr_bytes[5]);
+ RTE_LOG(INFO, LIBARP, " sip=%d.%d.%d.%d\n",
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 24) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 16) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 8) & 0xFF,
+ CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) & 0xFF);
+ RTE_LOG(INFO, LIBARP,
+ " tha=%02X:%02X:%02X:%02X:%02X:%02X",
+ arp_h->arp_data.arp_tha.addr_bytes[0],
+ arp_h->arp_data.arp_tha.addr_bytes[1],
+ arp_h->arp_data.arp_tha.addr_bytes[2],
+ arp_h->arp_data.arp_tha.addr_bytes[3],
+ arp_h->arp_data.arp_tha.addr_bytes[4],
+ arp_h->arp_data.arp_tha.addr_bytes[5]);
+ RTE_LOG(INFO, LIBARP, " tip=%d.%d.%d.%d\n",
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 24) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 16) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 8) & 0xFF,
+ CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) & 0xFF);
+ }
+}
+
+static void print_eth(struct ether_hdr *eth_h)
+{
+ RTE_LOG(INFO, LIBARP, " ETH: src=%02X:%02X:%02X:%02X:%02X:%02X",
+ eth_h->s_addr.addr_bytes[0],
+ eth_h->s_addr.addr_bytes[1],
+ eth_h->s_addr.addr_bytes[2],
+ eth_h->s_addr.addr_bytes[3],
+ eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5]);
+ RTE_LOG(INFO, LIBARP, " dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eth_h->d_addr.addr_bytes[0],
+ eth_h->d_addr.addr_bytes[1],
+ eth_h->d_addr.addr_bytes[2],
+ eth_h->d_addr.addr_bytes[3],
+ eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5]);
+
+}
+
+static void
+print_mbuf(const char *rx_tx, uint8_t portid, struct rte_mbuf *mbuf,
+ unsigned line)
+{
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ struct arp_hdr *arp_h =
+ (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ struct ipv4_hdr *ipv4_h =
+ (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+
+ RTE_LOG(INFO, LIBARP, "%s(%d): on port %d pkt-len=%u nb-segs=%u\n",
+ rx_tx, line, portid, mbuf->pkt_len, mbuf->nb_segs);
+ print_eth(eth_h);
+ switch (rte_cpu_to_be_16(eth_h->ether_type)) {
+ case ETHER_TYPE_IPv4:
+ print_ipv4_h(ipv4_h);
+ break;
+ case ETHER_TYPE_ARP:
+ print_arp_packet(arp_h);
+ break;
+ default:
+ RTE_LOG(INFO, LIBARP, " unknown packet type\n");
+ break;
+ }
+ fflush(stdout);
+}
+
+struct arp_entry_data *retrieve_arp_entry(struct arp_key_ipv4 arp_key)
+{
+ struct arp_entry_data *ret_arp_data = NULL;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ int ret = rte_hash_lookup_data(arp_hash_handle, &arp_key,
+ (void **)&ret_arp_data);
+ if (ret < 0) {
+ // RTE_LOG(INFO, LIBARP,"arp-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n", ret, EINVAL, ENOENT);
+ } else {
+
+ if (ret_arp_data->mode == DYNAMIC_ARP) {
+ struct arp_timer_key callback_key;
+ callback_key.port_id = ret_arp_data->port;
+ callback_key.ip = ret_arp_data->ip;
+ /*lcore need to check which parameter need to be put */
+ if (rte_timer_reset(ret_arp_data->timer,
+ (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore,
+ arp_timer_callback,
+ &callback_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+ }
+
+ return ret_arp_data;
+ }
+
+ return NULL;
+}
+
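+/*
+ * Key-construction sketch (nexthop and port below are hypothetical). The
+ * filler fields must be zeroed, as retrieve_arp_entry() does above, so that
+ * identical keys hash identically:
+ */
+#if 0
+static struct arp_entry_data *example_arp_lookup(uint32_t nhip, uint8_t port)
+{
+	struct arp_key_ipv4 key;
+
+	memset(&key, 0, sizeof(key));
+	key.port_id = port;
+	key.ip = nhip;
+
+	return retrieve_arp_entry(key);	/* NULL when the nexthop is unresolved */
+}
+#endif
+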
+struct nd_entry_data *retrieve_nd_entry(struct nd_key_ipv6 nd_key)
+{
+ struct nd_entry_data *ret_nd_data = NULL;
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+ int i = 0;
+
+ /*Find a nd IPv6 key-data pair in the hash table for ND IPv6 */
+ int ret = rte_hash_lookup_data(nd_hash_handle, &nd_key,
+ (void **)&ret_nd_data);
+ if (ret < 0) {
+/* RTE_LOG(INFO, LIBARP,"nd-hash: no lookup Entry Found - ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);*/
+ } else {
+ if (ret_nd_data->mode == DYNAMIC_ND) {
+ struct nd_timer_key callback_key;
+ callback_key.port_id = ret_nd_data->port;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ callback_key.ipv6[i] = ret_nd_data->ipv6[i];
+
+ }
+
+ if (rte_timer_reset
+ (ret_nd_data->timer,
+ (arp_timeout * rte_get_tsc_hz()), SINGLE,
+ timer_lcore, nd_timer_callback, &callback_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+ }
+ return ret_nd_data;
+ }
+
+ return NULL;
+}
+
+void print_arp_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ printf
+ ("------------------------ ARP CACHE -----------------------------------------\n");
+ printf
+ ("----------------------------------------------------------------------------\n");
+ printf("\tport hw addr status ip addr\n");
+ printf
+ ("----------------------------------------------------------------------------\n");
+
+ while (rte_hash_iterate(arp_hash_handle, &next_key, &next_data, &iter)
+ >= 0) {
+
+ struct arp_entry_data *tmp_arp_data =
+ (struct arp_entry_data *)next_data;
+ struct arp_key_ipv4 tmp_arp_key;
+ memcpy(&tmp_arp_key, next_key, sizeof(struct arp_key_ipv4));
+ printf
+ ("\t%4d %02X:%02X:%02X:%02X:%02X:%02X %10s %d.%d.%d.%d\n",
+ tmp_arp_data->port, tmp_arp_data->eth_addr.addr_bytes[0],
+ tmp_arp_data->eth_addr.addr_bytes[1],
+ tmp_arp_data->eth_addr.addr_bytes[2],
+ tmp_arp_data->eth_addr.addr_bytes[3],
+ tmp_arp_data->eth_addr.addr_bytes[4],
+ tmp_arp_data->eth_addr.addr_bytes[5],
+ tmp_arp_data->status ==
+ COMPLETE ? "COMPLETE" : "INCOMPLETE",
+ (tmp_arp_data->ip >> 24),
+ ((tmp_arp_data->ip & 0x00ff0000) >> 16),
+ ((tmp_arp_data->ip & 0x0000ff00) >> 8),
+ ((tmp_arp_data->ip & 0x000000ff)));
+ }
+
+ uint32_t i = 0;
+ printf("\nARP routing table has %d entries\n", arp_route_tbl_index);
+ printf("\nIP_Address Mask Port NH_IP_Address\n");
+ for (i = 0; i < arp_route_tbl_index; i++) {
+ printf("0x%x 0x%x %d 0x%x\n",
+ lib_arp_route_table[i].ip,
+ lib_arp_route_table[i].mask,
+ lib_arp_route_table[i].port, lib_arp_route_table[i].nh);
+ }
+
+ printf
+ ("\nARP Stats: Total Queries %u, ok_NH %u, no_NH %u, ok_Entry %u, no_Entry %u, PopulateCall %u, Del %u, Dup %u\n",
+ lib_arp_get_mac_req, lib_arp_nh_found, lib_arp_no_nh_found,
+ lib_arp_arp_entry_found, lib_arp_no_arp_entry_found,
+ lib_arp_populate_called, lib_arp_delete_called,
+ lib_arp_duplicate_found);
+
+ printf("ARP table key len is %lu\n", sizeof(struct arp_key_ipv4));
+}
+
+/* ND IPv6 */
+void print_nd_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ uint8_t ii = 0, j = 0, k = 0;
+ printf
+ ("------------------------------------------------------------------------------------------------------\n");
+ printf("\tport hw addr status ip addr\n");
+
+ printf
+ ("------------------------------------------------------------------------------------------------------\n");
+ while (rte_hash_iterate(nd_hash_handle, &next_key, &next_data, &iter) >=
+ 0) {
+
+ struct nd_entry_data *tmp_nd_data =
+ (struct nd_entry_data *)next_data;
+ struct nd_key_ipv6 tmp_nd_key;
+ memcpy(&tmp_nd_key, next_key, sizeof(struct nd_key_ipv6));
+ printf("\t%4d %02X:%02X:%02X:%02X:%02X:%02X %10s\n",
+ tmp_nd_data->port,
+ tmp_nd_data->eth_addr.addr_bytes[0],
+ tmp_nd_data->eth_addr.addr_bytes[1],
+ tmp_nd_data->eth_addr.addr_bytes[2],
+ tmp_nd_data->eth_addr.addr_bytes[3],
+ tmp_nd_data->eth_addr.addr_bytes[4],
+ tmp_nd_data->eth_addr.addr_bytes[5],
+ tmp_nd_data->status ==
+ COMPLETE ? "COMPLETE" : "INCOMPLETE");
+ printf("\t\t\t\t\t\t");
+ for (ii = 0; ii < ND_IPV6_ADDR_SIZE; ii += 2) {
+ printf("%02X%02X ", tmp_nd_data->ipv6[ii],
+ tmp_nd_data->ipv6[ii + 1]);
+ }
+ printf("\n");
+ }
+
+ uint32_t i = 0;
+ printf("\n\nND IPV6 routing table has %d entries\n",
+ nd_route_tbl_index);
+ printf
+ ("\nIP_Address Depth Port NH_IP_Address\n");
+ for (i = 0; i < nd_route_tbl_index; i++) {
+ printf("\n");
+
+ for (j = 0; j < ND_IPV6_ADDR_SIZE; j += 2) {
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ lib_nd_route_table[i].ipv6[j],
+ lib_nd_route_table[i].ipv6[j + 1]);
+ }
+
+ printf
+ ("\n\t\t\t %d %d \n",
+ lib_nd_route_table[i].depth, lib_nd_route_table[i].port);
+ printf("\t\t\t\t\t\t\t\t\t");
+ for (k = 0; k < ND_IPV6_ADDR_SIZE; k += 2) {
+ printf("%02X%02X ", lib_nd_route_table[i].nhipv6[k],
+ lib_nd_route_table[i].nhipv6[k + 1]);
+ }
+ }
+ printf
+ ("\nND IPV6 Stats: \nTotal Queries %u, ok_NH %u, no_NH %u, ok_Entry %u, no_Entry %u, PopulateCall %u, Del %u, Dup %u\n",
+ lib_nd_get_mac_req, lib_nd_nh_found, lib_nd_no_nh_found,
+ lib_nd_nd_entry_found, lib_nd_no_arp_entry_found,
+ lib_nd_populate_called, lib_nd_delete_called,
+ lib_nd_duplicate_found);
+ printf("ND table key len is %lu\n\n", sizeof(struct nd_key_ipv6));
+}
+
+void remove_arp_entry(uint32_t ipaddr, uint8_t portid, void *arg)
+{
+
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = portid;
+ arp_key.ip = ipaddr;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ lib_arp_delete_called++;
+
+ struct arp_entry_data *ret_arp_data = NULL;
+
+ int ret = rte_hash_lookup_data(arp_hash_handle, &arp_key,
+ (void **)&ret_arp_data);
+ if (ret < 0) {
+// RTE_LOG(INFO, LIBARP,"arp-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n", ret, EINVAL, ENOENT);
+ return;
+ } else {
+ if (ret_arp_data->mode == DYNAMIC_ARP) {
+ if (ret_arp_data->retry_count == 3) {
+ rte_timer_stop(ret_arp_data->timer);
+ rte_free(ret_arp_data->timer_key);
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "ARP Entry Deleted for IP :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >>
+ 16),
+ ((arp_key.ip & 0x0000ff00) >>
+ 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ rte_hash_del_key(arp_hash_handle, &arp_key);
+ //print_arp_table();
+ } else {
+ ret_arp_data->retry_count++;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "RETRY ARP..retry count : %u\n",
+ ret_arp_data->retry_count);
+ //print_arp_table();
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "TIMER STARTED FOR %u seconds\n",
+ ARP_TIMER_EXPIRY);
+ if (ifm_chk_port_ipv4_enabled
+ (ret_arp_data->port)) {
+ request_arp(ret_arp_data->port,
+ ret_arp_data->ip);
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s: IP is not enabled on port %u, not sending GARP\n\r",
+ __FUNCTION__,
+ ret_arp_data->port);
+ }
+ if (rte_timer_reset(ret_arp_data->timer,
+ (arp_timeout *
+ rte_get_tsc_hz()), SINGLE,
+ timer_lcore,
+ arp_timer_callback,
+ arg) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+
+ }
+ } else {
+ rte_hash_del_key(arp_hash_handle, &arp_key);
+ }
+ }
+}
+
+/* ND IPv6 */
+void remove_nd_entry_ipv6(uint8_t ipv6addr[], uint8_t portid)
+{
+ int i = 0;
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ nd_key.ipv6[i] = ipv6addr[i];
+ }
+
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ lib_nd_delete_called++;
+
+ if (NDIPV6_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "Deletes rte hash table nd entry for port %d ipv6=",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ RTE_LOG(INFO, LIBARP, "%02X%02X ", nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+ int ret = rte_hash_lookup_data(nd_hash_handle, &nd_key,
+ (void **)&ret_nd_data);
+ if (ret < 0) {
+// RTE_LOG(INFO, LIBARP,"arp-hash lookup failed ret %d, EINVAL %d, ENOENT %d\n", ret, EINVAL, ENOENT);
+ } else {
+ if (ret_nd_data->mode == DYNAMIC_ND) {
+ rte_timer_stop(ret_nd_data->timer);
+ rte_free(ret_nd_data->timer);
+ }
+ }
+ rte_hash_del_key(nd_hash_handle, &nd_key);
+}
+
+void
+populate_arp_entry(const struct ether_addr *hw_addr, uint32_t ipaddr,
+ uint8_t portid, uint8_t mode)
+{
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = portid;
+ arp_key.ip = ipaddr;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ lib_arp_populate_called++;
+
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "populate_arp_entry ip %x, port %d\n",
+ arp_key.ip, arp_key.port_id);
+
+ struct arp_entry_data *new_arp_data = retrieve_arp_entry(arp_key);
+ if (new_arp_data && ((new_arp_data->mode == STATIC_ARP
+ && mode == DYNAMIC_ARP) || (new_arp_data->mode == DYNAMIC_ARP
+ && mode == STATIC_ARP))) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,"populate_arp_entry: ARP entry already exists(%d %d)\n",
+ new_arp_data->mode, mode);
+
+ return;
+ }
+
+ if (mode == DYNAMIC_ARP) {
+ if (new_arp_data
+ && is_same_ether_addr(&new_arp_data->eth_addr, hw_addr)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "arp_entry exists ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ lib_arp_duplicate_found++;
+ new_arp_data->retry_count = 0; // Reset
+ if (rte_timer_reset(new_arp_data->timer,
+ (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore,
+ arp_timer_callback,
+ new_arp_data->timer_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+ return;
+ }
+
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct arp_entry_data));
+ new_arp_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ new_arp_data->eth_addr = *hw_addr;
+ new_arp_data->status = COMPLETE;
+ new_arp_data->port = portid;
+ new_arp_data->ip = ipaddr;
+ new_arp_data->mode = mode;
+ if (rte_mempool_get
+ (timer_mempool_arp, (void **)&(new_arp_data->timer)) < 0) {
+ RTE_LOG(INFO, LIBARP,
+ "TIMER - Error in getting timer alloc buffer\n");
+ return;
+ }
+
+ rte_hash_add_key_data(arp_hash_handle, &arp_key, new_arp_data);
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "arp_entry exists ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)), arp_key.port_id);
+ }
+ // Call l3fwd module for resolving 2_adj structure.
+ resolve_l2_adj(ipaddr, portid, hw_addr);
+
+ rte_timer_init(new_arp_data->timer);
+ struct arp_timer_key *callback_key =
+ (struct arp_timer_key *)rte_malloc(NULL,
+ sizeof(struct arp_timer_key),
+ RTE_CACHE_LINE_SIZE);
+ callback_key->port_id = portid;
+ callback_key->ip = ipaddr;
+
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "TIMER STARTED FOR %u seconds\n",
+ ARP_TIMER_EXPIRY);
+ if (rte_timer_reset
+ (new_arp_data->timer, (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore, arp_timer_callback, callback_key) < 0)
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Err : Timer already running\n");
+
+ new_arp_data->timer_key = callback_key;
+ } else {
+ if (new_arp_data
+ && is_same_ether_addr(&new_arp_data->eth_addr, hw_addr)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "arp_entry exists ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ lib_arp_duplicate_found++;
+ } else {
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof
+ (struct arp_entry_data));
+ new_arp_data =
+ rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ new_arp_data->eth_addr = *hw_addr;
+ new_arp_data->status = COMPLETE;
+ new_arp_data->port = portid;
+ new_arp_data->ip = ipaddr;
+ new_arp_data->mode = mode;
+
+ rte_hash_add_key_data(arp_hash_handle, &arp_key,
+ new_arp_data);
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "arp_entry exists ip :%d.%d.%d.%d , port %d\n",
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)),
+ arp_key.port_id);
+ }
+ // Call l3fwd module for resolving 2_adj structure.
+ resolve_l2_adj(ipaddr, portid, hw_addr);
+ }
+ }
+ if (ARPICMP_DEBUG) {
+ /* print entire hash table */
+ RTE_LOG(INFO, LIBARP,
+ "\tARP: table update - hwaddr=%02x:%02x:%02x:%02x:%02x:%02x ip=%d.%d.%d.%d on port=%d\n",
+ new_arp_data->eth_addr.addr_bytes[0],
+ new_arp_data->eth_addr.addr_bytes[1],
+ new_arp_data->eth_addr.addr_bytes[2],
+ new_arp_data->eth_addr.addr_bytes[3],
+ new_arp_data->eth_addr.addr_bytes[4],
+ new_arp_data->eth_addr.addr_bytes[5],
+ (arp_key.ip >> 24), ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)), portid);
+ puts("");
+ }
+}
+
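+/*
+ * Learning sketch: how an ARP reply handler would feed this cache. The
+ * header pointer and receive port are hypothetical; DYNAMIC_ARP entries get
+ * the refresh timer installed above, STATIC_ARP entries do not.
+ */
+#if 0
+static void example_learn_from_arp_reply(struct arp_hdr *arp_h, uint8_t port)
+{
+	uint32_t sip = rte_be_to_cpu_32(arp_h->arp_data.arp_sip);
+
+	populate_arp_entry(&arp_h->arp_data.arp_sha, sip, port, DYNAMIC_ARP);
+}
+#endif
+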
+/*
+ * ND IPv6
+ *
+ * Install key - data pair in Hash table - From Pipeline Configuration
+ *
+ */
+
+void populate_nd_entry(const struct ether_addr *hw_addr, uint8_t ipv6[],
+ uint8_t portid, uint8_t mode)
+{
+
+ /* need to lock here if multi-threaded */
+ /* rte_hash_add_key_data is not thread safe */
+ uint8_t i;
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ nd_key.ipv6[i] = ipv6[i];
+
+// RTE_LOG(INFO, LIBARP,"\n");
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ lib_nd_populate_called++;
+
+ /* Validate if key-value pair already exists in the hash table for ND IPv6 */
+ struct nd_entry_data *new_nd_data = retrieve_nd_entry(nd_key);
+
+ if (mode == DYNAMIC_ND) {
+ if (new_nd_data
+ && is_same_ether_addr(&new_nd_data->eth_addr, hw_addr)) {
+
+ if (NDIPV6_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "nd_entry exists port %d ipv6 = ",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+
+ lib_nd_duplicate_found++;
+ RTE_LOG(INFO, LIBARP, "nd_entry exists\n");
+ return;
+ }
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct nd_entry_data));
+ new_nd_data = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ //new_nd_data = (struct nd_entry_data *)rte_malloc(NULL, sizeof(struct nd_entry_data *),RTE_CACHE_LINE_SIZE);
+ new_nd_data->eth_addr = *hw_addr;
+ new_nd_data->status = COMPLETE;
+ new_nd_data->port = portid;
+ new_nd_data->mode = mode;
+ if (rte_mempool_get
+ (timer_mempool_arp, (void **)&(new_nd_data->timer)) < 0) {
+ RTE_LOG(INFO, LIBARP,
+ "TIMER - Error in getting timer alloc buffer\n");
+ return;
+ }
+
+ if (NDIPV6_DEBUG)
+ RTE_LOG(INFO, LIBARP, "populate_nd_entry ipv6=");
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ new_nd_data->ipv6[i] = ipv6[i];
+ }
+
+ if (NDIPV6_DEBUG) {
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ new_nd_data->ipv6[i],
+ new_nd_data->ipv6[i + 1]);
+ }
+ }
+
+ /*Add a key-data pair at hash table for ND IPv6 static routing */
+ rte_hash_add_key_data(nd_hash_handle, &nd_key, new_nd_data);
+ /* need to check the return value of the hash add */
+
+ /* after the hash is created then time is started */
+ rte_timer_init(new_nd_data->timer);
+ struct nd_timer_key *callback_key =
+ (struct nd_timer_key *)rte_malloc(NULL,
+ sizeof(struct nd_timer_key),
+ RTE_CACHE_LINE_SIZE);
+ callback_key->port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ callback_key->ipv6[i] = ipv6[i];
+ }
+ if (rte_timer_reset
+ (new_nd_data->timer, (arp_timeout * rte_get_tsc_hz()),
+ SINGLE, timer_lcore, nd_timer_callback, callback_key) < 0)
+ RTE_LOG(INFO, LIBARP, "Err : Timer already running\n");
+ } else {
+ if (new_nd_data
+ && is_same_ether_addr(&new_nd_data->eth_addr, hw_addr)) {
+ if (NDIPV6_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "nd_entry exists port %d ipv6 = ",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ RTE_LOG(INFO, LIBARP, "%02X%02X ",
+ nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+
+ lib_nd_duplicate_found++;
+ } else {
+ uint32_t size =
+ RTE_CACHE_LINE_ROUNDUP(sizeof
+ (struct nd_entry_data));
+ new_nd_data =
+ rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+
+ //new_nd_data = (struct nd_entry_data *)rte_malloc(NULL, sizeof(struct nd_entry_data *),RTE_CACHE_LINE_SIZE);
+ new_nd_data->eth_addr = *hw_addr;
+ new_nd_data->status = COMPLETE;
+ new_nd_data->port = portid;
+ new_nd_data->mode = mode;
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ new_nd_data->ipv6[i] = ipv6[i];
+ }
+
+ /*Add a key-data pair at hash table for ND IPv6 static routing */
+ rte_hash_add_key_data(nd_hash_handle, &nd_key,
+ new_nd_data);
+ /* need to check the return value of the hash add */
+ }
+ }
+ if (NDIPV6_DEBUG)
+ printf
+ ("\n....Added a key-data pair at rte hash table for ND IPv6 static routing\n");
+
+ if (1) {
+ /* print entire hash table */
+ printf
+ ("\tND: table update - hwaddr=%02x:%02x:%02x:%02x:%02x:%02x on port=%d\n",
+ new_nd_data->eth_addr.addr_bytes[0],
+ new_nd_data->eth_addr.addr_bytes[1],
+ new_nd_data->eth_addr.addr_bytes[2],
+ new_nd_data->eth_addr.addr_bytes[3],
+ new_nd_data->eth_addr.addr_bytes[4],
+ new_nd_data->eth_addr.addr_bytes[5], portid);
+ RTE_LOG(INFO, LIBARP, "\tipv6=");
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ RTE_LOG(INFO, LIBARP, "%02X%02X ", new_nd_data->ipv6[i],
+ new_nd_data->ipv6[i + 1]);
+ }
+
+ RTE_LOG(INFO, LIBARP, "\n");
+
+ puts("");
+ }
+}
+
+void print_pkt1(struct rte_mbuf *pkt)
+{
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
+ int i = 0, j = 0;
+ RTE_LOG(INFO, LIBARP, "\nPacket Contents...\n");
+ for (i = 0; i < 20; i++) {
+ for (j = 0; j < 20; j++)
+ RTE_LOG(INFO, LIBARP, "%02x ", rd[(20 * i) + j]);
+ RTE_LOG(INFO, LIBARP, "\n");
+ }
+}
+
+struct ether_addr broadcast_ether_addr = {
+ .addr_bytes[0] = 0xFF,
+ .addr_bytes[1] = 0xFF,
+ .addr_bytes[2] = 0xFF,
+ .addr_bytes[3] = 0xFF,
+ .addr_bytes[4] = 0xFF,
+ .addr_bytes[5] = 0xFF,
+};
+
+static const struct ether_addr null_ether_addr = {
+ .addr_bytes[0] = 0x00,
+ .addr_bytes[1] = 0x00,
+ .addr_bytes[2] = 0x00,
+ .addr_bytes[3] = 0x00,
+ .addr_bytes[4] = 0x00,
+ .addr_bytes[5] = 0x00,
+};
+
+#define MAX_NUM_MAC_ADDRESS 16
+struct ether_addr link_hw_addr[MAX_NUM_MAC_ADDRESS] = {
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc8} },
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc9} },
+{.addr_bytes = {0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc9} },
+{.addr_bytes = {0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x18, 0x19, 0x1a, 0x1b, 0xcd, 0xef} }
+};
+
+struct ether_addr *get_link_hw_addr(uint8_t out_port)
+{
+ return &link_hw_addr[out_port];
+}
+
+void request_arp(uint8_t port_id, uint32_t ip)
+{
+
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+
+ l2_phy_interface_t *link;
+ link = ifm_get_port(port_id);
+ struct rte_mbuf *arp_pkt = lib_arp_pkt;
+
+ if (link == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Port %d not found - not sending ARP request\n",
+ port_id);
+ return;
+ }
+
+ if (arp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating arp_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(arp_pkt, struct ether_hdr *);
+
+ ether_addr_copy(&broadcast_ether_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &link->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_ARP);
+
+ arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ arp_h->arp_hrd = CHECK_ENDIAN_16(ARP_HRD_ETHER);
+ arp_h->arp_pro = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+ arp_h->arp_hln = ETHER_ADDR_LEN;
+ arp_h->arp_pln = sizeof(uint32_t);
+ arp_h->arp_op = CHECK_ENDIAN_16(ARP_OP_REQUEST);
+
+ ether_addr_copy((struct ether_addr *)
+ &link->macaddr[0], &arp_h->arp_data.arp_sha);
+ if (link && link->ipv4_list) {
+ arp_h->arp_data.arp_sip =
+ (((ipv4list_t *) (link->ipv4_list))->ipaddr);
+ }
+ ether_addr_copy(&null_ether_addr, &arp_h->arp_data.arp_tha);
+ arp_h->arp_data.arp_tip = rte_cpu_to_be_32(ip);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "arp tip:%x arp sip :%x\n",
+ arp_h->arp_data.arp_tip, arp_h->arp_data.arp_sip);
+ // mmcd changed length from 60 to 42 - real length of arp request, no padding on ethernet needed - looks now like linux arp
+ arp_pkt->pkt_len = 42;
+ arp_pkt->data_len = 42;
+
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP, "Sending arp request\n");
+ print_mbuf("TX", port_id, arp_pkt, __LINE__);
+ }
+ if (link)
+ link->transmit_single_pkt(link, arp_pkt);
+}
+
+struct rte_mbuf *request_echo(uint32_t port_id, uint32_t ip)
+{
+ struct ether_hdr *eth_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+ l2_phy_interface_t *port = ifm_get_port(port_id);
+
+ struct rte_mbuf *icmp_pkt = lib_arp_pkt;
+ if (icmp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating icmp_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmp_pkt, struct ether_hdr *);
+
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmp_h = (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ ip_h->version_ihl = IP_VHL_DEF;
+ ip_h->type_of_service = 0;
+ ip_h->total_length =
+ rte_cpu_to_be_16(sizeof(struct ipv4_hdr) + sizeof(struct icmp_hdr));
+ ip_h->packet_id = 0xaabb;
+ ip_h->fragment_offset = 0x0000;
+ ip_h->time_to_live = 64;
+ ip_h->next_proto_id = IPPROTO_ICMP;
+ if (port && port->ipv4_list)
+ ip_h->src_addr =
+ rte_cpu_to_be_32(((ipv4list_t *) port->ipv4_list)->ipaddr);
+ ip_h->dst_addr = rte_cpu_to_be_32(ip);
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = rte_ipv4_cksum(ip_h);
+
+ icmp_h->icmp_type = IP_ICMP_ECHO_REQUEST;
+ icmp_h->icmp_code = 0;
+ icmp_h->icmp_ident = 0xdead;
+ icmp_h->icmp_seq_nb = 0xbeef;
+
+ icmp_h->icmp_cksum = ~rte_raw_cksum(icmp_h, sizeof(struct icmp_hdr));
+
+ icmp_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr);
+ icmp_pkt->data_len = icmp_pkt->pkt_len;
+
+ print_mbuf("TX", 0, icmp_pkt, __LINE__);
+
+ return icmp_pkt;
+}
+
+#if 0
+/**
+ * Function to send ICMP dest unreachable msg
+ *
+ */
+struct rte_mbuf *send_icmp_dest_unreachable_msg(uint32_t src_ip,
+ uint32_t dest_ip)
+{
+ struct ether_hdr *eth_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+ struct rte_mbuf *icmp_pkt = lib_arp_pkt;
+
+ if (icmp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating icmp_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmp_pkt, struct ether_hdr *);
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmp_h = (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ ip_h->version_ihl = IP_VHL_DEF;
+ ip_h->type_of_service = 0;
+ ip_h->total_length =
+ rte_cpu_to_be_16(sizeof(struct ipv4_hdr) + sizeof(struct icmp_hdr));
+ ip_h->packet_id = 0xaabb;
+ ip_h->fragment_offset = 0x0000;
+ ip_h->time_to_live = 64;
+ ip_h->next_proto_id = 1;
+
+ ip_h->dst_addr = rte_bswap32(dest_ip);
+ ip_h->src_addr = rte_bswap32(src_ip);
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = rte_ipv4_cksum(ip_h);
+
+ icmp_h->icmp_type = 3; /* Destination Unreachable */
+ icmp_h->icmp_code = 13; /* Communication administratively prohibited */
+
+ icmp_h->icmp_cksum = ~rte_raw_cksum(icmp_h, sizeof(struct icmp_hdr));
+
+ icmp_pkt->pkt_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr);
+ icmp_pkt->data_len = icmp_pkt->pkt_len;
+
+ return icmp_pkt;
+}
+#endif
+void
+process_arpicmp_pkt_parse(struct rte_mbuf **pkt, uint16_t pkt_num,
+ uint64_t pkt_mask, l2_phy_interface_t *port)
+{
+ RTE_SET_USED(pkt_num);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "============ARP ENTRY================\n");
+ if (pkt_mask) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "============ARP PROCESS================\n");
+ }
+
+ uint64_t pkts_for_process = pkt_mask;
+ for (; pkts_for_process;) {
+/**< process only valid packets. */
+ uint8_t pos = (uint8_t) __builtin_ctzll(pkts_for_process);
+ uint64_t pkts_mask = 1LLU << pos; /** <bitmask representing only this packet. */
+ pkts_for_process &= ~pkts_mask; /** <remove this packet from the mask. */
+ process_arpicmp_pkt(pkt[pos], port);
+ }
+
+}
+
+void process_arpicmp_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port)
+{
+ uint8_t in_port_id = pkt->port;
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+
+ uint32_t cksum;
+ uint32_t ip_addr;
+
+ uint32_t req_tip;
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ if (eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "%s, portid %u. Line %d\n\r",
+ __FUNCTION__, port->pmdid, __LINE__);
+ arp_h =
+ (struct arp_hdr *)((char *)eth_h +
+ sizeof(struct ether_hdr));
+ if (CHECK_ENDIAN_16(arp_h->arp_hrd) != ARP_HRD_ETHER)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid hardware format of hardware address - not processing ARP req\n");
+ else if (CHECK_ENDIAN_16(arp_h->arp_pro) != ETHER_TYPE_IPv4)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid protocol address format - not processing ARP req\n");
+ else if (arp_h->arp_hln != 6)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid hardware address length - not processing ARP req\n");
+ else if (arp_h->arp_pln != 4)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid protocol address length - not processing ARP req\n");
+ else {
+ if (port->ipv4_list == NULL) {
+ RTE_LOG(INFO, LIBARP,
+ "Ports IPV4 List is NULL.. Unable to Process\n");
+ return;
+ }
+
+ if (arp_h->arp_data.arp_tip !=
+ ((ipv4list_t *) (port->ipv4_list))->ipaddr) {
+ if (arp_h->arp_data.arp_tip == arp_h->arp_data.arp_sip) {
+ populate_arp_entry(
+ (struct ether_addr *)&arp_h->arp_data.arp_sha,
+ rte_cpu_to_be_32(arp_h->arp_data.arp_sip),
+ in_port_id,
+ DYNAMIC_ARP);
+
+ } else {
+ RTE_LOG(INFO, LIBARP,"ARP requested IP address mismatches interface IP - discarding\n");
+ }
+ }
+ /* TODO: revise conditionals to allow processing of requests with
+  * target ip == this ip and processing of replies to
+  * destination ip == this ip */
+ else if (arp_h->arp_op ==
+ rte_cpu_to_be_16(ARP_OP_REQUEST)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "%s, portid %u. Line %d\n\r",
+ __FUNCTION__, port->pmdid,
+ __LINE__);
+
+ RTE_LOG(INFO, LIBARP,
+ "arp_op %d, ARP_OP_REQUEST %d\n",
+ arp_h->arp_op,
+ rte_cpu_to_be_16
+ (ARP_OP_REQUEST));
+ print_mbuf("RX", in_port_id, pkt,
+ __LINE__);
+ }
+
+ populate_arp_entry((struct ether_addr *)
+ &arp_h->arp_data.arp_sha,
+ rte_cpu_to_be_32
+ (arp_h->arp_data.arp_sip),
+ in_port_id, DYNAMIC_ARP);
+
+ /*build reply */
+ req_tip = arp_h->arp_data.arp_tip;
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0], &eth_h->s_addr); /**< set sender mac address*/
+ arp_h->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+ ether_addr_copy(&eth_h->s_addr,
+ &arp_h->arp_data.arp_sha);
+ arp_h->arp_data.arp_tip =
+ arp_h->arp_data.arp_sip;
+ arp_h->arp_data.arp_sip = req_tip;
+ ether_addr_copy(&eth_h->d_addr,
+ &arp_h->arp_data.arp_tha);
+
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX ARP REPLY PKT",
+ port->pmdid, pkt, __LINE__);
+ port->transmit_bulk_pkts(port, &pkt, 1);
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX", port->pmdid, pkt,
+ __LINE__);
+
+ return;
+ } else if (arp_h->arp_op ==
+ rte_cpu_to_be_16(ARP_OP_REPLY)) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "ARP_OP_REPLY received");
+ print_mbuf("RX", port->pmdid, pkt,
+ __LINE__);
+ }
+ populate_arp_entry((struct ether_addr *)
+ &arp_h->arp_data.arp_sha,
+ rte_bswap32(arp_h->
+ arp_data.arp_sip),
+ in_port_id, DYNAMIC_ARP);
+
+ return;
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Invalid ARP opcode - not processing ARP req %x\n",
+ arp_h->arp_op);
+ }
+ }
+
+ rte_pktmbuf_free(pkt);
+ } else {
+ ip_h =
+ (struct ipv4_hdr *)((char *)eth_h +
+ sizeof(struct ether_hdr));
+ icmp_h =
+ (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ if (eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+
+ if (ip_h->next_proto_id != IPPROTO_ICMP) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "IP protocol ID is not set to ICMP - discarding\n");
+ }
+ } else if ((ip_h->version_ihl & 0xf0) != IP_VERSION_4) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "IP version other than 4 - discarding\n");
+ }
+ } else if ((ip_h->version_ihl & 0x0f) != IP_HDRLEN) {
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP,
+ "Unknown IHL - discarding\n");
+ }
+ } else {
+ if (icmp_h->icmp_type == IP_ICMP_ECHO_REQUEST
+ && icmp_h->icmp_code == 0) {
+ if (ARPICMP_DEBUG)
+ print_mbuf("RX", in_port_id,
+ pkt, __LINE__);
+
+ ip_addr = ip_h->src_addr;
+ ether_addr_copy(&eth_h->s_addr,
+ &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &port->macaddr[0],
+ &eth_h->s_addr);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s, portid %u. Line %d\n\r",
+ __FUNCTION__,
+ port->pmdid, __LINE__);
+
+ if (is_multicast_ipv4_addr
+ (ip_h->dst_addr)) {
+ uint32_t ip_src;
+
+ ip_src =
+ rte_be_to_cpu_32(ip_addr);
+ if ((ip_src & 0x00000003) == 1)
+ ip_src =
+ (ip_src &
+ 0xFFFFFFFC) |
+ 0x00000002;
+ else
+ ip_src =
+ (ip_src &
+ 0xFFFFFFFC) |
+ 0x00000001;
+ ip_h->src_addr =
+ rte_cpu_to_be_32(ip_src);
+ ip_h->dst_addr = ip_addr;
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum =
+ ~rte_raw_cksum(ip_h,
+ sizeof(struct
+ ipv4_hdr));
+ } else {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "%s, portid %u. Line %d\n\r",
+ __FUNCTION__,
+ port->pmdid,
+ __LINE__);
+ ip_h->src_addr = ip_h->dst_addr;
+ ip_h->dst_addr = ip_addr;
+ }
+
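+ /* Convert the echo request into an echo reply in place. The ICMP
+  * checksum is updated incrementally (RFC 1624 style): the old type
+  * field is folded out of the one's-complement sum and the new type
+  * folded in, instead of recomputing the checksum over the payload. */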
+ icmp_h->icmp_type = IP_ICMP_ECHO_REPLY;
+ cksum = ~icmp_h->icmp_cksum & 0xffff;
+ cksum +=
+ ~htons(IP_ICMP_ECHO_REQUEST << 8) &
+ 0xffff;
+ cksum += htons(IP_ICMP_ECHO_REPLY << 8);
+ cksum =
+ (cksum & 0xffff) + (cksum >> 16);
+ cksum =
+ (cksum & 0xffff) + (cksum >> 16);
+ icmp_h->icmp_cksum = ~cksum;
+
+ if (ARPICMP_DEBUG)
+ print_mbuf
+ ("TX ICMP ECHO REPLY PKT",
+ in_port_id, pkt, __LINE__);
+ port->transmit_bulk_pkts(port, &pkt, 1);
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX", port->pmdid,
+ pkt, __LINE__);
+
+ return;
+ } else if (icmp_h->icmp_type ==
+ IP_ICMP_ECHO_REPLY
+ && icmp_h->icmp_code == 0) {
+ if (ARPICMP_DEBUG)
+ print_mbuf("RX", in_port_id,
+ pkt, __LINE__);
+
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = in_port_id;
+ arp_key.ip =
+ rte_bswap32(ip_h->src_addr);
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ struct arp_entry_data *arp_entry =
+ retrieve_arp_entry(arp_key);
+ if (arp_entry == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Received unsolicited ICMP echo reply from ip%x, port %d\n",
+ arp_key.ip,
+ arp_key.port_id);
+ return;
+ }
+ arp_entry->status = COMPLETE;
+ }
+ }
+ }
+
+ rte_pktmbuf_free(pkt);
+ }
+}
+
+/* int
+ * inet_pton(af, src, dst)
+ * convert from presentation format (which usually means ASCII printable)
+ * to network format (which is usually some kind of binary format).
+ * return:
+ * 1 if the address was valid for the specified address family
+ * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int my_inet_pton_ipv6(int af, const char *src, void *dst)
+{
+ switch (af) {
+ case AF_INET:
+ return inet_pton_ipv4(src, dst);
+ case AF_INET6:
+ return inet_pton_ipv6(src, dst);
+ default:
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+ /* NOTREACHED */
+}
+
+/* int
+ * inet_pton_ipv4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int inet_pton_ipv4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton_ipv6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int inet_pton_ipv6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned int dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton_ipv4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static int arp_parse_args(struct pipeline_params *params)
+{
+ uint32_t arp_route_tbl_present = 0;
+ uint32_t nd_route_tbl_present = 0;
+ uint32_t ports_mac_list_present = 0;
+ uint32_t numArg;
+ uint32_t n_vnf_threads_present = 0;
+
+ uint32_t pktq_in_prv_present = 0;
+ uint32_t prv_to_pub_map_present = 0;
+
+ uint8_t n_prv_in_port = 0;
+ int i;
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ in_port_dir_a[i] = 0; //make all RX ports ingress initially
+ prv_to_pub_map[i] = 0xff;
+ pub_to_prv_map[i] = 0xff;
+ }
+
+ RTE_SET_USED(ports_mac_list_present);
+ RTE_SET_USED(nd_route_tbl_present);
+ RTE_SET_USED(arp_route_tbl_present);
+ for (numArg = 0; numArg < params->n_args; numArg++) {
+ char *arg_name = params->args_name[numArg];
+ char *arg_value = params->args_value[numArg];
+
+ /* arp timer expiry */
+ if (strcmp(arg_name, "arp_timer_expiry") == 0) {
+ arp_timeout = atoi(arg_value);
+ }
+
+ /* pktq_in_prv */
+ if (strcmp(arg_name, "pktq_in_prv") == 0) {
+ if (pktq_in_prv_present) {
+ printf
+ ("Duplicate pktq_in_prv ... parse failed..\n\n");
+ return -1;
+ }
+ pktq_in_prv_present = 1;
+
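+ /* pktq_in_prv lists the private-side RX queues; the parser below
+  * reads the digits before the '.' as the physical port number.
+  * Illustrative config value (not taken from any file):
+  * pktq_in_prv = RXQ0.0 */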
+ int rxport = 0, j = 0;
+ char phy_port_num[5];
+ char *token = strtok(arg_value, "RXQ");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != '.')) {
+ phy_port_num[j] = token[j];
+ j++;
+ }
+ phy_port_num[j] = '\0';
+ rxport = atoi(phy_port_num);
+ if (rxport < 0)
+ rxport = 0;
+ printf
+ ("token: %s, phy_port_str: %s, phy_port_num %d\n",
+ token, phy_port_num, rxport);
+ prv_in_port_a[n_prv_in_port++] = rxport;
+ if (rxport < PIPELINE_MAX_PORT_IN)
+ in_port_dir_a[rxport] = 1; // set rxport egress
+ token = strtok(NULL, "RXQ");
+ }
+
+ if (n_prv_in_port == 0) {
+ printf
+ ("VNF common parse error - no prv RX phy port\n");
+ return -1;
+ }
+ continue;
+ }
+
+ /* prv_to_pub_map */
+ if (strcmp(arg_name, "prv_to_pub_map") == 0) {
+ if (prv_to_pub_map_present) {
+ printf
+ ("Duplicated prv_to_pub_map ... parse failed ...\n");
+ return -1;
+ }
+ prv_to_pub_map_present = 1;
+
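+ /* prv_to_pub_map pairs a private RX port with its public TX port;
+  * the parser below expects "(rx,tx)" tuples. Illustrative value:
+  * prv_to_pub_map = (0,1)(2,3) */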
+ int rxport = 0, txport = 0, j = 0, k = 0;
+ char rx_phy_port_num[5];
+ char tx_phy_port_num[5];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != ',')) {
+ rx_phy_port_num[j] = token[j];
+ j++;
+ }
+ rx_phy_port_num[j] = '\0';
+ rxport = atoi(rx_phy_port_num);
+ if (rxport < 0)
+ rxport = 0;
+
+ j++;
+ k = 0;
+ while ((k < 4) && (token[j + k] != ')')) {
+ tx_phy_port_num[k] = token[j + k];
+ k++;
+ }
+ tx_phy_port_num[k] = '\0';
+ txport = atoi(tx_phy_port_num);
+ if (txport < 0)
+ txport = 0;
+
+ RTE_LOG(INFO, LIBARP, "token: %s,"
+ "rx_phy_port_str: %s, phy_port_num %d,"
+ "tx_phy_port_str: %s, tx_phy_port_num %d\n",
+ token, rx_phy_port_num, rxport,
+ tx_phy_port_num, txport);
+
+ if ((rxport >= PIPELINE_MAX_PORT_IN) ||
+ (txport >= PIPELINE_MAX_PORT_IN) ||
+ (in_port_dir_a[rxport] != 1)) {
+ printf
+ ("CG-NAPT parse error - incorrect prv-pub translation. Rx %d, Tx %d, Rx Dir %d\n",
+ rxport, txport,
+ in_port_dir_a[rxport]);
+ return -1;
+ }
+
+ prv_to_pub_map[rxport] = txport;
+ pub_to_prv_map[txport] = rxport;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ //n_vnf_threads = 3
+ if (strcmp(arg_name, "n_vnf_threads") == 0) {
+ if (n_vnf_threads_present)
+ return -1;
+ n_vnf_threads_present = 1;
+ trim(arg_value);
+ num_vnf_threads = atoi(arg_value);
+ if (num_vnf_threads <= 0) {
+ RTE_LOG(INFO, LIBARP,
+ "n_vnf_threads is invalid\n");
+ return -1;
+ }
+ RTE_LOG(INFO, LIBARP, "n_vnf_threads: 0x%x\n",
+ num_vnf_threads);
+ }
+
+ /* lib_arp_debug */
+ if (strcmp(arg_name, "lib_arp_debug") == 0) {
+ ARPICMP_DEBUG = atoi(arg_value);
+
+ continue;
+ }
+
+ /* ports_mac_list */
+ if (strcmp(arg_name, "ports_mac_list") == 0) {
+ ports_mac_list_present = 1;
+
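+ /* ports_mac_list is a space-separated list of MAC addresses, six
+  * two-hex-digit bytes per token with a one-character separator
+  * (e.g. ':'); each token fills the next slot of link_hw_addr[].
+  * Illustrative value: ports_mac_list = 00:16:3e:52:0a:01 00:16:3e:52:0a:02 */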
+ uint32_t i = 0, j = 0, k = 0, MAC_NUM_BYTES = 6;
+
+ char byteStr[MAC_NUM_BYTES][3];
+ uint32_t byte[MAC_NUM_BYTES];
+
+ char *token = strtok(arg_value, " ");
+ while (token) {
+ k = 0;
+ for (i = 0; i < MAC_NUM_BYTES; i++) {
+ for (j = 0; j < 2; j++) {
+ byteStr[i][j] = token[k++];
+ }
+ byteStr[i][j] = '\0';
+ k++;
+ }
+
+ for (i = 0; i < MAC_NUM_BYTES; i++) {
+ byte[i] = strtoul(byteStr[i], NULL, 16);
+ }
+
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP, "token: %s",
+ token);
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ RTE_LOG(INFO, LIBARP,
+ ", byte[%u] %u", i,
+ byte[i]);
+ RTE_LOG(INFO, LIBARP, "\n");
+ }
+ /* Populate the static link_hw_addr table */
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ link_hw_addr
+ [link_hw_addr_array_idx].addr_bytes
+ [i] = byte[i];
+
+ link_hw_addr_array_idx++;
+ token = strtok(NULL, " ");
+ }
+
+ continue;
+ }
+
+ /* arp_route_tbl */
+ if (strcmp(arg_name, "arp_route_tbl") == 0) {
+ arp_route_tbl_present = 1;
+
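+ /* arp_route_tbl entries are "(dest_ip,mask,tx_port,nh_ip)" tuples;
+  * all four fields are parsed as hex. Illustrative value:
+  * arp_route_tbl = (c0a80a00,ffffff00,0,c0a80a01) */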
+ uint32_t dest_ip = 0, mask = 0, tx_port = 0, nh_ip =
+ 0, i = 0, j = 0, k = 0, l = 0;
+ uint32_t arp_route_tbl_str_max_len = 10;
+ char dest_ip_str[arp_route_tbl_str_max_len];
+ char mask_str[arp_route_tbl_str_max_len];
+ char tx_port_str[arp_route_tbl_str_max_len];
+ char nh_ip_str[arp_route_tbl_str_max_len];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ i = 0;
+ while ((i < (arp_route_tbl_str_max_len - 1))
+ && (token[i] != ',')) {
+ dest_ip_str[i] = token[i];
+ i++;
+ }
+ dest_ip_str[i] = '\0';
+ dest_ip = strtoul(dest_ip_str, NULL, 16);
+
+ i++;
+ j = 0;
+ while ((j < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j] != ',')) {
+ mask_str[j] = token[i + j];
+ j++;
+ }
+ mask_str[j] = '\0';
+ mask = strtoul(mask_str, NULL, 16);
+
+ j++;
+ k = 0;
+ while ((k < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j + k] != ',')) {
+ tx_port_str[k] = token[i + j + k];
+ k++;
+ }
+ tx_port_str[k] = '\0';
+ tx_port = strtoul(tx_port_str, NULL, 16); //atoi(tx_port_str);
+
+ k++;
+ l = 0;
+ while ((l < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j + k + l] != ')')) {
+ nh_ip_str[l] = token[i + j + k + l];
+ l++;
+ }
+ nh_ip_str[l] = '\0';
+ nh_ip = strtoul(nh_ip_str, NULL, 16); //atoi(nh_ip_str);
+
+ if (1) {
+ RTE_LOG(INFO, LIBARP, "token: %s, "
+ "dest_ip_str: %s, dest_ip %u, "
+ "mask_str: %s, mask %u, "
+ "tx_port_str: %s, tx_port %u, "
+ "nh_ip_str: %s, nh_ip %u\n",
+ token, dest_ip_str, dest_ip,
+ mask_str, mask, tx_port_str,
+ tx_port, nh_ip_str, nh_ip);
+ }
+
+ /* if (tx_port >= params->n_ports_out)
+ {
+ RTE_LOG(INFO, LIBARP,"ARP-ICMP parse error - incorrect tx_port %d, max %d\n",
+ tx_port, params->n_ports_out);
+ return -1;
+ }
+ */
+ //Populate the static arp_route_table
+ lib_arp_route_table[arp_route_tbl_index].ip =
+ dest_ip;
+ lib_arp_route_table[arp_route_tbl_index].mask =
+ mask;
+ lib_arp_route_table[arp_route_tbl_index].port =
+ tx_port;
+ lib_arp_route_table[arp_route_tbl_index].nh =
+ nh_ip;
+ arp_route_tbl_index++;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ /*ND IPv6 */
+ /* nd_route_tbl */
+ if (strcmp(arg_name, "nd_route_tbl") == 0) {
+ nd_route_tbl_present = 1;
+
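+ /* nd_route_tbl entries are "(dest_ipv6,depth,tx_port,nh_ipv6)"
+  * tuples; the IPv6 fields use presentation format, depth is decimal
+  * and tx_port is hex. Illustrative value:
+  * nd_route_tbl = (fec0::1,64,0,fec0::2) */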
+ uint8_t dest_ipv6[16], depth = 0, tx_port =
+ 0, nh_ipv6[16], i = 0, j = 0, k = 0, l = 0;
+ uint8_t nd_route_tbl_str_max_len = 128; //64;
+ char dest_ipv6_str[nd_route_tbl_str_max_len];
+ char depth_str[nd_route_tbl_str_max_len];
+ char tx_port_str[nd_route_tbl_str_max_len];
+ char nh_ipv6_str[nd_route_tbl_str_max_len];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ i = 0;
+ while ((i < (nd_route_tbl_str_max_len - 1))
+ && (token[i] != ',')) {
+ dest_ipv6_str[i] = token[i];
+ i++;
+ }
+ dest_ipv6_str[i] = '\0';
+ my_inet_pton_ipv6(AF_INET6, dest_ipv6_str,
+ &dest_ipv6);
+
+ i++;
+ j = 0;
+ while ((j < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j] != ',')) {
+ depth_str[j] = token[i + j];
+ j++;
+ }
+ depth_str[j] = '\0';
+ //converting string char to integer
+ int s;
+ for (s = 0; depth_str[s] != '\0'; ++s)
+ depth = depth * 10 + depth_str[s] - '0';
+
+ j++;
+ k = 0;
+ while ((k < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j + k] != ',')) {
+ tx_port_str[k] = token[i + j + k];
+ k++;
+ }
+ tx_port_str[k] = '\0';
+ tx_port = strtoul(tx_port_str, NULL, 16); //atoi(tx_port_str);
+
+ k++;
+ l = 0;
+ while ((l < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j + k + l] != ')')) {
+ nh_ipv6_str[l] = token[i + j + k + l];
+ l++;
+ }
+ nh_ipv6_str[l] = '\0';
+ my_inet_pton_ipv6(AF_INET6, nh_ipv6_str,
+ &nh_ipv6);
+
+ /* Populate the static lib_nd_route_table */
+ for (i = 0; i < 16; i++) {
+ lib_nd_route_table
+ [nd_route_tbl_index].ipv6[i] =
+ dest_ipv6[i];
+ lib_nd_route_table
+ [nd_route_tbl_index].nhipv6[i] =
+ nh_ipv6[i];
+ }
+ lib_nd_route_table[nd_route_tbl_index].depth =
+ depth;
+ lib_nd_route_table[nd_route_tbl_index].port =
+ tx_port;
+
+ nd_route_tbl_index++;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ /* any other */
+ //return -1;
+ }
+ /* Check that mandatory arguments are present */
+ /*
+ if ((arp_route_tbl_present == 0) || (ports_mac_list_present == 0)) {
+ RTE_LOG(INFO, LIBARP,"VNF common not all mandatory arguments are present\n");
+ RTE_LOG(INFO, LIBARP,"%d, %d \n",
+ arp_route_tbl_present, ports_mac_list_present);
+ return -1;
+ }
+ */
+
+ return 0;
+}
+
+void lib_arp_init(struct pipeline_params *params,
+ __rte_unused struct app_params *app)
+{
+
+ RTE_LOG(INFO, LIBARP, "ARP initialization ...\n");
+
+ /* Parse arguments */
+ if (arp_parse_args(params)) {
+ RTE_LOG(INFO, LIBARP, "arp_parse_args failed ...\n");
+ return;
+ }
+
+ /* create the arp_icmp mbuf tx pool */
+ lib_arp_pktmbuf_tx_pool =
+ rte_pktmbuf_pool_create("lib_arp_mbuf_tx_pool", NB_ARPICMP_MBUF, 32,
+ 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+
+ if (lib_arp_pktmbuf_tx_pool == NULL) {
+ RTE_LOG(INFO, LIBARP, "ARP mbuf pool create failed.\n");
+ return;
+ }
+
+ lib_arp_pkt = rte_pktmbuf_alloc(lib_arp_pktmbuf_tx_pool);
+ if (lib_arp_pkt == NULL) {
+ RTE_LOG(INFO, LIBARP, "ARP lib_arp_pkt alloc failed.\n");
+ return;
+ }
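+ /* Note: lib_arp_pkt is allocated once here and reused for every
+  * ARP/ICMP request this library generates (request_arp, request_echo,
+  * send_gratuitous_arp), rather than allocating a fresh mbuf per
+  * request. */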
+
+ arp_hash_params.socket_id = rte_socket_id();
+ arp_hash_params.entries = MAX_NUM_ARP_ENTRIES;
+ arp_hash_params.key_len = sizeof(struct arp_key_ipv4);
+ arp_hash_handle = rte_hash_create(&arp_hash_params);
+
+ if (arp_hash_handle == NULL) {
+ RTE_LOG(INFO, LIBARP,
+ "ARP rte_hash_create failed. socket %d ... \n",
+ arp_hash_params.socket_id);
+ } else {
+ RTE_LOG(INFO, LIBARP, "arp_hash_handle %p\n\n",
+ (void *)arp_hash_handle);
+ }
+
+ /* Create the timer mempool used for ARP/ND entry expiry timers */
+
+ timer_mempool_arp = rte_mempool_create("timer_mempool_arp",
+ timer_objs_mempool_count,
+ sizeof(struct rte_timer),
+ 0, 0,
+ NULL, NULL,
+ NULL, NULL, rte_socket_id(), 0);
+ if (timer_mempool_arp == NULL) {
+ rte_panic("timer_mempool create error\n");
+ }
+ rte_timer_subsystem_init();
+ list_add_type(ETHER_TYPE_ARP, process_arpicmp_pkt_parse);
+
+ /* ND IPv6 */
+ nd_hash_params.socket_id = rte_socket_id();
+ nd_hash_params.entries = MAX_NUM_ND_ENTRIES;
+ nd_hash_params.key_len = sizeof(struct nd_key_ipv6);
+ nd_hash_handle = rte_hash_create(&nd_hash_params);
+ if (nd_hash_handle == NULL) {
+ RTE_LOG(INFO, LIBARP,
+ "ND rte_hash_create failed. socket %d ... \n",
+ nd_hash_params.socket_id);
+ } else {
+ RTE_LOG(INFO, LIBARP, "nd_hash_handle %p\n\n",
+ (void *)nd_hash_handle);
+ }
+
+ return;
+}
+
+void arp_timer_callback(struct rte_timer *timer, void *arg)
+{
+ struct arp_timer_key *remove_key = (struct arp_timer_key *)arg;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "ARP TIMER callback : expire :%d\n",
+ (int)timer->expire);
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Remove ARP Entry for IP :%d.%d.%d.%d , port %d\n",
+ (remove_key->ip >> 24),
+ ((remove_key->ip & 0x00ff0000) >> 16),
+ ((remove_key->ip & 0x0000ff00) >> 8),
+ ((remove_key->ip & 0x000000ff)), remove_key->port_id);
+ remove_arp_entry((uint32_t) remove_key->ip,
+ (uint8_t) remove_key->port_id, arg);
+ return;
+}
+
+void nd_timer_callback(struct rte_timer *timer, void *arg)
+{
+ struct nd_timer_key *remove_key = (struct nd_timer_key *)arg;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "nd time callback : expire :%d\n",
+ (int)timer->expire);
+ remove_nd_entry_ipv6(remove_key->ipv6, remove_key->port_id);
+ return;
+}
+
+void create_arp_table(void)
+{
+
+ int i;
+ for (i = 0; i < MAX_ARP_DATA_ENTRY_TABLE; i++) {
+ populate_arp_entry((const struct ether_addr *)
+ &arp_entry_data_table[i].eth_addr,
+ arp_entry_data_table[i].ip,
+ (uint8_t) arp_entry_data_table[i].port,
+ STATIC_ARP);
+ }
+ print_arp_table();
+ return;
+}
+
+void create_nd_table(void)
+{
+
+ int i;
+ for (i = 0; i < MAX_ND_DATA_ENTRY_TABLE; i++) {
+ populate_nd_entry((const struct ether_addr *)
+ nd_entry_data_table[i].eth_addr,
+ nd_entry_data_table[i].ipv6,
+ (uint8_t) nd_entry_data_table[i].port,
+ STATIC_ND);
+ }
+ print_nd_table();
+ return;
+}
+
+void send_gratuitous_arp(l2_phy_interface_t *port)
+{
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+
+ struct rte_mbuf *arp_pkt = lib_arp_pkt;
+
+ if (port == NULL) {
+ RTE_LOG(INFO, LIBARP, "PORT ID DOWN.. %s\n", __FUNCTION__);
+ return;
+
+ }
+
+ if (arp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "Error allocating arp_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(arp_pkt, struct ether_hdr *);
+
+ ether_addr_copy(&broadcast_ether_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &port->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_ARP);
+
+ arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ arp_h->arp_hrd = CHECK_ENDIAN_16(ARP_HRD_ETHER);
+ arp_h->arp_pro = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+ arp_h->arp_hln = ETHER_ADDR_LEN;
+ arp_h->arp_pln = sizeof(uint32_t);
+ arp_h->arp_op = CHECK_ENDIAN_16(ARP_OP_REQUEST);
+
+ ether_addr_copy((struct ether_addr *)
+ &port->macaddr[0], &arp_h->arp_data.arp_sha);
+ if (port->ipv4_list == NULL) {
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "port->ipv4_list is NULL.. %s\n",
+ __FUNCTION__);
+ return;
+ }
+ arp_h->arp_data.arp_sip = (((ipv4list_t *) (port->ipv4_list))->ipaddr);
+ ether_addr_copy(&null_ether_addr, &arp_h->arp_data.arp_tha);
+ //arp_h->arp_data.arp_tip = rte_cpu_to_be_32(ip);
+ arp_h->arp_data.arp_tip = 0; //(((ipv4list_t *) (port->ipv4_list))->ipaddr);
+ // RTE_LOG(INFO, LIBARP,"arp tip:%x arp sip :%x\n", arp_h->arp_data.arp_tip,
+ //arp_h->arp_data.arp_sip);
+ // mmcd changed length from 60 to 42 - real length of arp request, no padding on ethernet needed - looks now like linux arp
+ arp_pkt->pkt_len = 42;
+ arp_pkt->data_len = 42;
+
+ if (ARPICMP_DEBUG) {
+ RTE_LOG(INFO, LIBARP, "SENDING GRATUITOUS ARP REQUEST\n");
+ print_mbuf("TX", port->pmdid, arp_pkt, __LINE__);
+ }
+ port->transmit_single_pkt(port, arp_pkt);
+}
+
+void set_arpdebug(int flag)
+{
+ if (flag) {
+ RTE_LOG(INFO, LIBARP, "Debugs turned on\n\r");
+ ARPICMP_DEBUG = 1;
+ NDIPV6_DEBUG = 1;
+
+ } else {
+ RTE_LOG(INFO, LIBARP, "Debugs turned off\n\r");
+ ARPICMP_DEBUG = 0;
+ NDIPV6_DEBUG = 0;
+ }
+}
+
+void set_arptimeout(uint32_t timeout_val)
+{
+ if (timeout_val == 0) {
+ RTE_LOG(INFO, LIBARP, "Cannot be zero...\n\r");
+ return;
+ }
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP,
+ "set_arptimeout: arp_timeout %u, timeout_val %u\n\r",
+ arp_timeout, timeout_val);
+ arp_timeout = timeout_val;
+ if (ARPICMP_DEBUG)
+ RTE_LOG(INFO, LIBARP, "set_arptimeout: arp_timeout %u\n\r",
+ arp_timeout);
+}
diff --git a/common/VIL/l2l3_stack/lib_arp.h b/common/VIL/l2l3_stack/lib_arp.h
new file mode 100644
index 00000000..33875679
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_arp.h
@@ -0,0 +1,506 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_LIB_ARP_H__
+#define __INCLUDE_LIB_ARP_H__
+
+#include <rte_pipeline.h>
+#include "rte_ether.h"
+#include "l2_proto.h"
+#include "app.h"
+
+#define ND_IPV6_ADDR_SIZE 16 /**< 16 Byte of IPv6 Address. */
+#define ND_IPV6_TIMER_EXPIRY 300 /**< in Seconds, Timer for ND IPv6 Expiry */
+#define ARP_TIMER_EXPIRY 1800 /**< in Seconds, TIMER for ARP Expiry */
+#define TIMER_MILLISECOND 1
+#define RTE_LOGTYPE_LIBARP RTE_LOGTYPE_USER1
+#define MAX_ND_RT_ENTRY 16
+#define MAX_ARP_RT_ENTRY 16
+
+/**
+* A structure for Route table entries of IPv4
+*/
+
+struct lib_arp_route_table_entry {
+ uint32_t ip; /**< Ipv4 address*/
+ uint32_t mask; /**< mask */
+ uint32_t port; /**< Physical port */
+ uint32_t nh; /**< next hop */
+};
+
+/**
+* A structure for Route table entries of IPv6
+*
+*/
+struct lib_nd_route_table_entry {
+ uint8_t ipv6[16]; /**< Ipv6 address */
+ uint8_t depth; /**< Depth */
+ uint32_t port; /**< Port */
+ uint8_t nhipv6[16]; /**< next hop Ipv6 */
+};
+
+extern struct lib_nd_route_table_entry lib_nd_route_table[MAX_ND_RT_ENTRY];
+extern struct lib_arp_route_table_entry lib_arp_route_table[MAX_ARP_RT_ENTRY];
+
+enum {
+ ARP_FOUND,
+ ARP_NOT_FOUND,
+ NH_NOT_FOUND,
+};
+
+enum arp_key_type {
+ ARP_IPV4,
+ ND_IPV6,
+};
+
+struct arp_key_ipv4 {
+ uint32_t ip; /**< IP address */
+ uint8_t port_id; /**< Port id */
+ uint8_t filler1; /**< filler 1, for better hash key */
+ uint8_t filler2; /**< filler 2, for better hash key */
+ uint8_t filler3; /**< filler 3, for better hash key */
+};
+
+/**
+* IPv6
+*/
+struct nd_key_ipv6 {
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< 128 Bit of IPv6 Address*/
+ uint8_t port_id; /**< Port id */
+ uint8_t filler1;
+ uint8_t filler2;
+ uint8_t filler3;
+};
+
+/**
+* Arp Key
+*/
+struct arp_key {
+ enum arp_key_type type;
+ union {
+ struct arp_key_ipv4 ipv4;
+ } key; /**< Key of type arp key Ipv4 */
+};
+
+/**
+* Callback function parameter used to remove an ND entry
+*
+*/
+
+struct nd_timer_key {
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< IPv6 address */
+ uint8_t port_id; /**< Port id */
+} __rte_cache_aligned;
+
+/**
+* Callback function parameter used to remove an ARP entry
+*
+*/
+struct arp_timer_key {
+ uint32_t ip; /**< Ip address */
+ uint8_t port_id; /**< Port id */
+} __rte_cache_aligned;
+
+extern uint32_t ARPICMP_DEBUG;
+
+#define COMPLETE 1 /**< ARP entry populated and echo reply received. */
+#define INCOMPLETE 0 /**< ARP entry populated and either awaiting echo reply or stale entry. */
+
+extern uint32_t NDIPV6_DEBUG; /**< ND IPv6 */
+
+#define ICMPv6_COMPLETE 1 /**< ICMPv6 entry populated and echo reply received. */
+#define ICMPv6_INCOMPLETE 0 /**< ICMPv6 entry populated and either awaiting echo reply or stale entry. */
+#define STATIC_ARP 1 /**< Static ARP Entry. */
+#define DYNAMIC_ARP 0 /**< Dynamic ARP Entry. */
+#define STATIC_ND 1 /**< Static ND Entry. */
+#define DYNAMIC_ND 0 /**< Dynamic ND Entry. */
+
+/**
+* A structure used to define the ARP entry data.
+* This structure is used as an input parameter when adding an ARP entry.
+*/
+
+struct arp_entry_data {
+ struct ether_addr eth_addr; /**< ethernet address */
+ uint32_t ip; /**< IP address */
+ uint8_t port; /**< Port */
+ uint8_t status; /**< Status of entry */
+ uint8_t mode; /**< Mode */
+ uint8_t retry_count; /**< retry count for ARP*/
+ struct rte_timer *timer; /**< Timer Associated with ARP*/
+ struct arp_timer_key *timer_key;
+} __attribute__ ((packed));
+
+/**
+* A structure used to define the table of ARP entry data.
+* This structure is used to maintain the ARP entry data.
+*/
+
+struct table_arp_entry_data {
+ uint8_t eth_addr[6]; /**< Ethernet address */
+ uint8_t port; /**< port */
+ uint8_t status; /**< status of entry */
+ uint32_t ip; /**< Ip address */
+} __attribute__ ((packed));
+
+/**
+* A structure used to define the ND entry data for IPv6.
+* This structure is used as an input parameter when adding an ND entry.
+*/
+
+struct nd_entry_data {
+ struct ether_addr eth_addr; /**< Ethernet address */
+ uint8_t port; /**< port */
+ uint8_t status; /**< status of the entry */
+ uint8_t mode; /**< Mode */
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< Ipv6 address */
+ struct rte_timer *timer; /**< Timer */
+} __attribute__ ((packed));
+
+/**
+* A structure used to define the table of ND entry data.
+* This structure is used to maintain the ND entry data.
+*
+*/
+
+struct table_nd_entry_data {
+ uint8_t eth_addr[6]; /**< Ethernet address */
+ uint8_t port; /**< Port */
+ uint8_t status; /**< status of Entry */
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE]; /**< IPv6 address */
+ struct rte_timer *timer; /**< Timer */
+} __attribute__ ((packed));
+
+/**
+* To get the destination MAC address and next hop for the IP address and outgoing port
+* @param1 ip addr
+* IP address for which MAC address is needed.
+* @param2 phy_port
+* Physical Port
+* @param3 ether_addr
+* pointer to the ether_addr, this gets updated with a valid MAC address
+* @Param4 next nhip
+* Gets the next hop IP by Ip address and physical port
+* @return
+* 0 if failure, and 1 if success
+*/
+
+int get_dest_mac_address(const uint32_t ipaddr, uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint32_t *nhip);
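+/*
+ * Illustrative usage sketch (hypothetical caller; variable names are
+ * examples only and are not part of this API):
+ *
+ *   struct ether_addr dmac;
+ *   uint32_t out_port = 0, nhip = 0;
+ *   if (get_dest_mac_address(dest_ip, &out_port, &dmac, &nhip))
+ *       ether_addr_copy(&dmac, &eth_h->d_addr);
+ *   else
+ *       request_arp((uint8_t)out_port, nhip); // resolve the next hop first
+ */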
+/**
+* To get the destination MAC address for the IP address and outgoing port
+* @param1 ip addr
+* IP address for which MAC address is needed.
+* @param2 phy_port
+* Physical Port
+* @param3 ether_addr
+* pointer to the ether_addr, this gets updated with a valid MAC address
+* @return
+* 0 if failure, and 1 if success
+*/
+int get_dest_mac_addr_port(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr);
+
+/**
+* To get the destination mac address for IPv4 address
+* @param Ipaddr
+* IP address for which the destination MAC address is needed
+* @param Phy_port
+* physical port
+* @param ether_addr
+* pointer to the ether_addr, this gets updated with a valid MAC address
+* @return
+* 0 if failure, 1 if success
+*/
+int get_dest_mac_addr(const uint32_t ipaddr, uint32_t *phy_port,
+ struct ether_addr *hw_addr);
+
+/**
+* To get the destination mac address for IPV6 address
+* @param ipv6addr
+* IPv6 address for which the destination MAC address is needed
+* @param Phy_Port
+* physical port
+* @param ether_addr
+* pointer to the ether_addr, this gets updated with a valid MAC address
+* @param Nhipv6[]
+* Gets the next hop ipv6 address by ipv6 address and physical port
+* @return
+* 0 if failure, 1 if success
+*/
+int get_dest_mac_address_ipv6(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[]);
+/**
+* To get the destination mac address for IPV6 address
+* @param ipv6addr
+* IPv6 address for which the destination MAC address is needed
+* @param Phy_Port
+* physical port
+* @param ether_addr
+* pointer to the ether_addr, this gets updated with a valid MAC address
+* @param Nhipv6[]
+* Gets the next hop ipv6 address by ipv6 address and physical port
+* @return
+* 0 if failure, 1 if success
+*/
+
+int get_dest_mac_address_ipv6_port(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr,
+ uint8_t nhipv6[]);
+
+/**
+* To get hardware link address
+* @param out_port
+* out going port
+*/
+
+struct ether_addr *get_link_hw_addr(uint8_t out_port);
+
+/**
+* This prints the Arp Table
+* @param void
+*
+*/
+void print_arp_table(void);
+
+/**
+* This prints the ND table
+* @param void
+*
+*/
+void print_nd_table(void);
+
+/**
+* This removes arp entry from Table
+* @param ipaddr
+* Ipv4 address
+* @param portid
+* Port id
+*/
+void remove_arp_entry(uint32_t ipaddr, uint8_t portid, void *arg);
+
+/**
+* Removes ND entry from Nd Table
+* @Param ipv6addr[]
+* Ipv6 address
+* @Param portid
+* Port id
+*/
+
+void remove_nd_entry_ipv6(uint8_t ipv6addr[], uint8_t portid);
+
+/**
+* Populate arp entry in arp Table
+* @param ether_addr
+* Ethernet address
+* @param ipaddr
+* IPv4 address
+* @Param portid
+* port id
+* @Param mode
+* Mode
+*/
+void populate_arp_entry(const struct ether_addr *hw_addr, uint32_t ipaddr,
+ uint8_t portid, uint8_t mode);
+
+/**
+* Populate ND entry in ND Table
+* @param ether_addr
+* Ethernet address
+* @param ip[]
+* IPv6 address
+* @Param portid
+* port id
+* @Param mode
+* Mode
+*/
+
+void populate_nd_entry(const struct ether_addr *hw_addr, uint8_t ip[],
+ uint8_t portid, uint8_t mode);
+
+/**
+* To send an ARP request
+* @Param port_id
+* port id
+* @Param ip
+* IP address
+*/
+
+void request_arp(uint8_t port_id, uint32_t ip);
+
+/**
+* To send an echo request
+* @param port_id
+* Port id
+* @Param ip
+* Ip address
+*/
+struct rte_mbuf *request_echo(uint32_t port_id, uint32_t ip);
+
+/**
+* To send icmpv6 echo request
+* @Param port_id
+* Port id
+* @Param ipv6
+* ipv6 address
+*/
+struct rte_mbuf *request_icmpv6_echo(uint8_t ipv6[], l2_phy_interface_t *port);
+
+/**
+* To request ND
+* @Param ipv6
+* ipv6 address
+* @Param port
+* pointer to port
+*/
+struct rte_mbuf *request_nd(uint8_t ipv6[], l2_phy_interface_t *port);
+
+/**
+* To process the ARP and ICMP packets
+* @Param Pkt
+* Packets to be processed
+* @Param pkt_num
+* packet number
+* @Param portid
+* port id
+*/
+void process_arpicmp_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port);
+
+/**
+* IPv4
+* Validate if key-value pair already exists in the hash table for given key - IPv4
+* @Param arp_key
+* Arp key to validate entry
+*/
+struct arp_entry_data *retrieve_arp_entry(const struct arp_key_ipv4 arp_key);
+
+/**
+* ND IPv6
+* Validate if key-value pair already exists in the hash table for given key - ND IPv6
+* @Param nd_key
+* Nd key to validate Nd entry
+*/
+
+struct nd_entry_data *retrieve_nd_entry(struct nd_key_ipv6 nd_key);
+
+/**
+* Sets up ARP initialization
+*/
+//void lib_arp_init(void);
+void lib_arp_init(struct pipeline_params *params, struct app_params *app);
+#if 0
+void set_port_to_loadb_map(uint8_t pipeline_num);
+
+/**
+* Acts on port_to_loadb_map
+*/
+uint8_t get_port_to_loadb_map(uint8_t phy_port_id);
+
+void set_phy_inport_map(uint8_t pipeline_num, uint8_t *map);
+void set_phy_outport_map(uint8_t pipeline_num, uint8_t *map);
+
+/**
+* Acts on lb_outport_id
+*/
+
+uint8_t get_loadb_outport_id(uint8_t actual_phy_port);
+uint8_t get_vnf_set_num(uint8_t pipeline_num);
+
+void pipelines_port_info(void);
+void pipelines_map_info(void);
+#endif
+/**
+* A callback for arp Timer
+* @Param rte_timer
+* timer pointer
+* @Param arg
+* arguments to timer
+*/
+void arp_timer_callback(struct rte_timer *, void *arg);
+
+/**
+* A callback for ND timer
+* @Param rte_timer
+* timer pointer
+* @Param arg
+* arguments to timer
+*/
+void nd_timer_callback(struct rte_timer *timer, void *arg);
+
+/**
+* To create Arp Table
+* @param void
+*/
+void create_arp_table(void);
+/**
+* To create ND Table
+* @param void
+*/
+void create_nd_table(void);
+
+/**
+* To parse and process the ARP and ICMP packets
+* @Param pkt
+* pkt to process
+* @Param pkt_num
+* pkt number
+* @Param pkt_mask
+* packet mask
+* @Param port
+* pointer to port
+*/
+void process_arpicmp_pkt_parse(struct rte_mbuf **pkt, uint16_t pkt_num,
+ uint64_t pkt_mask, l2_phy_interface_t *port);
+
+/**
+* Sends a gratuitous ARP packet
+* @Param port
+* pointer to port
+*/
+void send_gratuitous_arp(l2_phy_interface_t *port);
+/**
+* To set arp debug
+* @Param flag
+* set 1 unset 0
+*/
+void set_arpdebug(int flag);
+/**
+* To set timer for arp entry
+* @Param timeout_val
+* timer val for arp entry
+*/
+void set_arptimeout(uint32_t timeout_val);
+/**
+* To get nexthop for ipv4
+* @Param ipv4
+* ipv4 address
+* @Param port
+* pointer to the outgoing port, updated for the matching route
+*/
+uint32_t get_nh(uint32_t, uint32_t *);
+/**
+* To get nexthop for ipv6
+* @Param ipv6
+* ipv6 address
+* @Param port
+* pointer to port
+* @Param nhipv6
+* next hop ipv6
+*/
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[]);
+#endif
diff --git a/common/VIL/l2l3_stack/lib_icmpv6.c b/common/VIL/l2l3_stack/lib_icmpv6.c
new file mode 100644
index 00000000..44f30cbf
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_icmpv6.c
@@ -0,0 +1,410 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+/* Santosh Sethupathi*/
+
+#include "lib_icmpv6.h"
+
+static void print_pkt(uint8_t *rd)
+{
+ int i = 0, j = 0;
+
+ printf("Packet Contents:\n");
+
+ for (i = 0; i < 20; i++) {
+ for (j = 0; j < 20; j++)
+ printf("%02x ", rd[(20 * i) + j]);
+
+ printf("\n");
+ }
+}
+
+static uint16_t icmpv6_ipv6_nd_checksum(struct rte_mbuf *pkt)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+
+ size_t tmplen, offset;
+ uint8_t *tmppacket, *tpacket;
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+
+ uint32_t payloadlen = 0x20;
+ payloadlen = rte_bswap32(payloadlen);
+
+ tmplen = 40 + sizeof(struct icmpv6_hdr) + sizeof(struct icmpv6_nd_hdr);
+ tmplen = RTE_CACHE_LINE_ROUNDUP(tmplen);
+ tmppacket = rte_zmalloc(NULL, tmplen, RTE_CACHE_LINE_SIZE);
+ tpacket = tmppacket;
+
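+ /* Build the buffer the ICMPv6 checksum is computed over: the IPv6
+  * pseudo-header fields as laid out below (source address, destination
+  * address, three zero bytes, the next-header byte, then the 32-bit
+  * upper-layer payload length), followed by the ICMPv6 ND message. */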
+ offset = 16;
+ memcpy(tpacket, &ipv6_h->src_addr[0], offset);
+ tpacket += offset;
+ memcpy(tpacket, &ipv6_h->dst_addr[0], offset);
+ tpacket += offset;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ memcpy(tpacket, &ipv6_h->proto, 1);
+ tpacket++;
+ memcpy(tpacket, &payloadlen, 4);
+ tpacket += 4;
+ memcpy(tpacket, icmpv6_h,
+ sizeof(struct icmpv6_hdr) + sizeof(struct icmpv6_nd_hdr));
+
+ if (ARPICMP_DEBUG)
+ print_pkt(tmppacket);
+
+ uint16_t sum = rte_raw_cksum(tmppacket, tmplen);
+
+ rte_free(tmppacket);
+ return sum;
+}
+
+static uint16_t icmpv6_ipv6_echo_checksum(struct rte_mbuf *pkt)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+
+ size_t tmplen, offset;
+ uint8_t *tmppacket, *tpacket;
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+
+ uint32_t payloadlen = rte_bswap16(ipv6_h->payload_len);
+ uint32_t payloadlen_swap = rte_bswap32(payloadlen);
+
+ if (ARPICMP_DEBUG)
+ printf("%s: payloadlen: %u\n", __FUNCTION__, payloadlen);
+
+ tmplen = 40 + payloadlen;
+ tmplen = RTE_CACHE_LINE_ROUNDUP(tmplen);
+ tmppacket = rte_zmalloc(NULL, tmplen, RTE_CACHE_LINE_SIZE);
+ tpacket = tmppacket;
+
+ offset = 16;
+ memcpy(tpacket, &ipv6_h->src_addr[0], offset);
+ tpacket += offset;
+ memcpy(tpacket, &ipv6_h->dst_addr[0], offset);
+ tpacket += offset;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ *tpacket = 0;
+ tpacket++;
+ memcpy(tpacket, &ipv6_h->proto, 1);
+ tpacket++;
+ memcpy(tpacket, &payloadlen_swap, 4);
+ tpacket += 4;
+ memcpy(tpacket, icmpv6_h, payloadlen);
+
+ if (ARPICMP_DEBUG)
+ print_pkt(tmppacket);
+
+ uint16_t sum = rte_raw_cksum(tmppacket, tmplen);
+
+ rte_free(tmppacket);
+ return sum;
+}
+
+void process_icmpv6_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port)
+{
+
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_nd_hdr *icmpv6_nd_h;
+ uint8_t ipv6_addr[16];
+ uint8_t i = 0;
+ uint8_t req_tipv6[16];
+ /* To drop the packet */
+
+ if (port == NULL) {
+ printf("port is NULL");
+ return;
+ } else if (port->ipv6_list == NULL) {
+ printf("IPV6 address not configured on link\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+
+ if ((icmpv6_h->icmpv6_type == ICMPV6_ECHO_REQUEST)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ for (i = 0; i < 16; i++) {
+ ipv6_addr[i] = ipv6_h->src_addr[i];
+ }
+
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0],
+ &eth_h->s_addr);
+
+ for (i = 0; i < 16; i++)
+ ipv6_h->src_addr[i] = ipv6_h->dst_addr[i];
+ for (i = 0; i < 16; i++)
+ ipv6_h->dst_addr[i] = ipv6_addr[i];
+
+ icmpv6_h->icmpv6_type = ICMPV6_ECHO_REPLY;
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum = ~icmpv6_ipv6_echo_checksum(pkt);
+ port->transmit_bulk_pkts(port, &pkt, 1);
+
+ return;
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_ECHO_REPLY)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = port->pmdid;
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ nd_key.ipv6[i] = ipv6_h->src_addr[i];
+
+ }
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ /*Validate if key-value pair already exists in the hash table for ND IPv6 */
+ struct nd_entry_data *new_nd_data = retrieve_nd_entry(nd_key);
+ if (new_nd_data == NULL) {
+ printf
+ ("Received unsolicited ICMPv6 echo reply on port %d\n",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ printf("%02X%02X ", nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ return;
+ }
+
+ new_nd_data->status = COMPLETE;
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_NEIGHBOR_SOLICITATION)
+ && (icmpv6_h->icmpv6_code == 0)) {
+
+ icmpv6_nd_h =
+ (struct icmpv6_nd_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+ struct ether_addr *src_hw_addr = &eth_h->s_addr;
+ uint8_t src_ipv6[16], dst_ipv6[16];
+ uint16_t multi_addr;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ src_ipv6[i] = ipv6_h->src_addr[i];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ dst_ipv6[i] = ipv6_h->dst_addr[i];
+
+ multi_addr = dst_ipv6[0];
+
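+ /* Neighbor Solicitation handling: learn the sender's link-layer
+  * address, and if the solicited target is one of this port's IPv6
+  * addresses, turn the packet into a Neighbor Advertisement and send
+  * it back out the same port. */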
+ /* Check for Multicast Address */
+ if ((IPV6_MULTICAST & ((multi_addr << 8) | dst_ipv6[1]))
+ || !memcmp(&port->macaddr[0], &eth_h->d_addr, 6)) {
+ populate_nd_entry(src_hw_addr, src_ipv6, port->pmdid,
+ DYNAMIC_ND);
+
+ /* build a Neighbor Advertisement message */
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ req_tipv6[i] = icmpv6_nd_h->target_ipv6[i];
+
+ if (!memcmp
+ (&req_tipv6[0],
+ &((ipv6list_t *) port->ipv6_list)->ipaddr[0],
+ 16)) {
+
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&port->
+ macaddr[0], &eth_h->s_addr);
+
+ /* set sender mac address */
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6_h->dst_addr[i] =
+ ipv6_h->src_addr[i];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6_h->src_addr[i] = req_tipv6[i];
+ icmpv6_h->icmpv6_type =
+ ICMPV6_NEIGHBOR_ADVERTISEMENT;
+ icmpv6_nd_h->type = e_Target_Link_Layer_Address;
+ icmpv6_nd_h->length = 1;
+ memcpy(&icmpv6_nd_h->link_layer_addr[0],
+ &port->macaddr[0], 6);
+ icmpv6_nd_h->icmpv6_reserved = 0;
+ icmpv6_nd_h->icmpv6_reserved |=
+ rte_cpu_to_be_32
+ (NEIGHBOR_ROUTER_OVERRIDE_SET);
+
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum =
+ ~icmpv6_ipv6_nd_checksum(pkt);
+
+ port->transmit_bulk_pkts(port, &pkt, 1);
+
+ } else if (ARPICMP_DEBUG) {
+ printf
+ ("............Some one else is the target host here !!!\n");
+ }
+
+ return;
+ } else {
+ if (ARPICMP_DEBUG) {
+ printf
+ ("...............Malformed ND Solicitation message!!!\n");
+ }
+ }
+
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_NEIGHBOR_ADVERTISEMENT)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ struct ether_addr *src_hw_addr = &eth_h->s_addr;
+ uint8_t ipv6[16];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++) {
+ ipv6[i] = ipv6_h->src_addr[i];
+
+ }
+ populate_nd_entry(src_hw_addr, ipv6, port->pmdid, DYNAMIC_ND);
+ } else {
+ if (ARPICMP_DEBUG) {
+ printf("ICMPv6 Type %d Not Supported yet !!!\n",
+ icmpv6_h->icmpv6_type);
+ }
+ }
+
+ rte_pktmbuf_free(pkt);
+}
+
+struct rte_mbuf *request_icmpv6_echo(uint8_t ipv6[], l2_phy_interface_t *port)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_info_hdr *icmpv6_info_h;
+ int i;
+ uint8_t *icmp_data;
+
+ struct rte_mbuf *icmpv6_pkt = lib_icmpv6_pkt;
+ if (icmpv6_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmpv6_pkt rte_mbuf\n");
+ return NULL;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmpv6_pkt, struct ether_hdr *);
+
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+ icmpv6_info_h =
+ (struct icmpv6_info_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = rte_bswap16(0x86dd);
+ for (i = 0; i < 6; i++) {
+ eth_h->d_addr.addr_bytes[i] = 0;
+ }
+
+ ipv6_h->vtc_flow = rte_bswap32(0x60000000);
+ ipv6_h->payload_len = rte_bswap16(64);
+ ipv6_h->proto = 58;
+ ipv6_h->hop_limits = 64;
+
+ for (i = 0; i < 16; i++) {
+ ipv6_h->src_addr[i] = 0x0;
+ ipv6_h->dst_addr[i] = ipv6[i];
+ }
+
+ icmpv6_h->icmpv6_type = ICMPV6_ECHO_REQUEST;
+ icmpv6_h->icmpv6_code = 0;
+ icmpv6_info_h->icmpv6_ident = rte_bswap16(0x5151);
+ icmpv6_info_h->icmpv6_seq_nb = rte_bswap16(0x1);
+
+ icmp_data = (uint8_t *) icmpv6_h + 8;
+ for (i = 0; i < 56; i++) {
+ *icmp_data = i + 1;
+ icmp_data++;
+ }
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum = ~icmpv6_ipv6_echo_checksum(icmpv6_pkt);
+
+ icmpv6_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr) + 64;
+ icmpv6_pkt->data_len = icmpv6_pkt->pkt_len;
+
+ return icmpv6_pkt;
+}
+
+struct rte_mbuf *request_nd(uint8_t ipv6[], l2_phy_interface_t *port)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_nd_hdr *icmpv6_nd_h;
+ int i;
+
+ struct rte_mbuf *icmpv6_pkt = lib_icmpv6_pkt;
+ if (icmpv6_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmpv6_pkt rte_mbuf\n");
+ return NULL;
+ }
+
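+ /* Build a Neighbor Solicitation in the shared mbuf: IPv6 header with
+  * a 32-byte payload, ICMPv6 NS carrying the target address, and a
+  * Source Link-Layer Address option holding this port's MAC. The
+  * Ethernet destination is left zeroed here. */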
+ eth_h = rte_pktmbuf_mtod(icmpv6_pkt, struct ether_hdr *);
+
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+ icmpv6_nd_h =
+ (struct icmpv6_nd_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+
+ ether_addr_copy((struct ether_addr *)&port->macaddr[0], &eth_h->s_addr);
+ eth_h->ether_type = rte_bswap16(0x86dd);
+ for (i = 0; i < 6; i++) {
+ eth_h->d_addr.addr_bytes[i] = 0;
+ }
+
+ ipv6_h->vtc_flow = rte_bswap32(0x60000000);
+ ipv6_h->payload_len = rte_bswap16(32);
+ ipv6_h->proto = 58;
+ ipv6_h->hop_limits = 64;
+
+ for (i = 0; i < 16; i++) {
+ ipv6_h->src_addr[i] = 0x0;
+ ipv6_h->dst_addr[i] = ipv6[i];
+ }
+
+ icmpv6_h->icmpv6_type = ICMPV6_NEIGHBOR_SOLICITATION;
+ icmpv6_h->icmpv6_code = 0;
+
+ icmpv6_nd_h->icmpv6_reserved = 0x0;
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ icmpv6_nd_h->target_ipv6[i] = ipv6[i];
+ icmpv6_nd_h->type = e_Source_Link_Layer_Address;
+ icmpv6_nd_h->length = 1;
+ memcpy(&icmpv6_nd_h->link_layer_addr[0], &port->macaddr[0], 6);
+
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum = ~icmpv6_ipv6_nd_checksum(icmpv6_pkt);
+
+ icmpv6_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr) + 32;
+ icmpv6_pkt->data_len = icmpv6_pkt->pkt_len;
+
+ return icmpv6_pkt;
+}
diff --git a/common/VIL/l2l3_stack/lib_icmpv6.h b/common/VIL/l2l3_stack/lib_icmpv6.h
new file mode 100644
index 00000000..e9ccca14
--- /dev/null
+++ b/common/VIL/l2l3_stack/lib_icmpv6.h
@@ -0,0 +1,113 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+/* Author - Santosh Sethupathi */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include "lib_arp.h"
+#include <rte_pipeline.h>
+#include "rte_ether.h"
+
+/**
+* ICMPv6 Header
+*/
+
+struct icmpv6_hdr {
+ uint8_t icmpv6_type; /**< ICMPV6 packet type. */
+ uint8_t icmpv6_code; /**< ICMPV6 packet code. */
+ uint16_t icmpv6_cksum; /**< ICMPV6 packet checksum. */
+} __attribute__ ((__packed__));
+
+/**
+* ICMPV6 Info Header
+*/
+struct icmpv6_info_hdr {
+ uint16_t icmpv6_ident; /**< ICMPV6 packet identifier. */
+ uint16_t icmpv6_seq_nb; /**< ICMPV6 packet sequence number. */
+} __attribute__ ((__packed__));
+
+/**
+ * ICMPV6 ND Header
+ */
+struct icmpv6_nd_hdr {
+ /*ND Advertisement flags */
+ uint32_t icmpv6_reserved; /**< bit31-Router, bit30-Solicited, bit29-Override, bit28-bit0 unused */
+
+ uint8_t target_ipv6[16]; /**< target IPv6 address */
+ uint8_t type; /**< ICMPv6 Option*/
+ uint8_t length; /**< Length */
+ uint8_t link_layer_addr[6]; /**< Link layer address */
+} __attribute__ ((__packed__));
+
+/* Icmpv6 types */
+#define ICMPV6_PROTOCOL_ID 58
+#define ICMPV6_ECHO_REQUEST 0x0080
+#define ICMPV6_ECHO_REPLY 0x0081
+#define ICMPV6_NEIGHBOR_SOLICITATION 0x0087
+#define ICMPV6_NEIGHBOR_ADVERTISEMENT 0x0088
+#define IPV6_MULTICAST 0xFF02
+
+#define NEIGHBOR_SOLICITATION_SET 0x40000000
+#define NEIGHBOR_ROUTER_OVERRIDE_SET 0xa0000000
+enum icmpv6_link_layer_Address_type {
+ e_Source_Link_Layer_Address = 1,
+ e_Target_Link_Layer_Address,
+ e_Link_Layer_Address
+};
+
+/* Checks whether ipv6 is multicast
+ * @param ipv6
+ */
+uint8_t is_multicast_ipv6_addr(uint8_t ipv6[]);
+
+/**
+*Icmpv6 Port address
+*/
+struct icmpv6_port_address {
+ uint32_t ipv6[16]; /**< Ipv6 address */
+ uint64_t mac_addr; /**< Mac address */
+};
+
+/**
+* To store Icmpv6 Port address
+*/
+struct icmpv6_port_address icmpv6_port_addresses[RTE_MAX_ETHPORTS];
+
+#define MAX_NUM_ICMPv6_ENTRIES 64
+struct rte_mbuf *lib_icmpv6_pkt;
+
+/**
+ * Processes icmpv6 packets
+ * @param pkt
+ * pkt mbuf packets
+ * @param port
+ * port - port structure
+ */
+void process_icmpv6_pkt(struct rte_mbuf *pkt, l2_phy_interface_t *port);
diff --git a/common/VIL/l2l3_stack/main_l2l3.c b/common/VIL/l2l3_stack/main_l2l3.c
new file mode 100644
index 00000000..08c97641
--- /dev/null
+++ b/common/VIL/l2l3_stack/main_l2l3.c
@@ -0,0 +1,304 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_MAIN_H__
+#define __INCLUDE_MAIN_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_vect.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_timer.h>
+#include "lib_arp.h"
+#include "l2_proto.h"
+#include "interface.h"
+#include "l3fwd_common.h"
+#include "l3fwd_lpm4.h"
+#include "l3fwd_lpm6.h"
+#define TIMER_RESOLUTION_CYCLES 20000000ULL /* around 10ms at 2 Ghz */
+unsigned lcore_id = 1;
+void convert_ipstr_to_numeric(void);
+struct sockaddr_in ipaddr1, ipaddr2, ipaddr3, ipaddr4;
+uint8_t ipv6_addr0[16] = {
+ 0, 0x64, 0xff, 0x9b, 0, 0, 0, 0, 0, 0, 0, 0, 0xc0, 0x10, 0x28, 0x15
+};
+
+uint8_t ipv6_addr1[16] = {
+ 0x12, 0x64, 0xff, 0x9b, 0, 0, 0, 0, 0, 0, 0, 0, 0xc0, 0x10, 0x28, 0x15
+};
+
+/*{port_id, nrx_queue, ntx_queue, adminstate, promisc}*/
+port_config_t portconf[5] = {
+ {
+ .port_id = 0,
+ .nrx_queue = 1,
+ .ntx_queue = 1,
+ .state = 1,
+ .promisc = 1,
+ .mempool = {
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) +
+ RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+ },
+ .port_conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 0, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,},
+ .lpbk_mode = 0,
+ .intr_conf = {
+ .lsc = 1,
+ /**< lsc interrupt feature enabled */
+ }
+ },
+ .rx_conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ },
+ .tx_conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+ },
+ {
+ .port_id = 1,
+ .nrx_queue = 1,
+ .ntx_queue = 1,
+ .state = 1,
+ .promisc = 1,
+ .mempool = {
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) +
+ RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+ },
+ .port_conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 0, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,},
+ .lpbk_mode = 0,
+ .intr_conf = {
+ .lsc = 1,
+ /**< lsc interrupt feature enabled */
+ }
+ },
+ .rx_conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ },
+ .tx_conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+ },
+};
+
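+/*
+ * Per-lcore polling loop: iterates over every configured port, flushes
+ * the per-port TX buffer on the BURST_TX_DRAIN_US deadline, receives a
+ * burst of packets and hands it to protocol_handler_recv().
+ */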
+static __attribute__ ((noreturn))
+int lcore_mainloop(__attribute__ ((unused)) void *arg)
+{
+ l2_phy_interface_t *port;
+ int8_t portid;
+ struct rte_mbuf *pkts_burst[IFM_BURST_SIZE];
+ uint32_t nb_tx, nb_rx;
+ const uint64_t drain_tsc =
+ (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+ uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
+ while (1) {
+ port = ifm_get_first_port();
+ while (port != NULL) {
+ rte_timer_manage();
+ portid = port->pmdid;
+ cur_tsc = rte_rdtsc();
+ diff_tsc = cur_tsc - prev_tsc;
+
+ /* call rx function ptr from port, with port.arpq, */
+ if (unlikely(diff_tsc > drain_tsc)) {
+ if (port->tx_buf_len > 0) {
+ RTE_SET_USED(nb_tx);
+
+ //nb_tx = port->transmit_bulk_pkts(port, port->tx_buf, port->tx_buf_len);
+ port->tx_buf_len = 0;
+ }
+ prev_tsc = cur_tsc;
+ }
+ nb_rx = port->retrieve_bulk_pkts(portid, 0, pkts_burst);
+ port->n_rxpkts += nb_rx;
+ protocol_handler_recv(pkts_burst, nb_rx, port);
+ port = ifm_get_next_port(portid);
+ if (port != NULL)
+ prev_tsc = cur_tsc;
+ }
+ }
+}
+
+void convert_ipstr_to_numeric(void)
+{
+ memset(&ipaddr1, '\0', sizeof(struct sockaddr_in));
+ ipaddr1.sin_addr.s_addr = inet_addr("30.0.0.10");
+ memset(&ipaddr2, '\0', sizeof(struct sockaddr_in));
+ ipaddr2.sin_addr.s_addr = inet_addr("120.0.0.10");
+}
+
+int main(int argc, char **argv)
+{
+ int ret = 0;
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+ /* Port init */
+ //lib_arp_init();
+ ifm_init();
+ ifm_configure_ports(portconf);
+
+ //convert_ipstr_to_numeric();
+ //ifm_add_ipv4_port(0, ipaddr1.sin_addr.s_addr, 24);
+ //ifm_add_ipv4_port(1, ipaddr2.sin_addr.s_addr, 24);
+ ifm_add_ipv6_port(0, ipv6_addr0, 96);
+ ifm_add_ipv6_port(1, ipv6_addr1, 96);
+ print_interface_details();
+
+ //filter_init();
+ l3fwd_init();
+ create_arp_table();
+ create_nd_table();
+ populate_lpm_routes();
+ /*call the main loop */
+ /* launch per-lcore init on every lcore */
+ int ii;
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", ipv6_addr0[ii], ipv6_addr0[ii + 1]);
+ }
+ printf("\n");
+ for (ii = 0; ii < 16; ii += 2) {
+ printf("%02X%02X ", ipv6_addr1[ii], ipv6_addr1[ii + 1]);
+ }
+ printf("REMOTE LAUNCH STARTED........\n");
+ rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
+ printf("REMOTE LAUNCH DONE.......\n");
+ if (rte_eal_wait_lcore(lcore_id) < 0) {
+ }
+ return 0;
+}
+#endif
diff --git a/common/VIL/l2l3_stack/tsx.c b/common/VIL/l2l3_stack/tsx.c
new file mode 100644
index 00000000..a361c945
--- /dev/null
+++ b/common/VIL/l2l3_stack/tsx.c
@@ -0,0 +1,167 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <immintrin.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+#include <stdint.h>
+#include "rte_atomic.h"
+#include "tsx.h"
+int max_retries = 3;
+
+static void
+run_cpuid (uint32_t eax, uint32_t ecx, uint32_t *abcd)
+{
+ uint32_t ebx = 0, edx = 0;
+
+#if defined(__i386__) && defined (__PIC__)
+ /* in case of PIC under 32-bit EBX cannot be clobbered */
+__asm__ ("movl %%ebx, %%edi \n\t cpuid \n\t xchgl %%ebx, %%edi":"=D" (ebx),
+#else
+__asm__ ("cpuid":"+b" (ebx),
+#endif
+ "+a" (eax), "+c" (ecx), "=d" (edx));
+ abcd[0] = eax;
+ abcd[1] = ebx;
+ abcd[2] = ecx;
+ abcd[3] = edx;
+}
+
+static int
+check_xcr0_ymm (void)
+{
+ uint32_t xcr0;
+ __asm__ ("xgetbv" : "=a" (xcr0) : "c" (0) : "%edx");
+ return ((xcr0 & 6) == 6); /* checking if xmm and ymm state are enabled in XCR0 */
+}
+
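+/*
+ * Verify via CPUID that the CPU supports the 4th-generation Core features
+ * used here: FMA/MOVBE/OSXSAVE, AVX2/BMI1/BMI2, LZCNT, and the TSX RTM
+ * and HLE extensions.
+ */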
+static int
+check_4th_gen_intel_core_features (void)
+{
+ uint32_t abcd[4];
+ uint32_t fma_movbe_osxsave_mask = ((1 << 12) | (1 << 22) | (1 << 27));
+ uint32_t avx2_bmi12_mask = (1 << 5) | (1 << 3) | (1 << 8);
+
+ /* CPUID.(EAX=01H, ECX=0H):ECX.FMA[bit 12]==1 &&
+ CPUID.(EAX=01H, ECX=0H):ECX.MOVBE[bit 22]==1 &&
+ CPUID.(EAX=01H, ECX=0H):ECX.OSXSAVE[bit 27]==1 */
+ run_cpuid (1, 0, abcd);
+ if ((abcd[2] & fma_movbe_osxsave_mask) != fma_movbe_osxsave_mask) {
+ printf ("Failing in if cond-1\n");
+ return 0;
+ }
+ if (!check_xcr0_ymm ()) {
+ printf ("Failing in if cond-2\n");
+ return 0;
+ }
+
+ /* CPUID.(EAX=07H, ECX=0H):EBX.AVX2[bit 5]==1 &&
+ CPUID.(EAX=07H, ECX=0H):EBX.BMI1[bit 3]==1 &&
+ CPUID.(EAX=07H, ECX=0H):EBX.BMI2[bit 8]==1 */
+ run_cpuid (7, 0, abcd);
+ if ((abcd[1] & avx2_bmi12_mask) != avx2_bmi12_mask) {
+ printf ("Failing in if cond-3\n");
+ return 0;
+ }
+ /* CPUID.(EAX=80000001H):ECX.LZCNT[bit 5]==1 */
+ run_cpuid (0x80000001, 0, abcd);
+ if ((abcd[2] & (1 << 5)) == 0) {
+ printf ("Failing in if cond-4\n");
+ return 0;
+ }
+ /* CPUID.(EAX=07H, ECX=0H).EBX.RTM[bit 11]==1 */
+ run_cpuid (7, 0, abcd);
+ if ((abcd[1] & (1 << 11)) == 0) {
+ printf ("Failing in if cond-5\n");
+ return 0;
+ }
+ /* CPUID.(EAX=07H, ECX=0H).EBX.HLE[bit 4]==1 */
+ run_cpuid (7, 0, abcd);
+ if ((abcd[1] & (1 << 4)) == 0) {
+ printf ("Failing in if cond-6\n");
+ return 0;
+ }
+ return 1;
+}
+
+int
+can_use_intel_core_4th_gen_features (void)
+{
+ static int the_4th_gen_features_available = -1;
+ /* test is performed once */
+ if (the_4th_gen_features_available < 0)
+ the_4th_gen_features_available = check_4th_gen_intel_core_features ();
+ return the_4th_gen_features_available;
+}
+
+void
+rtm_init (void)
+{
+ naborted = (rte_atomic64_t) RTE_ATOMIC64_INIT (0);
+
+ //RTE_ATOMIC64_INIT(naborted);
+}
+
+/*
+ * Acquire the lock: try to run speculatively inside an RTM transaction
+ * first, and fall back to the HLE lock (hle_lock()) when the transaction
+ * keeps aborting or the abort is not retryable.
+ */
+int
+rtm_lock (void)
+{
+ int nretries = 0;
+ while (1) {
+ ++nretries;
+ unsigned int status = _xbegin ();
+ if (status == _XBEGIN_STARTED) {
+ if (!is_hle_locked ())
+ return 1; // successfully started transaction
+ // started transaction but someone executes the transaction section
+ // non-speculatively (acquired the fall-back lock)
+ _xabort (0xff); // abort with code 0xff
+ }
+ // abort handler
+ rte_atomic64_inc (&naborted); // do abort statistics
+ printf
+ ("DEBUG: Transaction aborted: %d time(s) with the status: %u\n",
+ nretries, status);
+ // handle _xabort(0xff) from above
+ if ((status & _XABORT_EXPLICIT)
+ && _XABORT_CODE (status) == 0xff && !(status & _XABORT_NESTED)) {
+ while (is_hle_locked ())
+ _mm_pause (); // wait until lock is free
+ }
+ else if (!(status & _XABORT_RETRY))
+ break; // take the fall-back lock if the retry abort flag is not set
+ if (nretries >= max_retries)
+ break; // too many retries, take the fall-back lock
+ }
+ hle_lock ();
+ return 1;
+}
+
+int
+rtm_unlock (void)
+{
+ if (is_hle_locked ())
+ hle_release ();
+
+ else
+ _xend ();
+ return 1;
+}
+
+int
+is_rtm_locked (void)
+{
+ return ((int) _xtest ());
+}
diff --git a/common/VIL/l2l3_stack/tsx.h b/common/VIL/l2l3_stack/tsx.h
new file mode 100644
index 00000000..8b748165
--- /dev/null
+++ b/common/VIL/l2l3_stack/tsx.h
@@ -0,0 +1,38 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef _TSX_H_
+#define _TSX_H_
+#include <rte_atomic.h>
+#define TRUE 1
+#define FALSE 0
+
+volatile int mutex_val;
+
+rte_atomic64_t naborted;
+
+void hle_init(void);
+int hle_lock(void);
+int hle_release(void);
+int is_hle_locked(void);
+
+void rtm_init(void);
+int rtm_lock(void);
+int rtm_unlock(void);
+int is_rtm_locked(void);
+
+int can_use_intel_core_4th_gen_features(void);
+
+#endif
diff --git a/common/VIL/pipeline_arpicmp/pipeline_arpicmp.c b/common/VIL/pipeline_arpicmp/pipeline_arpicmp.c
new file mode 100644
index 00000000..1ea9e749
--- /dev/null
+++ b/common/VIL/pipeline_arpicmp/pipeline_arpicmp.c
@@ -0,0 +1,2118 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_arpicmp_be.h"
+#include "pipeline_arpicmp.h"
+#include "vnf_common.h"
+
+#include "app.h"
+#include "vnf_common.h"
+#ifndef VNF_ACL
+#include "lib_arp.h"
+#endif
+
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+
+uint16_t verbose_level = 1; /**< Verbose by default; set to 0 for silent. */
+uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
+
+/*
+ * Work-around of a compilation error with ICC on invocations of the
+ * rte_be_to_cpu_16() function.
+ */
+#ifdef __GCC__
+#define RTE_BE_TO_CPU_16(be_16_v) rte_be_to_cpu_16((be_16_v))
+#define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v))
+#else
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define RTE_BE_TO_CPU_16(be_16_v) (be_16_v)
+#define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v)
+#else
+#define RTE_BE_TO_CPU_16(be_16_v) \
+ ((uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8)))
+#define RTE_CPU_TO_BE_16(cpu_16_v) \
+ ((uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8)))
+#endif
+#endif
+
+/*
+ * arp add
+ */
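+/* CLI: p <pipeline> arpadd <port_id> <ip> <mac>; an IPv4 address adds an ARP
+ * entry, an IPv6 address adds an ND entry. */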
+
+struct cmd_arp_add_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arpadd_string;
+ uint32_t port_id;
+ cmdline_ipaddr_t ip;
+ struct ether_addr macaddr;
+
+};
+
+static void
+cmd_arp_add_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ struct cmd_arp_add_result *params = parsed_result;
+ uint8_t ipv6[16];
+
+ #if 0
+ struct pipeline_arp_icmp_arp_key key;
+ key.type = PIPELINE_ARP_ICMP_ARP_IPV4;
+ key.key.ipv4.port_id = params->port_id;
+ key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
+ populate_arp_entry(&req->macaddr, rte_bswap32(req->key.key.ipv4.ip),
+ req->key.key.ipv4.port_id);
+ #endif
+ if (params->ip.family == AF_INET) {
+ populate_arp_entry(&params->macaddr,
+ rte_cpu_to_be_32(params->ip.addr.
+ ipv4.s_addr),
+ params->port_id
+ #ifndef VNF_ACL
+ , STATIC_ARP
+ #endif
+ );
+ } else {
+ memcpy(ipv6, params->ip.addr.ipv6.s6_addr, 16);
+ populate_nd_entry(&params->macaddr, ipv6, params->port_id
+ #ifndef VNF_ACL
+ , STATIC_ND
+ #endif
+ );
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_add_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_add_p =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_add_arp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, arpadd_string, "arpadd");
+
+static cmdline_parse_token_num_t cmd_arp_add_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, port_id, UINT32);
+
+static cmdline_parse_token_ipaddr_t cmd_arp_add_ip =
+TOKEN_IPADDR_INITIALIZER(struct cmd_arp_add_result, ip);
+
+static cmdline_parse_token_etheraddr_t cmd_arp_add_macaddr =
+TOKEN_ETHERADDR_INITIALIZER(struct cmd_arp_add_result, macaddr);
+
+static cmdline_parse_inst_t cmd_arp_add = {
+ .f = cmd_arp_add_parsed,
+ .data = NULL,
+ .help_str = "ARP add",
+ .tokens = {
+ (void *)&cmd_arp_add_p_string,
+ (void *)&cmd_arp_add_p,
+ (void *)&cmd_arp_add_arp_string,
+ (void *)&cmd_arp_add_port_id,
+ (void *)&cmd_arp_add_ip,
+ (void *)&cmd_arp_add_macaddr,
+ NULL,
+ },
+};
+
+/*
+ * arp del
+ */
+
+struct cmd_arp_del_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ uint32_t port_id;
+ cmdline_ipaddr_t ip;
+};
+
+static void
+cmd_arp_del_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+ uint8_t ipv6[16];
+
+ #if 0
+ struct pipeline_arp_icmp_arp_key key;
+ key.type = PIPELINE_ARP_ICMP_ARP_IPV4;
+ key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
+ key.key.ipv4.port_id = params->port_id;
+ remove_arp_entry(rte_bswap32(req->key.key.ipv4.ip),
+ req->key.key.ipv4.port_id);
+ #endif
+ if (params->ip.family == AF_INET) {
+ remove_arp_entry(rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr),
+ params->port_id
+ #ifndef VNF_ACL
+ , NULL
+ #endif
+ );
+ } else {
+ memcpy(ipv6, params->ip.addr.ipv6.s6_addr, 16);
+ remove_nd_entry_ipv6(ipv6, params->port_id);
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_del_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_del_p =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_del_arp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arpdel");
+
+static cmdline_parse_token_num_t cmd_arp_del_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, port_id, UINT32);
+
+static cmdline_parse_token_ipaddr_t cmd_arp_del_ip =
+TOKEN_IPADDR_INITIALIZER(struct cmd_arp_del_result, ip);
+
+static cmdline_parse_inst_t cmd_arp_del = {
+ .f = cmd_arp_del_parsed,
+ .data = NULL,
+ .help_str = "ARP delete",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_arp_del_arp_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arp req
+ */
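+/* CLI: p <pipeline> arpreq <port_id> <ip>; sends an ARP request for <ip>
+ * unless an entry for it already exists. */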
+
+/*Re-uses delete structures*/
+
+static void
+cmd_arp_req_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+ /*struct app_params *app = data;*/
+
+ struct arp_key_ipv4 key;
+/* int status;*/
+
+/* key.type = ARP_IPV4;*/
+/* key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);*/
+/* key.key.ipv4.port_id = params->port_id;*/
+ key.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
+ key.port_id = params->port_id;
+ key.filler1 = 0;
+ key.filler2 = 0;
+ key.filler3 = 0;
+
+ struct arp_entry_data *arp_data = retrieve_arp_entry(key);
+
+ if (arp_data) {
+ if (ARPICMP_DEBUG)
+ printf("ARP entry exists for ip 0x%x, port %d\n",
+ params->ip.addr.ipv4.s_addr, params->port_id);
+ return;
+ }
+ /* else request an arp*/
+ if (ARPICMP_DEBUG)
+ printf("ARP - requesting arp for ip 0x%x, port %d\n",
+ params->ip.addr.ipv4.s_addr, params->port_id);
+
+ #ifdef VNF_ACL
+ request_arp_wrap(params->port_id, params->ip.addr.ipv4.s_addr);
+ #else
+ request_arp(params->port_id, params->ip.addr.ipv4.s_addr);
+ #endif
+ /*give pipeline number too*/
+}
+
+static cmdline_parse_token_string_t cmd_arp_req_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arpreq");
+
+static cmdline_parse_inst_t cmd_arp_req = {
+ .f = cmd_arp_req_parsed,
+ .data = NULL,
+ .help_str = "ARP request",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_arp_req_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arpicmp echo req
+ */
+
+/*Re-uses delete structures*/
+
+static void
+cmd_icmp_echo_req_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+
+ if (ARPICMP_DEBUG)
+ printf("Echo Req Handler ip %x, port %d\n",
+ params->ip.addr.ipv4.s_addr, params->port_id);
+
+ request_echo(params->port_id, params->ip.addr.ipv4.s_addr);
+}
+
+static cmdline_parse_token_string_t cmd_icmp_echo_req_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "icmpecho");
+
+static cmdline_parse_inst_t cmd_icmp_echo_req = {
+ .f = cmd_icmp_echo_req_parsed,
+ .data = NULL,
+ .help_str = "ICMP echo request",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_icmp_echo_req_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arp ls
+ */
+
+struct cmd_arp_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+};
+
+static void
+cmd_arp_ls_parsed(__rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ printf("\nARP table ...\n");
+ printf("-------------\n");
+ print_arp_table();
+
+ printf
+ ("............................................................\n");
+
+ printf("\nND IPv6 table:\n");
+ printf("--------------\n");
+ print_nd_table();
+}
+
+static cmdline_parse_token_string_t cmd_arp_ls_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_ls_p =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_ls_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_ls_arp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
+ "arpls");
+
+static cmdline_parse_inst_t cmd_arp_ls = {
+ .f = cmd_arp_ls_parsed,
+ .data = NULL,
+ .help_str = "ARP list",
+ .tokens = {
+ (void *)&cmd_arp_ls_p_string,
+ (void *)&cmd_arp_ls_p,
+ (void *)&cmd_arp_ls_arp_string,
+ NULL,
+ },
+};
+
+/*
+ * show ports info
+ */
+
+struct cmd_show_ports_info_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+};
+
+static void
+cmd_show_ports_info_parsed(__rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ show_ports_info();
+}
+
+static cmdline_parse_token_string_t cmd_show_ports_info_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
+ "showPortsInfo");
+
+static cmdline_parse_inst_t cmd_show_ports_info = {
+ .f = cmd_show_ports_info_parsed,
+ .data = NULL,
+ .help_str = "show ports info",
+ .tokens = {
+ (void *)&cmd_arp_ls_p_string,
+ (void *)&cmd_arp_ls_p,
+ (void *)&cmd_show_ports_info_string,
+ NULL,
+ },
+};
+
+#ifndef VNF_ACL
+struct cmd_arp_dbg_result {
+ cmdline_fixed_string_t arpdbg_str;
+ uint32_t flag;
+};
+
+cmdline_parse_token_string_t cmd_arp_dbg_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_dbg_result, arpdbg_str,
+ "arpdbg");
+cmdline_parse_token_num_t cmd_arp_dbg_flag =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_dbg_result, flag, UINT32);
+
+static void
+cmd_arp_dbg_parse(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_arp_dbg_result *params = parsed_result;
+ if(params)
+ {
+ set_arpdebug(params->flag);
+ }
+ else
+ {
+ printf("%s: Params is NULL",__FUNCTION__);
+ }
+}
+
+cmdline_parse_inst_t cmd_arp_dbg = {
+ .f = cmd_arp_dbg_parse,
+ .data = NULL,
+ .help_str = "Turn on/off(1/0) arp debug",
+ .tokens = {
+ (void *)&cmd_arp_dbg_string,
+ (void *)&cmd_arp_dbg_flag,
+ NULL,
+ },
+};
+
+struct cmd_arp_timer_result {
+ cmdline_fixed_string_t arptimer_str;
+ uint32_t arptimer_val;
+};
+
+cmdline_parse_token_string_t cmd_arp_timer_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_timer_result, arptimer_str,
+ "arptimerexpiry");
+cmdline_parse_token_num_t cmd_arp_timer_val =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_timer_result, arptimer_val, UINT32);
+
+static void
+cmd_arp_timer_parse(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_arp_timer_result *params = parsed_result;
+ if(params)
+ {
+ set_arptimeout(params->arptimer_val);
+ }
+ else
+ {
+ printf("%s: Params is NULL",__FUNCTION__);
+ }
+}
+
+cmdline_parse_inst_t cmd_arp_timer = {
+ .f = cmd_arp_timer_parse,
+ .data = NULL,
+ .help_str = "Timer expiry val by def 10 sec",
+ .tokens = {
+ (void *)&cmd_arp_timer_string,
+ (void *)&cmd_arp_timer_val,
+ NULL,
+ },
+};
+#endif
+
+/*
+ * Forwarding of packets in I/O mode.
+ * Forward packets "as-is".
+ * This is the fastest possible forwarding operation, as it does not access
+ * the packet data at all.
+ */
+static void
+pkt_burst_io_forward(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ uint16_t nb_rx;
+ uint16_t nb_tx;
+
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+ #endif
+
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+ #endif
+
+ /*
+ * Receive a burst of packets and forward them.
+ */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+ nb_pkt_per_burst);
+ if (unlikely(nb_rx == 0))
+ return;
+
+ #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+ #endif
+
+ fs->rx_packets += nb_rx;
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+ fs->tx_packets += nb_tx;
+
+ #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+ #endif
+
+ if (unlikely(nb_tx < nb_rx)) {
+ fs->fwd_dropped += (nb_rx - nb_tx);
+ do {
+ rte_pktmbuf_free(pkts_burst[nb_tx]);
+ } while (++nb_tx < nb_rx);
+ }
+
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+ #endif
+}
+
+
+struct fwd_engine io_fwd_engine = {
+ .fwd_mode_name = "io",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = pkt_burst_io_forward,
+};
+
+static inline void print_ether_addr(
+ const char *what,
+ struct ether_addr *eth_addr)
+{
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", what, buf);
+}
+
+/*
+ * Receive a burst of packets and, when verbose_level > 0, dump each one.
+ */
+static void
+pkt_burst_receive(struct fwd_stream *fs)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *mb;
+ struct ether_hdr *eth_hdr;
+ uint16_t eth_type;
+ uint64_t ol_flags;
+ uint16_t nb_rx;
+ uint16_t i, packet_type;
+ uint16_t is_encapsulation;
+
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t start_tsc;
+ uint64_t end_tsc;
+ uint64_t core_cycles;
+ #endif
+
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ start_tsc = rte_rdtsc();
+ #endif
+
+ /*
+ * Receive a burst of packets.
+ */
+ nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+ nb_pkt_per_burst);
+ if (unlikely(nb_rx == 0))
+ return;
+
+ #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+ #endif
+
+ fs->rx_packets += nb_rx;
+
+ /*
+ * Dump each received packet if verbose_level > 0.
+ */
+ if (verbose_level > 0)
+ printf("port %u/queue %u: received %u packets\n",
+ (unsigned int) fs->rx_port,
+ (unsigned int) fs->rx_queue,
+ (unsigned int) nb_rx);
+ for (i = 0; i < nb_rx; i++) {
+ mb = pkts_burst[i];
+ if (verbose_level == 0) {
+ rte_pktmbuf_free(mb);
+ continue;
+ }
+ eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+ eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
+ ol_flags = mb->ol_flags;
+ packet_type = mb->packet_type;
+ is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type);
+
+ print_ether_addr(" src=", &eth_hdr->s_addr);
+ print_ether_addr(" - dst=", &eth_hdr->d_addr);
+ printf(" - type=0x%04x - length=%u - nb_segs=%d",
+ eth_type, (unsigned int) mb->pkt_len,
+ (int)mb->nb_segs);
+ if (ol_flags & PKT_RX_RSS_HASH) {
+ printf(" - RSS hash=0x%x", (unsigned int)
+ mb->hash.rss);
+ printf(" - RSS queue=0x%x", (unsigned int)
+ fs->rx_queue);
+ } else if (ol_flags & PKT_RX_FDIR) {
+ printf(" - FDIR matched ");
+ if (ol_flags & PKT_RX_FDIR_ID)
+ printf("ID=0x%x",
+ mb->hash.fdir.hi);
+ else if (ol_flags & PKT_RX_FDIR_FLX)
+ printf("flex bytes=0x%08x %08x",
+ mb->hash.fdir.hi, mb->hash.fdir.lo);
+ else
+ printf("hash=0x%x ID=0x%x ",
+ mb->hash.fdir.hash, mb->hash.fdir.id);
+ }
+ if (ol_flags & PKT_RX_VLAN_PKT)
+ printf(" - VLAN tci=0x%x", mb->vlan_tci);
+ if (ol_flags & PKT_RX_QINQ_PKT)
+ printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
+ mb->vlan_tci, mb->vlan_tci_outer);
+ if (mb->packet_type) {
+ uint32_t ptype;
+
+ /* (outer) L2 packet type */
+ ptype = mb->packet_type & RTE_PTYPE_L2_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_L2_ETHER:
+ printf(" - (outer) L2 type: ETHER");
+ break;
+ case RTE_PTYPE_L2_ETHER_TIMESYNC:
+ printf(" - (outer) L2 type: ETHER_Timesync");
+ break;
+ case RTE_PTYPE_L2_ETHER_ARP:
+ printf(" - (outer) L2 type: ETHER_ARP");
+ break;
+ case RTE_PTYPE_L2_ETHER_LLDP:
+ printf(" - (outer) L2 type: ETHER_LLDP");
+ break;
+ default:
+ printf(" - (outer) L2 type: Unknown");
+ break;
+ }
+
+ /* (outer) L3 packet type */
+ ptype = mb->packet_type & RTE_PTYPE_L3_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_L3_IPV4:
+ printf(" - (outer) L3 type: IPV4");
+ break;
+ case RTE_PTYPE_L3_IPV4_EXT:
+ printf(" - (outer) L3 type: IPV4_EXT");
+ break;
+ case RTE_PTYPE_L3_IPV6:
+ printf(" - (outer) L3 type: IPV6");
+ break;
+ case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
+ printf(" - (outer) L3 type: IPV4_EXT_UNKNOWN");
+ break;
+ case RTE_PTYPE_L3_IPV6_EXT:
+ printf(" - (outer) L3 type: IPV6_EXT");
+ break;
+ case RTE_PTYPE_L3_IPV6_EXT_UNKNOWN:
+ printf(" - (outer) L3 type: IPV6_EXT_UNKNOWN");
+ break;
+ default:
+ printf(" - (outer) L3 type: Unknown");
+ break;
+ }
+
+ /* (outer) L4 packet type */
+ ptype = mb->packet_type & RTE_PTYPE_L4_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_L4_TCP:
+ printf(" - (outer) L4 type: TCP");
+ break;
+ case RTE_PTYPE_L4_UDP:
+ printf(" - (outer) L4 type: UDP");
+ break;
+ case RTE_PTYPE_L4_FRAG:
+ printf(" - (outer) L4 type: L4_FRAG");
+ break;
+ case RTE_PTYPE_L4_SCTP:
+ printf(" - (outer) L4 type: SCTP");
+ break;
+ case RTE_PTYPE_L4_ICMP:
+ printf(" - (outer) L4 type: ICMP");
+ break;
+ case RTE_PTYPE_L4_NONFRAG:
+ printf(" - (outer) L4 type: L4_NONFRAG");
+ break;
+ default:
+ printf(" - (outer) L4 type: Unknown");
+ break;
+ }
+
+ /* packet tunnel type */
+ ptype = mb->packet_type & RTE_PTYPE_TUNNEL_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_TUNNEL_IP:
+ printf(" - Tunnel type: IP");
+ break;
+ case RTE_PTYPE_TUNNEL_GRE:
+ printf(" - Tunnel type: GRE");
+ break;
+ case RTE_PTYPE_TUNNEL_VXLAN:
+ printf(" - Tunnel type: VXLAN");
+ break;
+ case RTE_PTYPE_TUNNEL_NVGRE:
+ printf(" - Tunnel type: NVGRE");
+ break;
+ case RTE_PTYPE_TUNNEL_GENEVE:
+ printf(" - Tunnel type: GENEVE");
+ break;
+ case RTE_PTYPE_TUNNEL_GRENAT:
+ printf(" - Tunnel type: GRENAT");
+ break;
+ default:
+ printf(" - Tunnel type: Unknown");
+ break;
+ }
+
+ /* inner L2 packet type */
+ ptype = mb->packet_type & RTE_PTYPE_INNER_L2_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_INNER_L2_ETHER:
+ printf(" - Inner L2 type: ETHER");
+ break;
+ case RTE_PTYPE_INNER_L2_ETHER_VLAN:
+ printf(" - Inner L2 type: ETHER_VLAN");
+ break;
+ default:
+ printf(" - Inner L2 type: Unknown");
+ break;
+ }
+ /* inner L3 packet type */
+ ptype = mb->packet_type & RTE_PTYPE_INNER_L3_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_INNER_L3_IPV4:
+ printf(" - Inner L3 type: IPV4");
+ break;
+ case RTE_PTYPE_INNER_L3_IPV4_EXT:
+ printf(" - Inner L3 type: IPV4_EXT");
+ break;
+ case RTE_PTYPE_INNER_L3_IPV6:
+ printf(" - Inner L3 type: IPV6");
+ break;
+ case RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN:
+ printf(" - Inner L3 type: "
+ "IPV4_EXT_UNKNOWN");
+ break;
+ case RTE_PTYPE_INNER_L3_IPV6_EXT:
+ printf(" - Inner L3 type: IPV6_EXT");
+ break;
+ case RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN:
+ printf(" - Inner L3 type: "
+ "IPV6_EXT_UNKNOWN");
+ break;
+ default:
+ printf(" - Inner L3 type: Unknown");
+ break;
+ }
+
+ /* inner L4 packet type */
+ ptype = mb->packet_type & RTE_PTYPE_INNER_L4_MASK;
+ switch (ptype) {
+ case RTE_PTYPE_INNER_L4_TCP:
+ printf(" - Inner L4 type: TCP");
+ break;
+ case RTE_PTYPE_INNER_L4_UDP:
+ printf(" - Inner L4 type: UDP");
+ break;
+ case RTE_PTYPE_INNER_L4_FRAG:
+ printf(" - Inner L4 type: L4_FRAG");
+ break;
+ case RTE_PTYPE_INNER_L4_SCTP:
+ printf(" - Inner L4 type: SCTP");
+ break;
+ case RTE_PTYPE_INNER_L4_ICMP:
+ printf(" - Inner L4 type: ICMP");
+ break;
+ case RTE_PTYPE_INNER_L4_NONFRAG:
+ printf(" - Inner L4 type: L4_NONFRAG");
+ break;
+ default:
+ printf(" - Inner L4 type: Unknown");
+ break;
+ }
+ printf("\n");
+ } else
+ printf("Unknown packet type\n");
+ if (is_encapsulation) {
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct udp_hdr *udp_hdr;
+ uint8_t l2_len;
+ uint8_t l3_len;
+ uint8_t l4_len;
+ uint8_t l4_proto;
+ struct vxlan_hdr *vxlan_hdr;
+
+ l2_len = sizeof(struct ether_hdr);
+
+ /* Do not support ipv4 option field */
+ if (RTE_ETH_IS_IPV4_HDR(packet_type)) {
+ l3_len = sizeof(struct ipv4_hdr);
+ ipv4_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct ipv4_hdr *,
+ l2_len);
+ l4_proto = ipv4_hdr->next_proto_id;
+ } else {
+ l3_len = sizeof(struct ipv6_hdr);
+ ipv6_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct ipv6_hdr *,
+ l2_len);
+ l4_proto = ipv6_hdr->proto;
+ }
+ if (l4_proto == IPPROTO_UDP) {
+ udp_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct udp_hdr *,
+ l2_len + l3_len);
+ l4_len = sizeof(struct udp_hdr);
+ vxlan_hdr = rte_pktmbuf_mtod_offset(mb,
+ struct vxlan_hdr *,
+ l2_len + l3_len + l4_len);
+
+ printf(" - VXLAN packet: packet type =%d, "
+ "Destination UDP port =%d, VNI = %d",
+ packet_type,
+ RTE_BE_TO_CPU_16(udp_hdr->dst_port),
+ rte_be_to_cpu_32(
+ vxlan_hdr->vx_vni) >> 8);
+ }
+ }
+ printf(" - Receive queue=0x%x", (unsigned int) fs->rx_queue);
+ printf("\n");
+ if (ol_flags != 0) {
+ unsigned int rxf;
+ const char *name;
+
+ for (rxf = 0; rxf < sizeof(mb->ol_flags) * 8; rxf++) {
+ if ((ol_flags & (1ULL << rxf)) == 0)
+ continue;
+ name = rte_get_rx_ol_flag_name(1ULL << rxf);
+ if (name == NULL)
+ continue;
+ printf(" %s\n", name);
+ }
+ }
+ rte_pktmbuf_free(mb);
+ }
+
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ end_tsc = rte_rdtsc();
+ core_cycles = (end_tsc - start_tsc);
+ fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+ #endif
+}
+
+struct fwd_engine rx_only_engine = {
+ .fwd_mode_name = "rxonly",
+ .port_fwd_begin = NULL,
+ .port_fwd_end = NULL,
+ .packet_fwd = pkt_burst_receive,
+};
+
+/* *** SET FORWARDING MODE *** */
+struct cmd_set_fwd_mode_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t fwd;
+ cmdline_fixed_string_t mode;
+};
+
+/*
+ * Forwarding engines.
+ */
+struct fwd_engine *fwd_engines[] = {
+ &io_fwd_engine,
+ #if 0
+ &mac_fwd_engine,
+ &mac_retry_fwd_engine,
+ &mac_swap_engine,
+ &flow_gen_engine,
+ #endif
+ &rx_only_engine,
+ #if 0
+ &tx_only_engine,
+ &csum_fwd_engine,
+ &icmp_echo_engine,
+ #ifdef RTE_LIBRTE_IEEE1588
+ &ieee1588_fwd_engine,
+ #endif
+ #endif
+ NULL,
+};
+
+struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
+
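+/* Select the packet forwarding engine by name from fwd_engines[];
+ * cur_fwd_eng defaults to the "io" engine. */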
+void set_pkt_forwarding_mode(const char *fwd_mode_name)
+{
+ struct fwd_engine *fwd_eng;
+ unsigned int i;
+
+ i = 0;
+ while ((fwd_eng = fwd_engines[i]) != NULL) {
+ if (!strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
+ printf("Set %s packet forwarding mode\n",
+ fwd_mode_name);
+ cur_fwd_eng = fwd_eng;
+ return;
+ }
+ i++;
+ }
+ printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
+}
+
+static void cmd_set_fwd_mode_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_fwd_mode_result *res = parsed_result;
+
+ set_pkt_forwarding_mode(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_setfwd_set =
+TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setfwd_fwd =
+TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, fwd, "fwd");
+cmdline_parse_token_string_t cmd_setfwd_mode =
+TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, mode,
+ "rxonly" /* defined at init */);
+
+cmdline_parse_inst_t cmd_set_fwd_mode = {
+ .f = cmd_set_fwd_mode_parsed,
+ .data = NULL,
+ .help_str = NULL, /* defined at init */
+ .tokens = {
+ (void *)&cmd_setfwd_set,
+ (void *)&cmd_setfwd_fwd,
+ (void *)&cmd_setfwd_mode,
+ NULL,
+ },
+};
+
+#if 1
+
+static uint16_t
+str2flowtype(char *string)
+{
+ uint8_t i = 0;
+ static const struct {
+ char str[32];
+ uint16_t type;
+ } flowtype_str[] = {
+ {"raw", RTE_ETH_FLOW_RAW},
+ {"ipv4", RTE_ETH_FLOW_IPV4},
+ {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
+ {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
+ {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
+ {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
+ {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
+ {"ipv6", RTE_ETH_FLOW_IPV6},
+ {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
+ {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
+ {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
+ {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
+ {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
+ {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
+ };
+
+ for (i = 0; i < RTE_DIM(flowtype_str); i++) {
+ if (!strcmp(flowtype_str[i].str, string))
+ return flowtype_str[i].type;
+ }
+ return RTE_ETH_FLOW_UNKNOWN;
+}
+
+static inline int
+parse_flexbytes(const char *q_arg, uint8_t *flexbytes, uint16_t max_num)
+{
+ char s[256];
+ const char *p, *p0 = q_arg;
+ char *end;
+ unsigned long int_fld;
+ char *str_fld[max_num];
+ int i;
+ unsigned int size;
+ int ret = -1;
+
+ p = strchr(p0, '(');
+ if (p == NULL)
+ return -1;
+ ++p;
+ p0 = strchr(p, ')');
+ if (p0 == NULL)
+ return -1;
+
+ size = p0 - p;
+ if (size >= sizeof(s))
+ return -1;
+
+ snprintf(s, sizeof(s), "%.*s", size, p);
+ ret = rte_strsplit(s, sizeof(s), str_fld, max_num, ',');
+ if (ret < 0 || ret > max_num)
+ return -1;
+ for (i = 0; i < ret; i++) {
+ errno = 0;
+ int_fld = strtoul(str_fld[i], &end, 0);
+ if (errno != 0 || *end != '\0' || int_fld > UINT8_MAX)
+ return -1;
+ flexbytes[i] = (uint8_t)int_fld;
+ }
+ return ret;
+}
+
+/* *** deal with flow director filter *** */
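+/*
+ * CLI (see cmd_add_del_udp_flow_director below):
+ *   flow_director_filter <port> mode IP add|del|update flow <flow_type>
+ *     src <ip> <port> dst <ip> <port> tos <tos> ttl <ttl> vlan <vlan>
+ *     flexbytes <bytes> drop|fwd pf|vf<id> queue <qid> fd_id <id>
+ */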
+struct cmd_flow_director_result {
+ cmdline_fixed_string_t flow_director_filter;
+ uint8_t port_id;
+ cmdline_fixed_string_t mode;
+ cmdline_fixed_string_t mode_value;
+ cmdline_fixed_string_t ops;
+ cmdline_fixed_string_t flow;
+ cmdline_fixed_string_t flow_type;
+ cmdline_fixed_string_t ether;
+ uint16_t ether_type;
+ cmdline_fixed_string_t src;
+ cmdline_ipaddr_t ip_src;
+ uint16_t port_src;
+ cmdline_fixed_string_t dst;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t port_dst;
+ cmdline_fixed_string_t verify_tag;
+ uint32_t verify_tag_value;
+ cmdline_ipaddr_t tos;
+ uint8_t tos_value;
+ cmdline_ipaddr_t proto;
+ uint8_t proto_value;
+ cmdline_ipaddr_t ttl;
+ uint8_t ttl_value;
+ cmdline_fixed_string_t vlan;
+ uint16_t vlan_value;
+ cmdline_fixed_string_t flexbytes;
+ cmdline_fixed_string_t flexbytes_value;
+ cmdline_fixed_string_t pf_vf;
+ cmdline_fixed_string_t drop;
+ cmdline_fixed_string_t queue;
+ uint16_t queue_id;
+ cmdline_fixed_string_t fd_id;
+ uint32_t fd_id_value;
+ cmdline_fixed_string_t mac;
+ struct ether_addr mac_addr;
+ cmdline_fixed_string_t tunnel;
+ cmdline_fixed_string_t tunnel_type;
+ cmdline_fixed_string_t tunnel_id;
+ uint32_t tunnel_id_value;
+};
+
+static void
+cmd_flow_director_filter_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_flow_director_result *res = parsed_result;
+ struct rte_eth_fdir_filter entry;
+ uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN];
+ char *end;
+ unsigned long vf_id;
+ int ret = 0;
+
+ if (enable_hwlb) {
+ printf("Hash Filter is already Defined !\n");
+ printf("Please undefine HWLD flag and define "
+ "FDIR_FILTER flag\n");
+ return;
+ }
+
+ ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_FDIR);
+ if (ret < 0) {
+ printf("flow director is not supported on port %u.\n",
+ res->port_id);
+ return;
+ }
+ memset(flexbytes, 0, sizeof(flexbytes));
+ memset(&entry, 0, sizeof(struct rte_eth_fdir_filter));
+#if 0
+ if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (strcmp(res->mode_value, "MAC-VLAN")) {
+ printf("Please set mode to MAC-VLAN.\n");
+ return;
+ }
+ } else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ if (strcmp(res->mode_value, "Tunnel")) {
+ printf("Please set mode to Tunnel.\n");
+ return;
+ }
+ } else {
+ if (strcmp(res->mode_value, "IP")) {
+ printf("Please set mode to IP.\n");
+ return;
+ }
+#endif
+ {
+ entry.input.flow_type = str2flowtype(res->flow_type);
+ }
+
+ ret = parse_flexbytes(res->flexbytes_value,
+ flexbytes,
+ RTE_ETH_FDIR_MAX_FLEXLEN);
+ if (ret < 0) {
+ printf("error: Cannot parse flexbytes input.\n");
+ return;
+ }
+
+ switch (entry.input.flow_type) {
+ case RTE_ETH_FLOW_FRAG_IPV4:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ entry.input.flow.ip4_flow.proto = res->proto_value;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ IPV4_ADDR_TO_UINT(res->ip_dst,
+ entry.input.flow.ip4_flow.dst_ip);
+ IPV4_ADDR_TO_UINT(res->ip_src,
+ entry.input.flow.ip4_flow.src_ip);
+ entry.input.flow.ip4_flow.tos = res->tos_value;
+ entry.input.flow.ip4_flow.ttl = res->ttl_value;
+ /* need convert to big endian. */
+ entry.input.flow.udp4_flow.dst_port =
+ rte_cpu_to_be_16(res->port_dst);
+ entry.input.flow.udp4_flow.src_port =
+ rte_cpu_to_be_16(res->port_src);
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ IPV4_ADDR_TO_UINT(res->ip_dst,
+ entry.input.flow.sctp4_flow.ip.dst_ip);
+ IPV4_ADDR_TO_UINT(res->ip_src,
+ entry.input.flow.sctp4_flow.ip.src_ip);
+ entry.input.flow.ip4_flow.tos = res->tos_value;
+ entry.input.flow.ip4_flow.ttl = res->ttl_value;
+ /* need convert to big endian. */
+ entry.input.flow.sctp4_flow.dst_port =
+ rte_cpu_to_be_16(res->port_dst);
+ entry.input.flow.sctp4_flow.src_port =
+ rte_cpu_to_be_16(res->port_src);
+ entry.input.flow.sctp4_flow.verify_tag =
+ rte_cpu_to_be_32(res->verify_tag_value);
+ break;
+
+ case RTE_ETH_FLOW_FRAG_IPV6:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ entry.input.flow.ipv6_flow.proto = res->proto_value;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ IPV6_ADDR_TO_ARRAY(res->ip_dst,
+ entry.input.flow.ipv6_flow.dst_ip);
+ IPV6_ADDR_TO_ARRAY(res->ip_src,
+ entry.input.flow.ipv6_flow.src_ip);
+ entry.input.flow.ipv6_flow.tc = res->tos_value;
+ entry.input.flow.ipv6_flow.hop_limits = res->ttl_value;
+ /* need convert to big endian. */
+ entry.input.flow.udp6_flow.dst_port =
+ rte_cpu_to_be_16(res->port_dst);
+ entry.input.flow.udp6_flow.src_port =
+ rte_cpu_to_be_16(res->port_src);
+ break;
+
+ case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+ IPV6_ADDR_TO_ARRAY(res->ip_dst,
+ entry.input.flow.sctp6_flow.ip.dst_ip);
+ IPV6_ADDR_TO_ARRAY(res->ip_src,
+ entry.input.flow.sctp6_flow.ip.src_ip);
+ entry.input.flow.ipv6_flow.tc = res->tos_value;
+ entry.input.flow.ipv6_flow.hop_limits = res->ttl_value;
+ /* need convert to big endian. */
+ entry.input.flow.sctp6_flow.dst_port =
+ rte_cpu_to_be_16(res->port_dst);
+ entry.input.flow.sctp6_flow.src_port =
+ rte_cpu_to_be_16(res->port_src);
+ entry.input.flow.sctp6_flow.verify_tag =
+ rte_cpu_to_be_32(res->verify_tag_value);
+ break;
+ case RTE_ETH_FLOW_L2_PAYLOAD:
+ entry.input.flow.l2_flow.ether_type =
+ rte_cpu_to_be_16(res->ether_type);
+ break;
+ default:
+ break;
+ }
+#if 0
+ if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
+ (void)rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr,
+ &res->mac_addr,
+ sizeof(struct ether_addr));
+
+ if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
+ (void)rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr,
+ &res->mac_addr,
+ sizeof(struct ether_addr));
+ entry.input.flow.tunnel_flow.tunnel_type =
+ str2fdir_tunneltype(res->tunnel_type);
+ entry.input.flow.tunnel_flow.tunnel_id =
+ rte_cpu_to_be_32(res->tunnel_id_value);
+ }
+#endif
+
+ (void)rte_memcpy(entry.input.flow_ext.flexbytes,
+ flexbytes,
+ RTE_ETH_FDIR_MAX_FLEXLEN);
+
+ entry.input.flow_ext.vlan_tci = rte_cpu_to_be_16(res->vlan_value);
+
+ entry.action.flex_off = 0; /*use 0 by default */
+ if (!strcmp(res->drop, "drop"))
+ entry.action.behavior = RTE_ETH_FDIR_REJECT;
+ else
+ entry.action.behavior = RTE_ETH_FDIR_ACCEPT;
+
+ if (!strcmp(res->pf_vf, "pf"))
+ entry.input.flow_ext.is_vf = 0;
+ else if (!strncmp(res->pf_vf, "vf", 2)) {
+ struct rte_eth_dev_info dev_info;
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(res->port_id, &dev_info);
+ errno = 0;
+ vf_id = strtoul(res->pf_vf + 2, &end, 10);
+ if (errno != 0 || *end != '\0' || vf_id >= dev_info.max_vfs) {
+ printf("invalid parameter %s.\n", res->pf_vf);
+ return;
+ }
+ entry.input.flow_ext.is_vf = 1;
+ entry.input.flow_ext.dst_id = (uint16_t)vf_id;
+ } else {
+ printf("invalid parameter %s.\n", res->pf_vf);
+ return;
+ }
+ /* set to report FD ID by default */
+ entry.action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ entry.action.rx_queue = res->queue_id;
+ entry.soft_id = res->fd_id_value;
+ if (!strcmp(res->ops, "add"))
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_ADD, &entry);
+ else if (!strcmp(res->ops, "del"))
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_DELETE, &entry);
+ else
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR,
+ RTE_ETH_FILTER_UPDATE, &entry);
+ if (ret < 0)
+ printf("flow director programming error: (%s)\n",
+ strerror(-ret));
+// fdir_filter_enabled = 1;
+}
+
+
+
+cmdline_parse_token_string_t cmd_flow_director_filter =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ flow_director_filter, "flow_director_filter");
+
+cmdline_parse_token_num_t cmd_flow_director_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ port_id, UINT8);
+
+
+cmdline_parse_token_string_t cmd_flow_director_mode =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ mode, "mode");
+
+cmdline_parse_token_string_t cmd_flow_director_mode_ip =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ mode_value, "IP");
+
+cmdline_parse_token_string_t cmd_flow_director_ops =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ ops, "add#del#update");
+
+cmdline_parse_token_string_t cmd_flow_director_flow =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ flow, "flow");
+
+cmdline_parse_token_string_t cmd_flow_director_flow_type =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ flow_type, "ipv4-other#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#"
+ "ipv6-other#ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#l2_payload");
+
+cmdline_parse_token_string_t cmd_flow_director_src =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ src, "src");
+cmdline_parse_token_ipaddr_t cmd_flow_director_ip_src =
+TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_result,
+ ip_src);
+cmdline_parse_token_num_t cmd_flow_director_port_src =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ port_src, UINT16);
+cmdline_parse_token_string_t cmd_flow_director_dst =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ dst, "dst");
+cmdline_parse_token_ipaddr_t cmd_flow_director_ip_dst =
+TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_result,
+ ip_dst);
+cmdline_parse_token_num_t cmd_flow_director_port_dst =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ port_dst, UINT16);
+
+cmdline_parse_token_string_t cmd_flow_director_tos =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ tos, "tos");
+cmdline_parse_token_num_t cmd_flow_director_tos_value =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ tos_value, UINT8);
+
+cmdline_parse_token_string_t cmd_flow_director_ttl =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ ttl, "ttl");
+cmdline_parse_token_num_t cmd_flow_director_ttl_value =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ ttl_value, UINT8);
+
+cmdline_parse_token_string_t cmd_flow_director_vlan =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ vlan, "vlan");
+cmdline_parse_token_num_t cmd_flow_director_vlan_value =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ vlan_value, UINT16);
+cmdline_parse_token_string_t cmd_flow_director_flexbytes =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ flexbytes, "flexbytes");
+cmdline_parse_token_string_t cmd_flow_director_flexbytes_value =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ flexbytes_value, NULL);
+cmdline_parse_token_string_t cmd_flow_director_drop =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ drop, "drop#fwd");
+cmdline_parse_token_string_t cmd_flow_director_pf_vf =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ pf_vf, NULL);
+cmdline_parse_token_string_t cmd_flow_director_queue =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ queue, "queue");
+cmdline_parse_token_num_t cmd_flow_director_queue_id =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ queue_id, UINT16);
+cmdline_parse_token_string_t cmd_flow_director_fd_id =
+TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ fd_id, "fd_id");
+cmdline_parse_token_num_t cmd_flow_director_fd_id_value =
+TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ fd_id_value, UINT32);
+
+
+cmdline_parse_inst_t cmd_add_del_udp_flow_director = {
+ .f = cmd_flow_director_filter_parsed,
+ .data = NULL,
+ .help_str = "add or delete an udp/tcp flow director entry on NIC",
+ .tokens = {
+ (void *)&cmd_flow_director_filter,
+ (void *)&cmd_flow_director_port_id,
+ (void *)&cmd_flow_director_mode,
+ (void *)&cmd_flow_director_mode_ip,
+ (void *)&cmd_flow_director_ops,
+ (void *)&cmd_flow_director_flow,
+ (void *)&cmd_flow_director_flow_type,
+ (void *)&cmd_flow_director_src,
+ (void *)&cmd_flow_director_ip_src,
+ (void *)&cmd_flow_director_port_src,
+ (void *)&cmd_flow_director_dst,
+ (void *)&cmd_flow_director_ip_dst,
+ (void *)&cmd_flow_director_port_dst,
+ (void *)&cmd_flow_director_tos,
+ (void *)&cmd_flow_director_tos_value,
+ (void *)&cmd_flow_director_ttl,
+ (void *)&cmd_flow_director_ttl_value,
+ (void *)&cmd_flow_director_vlan,
+ (void *)&cmd_flow_director_vlan_value,
+ (void *)&cmd_flow_director_flexbytes,
+ (void *)&cmd_flow_director_flexbytes_value,
+ (void *)&cmd_flow_director_drop,
+ (void *)&cmd_flow_director_pf_vf,
+ (void *)&cmd_flow_director_queue,
+ (void *)&cmd_flow_director_queue_id,
+ (void *)&cmd_flow_director_fd_id,
+ (void *)&cmd_flow_director_fd_id_value,
+ NULL,
+ },
+};
+/* L2 payload*/
+cmdline_parse_token_string_t cmd_flow_director_ether =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result,
+ ether, "ether");
+cmdline_parse_token_num_t cmd_flow_director_ether_type =
+ TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result,
+ ether_type, UINT16);
+
+cmdline_parse_inst_t cmd_add_del_l2_flow_director = {
+ .f = cmd_flow_director_filter_parsed,
+ .data = NULL,
+ .help_str = "add or delete a L2 flow director entry on NIC",
+ .tokens = {
+ (void *)&cmd_flow_director_filter,
+ (void *)&cmd_flow_director_port_id,
+ (void *)&cmd_flow_director_mode,
+ (void *)&cmd_flow_director_mode_ip,
+ (void *)&cmd_flow_director_ops,
+ (void *)&cmd_flow_director_flow,
+ (void *)&cmd_flow_director_flow_type,
+ (void *)&cmd_flow_director_ether,
+ (void *)&cmd_flow_director_ether_type,
+ (void *)&cmd_flow_director_flexbytes,
+ (void *)&cmd_flow_director_flexbytes_value,
+ (void *)&cmd_flow_director_drop,
+ (void *)&cmd_flow_director_pf_vf,
+ (void *)&cmd_flow_director_queue,
+ (void *)&cmd_flow_director_queue_id,
+ (void *)&cmd_flow_director_fd_id,
+ (void *)&cmd_flow_director_fd_id_value,
+ NULL,
+ },
+};
+
+#if 1
+/* Set hash input set */
+struct cmd_set_hash_input_set_result {
+ cmdline_fixed_string_t set_hash_input_set;
+ uint8_t port_id;
+ cmdline_fixed_string_t flow_type;
+ cmdline_fixed_string_t inset_field0;
+ cmdline_fixed_string_t inset_field1;
+ cmdline_fixed_string_t inset_field2;
+ cmdline_fixed_string_t inset_field3;
+ cmdline_fixed_string_t inset_field4;
+ cmdline_fixed_string_t select;
+};
+
+static enum rte_eth_input_set_field
+str2inset(char *string)
+{
+ uint16_t i;
+
+ static const struct {
+ char str[32];
+ enum rte_eth_input_set_field inset;
+ } inset_table[] = {
+ {"ethertype", RTE_ETH_INPUT_SET_L2_ETHERTYPE},
+ {"ovlan", RTE_ETH_INPUT_SET_L2_OUTER_VLAN},
+ {"ivlan", RTE_ETH_INPUT_SET_L2_INNER_VLAN},
+ {"src-ipv4", RTE_ETH_INPUT_SET_L3_SRC_IP4},
+ {"dst-ipv4", RTE_ETH_INPUT_SET_L3_DST_IP4},
+ {"ipv4-tos", RTE_ETH_INPUT_SET_L3_IP4_TOS},
+ {"ipv4-proto", RTE_ETH_INPUT_SET_L3_IP4_PROTO},
+ {"ipv4-ttl", RTE_ETH_INPUT_SET_L3_IP4_TTL},
+ {"src-ipv6", RTE_ETH_INPUT_SET_L3_SRC_IP6},
+ {"dst-ipv6", RTE_ETH_INPUT_SET_L3_DST_IP6},
+ {"ipv6-tc", RTE_ETH_INPUT_SET_L3_IP6_TC},
+ {"ipv6-next-header", RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER},
+ {"ipv6-hop-limits", RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS},
+ {"udp-src-port", RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT},
+ {"udp-dst-port", RTE_ETH_INPUT_SET_L4_UDP_DST_PORT},
+ {"tcp-src-port", RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT},
+ {"tcp-dst-port", RTE_ETH_INPUT_SET_L4_TCP_DST_PORT},
+ {"sctp-src-port", RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT},
+ {"sctp-dst-port", RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT},
+ {"sctp-veri-tag", RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG},
+ {"udp-key", RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY},
+ {"gre-key", RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY},
+ {"fld-1st", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD},
+ {"fld-2nd", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD},
+ {"fld-3rd", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD},
+ {"fld-4th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD},
+ {"fld-5th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD},
+ {"fld-6th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD},
+ {"fld-7th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD},
+ {"fld-8th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD},
+ {"none", RTE_ETH_INPUT_SET_NONE},
+ };
+ for (i = 0; i < RTE_DIM(inset_table); i++) {
+ if (!strcmp(string, inset_table[i].str))
+ return inset_table[i].inset;
+ }
+
+ return RTE_ETH_INPUT_SET_UNKNOWN;
+}
+
+static void
+cmd_set_hash_input_set_1_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_input_set_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+
+ if (enable_flow_dir) {
+ printf("FDIR Filter is Defined!\n");
+ printf("Please undefine FDIR_FILTER flag and define "
+ "HWLD flag\n");
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field0);
+ info.info.input_set_conf.inset_size = 1;
+
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+
+ //hash_filter_enabled = 1;
+}
+
+static void
+cmd_set_hash_input_set_2_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_input_set_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+
+ if (enable_flow_dir) {
+ printf("FDIR Filter is Defined!\n");
+ printf("Please undefine FDIR_FILTER flag and define "
+ "HWLD flag\n");
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field0);
+ info.info.input_set_conf.field[1] = str2inset(res->inset_field1);
+
+ info.info.input_set_conf.inset_size = 2;
+
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+
+ //hash_filter_enabled = 1;
+}
+
+#if 0
+static void
+cmd_set_hash_input_set_3_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_input_set_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field0);
+ info.info.input_set_conf.field[1] = str2inset(res->inset_field1);
+ info.info.input_set_conf.field[2] = str2inset(res->inset_field2);
+ info.info.input_set_conf.inset_size = 3;
+
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+}
+#endif
+static void
+cmd_set_hash_input_set_4_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_input_set_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+
+ if (enable_flow_dir) {
+ printf("FDIR Filter is Defined!\n");
+ printf("Please undefine FDIR_FILTER flag and define "
+ "HWLD flag\n");
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field0);
+ info.info.input_set_conf.field[1] = str2inset(res->inset_field1);
+ info.info.input_set_conf.field[2] = str2inset(res->inset_field2);
+ info.info.input_set_conf.field[3] = str2inset(res->inset_field3);
+
+ info.info.input_set_conf.inset_size = 4;
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+ //hash_filter_enabled = 1;
+}
+
+#if 0
+static void
+cmd_set_hash_input_set_5_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_input_set_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT;
+ info.info.input_set_conf.flow_type = str2flowtype(res->flow_type);
+
+ info.info.input_set_conf.field[0] = str2inset(res->inset_field0);
+ info.info.input_set_conf.field[1] = str2inset(res->inset_field1);
+ info.info.input_set_conf.field[2] = str2inset(res->inset_field2);
+ info.info.input_set_conf.field[3] = str2inset(res->inset_field3);
+ info.info.input_set_conf.field[4] = str2inset(res->inset_field4);
+
+ info.info.input_set_conf.inset_size = 5;
+ if (!strcmp(res->select, "select"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
+ else if (!strcmp(res->select, "add"))
+ info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD;
+ rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+}
+#endif
+
+cmdline_parse_token_string_t cmd_set_hash_input_set_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ set_hash_input_set, "set_hash_input_set");
+cmdline_parse_token_num_t cmd_set_hash_input_set_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_hash_input_set_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_hash_input_set_flow_type =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ flow_type,
+ "ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#"
+ "ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload");
+
+cmdline_parse_token_string_t cmd_set_hash_input_set_field0 =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ inset_field0,
+ "src-ipv4#src-ipv6#dst-ipv4#dst-ipv6#"
+ "udp-src-port#udp-dst-port#tcp-src-port#tcp-dst-port#none");
+
+cmdline_parse_token_string_t cmd_set_hash_input_set_field1 =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ inset_field1,
+ "dst-ipv4#dst-ipv6#"
+ "udp-src-port#tcp-src-port#udp-dst-port#tcp-dst-port#none");
+
+cmdline_parse_token_string_t cmd_set_hash_input_set_field2 =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ inset_field2,
+ "udp-src-port#tcp-src-port#none");
+
+cmdline_parse_token_string_t cmd_set_hash_input_set_field3 =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ inset_field3,
+ "udp-dst-port#tcp-dst-port#none");
+#if 0
+cmdline_parse_token_string_t cmd_set_hash_input_set_field4 =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ inset_field4, "ipv4-proto#ipv6-next-header#none");
+#endif
+
+cmdline_parse_token_string_t cmd_set_hash_input_set_select =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result,
+ select, "select#add");
+
+cmdline_parse_inst_t cmd_set_hash_input_set_1 = {
+ .f = cmd_set_hash_input_set_1_parsed,
+ .data = NULL,
+ .help_str = "set_hash_input_set_1 <port_id> "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "src-ipv4|src-ipv6|dst-ipv4|dst-ipv6|"
+ "udp-src-port|udp-dst-port|tcp-src-port|tcp-dst-port|none "
+ "select|add",
+ .tokens = {
+ (void *)&cmd_set_hash_input_set_cmd,
+ (void *)&cmd_set_hash_input_set_port_id,
+ (void *)&cmd_set_hash_input_set_flow_type,
+ (void *)&cmd_set_hash_input_set_field0,
+ (void *)&cmd_set_hash_input_set_select,
+ NULL,
+ },
+};
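+/*
+ * Example usage (values are illustrative only):
+ *	set_hash_input_set 0 ipv4-udp src-ipv4 select
+ * selects the source IPv4 address as the only hash input for ipv4-udp
+ * flows on port 0.
+ */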
+
+cmdline_parse_inst_t cmd_set_hash_input_set_2 = {
+ .f = cmd_set_hash_input_set_2_parsed,
+ .data = NULL,
+ .help_str = "set_hash_input_set_2 <port_id> "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other| "
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "src-ipv4|src-ipv6|dst-ipv4|dst-ipv6| "
+ "udp-src-port|udp-dst-port|tcp-src-port|tcp-dst-port|none "
+ "udp-src-port|tcp-src-port|udp-dst-port|tcp-dst-port|none "
+ "select|add",
+ .tokens = {
+ (void *)&cmd_set_hash_input_set_cmd,
+ (void *)&cmd_set_hash_input_set_port_id,
+ (void *)&cmd_set_hash_input_set_flow_type,
+ (void *)&cmd_set_hash_input_set_field0,
+ (void *)&cmd_set_hash_input_set_field1,
+ (void *)&cmd_set_hash_input_set_select,
+ NULL,
+ },
+};
+
+#if 0
+cmdline_parse_inst_t cmd_set_hash_input_set_3 = {
+ .f = cmd_set_hash_input_set_3_parsed,
+ .data = NULL,
+ .help_str = "set_hash_input_set_3 <port_id> "
+ "ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|ipv4-tos|ipv4-proto|"
+ "ipv6-tc|ipv6-next-header|udp-src-port|udp-dst-port|tcp-src-port|"
+ "tcp-dst-port|sctp-src-port|sctp-dst-port|sctp-veri-tag|udp-key|"
+ "gre-key|fld-1st|fld-2nd|fld-3rd|fld-4th|fld-5th|fld-6th|"
+ "fld-7th|fld-8th|none "
+ "udp-src-port|udp-dst-port|tcp-src-port|tcp-dst-port|none "
+ "select|add",
+ .tokens = {
+ (void *)&cmd_set_hash_input_set_cmd,
+ (void *)&cmd_set_hash_input_set_port_id,
+ (void *)&cmd_set_hash_input_set_flow_type,
+ (void *)&cmd_set_hash_input_set_field0,
+ (void *)&cmd_set_hash_input_set_field1,
+ (void *)&cmd_set_hash_input_set_field2,
+ (void *)&cmd_set_hash_input_set_select,
+ NULL,
+ },
+};
+#endif
+
+cmdline_parse_inst_t cmd_set_hash_input_set_4 = {
+ .f = cmd_set_hash_input_set_4_parsed,
+ .data = NULL,
+ .help_str = "set_hash_input_set_4 <port_id> "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "src-ipv4|src-ipv6|dst-ipv4|dst-ipv6|"
+ "udp-src-port|udp-dst-port|tcp-src-port|tcp-dst-port|none "
+ "udp-src-port|tcp-src-port|udp-dst-port|tcp-dst-port|none "
+ "udp-src-port|tcp-src-port|dst-ipv4|none "
+ "udp-dst-port|tcp-dst-port|none "
+ "select|add",
+ .tokens = {
+ (void *)&cmd_set_hash_input_set_cmd,
+ (void *)&cmd_set_hash_input_set_port_id,
+ (void *)&cmd_set_hash_input_set_flow_type,
+ (void *)&cmd_set_hash_input_set_field0,
+ (void *)&cmd_set_hash_input_set_field1,
+ (void *)&cmd_set_hash_input_set_field2,
+ (void *)&cmd_set_hash_input_set_field3,
+ (void *)&cmd_set_hash_input_set_select,
+ NULL,
+ },
+};
+#if 0
+cmdline_parse_inst_t cmd_set_hash_input_set_5 = {
+ .f = cmd_set_hash_input_set_5_parsed,
+ .data = NULL,
+ .help_str = "set_hash_input_set_5 <port_id> "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "src-ipv4|src-ipv6|none "
+ "dst-ipv4|dst-ipv6|none "
+ "udp-src-port|tcp-src-port|none "
+ "udp-dst-port|tcp-dst-port|none "
+ "ipv4-proto|ipv6-next-header|none "
+ "select|add",
+
+ .tokens = {
+ (void *)&cmd_set_hash_input_set_cmd,
+ (void *)&cmd_set_hash_input_set_port_id,
+ (void *)&cmd_set_hash_input_set_flow_type,
+ (void *)&cmd_set_hash_input_set_field0,
+ (void *)&cmd_set_hash_input_set_field1,
+ (void *)&cmd_set_hash_input_set_field2,
+ (void *)&cmd_set_hash_input_set_field3,
+ (void *)&cmd_set_hash_input_set_field4,
+ (void *)&cmd_set_hash_input_set_select,
+ NULL,
+ },
+};
+#endif
+#endif
+/* set hash global config */
+struct cmd_set_hash_global_config_result {
+ cmdline_fixed_string_t set_hash_global_config;
+ uint8_t port_id;
+ cmdline_fixed_string_t hash_func;
+ cmdline_fixed_string_t flow_type;
+ cmdline_fixed_string_t enable;
+};
+
+static void
+cmd_set_hash_global_config_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_hash_global_config_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+ uint32_t ftype, idx, offset;
+ int ret;
+
+ if (rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_HASH) < 0) {
+ printf("RTE_ETH_FILTER_HASH not supported on port %d\n",
+ res->port_id);
+ return;
+ }
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG;
+ if (!strcmp(res->hash_func, "toeplitz"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ else if (!strcmp(res->hash_func, "simple_xor"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ else if (!strcmp(res->hash_func, "default"))
+ info.info.global_conf.hash_func =
+ RTE_ETH_HASH_FUNCTION_DEFAULT;
+
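+ /*
+ * Each flow type occupies one bit in the mask arrays below:
+ * idx selects the 32-bit word, offset the bit within that word.
+ */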
+ ftype = str2flowtype(res->flow_type);
+ idx = ftype / (CHAR_BIT * sizeof(uint32_t));
+ offset = ftype % (CHAR_BIT * sizeof(uint32_t));
+ info.info.global_conf.valid_bit_mask[idx] |= (1UL << offset);
+ if (!strcmp(res->enable, "enable"))
+ if (idx < RTE_SYM_HASH_MASK_ARRAY_SIZE)
+ info.info.global_conf.sym_hash_enable_mask[idx] |=
+ (1UL << offset);
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+ if (ret < 0)
+ printf("Cannot set global hash configuration on port %d\n",
+ res->port_id);
+ else
+ printf("Global hash configuration has been set "
+ "successfully on port %d\n", res->port_id);
+}
+cmdline_parse_token_string_t cmd_set_hash_global_config_all =
+TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
+ set_hash_global_config, "set_hash_global_config");
+cmdline_parse_token_num_t cmd_set_hash_global_config_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_set_hash_global_config_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func =
+TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
+ hash_func, "toeplitz#simple_xor#default");
+cmdline_parse_token_string_t cmd_set_hash_global_config_flow_type =
+TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
+ flow_type,
+ "ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#ipv6#"
+ "ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload");
+cmdline_parse_token_string_t cmd_set_hash_global_config_enable =
+TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
+ enable, "enable#disable");
+
+cmdline_parse_inst_t cmd_set_hash_global_config = {
+ .f = cmd_set_hash_global_config_parsed,
+ .data = NULL,
+ .help_str = "set_hash_global_config port_id "
+ "toeplitz|simple_xor|default "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
+ "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
+ "enable|disable",
+ .tokens = {
+ (void *)&cmd_set_hash_global_config_all,
+ (void *)&cmd_set_hash_global_config_port_id,
+ (void *)&cmd_set_hash_global_config_hash_func,
+ (void *)&cmd_set_hash_global_config_flow_type,
+ (void *)&cmd_set_hash_global_config_enable,
+ NULL,
+ },
+};
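+/*
+ * Example usage (values are illustrative only):
+ *	set_hash_global_config 0 toeplitz ipv4-udp enable
+ * selects the Toeplitz hash function and enables symmetric hashing for
+ * ipv4-udp flows on port 0.
+ */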
+
+/* *** Set symmetric hash enable per port *** */
+struct cmd_set_sym_hash_ena_per_port_result {
+ cmdline_fixed_string_t set_sym_hash_ena_per_port;
+ cmdline_fixed_string_t enable;
+ uint8_t port_id;
+};
+
+static void
+cmd_set_sym_hash_per_port_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_set_sym_hash_ena_per_port_result *res = parsed_result;
+ struct rte_eth_hash_filter_info info;
+ int ret;
+
+ if (rte_eth_dev_filter_supported(res->port_id,
+ RTE_ETH_FILTER_HASH) < 0) {
+ printf("RTE_ETH_FILTER_HASH not supported on port: %d\n",
+ res->port_id);
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT;
+
+ if (!strcmp(res->enable, "enable"))
+ info.info.enable = 1;
+
+ ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH,
+ RTE_ETH_FILTER_SET, &info);
+ if (ret < 0) {
+ printf("Cannot set symmetric hash enable per port on "
+ "port %u\n", res->port_id);
+ return;
+ }
+ printf("Symmetric hash has been set to %s on port %u\n",
+ res->enable, res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_all =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
+ set_sym_hash_ena_per_port, "set_sym_hash_ena_per_port");
+cmdline_parse_token_num_t cmd_set_sym_hash_ena_per_port_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_enable =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result,
+ enable, "enable#disable");
+
+cmdline_parse_inst_t cmd_set_sym_hash_ena_per_port = {
+ .f = cmd_set_sym_hash_per_port_parsed,
+ .data = NULL,
+ .help_str = "set_sym_hash_ena_per_port port_id enable|disable",
+ .tokens = {
+ (void *)&cmd_set_sym_hash_ena_per_port_all,
+ (void *)&cmd_set_sym_hash_ena_per_port_port_id,
+ (void *)&cmd_set_sym_hash_ena_per_port_enable,
+ NULL,
+ },
+};
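+/*
+ * Example usage (values are illustrative only):
+ *	set_sym_hash_ena_per_port 0 enable
+ * turns on symmetric hashing on port 0.
+ */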
+#endif
+
+static int
+app_pipeline_arpicmp_entry_dbg(struct app_params *app,
+ uint32_t pipeline_id, uint8_t *msg)
+{
+ struct pipeline_arpicmp_entry_dbg_msg_req *req;
+ struct pipeline_arpicmp_entry_dbg_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ARPICMP_MSG_REQ_ENTRY_DBG;
+ req->data[0] = msg[0];
+ req->data[1] = msg[1];
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * entry dbg
+ */
+
+
+struct cmd_entry_dbg_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t dbg_string;
+ uint8_t cmd;
+ uint8_t d1;
+};
+
+static void
+cmd_entry_dbg_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_dbg_result *params = parsed_result;
+ struct app_params *app = data;
+ uint8_t msg[2];
+ int status;
+
+ msg[0] = params->cmd;
+ msg[1] = params->d1;
+ status = app_pipeline_arpicmp_entry_dbg(app, params->p, msg);
+
+ if (status != 0) {
+ printf("Dbg Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, p_string, "p");
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, p, UINT32);
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result,
+ entry_string, "txrx");
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_dbg_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, dbg_string,
+ "dbg");
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_cmd =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, cmd, UINT8);
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_d1 =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, d1, UINT8);
+
+static cmdline_parse_inst_t lb_cmd_entry_dbg = {
+ .f = cmd_entry_dbg_parsed,
+ .data = NULL,
+ .help_str = "ARPICMP dbg cmd",
+ .tokens = {
+ (void *)&lb_cmd_entry_dbg_p_string,
+ (void *)&lb_cmd_entry_dbg_p,
+ (void *)&lb_cmd_entry_dbg_entry_string,
+ (void *)&lb_cmd_entry_dbg_dbg_string,
+ (void *)&lb_cmd_entry_dbg_cmd,
+ (void *)&lb_cmd_entry_dbg_d1,
+ NULL,
+ },
+};
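+/*
+ * Example usage (values are illustrative only):
+ *	p 1 txrx dbg 2 0
+ * sends an ENTRY_DBG request to pipeline 1 with data[0]=2 and data[1]=0.
+ */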
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &lb_cmd_entry_dbg,
+ (cmdline_parse_inst_t *) &cmd_arp_add,
+ (cmdline_parse_inst_t *) &cmd_arp_del,
+ (cmdline_parse_inst_t *) &cmd_arp_req,
+ (cmdline_parse_inst_t *) &cmd_icmp_echo_req,
+ (cmdline_parse_inst_t *) &cmd_arp_ls,
+ (cmdline_parse_inst_t *) &cmd_show_ports_info,
+ /*HWLB cmds*/
+ (cmdline_parse_inst_t *) &cmd_set_fwd_mode,
+ (cmdline_parse_inst_t *) &cmd_add_del_udp_flow_director,
+ (cmdline_parse_inst_t *) &cmd_add_del_l2_flow_director,
+ (cmdline_parse_inst_t *) &cmd_set_hash_input_set_1,
+ (cmdline_parse_inst_t *) &cmd_set_hash_input_set_2,
+/* (cmdline_parse_inst_t *) & cmd_set_hash_input_set_3,*/
+ (cmdline_parse_inst_t *) &cmd_set_hash_input_set_4,
+/* (cmdline_parse_inst_t *) & cmd_set_hash_input_set_5,*/
+ (cmdline_parse_inst_t *) &cmd_set_hash_global_config,
+ (cmdline_parse_inst_t *) &cmd_set_sym_hash_ena_per_port,
+ #ifndef VNF_ACL
+ (cmdline_parse_inst_t *) &cmd_arp_dbg,
+ (cmdline_parse_inst_t *) &cmd_arp_timer,
+ #endif
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_arpicmp_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_arpicmp = {
+ .name = "ARPICMP",
+ .be_ops = &pipeline_arpicmp_be_ops,
+ .fe_ops = &pipeline_arpicmp_fe_ops,
+};
diff --git a/common/VIL/pipeline_arpicmp/pipeline_arpicmp.h b/common/VIL/pipeline_arpicmp/pipeline_arpicmp.h
new file mode 100644
index 00000000..1efb14e2
--- /dev/null
+++ b/common/VIL/pipeline_arpicmp/pipeline_arpicmp.h
@@ -0,0 +1,122 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_ARPICMP_H__
+#define __INCLUDE_PIPELINE_ARPICMP_H__
+
+#include "pipeline.h"
+#include "pipeline_arpicmp_be.h"
+
+/*
+ * Pipeline type
+ */
+extern struct pipeline_type pipeline_arpicmp;
+//uint16_t verbose_level = 1; /**< should be Silent by default. */
+#define MAX_PKT_BURST 512
+#define DEF_PKT_BURST 32
+/**< Number of packets per burst. */
+//uint16_t nb_pkt_per_burst = DEF_PKT_BURST;
+typedef uint8_t portid_t;
+typedef uint16_t queueid_t;
+typedef uint16_t streamid_t;
+/**
+ * The data structure associated with a forwarding stream between a receive
+ * port/queue and a transmit port/queue.
+ */
+struct fwd_stream {
+ /* "read-only" data */
+ portid_t rx_port; /**< port to poll for received packets */
+ queueid_t rx_queue; /**< RX queue to poll on "rx_port" */
+ portid_t tx_port; /**< forwarding port of received packets */
+ queueid_t tx_queue; /**< TX queue to send forwarded packets */
+ streamid_t peer_addr; /**< index of peer Ethernet address of packets */
+
+ /* "read-write" results */
+ unsigned int rx_packets; /**< received packets */
+ unsigned int tx_packets; /**< received packets transmitted */
+ unsigned int fwd_dropped; /**< received packets not forwarded */
+ unsigned int rx_bad_ip_csum; /**< received packets with bad IP checksum */
+ unsigned int rx_bad_l4_csum; /**< received packets with bad L4 checksum */
+ #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t core_cycles; /**< used for RX and TX processing */
+ #endif
+ #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ struct pkt_burst_stats rx_burst_stats;
+ struct pkt_burst_stats tx_burst_stats;
+ #endif
+};
+/*
+ * Forwarding mode operations:
+ * - IO forwarding mode (default mode)
+ * Forwards packets unchanged.
+ *
+ * - MAC forwarding mode
+ * Set the source and the destination Ethernet addresses of packets
+ * before forwarding them.
+ *
+ * - IEEE1588 forwarding mode
+ * Check that received IEEE1588 Precise Time Protocol (PTP) packets are
+ * filtered and timestamped by the hardware.
+ * Forwards packets unchanged on the same port.
+ * Check that sent IEEE1588 PTP packets are timestamped by the hardware.
+ */
+typedef void (*port_fwd_begin_t)(portid_t pi);
+typedef void (*port_fwd_end_t)(portid_t pi);
+typedef void (*packet_fwd_t)(struct fwd_stream *fs);
+struct fwd_engine {
+ const char *fwd_mode_name; /**< Forwarding mode name. */
+ port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */
+ port_fwd_end_t port_fwd_end; /**< NULL if nothing special to do. */
+ packet_fwd_t packet_fwd; /**< Mandatory. */
+};
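+/*
+ * Illustrative (hypothetical) engine definition; the io_fwd_pkts() callback
+ * named here is an assumed example, not part of this file:
+ *
+ *	static struct fwd_engine io_fwd_engine = {
+ *		.fwd_mode_name = "io",
+ *		.port_fwd_begin = NULL,
+ *		.port_fwd_end = NULL,
+ *		.packet_fwd = io_fwd_pkts,
+ *	};
+ */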
+#define IPV4_ADDR_TO_UINT(ip_addr, ip) \
+do { \
+ if ((ip_addr).family == AF_INET) \
+ (ip) = (ip_addr).addr.ipv4.s_addr; \
+ else { \
+ printf("invalid parameter.\n"); \
+ return; \
+ } \
+} while (0)
+
+#define IPV6_ADDR_TO_ARRAY(ip_addr, ip) \
+do { \
+ if ((ip_addr).family == AF_INET6) \
+ (void)rte_memcpy(&(ip), \
+ &((ip_addr).addr.ipv6), \
+ sizeof(struct in6_addr)); \
+ else { \
+ printf("invalid parameter.\n"); \
+ return; \
+ } \
+} while (0)
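+/*
+ * Usage sketch (hypothetical: 'res->src' stands for any cmdline-parsed
+ * IP address field available to the caller):
+ *
+ *	uint32_t ip4;
+ *	uint8_t ip6[16];
+ *	IPV4_ADDR_TO_UINT(res->src, ip4);
+ *	IPV6_ADDR_TO_ARRAY(res->src, ip6);
+ */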
+
+void set_pkt_forwarding_mode(const char *fwd_mode_name);
+#endif
diff --git a/common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.c b/common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.c
new file mode 100644
index 00000000..7238bd1d
--- /dev/null
+++ b/common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.c
@@ -0,0 +1,3484 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <app.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_table_stub.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+#include <rte_arp.h>
+#include <rte_icmp.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_cycles.h>
+#include <rte_hexdump.h>
+#include "pipeline_actions_common.h"
+#include "hash_func.h"
+#include "vnf_common.h"
+#include "pipeline_common_be.h"
+#include "pipeline_arpicmp_be.h"
+#include "parser.h"
+#include "hash_func.h"
+#include "vnf_common.h"
+#include "app.h"
+
+#include"pipeline_common_fe.h"
+#ifndef VNF_ACL
+#include "lib_arp.h"
+#include "lib_icmpv6.h"
+#include "interface.h"
+#endif
+
+#ifdef VNF_ACL
+
+#define NB_ARPICMP_MBUF 64
+#define NB_NDICMP_MBUF 64
+#define IP_VERSION_4 0x40
+/* default IP header length == five 32-bits words. */
+#define IP_HDRLEN 0x05
+#define IP_VHL_DEF (IP_VERSION_4 | IP_HDRLEN)
+
+#define is_multicast_ipv4_addr(ipv4_addr) \
+ (((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0)
+
+
+/*ND IPV6 */
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+static int my_inet_pton_ipv6(int af, const char *src, void *dst);
+static int inet_pton_ipv6(const char *src, unsigned char *dst);
+static int inet_pton_ipv4(const char *src, unsigned char *dst);
+
+uint8_t vnf_common_arp_lib_init;
+uint8_t vnf_common_nd_lib_init;
+uint8_t loadb_pipeline_count;
+
+uint32_t ARPICMP_DEBUG;
+uint32_t NDIPV6_DEBUG;
+
+uint32_t arp_route_tbl_index;
+uint32_t nd_route_tbl_index;
+uint32_t link_hw_addr_array_idx;
+
+uint32_t lib_arp_get_mac_req;
+uint32_t lib_arp_nh_found;
+uint32_t lib_arp_no_nh_found;
+uint32_t lib_arp_arp_entry_found;
+uint32_t lib_arp_no_arp_entry_found;
+uint32_t lib_arp_populate_called;
+uint32_t lib_arp_delete_called;
+uint32_t lib_arp_duplicate_found;
+
+uint32_t lib_nd_get_mac_req;
+uint32_t lib_nd_nh_found;
+uint32_t lib_nd_no_nh_found;
+uint32_t lib_nd_nd_entry_found;
+uint32_t lib_nd_no_arp_entry_found;
+uint32_t lib_nd_populate_called;
+uint32_t lib_nd_delete_called;
+uint32_t lib_nd_duplicate_found;
+
+struct rte_mempool *lib_arp_pktmbuf_tx_pool;
+struct rte_mempool *lib_nd_pktmbuf_tx_pool;
+
+struct rte_mbuf *lib_arp_pkt;
+struct rte_mbuf *lib_nd_pkt;
+
+static struct rte_hash_parameters arp_hash_params = {
+ .name = "ARP",
+ .entries = 64,
+ .reserved = 0,
+ .key_len = sizeof(struct arp_key_ipv4),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+static struct rte_hash_parameters nd_hash_params = {
+ .name = "ND",
+ .entries = 64,
+ .reserved = 0,
+ .key_len = sizeof(struct nd_key_ipv6),
+ .hash_func = rte_jhash,
+ .hash_func_init_val = 0,
+};
+
+struct rte_hash *arp_hash_handle;
+struct rte_hash *nd_hash_handle;
+
+#endif
+/* Shared among all VNFs including LB */
+struct app_params *myApp;
+struct rte_pipeline *myP;
+struct pipeline_arpicmp *gp_arp;
+uint8_t num_vnf_threads;
+
+#ifdef VNF_ACL
+
+struct arp_port_address {
+ uint32_t ip;
+ uint64_t mac_addr;
+};
+
+struct arp_port_address arp_port_addresses[RTE_MAX_ETHPORTS];
+
+uint16_t arp_meta_offset;
+#endif
+
+struct pipeline_arpicmp {
+ struct pipeline p;
+ pipeline_msg_req_handler
+ custom_handlers[PIPELINE_ARPICMP_MSG_REQS];
+ uint64_t receivedPktCount;
+ uint64_t droppedPktCount;
+ uint64_t sentPktCount;
+ uint8_t links_map[PIPELINE_MAX_PORT_IN];
+ uint8_t outport_id[PIPELINE_MAX_PORT_IN];
+ uint8_t pipeline_num;
+} __rte_cache_aligned;
+
+#ifdef VNF_ACL
+
+#define MAX_NUM_ARP_ENTRIES 64
+#define MAX_NUM_ND_ENTRIES 64
+
+
+struct lib_nd_route_table_entry lib_nd_route_table[MAX_ND_RT_ENTRY] = {
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
+ {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, 0,
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
+};
+
+struct lib_arp_route_table_entry lib_arp_route_table[MAX_ARP_RT_ENTRY] = {
+// {0xac102814, 1, 0xac102814},
+// {0xac106414, 0, 0xac106414},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0},
+ {0, 0, 0, 0}
+};
+
+#endif
+
+void pipelines_port_info(void)
+{
+ struct app_params *app = myApp;
+ uint8_t i, pipeline;
+ for (pipeline = 0; pipeline < app->n_pipelines; pipeline++) {
+ printf("*** PIPELINE %d ***\n\n", pipeline);
+
+ printf("*** OUTPORTs ***\n");
+ for (i = 1; i < app->pipeline_params[pipeline].n_pktq_out;
+ i++) {
+ switch (app->pipeline_params[pipeline].pktq_out[i].
+ type) {
+ case APP_PKTQ_OUT_SWQ:
+ printf("pktq_out[%d]:%s\n", i,
+ app->swq_params[app->pipeline_params
+ [pipeline].
+ pktq_out[i].id].name);
+ break;
+ case APP_PKTQ_OUT_HWQ:
+ printf("pktq_out[%d]:%s\n", i,
+ app->hwq_out_params[app->pipeline_params
+ [pipeline].pktq_out
+ [i].id].name);
+ break;
+ default:
+ printf("Not OUT SWQ or HWQ\n");
+ }
+ }
+ printf("*** INPORTs ***\n");
+ for (i = 0; i < app->pipeline_params[pipeline].n_pktq_in; i++) {
+ switch (app->pipeline_params[pipeline].pktq_in[i]
+ .type) {
+ case APP_PKTQ_IN_SWQ:
+ printf("pktq_in[%d]:%s\n", i,
+ app->swq_params[app->pipeline_params
+ [pipeline].
+ pktq_in[i].id].name);
+ break;
+ case APP_PKTQ_IN_HWQ:
+ printf("pktq_in[%d]:%s\n", i,
+ app->hwq_in_params[app->pipeline_params
+ [pipeline].
+ pktq_in[i].id].name);
+ break;
+ default:
+ printf("Not IN SWQ or HWQ\n");
+ }
+ }
+ } //for
+}
+
+void pipelines_map_info(void)
+{
+ int i = 0;
+
+ printf("PIPELINE_MAX_PORT_IN %d\n", PIPELINE_MAX_PORT_IN);
+ printf("lb_outport_id[%d", lb_outport_id[0]);
+ for (i = 1; i < PIPELINE_MAX_PORT_IN; i++)
+ printf(",%d", lb_outport_id[i]);
+ printf("]\n");
+
+ printf("vnf_to_loadb_map[%d", vnf_to_loadb_map[0]);
+ for (i = 1; i < PIPELINE_MAX_PORT_IN; i++)
+ printf(",%d", vnf_to_loadb_map[i]);
+ printf("]\n");
+
+ printf("port_to_loadb_map[%d", port_to_loadb_map[0]);
+ for (i = 1; i < PIPELINE_MAX_PORT_IN; i++)
+ printf(",%d", port_to_loadb_map[i]);
+ printf("]\n");
+
+ printf("loadb_pipeline_nums[%d", loadb_pipeline_nums[0]);
+ for (i = 1; i < PIPELINE_MAX_PORT_IN; i++)
+ printf(",%d", loadb_pipeline_nums[i]);
+ printf("]\n");
+
+ printf("loadb_pipeline[%p", loadb_pipeline[0]);
+ for (i = 1; i < PIPELINE_MAX_PORT_IN; i++)
+ printf(",%p", loadb_pipeline[i]);
+ printf("]\n");
+}
+
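+/*
+ * Walk this pipeline's output queues and record, for every software queue,
+ * the physical port that ultimately sits behind it: ring names are assumed
+ * to follow the "SWQ<n>" convention, and SWQ_to_Port_map[n] carries the
+ * resolved HWQ port id forward to downstream pipelines.
+ */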
+void register_pipeline_Qs(uint8_t pipeline_num, struct pipeline *p)
+{
+ struct rte_port_ethdev_reader *hwq;
+ struct rte_port_ring_writer *out_swq;
+ struct rte_port_ring_reader *in_swq;
+ struct rte_pipeline *rte = p->p;
+ uint8_t port_count = 0;
+ int queue_out = 0xff, queue_in = 0xff;
+
+ printf("Calling register_pipeline_Qs in PIPELINE%d\n", pipeline_num);
+ for (port_count = 0; port_count < rte->num_ports_out; port_count++) {
+
+ switch (myApp->pipeline_params[pipeline_num].
+ pktq_out[port_count].type){
+
+ case APP_PKTQ_OUT_SWQ:
+
+ if (port_count >= rte->num_ports_in) {
+
+ /* Don't register ARP output Q */
+ if (rte->num_ports_out % rte->num_ports_in)
+ if (port_count == rte->num_ports_out - 1)
+ return;
+ int temp;
+ temp = ((port_count) % rte->num_ports_in);
+
+ in_swq = rte->ports_in[temp].h_port;
+ out_swq = rte->ports_out[port_count].h_port;
+ printf("in_swq : %s\n",
+ in_swq->ring->name);
+ int status =
+ sscanf(in_swq->ring->name, "SWQ%d",
+ &queue_in);
+ if (status < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+ printf("out_swq: %s\n",
+ out_swq->ring->name);
+ status =
+ sscanf(out_swq->ring->name, "SWQ%d",
+ &queue_out);
+ if (status < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+ if (queue_in < 128 && queue_out < 128) {
+ SWQ_to_Port_map[queue_out] =
+ SWQ_to_Port_map[queue_in];
+ printf("SWQ_to_Port_map[%d]%d\n", queue_out,
+ SWQ_to_Port_map[queue_out]);
+ }
+ continue;
+ }
+
+ switch (myApp->pipeline_params[pipeline_num].
+ pktq_in[port_count].type){
+
+ case APP_PKTQ_IN_HWQ:
+ hwq = rte->ports_in[port_count].h_port;
+ out_swq = rte->ports_out[port_count].h_port;
+ printf("out_swq: %s\n",
+ out_swq->ring->name);
+ int status =
+ sscanf(out_swq->ring->name, "SWQ%d",
+ &queue_out);
+
+ if (status < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+ if (queue_out < 128) {
+ SWQ_to_Port_map[queue_out] = hwq->port_id;
+ printf("SWQ_to_Port_map[%d]%d\n", queue_out,
+ SWQ_to_Port_map[queue_out]);
+ }
+ break;
+
+ case APP_PKTQ_IN_SWQ:
+ in_swq = rte->ports_in[port_count].h_port;
+ out_swq = rte->ports_out[port_count].h_port;
+ printf("in_swq : %s\n",
+ in_swq->ring->name);
+ status =
+ sscanf(in_swq->ring->name, "SWQ%d",
+ &queue_in);
+ if (status < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+ printf("out_swq: %s\n",
+ out_swq->ring->name);
+ status =
+ sscanf(out_swq->ring->name, "SWQ%d",
+ &queue_out);
+ if (status < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+ if (queue_in < 128 && queue_out < 128){
+ SWQ_to_Port_map[queue_out] =
+ SWQ_to_Port_map[queue_in];
+ printf("SWQ_to_Port_map[%d]%d\n", queue_out,
+ SWQ_to_Port_map[queue_out]);
+ }
+ break;
+
+ default:
+ printf("This never hits\n");
+ }
+
+ break;
+
+ case APP_PKTQ_OUT_HWQ:
+ printf("This is HWQ\n");
+ break;
+
+ default:
+ printf("set_phy_outport_map: This never hits\n");
+ }
+ }
+}
+
+void set_link_map(uint8_t pipeline_num, struct pipeline *p, uint8_t *map)
+{
+ struct rte_port_ethdev_writer *hwq;
+ struct rte_port_ring_writer *out_swq;
+ struct rte_pipeline *rte = p->p;
+
+ uint8_t port_count = 0;
+ int index = 0, queue_out = 0xff;
+
+ printf("Calling set_link_map in PIPELINE%d\n", pipeline_num);
+ for (port_count = 0; port_count < rte->num_ports_out; port_count++) {
+
+ switch (myApp->pipeline_params[pipeline_num].
+ pktq_out[port_count].type){
+
+ case APP_PKTQ_OUT_HWQ:
+ hwq = rte->ports_out[port_count].h_port;
+ map[index++] = hwq->port_id;
+ printf("links_map[%d]:%d\n", index - 1, map[index - 1]);
+ break;
+
+ case APP_PKTQ_OUT_SWQ:
+ out_swq = rte->ports_out[port_count].h_port;
+ printf("set_link_map out_swq: %s\n",
+ out_swq->ring->name);
+ int status = sscanf(out_swq->ring->name, "SWQ%d",
+ &queue_out);
+ if (status < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+
+ if (queue_out < 128) {
+ map[index++] = SWQ_to_Port_map[queue_out];
+ printf("links_map[%s]:%d\n", out_swq->ring->name,
+ map[index - 1]);
+ }
+ break;
+
+ default:
+ printf("set_phy_outport_map: This never hits\n");
+ }
+ }
+}
+
+void set_outport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map)
+{
+ uint8_t port_count = 0;
+ int queue_out = 0xff, index = 0;
+
+ struct rte_port_ethdev_writer *hwq;
+ struct rte_port_ring_writer *out_swq;
+ struct rte_pipeline *rte = p->p;
+
+ printf("\n**** set_outport_id() with pipeline_num:%d ****\n\n",
+ pipeline_num);
+ for (port_count = 0;
+ port_count < rte->num_ports_out;
+ port_count++) {
+
+ switch (myApp->pipeline_params[pipeline_num].
+ pktq_out[port_count].type) {
+
+ case APP_PKTQ_OUT_HWQ:
+ hwq = rte->ports_out[port_count].h_port;
+ map[hwq->port_id] = index++;
+ printf("hwq port_id:%d index:%d\n",
+ hwq->port_id, index - 1);
+ printf("outport_id[%d]:%d\n", hwq->port_id,
+ map[hwq->port_id]);
+ break;
+
+ case APP_PKTQ_OUT_SWQ:
+
+ /* Don't register ARP output Q */
+ if (port_count >= rte->num_ports_in)
+ if (rte->num_ports_out % rte->num_ports_in)
+ if (port_count == rte->num_ports_out - 1)
+ return;
+ out_swq = rte->ports_out[port_count].h_port;
+ printf("set_outport_id out_swq: %s\n",
+ out_swq->ring->name);
+ int temp = sscanf(out_swq->ring->name, "SWQ%d",
+ &queue_out);
+ if (temp < 0) {
+ printf("Unable to read SWQ number\n");
+ return;
+ }
+
+ if (queue_out < 128 && index >= 0) {
+ map[SWQ_to_Port_map[queue_out]] = index++;
+ printf("outport_id[%s]:%d\n", out_swq->ring->name,
+ map[SWQ_to_Port_map[queue_out]]);
+ }
+ break;
+
+ default:
+ printf(" ");
+
+ }
+ }
+}
+
+void set_phy_outport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map)
+{
+ uint8_t port_count = 0;
+ int index = 0;
+
+ struct rte_port_ethdev_writer *hwq;
+ struct rte_pipeline *rte = p->p;
+
+ printf("\n**** set_phy_outport_id() with pipeline_num:%d ****\n\n",
+ pipeline_num);
+ for (port_count = 0;
+ port_count < myApp->pipeline_params[pipeline_num].n_pktq_out;
+ port_count++) {
+
+ switch (myApp->pipeline_params[pipeline_num].
+ pktq_out[port_count].type) {
+
+ case APP_PKTQ_OUT_HWQ:
+ hwq = rte->ports_out[port_count].h_port;
+ map[hwq->port_id] = index++;
+ printf("outport_id[%d]:%d\n", index - 1,
+ map[index - 1]);
+ break;
+
+ default:
+ printf(" ");
+
+ }
+ }
+}
+
+void set_phy_inport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map)
+{
+ uint8_t port_count = 0;
+ int index = 0;
+
+ struct rte_port_ethdev_reader *hwq;
+ struct rte_pipeline *rte = p->p;
+
+ printf("\n**** set_phy_inport_id() with pipeline_num:%d ****\n\n",
+ pipeline_num);
+ for (port_count = 0;
+ port_count < myApp->pipeline_params[pipeline_num].n_pktq_in;
+ port_count++) {
+
+ switch (myApp->pipeline_params[pipeline_num].
+ pktq_in[port_count].type) {
+
+ case APP_PKTQ_IN_HWQ:
+ hwq = rte->ports_in[port_count].h_port;
+ map[hwq->port_id] = index++;
+ printf("outport_id[%d]:%d\n", index - 1,
+ map[index - 1]);
+ break;
+
+ default:
+ printf(" ");
+
+ }
+ }
+}
+
+#ifdef VNF_ACL
+
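+/*
+ * Linear scan of the static ARP route table: the first entry whose masked
+ * network matches the destination IP wins (no longest-prefix ordering);
+ * returns the next-hop IP and fills in the output port, or 0 if no route.
+ */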
+uint32_t get_nh(uint32_t ip, uint32_t *port)
+{
+ int i = 0;
+ for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
+ if (((lib_arp_route_table[i].
+ ip & lib_arp_route_table[i].mask) ==
+ (ip & lib_arp_route_table[i].mask))) {
+
+ *port = lib_arp_route_table[i].port;
+ lib_arp_nh_found++;
+ return lib_arp_route_table[i].nh;
+ }
+ if (ARPICMP_DEBUG > 1)
+ printf("No nh match ip 0x%x, port %u, t_ip "
+ "0x%x, t_port %u, mask 0x%x, r1 %x, r2 %x\n",
+ ip, *port, lib_arp_route_table[i].ip,
+ lib_arp_route_table[i].port,
+ lib_arp_route_table[i].mask,
+ (lib_arp_route_table[i].ip &
+ lib_arp_route_table[i].mask),
+ (ip & lib_arp_route_table[i].mask));
+ }
+ if (ARPICMP_DEBUG && ip)
+ printf("No NH - ip 0x%x, port %u\n", ip, *port);
+ lib_arp_no_nh_found++;
+ return 0;
+}
+
+/*ND IPv6 */
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[])
+{
+ int i = 0;
+ uint8_t netmask_ipv6[16], netip_nd[16], netip_in[16];
+ uint8_t k = 0, l = 0, depthflags = 0, depthflags1 = 0;
+ memset(netmask_ipv6, 0, sizeof(netmask_ipv6));
+ memset(netip_nd, 0, sizeof(netip_nd));
+ memset(netip_in, 0, sizeof(netip_in));
+ if (!ipv6)
+ return;
+ for (i = 0; i < MAX_ARP_RT_ENTRY; i++) {
+
+ convert_prefixlen_to_netmask_ipv6(
+ lib_nd_route_table[i].depth,
+ netmask_ipv6);
+
+ for (k = 0; k < 16; k++) {
+ if (lib_nd_route_table[i].ipv6[k] & netmask_ipv6[k]) {
+ depthflags++;
+ netip_nd[k] = lib_nd_route_table[i].ipv6[k];
+ }
+ }
+
+ for (l = 0; l < 16; l++) {
+ if (ipv6[l] & netmask_ipv6[l]) {
+ depthflags1++;
+ netip_in[l] = ipv6[l];
+ }
+ }
+ int j = 0;
+ if ((depthflags == depthflags1)
+ && (memcmp(netip_nd, netip_in,
+ sizeof(netip_nd)) == 0)) {
+ //&& (lib_nd_route_table[i].port == port))
+ *port = lib_nd_route_table[i].port;
+ lib_nd_nh_found++;
+
+ for (j = 0; j < 16; j++)
+ nhipv6[j] = lib_nd_route_table[i].nhipv6[j];
+
+ return;
+ }
+
+ if (NDIPV6_DEBUG > 1)
+ printf("No nh match\n");
+ depthflags = 0;
+ depthflags1 = 0;
+ }
+ if (NDIPV6_DEBUG && ipv6)
+ printf("No NH - ip 0x%x, port %u\n", ipv6[0], *port);
+ lib_nd_no_nh_found++;
+}
+
+/* Added for Multiport changes*/
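+/*
+ * Resolve the destination MAC for ipaddr in two stages: a route-table
+ * lookup for the next hop and egress port, then an ARP-cache lookup for
+ * that next hop. Returns ARP_FOUND on success, otherwise NH_NOT_FOUND or
+ * ARP_NOT_FOUND.
+ */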
+int get_dest_mac_addr_port(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr)
+{
+ lib_arp_get_mac_req++;
+ uint32_t nhip = 0;
+
+ nhip = get_nh(ipaddr, phy_port);
+ if (nhip == 0) {
+ if (ARPICMP_DEBUG && ipaddr)
+ printf("ARPICMP no nh found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ //return 0;
+ return NH_NOT_FOUND;
+ }
+
+ struct arp_entry_data *ret_arp_data = NULL;
+ struct arp_key_ipv4 tmp_arp_key;
+ tmp_arp_key.port_id = *phy_port;/* Changed for Multi Port*/
+ tmp_arp_key.ip = nhip;
+
+ ret_arp_data = retrieve_arp_entry(tmp_arp_key);
+ if (ret_arp_data == NULL) {
+ if (ARPICMP_DEBUG && ipaddr) {
+ printf
+ ("ARPICMP no arp entry found for ip %x, port %d\n",
+ ipaddr, *phy_port);
+ print_arp_table();
+ }
+ lib_arp_no_arp_entry_found++;
+ return ARP_NOT_FOUND;
+ }
+ ether_addr_copy(&ret_arp_data->eth_addr, hw_addr);
+ lib_arp_arp_entry_found++;
+ return ARP_FOUND;
+}
+
+/*ND IPv6 */
+int get_dest_mac_address_ipv6(uint8_t ipv6addr[], uint32_t phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[])
+{
+ int i = 0, j = 0, flag = 0;
+ lib_nd_get_mac_req++;
+
+ if (ipv6addr)
+ get_nh_ipv6(ipv6addr, &phy_port, nhipv6);
+ for (j = 0; j < 16; j++) {
+ if (nhipv6[j])
+ flag++;
+ }
+ if (flag == 0) {
+ if (ipv6addr) {
+ if (NDIPV6_DEBUG && ipv6addr)
+ printf("NDIPV6 no nh found for ipv6 "
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x%02x, port %d\n",
+ ipv6addr[0], ipv6addr[1], ipv6addr[2], ipv6addr[3],
+ ipv6addr[4], ipv6addr[5], ipv6addr[6], ipv6addr[7],
+ ipv6addr[8], ipv6addr[9], ipv6addr[10],
+ ipv6addr[11], ipv6addr[12], ipv6addr[13],
+ ipv6addr[14], ipv6addr[15], phy_port);
+ return 0;
+ }
+ }
+
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ tmp_nd_key.port_id = phy_port;
+
+ for (i = 0; i < 16; i++)
+ tmp_nd_key.ipv6[i] = nhipv6[i];
+
+ ret_nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (ret_nd_data == NULL) {
+ if (NDIPV6_DEBUG && ipv6addr) {
+ printf("NDIPV6 no nd entry found for ip %x, port %d\n",
+ ipv6addr[0], phy_port);
+ }
+ lib_nd_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_nd_data->eth_addr, hw_addr);
+ lib_nd_nd_entry_found++;
+ return 1;
+
+}
+
+/*ND IPv6 */
+int get_dest_mac_address_ipv6_port(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[])
+{
+ int i = 0, j = 0, flag = 0;
+ lib_nd_get_mac_req++;
+
+ get_nh_ipv6(ipv6addr, phy_port, nhipv6);
+ for (j = 0; j < 16; j++) {
+ if (nhipv6[j])
+ flag++;
+ }
+ if (flag == 0) {
+ if (NDIPV6_DEBUG && ipv6addr)
+ printf("NDIPV6 no nh found for ipv6 "
+ "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x, port %d\n",
+ ipv6addr[0], ipv6addr[1], ipv6addr[2], ipv6addr[3],
+ ipv6addr[4], ipv6addr[5], ipv6addr[6], ipv6addr[7],
+ ipv6addr[8], ipv6addr[9], ipv6addr[10],
+ ipv6addr[11], ipv6addr[12], ipv6addr[13],
+ ipv6addr[14], ipv6addr[15], *phy_port);
+ return 0;
+ }
+
+ struct nd_entry_data *ret_nd_data = NULL;
+ struct nd_key_ipv6 tmp_nd_key;
+ tmp_nd_key.port_id = *phy_port;
+
+ for (i = 0; i < 16; i++)
+ tmp_nd_key.ipv6[i] = nhipv6[i];
+
+ ret_nd_data = retrieve_nd_entry(tmp_nd_key);
+ if (ret_nd_data == NULL) {
+ if (NDIPV6_DEBUG && ipv6addr) {
+ printf("NDIPV6 no nd entry found for ip %x, port %d\n",
+ ipv6addr[0], *phy_port);
+ }
+ lib_nd_no_arp_entry_found++;
+ return 0;
+ }
+ ether_addr_copy(&ret_nd_data->eth_addr, hw_addr);
+ lib_nd_nd_entry_found++;
+ return 1;
+
+}
+
+/*
+ * ARP table
+ */
+struct lib_arp_arp_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint64_t macaddr;
+};
+
+static const char *arp_op_name(uint16_t arp_op)
+{
+ switch (CHECK_ENDIAN_16(arp_op)) {
+ case (ARP_OP_REQUEST):
+ return "ARP Request";
+ case (ARP_OP_REPLY):
+ return "ARP Reply";
+ case (ARP_OP_REVREQUEST):
+ return "Reverse ARP Request";
+ case (ARP_OP_REVREPLY):
+ return "Reverse ARP Reply";
+ case (ARP_OP_INVREQUEST):
+ return "Peer Identify Request";
+ case (ARP_OP_INVREPLY):
+ return "Peer Identify Reply";
+ default:
+ break;
+ }
+ return "Unkwown ARP op";
+}
+
+static void print_icmp_packet(struct icmp_hdr *icmp_h)
+{
+ printf(" ICMP: type=%d (%s) code=%d id=%d seqnum=%d\n",
+ icmp_h->icmp_type,
+ (icmp_h->icmp_type == IP_ICMP_ECHO_REPLY ? "Reply" :
+ (icmp_h->icmp_type ==
+ IP_ICMP_ECHO_REQUEST ? "Request" : "Undef")), icmp_h->icmp_code,
+ CHECK_ENDIAN_16(icmp_h->icmp_ident),
+ CHECK_ENDIAN_16(icmp_h->icmp_seq_nb));
+}
+
+static void print_ipv4_h(struct ipv4_hdr *ip_h)
+{
+ struct icmp_hdr *icmp_h =
+ (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+ printf(" IPv4: Version=%d HLEN=%d Type=%d Length=%d\n",
+ (ip_h->version_ihl & 0xf0) >> 4, (ip_h->version_ihl & 0x0f),
+ ip_h->type_of_service, rte_cpu_to_be_16(ip_h->total_length));
+ if (ip_h->next_proto_id == IPPROTO_ICMP)
+ print_icmp_packet(icmp_h);
+}
+
+static void print_arp_packet(struct arp_hdr *arp_h)
+{
+ printf(" ARP: hrd=%d proto=0x%04x hln=%d "
+ "pln=%d op=%u (%s)\n",
+ CHECK_ENDIAN_16(arp_h->arp_hrd),
+ CHECK_ENDIAN_16(arp_h->arp_pro), arp_h->arp_hln,
+ arp_h->arp_pln, CHECK_ENDIAN_16(arp_h->arp_op),
+ arp_op_name(arp_h->arp_op));
+
+ if (CHECK_ENDIAN_16(arp_h->arp_hrd) != ARP_HRD_ETHER) {
+ printf("incorrect arp_hrd format for IPv4 ARP (%d)\n",
+ (arp_h->arp_hrd));
+ } else if (CHECK_ENDIAN_16(arp_h->arp_pro) != ETHER_TYPE_IPv4) {
+ printf("incorrect arp_pro format for IPv4 ARP (%d)\n",
+ (arp_h->arp_pro));
+ } else if (arp_h->arp_hln != 6) {
+ printf("incorrect arp_hln format for IPv4 ARP (%d)\n",
+ arp_h->arp_hln);
+ } else if (arp_h->arp_pln != 4) {
+ printf("incorrect arp_pln format for IPv4 ARP (%d)\n",
+ arp_h->arp_pln);
+ } else {
+ // print remainder of ARP request
+ printf(" sha=%02X:%02X:%02X:%02X:%02X:%02X",
+ arp_h->arp_data.arp_sha.addr_bytes[0],
+ arp_h->arp_data.arp_sha.addr_bytes[1],
+ arp_h->arp_data.arp_sha.addr_bytes[2],
+ arp_h->arp_data.arp_sha.addr_bytes[3],
+ arp_h->arp_data.arp_sha.addr_bytes[4],
+ arp_h->arp_data.arp_sha.addr_bytes[5]);
+ printf(" sip=%d.%d.%d.%d\n",
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 24) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 16) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) >> 8) & 0xFF,
+ CHECK_ENDIAN_32(arp_h->arp_data.arp_sip) & 0xFF);
+ printf(" tha=%02X:%02X:%02X:%02X:%02X:%02X",
+ arp_h->arp_data.arp_tha.addr_bytes[0],
+ arp_h->arp_data.arp_tha.addr_bytes[1],
+ arp_h->arp_data.arp_tha.addr_bytes[2],
+ arp_h->arp_data.arp_tha.addr_bytes[3],
+ arp_h->arp_data.arp_tha.addr_bytes[4],
+ arp_h->arp_data.arp_tha.addr_bytes[5]);
+ printf(" tip=%d.%d.%d.%d\n",
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 24) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 16) & 0xFF,
+ (CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) >> 8) & 0xFF,
+ CHECK_ENDIAN_32(arp_h->arp_data.arp_tip) & 0xFF);
+ }
+}
+
+static void print_eth(struct ether_hdr *eth_h)
+{
+ printf(" ETH: src=%02X:%02X:%02X:%02X:%02X:%02X",
+ eth_h->s_addr.addr_bytes[0],
+ eth_h->s_addr.addr_bytes[1],
+ eth_h->s_addr.addr_bytes[2],
+ eth_h->s_addr.addr_bytes[3],
+ eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5]);
+ printf(" dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
+ eth_h->d_addr.addr_bytes[0],
+ eth_h->d_addr.addr_bytes[1],
+ eth_h->d_addr.addr_bytes[2],
+ eth_h->d_addr.addr_bytes[3],
+ eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5]);
+
+}
+
+static void
+print_mbuf(const char *rx_tx, unsigned int portid, struct rte_mbuf *mbuf,
+ unsigned int line)
+{
+ struct ether_hdr *eth_h = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ struct arp_hdr *arp_h =
+ (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ struct ipv4_hdr *ipv4_h =
+ (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+
+ printf("%s(%d): on port %d pkt-len=%u nb-segs=%u\n",
+ rx_tx, line, portid, mbuf->pkt_len, mbuf->nb_segs);
+ print_eth(eth_h);
+ switch (rte_cpu_to_be_16(eth_h->ether_type)) {
+ case ETHER_TYPE_IPv4:
+ print_ipv4_h(ipv4_h);
+ break;
+ case ETHER_TYPE_ARP:
+ print_arp_packet(arp_h);
+ break;
+ default:
+ printf(" unknown packet type\n");
+ break;
+ }
+ fflush(stdout);
+}
+
+struct arp_entry_data *retrieve_arp_entry(struct arp_key_ipv4 arp_key)
+{
+ struct arp_entry_data *ret_arp_data = NULL;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ int ret = rte_hash_lookup_data(arp_hash_handle, &arp_key,
+ (void **)&ret_arp_data);
+ if (ret < 0) {
+ if (ARPICMP_DEBUG)
+ printf("arp-hash lookup failed ret %d, "
+ "EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ } else {
+ return ret_arp_data;
+ }
+
+ return NULL;
+}
+
+/*
+* ND IPv6
+* Validate if key-value pair already exists in the hash table
+* for given key - ND IPv6
+*
+*/
+struct nd_entry_data *retrieve_nd_entry(struct nd_key_ipv6 nd_key)
+{
+ struct nd_entry_data *ret_nd_data = NULL;
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ /*Find a nd IPv6 key-data pair in the hash table for ND IPv6 */
+ int ret = rte_hash_lookup_data(nd_hash_handle, &nd_key,
+ (void **)&ret_nd_data);
+ if (ret < 0) {
+ if (NDIPV6_DEBUG)
+ printf("nd-hash: no lookup Entry Found - "
+ "ret %d, EINVAL %d, ENOENT %d\n",
+ ret, EINVAL, ENOENT);
+ } else {
+ return ret_nd_data;
+ }
+
+ return NULL;
+}
+
+void print_arp_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ printf("\tport hw addr status ip addr\n");
+
+ while (rte_hash_iterate(arp_hash_handle, &next_key, &next_data, &iter)
+ >= 0) {
+
+ struct arp_entry_data *tmp_arp_data =
+ (struct arp_entry_data *)next_data;
+ struct arp_key_ipv4 tmp_arp_key;
+ memcpy(&tmp_arp_key, next_key, sizeof(struct arp_key_ipv4));
+ printf
+ ("\t%4d %02X:%02X:%02X:%02X:%02X:%02X %10s %d.%d.%d.%d\n",
+ tmp_arp_data->port, tmp_arp_data->eth_addr.addr_bytes[0],
+ tmp_arp_data->eth_addr.addr_bytes[1],
+ tmp_arp_data->eth_addr.addr_bytes[2],
+ tmp_arp_data->eth_addr.addr_bytes[3],
+ tmp_arp_data->eth_addr.addr_bytes[4],
+ tmp_arp_data->eth_addr.addr_bytes[5],
+ tmp_arp_data->status ==
+ COMPLETE ? "COMPLETE" : "INCOMPLETE",
+ (tmp_arp_data->ip >> 24),
+ ((tmp_arp_data->ip & 0x00ff0000) >> 16),
+ ((tmp_arp_data->ip & 0x0000ff00) >> 8),
+ ((tmp_arp_data->ip & 0x000000ff)));
+ }
+
+ uint32_t i = 0;
+ printf("\nARP routing table has %d entries\n", arp_route_tbl_index);
+ printf("\nIP_Address Mask Port NH_IP_Address\n");
+ for (i = 0; i < arp_route_tbl_index; i++) {
+ printf("0x%x 0x%x %d 0x%x\n",
+ lib_arp_route_table[i].ip,
+ lib_arp_route_table[i].mask,
+ lib_arp_route_table[i].port, lib_arp_route_table[i].nh);
+ }
+
+ printf("\nARP Stats: Total Queries %u, ok_NH %u, no_NH %u, "
+ "ok_Entry %u, no_Entry %u, PopulateCall %u, Del %u, Dup %u\n",
+ lib_arp_get_mac_req, lib_arp_nh_found, lib_arp_no_nh_found,
+ lib_arp_arp_entry_found, lib_arp_no_arp_entry_found,
+ lib_arp_populate_called, lib_arp_delete_called,
+ lib_arp_duplicate_found);
+
+ printf("ARP table key len is %lu\n", sizeof(struct arp_key_ipv4));
+}
+
+/* ND IPv6 */
+void print_nd_table(void)
+{
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ uint8_t ii = 0, j = 0, k = 0;
+
+ printf("\tport hw addr status ip addr\n");
+
+ while (rte_hash_iterate(nd_hash_handle, &next_key, &next_data, &iter) >=
+ 0) {
+
+ struct nd_entry_data *tmp_nd_data =
+ (struct nd_entry_data *)next_data;
+ struct nd_key_ipv6 tmp_nd_key;
+ memcpy(&tmp_nd_key, next_key, sizeof(struct nd_key_ipv6));
+ printf("\t%4d %02X:%02X:%02X:%02X:%02X:%02X %10s\n",
+ tmp_nd_data->port,
+ tmp_nd_data->eth_addr.addr_bytes[0],
+ tmp_nd_data->eth_addr.addr_bytes[1],
+ tmp_nd_data->eth_addr.addr_bytes[2],
+ tmp_nd_data->eth_addr.addr_bytes[3],
+ tmp_nd_data->eth_addr.addr_bytes[4],
+ tmp_nd_data->eth_addr.addr_bytes[5],
+ tmp_nd_data->status ==
+ COMPLETE ? "COMPLETE" : "INCOMPLETE");
+ printf("\t\t\t\t\t\t");
+ for (ii = 0; ii < ND_IPV6_ADDR_SIZE; ii += 2) {
+ printf("%02X%02X ", tmp_nd_data->ipv6[ii],
+ tmp_nd_data->ipv6[ii + 1]);
+ }
+ printf("\n");
+ }
+
+ uint32_t i = 0;
+ printf("\n\nND IPV6 routing table has %d entries\n",
+ nd_route_tbl_index);
+ printf("\nIP_Address Depth Port NH_IP_Address\n");
+ for (i = 0; i < nd_route_tbl_index; i++) {
+ printf("\n");
+
+ for (j = 0; j < ND_IPV6_ADDR_SIZE; j += 2) {
+ printf("%02X%02X ", lib_nd_route_table[i].ipv6[j],
+ lib_nd_route_table[i].ipv6[j + 1]);
+ }
+
+ printf
+ ("\n\t\t\t %d %d\n",
+ lib_nd_route_table[i].depth, lib_nd_route_table[i].port);
+ printf("\t\t\t\t\t\t\t\t\t");
+ for (k = 0; k < ND_IPV6_ADDR_SIZE; k += 2) {
+ printf("%02X%02X ", lib_nd_route_table[i].nhipv6[k],
+ lib_nd_route_table[i].nhipv6[k + 1]);
+ }
+ }
+ printf("\nND IPV6 Stats:\nTotal Queries %u, ok_NH %u, no_NH %u,"
+ "ok_Entry %u, no_Entry %u, PopulateCall %u, Del %u, Dup %u\n",
+ lib_nd_get_mac_req, lib_nd_nh_found, lib_nd_no_nh_found,
+ lib_nd_nd_entry_found, lib_nd_no_arp_entry_found,
+ lib_nd_populate_called, lib_nd_delete_called,
+ lib_nd_duplicate_found);
+ printf("ND table key len is %lu\n\n", sizeof(struct nd_key_ipv6));
+}
+
+void remove_arp_entry(uint32_t ipaddr, uint8_t portid)
+{
+
+ /* need to lock here if multi-threaded... */
+ /* rte_hash_del_key is not thread safe */
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = portid;
+ arp_key.ip = ipaddr;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ lib_arp_delete_called++;
+
+ if (ARPICMP_DEBUG)
+ printf("remove_arp_entry ip %x, port %d\n", arp_key.ip,
+ arp_key.port_id);
+ rte_hash_del_key(arp_hash_handle, &arp_key);
+}
+
+/* ND IPv6 */
+void remove_nd_entry_ipv6(uint8_t ipv6addr[], uint8_t portid)
+{
+ /* need to lock here if multi-threaded */
+ /* rte_hash_del_key is not thread safe */
+ int i = 0;
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = portid;
+ /* arp_key.ip = rte_bswap32(ipaddr); */
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ nd_key.ipv6[i] = ipv6addr[i];
+
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ lib_nd_delete_called++;
+
+ if (NDIPV6_DEBUG) {
+ printf("Deletes rte hash table nd entry for port %d ipv6=",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2)
+ printf("%02X%02X ", nd_key.ipv6[i], nd_key.ipv6[i + 1]);
+ }
+ rte_hash_del_key(nd_hash_handle, &nd_key);
+}
+
+void
+populate_arp_entry(const struct ether_addr *hw_addr, uint32_t ipaddr,
+ uint8_t portid)
+{
+ /* need to lock here if multi-threaded */
+ /* rte_hash_add_key_data is not thread safe */
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = portid;
+ arp_key.ip = ipaddr;
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ lib_arp_populate_called++;
+
+ if (ARPICMP_DEBUG)
+ printf("populate_arp_entry ip %x, port %d\n", arp_key.ip,
+ arp_key.port_id);
+ struct arp_entry_data *new_arp_data = retrieve_arp_entry(arp_key);
+ if (new_arp_data
+ && is_same_ether_addr(&new_arp_data->eth_addr, hw_addr)) {
+ if (ARPICMP_DEBUG)
+ printf("arp_entry exists ip%x, port %d\n", arp_key.ip,
+ arp_key.port_id);
+ lib_arp_duplicate_found++;
+ return;
+ }
+ new_arp_data = (struct arp_entry_data *)
+ malloc(sizeof(struct arp_entry_data));
+ if (new_arp_data == NULL) {
+ printf("populate_arp_entry:new_arp_data is NULL\n");
+ return;
+ }
+ new_arp_data->eth_addr = *hw_addr;
+ new_arp_data->status = INCOMPLETE;
+ new_arp_data->port = portid;
+ new_arp_data->ip = ipaddr;
+ rte_hash_add_key_data(arp_hash_handle, &arp_key, new_arp_data);
+
+ if (ARPICMP_DEBUG) {
+ // print entire hash table
+ printf("\tARP: table update - hwaddr= "
+ "%02x:%02x:%02x:%02x:%02x:%02x ip=%d.%d.%d.%d "
+ "on port=%d\n",
+ new_arp_data->eth_addr.addr_bytes[0],
+ new_arp_data->eth_addr.addr_bytes[1],
+ new_arp_data->eth_addr.addr_bytes[2],
+ new_arp_data->eth_addr.addr_bytes[3],
+ new_arp_data->eth_addr.addr_bytes[4],
+ new_arp_data->eth_addr.addr_bytes[5],
+ (arp_key.ip >> 24),
+ ((arp_key.ip & 0x00ff0000) >> 16),
+ ((arp_key.ip & 0x0000ff00) >> 8),
+ ((arp_key.ip & 0x000000ff)), portid);
+ /* print_arp_table(); */
+ puts("");
+ }
+}
+
+/*
+* ND IPv6
+*
+* Install key - data pair in Hash table - From Pipeline Configuration
+*
+*/
+int
+populate_nd_entry(const struct ether_addr *hw_addr, uint8_t ipv6[],
+ uint8_t portid)
+{
+
+ /* need to lock here if multi-threaded */
+ /* rte_hash_add_key_data is not thread safe */
+ uint8_t i;
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = portid;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++ /*i+=2 */)
+ nd_key.ipv6[i] = ipv6[i];
+
+ printf("\n");
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ lib_nd_populate_called++;
+
+ /*Validate if key-value pair already
+ * exists in the hash table for ND IPv6
+ */
+ struct nd_entry_data *new_nd_data = retrieve_nd_entry(nd_key);
+
+ if (new_nd_data && is_same_ether_addr(&new_nd_data->eth_addr,
+ hw_addr)) {
+
+ if (NDIPV6_DEBUG) {
+ printf("nd_entry exists port %d ipv6 = ",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ printf("%02X%02X ", nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ }
+
+ lib_nd_duplicate_found++;
+ if (NDIPV6_DEBUG)
+ printf("nd_entry exists\n");
+ return 0;
+ }
+
+ new_nd_data = (struct nd_entry_data *)
+ malloc(sizeof(struct nd_entry_data));
+ if (new_nd_data == NULL) {
+ printf("populate_nd_entry: new_nd_data is NULL\n");
+ return 0;
+ }
+ new_nd_data->eth_addr = *hw_addr;
+ new_nd_data->status = COMPLETE;
+ new_nd_data->port = portid;
+
+ if (NDIPV6_DEBUG)
+ printf("populate_nd_entry ipv6=");
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++ /*i+=2 */)
+ new_nd_data->ipv6[i] = ipv6[i];
+
+ if (NDIPV6_DEBUG) {
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+
+ printf("%02X%02X ", new_nd_data->ipv6[i],
+ new_nd_data->ipv6[i + 1]);
+ }
+ }
+
+ /*Add a key-data pair at hash table for ND IPv6 static routing */
+ rte_hash_add_key_data(nd_hash_handle, &nd_key, new_nd_data);
+
+ if (NDIPV6_DEBUG)
+ printf("\n....Added a key-data pair at rte hash table "
+ "for ND IPv6 static routing\n");
+
+ if (NDIPV6_DEBUG) {
+ /* print entire hash table */
+ printf("\tND: table update - hwaddr= "
+ "%02x:%02x:%02x:%02x:%02x:%02x on port=%d\n",
+ new_nd_data->eth_addr.addr_bytes[0],
+ new_nd_data->eth_addr.addr_bytes[1],
+ new_nd_data->eth_addr.addr_bytes[2],
+ new_nd_data->eth_addr.addr_bytes[3],
+ new_nd_data->eth_addr.addr_bytes[4],
+ new_nd_data->eth_addr.addr_bytes[5], portid);
+ printf("\tipv6=");
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ printf("%02X%02X ", new_nd_data->ipv6[i],
+ new_nd_data->ipv6[i + 1]);
+ }
+
+ printf("\n");
+
+ puts("");
+ }
+ return 1;
+}
+
+void print_pkt1(struct rte_mbuf *pkt)
+{
+ uint8_t *rd = RTE_MBUF_METADATA_UINT8_PTR(pkt, 0);
+ int i = 0, j = 0;
+ printf("\nPacket Contents...\n");
+ for (i = 0; i < 20; i++) {
+ for (j = 0; j < 20; j++)
+ printf("%02x ", rd[(20 * i) + j]);
+ printf("\n");
+ }
+}
+
+struct ether_addr broadcast_ether_addr = {
+ .addr_bytes[0] = 0xFF,
+ .addr_bytes[1] = 0xFF,
+ .addr_bytes[2] = 0xFF,
+ .addr_bytes[3] = 0xFF,
+ .addr_bytes[4] = 0xFF,
+ .addr_bytes[5] = 0xFF,
+};
+
+static const struct ether_addr null_ether_addr = {
+ .addr_bytes[0] = 0x00,
+ .addr_bytes[1] = 0x00,
+ .addr_bytes[2] = 0x00,
+ .addr_bytes[3] = 0x00,
+ .addr_bytes[4] = 0x00,
+ .addr_bytes[5] = 0x00,
+};
+
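+/* Static per-output-port destination MAC table used by get_link_hw_addr().
+ * The placeholder entries below are meant to be overwritten from the
+ * ports_mac_list pipeline argument (see pipeline_arpicmp_parse_args()).
+ */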
+#define MAX_NUM_MAC_ADDRESS 16
+struct ether_addr link_hw_addr[MAX_NUM_MAC_ADDRESS] = {
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc8} },
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc9} },
+{.addr_bytes = {0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x90, 0xe2, 0xba, 0x54, 0x67, 0xc9} },
+{.addr_bytes = {0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x12, 0x13, 0x14, 0x15, 0x16, 0x17} },
+{.addr_bytes = {0x22, 0x33, 0x44, 0x55, 0x66, 0x77} },
+{.addr_bytes = {0x18, 0x19, 0x1a, 0x1b, 0xcd, 0xef} }
+};
+
+struct ether_addr *get_link_hw_addr(uint8_t out_port)
+{
+ return &link_hw_addr[out_port];
+}
+
+static void
+request_icmp_echo(unsigned int port_id, uint32_t ip, struct ether_addr *gw_addr)
+{
+ struct ether_hdr *eth_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+
+ struct app_link_params *link;
+ link = &myApp->link_params[port_id];
+ arp_port_addresses[port_id].ip = link->ip;
+ arp_port_addresses[port_id].mac_addr = link->mac_addr;
+
+ struct rte_mbuf *icmp_pkt = lib_arp_pkt;
+ if (icmp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmp_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmp_pkt, struct ether_hdr *);
+ ether_addr_copy(gw_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &arp_port_addresses[port_id].mac_addr, &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+
+ ip_h = (struct ipv4_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmp_h = (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ ip_h->version_ihl = IP_VHL_DEF;
+ ip_h->type_of_service = 0;
+ ip_h->total_length =
+ rte_cpu_to_be_16(sizeof(struct ipv4_hdr) + sizeof(struct icmp_hdr));
+ ip_h->packet_id = 0xaabb;
+ ip_h->fragment_offset = 0x0000;
+ ip_h->time_to_live = 64;
+ ip_h->next_proto_id = IPPROTO_ICMP;
+ ip_h->src_addr = rte_bswap32(arp_port_addresses[port_id].ip);
+ ip_h->dst_addr = ip;
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = rte_ipv4_cksum(ip_h);
+
+ icmp_h->icmp_type = IP_ICMP_ECHO_REQUEST;
+ icmp_h->icmp_code = 0;
+ icmp_h->icmp_ident = 0xdead;
+ icmp_h->icmp_seq_nb = 0xbeef;
+
+ icmp_h->icmp_cksum = 0;
+ icmp_h->icmp_cksum = ~rte_raw_cksum(icmp_h, sizeof(struct icmp_hdr));
+
+ icmp_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) +
+ sizeof(struct icmp_hdr);
+ icmp_pkt->data_len = icmp_pkt->pkt_len;
+
+ if (ARPICMP_DEBUG) {
+ printf("Sending echo request\n");
+ print_mbuf("TX", port_id, icmp_pkt, __LINE__);
+ }
+
+ rte_pipeline_port_out_packet_insert(gp_arp->p.p,
+ gp_arp->outport_id[port_id], icmp_pkt);
+ gp_arp->sentPktCount++;
+}
+
+void request_echo(unsigned int port_id, uint32_t ip)
+{
+ (void)port_id;
+ (void)ip;
+
+ struct ether_addr gw_addr;
+ uint32_t dest_ip = rte_bswap32(ip);
+ uint32_t phy_port;
+
+ if (get_dest_mac_addr_port(dest_ip, &phy_port, &gw_addr) == ARP_FOUND) {
+ request_icmp_echo(phy_port, ip, &gw_addr);
+ return;
+ }
+
+ if (ARPICMP_DEBUG)
+ printf("Sending echo request ... get mac failed.\n");
+}
+
+void request_arp(uint8_t port_id, uint32_t ip, struct rte_pipeline *rte_p)
+{
+ (void)port_id;
+ (void)ip;
+
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+
+ struct app_link_params *link;
+ link = &myApp->link_params[port_id];
+ arp_port_addresses[port_id].ip = link->ip;
+ arp_port_addresses[port_id].mac_addr = link->mac_addr;
+
+ struct rte_mbuf *arp_pkt = lib_arp_pkt;
+
+ if (arp_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating arp_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(arp_pkt, struct ether_hdr *);
+
+ ether_addr_copy(&broadcast_ether_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &arp_port_addresses[port_id].mac_addr, &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_ARP);
+
+ arp_h = (struct arp_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ arp_h->arp_hrd = CHECK_ENDIAN_16(ARP_HRD_ETHER);
+ arp_h->arp_pro = CHECK_ENDIAN_16(ETHER_TYPE_IPv4);
+ arp_h->arp_hln = ETHER_ADDR_LEN;
+ arp_h->arp_pln = sizeof(uint32_t);
+ arp_h->arp_op = CHECK_ENDIAN_16(ARP_OP_REQUEST);
+
+ ether_addr_copy((struct ether_addr *)
+ &arp_port_addresses[port_id].mac_addr,
+ &arp_h->arp_data.arp_sha);
+ arp_h->arp_data.arp_sip =
+ rte_cpu_to_be_32(arp_port_addresses[port_id].ip);
+ ether_addr_copy(&null_ether_addr, &arp_h->arp_data.arp_tha);
+ arp_h->arp_data.arp_tip = rte_cpu_to_be_32(ip);
+ printf("arp tip:%x arp sip :%x\n", arp_h->arp_data.arp_tip,
+ arp_h->arp_data.arp_sip);
+ /* mmcd changed length from 60 to 42 -
+ * real length of arp request, no padding on ethernet needed -
+ * looks now like linux arp
+ */
+
+ arp_pkt->pkt_len = 42;
+ arp_pkt->data_len = 42;
+
+ if (ARPICMP_DEBUG) {
+ printf("Sending arp request\n");
+ print_mbuf("TX", port_id, arp_pkt, __LINE__);
+ }
+
+ rte_pipeline_port_out_packet_insert(rte_p, port_id, arp_pkt);
+ gp_arp->sentPktCount++;
+
+}
+
+void request_arp_wrap(uint8_t port_id, uint32_t ip)
+{
+ request_arp(port_id, ip, gp_arp->p.p);
+}
+
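+/*
+ * Handles ARP and ICMPv4 control traffic received on a port:
+ * - ARP request: learn the sender, rewrite the packet into an ARP reply
+ *   and transmit it
+ * - ARP reply: learn the sender, then drop the packet
+ * - ICMP echo request: rewrite the packet into an echo reply and transmit
+ * - ICMP echo reply: mark the matching ARP entry COMPLETE, then drop
+ */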
+void process_arpicmp_pkt(
+ struct rte_mbuf *pkt,
+ uint32_t out_port,
+ uint32_t pkt_mask)
+{
+ uint8_t in_port_id = pkt->port;
+ struct app_link_params *link;
+ struct ether_hdr *eth_h;
+ struct arp_hdr *arp_h;
+ struct ipv4_hdr *ip_h;
+ struct icmp_hdr *icmp_h;
+ uint32_t cksum;
+ uint32_t ip_addr;
+ uint32_t req_tip;
+
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ if (eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
+ arp_h =
+ (struct arp_hdr *)((char *)eth_h +
+ sizeof(struct ether_hdr));
+ if (CHECK_ENDIAN_16(arp_h->arp_hrd) != ARP_HRD_ETHER)
+ printf
+ ("Invalid hardware format of hardware address - "
+ "not processing ARP req\n");
+ else if (CHECK_ENDIAN_16(arp_h->arp_pro) != ETHER_TYPE_IPv4)
+ printf
+ ("Invalid protocol address format - "
+ "not processing ARP req\n");
+ else if (arp_h->arp_hln != 6)
+ printf
+ ("Invalid hardware address length - "
+ "not processing ARP req\n");
+ else if (arp_h->arp_pln != 4)
+ printf
+ ("Invalid protocol address length - "
+ "not processing ARP req\n");
+ else {
+ link = &myApp->link_params[in_port_id];
+ arp_port_addresses[in_port_id].ip = link->ip;
+ arp_port_addresses[in_port_id].mac_addr =
+ link->mac_addr;
+
+ if (arp_h->arp_data.arp_tip !=
+ rte_bswap32(arp_port_addresses[in_port_id].ip)) {
+ printf
+ ("ARP requested IP address mismatches "
+ "interface IP - discarding\n");
+ printf("arp_tip = %x\n",
+ arp_h->arp_data.arp_tip);
+ printf("arp_port_addresses = %x\n",
+ arp_port_addresses[in_port_id].ip);
+ printf("in_port_id = %x\n", in_port_id);
+ printf("arp_port_addresses[0] = %x\n",
+ arp_port_addresses[0].ip);
+
+ rte_pipeline_ah_packet_drop(gp_arp->p.p,
+ pkt_mask);
+ gp_arp->droppedPktCount++;
+
+ }
+ /* revise conditionals to allow processing of
+ * requests with target ip = this ip and
+ * processing of replies to destination ip = this ip
+ */
+ else if (arp_h->arp_op ==
+ rte_cpu_to_be_16(ARP_OP_REQUEST)) {
+
+ if (ARPICMP_DEBUG) {
+ printf("arp_op %d, ARP_OP_REQUEST %d\n",
+ arp_h->arp_op,
+ rte_cpu_to_be_16(ARP_OP_REQUEST));
+ print_mbuf("RX", in_port_id, pkt, __LINE__);
+ }
+
+ populate_arp_entry((struct ether_addr *)
+ &arp_h->arp_data.arp_sha,
+ rte_cpu_to_be_32
+ (arp_h->arp_data.arp_sip),
+ in_port_id);
+
+ /* build reply */
+ req_tip = arp_h->arp_data.arp_tip;
+ ether_addr_copy(&eth_h->s_addr, &eth_h->d_addr);
+
+ // set sender mac address -
+ ether_addr_copy((struct ether_addr *)&
+ arp_port_addresses[in_port_id].mac_addr,
+ &eth_h->s_addr);
+
+ arp_h->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
+ ether_addr_copy(&eth_h->s_addr,
+ &arp_h->arp_data.arp_sha);
+ arp_h->arp_data.arp_tip =
+ arp_h->arp_data.arp_sip;
+ arp_h->arp_data.arp_sip = req_tip;
+ ether_addr_copy(&eth_h->d_addr,
+ &arp_h->arp_data.arp_tha);
+
+ rte_pipeline_port_out_packet_insert(gp_arp->p.p,
+ out_port, pkt);
+ gp_arp->sentPktCount++;
+
+ } else if (arp_h->arp_op ==
+ rte_cpu_to_be_16(ARP_OP_REPLY)) {
+ // TODO: be sure that ARP request
+ //was actually sent!!!
+ if (ARPICMP_DEBUG) {
+ printf("ARP_OP_REPLY received");
+ print_mbuf("RX", in_port_id, pkt,
+ __LINE__);
+ }
+ populate_arp_entry((struct ether_addr *)
+ &arp_h->arp_data.arp_sha,
+ rte_bswap32(arp_h->
+ arp_data.arp_sip),
+ in_port_id);
+
+ /* To drop the packet from LB */
+ rte_pipeline_ah_packet_drop(gp_arp->p.p,
+ pkt_mask);
+ gp_arp->droppedPktCount++;
+
+ } else {
+ if (ARPICMP_DEBUG)
+ printf("Invalid ARP opcode - not "
+ "processing ARP req %x\n",
+ arp_h->arp_op);
+ }
+ }
+ } else {
+ ip_h =
+ (struct ipv4_hdr *)((char *)eth_h +
+ sizeof(struct ether_hdr));
+ icmp_h =
+ (struct icmp_hdr *)((char *)ip_h + sizeof(struct ipv4_hdr));
+
+ if (eth_h->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+
+ link = &myApp->link_params[in_port_id];
+ arp_port_addresses[in_port_id].ip = link->ip;
+ arp_port_addresses[in_port_id].mac_addr =
+ link->mac_addr;
+
+ if (!is_same_ether_addr((struct ether_addr *)
+ &arp_port_addresses[in_port_id].
+ mac_addr, &eth_h->d_addr)) {
+
+ if (ARPICMP_DEBUG)
+ printf("Ethernet frame not destined "
+ "for MAC address of received network "
+ "interface - discarding\n");
+
+ } else if (ip_h->next_proto_id != IPPROTO_ICMP) {
+ if (ARPICMP_DEBUG)
+ printf("IP protocol ID is not set to "
+ "ICMP - discarding\n");
+
+ } else if ((ip_h->version_ihl & 0xf0) != IP_VERSION_4) {
+ if (ARPICMP_DEBUG)
+ printf("IP version other than 4 - "
+ "discarding\n");
+
+ } else if ((ip_h->version_ihl & 0x0f) != IP_HDRLEN) {
+ if (ARPICMP_DEBUG)
+ printf("Unknown IHL - discarding\n");
+
+ } else {
+ if (icmp_h->icmp_type == IP_ICMP_ECHO_REQUEST
+ && icmp_h->icmp_code == 0) {
+ if (ARPICMP_DEBUG)
+ print_mbuf("RX", in_port_id,
+ pkt, __LINE__);
+
+ ip_addr = ip_h->src_addr;
+ ether_addr_copy(&eth_h->s_addr,
+ &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &arp_port_addresses
+ [in_port_id].mac_addr,
+ &eth_h->s_addr);
+
+ if (ip_h->dst_addr !=
+ rte_bswap32(arp_port_addresses
+ [in_port_id].ip)) {
+ if (ARPICMP_DEBUG) {
+ printf("IPv4 packet not destined for "
+ "configured IP on RX port - "
+ "discarding\n");
+ printf("ip_h->dst_addr = %u, "
+ "in_port_id = %u, "
+ "arp_port_addresses.ip = %u\n",
+ ip_h->dst_addr, in_port_id,
+ arp_port_addresses[in_port_id].ip);
+ }
+ } else {
+
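+ /* Echo request sent to a multicast address:
+ * reply from a unicast source in the requester's
+ * /30 (x.x.x.1 or x.x.x.2) rather than echoing
+ * the multicast destination back as the source.
+ */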
+ if (is_multicast_ipv4_addr
+ (ip_h->dst_addr)) {
+ uint32_t ip_src;
+
+ ip_src = rte_be_to_cpu_32
+ (ip_addr);
+ if ((ip_src & 0x00000003) == 1)
+ ip_src = (ip_src &
+ 0xFFFFFFFC)
+ | 0x00000002;
+ else
+ ip_src = (ip_src &
+ 0xFFFFFFFC)
+ | 0x00000001;
+
+ ip_h->src_addr =
+ rte_cpu_to_be_32(ip_src);
+ ip_h->dst_addr = ip_addr;
+
+ ip_h->hdr_checksum = 0;
+ ip_h->hdr_checksum = ~rte_raw_cksum(
+ ip_h, sizeof(struct
+ ipv4_hdr));
+ } else {
+ ip_h->src_addr = ip_h->dst_addr;
+ ip_h->dst_addr = ip_addr;
+ }
+
+ icmp_h->icmp_type =
+ IP_ICMP_ECHO_REPLY;
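+ /* Incrementally adjust the ICMP checksum for the
+ * type change from ECHO_REQUEST to ECHO_REPLY
+ * (one's-complement update, cf. RFC 1624).
+ */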
+ cksum = ~icmp_h->icmp_cksum & 0xffff;
+ cksum += ~htons(IP_ICMP_ECHO_REQUEST << 8) & 0xffff;
+ cksum += htons(IP_ICMP_ECHO_REPLY << 8);
+ cksum = (cksum & 0xffff) + (cksum >> 16);
+ cksum = (cksum & 0xffff) + (cksum >> 16);
+ icmp_h->icmp_cksum = ~cksum;
+
+ if (ARPICMP_DEBUG)
+ print_mbuf("TX", in_port_id, pkt, __LINE__);
+
+ rte_pipeline_port_out_packet_insert(gp_arp->p.p,
+ out_port, pkt);
+ gp_arp->sentPktCount++;
+
+ }
+ }
+ else if (icmp_h->icmp_type == IP_ICMP_ECHO_REPLY
+ && icmp_h->icmp_code == 0) {
+ if (ARPICMP_DEBUG)
+ print_mbuf("RX", in_port_id,
+ pkt, __LINE__);
+
+ struct arp_key_ipv4 arp_key;
+ arp_key.port_id = in_port_id;
+ arp_key.ip =
+ rte_bswap32(ip_h->src_addr);
+ arp_key.filler1 = 0;
+ arp_key.filler2 = 0;
+ arp_key.filler3 = 0;
+
+ struct arp_entry_data *arp_entry =
+ retrieve_arp_entry(arp_key);
+ if (arp_entry == NULL) {
+ printf("Received unsolicited "
+ "ICMP echo reply from ip%x, "
+ "port %d\n",
+ arp_key.ip,
+ arp_key.port_id);
+ return;
+ }
+
+ arp_entry->status = COMPLETE;
+ /* To drop the packet from LB */
+ rte_pipeline_ah_packet_drop(gp_arp->p.p,
+ pkt_mask);
+ gp_arp->droppedPktCount++;
+ }
+ }
+ }
+ }
+}
+
+
+
+/* int
+ * inet_pton(af, src, dst)
+ * convert from presentation format (which usually means ASCII printable)
+ * to network format (which is usually some kind of binary format).
+ * return:
+ * 1 if the address was valid for the specified address family
+ * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int my_inet_pton_ipv6(int af, const char *src, void *dst)
+{
+ switch (af) {
+ case AF_INET:
+ return inet_pton_ipv4(src, dst);
+ case AF_INET6:
+ return inet_pton_ipv6(src, dst);
+ default:
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+ /* NOTREACHED */
+}
+
+/* int
+ * inet_pton_ipv4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int inet_pton_ipv4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton_ipv6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int inet_pton_ipv6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned int dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton_ipv4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets,
+ * having a colon means error
+ */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+/**
+ * Function to classify ICMPv6 Packets based on NextHeader field in IPv6 Header.
+ * Updates ND Cache table with link layer addresses as received from Neighbor.
+ * Processes ICMPv6 Echo destined to local port and replies.
+ *
+ * @param pkt
+ * A pointer to the packet received from Loadbalancer pipeline
+ * @param out_port
+ * A pointer to the output port action
+ * @param pkt_num
+ * A packet number
+ *
+ * @return
+ * None
+ */
+
+void
+process_icmpv6_pkt(
+ struct rte_mbuf *pkt,
+ uint32_t out_port,
+ __rte_unused uint32_t pkt_num)
+{
+
+ uint8_t in_port_id = pkt->port;
+ struct app_link_params *link;
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_nd_hdr *icmpv6_nd_h;
+ uint8_t ipv6_addr[16];
+ uint8_t i = 0, flag = 0; /* counts dst-address bytes that differ */
+ uint8_t req_tipv6[16];
+
+ eth_h = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+ struct rte_mbuf *icmpv6_pkt = pkt;
+
+ link = &myApp->link_params[in_port_id];
+ icmpv6_port_addresses[in_port_id].mac_addr = link->mac_addr;
+
+ if (!is_same_ether_addr
+ ((struct ether_addr *)&icmpv6_port_addresses[in_port_id].mac_addr,
+ &eth_h->d_addr)) {
+ if (ARPICMP_DEBUG) {
+ printf("Ethernet frame not destined for MAC address "
+ "of received network interface - discarding\n");
+ }
+ } else {
+ if ((icmpv6_h->icmpv6_type == ICMPV6_ECHO_REQUEST)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ for (i = 0; i < 16; i++)
+ ipv6_addr[i] = ipv6_h->src_addr[i];
+
+ for (i = 0; i < 16; i++) {
+ if (ipv6_h->dst_addr[i] !=
+ icmpv6_port_addresses[in_port_id].ipv6[i]) {
+ flag++;
+ }
+ }
+ if (flag) {
+ printf("IPv6 packet not destined for "
+ "configured IP on RX port - discarding\n");
+ } else {
+ {
+
+ ether_addr_copy(&eth_h->s_addr,
+ &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &icmpv6_port_addresses
+ [in_port_id].mac_addr,
+ &eth_h->s_addr);
+
+ for (i = 0; i < 16; i++)
+ ipv6_h->src_addr[i] =
+ ipv6_h->dst_addr[i];
+ for (i = 0; i < 16; i++)
+ ipv6_h->dst_addr[i] =
+ ipv6_addr[i];
+
+ icmpv6_h->icmpv6_type =
+ ICMPV6_ECHO_REPLY;
+
+ rte_pipeline_port_out_packet_insert
+ (gp_arp->p.p, out_port, icmpv6_pkt);
+ gp_arp->sentPktCount++;
+ }
+ }
+
+ } else if ((icmpv6_h->icmpv6_type == ICMPV6_ECHO_REPLY)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ struct nd_key_ipv6 nd_key;
+ nd_key.port_id = in_port_id;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ nd_key.ipv6[i] = ipv6_h->src_addr[i];
+
+ nd_key.filler1 = 0;
+ nd_key.filler2 = 0;
+ nd_key.filler3 = 0;
+
+ /* Validate if key-value pair already
+ * exists in the hash table for ND IPv6
+ */
+ struct nd_entry_data *new_nd_data =
+ retrieve_nd_entry(nd_key);
+
+ if (new_nd_data == NULL) {
+ printf("Received unsolicited ICMPv6 echo "
+ "reply on port %d\n",
+ nd_key.port_id);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i += 2) {
+ printf("%02X%02X ", nd_key.ipv6[i],
+ nd_key.ipv6[i + 1]);
+ }
+ return;
+ }
+
+ new_nd_data->status = COMPLETE;
+
+ } else
+ if ((icmpv6_h->icmpv6_type == ICMPV6_NEIGHBOR_SOLICITATION)
+ && (icmpv6_h->icmpv6_code == 0)) {
+
+ icmpv6_nd_h =
+ (struct icmpv6_nd_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+ struct ether_addr *src_hw_addr = &eth_h->s_addr;
+ uint8_t src_ipv6[16], dst_ipv6[16];
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ src_ipv6[i] = ipv6_h->src_addr[i];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ dst_ipv6[i] = ipv6_h->dst_addr[i];
+
+ // Check for Multicast Address
+ if (((dst_ipv6[0] << 8) | dst_ipv6[1]) ==
+ IPV6_MULTICAST) {
+ if (populate_nd_entry
+ (src_hw_addr, src_ipv6, in_port_id)) {
+
+ //build a Neighbor Advertisement message
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ req_tipv6[i] =
+ icmpv6_nd_h->target_ipv6[i];
+
+ ether_addr_copy(&eth_h->s_addr,
+ &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)
+ &icmpv6_port_addresses
+ [in_port_id].mac_addr,
+ &eth_h->s_addr);
+
+ // set sender mac address
+ ether_addr_copy(&eth_h->s_addr,
+ &icmpv6_nd_h->
+ link_layer_address);
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6_h->dst_addr[i] =
+ ipv6_h->src_addr[i];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6_h->src_addr[i] =
+ req_tipv6[i];
+ icmpv6_h->icmpv6_type =
+ ICMPV6_NEIGHBOR_ADVERTISEMENT;
+ icmpv6_nd_h->type =
+ e_Target_Link_Layer_Address;
+ icmpv6_nd_h->icmpv6_reserved |=
+ rte_cpu_to_be_32
+ (NEIGHBOR_SOLICITATION_SET);
+
+ rte_pipeline_port_out_packet_insert
+ (gp_arp->p.p, out_port, icmpv6_pkt);
+ gp_arp->sentPktCount++;
+ }
+ } else {
+ if (ARPICMP_DEBUG) {
+ printf("Non-Multicasted Neighbor "
+ "Solicitation Message Received, "
+ "can't do Address Resolution\n");
+ printf("............Some one else "
+ "is the target host here !!!\n");
+ }
+ }
+
+ } else
+ if ((icmpv6_h->icmpv6_type == ICMPV6_NEIGHBOR_ADVERTISEMENT)
+ && (icmpv6_h->icmpv6_code == 0)) {
+ struct ether_addr *src_hw_addr = &eth_h->s_addr;
+ uint8_t ipv6[16];
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ ipv6[i] = ipv6_h->src_addr[i];
+
+ if (populate_nd_entry(src_hw_addr, ipv6, in_port_id))
+ if (ARPICMP_DEBUG)
+ printf("Now on, unicast IPv6 traffic "
+ "is possible\n");
+ // Now on, unicast IPv6 traffic is possible
+ } else {
+ if (ARPICMP_DEBUG) {
+ printf("ICMPv6 Type %d Not Supported yet !!!\n",
+ icmpv6_h->icmpv6_type);
+ }
+ }
+
+ }
+
+}
+
+void request_icmpv6_echo(uint32_t port_id, uint8_t ipv6[])
+{
+ (void)port_id;
+ (void)ipv6;
+ int i;
+
+ struct ether_addr gw_addr;
+ uint8_t nhipv6[16];
+ uint8_t dest_ipv6[16];
+ uint32_t phy_port;
+
+ for (i = 0; i < ND_IPV6_ADDR_SIZE; i++)
+ dest_ipv6[i] = ipv6[i];
+
+ if (get_dest_mac_address_ipv6_port(dest_ipv6, &phy_port,
+ &gw_addr, nhipv6)) {
+ request_icmpv6_echo_message(phy_port, ipv6, &gw_addr);
+ return;
+ }
+
+ if (ARPICMP_DEBUG)
+ printf("Sending icmpv6 echo request ... get mac failed.\n");
+}
+
+void
+request_icmpv6_echo_message(uint16_t port_id, uint8_t ipv6[],
+ struct ether_addr *gw_addr)
+{
+ struct ether_hdr *eth_h;
+ struct ipv6_hdr *ipv6_h;
+ struct icmpv6_hdr *icmpv6_h;
+ struct icmpv6_info_hdr *icmpv6_info_h;
+ int i;
+ struct app_link_params *link;
+ link = &mylink[port_id];
+
+ for (i = 0; i < 16; i++)
+ icmpv6_port_addresses[port_id].ipv6[i] = link->ipv6[i];
+
+ icmpv6_port_addresses[port_id].mac_addr = link->mac_addr;
+
+ struct rte_mbuf *icmpv6_pkt = lib_icmpv6_pkt;
+ if (icmpv6_pkt == NULL) {
+ if (ARPICMP_DEBUG)
+ printf("Error allocating icmpv6_pkt rte_mbuf\n");
+ return;
+ }
+
+ eth_h = rte_pktmbuf_mtod(icmpv6_pkt, struct ether_hdr *);
+ ether_addr_copy(gw_addr, &eth_h->d_addr);
+ ether_addr_copy((struct ether_addr *)&icmpv6_port_addresses[port_id].
+ mac_addr, &eth_h->s_addr);
+ eth_h->ether_type = CHECK_ENDIAN_16(ETHER_TYPE_IPv6);
+
+ ipv6_h = (struct ipv6_hdr *)((char *)eth_h + sizeof(struct ether_hdr));
+ icmpv6_h =
+ (struct icmpv6_hdr *)((char *)ipv6_h + sizeof(struct ipv6_hdr));
+ icmpv6_info_h =
+ (struct icmpv6_info_hdr *)((char *)icmpv6_h +
+ sizeof(struct icmpv6_hdr));
+
+ ipv6_h->vtc_flow = 0x60000000;
+ ipv6_h->payload_len = 64;
+ ipv6_h->proto = 58;
+ ipv6_h->hop_limits = 64;
+
+ for (i = 0; i < 16; i++) {
+ ipv6_h->src_addr[i] = icmpv6_port_addresses[port_id].ipv6[i];
+ ipv6_h->dst_addr[i] = ipv6[i];
+ }
+
+ icmpv6_h->icmpv6_type = ICMPV6_ECHO_REQUEST;
+ icmpv6_h->icmpv6_code = 0;
+ icmpv6_info_h->icmpv6_ident = 0x5151;
+ icmpv6_info_h->icmpv6_seq_nb = 0x1;
+
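+ /* Note: a fully RFC-compliant ICMPv6 checksum also covers the
+ * IPv6 pseudo-header (e.g. via rte_ipv6_udptcp_cksum()); the raw
+ * checksum over the ICMPv6 header alone below is a simplification.
+ */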
+ icmpv6_h->icmpv6_cksum = 0;
+ icmpv6_h->icmpv6_cksum =
+ ~rte_raw_cksum(icmpv6_h, sizeof(struct icmpv6_hdr));
+
+ icmpv6_pkt->pkt_len =
+ sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr) +
+ sizeof(struct icmpv6_hdr);
+ icmpv6_pkt->data_len = icmpv6_pkt->pkt_len;
+
+ if (ARPICMP_DEBUG)
+ printf("Sending icmpv6 echo request\n");
+
+ rte_pipeline_port_out_packet_insert(gp_arp->p.p,
+ gp_arp->outport_id[port_id],
+ icmpv6_pkt);
+
+ gp_arp->sentPktCount++;
+}
+
+
+#endif
+
+static void *pipeline_arpicmp_msg_req_custom_handler(struct pipeline *p,
+ void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_arpicmp_msg_req_custom_handler,
+
+};
+
+static void *pipeline_arpicmp_msg_req_entry_dbg_handler(struct pipeline *p,
+ void *msg);
+static void *pipeline_arpicmp_msg_req_entry_dbg_handler(
+ __rte_unused struct pipeline *p,
+ __rte_unused void *msg)
+{
+ /*have to handle dbg commands*/
+ return NULL;
+}
+
+static __rte_unused pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_ARPICMP_MSG_REQ_ENTRY_DBG] =
+ pipeline_arpicmp_msg_req_entry_dbg_handler,
+};
+
+/**
+ * Function for pipeline custom handlers
+ *
+ * @param pipeline
+ * A void pointer to pipeline
+ * @param msg
+ * void pointer for incoming data
+ *
+ * @return
+ * void pointer of response
+ */
+void *pipeline_arpicmp_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_arpicmp *p_arp = (struct pipeline_arpicmp *)p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_ARPICMP_MSG_REQS) ?
+ p_arp->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+#ifdef VNF_ACL
+
+/* Not needed as no arguments are needed for TxRX
+ * ARP arguments are handled in ARP module
+ */
+int
+pipeline_arpicmp_parse_args(struct pipeline_arpicmp *p,
+ struct pipeline_params *params);
+int
+pipeline_arpicmp_parse_args(
+ __rte_unused struct pipeline_arpicmp *p,
+ struct pipeline_params *params)
+{
+
+ uint32_t i;
+ uint32_t arp_meta_offset_present = 0;
+
+ uint32_t arp_route_tbl_present = 0;
+ uint32_t nd_route_tbl_present = 0;
+ uint32_t ports_mac_list_present = 0;
+ uint32_t pktq_in_prv_present = 0;
+ uint32_t prv_to_pub_map_present = 0;
+
+ uint8_t n_prv_in_port = 0;
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ in_port_dir_a[i] = 0; //make all RX ports ingress initially
+ prv_to_pub_map[i] = 0xff;
+ pub_to_prv_map[i] = 0xff;
+ }
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (ARPICMP_DEBUG > 2) {
+ printf("ARP args[%d]: %s %d, %s\n", i, arg_name,
+ atoi(arg_value), arg_value);
+ }
+ if (strcmp(arg_name, "arp_meta_offset") == 0) {
+ if (arp_meta_offset_present) {
+ printf("arp_meta_offset "
+ "initialized already\n");
+ return -1;
+ }
+ arp_meta_offset_present = 1;
+ arp_meta_offset = atoi(arg_value);
+ continue;
+ }
+ /* pktq_in_prv */
+ if (strcmp(arg_name, "pktq_in_prv") == 0) {
+ if (pktq_in_prv_present) {
+ printf("Duplicate pktq_in_prv ... "
+ "parse failed..\n\n");
+ return -1;
+ }
+ pktq_in_prv_present = 1;
+
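+ /* Expected value format (as parsed below): a list of
+ * "RXQ<port>.<queue>" tokens, e.g. "RXQ0.0 RXQ1.0";
+ * only the physical port number before the '.' is used.
+ */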
+ int rxport = 0, j = 0;
+ char phy_port_num[5];
+ char *token = strtok(arg_value, "RXQ");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != '.')) {
+ phy_port_num[j] = token[j];
+ j++;
+ }
+ phy_port_num[j] = '\0';
+ rxport = atoi(phy_port_num);
+ printf("token: %s, phy_port_str: %s, "
+ "phy_port_num %d\n",
+ token, phy_port_num, rxport);
+
+ prv_in_port_a[n_prv_in_port++] = rxport;
+ // set rxport egress
+ if (rxport < PIPELINE_MAX_PORT_IN)
+ in_port_dir_a[rxport] = 1;
+ token = strtok(NULL, "RXQ");
+ }
+
+ if (n_prv_in_port == 0) {
+ printf
+ ("VNF common parse error - "
+ "no prv RX phy port\n");
+ return -1;
+ }
+ continue;
+ }
+
+ /* prv_to_pub_map */
+ if (strcmp(arg_name, "prv_to_pub_map") == 0) {
+ if (prv_to_pub_map_present) {
+ printf
+ ("Duplicated prv_to_pub_map ... "
+ "parse failed ...\n");
+ return -1;
+ }
+ prv_to_pub_map_present = 1;
+
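+ /* Expected value format (as parsed below): a list of
+ * "(rx_port,tx_port)" pairs, e.g. "(0,1)(2,3)".
+ */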
+ int rxport = 0, txport = 0, j = 0, k = 0;
+ char rx_phy_port_num[5];
+ char tx_phy_port_num[5];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != ',')) {
+ rx_phy_port_num[j] = token[j];
+ j++;
+ }
+ rx_phy_port_num[j] = '\0';
+ rxport = atoi(rx_phy_port_num);
+
+ j++;
+ k = 0;
+ while ((k < 4) && (token[j + k] != ')')) {
+ tx_phy_port_num[k] = token[j + k];
+ k++;
+ }
+ tx_phy_port_num[k] = '\0';
+ txport = atoi(tx_phy_port_num);
+ if (rxport < PIPELINE_MAX_PORT_IN && txport < PIPELINE_MAX_PORT_IN){
+ printf("token: %s,"
+ "rx_phy_port_str: %s, phy_port_num %d,"
+ "tx_phy_port_str: %s, tx_phy_port_num %d\n",
+ token, rx_phy_port_num, rxport,
+ tx_phy_port_num, txport);
+ }
+ else
+ return -1;
+ if ((rxport >= PIPELINE_MAX_PORT_IN) ||
+ (txport >= PIPELINE_MAX_PORT_IN) ||
+ (in_port_dir_a[rxport] != 1)) {
+ printf("CG-NAPT parse error - "
+ "incorrect prv-pub translation. "
+ "Rx %d, Tx %d, Rx Dir %d\n",
+ rxport, txport, in_port_dir_a[rxport]);
+ return -1;
+ }
+
+ prv_to_pub_map[rxport] = txport;
+ pub_to_prv_map[txport] = rxport;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+
+ /* lib_arp_debug */
+ if (strcmp(arg_name, "lib_arp_debug") == 0) {
+ ARPICMP_DEBUG = atoi(arg_value);
+
+ continue;
+ }
+
+ /* ports_mac_list */
+ if (strcmp(arg_name, "ports_mac_list") == 0) {
+ ports_mac_list_present = 1;
+
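+ /* Expected value format (as parsed below): space-separated
+ * MAC addresses with single-character byte separators,
+ * e.g. "00:11:22:33:44:55 66:77:88:99:aa:bb"; each address
+ * fills the next free slot of link_hw_addr[].
+ */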
+ uint32_t i = 0, j = 0, k = 0, MAC_NUM_BYTES = 6;
+
+ char byteStr[MAC_NUM_BYTES][3];
+ uint32_t byte[MAC_NUM_BYTES];
+
+ char *token = strtok(arg_value, " ");
+ while (token) {
+ k = 0;
+ for (i = 0; i < MAC_NUM_BYTES; i++) {
+ for (j = 0; j < 2; j++)
+ byteStr[i][j] = token[k++];
+ byteStr[i][j] = '\0';
+ k++;
+ }
+
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ byte[i] = strtoul(byteStr[i], NULL, 16);
+
+ if (ARPICMP_DEBUG) {
+ printf("token: %s", token);
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ printf(", byte[%u] %u", i,
+ byte[i]);
+ printf("\n");
+ }
+ //Populate the static arp_route_table
+ for (i = 0; i < MAC_NUM_BYTES; i++)
+ link_hw_addr
+ [link_hw_addr_array_idx].addr_bytes
+ [i] = byte[i];
+
+ link_hw_addr_array_idx++;
+ token = strtok(NULL, " ");
+ }
+
+ continue;
+ }
+
+ /* arp_route_tbl */
+ if (strcmp(arg_name, "arp_route_tbl") == 0) {
+ arp_route_tbl_present = 1;
+
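+ /* Expected value format (as parsed below): a list of
+ * "(dest_ip,mask,tx_port,nh_ip)" tuples with all fields
+ * in hex, e.g. "(c0a80100,ffffff00,0,c0a80101)".
+ */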
+ uint32_t dest_ip = 0, mask = 0, tx_port = 0, nh_ip =
+ 0, i = 0, j = 0, k = 0, l = 0;
+ uint32_t arp_route_tbl_str_max_len = 10;
+ char dest_ip_str[arp_route_tbl_str_max_len];
+ char mask_str[arp_route_tbl_str_max_len];
+ char tx_port_str[arp_route_tbl_str_max_len];
+ char nh_ip_str[arp_route_tbl_str_max_len];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ i = 0;
+ while ((i < (arp_route_tbl_str_max_len - 1))
+ && (token[i] != ',')) {
+ dest_ip_str[i] = token[i];
+ i++;
+ }
+ dest_ip_str[i] = '\0';
+ dest_ip = strtoul(dest_ip_str, NULL, 16);
+
+ i++;
+ j = 0;
+ while ((j < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j] != ',')) {
+ mask_str[j] = token[i + j];
+ j++;
+ }
+ mask_str[j] = '\0';
+ mask = strtoul(mask_str, NULL, 16);
+
+ j++;
+ k = 0;
+ while ((k < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j + k] != ',')) {
+ tx_port_str[k] = token[i + j + k];
+ k++;
+ }
+ tx_port_str[k] = '\0';
+ //atoi(tx_port_str);
+ tx_port = strtoul(tx_port_str, NULL, 16);
+
+ k++;
+ l = 0;
+ while ((l < (arp_route_tbl_str_max_len - 1))
+ && (token[i + j + k + l] != ')')) {
+ nh_ip_str[l] = token[i + j + k + l];
+ l++;
+ }
+ nh_ip_str[l] = '\0';
+ //atoi(nh_ip_str);
+ nh_ip = strtoul(nh_ip_str, NULL, 16);
+
+ if (ARPICMP_DEBUG) {
+ printf("token: %s, "
+ "dest_ip_str: %s, dest_ip %u, "
+ "mask_str: %s, mask %u, "
+ "tx_port_str: %s, tx_port %u, "
+ "nh_ip_str: %s, nh_ip %u\n",
+ token, dest_ip_str, dest_ip,
+ mask_str, mask, tx_port_str,
+ tx_port, nh_ip_str, nh_ip);
+ }
+ #if 0
+ if (tx_port >= params->n_ports_out) {
+ printf("ARP-ICMP parse error - "
+ "incorrect tx_port %d, max %d\n",
+ tx_port, params->n_ports_out);
+ return -1;
+ }
+ #endif
+
+ //Populate the static arp_route_table
+ lib_arp_route_table[arp_route_tbl_index].ip =
+ dest_ip;
+ lib_arp_route_table[arp_route_tbl_index].mask =
+ mask;
+ lib_arp_route_table[arp_route_tbl_index].port =
+ tx_port;
+ lib_arp_route_table[arp_route_tbl_index].nh =
+ nh_ip;
+ arp_route_tbl_index++;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ /*ND IPv6 */
+ /* nd_route_tbl */
+ if (strcmp(arg_name, "nd_route_tbl") == 0) {
+ nd_route_tbl_present = 1;
+
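+ /* Expected value format (as parsed below): a list of
+ * "(dest_ipv6,depth,tx_port,nh_ipv6)" tuples; IPv6 fields in
+ * presentation form, depth in decimal, tx_port in hex,
+ * e.g. "(fd00::1,64,0,fd00::2)".
+ */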
+ uint8_t dest_ipv6[16], depth = 0, tx_port =
+ 0, nh_ipv6[16], i = 0, j = 0, k = 0, l = 0;
+ uint8_t nd_route_tbl_str_max_len = 128; //64;
+ char dest_ipv6_str[nd_route_tbl_str_max_len];
+ char depth_str[nd_route_tbl_str_max_len];
+ char tx_port_str[nd_route_tbl_str_max_len];
+ char nh_ipv6_str[nd_route_tbl_str_max_len];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ i = 0;
+ while ((i < (nd_route_tbl_str_max_len - 1))
+ && (token[i] != ',')) {
+ dest_ipv6_str[i] = token[i];
+ i++;
+ }
+ dest_ipv6_str[i] = '\0';
+ my_inet_pton_ipv6(AF_INET6, dest_ipv6_str,
+ &dest_ipv6);
+
+ i++;
+ j = 0;
+ while ((j < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j] != ',')) {
+ depth_str[j] = token[i + j];
+ j++;
+ }
+ depth_str[j] = '\0';
+ //converting string char to integer
+ int s;
+ depth = 0; /* reset for each route entry */
+ for (s = 0; depth_str[s] != '\0'; ++s)
+ depth = depth * 10 + depth_str[s] - '0';
+
+ j++;
+ k = 0;
+ while ((k < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j + k] != ',')) {
+ tx_port_str[k] = token[i + j + k];
+ k++;
+ }
+ tx_port_str[k] = '\0';
+ //atoi(tx_port_str);
+ tx_port = strtoul(tx_port_str, NULL, 16);
+
+ k++;
+ l = 0;
+ while ((l < (nd_route_tbl_str_max_len - 1))
+ && (token[i + j + k + l] != ')')) {
+ nh_ipv6_str[l] = token[i + j + k + l];
+ l++;
+ }
+ nh_ipv6_str[l] = '\0';
+ my_inet_pton_ipv6(AF_INET6, nh_ipv6_str,
+ &nh_ipv6);
+
+ //Populate the static arp_route_table
+ for (i = 0; i < 16; i++) {
+ lib_nd_route_table
+ [nd_route_tbl_index].ipv6[i] =
+ dest_ipv6[i];
+ lib_nd_route_table
+ [nd_route_tbl_index].nhipv6[i] =
+ nh_ipv6[i];
+ }
+ lib_nd_route_table[nd_route_tbl_index].depth =
+ depth;
+ lib_nd_route_table[nd_route_tbl_index].port =
+ tx_port;
+
+ nd_route_tbl_index++;
+ token = strtok(NULL, "(");
+ } //while
+
+ continue;
+ }
+ /* any other */
+
+ }
+
+ #if 0
+ if (!arp_meta_offset_present) {
+ printf("ARPICMP: arp_meta_offset not initialized\n");
+ return -1;
+ }
+ #endif
+
+ if (!arp_route_tbl_present && !nd_route_tbl_present) {
+ printf("Neither arp_route_tbl_present nor "
+ "nd_route_tbl_present declared\n");
+ return -1;
+ }
+
+ if (!pktq_in_prv_present) {
+ printf("pktq_in_prv not declared\n");
+ return -1;
+ }
+
+ if (!ports_mac_list_present) {
+ printf("ports_mac_list not declared\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif
+
+uint32_t arpicmp_pkt_print_count;
+static inline void
+pkt_key_arpicmp(struct rte_mbuf *pkt, uint32_t pkt_num, void *arg)
+{
+
+ struct pipeline_arpicmp_in_port_h_arg *ap = arg;
+ struct pipeline_arpicmp *p_arp = (struct pipeline_arpicmp *)ap->p;
+
+ p_arp->receivedPktCount++;
+
+ uint8_t in_port_id = pkt->port;
+ #ifdef VNF_ACL
+ struct app_link_params *link;
+ #endif
+ uint8_t *protocol;
+ uint32_t pkt_mask = 1 << pkt_num;
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+
+ uint32_t prot_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
+
+ #ifdef VNF_ACL
+ uint32_t out_port;
+ #endif
+
+ uint16_t *eth_proto =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
+
+ /* header room + eth hdr size + dst_addr offset in ip header */
+ #ifdef VNF_ACL
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt, dst_addr_offset);
+ #endif
+
+ #ifdef IPV6
+ uint32_t prot_offset_ipv6 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
+
+ if (rte_be_to_cpu_16(*eth_proto) == ETHER_TYPE_IPv6)
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset_ipv6);
+ else
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
+ #else
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
+ #endif
+
+
+ if ((ARPICMP_DEBUG > 2) && (arpicmp_pkt_print_count < 10)) {
+ print_pkt1(pkt);
+ arpicmp_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto), *protocol, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ #ifdef VNF_ACL
+ link = &myApp->link_params[in_port_id];
+ #endif
+
+ /* Classifier for ICMP pass-through*/
+ if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_ARP) ||
+ ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
+ && (*protocol == IP_PROTOCOL_ICMP)
+ #ifdef VNF_ACL
+ && (link->ip == rte_be_to_cpu_32(*dst_addr))
+ #endif
+ )) {
+
+ #ifdef VNF_ACL
+ out_port = p_arp->outport_id[in_port_id];
+ process_arpicmp_pkt(pkt, out_port, pkt_mask);
+ #else
+ process_arpicmp_pkt(pkt, ifm_get_port(in_port_id));
+ #endif
+ return;
+ }
+ #ifdef IPV6
+ else if ((rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV6)
+ && (*protocol == ICMPV6_PROTOCOL_ID)) {
+ #ifdef VNF_ACL
+ out_port = p_arp->outport_id[in_port_id];
+ process_icmpv6_pkt(pkt, out_port, pkt_mask);
+ #else
+ process_icmpv6_pkt(pkt, ifm_get_port(in_port_id));
+ #endif
+
+ return;
+ }
+ #endif
+
+ /* Drop the pkt if not ARP/ICMP */
+ rte_pipeline_ah_packet_drop(p_arp->p.p, pkt_mask);
+ p_arp->droppedPktCount++;
+
+}
+
+static inline void
+pkt4_key_arpicmp(struct rte_mbuf **pkt, uint32_t pkt_num, void *arg)
+{
+
+ struct pipeline_arpicmp_in_port_h_arg *ap = arg;
+ struct pipeline_arpicmp *p_arp = (struct pipeline_arpicmp *)ap->p;
+
+ p_arp->receivedPktCount += 4;
+
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+ uint8_t in_port_id = pkt[0]->port;
+
+ uint32_t prot_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
+
+ /* header room + eth hdr size + dst_addr offset in ip header */
+ #ifdef VNF_ACL
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ #endif
+
+ uint32_t pkt_mask0 = 1 << pkt_num;
+ uint32_t pkt_mask1 = 1 << (pkt_num + 1);
+ uint32_t pkt_mask2 = 1 << (pkt_num + 2);
+ uint32_t pkt_mask3 = 1 << (pkt_num + 3);
+
+ #ifdef VNF_ACL
+ uint32_t out_port0;
+ uint32_t out_port1;
+ uint32_t out_port2;
+ uint32_t out_port3;
+ #endif
+
+ uint16_t *eth_proto0 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[0], eth_proto_offset);
+ uint16_t *eth_proto1 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[1], eth_proto_offset);
+ uint16_t *eth_proto2 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[2], eth_proto_offset);
+ uint16_t *eth_proto3 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[3], eth_proto_offset);
+
+ uint8_t *protocol0;
+ uint8_t *protocol1;
+ uint8_t *protocol2;
+ uint8_t *protocol3;
+
+ #ifdef VNF_ACL
+ uint32_t *dst_addr0 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[0], dst_addr_offset);
+ uint32_t *dst_addr1 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[1], dst_addr_offset);
+ uint32_t *dst_addr2 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[2], dst_addr_offset);
+ uint32_t *dst_addr3 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[3], dst_addr_offset);
+
+ struct app_link_params *link0;
+ struct app_link_params *link1;
+ struct app_link_params *link2;
+ struct app_link_params *link3;
+
+ link0 = &myApp->link_params[pkt[0]->port];
+ link1 = &myApp->link_params[pkt[1]->port];
+ link2 = &myApp->link_params[pkt[2]->port];
+ link3 = &myApp->link_params[pkt[3]->port];
+ #endif
+
+ #ifdef IPV6
+ uint32_t prot_offset_ipv6 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
+
+ #endif
+
+ #ifdef IPV6
+/* --0-- */
+ if (rte_be_to_cpu_16(*eth_proto0) == ETHER_TYPE_IPv6)
+ protocol0 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[0], prot_offset_ipv6);
+ else
+ protocol0 = RTE_MBUF_METADATA_UINT8_PTR(pkt[0], prot_offset);
+
+/* --1-- */
+ if (rte_be_to_cpu_16(*eth_proto1) == ETHER_TYPE_IPv6)
+ protocol1 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[1], prot_offset_ipv6);
+ else
+ protocol1 = RTE_MBUF_METADATA_UINT8_PTR(pkt[1], prot_offset);
+
+/* --2-- */
+ if (rte_be_to_cpu_16(*eth_proto2) == ETHER_TYPE_IPv6)
+ protocol2 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[2], prot_offset_ipv6);
+ else
+ protocol2 = RTE_MBUF_METADATA_UINT8_PTR(pkt[2], prot_offset);
+
+/* --3-- */
+ if (rte_be_to_cpu_16(*eth_proto3) == ETHER_TYPE_IPv6)
+ protocol3 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[3], prot_offset_ipv6);
+ else
+ protocol3 = RTE_MBUF_METADATA_UINT8_PTR(pkt[3], prot_offset);
+ #else
+ protocol0 = RTE_MBUF_METADATA_UINT8_PTR(pkt[0], prot_offset);
+ protocol1 = RTE_MBUF_METADATA_UINT8_PTR(pkt[1], prot_offset);
+ protocol2 = RTE_MBUF_METADATA_UINT8_PTR(pkt[2], prot_offset);
+ protocol3 = RTE_MBUF_METADATA_UINT8_PTR(pkt[3], prot_offset);
+ #endif
+
+ if ((ARPICMP_DEBUG > 2) && (arpicmp_pkt_print_count < 10)) {
+ print_pkt1(pkt[0]);
+ arpicmp_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto0), *protocol0, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+
+ if ((rte_be_to_cpu_16(*eth_proto0) == ETH_TYPE_ARP) ||
+ ((rte_be_to_cpu_16(*eth_proto0) == ETH_TYPE_IPV4)
+ && (*protocol0 == IP_PROTOCOL_ICMP)
+ #ifdef VNF_ACL
+ && (link0->ip == rte_be_to_cpu_32(*dst_addr0))
+ #endif
+ )) {
+
+ #ifdef VNF_ACL
+ out_port0 = p_arp->outport_id[pkt[0]->port];
+ process_arpicmp_pkt(pkt[0], out_port0, pkt_mask0);
+ #else
+ process_arpicmp_pkt(pkt[0], ifm_get_port(in_port_id));
+ #endif
+
+ goto PKT1;
+ }
+ #ifdef IPV6
+ else if ((rte_be_to_cpu_16(*eth_proto0) == ETH_TYPE_IPV6)
+ && (*protocol0 == ICMPV6_PROTOCOL_ID)) {
+
+ #ifdef VNF_ACL
+ out_port0 = p_arp->outport_id[pkt[0]->port];
+ process_icmpv6_pkt(pkt[0], out_port0, pkt_mask0);
+ #else
+ process_icmpv6_pkt(pkt[0], ifm_get_port(in_port_id));
+ #endif
+
+ goto PKT1;
+ }
+ #endif
+
+ /* Drop the pkt if not ARP/ICMP */
+ rte_pipeline_ah_packet_drop(p_arp->p.p, pkt_mask0);
+ p_arp->droppedPktCount++;
+
+PKT1:
+ if ((ARPICMP_DEBUG > 2) && (arpicmp_pkt_print_count < 10)) {
+ print_pkt1(pkt[1]);
+ arpicmp_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto1), *protocol1, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ if ((rte_be_to_cpu_16(*eth_proto1) == ETH_TYPE_ARP) ||
+ ((rte_be_to_cpu_16(*eth_proto1) == ETH_TYPE_IPV4)
+ && (*protocol1 == IP_PROTOCOL_ICMP)
+ #ifdef VNF_ACL
+ && (link1->ip == rte_be_to_cpu_32(*dst_addr1))
+ #endif
+ )) {
+
+ #ifdef VNF_ACL
+ out_port1 = p_arp->outport_id[pkt[1]->port];
+ process_arpicmp_pkt(pkt[1], out_port1, pkt_mask1);
+ #else
+ process_arpicmp_pkt(pkt[1], ifm_get_port(in_port_id));
+ #endif
+ goto PKT2;
+ }
+ #ifdef IPV6
+ else if ((rte_be_to_cpu_16(*eth_proto1) == ETH_TYPE_IPV6)
+ && (*protocol1 == ICMPV6_PROTOCOL_ID)) {
+
+ #ifdef VNF_ACL
+ out_port1 = p_arp->outport_id[pkt[1]->port];
+ process_icmpv6_pkt(pkt[1], out_port1, pkt_mask1);
+ #else
+ process_icmpv6_pkt(pkt[1], ifm_get_port(in_port_id));
+ #endif
+
+ goto PKT2;
+ }
+ #endif
+
+ /* Drop the pkt if not ARP/ICMP */
+ rte_pipeline_ah_packet_drop(p_arp->p.p, pkt_mask1);
+ p_arp->droppedPktCount++;
+
+PKT2:
+ if ((ARPICMP_DEBUG > 2) && (arpicmp_pkt_print_count < 10)) {
+ print_pkt1(pkt[2]);
+ arpicmp_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto2), *protocol2, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ if ((rte_be_to_cpu_16(*eth_proto2) == ETH_TYPE_ARP) ||
+ ((rte_be_to_cpu_16(*eth_proto2) == ETH_TYPE_IPV4)
+ && (*protocol2 == IP_PROTOCOL_ICMP)
+ #ifdef VNF_ACL
+ && (link2->ip == rte_be_to_cpu_32(*dst_addr2))
+ #endif
+ )) {
+
+ #ifdef VNF_ACL
+ out_port2 = p_arp->outport_id[pkt[2]->port];
+ process_arpicmp_pkt(pkt[2], out_port2, pkt_mask2);
+ #else
+ process_arpicmp_pkt(pkt[2], ifm_get_port(in_port_id));
+ #endif
+
+ goto PKT3;
+ }
+ #ifdef IPV6
+ else if ((rte_be_to_cpu_16(*eth_proto2) == ETH_TYPE_IPV6)
+ && (*protocol2 == ICMPV6_PROTOCOL_ID)) {
+
+ #ifdef VNF_ACL
+ out_port2 = p_arp->outport_id[pkt[2]->port];
+ process_icmpv6_pkt(pkt[2], out_port2, pkt_mask2);
+ #else
+ process_icmpv6_pkt(pkt[2], ifm_get_port(in_port_id));
+ #endif
+
+ goto PKT3;
+ }
+ #endif
+
+ /* Drop the pkt if not ARP/ICMP */
+ rte_pipeline_ah_packet_drop(p_arp->p.p, pkt_mask2);
+ p_arp->droppedPktCount++;
+
+PKT3:
+ if ((ARPICMP_DEBUG > 2) && (arpicmp_pkt_print_count < 10)) {
+ print_pkt1(pkt[3]);
+ arpicmp_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto3), *protocol3, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ if ((rte_be_to_cpu_16(*eth_proto3) == ETH_TYPE_ARP) ||
+ ((rte_be_to_cpu_16(*eth_proto3) == ETH_TYPE_IPV4)
+ && (*protocol3 == IP_PROTOCOL_ICMP)
+
+ #ifdef VNF_ACL
+ && (link3->ip == rte_be_to_cpu_32(*dst_addr3))
+ #endif
+ )) {
+
+ #ifdef VNF_ACL
+ out_port3 = p_arp->outport_id[pkt[3]->port];
+ process_arpicmp_pkt(pkt[3], out_port3, pkt_mask3);
+ #else
+ process_arpicmp_pkt(pkt[3], ifm_get_port(in_port_id));
+ #endif
+
+ return;
+ }
+ #ifdef IPV6
+ else if ((rte_be_to_cpu_16(*eth_proto3) == ETH_TYPE_IPV6)
+ && (*protocol3 == ICMPV6_PROTOCOL_ID)) {
+
+ #ifdef VNF_ACL
+ out_port3 = p_arp->outport_id[pkt[3]->port];
+ process_icmpv6_pkt(pkt[3], out_port3, pkt_mask3);
+ #else
+ process_icmpv6_pkt(pkt[3], ifm_get_port(in_port_id));
+ #endif
+ return;
+ }
+ #endif
+
+ /* Drop the pkt if not ARP/ICMP */
+ rte_pipeline_ah_packet_drop(p_arp->p.p, pkt_mask3);
+ p_arp->droppedPktCount++;
+
+
+}
+
+PIPELINE_ARPICMP_KEY_PORT_IN_AH(
+ port_in_ah_arpicmp,
+ pkt_key_arpicmp,
+ pkt4_key_arpicmp);
+
+static void *pipeline_arpicmp_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_arpicmp *p_arp;
+ uint32_t size, i, in_ports_arg_size;
+
+ printf("Start pipeline_arpicmp_init\n");
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_arpicmp));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_arp = (struct pipeline_arpicmp *)p;
+ if (p == NULL)
+ return NULL;
+
+ //gp_arp = p_arp;
+ struct app_params *app = (struct app_params *)arg;
+ myApp = arg;
+
+ PLOG(p, HIGH, "ARPICMP");
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ p_arp->receivedPktCount = 0;
+ p_arp->droppedPktCount = 0;
+
+#ifdef VNF_ACL
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++)
+ p_arp->links_map[i] = 0xff;
+
+ p_arp->pipeline_num = 0;
+
+ /* Parse arguments */
+ if (pipeline_arpicmp_parse_args(p_arp, params))
+ return NULL;
+#endif
+ #ifndef VNF_ACL
+ lib_arp_init(params, app);
+ #endif
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "ARPICMP",
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ //.offset_port_id = arp_meta_offset,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ p->n_tables = 1;
+
+ /* Memory allocation for in_port_h_arg */
+ in_ports_arg_size = RTE_CACHE_LINE_ROUNDUP(
+ (sizeof(struct pipeline_arpicmp_in_port_h_arg)) *
+ (params->n_ports_in));
+ struct pipeline_arpicmp_in_port_h_arg *ap =
+ (struct pipeline_arpicmp_in_port_h_arg *)rte_zmalloc(NULL,
+ in_ports_arg_size,
+ RTE_CACHE_LINE_SIZE);
+ if (ap == NULL)
+ return NULL;
+
+ /*Input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ /* passing our txrx pipeline in call back arg */
+ (ap[i]).p = p_arp;
+ (ap[i]).in_port_id = i;
+ struct rte_pipeline_port_in_params port_params = {
+ .ops =
+ pipeline_port_in_params_get_ops(&params->
+ port_in[i]),
+ .arg_create =
+ pipeline_port_in_params_convert(&params->
+ port_in[i]),
+ .f_action = NULL,
+ .arg_ah = &(ap[i]),
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ port_params.f_action = port_in_ah_arpicmp;
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops =
+ pipeline_port_out_params_get_ops(&params->
+ port_out[i]),
+ .arg_create =
+ pipeline_port_out_params_convert(&params->
+ port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+ int pipeline_num = 0;
+
+ int status = sscanf(params->name, "PIPELINE%d", &pipeline_num);
+
+ if (status < 0) {
+ printf("Unable to read pipeline number\n");
+ return NULL;
+ }
+
+ p_arp->pipeline_num = (uint8_t) pipeline_num;
+
+ register_pipeline_Qs(p_arp->pipeline_num, p);
+ set_phy_outport_id(p_arp->pipeline_num, p, p_arp->outport_id);
+
+ /* Tables */
+ {
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ int status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+
+#ifdef VNF_ACL
+
+	/* Create the ARP/ICMP tx mbuf pool */
+ lib_arp_pktmbuf_tx_pool = rte_pktmbuf_pool_create(
+ "lib_arp_mbuf_tx_pool",
+ NB_ARPICMP_MBUF, 32,
+ 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+
+ if (lib_arp_pktmbuf_tx_pool == NULL) {
+ printf("ARP mbuf pool create failed.\n");
+ return NULL;
+ }
+
+ lib_arp_pkt = rte_pktmbuf_alloc(lib_arp_pktmbuf_tx_pool);
+ if (lib_arp_pkt == NULL) {
+ printf("ARP lib_arp_pkt alloc failed.\n");
+ return NULL;
+ }
+
+ /* ARP Table */
+ arp_hash_params.socket_id = rte_socket_id();
+ arp_hash_params.entries = MAX_NUM_ARP_ENTRIES;
+ arp_hash_handle = rte_hash_create(&arp_hash_params);
+
+ if (arp_hash_handle == NULL) {
+ printf("ARP rte_hash_create failed. socket %d ...\n",
+ arp_hash_params.socket_id);
+ return NULL;
+ }
+ printf("arp_hash_handle %p\n\n", (void *)arp_hash_handle);
+
+ /* ND IPv6 */
+ nd_hash_params.socket_id = rte_socket_id();
+ nd_hash_params.entries = MAX_NUM_ND_ENTRIES;
+ nd_hash_handle = rte_hash_create(&nd_hash_params);
+
+ if (nd_hash_handle == NULL) {
+ printf("ND rte_hash_create failed. socket %d ...\n",
+ nd_hash_params.socket_id);
+ return NULL;
+ }
+
+ printf("nd_hash_handle %p\n\n", (void *)nd_hash_handle);
+#endif
+ return p;
+}
+
+static int pipeline_arpicmp_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int pipeline_arpicmp_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+static int
+pipeline_arpicmp_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
+ return -1;
+
+ *port_out = port_in / p->n_ports_in;
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_arpicmp_be_ops = {
+ .f_init = pipeline_arpicmp_init,
+ .f_free = pipeline_arpicmp_free,
+ .f_run = NULL,
+ .f_timer = pipeline_arpicmp_timer,
+ .f_track = pipeline_arpicmp_track,
+};
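For readers of this patch: the ops table above is the backend's entire entry point. f_init builds the rte_pipeline, f_timer drains the message queues and flushes it, and f_run is left NULL because the packet work is done in the port-in action handler. A minimal sketch of how a driver thread could exercise this table, assuming "params" and "arg" are already populated by the configuration layer (the loop itself and the function name are illustrative, not part of this patch):

#include <rte_cycles.h>

/* Illustrative driver loop for pipeline_arpicmp_be_ops (not part of the
 * patch): init once, then periodically service messages and flush. */
static void arpicmp_driver_sketch(struct pipeline_params *params, void *arg)
{
	void *p = pipeline_arpicmp_be_ops.f_init(params, arg);

	if (p == NULL)
		return;

	for ( ; ; ) {
		/* f_run is NULL for this pipeline: RX/TX happens in the
		 * port-in action handler, so the timer callback only has
		 * to handle message requests and flush the pipeline. */
		pipeline_arpicmp_be_ops.f_timer(p);
		rte_delay_ms(1);
	}
}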
diff --git a/common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.h b/common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.h
new file mode 100644
index 00000000..2c7fce2e
--- /dev/null
+++ b/common/VIL/pipeline_arpicmp/pipeline_arpicmp_be.h
@@ -0,0 +1,343 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_ARPICMP_BE_H__
+#define __INCLUDE_PIPELINE_ARPICMP_BE_H__
+
+#include "pipeline_common_be.h"
+#define PIPELINE_ARPICMP_KEY_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i, j; \
+ \
+ for (j = 0; j < n_pkts; j++) \
+ rte_prefetch0(pkts[j]); \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], i, arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], i, arg); \
+ \
+ \
+ return 0; \
+}
+
+extern struct app_params *myApp;
+void print_pkt1(struct rte_mbuf *pkt);
+struct ether_addr *get_link_hw_addr(uint8_t out_port);
+#ifdef VNF_ACL
+
+#include <rte_pipeline.h>
+#include "rte_ether.h"
+#include "app.h"
+
+#if (RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN)
+// x86 == little endian
+// network == big endian
+#define CHECK_ENDIAN_16(x) rte_be_to_cpu_16(x)
+#define CHECK_ENDIAN_32(x) rte_be_to_cpu_32(x)
+#else
+#define CHECK_ENDIAN_16(x) (x)
+#define CHECK_ENDIAN_32(x) (x)
+#endif
+
+
+#define MAX_ARP_RT_ENTRY 16
+#define MAX_ND_RT_ENTRY 16
+
+#define ND_IPV6_ADDR_SIZE 16 /* 16 Byte of IPv6 Address */
+
+enum {
+ARP_FOUND,
+ARP_NOT_FOUND,
+NH_NOT_FOUND,
+};
+
+enum arp_key_type {
+ ARP_IPV4,
+ /* ND IPv6 */
+ ND_IPV6,
+};
+
+struct arp_key_ipv4 {
+ uint32_t ip;
+ uint8_t port_id;
+ uint8_t filler1;
+ uint8_t filler2;
+ uint8_t filler3;
+};
+
+/* ND IPv6 */
+struct nd_key_ipv6 {
+ /*128 Bit of IPv6 Address */
+ /*<48bit Network> <16bit Subnet> <64bit Interface> */
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE];
+ uint8_t port_id;
+ uint8_t filler1;
+ uint8_t filler2;
+ uint8_t filler3;
+};
+
+struct arp_key {
+ enum arp_key_type type;
+ union {
+ struct arp_key_ipv4 ipv4;
+ } key;
+};
+
+struct lib_arp_route_table_entry {
+ uint32_t ip;
+ uint32_t mask;
+ uint32_t port;
+ uint32_t nh;
+};
+
+struct lib_nd_route_table_entry {
+ uint8_t ipv6[16];
+ uint8_t depth;
+ uint32_t port;
+ uint8_t nhipv6[16];
+};
+extern struct lib_arp_route_table_entry lib_arp_route_table[MAX_ARP_RT_ENTRY];
+extern struct lib_nd_route_table_entry lib_nd_route_table[MAX_ND_RT_ENTRY];
+
+extern uint8_t prv_in_port_a[PIPELINE_MAX_PORT_IN];
+extern void convert_prefixlen_to_netmask_ipv6(uint32_t depth,
+ uint8_t netmask_ipv6[]);
+uint32_t get_nh(uint32_t, uint32_t*);
+void get_nh_ipv6(uint8_t ipv6[], uint32_t *port, uint8_t nhipv6[]);
+
+extern uint32_t ARPICMP_DEBUG;
+
+
+/* ARP entry populated and echo reply received */
+#define COMPLETE 1
+/* ARP entry populated and either awaiting echo reply or stale entry */
+#define INCOMPLETE 0
+
+/* ND IPv6 */
+extern uint32_t NDIPV6_DEBUG;
+
+/* ICMPv6 entry populated and echo reply received */
+#define ICMPv6_COMPLETE 1
+/* ICMPv6 entry populated and either awaiting echo reply or stale entry */
+#define ICMPv6_INCOMPLETE 0
+
+struct arp_entry_data {
+ struct ether_addr eth_addr;
+ uint8_t port;
+ uint8_t status;
+ uint32_t ip;
+} __attribute__ ((__packed__));
+
+/*ND IPv6*/
+struct nd_entry_data {
+ struct ether_addr eth_addr;
+ uint8_t port;
+ uint8_t status;
+ uint8_t ipv6[ND_IPV6_ADDR_SIZE];
+} __attribute__ ((__packed__));
+
+int get_dest_mac_address(const uint32_t ipaddr, const uint32_t phy_port,
+ struct ether_addr *hw_addr, uint32_t *nhip);
+int get_dest_mac_addr(const uint32_t ipaddr, const uint32_t phy_port,
+ struct ether_addr *hw_addr);
+
+int get_dest_mac_address_ipv6(uint8_t ipv6addr[], uint32_t phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[]);
+
+void lib_arp_request_arp(
+ const uint32_t ipaddr,
+ const uint32_t phy_port,
+ struct rte_pipeline *rte_p);
+
+void print_arp_table(void);
+void print_nd_table(void);
+void remove_arp_entry(uint32_t ipaddr, uint8_t portid);
+void remove_nd_entry_ipv6(uint8_t ipv6addr[], uint8_t portid);
+void populate_arp_entry(const struct ether_addr *hw_addr, uint32_t ipaddr,
+ uint8_t portid);
+/*ND IPv6*/
+int populate_nd_entry(const struct ether_addr *hw_addr, uint8_t ip[],
+ uint8_t portid);
+void request_arp(uint8_t port_id, uint32_t ip, struct rte_pipeline *rte_p);
+void request_arp_wrap(uint8_t port_id, uint32_t ip);
+void request_echo(unsigned int port_id, uint32_t ip);
+
+void process_arpicmp_pkt(struct rte_mbuf *pkt, uint32_t out_port,
+ uint32_t pkt_num);
+
+struct arp_entry_data *retrieve_arp_entry(const struct arp_key_ipv4 arp_key);
+struct nd_entry_data *retrieve_nd_entry(struct nd_key_ipv6 nd_key);
+
+void lib_nd_init(/*struct pipeline_params *params, */ struct app_params *app);
+void print_pkt1(struct rte_mbuf *pkt);
+
+#endif
+
+uint8_t lb_outport_id[PIPELINE_MAX_PORT_IN];
+struct pipeline *loadb_pipeline[PIPELINE_MAX_PORT_IN];
+struct pipeline *all_pipeline[PIPELINE_MAX_PORT_IN];
+uint8_t vnf_to_loadb_map[PIPELINE_MAX_PORT_IN];
+uint8_t port_to_loadb_map[PIPELINE_MAX_PORT_IN];
+uint8_t loadb_pipeline_nums[PIPELINE_MAX_PORT_IN];
+
+#if 0
+uint8_t lb_outport_id[PIPELINE_MAX_PORT_IN];
+struct pipeline *arp_pipeline[PIPELINE_MAX_PORT_IN];
+uint8_t vnf_to_arp_map[PIPELINE_MAX_PORT_IN];
+uint8_t port_to_arp_map[PIPELINE_MAX_PORT_IN];
+uint8_t arp_pipeline_nums[PIPELINE_MAX_PORT_IN];
+#endif
+
+void set_port_to_loadb_map(uint8_t pipeline_num);
+uint8_t get_port_to_loadb_map(uint8_t phy_port_id);
+/* acts on port_to_loadb_map */
+
+void set_phy_inport_map(uint8_t pipeline_num, uint8_t *map);
+void set_phy_outport_map(uint8_t pipeline_num, uint8_t *map);
+
+void set_outport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map);
+/* acts on lb_outport_id */
+uint8_t get_loadb_outport_id(uint8_t actual_phy_port);
+/* acts on lb_outport_id */
+uint8_t get_vnf_set_num(uint8_t pipeline_num);
+
+void pipelines_port_info(void);
+void pipelines_map_info(void);
+void register_loadb_to_arp(uint8_t pipeline_num, struct pipeline *p,
+ __rte_unused struct app_params *app);
+/* vnf_to_loadb_map[] and loadb_pipelines[] */
+uint8_t SWQ_to_Port_map[128];
+
+extern struct pipeline_be_ops pipeline_arpicmp_be_ops;
+void register_pipeline_Qs(uint8_t pipeline_num, struct pipeline *p);
+void set_link_map(uint8_t pipeline_num, struct pipeline *p, uint8_t *map);
+void set_outport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map);
+void set_phy_outport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map);
+void set_phy_inport_id(uint8_t pipeline_num, struct pipeline *p, uint8_t *map);
+
+/*
+ * Messages
+ */
+enum pipeline_arpicmp_msg_req_type {
+ PIPELINE_ARPICMP_MSG_REQ_ENTRY_DBG,
+ PIPELINE_ARPICMP_MSG_REQS
+};
+
+/*
+ * MSG ENTRY DBG
+ */
+struct pipeline_arpicmp_entry_dbg_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_arpicmp_msg_req_type subtype;
+
+ /* data */
+ uint8_t data[2];
+};
+
+/*
+ * ARPICMP Entry
+ */
+
+struct pipeline_arpicmp_in_port_h_arg {
+ struct pipeline_arpicmp *p;
+ uint8_t in_port_id;
+};
+
+struct pipeline_arpicmp_entry_dbg_msg_rsp {
+ int status;
+};
+
+#ifdef VNF_ACL
+
+ /* ICMPv6 Header */
+struct icmpv6_hdr {
+ uint8_t icmpv6_type; /* ICMPV6 packet type. */
+ uint8_t icmpv6_code; /* ICMPV6 packet code. */
+ uint16_t icmpv6_cksum; /* ICMPV6 packet checksum. */
+} __attribute__ ((__packed__));
+
+ /**
+ * ICMPV6 Info Header
+ */
+struct icmpv6_info_hdr {
+ uint16_t icmpv6_ident; /* ICMPV6 packet identifier. */
+ uint16_t icmpv6_seq_nb; /* ICMPV6 packet sequence number. */
+} __attribute__ ((__packed__));
+
+ /**
+ * ICMPV6 ND Header
+ */
+struct icmpv6_nd_hdr {
+ /*ND Advertisement flags */
+ uint32_t icmpv6_reserved;
+ /* bit31-Router, bit30-Solicited, bit29-Override, bit28-bit0 unused */
+
+ uint8_t target_ipv6[16]; /**< target IPv6 address */
+ /*ICMPv6 Option*/
+ uint8_t type;
+ uint8_t length;
+ struct ether_addr link_layer_address;
+} __attribute__ ((__packed__));
+
+ /* Icmpv6 types */
+ #define ICMPV6_PROTOCOL_ID 58
+ #define ICMPV6_ECHO_REQUEST 0x0080
+ #define ICMPV6_ECHO_REPLY 0x0081
+ #define ICMPV6_NEIGHBOR_SOLICITATION 0x0087
+ #define ICMPV6_NEIGHBOR_ADVERTISEMENT 0x0088
+ #define IPV6_MULTICAST 0xFF02
+
+ #define NEIGHBOR_SOLICITATION_SET 0x40000000
+enum icmpv6_link_layer_Address_type {
+ e_Source_Link_Layer_Address = 1,
+ e_Target_Link_Layer_Address,
+ e_Link_Layer_Address
+};
+
+uint8_t is_multicast_ipv6_addr(uint8_t ipv6[]);
+struct icmpv6_port_address {
+ uint32_t ipv6[16];
+ uint64_t mac_addr;
+};
+
+struct icmpv6_port_address icmpv6_port_addresses[RTE_MAX_ETHPORTS];
+
+ #define MAX_NUM_ICMPv6_ENTRIES 64
+ //struct rte_pipeline *myicmpP;
+struct rte_mbuf *lib_icmpv6_pkt;
+void request_icmpv6_echo(uint32_t port_id, uint8_t ipv6[]);
+void request_icmpv6_echo_message(uint16_t port_id, uint8_t ipv6[],
+ struct ether_addr *gw_addr);
+void
+process_icmpv6_pkt(struct rte_mbuf *pkt, uint32_t out_port, uint32_t pkt_num);
+
+int get_dest_mac_addr_port(const uint32_t ipaddr,
+ uint32_t *phy_port, struct ether_addr *hw_addr);
+
+int get_dest_mac_address_ipv6_port(uint8_t ipv6addr[], uint32_t *phy_port,
+ struct ether_addr *hw_addr, uint8_t nhipv6[]);
+#endif
+#endif
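The PIPELINE_ARPICMP_KEY_PORT_IN_AH macro near the top of this header generates a port-in action handler that prefetches the whole burst, processes packets four at a time, and falls back to per-packet handling for the remainder. A hedged sketch of how it could be instantiated; the worker names pkt_work_dbg/pkt4_work_dbg and the generated handler name are hypothetical, and the sketch assumes this header is on the include path:

#include <stdio.h>
#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include "pipeline_arpicmp_be.h"

/* Hypothetical per-packet and 4-packet workers; a real handler would parse
 * the ARP/ICMP headers here instead of printing. */
static inline void
pkt_work_dbg(struct rte_mbuf *pkt, uint32_t pkt_num, void *arg)
{
	struct pipeline_arpicmp_in_port_h_arg *h = arg;

	printf("in port %u, pkt %u: %p\n",
		(unsigned int) h->in_port_id, pkt_num, (void *) pkt);
}

static inline void
pkt4_work_dbg(struct rte_mbuf **pkts, uint32_t pkt_num, void *arg)
{
	uint32_t i;

	for (i = 0; i < 4; i++)
		pkt_work_dbg(pkts[i], pkt_num + i, arg);
}

/* Expands to a static int port_in_ah_dbg(...) that prefetches the burst,
 * dispatches packets four at a time to pkt4_work_dbg, then mops up the
 * remainder one packet at a time with pkt_work_dbg. */
PIPELINE_ARPICMP_KEY_PORT_IN_AH(port_in_ah_dbg, pkt_work_dbg, pkt4_work_dbg)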
diff --git a/common/VIL/pipeline_common/pipeline_common_be.c b/common/VIL/pipeline_common/pipeline_common_be.c
new file mode 100644
index 00000000..66e2c5bc
--- /dev/null
+++ b/common/VIL/pipeline_common/pipeline_common_be.c
@@ -0,0 +1,189 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+
+#include "pipeline_common_be.h"
+
+void *
+pipeline_msg_req_ping_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_msg_rsp *rsp = msg;
+
+ rsp->status = 0; /* OK */
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_port_in_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_port_in_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_port_out_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_port_out_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->id >= p->n_ports_out) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_out_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_table_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_table_msg_rsp *rsp = msg;
+ uint32_t table_id;
+
+ /* Check request */
+ if (req->id >= p->n_tables) {
+ rsp->status = -1;
+ return rsp;
+ }
+ table_id = p->table_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_port_in_enable_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_port_in_msg_req *req = msg;
+ struct pipeline_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->port_id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->port_id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_port_in_disable_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_port_in_msg_req *req = msg;
+ struct pipeline_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->port_id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->port_id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_invalid_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_msg_rsp *rsp = msg;
+
+ rsp->status = -1; /* Error */
+
+ return rsp;
+}
+
+int
+pipeline_msg_req_handle(struct pipeline *p)
+{
+ uint32_t msgq_id;
+
+ for (msgq_id = 0; msgq_id < p->n_msgq; msgq_id++) {
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ pipeline_msg_req_handler f_handle;
+
+ req = pipeline_msg_recv(p, msgq_id);
+ if (req == NULL)
+ break;
+
+ f_handle = (req->type < PIPELINE_MSG_REQS) ?
+ p->handlers[req->type] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ pipeline_msg_send(p,
+ msgq_id,
+ f_handle(p, (void *) req));
+ }
+ }
+
+ return 0;
+}
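The dispatch loop in pipeline_msg_req_handle() only does useful work if the per-pipeline handlers table is populated; the arpicmp backend above does that with memcpy(p->handlers, handlers, sizeof(p->handlers)). A minimal sketch of such a table under the same conventions (the table name is illustrative; entries left NULL fall back to pipeline_msg_req_invalid_handler):

/* Illustrative handlers table: standard requests use the common handlers
 * defined in this file; PIPELINE_MSG_REQ_CUSTOM is left NULL here, so any
 * custom request is answered by pipeline_msg_req_invalid_handler. */
static pipeline_msg_req_handler handlers_sketch[] = {
	[PIPELINE_MSG_REQ_PING] =
		pipeline_msg_req_ping_handler,
	[PIPELINE_MSG_REQ_STATS_PORT_IN] =
		pipeline_msg_req_stats_port_in_handler,
	[PIPELINE_MSG_REQ_STATS_PORT_OUT] =
		pipeline_msg_req_stats_port_out_handler,
	[PIPELINE_MSG_REQ_STATS_TABLE] =
		pipeline_msg_req_stats_table_handler,
	[PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
		pipeline_msg_req_port_in_enable_handler,
	[PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
		pipeline_msg_req_port_in_disable_handler,
	[PIPELINE_MSG_REQ_CUSTOM] = NULL,
};

A backend would copy this into struct pipeline at init time, exactly as the arpicmp init function does with its own table.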
diff --git a/common/VIL/pipeline_common/pipeline_common_be.h b/common/VIL/pipeline_common/pipeline_common_be.h
new file mode 100644
index 00000000..f3e937e6
--- /dev/null
+++ b/common/VIL/pipeline_common/pipeline_common_be.h
@@ -0,0 +1,146 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_COMMON_BE_H__
+#define __INCLUDE_PIPELINE_COMMON_BE_H__
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_be.h"
+
+struct pipeline;
+
+enum pipeline_msg_req_type {
+ PIPELINE_MSG_REQ_PING = 0,
+ PIPELINE_MSG_REQ_STATS_PORT_IN,
+ PIPELINE_MSG_REQ_STATS_PORT_OUT,
+ PIPELINE_MSG_REQ_STATS_TABLE,
+ PIPELINE_MSG_REQ_PORT_IN_ENABLE,
+ PIPELINE_MSG_REQ_PORT_IN_DISABLE,
+ PIPELINE_MSG_REQ_CUSTOM,
+ PIPELINE_MSG_REQS
+};
+
+typedef void *(*pipeline_msg_req_handler)(struct pipeline *p, void *msg);
+
+struct pipeline {
+ struct rte_pipeline *p;
+ uint32_t port_in_id[PIPELINE_MAX_PORT_IN];
+ uint32_t port_out_id[PIPELINE_MAX_PORT_OUT];
+ uint32_t table_id[PIPELINE_MAX_TABLES];
+ struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
+ struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
+
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+ uint32_t n_msgq;
+
+ pipeline_msg_req_handler handlers[PIPELINE_MSG_REQS];
+ char name[PIPELINE_NAME_SIZE];
+ uint32_t log_level;
+};
+
+enum pipeline_log_level {
+ PIPELINE_LOG_LEVEL_HIGH = 1,
+ PIPELINE_LOG_LEVEL_LOW,
+ PIPELINE_LOG_LEVELS
+};
+
+#define PLOG(p, level, fmt, ...) \
+do { \
+ if (p->log_level >= PIPELINE_LOG_LEVEL_ ## level) \
+ fprintf(stdout, "[%s] " fmt "\n", p->name, ## __VA_ARGS__);\
+} while (0)
+
+static inline void *
+pipeline_msg_recv(struct pipeline *p,
+ uint32_t msgq_id)
+{
+ struct rte_ring *r = p->msgq_in[msgq_id];
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void
+pipeline_msg_send(struct pipeline *p,
+ uint32_t msgq_id,
+ void *msg)
+{
+ struct rte_ring *r = p->msgq_out[msgq_id];
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+struct pipeline_msg_req {
+ enum pipeline_msg_req_type type;
+};
+
+struct pipeline_stats_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t id;
+};
+
+struct pipeline_port_in_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t port_id;
+};
+
+struct pipeline_custom_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t subtype;
+};
+
+struct pipeline_msg_rsp {
+ int status;
+};
+
+struct pipeline_stats_port_in_msg_rsp {
+ int status;
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_stats_port_out_msg_rsp {
+ int status;
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_stats_table_msg_rsp {
+ int status;
+ struct rte_pipeline_table_stats stats;
+};
+
+void *pipeline_msg_req_ping_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_port_in_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_port_out_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_table_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_port_in_enable_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_port_in_disable_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_invalid_handler(struct pipeline *p, void *msg);
+
+int pipeline_msg_req_handle(struct pipeline *p);
+
+#endif
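One convention worth calling out: requests and responses share the same message buffer (allocated by the frontend), which is why every handler in pipeline_common_be.c casts msg to both the request and the response type. A handler must therefore read everything it needs from the request before writing the response over it. A hedged sketch of a custom handler following that convention; the struct and function names are hypothetical:

/* Hypothetical custom request/response pair; field names are illustrative. */
struct sketch_custom_msg_req {
	enum pipeline_msg_req_type type;   /* PIPELINE_MSG_REQ_CUSTOM */
	uint32_t subtype;
	uint32_t value;
};

struct sketch_custom_msg_rsp {
	int status;
};

static void *
sketch_custom_handler(struct pipeline *p, void *msg)
{
	struct sketch_custom_msg_req *req = msg;
	struct sketch_custom_msg_rsp *rsp = msg;
	uint32_t value = req->value;       /* read before overwriting */

	PLOG(p, LOW, "custom subtype %u value %u", req->subtype, value);

	rsp->status = 0;                   /* response reuses the buffer */
	return rsp;
}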
diff --git a/common/VIL/pipeline_common/pipeline_common_fe.c b/common/VIL/pipeline_common/pipeline_common_fe.c
new file mode 100644
index 00000000..5df29779
--- /dev/null
+++ b/common/VIL/pipeline_common/pipeline_common_fe.c
@@ -0,0 +1,1429 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "pipeline_common_fe.h"
+#ifndef VNF_ACL
+#include "interface.h"
+#endif
+
+int
+app_pipeline_ping(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PING;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+#if 1
+int
+app_pipeline_stats_port_in(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_port_in_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_PORT_IN;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = (struct pipeline_stats_port_in_msg_rsp *)
+ app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_port_out(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_port_out_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (pipeline_id >= app->n_pipelines) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_out))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_PORT_OUT;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_table(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_table_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_TABLE;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_port_in_enable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_port_in_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PORT_IN_ENABLE;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_port_in_disable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_port_in_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PORT_IN_DISABLE;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_link_config(struct app_params *app,
+ uint32_t link_id,
+ uint32_t ip,
+ uint32_t depth)
+{
+ struct app_link_params *p;
+ uint32_t i, netmask, host, bcast;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is UP, please bring it DOWN first",
+ p->name);
+ return -1;
+ }
+
+ netmask = (~0U) << (32 - depth);
+ host = ip & netmask;
+ bcast = host | (~netmask);
+
+ if ((ip == 0) ||
+ (ip == UINT32_MAX) ||
+ (ip == host) ||
+ (ip == bcast)) {
+ APP_LOG(app, HIGH, "Illegal IP address");
+ return -1;
+ }
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+ mylink[i] = *link;
+ if (strcmp(p->name, link->name) == 0)
+ continue;
+
+ if (link->ip == ip) {
+ APP_LOG(app, HIGH,
+ "%s is already assigned this IP address",
+ link->name);
+ return -1;
+ }
+ }
+
+ if ((depth == 0) || (depth > 32)) {
+ APP_LOG(app, HIGH, "Illegal value for depth parameter "
+ "(%" PRIu32 ")",
+ depth);
+ return -1;
+ }
+
+ /* Save link parameters */
+ p->ip = ip;
+ p->depth = depth;
+ #ifndef VNF_ACL
+ if (ifm_add_ipv4_port(link_id, rte_bswap32(ip), depth) == IFM_FAILURE)
+ return -1;
+ #endif
+
+ return 0;
+}
+
+
+void convert_prefixlen_to_netmask_ipv6(uint32_t depth, uint8_t netmask_ipv6[])
+{
+ int mod, div, i;
+
+ memset(netmask_ipv6, 0, 16);
+
+ mod = depth % 8;
+ div = depth / 8;
+
+ for (i = 0; i < div; i++)
+ netmask_ipv6[i] = 0xff;
+
+	if (mod)
+		netmask_ipv6[i] = (uint8_t) (~0 << (8 - mod));
+
+ return;
+}
+
+void
+get_host_portion_ipv6(uint8_t ipv6[], uint8_t netmask[], uint8_t host_ipv6[])
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ host_ipv6[i] = ipv6[i] & netmask[i];
+ }
+
+ return;
+}
+
+void
+get_bcast_portion_ipv6(uint8_t host[], uint8_t netmask[], uint8_t bcast_ipv6[])
+{
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ bcast_ipv6[i] = host[i] | ~netmask[i];
+ }
+
+ return;
+}
+
+int
+app_link_config_ipv6(struct app_params *app,
+ uint32_t link_id, uint8_t ipv6[], uint32_t depth)
+{
+ struct app_link_params *p;
+ uint32_t i;
+ uint8_t netmask_ipv6[16], host[16], bcast[16];
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is UP, please bring it DOWN first",
+ p->name);
+ return -1;
+ }
+
+ convert_prefixlen_to_netmask_ipv6(depth, netmask_ipv6);
+ get_host_portion_ipv6(ipv6, netmask_ipv6, host);
+ get_bcast_portion_ipv6(host, netmask_ipv6, bcast);
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+
+ if (strcmp(p->name, link->name) == 0)
+ continue;
+
+ if (!memcmp(link->ipv6, ipv6, 16)) {
+ APP_LOG(app, HIGH,
+ "%s is already assigned this IPv6 address",
+ link->name);
+ return -1;
+ }
+ }
+
+ if ((depth == 0) || (depth > 128)) {
+ APP_LOG(app, HIGH, "Illegal value for depth parameter "
+ "(%" PRIu32 ")", depth);
+ return -1;
+ }
+
+ /* Save link parameters */
+ memcpy(p->ipv6, ipv6, 16);
+
+ p->depth_ipv6 = depth;
+/*
+ printf("IPv6: %x%x:%x%x:%x%x:%x%x:%x%x:%x%x:%x%x:%x%x",
+ ipv6[0], ipv6[1], ipv6[2], ipv6[3], ipv6[4], ipv6[5],
+ ipv6[6], ipv6[7], ipv6[8], ipv6[9], ipv6[10], ipv6[11],
+ ipv6[12], ipv6[13], ipv6[14], ipv6[15]);
+*/
+ #ifndef VNF_ACL
+ if (ifm_add_ipv6_port(link_id, ipv6, depth) == IFM_FAILURE)
+ return -1;
+ #endif
+ return 0;
+}
+
+int
+app_link_up(struct app_params *app,
+ uint32_t link_id)
+{
+ struct app_link_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ /* Check link state */
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is already UP", p->name);
+ return 0;
+ }
+
+ /* Check that IP address is valid */
+ uint8_t temp[16];
+
+ memset(temp, 0, 16);
+
+	if ((p->ip == 0) && (memcmp(p->ipv6, temp, 16) == 0)) {
+ APP_LOG(app, HIGH, "%s IP address is not set", p->name);
+ return 0;
+ }
+
+ app_link_up_internal(app, p);
+
+ return 0;
+}
+
+int
+app_link_down(struct app_params *app,
+ uint32_t link_id)
+{
+ struct app_link_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ /* Check link state */
+ if (p->state == 0) {
+ APP_LOG(app, HIGH, "%s is already DOWN", p->name);
+ return 0;
+ }
+
+ app_link_down_internal(app, p);
+
+ return 0;
+}
+
+/*
+ * ping
+ */
+
+struct cmd_ping_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t ping_string;
+};
+
+static void
+cmd_ping_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_ping_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_ping(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_ping_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_ping_result, p_string, "p");
+
+cmdline_parse_token_num_t cmd_ping_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ping_result, pipeline_id, UINT32);
+
+cmdline_parse_token_string_t cmd_ping_ping_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_ping_result, ping_string, "ping");
+
+cmdline_parse_inst_t cmd_ping = {
+ .f = cmd_ping_parsed,
+ .data = NULL,
+ .help_str = "Pipeline ping",
+ .tokens = {
+ (void *) &cmd_ping_p_string,
+ (void *) &cmd_ping_pipeline_id,
+ (void *) &cmd_ping_ping_string,
+ NULL,
+ },
+};
+
+/*
+ * stats port in
+ */
+
+struct cmd_stats_port_in_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+};
+
+static void
+cmd_stats_port_in_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_port_in_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_port_in_stats stats;
+ int status;
+
+ status = app_pipeline_stats_port_in(app,
+ params->pipeline_id,
+ params->port_in_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for input port %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts dropped by AH: %" PRIu64 "\n"
+ "\tPkts dropped by other: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->port_in_id,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+cmdline_parse_token_string_t cmd_stats_port_in_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, stats_string,
+ "stats");
+
+cmdline_parse_token_string_t cmd_stats_port_in_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_stats_port_in_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, in_string,
+ "in");
+
+cmdline_parse_token_num_t cmd_stats_port_in_port_in_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, port_in_id,
+		UINT32);
+
+cmdline_parse_inst_t cmd_stats_port_in = {
+ .f = cmd_stats_port_in_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port stats",
+ .tokens = {
+ (void *) &cmd_stats_port_in_p_string,
+ (void *) &cmd_stats_port_in_pipeline_id,
+ (void *) &cmd_stats_port_in_stats_string,
+ (void *) &cmd_stats_port_in_port_string,
+ (void *) &cmd_stats_port_in_in_string,
+ (void *) &cmd_stats_port_in_port_in_id,
+ NULL,
+ },
+};
+
+/*
+ * stats port out
+ */
+
+struct cmd_stats_port_out_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t out_string;
+ uint32_t port_out_id;
+};
+
+static void
+cmd_stats_port_out_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+
+ struct cmd_stats_port_out_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_port_out_stats stats;
+ int status;
+
+ status = app_pipeline_stats_port_out(app,
+ params->pipeline_id,
+ params->port_out_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for output port %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts dropped by AH: %" PRIu64 "\n"
+ "\tPkts dropped by other: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->port_out_id,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+cmdline_parse_token_string_t cmd_stats_port_out_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, stats_string,
+ "stats");
+
+cmdline_parse_token_string_t cmd_stats_port_out_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_stats_port_out_out_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, out_string,
+ "out");
+
+cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, port_out_id,
+ UINT32);
+
+cmdline_parse_inst_t cmd_stats_port_out = {
+ .f = cmd_stats_port_out_parsed,
+ .data = NULL,
+ .help_str = "Pipeline output port stats",
+ .tokens = {
+ (void *) &cmd_stats_port_out_p_string,
+ (void *) &cmd_stats_port_out_pipeline_id,
+ (void *) &cmd_stats_port_out_stats_string,
+ (void *) &cmd_stats_port_out_port_string,
+ (void *) &cmd_stats_port_out_out_string,
+ (void *) &cmd_stats_port_out_port_out_id,
+ NULL,
+ },
+};
+
+/*
+ * stats table
+ */
+
+struct cmd_stats_table_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t table_string;
+ uint32_t table_id;
+};
+
+static void
+cmd_stats_table_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_table_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_table_stats stats;
+ int status;
+
+ status = app_pipeline_stats_table(app,
+ params->pipeline_id,
+ params->table_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for table %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts in with lookup miss: %" PRIu64 "\n"
+ "\tPkts in with lookup hit dropped by AH: %" PRIu64 "\n"
+ "\tPkts in with lookup hit dropped by others: %" PRIu64 "\n"
+ "\tPkts in with lookup miss dropped by AH: %" PRIu64 "\n"
+ "\tPkts in with lookup miss dropped by others: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->table_id,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+cmdline_parse_token_string_t cmd_stats_table_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_stats_table_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, stats_string,
+ "stats");
+
+cmdline_parse_token_string_t cmd_stats_table_table_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, table_string,
+ "table");
+
+cmdline_parse_token_num_t cmd_stats_table_table_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, table_id, UINT32);
+
+cmdline_parse_inst_t cmd_stats_table = {
+ .f = cmd_stats_table_parsed,
+ .data = NULL,
+ .help_str = "Pipeline table stats",
+ .tokens = {
+ (void *) &cmd_stats_table_p_string,
+ (void *) &cmd_stats_table_pipeline_id,
+ (void *) &cmd_stats_table_stats_string,
+ (void *) &cmd_stats_table_table_string,
+ (void *) &cmd_stats_table_table_id,
+ NULL,
+ },
+};
+
+/*
+ * port in enable
+ */
+
+struct cmd_port_in_enable_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+ cmdline_fixed_string_t enable_string;
+};
+
+static void
+cmd_port_in_enable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_port_in_enable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_port_in_enable(app,
+ params->pipeline_id,
+ params->port_in_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_port_in_enable_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_enable_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_port_in_enable_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, in_string,
+ "in");
+
+cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, port_in_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result,
+ enable_string, "enable");
+
+cmdline_parse_inst_t cmd_port_in_enable = {
+ .f = cmd_port_in_enable_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port enable",
+ .tokens = {
+ (void *) &cmd_port_in_enable_p_string,
+ (void *) &cmd_port_in_enable_pipeline_id,
+ (void *) &cmd_port_in_enable_port_string,
+ (void *) &cmd_port_in_enable_in_string,
+ (void *) &cmd_port_in_enable_port_in_id,
+ (void *) &cmd_port_in_enable_enable_string,
+ NULL,
+ },
+};
+
+/*
+ * port in disable
+ */
+
+struct cmd_port_in_disable_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+ cmdline_fixed_string_t disable_string;
+};
+
+static void
+cmd_port_in_disable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_port_in_disable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_port_in_disable(app,
+ params->pipeline_id,
+ params->port_in_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_port_in_disable_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, p_string,
+ "p");
+
+cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_disable_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, port_string,
+ "port");
+
+cmdline_parse_token_string_t cmd_port_in_disable_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, in_string,
+ "in");
+
+cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, port_in_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result,
+ disable_string, "disable");
+
+cmdline_parse_inst_t cmd_port_in_disable = {
+ .f = cmd_port_in_disable_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port disable",
+ .tokens = {
+ (void *) &cmd_port_in_disable_p_string,
+ (void *) &cmd_port_in_disable_pipeline_id,
+ (void *) &cmd_port_in_disable_port_string,
+ (void *) &cmd_port_in_disable_in_string,
+ (void *) &cmd_port_in_disable_port_in_id,
+ (void *) &cmd_port_in_disable_disable_string,
+ NULL,
+ },
+};
+
+/*
+ * link config
+ */
+
+static void
+print_link_info(struct app_link_params *p)
+{
+ struct rte_eth_stats stats;
+ struct ether_addr *mac_addr;
+ uint32_t netmask = (~0U) << (32 - p->depth);
+ uint32_t host = p->ip & netmask;
+ uint32_t bcast = host | (~netmask);
+
+ memset(&stats, 0, sizeof(stats));
+ rte_eth_stats_get(p->pmd_id, &stats);
+
+ mac_addr = (struct ether_addr *) &p->mac_addr;
+
+ if (strlen(p->pci_bdf))
+ printf("%s(%s): flags=<%s>\n",
+ p->name,
+ p->pci_bdf,
+ (p->state) ? "UP" : "DOWN");
+ else
+ printf("%s: flags=<%s>\n",
+ p->name,
+ (p->state) ? "UP" : "DOWN");
+
+ if (p->ip)
+ printf("\tinet %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32
+ " netmask %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 " "
+ "broadcast %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "\n",
+ (p->ip >> 24) & 0xFF,
+ (p->ip >> 16) & 0xFF,
+ (p->ip >> 8) & 0xFF,
+ p->ip & 0xFF,
+ (netmask >> 24) & 0xFF,
+ (netmask >> 16) & 0xFF,
+ (netmask >> 8) & 0xFF,
+ netmask & 0xFF,
+ (bcast >> 24) & 0xFF,
+ (bcast >> 16) & 0xFF,
+ (bcast >> 8) & 0xFF,
+ bcast & 0xFF);
+
+ printf("\tether %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ printf("\tRX packets %" PRIu64
+ " bytes %" PRIu64
+ "\n",
+ stats.ipackets,
+ stats.ibytes);
+
+ printf("\tRX errors %" PRIu64
+ " missed %" PRIu64
+ " no-mbuf %" PRIu64
+ "\n",
+ stats.ierrors,
+ stats.imissed,
+ stats.rx_nombuf);
+
+ printf("\tTX packets %" PRIu64
+ " bytes %" PRIu64 "\n",
+ stats.opackets,
+ stats.obytes);
+
+ printf("\tTX errors %" PRIu64
+ "\n",
+ stats.oerrors);
+
+ printf("\n");
+}
+#endif
+struct cmd_link_config_result {
+ cmdline_fixed_string_t link_string;
+ uint32_t link_id;
+ cmdline_fixed_string_t config_string;
+ cmdline_ipaddr_t ip;
+ uint32_t depth;
+};
+
+static void
+cmd_link_config_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_config_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ uint32_t link_id = params->link_id;
+ uint32_t ip;
+ uint8_t ipv6[16];
+ if (params->ip.family == AF_INET)
+ ip = rte_bswap32((uint32_t) params->ip.addr.ipv4.s_addr);
+ else
+ memcpy(ipv6, params->ip.addr.ipv6.s6_addr, 16);
+
+ uint32_t depth = params->depth;
+
+ if (params->ip.family == AF_INET)
+ status = app_link_config(app, link_id, ip, depth);
+ else
+ status = app_link_config_ipv6(app, link_id, ipv6, depth);
+
+ if (status)
+ printf("Command failed\n");
+ else {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p)
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_config_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, link_string,
+ "link");
+
+cmdline_parse_token_num_t cmd_link_config_link_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, link_id, UINT32);
+
+cmdline_parse_token_string_t cmd_link_config_config_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_config_result, config_string,
+ "config");
+
+cmdline_parse_token_ipaddr_t cmd_link_config_ip =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_link_config_result, ip);
+
+cmdline_parse_token_num_t cmd_link_config_depth =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_config_result, depth, UINT32);
+
+cmdline_parse_inst_t cmd_link_config = {
+ .f = cmd_link_config_parsed,
+ .data = NULL,
+ .help_str = "Link configuration",
+ .tokens = {
+ (void *)&cmd_link_config_link_string,
+ (void *)&cmd_link_config_link_id,
+ (void *)&cmd_link_config_config_string,
+ (void *)&cmd_link_config_ip,
+ (void *)&cmd_link_config_depth,
+ NULL,
+ },
+};
+
+/*
+ * link up
+ */
+
+struct cmd_link_up_result {
+ cmdline_fixed_string_t link_string;
+ uint32_t link_id;
+ cmdline_fixed_string_t up_string;
+};
+
+static void
+cmd_link_up_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_up_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_link_up(app, params->link_id);
+ if (status != 0)
+ printf("Command failed\n");
+ else {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
+ p);
+ if (p)
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_up_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, link_string,
+ "link");
+
+cmdline_parse_token_num_t cmd_link_up_link_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_up_result, link_id, UINT32);
+
+cmdline_parse_token_string_t cmd_link_up_up_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_up_result, up_string, "up");
+
+cmdline_parse_inst_t cmd_link_up = {
+ .f = cmd_link_up_parsed,
+ .data = NULL,
+ .help_str = "Link UP",
+ .tokens = {
+ (void *)&cmd_link_up_link_string,
+ (void *)&cmd_link_up_link_id,
+ (void *)&cmd_link_up_up_string,
+ NULL,
+ },
+};
+
+/*
+ * link down
+ */
+
+struct cmd_link_down_result {
+ cmdline_fixed_string_t link_string;
+ uint32_t link_id;
+ cmdline_fixed_string_t down_string;
+};
+
+static void
+cmd_link_down_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_down_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_link_down(app, params->link_id);
+ if (status != 0)
+ printf("Command failed\n");
+ else {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", params->link_id,
+ p);
+ if (p)
+ print_link_info(p);
+ }
+}
+
+cmdline_parse_token_string_t cmd_link_down_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, link_string,
+ "link");
+
+cmdline_parse_token_num_t cmd_link_down_link_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_link_down_result, link_id, UINT32);
+
+cmdline_parse_token_string_t cmd_link_down_down_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_down_result, down_string,
+ "down");
+
+cmdline_parse_inst_t cmd_link_down = {
+ .f = cmd_link_down_parsed,
+ .data = NULL,
+ .help_str = "Link DOWN",
+ .tokens = {
+ (void *) &cmd_link_down_link_string,
+ (void *) &cmd_link_down_link_id,
+ (void *) &cmd_link_down_down_string,
+ NULL,
+ },
+};
+
+/*
+ * link ls
+ */
+
+struct cmd_link_ls_result {
+ cmdline_fixed_string_t link_string;
+ cmdline_fixed_string_t ls_string;
+};
+
+static void
+cmd_link_ls_parsed(
+ __attribute__((unused)) void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct app_params *app = data;
+ uint32_t link_id;
+
+ for (link_id = 0; link_id < app->n_links; link_id++) {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p)
+ print_link_info(p);
+ }
+ #ifndef VNF_ACL
+ print_interface_details();
+ #endif
+}
+
+cmdline_parse_token_string_t cmd_link_ls_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, link_string,
+ "link");
+
+cmdline_parse_token_string_t cmd_link_ls_ls_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_ls_result, ls_string, "ls");
+
+cmdline_parse_inst_t cmd_link_ls = {
+ .f = cmd_link_ls_parsed,
+ .data = NULL,
+ .help_str = "Link list",
+ .tokens = {
+ (void *)&cmd_link_ls_link_string,
+ (void *)&cmd_link_ls_ls_string,
+ NULL,
+ },
+};
+
+/*
+ * quit
+ */
+
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void
+cmd_quit_parsed(
+ __rte_unused void *parsed_result,
+ struct cmdline *cl,
+ __rte_unused void *data)
+{
+ cmdline_quit(cl);
+}
+
+static cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+static cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "Quit",
+ .tokens = {
+ (void *) &cmd_quit_quit,
+ NULL,
+ },
+};
+
+/*
+ * run
+ */
+
+static void
+app_run_file(
+ cmdline_parse_ctx_t *ctx,
+ const char *file_name)
+{
+ struct cmdline *file_cl;
+ int fd;
+
+ fd = open(file_name, O_RDONLY);
+ if (fd < 0) {
+ printf("Cannot open file \"%s\"\n", file_name);
+ return;
+ }
+
+ file_cl = cmdline_new(ctx, "", fd, 1);
+ cmdline_interact(file_cl);
+ close(fd);
+}
+
+struct cmd_run_file_result {
+ cmdline_fixed_string_t run_string;
+ char file_name[APP_FILE_NAME_SIZE];
+};
+
+static void
+cmd_run_parsed(
+ void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_run_file_result *params = parsed_result;
+
+ app_run_file(cl->ctx, params->file_name);
+}
+
+cmdline_parse_token_string_t cmd_run_run_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, run_string,
+ "run");
+
+cmdline_parse_token_string_t cmd_run_file_name =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_file_result, file_name, NULL);
+
+cmdline_parse_inst_t cmd_run = {
+ .f = cmd_run_parsed,
+ .data = NULL,
+ .help_str = "Run CLI script file",
+ .tokens = {
+ (void *) &cmd_run_run_string,
+ (void *) &cmd_run_file_name,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_common_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_quit,
+ (cmdline_parse_inst_t *) &cmd_run,
+
+ (cmdline_parse_inst_t *) &cmd_link_config,
+ (cmdline_parse_inst_t *) &cmd_link_up,
+ (cmdline_parse_inst_t *) &cmd_link_down,
+ (cmdline_parse_inst_t *) &cmd_link_ls,
+
+ (cmdline_parse_inst_t *) &cmd_ping,
+ (cmdline_parse_inst_t *) &cmd_stats_port_in,
+ (cmdline_parse_inst_t *) &cmd_stats_port_out,
+ (cmdline_parse_inst_t *) &cmd_stats_table,
+ (cmdline_parse_inst_t *) &cmd_port_in_enable,
+ (cmdline_parse_inst_t *) &cmd_port_in_disable,
+ NULL,
+};
+
+int
+app_pipeline_common_cmd_push(struct app_params *app)
+{
+ uint32_t n_cmds, i;
+
+ /* Check for available slots in the application commands array */
+ n_cmds = RTE_DIM(pipeline_common_cmds) - 1;
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push pipeline commands into the application */
+ memcpy(&app->cmds[app->n_cmds],
+ pipeline_common_cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
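The CLI commands registered above are thin wrappers over the app_* calls in this file, so scripts or embedded callers can drive the same functionality directly. A hedged sketch of the programmatic equivalent of "link 0 config 10.0.0.1 24", "link 0 up" and "p 1 stats port in 0"; it assumes an already initialized struct app_params *app, and the addresses and ids are example data only:

#include <stdio.h>
#include <inttypes.h>

/* Illustrative front-end usage; not part of the patch. */
static void fe_usage_sketch(struct app_params *app)
{
	struct rte_pipeline_port_in_stats stats;
	/* 10.0.0.1 in host byte order, as expected by app_link_config() */
	uint32_t ip = (10u << 24) | 1u;

	if (app_link_config(app, 0, ip, 24) == 0)
		app_link_up(app, 0);

	if (app_pipeline_stats_port_in(app, 1, 0, &stats) == 0)
		printf("pipeline 1 port 0: %" PRIu64 " pkts in\n",
			stats.stats.n_pkts_in);
}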
diff --git a/common/VIL/pipeline_common/pipeline_common_fe.h b/common/VIL/pipeline_common/pipeline_common_fe.h
new file mode 100644
index 00000000..fd53cc1d
--- /dev/null
+++ b/common/VIL/pipeline_common/pipeline_common_fe.h
@@ -0,0 +1,231 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_COMMON_FE_H__
+#define __INCLUDE_PIPELINE_COMMON_FE_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <cmdline_parse.h>
+
+#include "pipeline_common_be.h"
+#include "pipeline.h"
+#include "app.h"
+
+#ifndef MSG_TIMEOUT_DEFAULT
+#define MSG_TIMEOUT_DEFAULT 1000
+#endif
+struct app_link_params mylink[APP_MAX_LINKS];
+static inline struct app_pipeline_data *
+app_pipeline_data(struct app_params *app, uint32_t id)
+{
+ struct app_pipeline_params *params;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", id, params);
+ if (params == NULL)
+ return NULL;
+
+ return &app->pipeline_data[params - app->pipeline_params];
+}
+
+static inline void *
+app_pipeline_data_fe(struct app_params *app, uint32_t id, struct pipeline_type *ptype)
+{
+ struct app_pipeline_data *pipeline_data;
+
+ pipeline_data = app_pipeline_data(app, id);
+ if (pipeline_data == NULL)
+ return NULL;
+
+ if (strcmp(pipeline_data->ptype->name, ptype->name) != 0)
+ return NULL;
+
+ if (pipeline_data->enabled == 0)
+ return NULL;
+
+ return pipeline_data->fe;
+}
+
+static inline struct rte_ring *
+app_pipeline_msgq_in_get(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_msgq_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->msgq_params,
+ "MSGQ-REQ-PIPELINE",
+ pipeline_id,
+ p);
+ if (p == NULL)
+ return NULL;
+
+ return app->msgq[p - app->msgq_params];
+}
+
+static inline struct rte_ring *
+app_pipeline_msgq_out_get(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_msgq_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->msgq_params,
+ "MSGQ-RSP-PIPELINE",
+ pipeline_id,
+ p);
+ if (p == NULL)
+ return NULL;
+
+ return app->msgq[p - app->msgq_params];
+}
+
+static inline void *
+app_msg_alloc(__rte_unused struct app_params *app)
+{
+ return rte_malloc(NULL, 2048, RTE_CACHE_LINE_SIZE);
+}
+
+static inline void
+app_msg_free(__rte_unused struct app_params *app,
+ void *msg)
+{
+ rte_free(msg);
+}
+
+static inline void
+app_msg_send(struct app_params *app,
+ uint32_t pipeline_id,
+ void *msg)
+{
+ struct rte_ring *r = app_pipeline_msgq_in_get(app, pipeline_id);
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+static inline void *
+app_msg_recv(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct rte_ring *r = app_pipeline_msgq_out_get(app, pipeline_id);
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void *
+app_msg_send_recv(struct app_params *app,
+ uint32_t pipeline_id,
+ void *msg,
+ uint32_t timeout_ms)
+{
+ struct rte_ring *r_req = app_pipeline_msgq_in_get(app, pipeline_id);
+ struct rte_ring *r_rsp = app_pipeline_msgq_out_get(app, pipeline_id);
+ uint64_t hz = rte_get_tsc_hz();
+ void *msg_recv = NULL;
+ uint64_t deadline;
+ int status = 0;
+
+	if ((r_req == NULL) || (r_rsp == NULL))
+		return NULL;
+
+	/* send */
+	do {
+		status = rte_ring_sp_enqueue(r_req, (void *) msg);
+	} while (status == -ENOBUFS);
+
+ /* recv */
+ deadline = (timeout_ms) ?
+ (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
+ UINT64_MAX;
+
+ do {
+ if (rte_rdtsc() > deadline)
+ return NULL;
+ if (r_rsp)
+ status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
+ } while (status != 0);
+
+ return msg_recv;
+}
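+
+/*
+ * Typical request/response usage (as done by the pipeline frontends):
+ * req = app_msg_alloc(app); fill in req->type/subtype and arguments;
+ * rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ * check rsp->status and release the buffer with app_msg_free(app, rsp).
+ * The backend reuses the request buffer as the response buffer.
+ */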
+
+int
+app_pipeline_ping(struct app_params *app,
+ uint32_t pipeline_id);
+
+int
+app_pipeline_stats_port_in(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats);
+
+int
+app_pipeline_stats_port_out(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats);
+
+int
+app_pipeline_stats_table(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats);
+
+int
+app_pipeline_port_in_enable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_port_in_disable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_link_config(struct app_params *app,
+ uint32_t link_id,
+ uint32_t ip,
+ uint32_t depth);
+
+int
+app_link_up(struct app_params *app,
+ uint32_t link_id);
+
+int
+app_link_down(struct app_params *app,
+ uint32_t link_id);
+
+int
+app_pipeline_common_cmd_push(struct app_params *app);
+
+
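+/* IPv6 helpers used by the link configuration commands */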
+void convert_prefixlen_to_netmask_ipv6(uint32_t depth, uint8_t netmask_ipv6[]);
+
+void
+get_host_portion_ipv6(uint8_t ipv6[], uint8_t netmask[], uint8_t host_ipv6[]);
+
+void
+get_bcast_portion_ipv6(uint8_t host[], uint8_t netmask[], uint8_t bcast_ipv6[]);
+
+int
+app_link_config_ipv6(struct app_params *app,
+ uint32_t link_id, uint8_t ipv6[], uint32_t depth);
+
+#endif
diff --git a/common/VIL/pipeline_loadb/pipeline_loadb.c b/common/VIL/pipeline_loadb/pipeline_loadb.c
new file mode 100644
index 00000000..fdcc17ae
--- /dev/null
+++ b/common/VIL/pipeline_loadb/pipeline_loadb.c
@@ -0,0 +1,493 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_loadb.h"
+#include "vnf_common.h"
+//#include "lib_arp.h"
+#include "pipeline_arpicmp_be.h"
+//#include "interface.h"
+static int
+app_pipeline_loadb_entry_dbg(struct app_params *app,
+ uint32_t pipeline_id, uint8_t *msg)
+{
+ struct pipeline_loadb_entry_dbg_msg_req *req;
+ struct pipeline_loadb_entry_dbg_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_LOADB_MSG_REQ_ENTRY_DBG;
+ req->data[0] = msg[0];
+ req->data[1] = msg[1];
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * entry dbg
+ */
+
+struct cmd_entry_dbg_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t dbg_string;
+ uint8_t cmd;
+ uint8_t d1;
+};
+
+static void
+cmd_entry_dbg_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_dbg_result *params = parsed_result;
+ struct app_params *app = data;
+ uint8_t msg[2];
+ int status;
+
+ msg[0] = params->cmd;
+ msg[1] = params->d1;
+ status = app_pipeline_loadb_entry_dbg(app, params->p, msg);
+
+ if (status != 0) {
+ printf("Dbg Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, p_string, "p");
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, p, UINT32);
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result,
+ entry_string, "lbentry");
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_dbg_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, dbg_string,
+ "dbg");
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_cmd =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, cmd, UINT8);
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_d1 =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, d1, UINT8);
+
+static cmdline_parse_inst_t lb_cmd_entry_dbg = {
+ .f = cmd_entry_dbg_parsed,
+ .data = NULL,
+ .help_str = "LOADB dbg cmd",
+ .tokens = {
+ (void *)&lb_cmd_entry_dbg_p_string,
+ (void *)&lb_cmd_entry_dbg_p,
+ (void *)&lb_cmd_entry_dbg_entry_string,
+ (void *)&lb_cmd_entry_dbg_dbg_string,
+ (void *)&lb_cmd_entry_dbg_cmd,
+ (void *)&lb_cmd_entry_dbg_d1,
+ NULL,
+ },
+};
+
+/*static void*/
+/*print_arp_entry(const struct app_pipeline_arp_icmp_arp_entry *entry)*/
+/*{*/
+/* printf("(Port = %" PRIu32 ", IP = %" PRIu32 ".%" PRIu32*/
+/* ".%" PRIu32 ".%" PRIu32 ") => "*/
+/* "HWaddress = %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32*/
+/* ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",*/
+/* entry->key.key.ipv4.port_id,*/
+/* (entry->key.key.ipv4.ip >> 24) & 0xFF,*/
+/* (entry->key.key.ipv4.ip >> 16) & 0xFF,*/
+/* (entry->key.key.ipv4.ip >> 8) & 0xFF,*/
+/* entry->key.key.ipv4.ip & 0xFF,*/
+
+/* entry->macaddr.addr_bytes[0],*/
+/* entry->macaddr.addr_bytes[1],*/
+/* entry->macaddr.addr_bytes[2],*/
+/* entry->macaddr.addr_bytes[3],*/
+/* entry->macaddr.addr_bytes[4],*/
+/* entry->macaddr.addr_bytes[5]);*/
+/*}*/
+
+#if 0
+/*
+ * arp add
+ */
+
+struct cmd_arp_add_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arpadd_string;
+ uint32_t port_id;
+ cmdline_ipaddr_t ip;
+ struct ether_addr macaddr;
+
+};
+
+static void
+cmd_arp_add_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ struct cmd_arp_add_result *params = parsed_result;
+ uint8_t ipv6[16];
+
+/* struct pipeline_arp_icmp_arp_key key;*/
+/* key.type = PIPELINE_ARP_ICMP_ARP_IPV4;*/
+/* key.key.ipv4.port_id = params->port_id;*/
+/* key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);*/
+/* populate_arp_entry(&req->macaddr, rte_bswap32(req->key.key.ipv4.ip),
+ * req->key.key.ipv4.port_id);
+ */
+ if (params->ip.family == AF_INET) {
+ populate_arp_entry(&params->macaddr,
+ rte_cpu_to_be_32(params->ip.addr.
+ ipv4.s_addr),
+ params->port_id, STATIC_ARP);
+ } else {
+ memcpy(ipv6, params->ip.addr.ipv6.s6_addr, 16);
+ populate_nd_entry(&params->macaddr, ipv6, params->port_id, STATIC_ND);
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_add_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_add_p =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_add_arp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_add_result, arpadd_string, "arpadd");
+
+static cmdline_parse_token_num_t cmd_arp_add_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_add_result, port_id, UINT32);
+
+static cmdline_parse_token_ipaddr_t cmd_arp_add_ip =
+TOKEN_IPADDR_INITIALIZER(struct cmd_arp_add_result, ip);
+
+static cmdline_parse_token_etheraddr_t cmd_arp_add_macaddr =
+TOKEN_ETHERADDR_INITIALIZER(struct cmd_arp_add_result, macaddr);
+
+static cmdline_parse_inst_t cmd_arp_add = {
+ .f = cmd_arp_add_parsed,
+ .data = NULL,
+ .help_str = "ARP add",
+ .tokens = {
+ (void *)&cmd_arp_add_p_string,
+ (void *)&cmd_arp_add_p,
+ (void *)&cmd_arp_add_arp_string,
+ (void *)&cmd_arp_add_port_id,
+ (void *)&cmd_arp_add_ip,
+ (void *)&cmd_arp_add_macaddr,
+ NULL,
+ },
+};
+
+/*
+ * arp del
+ */
+
+struct cmd_arp_del_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ uint32_t port_id;
+ cmdline_ipaddr_t ip;
+};
+
+static void
+cmd_arp_del_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+ uint8_t ipv6[16];
+
+/* struct pipeline_arp_icmp_arp_key key;*/
+/* key.type = PIPELINE_ARP_ICMP_ARP_IPV4;*/
+/* key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);*/
+/* key.key.ipv4.port_id = params->port_id;*/
+/* remove_arp_entry(rte_bswap32(req->key.key.ipv4.ip),
+ * req->key.key.ipv4.port_id);
+ */
+ if (params->ip.family == AF_INET) {
+ remove_arp_entry(rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr),
+ params->port_id, NULL);
+ } else {
+ memcpy(ipv6, params->ip.addr.ipv6.s6_addr, 16);
+ remove_nd_entry_ipv6(ipv6, params->port_id);
+ }
+}
+
+static cmdline_parse_token_string_t cmd_arp_del_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_del_p =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_del_arp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arpdel");
+
+static cmdline_parse_token_num_t cmd_arp_del_port_id =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_del_result, port_id, UINT32);
+
+static cmdline_parse_token_ipaddr_t cmd_arp_del_ip =
+TOKEN_IPADDR_INITIALIZER(struct cmd_arp_del_result, ip);
+
+static cmdline_parse_inst_t cmd_arp_del = {
+ .f = cmd_arp_del_parsed,
+ .data = NULL,
+ .help_str = "ARP delete",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_arp_del_arp_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arp req
+ */
+
+/*Re-uses delete structures*/
+
+static void
+cmd_arp_req_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+ /*struct app_params *app = data;*/
+
+ struct arp_key_ipv4 key;
+/* int status;*/
+
+/* key.type = ARP_IPV4;*/
+/* key.key.ipv4.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);*/
+/* key.key.ipv4.port_id = params->port_id;*/
+ key.ip = rte_cpu_to_be_32(params->ip.addr.ipv4.s_addr);
+ key.port_id = params->port_id;
+ key.filler1 = 0;
+ key.filler2 = 0;
+ key.filler3 = 0;
+
+ struct arp_entry_data *arp_data = retrieve_arp_entry(key);
+
+ if (arp_data) {
+ if (ARPICMP_DEBUG)
+ printf("ARP entry exists for ip 0x%x, port %d\n",
+ params->ip.addr.ipv4.s_addr, params->port_id);
+ return;
+ }
+ /* else request an arp*/
+ if (ARPICMP_DEBUG)
+ printf("ARP - requesting arp for ip 0x%x, port %d\n",
+ params->ip.addr.ipv4.s_addr, params->port_id);
+ request_arp(params->port_id, params->ip.addr.ipv4.s_addr);
+ /*give pipeline number too*/
+}
+
+static cmdline_parse_token_string_t cmd_arp_req_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "arpreq");
+
+static cmdline_parse_inst_t cmd_arp_req = {
+ .f = cmd_arp_req_parsed,
+ .data = NULL,
+ .help_str = "ARP request",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_arp_req_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arpicmp echo req
+ */
+
+/*Re-uses delete structures*/
+
+static void
+cmd_icmp_echo_req_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ struct cmd_arp_del_result *params = parsed_result;
+ struct rte_mbuf *pkt;
+ l2_phy_interface_t *port = (l2_phy_interface_t *) ifm_get_port((uint8_t)params->port_id);
+
+ if (ARPICMP_DEBUG)
+ printf("Echo Req Handler ip %x, port %d\n",
+ params->ip.addr.ipv4.s_addr, params->port_id);
+
+ pkt = request_echo(params->port_id, params->ip.addr.ipv4.s_addr);
+ port->transmit_single_pkt(port, pkt);
+}
+
+static cmdline_parse_token_string_t cmd_icmp_echo_req_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_del_result, arp_string, "icmpecho");
+
+static cmdline_parse_inst_t cmd_icmp_echo_req = {
+ .f = cmd_icmp_echo_req_parsed,
+ .data = NULL,
+ .help_str = "ICMP echo request",
+ .tokens = {
+ (void *)&cmd_arp_del_p_string,
+ (void *)&cmd_arp_del_p,
+ (void *)&cmd_icmp_echo_req_string,
+ (void *)&cmd_arp_del_port_id,
+ (void *)&cmd_arp_del_ip,
+ NULL,
+ },
+};
+
+/*
+ * arp ls
+ */
+
+struct cmd_arp_ls_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+};
+
+static void
+cmd_arp_ls_parsed(__rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl, __rte_unused void *data)
+{
+ printf("\nARP table ...\n");
+ printf("-------------\n");
+ print_arp_table();
+
+ printf
+ ("............................................................\n");
+
+ printf("\nND IPv6 table:\n");
+ printf("--------------\n");
+ print_nd_table();
+}
+
+static cmdline_parse_token_string_t cmd_arp_ls_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_arp_ls_p =
+TOKEN_NUM_INITIALIZER(struct cmd_arp_ls_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_ls_arp_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
+ "arpls");
+
+static cmdline_parse_inst_t cmd_arp_ls = {
+ .f = cmd_arp_ls_parsed,
+ .data = NULL,
+ .help_str = "ARP list",
+ .tokens = {
+ (void *)&cmd_arp_ls_p_string,
+ (void *)&cmd_arp_ls_p,
+ (void *)&cmd_arp_ls_arp_string,
+ NULL,
+ },
+};
+
+/*
+ * show ports info
+ */
+
+struct cmd_show_ports_info_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+};
+
+static void
+cmd_show_ports_info_parsed(__rte_unused void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ __rte_unused void *data)
+{
+ show_ports_info();
+}
+
+static cmdline_parse_token_string_t cmd_show_ports_info_string =
+TOKEN_STRING_INITIALIZER(struct cmd_arp_ls_result, arp_string,
+ "showPortsInfo");
+
+static cmdline_parse_inst_t cmd_show_ports_info = {
+ .f = cmd_show_ports_info_parsed,
+ .data = NULL,
+ .help_str = "show ports info",
+ .tokens = {
+ (void *)&cmd_arp_ls_p_string,
+ (void *)&cmd_arp_ls_p,
+ (void *)&cmd_show_ports_info_string,
+ NULL,
+ },
+};
+#endif
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &lb_cmd_entry_dbg,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_loadb_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_loadb = {
+ .name = "LOADB",
+ .be_ops = &pipeline_loadb_be_ops,
+ .fe_ops = &pipeline_loadb_fe_ops,
+};
diff --git a/common/VIL/pipeline_loadb/pipeline_loadb.h b/common/VIL/pipeline_loadb/pipeline_loadb.h
new file mode 100644
index 00000000..866a6eab
--- /dev/null
+++ b/common/VIL/pipeline_loadb/pipeline_loadb.h
@@ -0,0 +1,29 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_LOADB_H__
+#define __INCLUDE_PIPELINE_LOADB_H__
+
+#include "pipeline.h"
+#include "pipeline_loadb_be.h"
+
+/*
+ * Pipeline type
+ */
+
+extern struct pipeline_type pipeline_loadb;
+
+#endif
diff --git a/common/VIL/pipeline_loadb/pipeline_loadb_be.c b/common/VIL/pipeline_loadb/pipeline_loadb_be.c
new file mode 100644
index 00000000..c7910127
--- /dev/null
+++ b/common/VIL/pipeline_loadb/pipeline_loadb_be.c
@@ -0,0 +1,1417 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_hash.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_jhash.h>
+#include <rte_thash.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_table_array.h>
+#include "pipeline_loadb_be.h"
+#include "pipeline_actions_common.h"
+#include "hash_func.h"
+#include "pipeline_arpicmp_be.h"
+#include "vnf_common.h"
+#include "app.h"
+
+#define BYTES_TO_BITS 8
+#define ROTATE_15_BITS 15
+
+#define MAX_VNF_THREADS 16
+
+int pkt_burst_cnt;
+
+uint8_t LOADB_DEBUG;
+uint8_t total_vnf_threads;
+uint32_t phyport_offset;
+
+struct pipeline_loadb {
+ struct pipeline p;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_LOADB_MSG_REQS];
+
+ uint8_t n_vnf_threads;
+ uint8_t n_lb_tuples;
+ uint32_t outport_offset;
+ uint64_t receivedLBPktCount;
+ uint64_t droppedLBPktCount;
+ uint8_t links_map[PIPELINE_MAX_PORT_IN];
+ uint8_t outport_id[PIPELINE_MAX_PORT_IN];
+ uint8_t n_prv_Q;
+ uint8_t n_pub_Q;
+ uint8_t pipeline_num;
+} __rte_cache_aligned;
+
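+/*
+ * Default Toeplitz key for rte_softrss(): the repeated 0x6d5a byte pattern
+ * commonly used for symmetric RSS, so both directions of a flow hash to the
+ * same value.
+ */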
+uint8_t default_rss_key[] = {
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+};
+
+static void *pipeline_loadb_msg_req_custom_handler(struct pipeline *p,
+ void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_loadb_msg_req_custom_handler,
+
+};
+
+static void *pipeline_loadb_msg_req_entry_dbg_handler(struct pipeline *,
+ void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_LOADB_MSG_REQ_ENTRY_DBG] =
+ pipeline_loadb_msg_req_entry_dbg_handler,
+};
+
+/*
+ * LOADB table
+ */
+struct loadb_table_entry {
+ struct rte_pipeline_table_entry head;
+} __rte_cache_aligned;
+
+void *pipeline_loadb_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_loadb *p_lb = (struct pipeline_loadb *)p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_LOADB_MSG_REQS) ?
+ p_lb->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+uint32_t lb_pkt_print_count;
+
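+/*
+ * Select the worker thread (and hence the output queue) for a packet seen on
+ * a private-side port, according to n_lb_tuples:
+ * 0 - XOR fold of the destination address and port
+ * 1 - rte_jhash over the destination address
+ * 2 - rte_jhash over the destination address and port
+ * 4 - rte_softrss (Toeplitz) over the src/dst address and port 4-tuple
+ * 5 - XOR fold of the 5-tuple (addresses, ports, protocol)
+ * The selected queue is in_port_id + n_ports_in * thread; the expression used
+ * below is an algebraically expanded form of that mapping.
+ * calculate_lb_thread_pub() mirrors this, keying on the source fields for
+ * modes 0 to 2.
+ */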
+uint8_t calculate_lb_thread_prv(struct rte_mbuf *pkt, void *arg)
+{
+ uint32_t hash_key[2], hash_ipv4;
+ uint32_t temp1, temp2, temp3;
+ uint8_t thread;
+ struct pipeline_loadb_in_port_h_arg *ap = arg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *) ap->p;
+ uint8_t nthreads = p_loadb->n_vnf_threads;
+ union rte_thash_tuple tuple;
+
+ uint32_t *src_addr;
+ uint32_t *dst_addr;
+ uint16_t *src_port;
+ uint16_t *dst_port;
+ uint8_t *protocol;
+ struct lb_pkt *lb_pkt = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, MBUF_HDR_ROOM);
+
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) == ETHER_TYPE_IPv6) {
+ src_addr = (uint32_t *)&lb_pkt->ipv6_port.ipv6.src_addr;
+ dst_addr = (uint32_t *)&lb_pkt->ipv6_port.ipv6.dst_addr;
+ src_port = &lb_pkt->ipv6_port.src_port;
+ dst_port = &lb_pkt->ipv6_port.dst_port;
+ protocol = &lb_pkt->ipv6_port.ipv6.proto;
+ } else {
+ src_addr = &lb_pkt->ipv4_port.ipv4.src_addr;
+ dst_addr = &lb_pkt->ipv4_port.ipv4.dst_addr;
+ src_port = &lb_pkt->ipv4_port.src_port;
+ dst_port = &lb_pkt->ipv4_port.dst_port;
+ protocol = &lb_pkt->ipv4_port.ipv4.next_proto_id;
+ }
+
+ switch (p_loadb->n_lb_tuples) {
+
+ case 0:
+ /* Write */
+ /* Egress */
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) == ETHER_TYPE_IPv6)
+ temp1 = rte_bswap32(dst_addr[3]) ^ *dst_port;
+ else
+ temp1 = *dst_addr ^ *dst_port;
+
+ temp2 = (temp1 >> 24) ^ (temp1 >> 16) ^
+ (temp1 >> 8) ^ temp1;
+
+ temp3 = (temp2 >> 4) ^ (temp2 & 0xf);
+
+ /* To select the thread */
+ thread = temp3 % nthreads;
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+ return thread;
+
+ case 1:
+ /* Write */
+ /* Egress */
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) == ETHER_TYPE_IPv6)
+ hash_key[0] = rte_bswap32(dst_addr[3]);
+ else
+ hash_key[0] = rte_bswap32(*dst_addr);
+
+ /* Compute */
+ hash_ipv4 = rte_jhash(&hash_key[0], 4, 0);
+
+ /* To select the thread */
+ thread = (hash_ipv4 % nthreads);
+
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3)
+ printf("thread: %u hash: %x hash_key: %x\n",
+ thread, hash_ipv4, hash_key[0]);
+ return thread;
+
+ case 2:
+ /* Write */
+ /* Egress */
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) ==
+ ETHER_TYPE_IPv6) {
+ hash_key[0] = rte_bswap32(dst_addr[3]);
+ hash_key[1] = *dst_port << 16;
+ } else{
+ hash_key[0] = rte_bswap32(*dst_addr);
+ hash_key[1] = *dst_port << 16;
+ }
+ /* Compute */
+ hash_ipv4 = rte_jhash(&hash_key[0], 6, 0);
+
+ /* To select the thread */
+ thread = (hash_ipv4 % nthreads);
+
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3) {
+ printf("public_addr: %x public_port: %x\n",
+ hash_key[0], *dst_port);
+ printf("thread: %u hash: %x hash_key0: %x "
+ "hash_key1: %x\n", thread, hash_ipv4,
+ hash_key[0], hash_key[1]);
+ }
+ return thread;
+
+ case 3:
+ printf("Invalid n_lb_tuples: %d\n", p_loadb->n_lb_tuples);
+ return 0;
+
+ case 4:
+ /* Write */
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) ==
+ ETHER_TYPE_IPv6) {
+ tuple.v4.src_addr = rte_bswap32(src_addr[3]);
+ tuple.v4.dst_addr = rte_bswap32(dst_addr[3]);
+ tuple.v4.sport = *src_port;
+ tuple.v4.dport = *dst_port;
+ } else{
+ tuple.v4.src_addr = rte_bswap32(*src_addr);
+ tuple.v4.dst_addr = rte_bswap32(*dst_addr);
+ tuple.v4.sport = *src_port;
+ tuple.v4.dport = *dst_port;
+ }
+ /* Compute */
+ hash_ipv4 = rte_softrss((uint32_t *)&tuple,
+ RTE_THASH_V4_L4_LEN,
+ default_rss_key);
+ /* Egress */
+
+ /* To select the thread */
+ thread = (hash_ipv4 % nthreads);
+
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3) {
+ printf("src_addr: %x dst_addr: %x src_port: %x "
+ "dst_port: %x\n", tuple.v4.src_addr, tuple.v4.dst_addr,
+ tuple.v4.sport, tuple.v4.dport);
+ printf("thread: %u hash: %x\n", thread, hash_ipv4);
+ }
+
+ return thread;
+
+ case 5:
+
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) ==
+ ETHER_TYPE_IPv6) {
+ /* point to last 32 bits of IPv6 addresses*/
+ src_addr += 3;
+ dst_addr += 3;
+ }
+
+ /* Compute */
+ temp1 = *src_addr ^ *dst_addr ^ *src_port ^
+ *dst_port ^ *protocol;
+
+ temp2 = (temp1 >> 24) ^ (temp1 >> 16) ^ (temp1 >> 8) ^ temp1;
+ temp3 = (temp2 >> 4) ^ (temp2 & 0xf);
+
+ /* Egress */
+
+ /* To select the thread */
+ thread = (temp3 % nthreads);
+
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3) {
+ printf("thread: %u temp1: %x temp2: %x temp3: %x\n",
+ thread, temp1, temp2, temp3);
+ printf("src_addr: %x dst_addr: %x src_port: %x "
+ "dst_port: %x protocol: %x\n", *src_addr, *dst_addr,
+ *src_port, *dst_port, *protocol);
+ }
+ return thread;
+
+ default:
+ printf("Invalid n_lb_tuples: %d\n", p_loadb->n_lb_tuples);
+ return 0;
+
+ }
+}
+
+uint8_t calculate_lb_thread_pub(struct rte_mbuf *pkt, void *arg)
+{
+ uint32_t hash_key[2], hash_ipv4;
+ uint32_t temp1, temp2, temp3;
+ uint8_t thread;
+ struct pipeline_loadb_in_port_h_arg *ap = arg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *) ap->p;
+ uint8_t nthreads = p_loadb->n_vnf_threads;
+ union rte_thash_tuple tuple;
+
+ uint32_t *src_addr;
+ uint32_t *dst_addr;
+ uint16_t *src_port;
+ uint16_t *dst_port;
+ uint8_t *protocol;
+ struct lb_pkt *lb_pkt = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ MBUF_HDR_ROOM);
+
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) == ETHER_TYPE_IPv6) {
+ src_addr = (uint32_t *)&lb_pkt->ipv6_port.ipv6.src_addr;
+ dst_addr = (uint32_t *)&lb_pkt->ipv6_port.ipv6.dst_addr;
+ src_port = &lb_pkt->ipv6_port.src_port;
+ dst_port = &lb_pkt->ipv6_port.dst_port;
+ protocol = &lb_pkt->ipv6_port.ipv6.proto;
+ } else {
+ src_addr = &lb_pkt->ipv4_port.ipv4.src_addr;
+ dst_addr = &lb_pkt->ipv4_port.ipv4.dst_addr;
+ src_port = &lb_pkt->ipv4_port.src_port;
+ dst_port = &lb_pkt->ipv4_port.dst_port;
+ protocol = &lb_pkt->ipv4_port.ipv4.next_proto_id;
+ }
+
+ switch (p_loadb->n_lb_tuples) {
+
+ case 0:
+ /* Write */
+ /* Ingress */
+ temp1 = *src_addr ^ *src_port;
+ temp2 = (temp1 >> 24) ^ (temp1 >> 16) ^
+ (temp1 >> 8) ^ temp1;
+ temp3 = (temp2 >> 4) ^ (temp2 & 0xf);
+
+ /* To select the thread */
+ thread = temp3 % nthreads;
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ return thread;
+
+ case 1:
+ /* Write */
+ /* Ingress */
+ hash_key[0] = rte_bswap32(*src_addr);
+
+ /* Compute */
+ hash_ipv4 = rte_jhash(&hash_key[0], 4, 0);
+
+ /* To select the thread */
+ thread = hash_ipv4 % nthreads;
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3)
+ printf("thread: %u hash: %x hash_key: %x\n",
+ thread, hash_ipv4, hash_key[0]);
+ return thread;
+
+ case 2:
+ /* Write */
+ /* Ingress */
+ hash_key[0] = rte_bswap32(*src_addr);
+ hash_key[1] = *src_port << 16;
+
+ /* Compute */
+ hash_ipv4 = rte_jhash(&hash_key[0], 6, 0);
+
+ /* To select the thread */
+ thread = hash_ipv4 % nthreads;
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3) {
+ printf("thread: %u hash: %x hash_key0: %x "
+ "hash_key1: %x\n", thread, hash_ipv4,
+ hash_key[0], hash_key[1]);
+ printf("public_addr: %x public_port: %x\n",
+ hash_key[0], *src_port);
+ }
+ return thread;
+
+ case 3:
+ printf("Invalid n_lb_tuples: %d\n", p_loadb->n_lb_tuples);
+ return 0;
+
+ case 4:
+ /* Write */
+ tuple.v4.src_addr = rte_bswap32(*src_addr);
+ tuple.v4.dst_addr = rte_bswap32(*dst_addr);
+ tuple.v4.sport = *src_port;
+ tuple.v4.dport = *dst_port;
+
+ /* Compute */
+ hash_ipv4 = rte_softrss((uint32_t *)&tuple,
+ RTE_THASH_V4_L4_LEN, default_rss_key);
+
+ /* Ingress */
+ /* To select the thread */
+ thread = hash_ipv4 % nthreads;
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3) {
+ printf("src_addr: %x dst_addr: %x src_port: %x "
+ "dst_port: %x\n", tuple.v4.src_addr,
+ tuple.v4.dst_addr, tuple.v4.sport, tuple.v4.dport);
+
+ printf("thread: %u hash: %x\n", thread, hash_ipv4);
+ }
+ return thread;
+
+ case 5:
+
+ if (rte_be_to_cpu_16(lb_pkt->eth.ether_type) ==
+ ETHER_TYPE_IPv6) {
+ /* point to last 32 bits of IPv6 addresses*/
+ src_addr += 3;
+ dst_addr += 3;
+ }
+
+ /* Compute */
+ temp1 = *src_addr ^ *dst_addr ^ *src_port ^
+ *dst_port ^ *protocol;
+ temp2 = (temp1 >> 24) ^ (temp1 >> 16) ^
+ (temp1 >> 8) ^ temp1;
+ temp3 = (temp2 >> 4) ^ (temp2 & 0xf);
+
+ /* To select the thread */
+ thread = temp3 % nthreads;
+ /* To select the Q */
+ thread = ap->in_port_id + (p_loadb->p.n_ports_in *
+ (thread + 1) - p_loadb->p.n_ports_in);
+
+ if (LOADB_DEBUG > 3) {
+ printf("src_addr: %x dst_addr: %x src_port: %x "
+ "dst_port: %x protocol: %x\n", *src_addr, *dst_addr,
+ *src_port, *dst_port, *protocol);
+
+ printf("thread: %u temp1: %x temp2: %x temp3: %x\n",
+ thread, temp1, temp2, temp3);
+ }
+
+ return thread;
+
+ default:
+ printf("Invalid n_lb_tuples: %d\n", p_loadb->n_lb_tuples);
+ return 0;
+
+ }
+}
+
+static inline void
+pkt_work_loadb_key_prv(
+ struct rte_mbuf *pkt,
+ __rte_unused uint32_t pkt_num,
+ void *arg)
+{
+ struct pipeline_loadb_in_port_h_arg *ap = arg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *)ap->p;
+ uint32_t outport_offset = p_loadb->outport_offset;
+
+ struct lb_pkt *lb_pkt = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ MBUF_HDR_ROOM);
+ uint32_t *out_port = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ outport_offset);
+
+ #ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("Start pkt_work_loadb_key\n");
+ #endif
+
+ if ((LOADB_DEBUG > 2) && (lb_pkt_print_count < 10)) {
+ print_pkt1(pkt);
+ lb_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(lb_pkt->eth.ether_type),
+ lb_pkt->ipv4_port.ipv4.next_proto_id, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ /* Write */
+ *out_port = calculate_lb_thread_prv(pkt, arg);
+
+ p_loadb->receivedLBPktCount++;
+
+ #ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("End pkt_work_loadb_key\n");
+ #endif
+}
+
+static inline void
+pkt_work_loadb_key_pub(
+ struct rte_mbuf *pkt,
+ __rte_unused uint32_t pkt_num,
+ void *arg)
+{
+ struct pipeline_loadb_in_port_h_arg *ap = arg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *)ap->p;
+ uint32_t outport_offset = p_loadb->outport_offset;
+
+ struct lb_pkt *lb_pkt = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ MBUF_HDR_ROOM);
+ uint32_t *out_port = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ outport_offset);
+
+ #ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("Start pkt_work_loadb_key\n");
+ #endif
+
+ if ((LOADB_DEBUG > 2) && (lb_pkt_print_count < 10)) {
+ print_pkt1(pkt);
+ lb_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(lb_pkt->eth.ether_type),
+ lb_pkt->ipv4_port.ipv4.next_proto_id, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ /* Write */
+ *out_port = calculate_lb_thread_pub(pkt, arg);
+
+ p_loadb->receivedLBPktCount++;
+#ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("End pkt_work_loadb_key\n");
+#endif
+}
+
+static inline void
+pkt4_work_loadb_key_prv(
+ struct rte_mbuf **pkt,
+ __rte_unused uint32_t pkt_num,
+ void *arg)
+{
+ struct pipeline_loadb_in_port_h_arg *ap = arg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *)ap->p;
+ uint32_t outport_offset = p_loadb->outport_offset;
+
+ uint32_t *out_port0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0],
+ outport_offset);
+ uint32_t *out_port1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1],
+ outport_offset);
+ uint32_t *out_port2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2],
+ outport_offset);
+ uint32_t *out_port3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3],
+ outport_offset);
+
+ struct lb_pkt *lb_pkt0 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[0],
+ MBUF_HDR_ROOM);
+ struct lb_pkt *lb_pkt1 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[1],
+ MBUF_HDR_ROOM);
+ struct lb_pkt *lb_pkt2 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[2],
+ MBUF_HDR_ROOM);
+ struct lb_pkt *lb_pkt3 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[3],
+ MBUF_HDR_ROOM);
+
+ #ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("Start pkt4_work_loadb_key\n");
+ #endif
+
+ if ((LOADB_DEBUG > 2) && (lb_pkt_print_count < 10)) {
+ print_pkt1(pkt[0]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt0->eth.ether_type),
+ lb_pkt0->ipv4_port.ipv4.next_proto_id);
+
+ print_pkt1(pkt[1]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt1->eth.ether_type),
+ lb_pkt1->ipv4_port.ipv4.next_proto_id);
+
+ print_pkt1(pkt[2]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt2->eth.ether_type),
+ lb_pkt2->ipv4_port.ipv4.next_proto_id);
+
+ print_pkt1(pkt[3]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt3->eth.ether_type),
+ lb_pkt3->ipv4_port.ipv4.next_proto_id);
+ }
+ *out_port0 = calculate_lb_thread_prv(pkt[0], arg);
+ *out_port1 = calculate_lb_thread_prv(pkt[1], arg);
+ *out_port2 = calculate_lb_thread_prv(pkt[2], arg);
+ *out_port3 = calculate_lb_thread_prv(pkt[3], arg);
+
+ p_loadb->receivedLBPktCount += 4;
+
+ #ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("End pkt4_work_loadb_key\n");
+ #endif
+
+}
+
+static inline void
+pkt4_work_loadb_key_pub(
+ struct rte_mbuf **pkt,
+ __rte_unused uint32_t pkt_num,
+ void *arg)
+{
+ struct pipeline_loadb_in_port_h_arg *ap = arg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *)ap->p;
+ uint32_t outport_offset = p_loadb->outport_offset;
+
+ uint32_t *out_port0 = RTE_MBUF_METADATA_UINT32_PTR(pkt[0],
+ outport_offset);
+ uint32_t *out_port1 = RTE_MBUF_METADATA_UINT32_PTR(pkt[1],
+ outport_offset);
+ uint32_t *out_port2 = RTE_MBUF_METADATA_UINT32_PTR(pkt[2],
+ outport_offset);
+ uint32_t *out_port3 = RTE_MBUF_METADATA_UINT32_PTR(pkt[3],
+ outport_offset);
+
+ struct lb_pkt *lb_pkt0 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[0],
+ MBUF_HDR_ROOM);
+ struct lb_pkt *lb_pkt1 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[1],
+ MBUF_HDR_ROOM);
+ struct lb_pkt *lb_pkt2 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[2],
+ MBUF_HDR_ROOM);
+ struct lb_pkt *lb_pkt3 = (struct lb_pkt *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[3],
+ MBUF_HDR_ROOM);
+
+ #ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("Start pkt4_work_loadb_key\n");
+ #endif
+
+ if ((LOADB_DEBUG > 2) && (lb_pkt_print_count < 10)) {
+ print_pkt1(pkt[0]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt0->eth.ether_type),
+ lb_pkt0->ipv4_port.ipv4.next_proto_id);
+
+ print_pkt1(pkt[1]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt1->eth.ether_type),
+ lb_pkt1->ipv4_port.ipv4.next_proto_id);
+
+ print_pkt1(pkt[2]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt2->eth.ether_type),
+ lb_pkt2->ipv4_port.ipv4.next_proto_id);
+
+ print_pkt1(pkt[3]);
+ lb_pkt_print_count++;
+
+ printf("\nEth Typ %x, Prot %x\n",
+ rte_be_to_cpu_16(lb_pkt3->eth.ether_type),
+ lb_pkt3->ipv4_port.ipv4.next_proto_id);
+ }
+	*out_port0 = calculate_lb_thread_pub(pkt[0], arg);
+ *out_port1 = calculate_lb_thread_pub(pkt[1], arg);
+ *out_port2 = calculate_lb_thread_pub(pkt[2], arg);
+ *out_port3 = calculate_lb_thread_pub(pkt[3], arg);
+
+ p_loadb->receivedLBPktCount += 4;
+#ifdef MY_LOADB_DBG_PRINT
+ if (LOADB_DEBUG == 3)
+ printf("End pkt4_work_loadb_key\n");
+#endif
+
+}
+
+PIPELINE_LOADB_KEY_PORT_IN_AH(port_in_ah_loadb_key_prv,
+ pkt_work_loadb_key_prv,
+ pkt4_work_loadb_key_prv);
+
+PIPELINE_LOADB_KEY_PORT_IN_AH(port_in_ah_loadb_key_pub,
+ pkt_work_loadb_key_pub,
+ pkt4_work_loadb_key_pub);
+
+static int
+pipeline_loadb_parse_args(struct pipeline_loadb *p,
+ struct pipeline_params *params)
+{
+ uint32_t outport_offset_present = 0;
+ uint32_t n_vnf_threads_present = 0;
+ uint32_t pktq_in_prv_present = 0;
+ uint32_t prv_que_handler_present = 0;
+ uint32_t prv_to_pub_map_present = 0;
+ uint8_t n_prv_in_port = 0;
+ uint32_t i;
+
+ /* Default number of tuples */
+ p->n_lb_tuples = 0;
+
+ if (LOADB_DEBUG > 2)
+ printf("LOADB pipeline_loadb_parse_args params->n_args: %d\n",
+ params->n_args);
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (LOADB_DEBUG > 2)
+ printf("LOADB args[%d]: %s %d, %s\n", i, arg_name,
+ atoi(arg_value), arg_value);
+
+ /* outport_offset = 128 + 8 */
+ if (strcmp(arg_name, "outport_offset") == 0) {
+ if (outport_offset_present)
+ return -1;
+ outport_offset_present = 1;
+
+ p->outport_offset = atoi(arg_value);
+ if (p->outport_offset <= 0) {
+ printf("Outport_offset is invalid\n");
+ return -1;
+ }
+ printf("outport_offset: 0x%x\n", p->outport_offset);
+ continue;
+ }
+ /* n_vnf_threads = 4 */
+ if (strcmp(arg_name, "n_vnf_threads") == 0) {
+ if (n_vnf_threads_present)
+ return -1;
+ n_vnf_threads_present = 1;
+
+ p->n_vnf_threads = atoi(arg_value);
+
+ total_vnf_threads += p->n_vnf_threads;
+
+ if ((p->n_vnf_threads <= 0)
+ || (total_vnf_threads > MAX_VNF_THREADS)) {
+				printf("n_vnf_threads : MIN->1 MAX->16\n");
+ return -1;
+ }
+ printf("n_vnf_threads : 0x%x\n", p->n_vnf_threads);
+ printf("total_vnf_threads: 0x%x\n", total_vnf_threads);
+ continue;
+ }
+
+ /* pktq_in_prv */
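+		/* Expected value format (based on the tokenizer below): a list
+		 * of "RXQ<port>.<queue>" entries; only the physical port number
+		 * before the '.' is extracted.
+		 */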
+ if (strcmp(arg_name, "pktq_in_prv") == 0) {
+ if (pktq_in_prv_present) {
+ printf("Duplicate pktq_in_prv ... "
+ "parse failed..\n\n");
+ return -1;
+ }
+ pktq_in_prv_present = 1;
+
+ int rxport = 0, j = 0;
+ char phy_port_num[8];
+ char *token = strtok(arg_value, "RXQ");
+ while (token) {
+ j = 0;
+ while ((j < 7) && (token[j] != '.')) {
+ phy_port_num[j] = token[j];
+ j++;
+ }
+ phy_port_num[j] = '\0';
+ rxport = atoi(phy_port_num);
+ printf("token: %s, phy_port_str: %s, "
+ "phy_port_num %d\n",
+ token, phy_port_num, rxport);
+ prv_in_port_a[n_prv_in_port++] = rxport;
+ // set rxport egress
+ if (rxport < 0xff){
+ if(rxport < PIPELINE_MAX_PORT_IN)
+ in_port_dir_a[rxport] = 1;
+ }
+ token = strtok(NULL, "RXQ");
+ }
+
+ if (n_prv_in_port == 0) {
+ printf("VNF common parse error - "
+ "no prv RX phy port\n");
+ return -1;
+ }
+ continue;
+ }
+
+ /* pktq_in_prv_handler */
+
+ if (strcmp(arg_name, "prv_que_handler") == 0) {
+
+ if (prv_que_handler_present) {
+				printf("Duplicate prv_que_handler ..\n\n");
+ return -1;
+ }
+ prv_que_handler_present = 1;
+ n_prv_in_port = 0;
+
+ char *token;
+ int rxport = 0;
+ /* get the first token */
+ token = strtok(arg_value, "(");
+ token = strtok(token, ")");
+ token = strtok(token, ",");
+			printf("***** prv_que_handler *****\n");
+			if (token)
+				printf("string is :%s\n", token);
+			else
+				printf("string is null\n");
+
+ /* walk through other tokens */
+ while (token != NULL) {
+ printf(" %s\n", token);
+ rxport = atoi(token);
+ prv_que_port_index[n_prv_in_port++] = rxport;
+ if (rxport < 0xff){
+ if(rxport < PIPELINE_MAX_PORT_IN)
+ in_port_egress_prv[rxport] = 1;
+ }
+ p->n_prv_Q++;
+ token = strtok(NULL, ",");
+ }
+
+ if (n_prv_in_port == 0) {
+ printf("VNF common parse err - no prv RX phy port\n");
+ return -1;
+ }
+
+ continue;
+ }
+ /* prv_to_pub_map */
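+		/* Expected value format (based on the parser below): a list of
+		 * "(rx_port,tx_port)" pairs mapping each private RX port to its
+		 * public TX counterpart.
+		 */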
+ if (strcmp(arg_name, "prv_to_pub_map") == 0) {
+ if (prv_to_pub_map_present) {
+ printf("Duplicated prv_to_pub_map ... "
+ "parse failed ...\n");
+ return -1;
+ }
+ prv_to_pub_map_present = 1;
+
+ int rxport = 0, txport = 0, j = 0, k = 0;
+ char rx_phy_port_num[5];
+ char tx_phy_port_num[5];
+ char *token = strtok(arg_value, "(");
+ while (token) {
+ j = 0;
+ while ((j < 4) && (token[j] != ',')) {
+ rx_phy_port_num[j] = token[j];
+ j++;
+ }
+ rx_phy_port_num[j] = '\0';
+ rxport = atoi(rx_phy_port_num);
+
+ j++;
+ k = 0;
+ while ((k < 4) && (token[j+k] != ')')) {
+ tx_phy_port_num[k] = token[j+k];
+ k++;
+ }
+ tx_phy_port_num[k] = '\0';
+ txport = atoi(tx_phy_port_num);
+
+ printf("token: %s,rx_phy_port_str: %s, phy_port_num "
+ "%d, tx_phy_port_str: %s, tx_phy_port_num %d\n",
+ token, rx_phy_port_num, rxport,
+ tx_phy_port_num, txport);
+				if ((rxport >= PIPELINE_MAX_PORT_IN) ||
+					(txport >= PIPELINE_MAX_PORT_IN)) {
+					printf("LOADB parse error - "
+						"port out of range. Rx %d, Tx %d\n",
+						rxport, txport);
+					return -1;
+				}
+				if (in_port_dir_a[rxport] != 1) {
+					printf("LOADB parse error - "
+						"incorrect prv-pub translation. Rx %d, "
+						"Tx %d, Rx Dir %d\n", rxport, txport,
+						in_port_dir_a[rxport]);
+
+					return -1;
+				}
+ if (rxport < 0xff){
+ if (rxport < PIPELINE_MAX_PORT_IN)
+ prv_to_pub_map[rxport] = txport;
+ }
+ if (txport < 0xff)
+ if(txport < PIPELINE_MAX_PORT_IN)
+ pub_to_prv_map[txport] = rxport;
+ token = strtok(NULL, "(");
+ }
+
+ continue;
+ }
+ /* Set number of tuples if available in config file */
+		if (strcmp(arg_name, "n_lb_tuples") == 0) {
+			p->n_lb_tuples = atoi(arg_value);
+			printf("n_lb_tuples: 0x%x\n", p->n_lb_tuples);
+			continue;
+		}
+
+ /* loadb_debug */
+ if (strcmp(arg_name, "loadb_debug") == 0) {
+ LOADB_DEBUG = atoi(arg_value);
+ continue;
+ }
+
+		/* any other, unknown argument is ignored */
+ } /* for */
+
+ /* Check that mandatory arguments are present */
+ if ((n_vnf_threads_present == 0) || (outport_offset_present == 0))
+ return -1;
+
+ return 0;
+
+}
+
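+/*
+ * Sanity check: count the pipelines configured after this LOADB instance
+ * (up to the next LOADB pipeline) and verify that the count matches the
+ * configured n_vnf_threads, i.e. one worker pipeline per VNF thread.
+ */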
+int check_loadb_thread(
+ struct app_params *app,
+ struct pipeline_params *params,
+ int32_t n_vnf_threads)
+{
+ uint32_t i;
+ int pipeline_num = 0;
+ int count = 0;
+ int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
+ if (dont_care != 1)
+ return -1;
+ /* changed from pipeline_num+1 to +2 */
+ for (i = pipeline_num + 2; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ if (!strncmp(p->type, "LOADB", strlen(p->type)))
+ break;
+ count++;
+ }
+ if (n_vnf_threads != count)
+ return -1;
+ return 0;
+
+}
+
+static void *pipeline_loadb_init(
+ struct pipeline_params *params,
+ __rte_unused void *arg)
+ /* arg is app parameter (struct app_params *app) */
+ /*save it for use in port in handler */
+{
+ struct pipeline *p;
+ struct pipeline_loadb *p_loadb;
+ uint32_t size, i, in_ports_arg_size;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) || (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_loadb));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_loadb = (struct pipeline_loadb *)p;
+ if (p == NULL)
+ return NULL;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "LOADB");
+
+ p_loadb->n_vnf_threads = 0;
+ p_loadb->outport_offset = 0;
+ p_loadb->receivedLBPktCount = 0;
+ p_loadb->droppedLBPktCount = 0;
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ p_loadb->links_map[i] = 0xff;
+ }
+ p_loadb->pipeline_num = 0xff;
+ p_loadb->n_prv_Q = 0;
+ p_loadb->n_pub_Q = 0;
+
+ /* Parse arguments */
+
+ if (pipeline_loadb_parse_args(p_loadb, params))
+ return NULL;
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "LOADB",
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+
+ printf("Loadb p->p %p, socket %d\n", p->p,
+ pipeline_params.socket_id);
+ }
+
+ /* Memory allocation for in_port_h_arg */
+ in_ports_arg_size =
+ RTE_CACHE_LINE_ROUNDUP((sizeof(struct pipeline_loadb_in_port_h_arg))
+ * (params->n_ports_in));
+ struct pipeline_loadb_in_port_h_arg *ap =
+ (struct pipeline_loadb_in_port_h_arg *)
+ rte_zmalloc(NULL,
+ in_ports_arg_size,
+ RTE_CACHE_LINE_SIZE);
+ if (ap == NULL)
+ return NULL;
+
+ printf("ap pointer %p\n", ap);
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ /* passing our loadb pipeline in call back arg */
+ (ap[i]).p = p_loadb;
+ (ap[i]).in_port_id = i;
+
+ struct rte_pipeline_port_in_params port_params = {
+ .ops =
+ pipeline_port_in_params_get_ops(&params->port_in
+ [i]),
+ .arg_create =
+ pipeline_port_in_params_convert(&params->port_in
+ [i]),
+			/* f_action is assigned below based on port direction */
+ .f_action = NULL,
+ .arg_ah = &(ap[i]),
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+		/* Select the private or public in-port handler */
+ if (is_port_index_privte(i)) {/* Multiport changes*/
+ printf("LOADB %d port is Prv\n", i);
+ port_params.f_action = port_in_ah_loadb_key_prv;
+ } else{
+ printf("LOADB %d port is Pub\n", i);
+ port_params.f_action = port_in_ah_loadb_key_pub;
+ }
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ }
+
+ p_loadb->n_pub_Q = p_loadb->p.n_ports_in - p_loadb->n_prv_Q;
+ printf("LOADB : n_prv_Q - %d n_pub_Q - %d\n",
+ p_loadb->n_prv_Q, p_loadb->n_pub_Q);
+
+ for (i = 0; i < p->n_ports_in; i++) {
+ printf("is_port_index_privte(%d): %d\n", i,
+ is_port_index_privte(i));
+ printf("is_phy_port_privte(%d): %d\n", i,
+ is_phy_port_privte(i));
+ printf("action handler of %d:%p\n", i,
+ p_loadb->p.p->ports_in[i].f_action);
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops =
+ pipeline_port_out_params_get_ops(&params->port_out
+ [i]),
+ .arg_create =
+ pipeline_port_out_params_convert(&params->port_out
+ [i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ printf("Outport p->port_out_id[%d] %p\n", i,
+ &p->port_out_id[i]);
+ }
+
+ int pipeline_num = 0;
+ int dont_care = sscanf(params->name, "PIPELINE%d", &pipeline_num);
+ if (dont_care != 1) {
+ printf("Unable to read pipeline number\n");
+ return NULL;
+ }
+ p_loadb->pipeline_num = pipeline_num;
+#if 0
+ set_outport_id(pipeline_num, p, lb_outport_id);
+ set_phy_outport_map(pipeline_num, p_loadb->links_map);
+
+ set_port_to_loadb_map(pipeline_num);
+
+ register_loadb_to_arp(pipeline_num, p, app);
+#endif
+ register_pipeline_Qs(p_loadb->pipeline_num, p);
+ set_link_map(p_loadb->pipeline_num, p, p_loadb->links_map);
+ //set_outport_id(p_loadb->pipeline_num, p, p_loadb->outport_id);
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+
+ struct rte_table_array_params table_array_params = {
+ .n_entries = MAX_VNF_THREADS,
+ .offset = p_loadb->outport_offset,
+ };
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_array_ops,
+ .arg_create = &table_array_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = p_loadb,
+ .action_data_size = 0,
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ } /* Tables */
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(
+ p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Initialize table entries */
+ {
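+		/* Array position i (the computed thread/queue id) maps 1:1 to
+		 * output port i; positions beyond n_ports_out are set to drop.
+		 */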
+ for (i = 0; i < MAX_VNF_THREADS; i++) {
+ struct rte_table_array_key key = {
+ .pos = i,
+ };
+ struct loadb_table_entry entry;
+ entry.head.action = RTE_PIPELINE_ACTION_PORT;
+
+ if (i < p->n_ports_out) {
+ entry.head.port_id = p->port_out_id[i];
+ printf("\ni %d, p->port_out_id[%d] %d", i, i,
+ p->port_out_id[i]);
+ } else {
+ /* First CGNAPT thread */
+ entry.head.port_id = p->port_out_id[0];
+ entry.head.action = RTE_PIPELINE_ACTION_DROP;
+ }
+
+ struct rte_pipeline_table_entry *entry_ptr;
+ int key_found, status;
+ status = rte_pipeline_table_entry_add(
+ p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *)
+ &entry,
+ &key_found,
+ &entry_ptr);
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+ }
+ /* Add default entry to tables */
+ {
+ struct rte_pipeline_table_entry default_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ /* LB by default forward to 1st cgnat thread */
+ .port_id = p->port_out_id[0],
+ };
+
+ struct rte_pipeline_table_entry *default_entry_ptr;
+
+ int status = rte_pipeline_table_default_entry_add(
+ p->p,
+ p->table_id[0],
+ &default_entry,
+ &default_entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_loadb->custom_handlers,
+ custom_handlers, sizeof(p_loadb->custom_handlers));
+
+ return p;
+}
+
+static int pipeline_loadb_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_loadb_track(void *pipeline,
+ __rte_unused uint32_t port_in, uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_in == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int pipeline_loadb_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+void *pipeline_loadb_msg_req_entry_dbg_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_loadb_entry_dbg_msg_rsp *rsp = msg;
+ uint8_t *Msg = msg;
+ struct pipeline_loadb *p_loadb = (struct pipeline_loadb *)p;
+
+ rsp->status = 0;
+
+ printf("LoadB debug handler called with args %x %x, offset %d\n",
+ Msg[LOADB_DBG_CMD_OFST], Msg[LOADB_DBG_CMD_OFST + 1],
+ LOADB_DBG_CMD_OFST);
+
+ if (Msg[LOADB_DBG_CMD_OFST] == LOADB_DBG_CMD_STATS_SHOW) {
+ printf("\nLoadB Packet Stats: Received %" PRIu64 "\n",
+ p_loadb->receivedLBPktCount);
+ return rsp;
+ }
+ if (Msg[LOADB_DBG_CMD_OFST] == LOADB_DBG_CMD_STATS_CLEAR) {
+ printf("\nLoadB Packet Stats: Received %" PRIu64 "\n",
+ p_loadb->receivedLBPktCount);
+ p_loadb->receivedLBPktCount = 0;
+ return rsp;
+ }
+
+ if (Msg[LOADB_DBG_CMD_OFST] == LOADB_DBG_CMD_DBG_LEVEL) {
+ LOADB_DEBUG = Msg[LOADB_DBG_CMD_OFST + 1];
+ printf("LOADB Debug level set to %d\n", LOADB_DEBUG);
+ lb_pkt_print_count = 0;
+ return rsp;
+ }
+ if (Msg[LOADB_DBG_CMD_OFST] == LOADB_DBG_CMD_DBG_SHOW) {
+ printf("\nLoadB DBG Level: %u\n", LOADB_DEBUG);
+ return rsp;
+ }
+ if (Msg[LOADB_DBG_CMD_OFST] == LOADB_DBG_CMD_IF_STATS) {
+ printf("\n");
+ uint8_t i, j;
+
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_eth_stats stats;
+ rte_eth_stats_get(p_loadb->links_map[i], &stats);
+ if (is_phy_port_privte(i))
+ printf("Private Port Stats %d\n", i);
+ else
+ printf("Public Port Stats %d\n", i);
+ printf("\n\tipackets : %" PRIu64 "\n\topackets : %"
+ PRIu64 "\n\tierrors : %" PRIu64
+ "\n\toerrors : %" PRIu64 "\n\trx_nombuf: %"
+ PRIu64 "\n", stats.ipackets, stats.opackets,
+ stats.ierrors, stats.oerrors, stats.rx_nombuf);
+ if (is_phy_port_privte(i))
+ printf("Private Q: ");
+ else
+ printf("Public Q: ");
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++)
+ printf(" %" PRIu64 ", %" PRIu64 "|",
+ stats.q_ipackets[j],
+ stats.q_opackets[j]);
+
+ printf("\n\n");
+
+ }
+ return rsp;
+ }
+
+ return rsp;
+
+}
+
+struct pipeline_be_ops pipeline_loadb_be_ops = {
+ .f_init = pipeline_loadb_init,
+ .f_free = pipeline_loadb_free,
+ .f_run = NULL,
+ .f_timer = pipeline_loadb_timer,
+ .f_track = pipeline_loadb_track,
+};
diff --git a/common/VIL/pipeline_loadb/pipeline_loadb_be.h b/common/VIL/pipeline_loadb/pipeline_loadb_be.h
new file mode 100644
index 00000000..a948f4cb
--- /dev/null
+++ b/common/VIL/pipeline_loadb/pipeline_loadb_be.h
@@ -0,0 +1,149 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_LOADB_BE_H__
+#define __INCLUDE_PIPELINE_LOADB_BE_H__
+
+#include <rte_ip.h>
+#include "pipeline_common_be.h"
+#include <app.h>
+
+#define MBUF_HDR_ROOM 256
+#define ETH_HDR_SIZE 14
+#define IP_HDR_SRC_ADR_OFST 12
+#define IP_HDR_DST_ADR_OFST 16
+#define IP_HDR_PROTOCOL_OFST 9
+#define IP_HDR_SIZE 20
+#define IPV6_HDR_SRC_ADR_OFST 8
+#define IPV6_HDR_DST_ADR_OFST 24
+#define IPV6_HDR_PROTOCOL_OFST 6
+#define IPV6_HDR_SIZE 40
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 17
+#define PIPELINE_LOADB_KEY_MAX_SIZE 64
+
+#define LOADB_ING_DIR 0
+#define LOADB_EGR_DIR 1
+
+#define LOADB_DBG_CMD_OFST 8
+#define LOADB_DBG_CMD_STATS_SHOW 0
+#define LOADB_DBG_CMD_STATS_CLEAR 1
+#define LOADB_DBG_CMD_DBG_LEVEL 2
+#define LOADB_DBG_CMD_DBG_SHOW 3
+#define LOADB_DBG_CMD_IF_STATS 4
+#define LOADB_DBG_CMD_OFST1 10
+
+#define PIPELINE_LOADB_KEY_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i, j; \
+ \
+ for (j = 0; j < n_pkts; j++) \
+ rte_prefetch0(pkts[j]); \
+ pkt_burst_cnt = 0; \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], i, arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], i, arg); \
+ \
+ \
+ return 0; \
+}
+
+extern uint8_t LOADB_DEBUG;
+extern uint8_t in_port_egress_prv[PIPELINE_MAX_PORT_IN];
+extern uint8_t prv_que_port_index[PIPELINE_MAX_PORT_IN];
+extern uint8_t in_port_dir_a[PIPELINE_MAX_PORT_IN];
+
+extern uint8_t get_in_port_dir(uint8_t);
+extern uint8_t is_port_index_privte(uint16_t);
+extern uint8_t is_phy_port_privte(uint16_t);
+extern uint32_t get_prv_to_pub_port(uint32_t *ip_addr, uint8_t type);
+extern uint32_t get_pub_to_prv_port(uint32_t *ip_addr, uint8_t type);
+extern uint8_t prv_to_pub_map[PIPELINE_MAX_PORT_IN];
+//extern struct app_params *myApp;
+//extern struct pipeline_arpicmp *p_arp;
+
+/*
+ * LOADB Entry
+ */
+
+struct pipeline_loadb_in_port_h_arg {
+ struct pipeline_loadb *p;
+ uint8_t in_port_id;
+};
+
+/*
+ * Messages
+ */
+enum pipeline_loadb_msg_req_type {
+ /* to be used for debug purposes */
+ PIPELINE_LOADB_MSG_REQ_ENTRY_DBG,
+ PIPELINE_LOADB_MSG_REQS
+};
+
+/*
+ * MSG ENTRY DBG
+ */
+struct pipeline_loadb_entry_dbg_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_loadb_msg_req_type subtype;
+
+ /* data */
+ uint8_t data[5];
+};
+
+struct pipeline_loadb_entry_dbg_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+extern struct pipeline_be_ops pipeline_loadb_be_ops;
+struct ipv4_hdr_port {
+ struct ipv4_hdr ipv4;
+ uint16_t src_port;
+ uint16_t dst_port;
+
+} __attribute__((__packed__));
+struct ipv6_hdr_port {
+ struct ipv6_hdr ipv6;
+ uint16_t src_port;
+ uint16_t dst_port;
+
+} __attribute__((__packed__));
+
+struct lb_pkt {
+ struct ether_hdr eth;
+ union{
+ struct ipv4_hdr_port ipv4_port;
+ struct ipv6_hdr_port ipv6_port;
+ };
+} __attribute__((__packed__));
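+
+/*
+ * Editorial note (assumption): lb_pkt overlays the Ethernet header, the
+ * IPv4/IPv6 header and the first four bytes of the L4 header, so a single
+ * packed structure exposes the full 5-tuple (addresses, protocol, ports)
+ * used when computing the load-balancing hash.
+ */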
+
+uint8_t calculate_lb_thread_prv(struct rte_mbuf *pkt, void *arg);
+uint8_t calculate_lb_thread_pub(struct rte_mbuf *pkt, void *arg);
+int check_loadb_thread(
+ struct app_params *app,
+ struct pipeline_params *params,
+ int32_t n_vnf_threads);
+
+#endif
diff --git a/common/VIL/pipeline_master/pipeline_master.c b/common/VIL/pipeline_master/pipeline_master.c
new file mode 100644
index 00000000..bfab32ac
--- /dev/null
+++ b/common/VIL/pipeline_master/pipeline_master.c
@@ -0,0 +1,30 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "pipeline_master.h"
+#include "pipeline_master_be.h"
+
+static struct pipeline_fe_ops pipeline_master_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_master = {
+ .name = "MASTER",
+ .be_ops = &pipeline_master_be_ops,
+ .fe_ops = &pipeline_master_fe_ops,
+};
diff --git a/common/VIL/pipeline_master/pipeline_master.h b/common/VIL/pipeline_master/pipeline_master.h
new file mode 100644
index 00000000..d39afb5f
--- /dev/null
+++ b/common/VIL/pipeline_master/pipeline_master.h
@@ -0,0 +1,24 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_MASTER_H__
+#define __INCLUDE_PIPELINE_MASTER_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_master;
+
+#endif
diff --git a/common/VIL/pipeline_master/pipeline_master_be.c b/common/VIL/pipeline_master/pipeline_master_be.c
new file mode 100644
index 00000000..425e2334
--- /dev/null
+++ b/common/VIL/pipeline_master/pipeline_master_be.c
@@ -0,0 +1,134 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "app.h"
+#include "pipeline_master_be.h"
+
+struct pipeline_master {
+ struct app_params *app;
+ struct cmdline *cl;
+ int script_file_done;
+} __rte_cache_aligned;
+
+static void*
+pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ struct pipeline_master *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_master));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->app = app;
+
+ p->cl = cmdline_stdin_new(app->cmds, "pipeline> ");
+ if (p->cl == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+
+ p->script_file_done = 0;
+ if (app->script_file == NULL)
+ p->script_file_done = 1;
+
+ return (void *) p;
+}
+
+static int
+pipeline_free(void *pipeline)
+{
+ struct pipeline_master *p = (struct pipeline_master *) pipeline;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ cmdline_stdin_exit(p->cl);
+ rte_free(p);
+
+ return 0;
+}
+
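+/*
+ * Editorial summary: on the first invocation the optional CLI script file is
+ * executed to completion through a temporary cmdline instance; afterwards
+ * stdin is polled once per call, a negative poll status triggers rte_panic()
+ * and RDLINE_EXITED terminates the application.
+ */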
+static int
+pipeline_run(void *pipeline)
+{
+ struct pipeline_master *p = (struct pipeline_master *) pipeline;
+ int status;
+
+ if (p->script_file_done == 0) {
+ struct app_params *app = p->app;
+ int fd = open(app->script_file, O_RDONLY);
+
+ if (fd < 0)
+ printf("Cannot open CLI script file \"%s\"\n",
+ app->script_file);
+ else {
+ struct cmdline *file_cl;
+
+ printf("Running CLI script file \"%s\" ...\n",
+ app->script_file);
+ file_cl = cmdline_new(p->cl->ctx, "", fd, 1);
+ cmdline_interact(file_cl);
+ close(fd);
+ }
+
+ p->script_file_done = 1;
+ }
+
+ status = cmdline_poll(p->cl);
+ if (status < 0)
+ rte_panic("CLI poll error (%" PRId32 ")\n", status);
+ else if (status == RDLINE_EXITED) {
+ cmdline_stdin_exit(p->cl);
+ rte_exit(0, "Bye!\n");
+ }
+
+ return 0;
+}
+
+static int
+pipeline_timer(__rte_unused void *pipeline)
+{
+ rte_timer_manage();
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_master_be_ops = {
+ .f_init = pipeline_init,
+ .f_free = pipeline_free,
+ .f_run = pipeline_run,
+ .f_timer = pipeline_timer,
+ .f_track = NULL,
+};
diff --git a/common/VIL/pipeline_master/pipeline_master_be.h b/common/VIL/pipeline_master/pipeline_master_be.h
new file mode 100644
index 00000000..fceb6849
--- /dev/null
+++ b/common/VIL/pipeline_master/pipeline_master_be.h
@@ -0,0 +1,24 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_MASTER_BE_H__
+#define __INCLUDE_PIPELINE_MASTER_BE_H__
+
+#include "pipeline_common_be.h"
+
+extern struct pipeline_be_ops pipeline_master_be_ops;
+
+#endif
diff --git a/common/VIL/pipeline_passthrough/pipeline_passthrough.c b/common/VIL/pipeline_passthrough/pipeline_passthrough.c
new file mode 100644
index 00000000..0463f8ef
--- /dev/null
+++ b/common/VIL/pipeline_passthrough/pipeline_passthrough.c
@@ -0,0 +1,30 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "pipeline_passthrough.h"
+#include "pipeline_passthrough_be.h"
+
+static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_passthrough = {
+ .name = "PASS-THROUGH",
+ .be_ops = &pipeline_passthrough_be_ops,
+ .fe_ops = &pipeline_passthrough_fe_ops,
+};
diff --git a/common/VIL/pipeline_passthrough/pipeline_passthrough.h b/common/VIL/pipeline_passthrough/pipeline_passthrough.h
new file mode 100644
index 00000000..9b650eb4
--- /dev/null
+++ b/common/VIL/pipeline_passthrough/pipeline_passthrough.h
@@ -0,0 +1,24 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_H__
+#define __INCLUDE_PIPELINE_PASSTHROUGH_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_passthrough;
+
+#endif
diff --git a/common/VIL/pipeline_passthrough/pipeline_passthrough_be.c b/common/VIL/pipeline_passthrough/pipeline_passthrough_be.c
new file mode 100644
index 00000000..5b74d75b
--- /dev/null
+++ b/common/VIL/pipeline_passthrough/pipeline_passthrough_be.c
@@ -0,0 +1,787 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_table_stub.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_passthrough_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
+struct pipeline_passthrough {
+ struct pipeline p;
+ struct pipeline_passthrough_params params;
+ rte_table_hash_op_hash f_hash;
+} __rte_cache_aligned;
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_msg_req_invalid_handler,
+};
+
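+/*
+ * Editorial summary: pkt_work() and pkt4_work() below copy dma_size bytes of
+ * mbuf metadata from dma_src_offset to dma_dst_offset while applying
+ * dma_src_mask, optionally store a hash of the masked result at
+ * dma_hash_offset and, when load balancing is enabled, use that hash to pick
+ * the output port.
+ */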
+static inline __attribute__((always_inline)) void
+pkt_work(
+ struct rte_mbuf *pkt,
+ void *arg,
+ uint32_t dma_size,
+ uint32_t hash_enabled,
+ uint32_t lb_hash,
+ uint32_t port_out_pow2)
+{
+ struct pipeline_passthrough *p = arg;
+
+ uint64_t *dma_dst = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->params.dma_dst_offset);
+ uint64_t *dma_src = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->params.dma_src_offset);
+ uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
+ uint32_t *dma_hash = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ p->params.dma_hash_offset);
+ uint32_t i;
+
+ /* Read (dma_src), compute (dma_dst), write (dma_dst) */
+ for (i = 0; i < (dma_size / 8); i++)
+ dma_dst[i] = dma_src[i] & dma_mask[i];
+
+ /* Read (dma_dst), compute (hash), write (hash) */
+ if (hash_enabled) {
+ uint32_t hash = p->f_hash(dma_dst, dma_size, 0);
+ *dma_hash = hash;
+
+ if (lb_hash) {
+ uint32_t port_out;
+
+ if (port_out_pow2)
+ port_out
+ = hash & (p->p.n_ports_out - 1);
+ else
+ port_out
+ = hash % p->p.n_ports_out;
+
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out, pkt);
+ }
+ }
+}
+
+static inline __attribute__((always_inline)) void
+pkt4_work(
+ struct rte_mbuf **pkts,
+ void *arg,
+ uint32_t dma_size,
+ uint32_t hash_enabled,
+ uint32_t lb_hash,
+ uint32_t port_out_pow2)
+{
+ struct pipeline_passthrough *p = arg;
+
+ uint64_t *dma_dst0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->params.dma_dst_offset);
+
+ uint64_t *dma_src0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->params.dma_src_offset);
+ uint64_t *dma_src1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->params.dma_src_offset);
+ uint64_t *dma_src2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->params.dma_src_offset);
+ uint64_t *dma_src3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->params.dma_src_offset);
+
+ uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
+
+ uint32_t *dma_hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkts[0],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkts[1],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkts[2],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkts[3],
+ p->params.dma_hash_offset);
+
+ uint32_t i;
+
+ /* Read (dma_src), compute (dma_dst), write (dma_dst) */
+ for (i = 0; i < (dma_size / 8); i++) {
+ dma_dst0[i] = dma_src0[i] & dma_mask[i];
+ dma_dst1[i] = dma_src1[i] & dma_mask[i];
+ dma_dst2[i] = dma_src2[i] & dma_mask[i];
+ dma_dst3[i] = dma_src3[i] & dma_mask[i];
+ }
+
+ /* Read (dma_dst), compute (hash), write (hash) */
+ if (hash_enabled) {
+ uint32_t hash0 = p->f_hash(dma_dst0, dma_size, 0);
+ uint32_t hash1 = p->f_hash(dma_dst1, dma_size, 0);
+ uint32_t hash2 = p->f_hash(dma_dst2, dma_size, 0);
+ uint32_t hash3 = p->f_hash(dma_dst3, dma_size, 0);
+
+ *dma_hash0 = hash0;
+ *dma_hash1 = hash1;
+ *dma_hash2 = hash2;
+ *dma_hash3 = hash3;
+
+ if (lb_hash) {
+ uint32_t port_out0, port_out1, port_out2, port_out3;
+
+ if (port_out_pow2) {
+ port_out0
+ = hash0 & (p->p.n_ports_out - 1);
+ port_out1
+ = hash1 & (p->p.n_ports_out - 1);
+ port_out2
+ = hash2 & (p->p.n_ports_out - 1);
+ port_out3
+ = hash3 & (p->p.n_ports_out - 1);
+ } else {
+ port_out0
+ = hash0 % p->p.n_ports_out;
+ port_out1
+ = hash1 % p->p.n_ports_out;
+ port_out2
+ = hash2 % p->p.n_ports_out;
+ port_out3
+ = hash3 % p->p.n_ports_out;
+ }
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out0, pkts[0]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out1, pkts[1]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out2, pkts[2]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out3, pkts[3]);
+ }
+ }
+}
+
+#define PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+static inline void \
+pkt_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2( \
+ struct rte_mbuf *pkt, \
+ void *arg) \
+{ \
+ pkt_work(pkt, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
+}
+
+#define PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+static inline void \
+pkt4_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2( \
+ struct rte_mbuf **pkts, \
+ void *arg) \
+{ \
+ pkt4_work(pkts, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
+}
+
+#define port_in_ah(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PIPELINE_PORT_IN_AH(port_in_ah_size##dma_size##_hash \
+ ##hash_enabled##_lb##lb_hash##_pw##port_pow2, \
+ pkt_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt4_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2)
+
+
+#define port_in_ah_lb(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT4_WORK(dma_size, hash_enabled, lb_hash, port_pow2) \
+PIPELINE_PORT_IN_AH_HIJACK_ALL( \
+ port_in_ah_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt4_work_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2)
+
+/* Port in AH variants (dma_size, hash_enabled, lb_hash, port_pow2) */
+
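+/*
+ * Editorial note: each invocation below stamps out one specialized handler.
+ * For example, port_in_ah(8, 1, 0, 0) generates pkt_work_size8_hash1_lb0_pw0,
+ * pkt4_work_size8_hash1_lb0_pw0 and the port-in action handler
+ * port_in_ah_size8_hash1_lb0_pw0 that get_port_in_ah() selects at init time.
+ */
+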
+port_in_ah(8, 0, 0, 0)
+port_in_ah(8, 1, 0, 0)
+port_in_ah_lb(8, 1, 1, 0)
+port_in_ah_lb(8, 1, 1, 1)
+
+port_in_ah(16, 0, 0, 0)
+port_in_ah(16, 1, 0, 0)
+port_in_ah_lb(16, 1, 1, 0)
+port_in_ah_lb(16, 1, 1, 1)
+
+port_in_ah(24, 0, 0, 0)
+port_in_ah(24, 1, 0, 0)
+port_in_ah_lb(24, 1, 1, 0)
+port_in_ah_lb(24, 1, 1, 1)
+
+port_in_ah(32, 0, 0, 0)
+port_in_ah(32, 1, 0, 0)
+port_in_ah_lb(32, 1, 1, 0)
+port_in_ah_lb(32, 1, 1, 1)
+
+port_in_ah(40, 0, 0, 0)
+port_in_ah(40, 1, 0, 0)
+port_in_ah_lb(40, 1, 1, 0)
+port_in_ah_lb(40, 1, 1, 1)
+
+port_in_ah(48, 0, 0, 0)
+port_in_ah(48, 1, 0, 0)
+port_in_ah_lb(48, 1, 1, 0)
+port_in_ah_lb(48, 1, 1, 1)
+
+port_in_ah(56, 0, 0, 0)
+port_in_ah(56, 1, 0, 0)
+port_in_ah_lb(56, 1, 1, 0)
+port_in_ah_lb(56, 1, 1, 1)
+
+port_in_ah(64, 0, 0, 0)
+port_in_ah(64, 1, 0, 0)
+port_in_ah_lb(64, 1, 1, 0)
+port_in_ah_lb(64, 1, 1, 1)
+
+static rte_pipeline_port_in_action_handler
+get_port_in_ah(struct pipeline_passthrough *p)
+{
+ if (p->params.dma_enabled == 0)
+ return NULL;
+
+ if (p->params.dma_hash_enabled) {
+ if (p->params.lb_hash_enabled) {
+ if (rte_is_power_of_2(p->p.n_ports_out))
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash1_lb1_pw1;
+ case 16: return port_in_ah_size16_hash1_lb1_pw1;
+ case 24: return port_in_ah_size24_hash1_lb1_pw1;
+ case 32: return port_in_ah_size32_hash1_lb1_pw1;
+ case 40: return port_in_ah_size40_hash1_lb1_pw1;
+ case 48: return port_in_ah_size48_hash1_lb1_pw1;
+ case 56: return port_in_ah_size56_hash1_lb1_pw1;
+ case 64: return port_in_ah_size64_hash1_lb1_pw1;
+ default: return NULL;
+ }
+ else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash1_lb1_pw0;
+ case 16: return port_in_ah_size16_hash1_lb1_pw0;
+ case 24: return port_in_ah_size24_hash1_lb1_pw0;
+ case 32: return port_in_ah_size32_hash1_lb1_pw0;
+ case 40: return port_in_ah_size40_hash1_lb1_pw0;
+ case 48: return port_in_ah_size48_hash1_lb1_pw0;
+ case 56: return port_in_ah_size56_hash1_lb1_pw0;
+ case 64: return port_in_ah_size64_hash1_lb1_pw0;
+ default: return NULL;
+ }
+ } else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash1_lb0_pw0;
+ case 16: return port_in_ah_size16_hash1_lb0_pw0;
+ case 24: return port_in_ah_size24_hash1_lb0_pw0;
+ case 32: return port_in_ah_size32_hash1_lb0_pw0;
+ case 40: return port_in_ah_size40_hash1_lb0_pw0;
+ case 48: return port_in_ah_size48_hash1_lb0_pw0;
+ case 56: return port_in_ah_size56_hash1_lb0_pw0;
+ case 64: return port_in_ah_size64_hash1_lb0_pw0;
+ default: return NULL;
+ }
+ } else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_size8_hash0_lb0_pw0;
+ case 16: return port_in_ah_size16_hash0_lb0_pw0;
+ case 24: return port_in_ah_size24_hash0_lb0_pw0;
+ case 32: return port_in_ah_size32_hash0_lb0_pw0;
+ case 40: return port_in_ah_size40_hash0_lb0_pw0;
+ case 48: return port_in_ah_size48_hash0_lb0_pw0;
+ case 56: return port_in_ah_size56_hash0_lb0_pw0;
+ case 64: return port_in_ah_size64_hash0_lb0_pw0;
+ default: return NULL;
+ }
+}
+
+int
+pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t dma_dst_offset_present = 0;
+ uint32_t dma_src_offset_present = 0;
+ uint32_t dma_src_mask_present = 0;
+ uint32_t dma_size_present = 0;
+ uint32_t dma_hash_offset_present = 0;
+ uint32_t lb_present = 0;
+ uint32_t i;
+ char dma_mask_str[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2];
+
+ /* default values */
+ p->dma_enabled = 0;
+ p->dma_hash_enabled = 0;
+ p->lb_hash_enabled = 0;
+ memset(p->dma_src_mask, 0xFF, sizeof(p->dma_src_mask));
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* dma_dst_offset */
+ if (strcmp(arg_name, "dma_dst_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_dst_offset_present == 0, params->name,
+ arg_name);
+ dma_dst_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_dst_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_src_offset */
+ if (strcmp(arg_name, "dma_src_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_src_offset_present == 0, params->name,
+ arg_name);
+ dma_src_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_src_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_size */
+ if (strcmp(arg_name, "dma_size") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_size_present == 0, params->name,
+ arg_name);
+ dma_size_present = 1;
+
+ status = parser_read_uint32(&p->dma_size,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->dma_size != 0) &&
+ ((p->dma_size % 8) == 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->dma_size <=
+ PIPELINE_PASSTHROUGH_DMA_SIZE_MAX)),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_src_mask */
+ if (strcmp(arg_name, "dma_src_mask") == 0) {
+ int mask_str_len = strlen(arg_value);
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_src_mask_present == 0,
+ params->name, arg_name);
+ dma_src_mask_present = 1;
+
+ PIPELINE_ARG_CHECK((mask_str_len <
+ (PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2)),
+ "Parse error in section \"%s\": entry "
+ "\"%s\" too long", params->name,
+ arg_name);
+
+ snprintf(dma_mask_str, mask_str_len + 1,
+ "%s", arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_hash_offset */
+ if (strcmp(arg_name, "dma_hash_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_hash_offset_present == 0,
+ params->name, arg_name);
+ dma_hash_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_hash_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_hash_enabled = 1;
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* load_balance mode */
+ if (strcmp(arg_name, "lb") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ lb_present == 0,
+ params->name, arg_name);
+ lb_present = 1;
+
+ if ((strcmp(arg_value, "hash") == 0) ||
+ (strcmp(arg_value, "HASH") == 0))
+ p->lb_hash_enabled = 1;
+ else
+ PIPELINE_PARSE_ERR_INV_VAL(0,
+ params->name,
+ arg_name,
+ arg_value);
+
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check correlations between arguments */
+ PIPELINE_ARG_CHECK((dma_dst_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_dst_offset\"", params->name);
+ PIPELINE_ARG_CHECK((dma_src_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_src_offset\"", params->name);
+ PIPELINE_ARG_CHECK((dma_size_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_size\"", params->name);
+ PIPELINE_ARG_CHECK((dma_hash_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_hash_offset\"", params->name);
+ PIPELINE_ARG_CHECK((p->lb_hash_enabled <= p->dma_hash_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_hash_offset\"", params->name);
+
+ if (dma_src_mask_present) {
+ uint32_t dma_size = p->dma_size;
+ int status;
+
+ PIPELINE_ARG_CHECK((strlen(dma_mask_str) ==
+ (dma_size * 2)), "Parse error in section "
+ "\"%s\": dma_src_mask should have exactly %u hex "
+ "digits", params->name, (dma_size * 2));
+
+ status = parse_hex_string(dma_mask_str, p->dma_src_mask,
+ &p->dma_size);
+
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (dma_size == p->dma_size)), params->name,
+ "dma_src_mask", dma_mask_str);
+ }
+
+ return 0;
+}
+
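+/*
+ * Illustrative configuration entries accepted by the parser above; the
+ * values are hypothetical examples only:
+ *
+ *   dma_src_offset = 128
+ *   dma_dst_offset = 192
+ *   dma_size = 16
+ *   dma_src_mask = 00FF00FF00FF00FF00FF00FF00FF00FF
+ *   dma_hash_offset = 232
+ *   lb = hash
+ */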
+
+static rte_table_hash_op_hash
+get_hash_function(struct pipeline_passthrough *p)
+{
+ switch (p->params.dma_size) {
+
+ case 8: return hash_default_key8;
+ case 16: return hash_default_key16;
+ case 24: return hash_default_key24;
+ case 32: return hash_default_key32;
+ case 40: return hash_default_key40;
+ case 48: return hash_default_key48;
+ case 56: return hash_default_key56;
+ case 64: return hash_default_key64;
+ default: return NULL;
+ }
+}
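+
+/*
+ * Editorial note: the parser restricts dma_size to a non-zero multiple of 8
+ * no larger than PIPELINE_PASSTHROUGH_DMA_SIZE_MAX, which is exactly the set
+ * of key sizes covered by the hash_default_key* functions returned above.
+ */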
+
+static void*
+pipeline_passthrough_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_passthrough *p_pt;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0) ||
+ (params->n_ports_in < params->n_ports_out) ||
+ (params->n_ports_in % params->n_ports_out))
+ return NULL;
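+ /* Editorial note: n_ports_in must be a non-zero multiple of n_ports_out;
+ * each group of n_ports_in / n_ports_out input ports is mapped to a single
+ * output port by the default table entries added below.
+ */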
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_passthrough));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_pt = (struct pipeline_passthrough *) p;
+ if (p == NULL)
+ return NULL;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Pass-through");
+
+ /* Parse arguments */
+ if (pipeline_passthrough_parse_args(&p_pt->params, params)) {
+ rte_free(p);
+ return NULL;
+ }
+ p_pt->f_hash = get_hash_function(p_pt);
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "PASS-THROUGH",
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ p->n_tables = p->n_ports_in;
+
+ /*Input ports*/
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = get_port_in_ah(p_pt),
+ .arg_ah = p_pt,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ int status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Add entries to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_entry default_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[
+ i / (p->n_ports_in / p->n_ports_out)]},
+ };
+
+ struct rte_pipeline_table_entry *default_entry_ptr;
+
+ int status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[i],
+ &default_entry,
+ &default_entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+
+ return p;
+}
+
+static int
+pipeline_passthrough_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_passthrough_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+static int
+pipeline_passthrough_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ *port_out = port_in / (p->n_ports_in / p->n_ports_out);
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_passthrough_be_ops = {
+ .f_init = pipeline_passthrough_init,
+ .f_free = pipeline_passthrough_free,
+ .f_run = NULL,
+ .f_timer = pipeline_passthrough_timer,
+ .f_track = pipeline_passthrough_track,
+};
diff --git a/common/VIL/pipeline_passthrough/pipeline_passthrough_be.h b/common/VIL/pipeline_passthrough/pipeline_passthrough_be.h
new file mode 100644
index 00000000..442734f6
--- /dev/null
+++ b/common/VIL/pipeline_passthrough/pipeline_passthrough_be.h
@@ -0,0 +1,42 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
+#define __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
+
+#include "pipeline_common_be.h"
+
+#define PIPELINE_PASSTHROUGH_DMA_SIZE_MAX 64
+
+struct pipeline_passthrough_params {
+ uint32_t dma_enabled;
+ uint32_t dma_dst_offset;
+ uint32_t dma_src_offset;
+ uint8_t dma_src_mask[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX];
+ uint32_t dma_size;
+
+ uint32_t dma_hash_enabled;
+ uint32_t dma_hash_offset;
+ uint32_t lb_hash_enabled;
+};
+
+int
+pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
+ struct pipeline_params *params);
+
+extern struct pipeline_be_ops pipeline_passthrough_be_ops;
+
+#endif
diff --git a/common/VIL/pipeline_txrx/pipeline_txrx.c b/common/VIL/pipeline_txrx/pipeline_txrx.c
new file mode 100644
index 00000000..c1fc075f
--- /dev/null
+++ b/common/VIL/pipeline_txrx/pipeline_txrx.c
@@ -0,0 +1,151 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_txrx.h"
+#include "vnf_common.h"
+//#include "lib_arp.h"
+#include "pipeline_arpicmp_be.h"
+
+static int
+app_pipeline_txrx_entry_dbg(struct app_params *app,
+ uint32_t pipeline_id, uint8_t *msg)
+{
+ struct pipeline_txrx_entry_dbg_msg_req *req;
+ struct pipeline_txrx_entry_dbg_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_TXRX_MSG_REQ_ENTRY_DBG;
+ req->data[0] = msg[0];
+ req->data[1] = msg[1];
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ printf("Error rsp->status %d\n", rsp->status);
+ return -1;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * entry dbg
+ */
+
+
+struct cmd_entry_dbg_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t entry_string;
+ cmdline_fixed_string_t dbg_string;
+ uint8_t cmd;
+ uint8_t d1;
+};
+
+static void
+cmd_entry_dbg_parsed(void *parsed_result,
+ __rte_unused struct cmdline *cl, void *data)
+{
+ struct cmd_entry_dbg_result *params = parsed_result;
+ struct app_params *app = data;
+ uint8_t msg[2];
+ int status;
+
+ msg[0] = params->cmd;
+ msg[1] = params->d1;
+ status = app_pipeline_txrx_entry_dbg(app, params->p, msg);
+
+ if (status != 0) {
+ printf("Dbg Command failed\n");
+ return;
+ }
+}
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_p_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, p_string, "p");
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_p =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, p, UINT32);
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_entry_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result,
+ entry_string, "txrx");
+
+static cmdline_parse_token_string_t lb_cmd_entry_dbg_dbg_string =
+TOKEN_STRING_INITIALIZER(struct cmd_entry_dbg_result, dbg_string,
+ "dbg");
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_cmd =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, cmd, UINT8);
+
+static cmdline_parse_token_num_t lb_cmd_entry_dbg_d1 =
+TOKEN_NUM_INITIALIZER(struct cmd_entry_dbg_result, d1, UINT8);
+
+static cmdline_parse_inst_t lb_cmd_entry_dbg = {
+ .f = cmd_entry_dbg_parsed,
+ .data = NULL,
+ .help_str = "TXRX dbg cmd",
+ .tokens = {
+ (void *)&lb_cmd_entry_dbg_p_string,
+ (void *)&lb_cmd_entry_dbg_p,
+ (void *)&lb_cmd_entry_dbg_entry_string,
+ (void *)&lb_cmd_entry_dbg_dbg_string,
+ (void *)&lb_cmd_entry_dbg_cmd,
+ (void *)&lb_cmd_entry_dbg_d1,
+ NULL,
+ },
+};
+
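+/*
+ * Editorial note: the tokens above register a CLI command of the form
+ * "p <pipeline_id> txrx dbg <cmd> <d1>", e.g. "p 2 txrx dbg 0 0" (values
+ * shown are illustrative), which is forwarded to the back-end through
+ * app_pipeline_txrx_entry_dbg().
+ */
+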
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &lb_cmd_entry_dbg,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_txrx_fe_ops = {
+ .f_init = NULL,
+ .f_free = NULL,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_txrx = {
+ .name = "TXRX",
+ .be_ops = &pipeline_txrx_be_ops,
+ .fe_ops = &pipeline_txrx_fe_ops,
+};
diff --git a/common/VIL/pipeline_txrx/pipeline_txrx.h b/common/VIL/pipeline_txrx/pipeline_txrx.h
new file mode 100644
index 00000000..99069246
--- /dev/null
+++ b/common/VIL/pipeline_txrx/pipeline_txrx.h
@@ -0,0 +1,28 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_TXRX_H__
+#define __INCLUDE_PIPELINE_TXRX_H__
+
+#include "pipeline.h"
+#include "pipeline_txrx_be.h"
+
+/*
+ * Pipeline type
+ */
+extern struct pipeline_type pipeline_txrx;
+
+#endif
diff --git a/common/VIL/pipeline_txrx/pipeline_txrx_be.c b/common/VIL/pipeline_txrx/pipeline_txrx_be.c
new file mode 100644
index 00000000..9e7645dd
--- /dev/null
+++ b/common/VIL/pipeline_txrx/pipeline_txrx_be.c
@@ -0,0 +1,915 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_table_stub.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_txrx_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+#include "pipeline_arpicmp_be.h"
+#include "vnf_common.h"
+#include "app.h"
+#ifndef VNF_ACL
+#include "lib_icmpv6.h"
+#endif
+
+uint8_t TXRX_DEBUG;
+int pkt_burst_cnt;
+
+struct pipeline_txrx {
+ struct pipeline p;
+ pipeline_msg_req_handler
+ custom_handlers[PIPELINE_TXRX_MSG_REQS];
+ uint64_t receivedPktCount;
+ uint64_t droppedPktCount;
+ uint8_t links_map[PIPELINE_MAX_PORT_IN];
+ uint8_t outport_id[PIPELINE_MAX_PORT_IN];
+ uint8_t pipeline_num;
+ uint8_t txrx_type;
+} __rte_cache_aligned;
+
+enum {
+ TYPE_TXTX,
+ TYPE_RXRX,
+};
+static void *pipeline_txrx_msg_req_custom_handler(struct pipeline *p,
+ void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_txrx_msg_req_custom_handler,
+
+};
+
+static void *pipeline_txrx_msg_req_entry_dbg_handler(struct pipeline *p,
+ void *msg);
+static void *pipeline_txrx_msg_req_entry_dbg_handler(
+ __rte_unused struct pipeline *p,
+ __rte_unused void *msg)
+{
+ /* TODO: handle the debug commands here */
+ return NULL;
+}
+
+static __rte_unused pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_TXRX_MSG_REQ_ENTRY_DBG] =
+ pipeline_txrx_msg_req_entry_dbg_handler,
+};
+
+/**
+ * Dispatch a custom message request to the matching TXRX handler
+ *
+ * @param p
+ *  Pointer to the pipeline
+ * @param msg
+ *  Pointer to the incoming request message
+ *
+ * @return
+ *  Pointer to the response message
+ */
+void *pipeline_txrx_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_txrx *p_txrx = (struct pipeline_txrx *)p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_TXRX_MSG_REQS) ?
+ p_txrx->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+/* Only the pipeline_txrx_type argument is parsed here;
+ * ARP arguments are handled in the ARP module.
+ */
+int
+pipeline_txrx_parse_args(struct pipeline_txrx *p,
+ struct pipeline_params *params);
+int
+pipeline_txrx_parse_args(struct pipeline_txrx *p,
+ struct pipeline_params *params)
+{
+ uint32_t i;
+ uint8_t txrx_type_present = 0;
+
+ if (TXRX_DEBUG > 2)
+ printf("TXRX pipeline_txrx_parse_args params->n_args: %d\n",
+ params->n_args);
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (TXRX_DEBUG > 2)
+ printf("TXRX args[%d]: %s %d, %s\n", i, arg_name,
+ atoi(arg_value), arg_value);
+
+ /* txrx_type = val */
+ if (strcmp(arg_name, "pipeline_txrx_type") == 0) {
+ if (txrx_type_present)
+ return -1;
+ txrx_type_present = 1;
+
+
+ if (strcmp(arg_value, "TXTX") == 0) {
+ p->txrx_type = TYPE_TXTX;
+ printf("pipeline_txrx_type is TXTX\n");
+ }
+ if (strcmp(arg_value, "RXRX") == 0) {
+ p->txrx_type = TYPE_RXRX;
+ printf("pipeline_txrx_type is RXRX\n");
+ }
+ continue;
+ }
+ }
+
+ if (!txrx_type_present) {
+ printf("TXRX type not specified\n");
+ return -1;
+ }
+
+ return 0;
+
+}
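+
+/*
+ * Illustrative configuration entry consumed by the parser above (value is
+ * an example): pipeline_txrx_type = TXTX or pipeline_txrx_type = RXRX.
+ */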
+
+uint32_t txrx_pkt_print_count;
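+
+/*
+ * Editorial summary: in RXRX mode the handlers below divert ARP frames, and
+ * ICMP packets addressed to the link IP arriving on private ports, to the
+ * last output port (assumed to feed the ARP/ICMP pipeline) and drop them
+ * from the current burst; with IPV6 enabled, ICMPv6 packets for the link or
+ * solicited-node multicast address are treated the same way. Frames with any
+ * other EtherType are dropped, while remaining IPv4/IPv6 traffic follows the
+ * stub tables to the default output ports.
+ */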
+static inline void
+pkt_work_txrx(struct rte_mbuf *pkt, uint32_t pkt_num, void *arg)
+{
+
+ struct pipeline_txrx_in_port_h_arg *ap = arg;
+ struct pipeline_txrx *p_txrx = (struct pipeline_txrx *)ap->p;
+ uint8_t solicited_node_multicast_addr[16] =
+ {0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
+
+ p_txrx->receivedPktCount++;
+
+ if (p_txrx->txrx_type == TYPE_TXTX)
+ return;
+
+ uint8_t in_port_id = pkt->port;
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+
+ uint32_t pkt_mask = 1 << pkt_num;
+ /* ARP outport number */
+ uint32_t out_port = p_txrx->p.n_ports_out - 1;
+
+ uint16_t *eth_proto =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt, eth_proto_offset);
+
+ uint8_t *protocol;
+ uint32_t prot_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
+
+ #ifdef IPV6
+ struct ipv6_hdr *ipv6_h;
+ ipv6_h = rte_pktmbuf_mtod_offset (pkt, struct ipv6_hdr *, sizeof(struct ether_hdr));
+ uint32_t prot_offset_ipv6 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
+
+ if (rte_be_to_cpu_16(*eth_proto) == ETHER_TYPE_IPv6)
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset_ipv6);
+ else
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
+ #else
+ protocol = RTE_MBUF_METADATA_UINT8_PTR(pkt, prot_offset);
+ #endif
+
+
+ if ((TXRX_DEBUG > 2) && (txrx_pkt_print_count < 10)) {
+ print_pkt1(pkt);
+ txrx_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto), *protocol, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+ /* Classifier for ICMP pass-through */
+ struct app_link_params *link;
+
+ link = &myApp->link_params[in_port_id];
+
+ /* header room + eth hdr size + dst_addr offset in IP header */
+ uint32_t dst_addr_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr = RTE_MBUF_METADATA_UINT32_PTR(pkt, dst_addr_offset);
+
+ if (TXRX_DEBUG > 2)
+ if (rte_be_to_cpu_16(*eth_proto) == ETH_TYPE_IPV4)
+ printf ("%s: linkIp: %x, dst_addr: %x\n", __FUNCTION__, link->ip, *dst_addr);
+
+ #if 1
+ switch (rte_be_to_cpu_16(*eth_proto)) {
+ case ETH_TYPE_ARP:
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask);
+ break;
+
+ case ETH_TYPE_IPV4:
+ if ((*protocol == IP_PROTOCOL_ICMP) &&
+ (link->ip == rte_be_to_cpu_32(*dst_addr))) {
+ if (is_phy_port_privte(pkt->port)) {
+ rte_pipeline_port_out_packet_insert(
+ p_txrx->p.p,
+ out_port, pkt);
+ rte_pipeline_ah_packet_drop(
+ p_txrx->p.p,
+ pkt_mask);
+ }
+ }
+
+ break;
+
+ #ifdef IPV6
+ case ETH_TYPE_IPV6:
+ if (*protocol == ICMPV6_PROTOCOL_ID) {
+ #ifndef VNF_ACL
+ if (!memcmp(ipv6_h->dst_addr, link->ipv6, 16)
+ || !memcmp(ipv6_h->dst_addr, solicited_node_multicast_addr, 13)) {
+ #endif
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask);
+ #ifndef VNF_ACL
+ } else {
+ printf("Dropping the IPv6 pkt\n");
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask);
+ }
+ #endif
+ }
+ break;
+ #endif
+
+ default: /* Not valid pkt */
+ printf("Dropping the pkt\n");
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask);
+
+ }
+ #endif
+
+}
+
+static inline void
+pkt4_work_txrx(struct rte_mbuf **pkt, uint32_t pkt_num, void *arg)
+{
+ struct pipeline_txrx_in_port_h_arg *ap = arg;
+ struct pipeline_txrx *p_txrx = (struct pipeline_txrx *)ap->p;
+ uint8_t solicited_node_multicast_addr[16] =
+ {0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00};
+
+ if (p_txrx->txrx_type == TYPE_TXTX)
+ return;
+
+ uint16_t in_port_id = (*pkt)->port;
+ uint32_t eth_proto_offset = MBUF_HDR_ROOM + 12;
+
+
+ uint32_t pkt_mask0 = 1 << pkt_num;
+ uint32_t pkt_mask1 = 1 << (pkt_num + 1);
+ uint32_t pkt_mask2 = 1 << (pkt_num + 2);
+ uint32_t pkt_mask3 = 1 << (pkt_num + 3);
+
+ /* ARP outport number */
+ uint32_t out_port = p_txrx->p.n_ports_out - 1;
+
+ uint16_t *eth_proto0 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[0], eth_proto_offset);
+ uint16_t *eth_proto1 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[1], eth_proto_offset);
+ uint16_t *eth_proto2 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[2], eth_proto_offset);
+ uint16_t *eth_proto3 =
+ RTE_MBUF_METADATA_UINT16_PTR(pkt[3], eth_proto_offset);
+
+ uint8_t *protocol0, *protocol1, *protocol2, *protocol3;
+ uint32_t prot_offset =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_PROTOCOL_OFST;
+
+ #ifdef IPV6
+ struct ipv6_hdr *ipv6_h0, *ipv6_h1, *ipv6_h2, *ipv6_h3;
+ ipv6_h0 = rte_pktmbuf_mtod_offset (pkt[0], struct ipv6_hdr *, sizeof(struct ether_hdr));
+ uint32_t prot_offset_ipv6 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IPV6_HDR_PROTOCOL_OFST;
+
+/* --0-- */
+ if (rte_be_to_cpu_16(*eth_proto0) == ETHER_TYPE_IPv6)
+ protocol0 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[0], prot_offset_ipv6);
+ else
+ protocol0 = RTE_MBUF_METADATA_UINT8_PTR(pkt[0], prot_offset);
+
+/* --1-- */
+ ipv6_h1 = rte_pktmbuf_mtod_offset (pkt[1], struct ipv6_hdr *, sizeof(struct ether_hdr));
+ if (rte_be_to_cpu_16(*eth_proto1) == ETHER_TYPE_IPv6)
+ protocol1 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[1], prot_offset_ipv6);
+ else
+ protocol1 = RTE_MBUF_METADATA_UINT8_PTR(pkt[1], prot_offset);
+
+/* --2-- */
+ ipv6_h2 = rte_pktmbuf_mtod_offset (pkt[2], struct ipv6_hdr *, sizeof(struct ether_hdr));
+ if (rte_be_to_cpu_16(*eth_proto2) == ETHER_TYPE_IPv6)
+ protocol2 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[2], prot_offset_ipv6);
+ else
+ protocol2 = RTE_MBUF_METADATA_UINT8_PTR(pkt[2], prot_offset);
+
+/* --3-- */
+ ipv6_h3 = rte_pktmbuf_mtod_offset (pkt[3], struct ipv6_hdr *, sizeof(struct ether_hdr));
+ if (rte_be_to_cpu_16(*eth_proto3) == ETHER_TYPE_IPv6)
+ protocol3 =
+ RTE_MBUF_METADATA_UINT8_PTR(pkt[3], prot_offset_ipv6);
+ else
+ protocol3 = RTE_MBUF_METADATA_UINT8_PTR(pkt[3], prot_offset);
+ #else
+ protocol0 = RTE_MBUF_METADATA_UINT8_PTR(pkt[0], prot_offset);
+ protocol1 = RTE_MBUF_METADATA_UINT8_PTR(pkt[1], prot_offset);
+ protocol2 = RTE_MBUF_METADATA_UINT8_PTR(pkt[2], prot_offset);
+ protocol3 = RTE_MBUF_METADATA_UINT8_PTR(pkt[3], prot_offset);
+ #endif
+
+ if ((TXRX_DEBUG > 2) && (txrx_pkt_print_count < 10)) {
+ print_pkt1(pkt[0]);
+ txrx_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto0), *protocol0, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ struct app_link_params *link;
+
+ link = &myApp->link_params[in_port_id];
+
+ /* header room + eth hdr size + dst_addr offset in IP header */
+ uint32_t dst_addr_offset0 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr0 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[0], dst_addr_offset0);
+
+ if (TXRX_DEBUG > 2)
+ if (rte_be_to_cpu_16(*eth_proto0) == ETH_TYPE_IPV4)
+ printf ("%s: linkIp: %x, dst_addr0: %x\n", __FUNCTION__, link->ip, *dst_addr0);
+
+ #if 1
+ switch (rte_be_to_cpu_16(*eth_proto0)) {
+ case ETH_TYPE_ARP:
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[0]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask0);
+ break;
+
+ case ETH_TYPE_IPV4:
+ if ((*protocol0 == IP_PROTOCOL_ICMP) &&
+ (link->ip == rte_be_to_cpu_32(*dst_addr0))) {
+ if (is_phy_port_privte(pkt[0]->port)) {
+ rte_pipeline_port_out_packet_insert(
+ p_txrx->p.p, out_port, pkt[0]);
+ rte_pipeline_ah_packet_drop(
+ p_txrx->p.p, pkt_mask0);
+ }
+ }
+
+ break;
+
+ #ifdef IPV6
+ case ETH_TYPE_IPV6:
+ if (*protocol0 == ICMPV6_PROTOCOL_ID) {
+ #ifndef VNF_ACL
+ if (!memcmp(ipv6_h0->dst_addr, link->ipv6, 16)
+ || !memcmp(ipv6_h0->dst_addr, solicited_node_multicast_addr, 13)) {
+ #endif
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[0]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask0);
+
+ #ifndef VNF_ACL
+ } else {
+ printf("Dropping the IPv6 pkt\n");
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask0);
+ }
+ #endif
+ }
+ break;
+ #endif
+
+ default: /* Not valid pkt */
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask0);
+
+ }
+ #endif
+
+ if ((TXRX_DEBUG > 2) && (txrx_pkt_print_count < 10)) {
+ print_pkt1(pkt[1]);
+ txrx_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto1), *protocol1, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ /* header room + eth hdr size + dst_addr offset in IP header */
+ uint32_t dst_addr_offset1 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr1 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[1], dst_addr_offset1);
+
+ if (TXRX_DEBUG > 2)
+ if (rte_be_to_cpu_16(*eth_proto1) == ETH_TYPE_IPV4)
+ printf ("%s: linkIp: %x, dst_addr1: %x\n", __FUNCTION__, link->ip, *dst_addr1);
+
+ switch (rte_be_to_cpu_16(*eth_proto1)) {
+ case ETH_TYPE_ARP:
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[1]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask1);
+ break;
+
+ case ETH_TYPE_IPV4:
+ if ((*protocol1 == IP_PROTOCOL_ICMP) &&
+ (link->ip == rte_be_to_cpu_32(*dst_addr1))) {
+ if (is_phy_port_privte(pkt[1]->port)) {
+ rte_pipeline_port_out_packet_insert(
+ p_txrx->p.p,
+ out_port, pkt[1]);
+ rte_pipeline_ah_packet_drop(
+ p_txrx->p.p,
+ pkt_mask1);
+ }
+ }
+
+ break;
+
+ #ifdef IPV6
+ case ETH_TYPE_IPV6:
+ if (*protocol1 == ICMPV6_PROTOCOL_ID) {
+ #ifndef VNF_ACL
+ if (!memcmp(ipv6_h1->dst_addr, link->ipv6, 16)
+ || !memcmp(ipv6_h1->dst_addr, solicited_node_multicast_addr, 13)) {
+ #endif
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[1]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask1);
+ #ifndef VNF_ACL
+ } else {
+ printf("Dropping the IPv6 pkt\n");
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask1);
+ }
+ #endif
+ }
+ break;
+ #endif
+
+ default: /* Not valid pkt */
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask1);
+
+ }
+
+ if ((TXRX_DEBUG > 2) && (txrx_pkt_print_count < 10)) {
+ print_pkt1(pkt[2]);
+ txrx_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto2), *protocol2, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ /* header room + eth hdr size + dst_addr offset in IP header */
+ uint32_t dst_addr_offset2 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr2 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[2], dst_addr_offset2);
+
+ if (TXRX_DEBUG > 2)
+ if (rte_be_to_cpu_16(*eth_proto2) == ETH_TYPE_IPV4)
+ printf ("%s: linkIp: %x, dst_addr2: %x\n", __FUNCTION__, link->ip, *dst_addr2);
+
+ switch (rte_be_to_cpu_16(*eth_proto2)) {
+ case ETH_TYPE_ARP:
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[2]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask2);
+ break;
+
+ case ETH_TYPE_IPV4:
+ if ((*protocol2 == IP_PROTOCOL_ICMP) &&
+ (link->ip == rte_be_to_cpu_32(*dst_addr2))) {
+ if (is_phy_port_privte(pkt[2]->port)) {
+ rte_pipeline_port_out_packet_insert(
+ p_txrx->p.p,
+ out_port, pkt[2]);
+ rte_pipeline_ah_packet_drop(
+ p_txrx->p.p,
+ pkt_mask2);
+ }
+ }
+
+ break;
+
+ #ifdef IPV6
+ case ETH_TYPE_IPV6:
+ if (*protocol2 == ICMPV6_PROTOCOL_ID) {
+ #ifndef VNF_ACL
+ if (!memcmp(ipv6_h2->dst_addr, link->ipv6, 16)
+ || !memcmp(ipv6_h2->dst_addr, solicited_node_multicast_addr, 13)) {
+ #endif
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[2]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask2);
+ #ifndef VNF_ACL
+ } else {
+ printf("Dropping the IPv6 pkt\n");
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask2);
+ }
+ #endif
+ }
+ break;
+ #endif
+
+ default: /* Not valid pkt */
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask2);
+
+ }
+
+ if ((TXRX_DEBUG > 2) && (txrx_pkt_print_count < 10)) {
+ print_pkt1(pkt[3]);
+ txrx_pkt_print_count++;
+ printf("\nEth Typ %x, Prot %x, ETH_TYPE_ARP %x, "
+ "ETH_TYPE_IPV4 %x, IP_PROTOCOL_ICMP %x\n",
+ rte_be_to_cpu_16(*eth_proto3), *protocol3, ETH_TYPE_ARP,
+ ETH_TYPE_IPV4, IP_PROTOCOL_ICMP);
+ }
+
+ /* header room + eth hdr size + dst_addr offset in IP header */
+ uint32_t dst_addr_offset3 =
+ MBUF_HDR_ROOM + ETH_HDR_SIZE + IP_HDR_DST_ADR_OFST;
+ uint32_t *dst_addr3 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt[3], dst_addr_offset3);
+
+ if (TXRX_DEBUG > 2)
+ if (rte_be_to_cpu_16(*eth_proto3) == ETH_TYPE_IPV4)
+ printf ("%s: linkIp: %x, dst_addr3: %x\n", __FUNCTION__, link->ip, *dst_addr3);
+
+ switch (rte_be_to_cpu_16(*eth_proto3)) {
+ case ETH_TYPE_ARP:
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[3]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask3);
+ break;
+
+ case ETH_TYPE_IPV4:
+ if ((*protocol3 == IP_PROTOCOL_ICMP) &&
+ (link->ip == rte_be_to_cpu_32(*dst_addr3))) {
+ if (is_phy_port_privte(pkt[3]->port)) {
+ rte_pipeline_port_out_packet_insert(
+ p_txrx->p.p,
+ out_port, pkt[3]);
+ rte_pipeline_ah_packet_drop(
+ p_txrx->p.p,
+ pkt_mask3);
+ }
+ }
+
+ break;
+
+ #ifdef IPV6
+ case ETH_TYPE_IPV6:
+ if (*protocol3 == ICMPV6_PROTOCOL_ID) {
+ #ifndef VNF_ACL
+ if (!memcmp(ipv6_h3->dst_addr, link->ipv6, 16)
+ || !memcmp(ipv6_h3->dst_addr, solicited_node_multicast_addr, 13)) {
+ #endif
+ rte_pipeline_port_out_packet_insert(p_txrx->p.p,
+ out_port, pkt[3]);
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask3);
+ #ifndef VNF_ACL
+ } else {
+ printf("Dropping the IPv6 pkt\n");
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask3);
+ }
+ #endif
+ }
+ break;
+ #endif
+
+ default: /* Not valid pkt */
+ rte_pipeline_ah_packet_drop(p_txrx->p.p, pkt_mask3);
+
+ }
+
+ p_txrx->receivedPktCount += 4;
+
+}
+
+PIPELINE_TXRX_KEY_PORT_IN_AH(port_in_ah_txrx, pkt_work_txrx, pkt4_work_txrx);
+
+static void *pipeline_txrx_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_txrx *p_pt;
+ uint32_t size, i, in_ports_arg_size;
+
+ printf("Start pipeline_txrx_init\n");
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_txrx));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_pt = (struct pipeline_txrx *)p;
+ if (p == NULL)
+ return NULL;
+
+ PLOG(p, HIGH, "TXRX");
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ p_pt->receivedPktCount = 0;
+ p_pt->droppedPktCount = 0;
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++)
+ p_pt->links_map[i] = 0xff;
+
+ p_pt->pipeline_num = 0;
+ printf("txrx initialization of variables done\n");
+
+ /* Parse arguments */
+ if (pipeline_txrx_parse_args(p_pt, params)) {
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "TXRX",
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ p->n_tables = p->n_ports_in;
+
+ /* Memory allocation for in_port_h_arg */
+ in_ports_arg_size =
+ RTE_CACHE_LINE_ROUNDUP((sizeof
+ (struct pipeline_txrx_in_port_h_arg)) *
+ (params->n_ports_in));
+ struct pipeline_txrx_in_port_h_arg *ap =
+ (struct pipeline_txrx_in_port_h_arg *)rte_zmalloc(NULL,
+ in_ports_arg_size,
+ RTE_CACHE_LINE_SIZE);
+ if (ap == NULL) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ /*Input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ /* passing our txrx pipeline in call back arg */
+ (ap[i]).p = p_pt;
+ (ap[i]).in_port_id = i;
+ struct rte_pipeline_port_in_params port_params = {
+ .ops =
+ pipeline_port_in_params_get_ops(&params->
+ port_in[i]),
+ .arg_create =
+ pipeline_port_in_params_convert(&params->
+ port_in[i]),
+ .f_action = NULL,
+ .arg_ah = &(ap[i]),
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ port_params.f_action = port_in_ah_txrx;
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops =
+ pipeline_port_out_params_get_ops(&params->
+ port_out[i]),
+ .arg_create =
+ pipeline_port_out_params_convert(&params->
+ port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ int pipeline_num = 0;
+ int status = sscanf(params->name, "PIPELINE%d", &pipeline_num);
+ if (status < 0) {
+ printf("Unable to read pipeline number\n");
+ return NULL;
+ }
+ p_pt->pipeline_num = (uint8_t) pipeline_num;
+
+ register_pipeline_Qs(p_pt->pipeline_num, p);
+ set_link_map(p_pt->pipeline_num, p, p_pt->links_map);
+ set_outport_id(p_pt->pipeline_num, p, p_pt->outport_id);
+
+ /* Tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ int status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Add entries to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_entry default_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ .port_id = p->port_out_id[i],
+ };
+
+ struct rte_pipeline_table_entry *default_entry_ptr;
+
+ int status = rte_pipeline_table_default_entry_add(
+ p->p,
+ p->table_id[i],
+ &default_entry,
+ &default_entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+
+ return p;
+}
+
+static int pipeline_txrx_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int pipeline_txrx_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+static int
+pipeline_txrx_track(void *pipeline, uint32_t port_in, uint32_t *port_out)
+{
+ struct pipeline *p = (struct pipeline *)pipeline;
+
+ /* Check input arguments */
+ if ((p == NULL) || (port_in >= p->n_ports_in) || (port_out == NULL))
+ return -1;
+
+ *port_out = port_in / p->n_ports_in;
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_txrx_be_ops = {
+ .f_init = pipeline_txrx_init,
+ .f_free = pipeline_txrx_free,
+ .f_run = NULL,
+ .f_timer = pipeline_txrx_timer,
+ .f_track = pipeline_txrx_track,
+};
diff --git a/common/VIL/pipeline_txrx/pipeline_txrx_be.h b/common/VIL/pipeline_txrx/pipeline_txrx_be.h
new file mode 100644
index 00000000..f794729e
--- /dev/null
+++ b/common/VIL/pipeline_txrx/pipeline_txrx_be.h
@@ -0,0 +1,76 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_TXRX_BE_H__
+#define __INCLUDE_PIPELINE_TXRX_BE_H__
+
+#include "pipeline_common_be.h"
+#define PIPELINE_TXRX_KEY_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *rte_p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i, j; \
+ \
+ for (j = 0; j < n_pkts; j++) \
+ rte_prefetch0(pkts[j]); \
+ \
+ pkt_burst_cnt = 0; \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], i, arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], i, arg); \
+ \
+ \
+ return 0; \
+}
+
+extern struct pipeline_be_ops pipeline_txrx_be_ops;
+/*
+ * Messages
+ */
+enum pipeline_txrx_msg_req_type {
+ PIPELINE_TXRX_MSG_REQ_ENTRY_DBG,
+ PIPELINE_TXRX_MSG_REQS
+};
+/*
+ * MSG ENTRY DBG
+ */
+struct pipeline_txrx_entry_dbg_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_txrx_msg_req_type subtype;
+
+ /* data */
+ uint8_t data[5];
+};
+/*
+ * TXRX Entry
+ */
+
+struct pipeline_txrx_in_port_h_arg {
+ struct pipeline_txrx *p;
+ uint8_t in_port_id;
+};
+
+struct pipeline_txrx_entry_dbg_msg_rsp {
+ int status;
+};
+
+#endif
diff --git a/common/vnf_common/app.h b/common/vnf_common/app.h
new file mode 100644
index 00000000..2af62f11
--- /dev/null
+++ b/common/vnf_common/app.h
@@ -0,0 +1,972 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_APP_H__
+#define __INCLUDE_APP_H__
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_ring.h>
+#include <rte_sched.h>
+#include <rte_timer.h>
+#include <cmdline_parse.h>
+
+#include <rte_ethdev.h>
+
+#include "cpu_core_map.h"
+#include "pipeline.h"
+
+#define APP_PARAM_NAME_SIZE PIPELINE_NAME_SIZE
+#define APP_LINK_PCI_BDF_SIZE 16
+struct app_link_params *fdir_p_link;
+struct app_mempool_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+ uint32_t cpu_socket_id;
+};
+
+struct app_link_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t pmd_id; /* Generated based on port mask */
+ uint32_t arp_q; /* 0 = Disabled (packets go to default queue 0) */
+ uint32_t tcp_syn_q; /* 0 = Disabled (pkts go to default queue) */
+ uint32_t ip_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t state; /* DOWN = 0, UP = 1 */
+ uint32_t ip; /* 0 = Invalid */
+ uint8_t ipv6[16];
+ uint32_t depth; /* Valid only when IP is valid */
+ uint32_t depth_ipv6;
+ uint64_t mac_addr; /* Read from HW */
+ char pci_bdf[APP_LINK_PCI_BDF_SIZE];
+
+ struct rte_eth_conf conf;
+ uint8_t promisc;
+};
+
+struct app_pktq_hwq_in_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+ uint32_t size;
+ uint32_t burst;
+
+ struct rte_eth_rxconf conf;
+};
+
+struct app_pktq_hwq_out_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t burst;
+ uint32_t dropless;
+ uint64_t n_retries;
+ struct rte_eth_txconf conf;
+};
+
+struct app_pktq_swq_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+ uint32_t cpu_socket_id;
+ uint32_t ipv4_frag;
+ uint32_t ipv6_frag;
+ uint32_t ipv4_ras;
+ uint32_t ipv6_ras;
+ uint32_t mtu;
+ uint32_t metadata_size;
+ uint32_t mempool_direct_id;
+ uint32_t mempool_indirect_id;
+};
+
+#ifndef APP_FILE_NAME_SIZE
+#define APP_FILE_NAME_SIZE 256
+#endif
+
+#ifndef APP_MAX_SCHED_SUBPORTS
+#define APP_MAX_SCHED_SUBPORTS 8
+#endif
+
+#ifndef APP_MAX_SCHED_PIPES
+#define APP_MAX_SCHED_PIPES 4096
+#endif
+
+struct app_pktq_tm_params {
+ char *name;
+ uint32_t parsed;
+ const char *file_name;
+ struct rte_sched_port_params sched_port_params;
+ struct rte_sched_subport_params
+ sched_subport_params[APP_MAX_SCHED_SUBPORTS];
+ struct rte_sched_pipe_params
+ sched_pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ int sched_pipe_to_profile[APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES];
+ uint32_t burst_read;
+ uint32_t burst_write;
+};
+
+struct app_pktq_source_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t mempool_id; /* Position in the app->mempool_params array */
+ uint32_t burst;
+ char *file_name; /* Full path of PCAP file to be copied to mbufs */
+ uint32_t n_bytes_per_pkt;
+};
+
+struct app_pktq_sink_params {
+ char *name;
+ uint8_t parsed;
+ char *file_name; /* Full path of PCAP file where packets are dumped */
+ uint32_t n_pkts_to_dump;
+};
+
+struct app_msgq_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t cpu_socket_id;
+};
+
+enum app_pktq_in_type {
+ APP_PKTQ_IN_HWQ,
+ APP_PKTQ_IN_SWQ,
+ APP_PKTQ_IN_TM,
+ APP_PKTQ_IN_SOURCE,
+};
+
+struct app_pktq_in_params {
+ enum app_pktq_in_type type;
+ uint32_t id; /* Position in the appropriate app array */
+};
+
+enum app_pktq_out_type {
+ APP_PKTQ_OUT_HWQ,
+ APP_PKTQ_OUT_SWQ,
+ APP_PKTQ_OUT_TM,
+ APP_PKTQ_OUT_SINK,
+};
+
+struct app_pktq_out_params {
+ enum app_pktq_out_type type;
+ uint32_t id; /* Position in the appropriate app array */
+};
+
+#ifndef APP_PIPELINE_TYPE_SIZE
+#define APP_PIPELINE_TYPE_SIZE 64
+#endif
+
+#define APP_MAX_PIPELINE_PKTQ_IN PIPELINE_MAX_PORT_IN
+#define APP_MAX_PIPELINE_PKTQ_OUT PIPELINE_MAX_PORT_OUT
+#define APP_MAX_PIPELINE_MSGQ_IN PIPELINE_MAX_MSGQ_IN
+#define APP_MAX_PIPELINE_MSGQ_OUT PIPELINE_MAX_MSGQ_OUT
+
+#define APP_MAX_PIPELINE_ARGS PIPELINE_MAX_ARGS
+
+struct app_pipeline_params {
+ char *name;
+ uint8_t parsed;
+
+ char type[APP_PIPELINE_TYPE_SIZE];
+
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t hyper_th_id;
+
+ struct app_pktq_in_params pktq_in[APP_MAX_PIPELINE_PKTQ_IN];
+ struct app_pktq_out_params pktq_out[APP_MAX_PIPELINE_PKTQ_OUT];
+ uint32_t msgq_in[APP_MAX_PIPELINE_MSGQ_IN];
+ uint32_t msgq_out[APP_MAX_PIPELINE_MSGQ_OUT];
+
+ uint32_t n_pktq_in;
+ uint32_t n_pktq_out;
+ uint32_t n_msgq_in;
+ uint32_t n_msgq_out;
+
+ uint32_t timer_period;
+
+ char *args_name[APP_MAX_PIPELINE_ARGS];
+ char *args_value[APP_MAX_PIPELINE_ARGS];
+ uint32_t n_args;
+};
+
+struct app_pipeline_data {
+ void *be;
+ void *fe;
+ struct pipeline_type *ptype;
+ uint64_t timer_period;
+ uint32_t enabled;
+};
+
+struct app_thread_pipeline_data {
+ uint32_t pipeline_id;
+ void *be;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ uint64_t timer_period;
+ uint64_t deadline;
+};
+
+#ifndef APP_MAX_THREAD_PIPELINES
+#define APP_MAX_THREAD_PIPELINES 16
+#endif
+
+#ifndef APP_THREAD_TIMER_PERIOD
+#define APP_THREAD_TIMER_PERIOD 1
+#endif
+
+struct app_thread_data {
+ struct app_thread_pipeline_data regular[APP_MAX_THREAD_PIPELINES];
+ struct app_thread_pipeline_data custom[APP_MAX_THREAD_PIPELINES];
+
+ uint32_t n_regular;
+ uint32_t n_custom;
+
+ uint64_t timer_period;
+ uint64_t thread_req_deadline;
+
+ uint64_t deadline;
+
+ struct rte_ring *msgq_in;
+ struct rte_ring *msgq_out;
+
+ uint64_t headroom_time;
+ uint64_t headroom_cycles;
+ double headroom_ratio;
+};
+
+#ifndef APP_MAX_LINKS
+#define APP_MAX_LINKS 16
+#endif
+
+struct app_eal_params {
+ /* Map lcore set to physical cpu set */
+ char *coremap;
+
+ /* Core ID that is used as master */
+ uint32_t master_lcore_present;
+ uint32_t master_lcore;
+
+ /* Number of memory channels */
+ uint32_t channels_present;
+ uint32_t channels;
+
+ /* Memory to allocate (see also --socket-mem) */
+ uint32_t memory_present;
+ uint32_t memory;
+
+ /* Force number of memory ranks (don't detect) */
+ uint32_t ranks_present;
+ uint32_t ranks;
+
+ /* Add a PCI device in black list. */
+ char *pci_blacklist[APP_MAX_LINKS];
+
+ /* Add a PCI device in white list. */
+ char *pci_whitelist[APP_MAX_LINKS];
+
+ /* Add a virtual device. */
+ char *vdev[APP_MAX_LINKS];
+
+ /* Use VMware TSC map instead of native RDTSC */
+ uint32_t vmware_tsc_map_present;
+ int vmware_tsc_map;
+
+ /* Type of this process (primary|secondary|auto) */
+ char *proc_type;
+
+ /* Set syslog facility */
+ char *syslog;
+
+ /* Set default log level */
+ uint32_t log_level_present;
+ uint32_t log_level;
+
+ /* Display version information on startup */
+ uint32_t version_present;
+ int version;
+
+ /* This help */
+ uint32_t help_present;
+ int help;
+
+ /* Use malloc instead of hugetlbfs */
+ uint32_t no_huge_present;
+ int no_huge;
+
+ /* Disable PCI */
+ uint32_t no_pci_present;
+ int no_pci;
+
+ /* Disable HPET */
+ uint32_t no_hpet_present;
+ int no_hpet;
+
+ /* No shared config (mmap'd files) */
+ uint32_t no_shconf_present;
+ int no_shconf;
+
+ /* Add driver */
+ char *add_driver;
+
+ /* Memory to allocate on sockets (comma separated values)*/
+ char *socket_mem;
+
+ /* Directory where hugetlbfs is mounted */
+ char *huge_dir;
+
+ /* Prefix for hugepage filenames */
+ char *file_prefix;
+
+ /* Base virtual address */
+ char *base_virtaddr;
+
+ /* Create /dev/uioX (usually done by hotplug) */
+ uint32_t create_uio_dev_present;
+ int create_uio_dev;
+
+ /* Interrupt mode for VFIO (legacy|msi|msix) */
+ char *vfio_intr;
+
+ /* Support running on Xen dom0 without hugetlbfs */
+ uint32_t xen_dom0_present;
+ int xen_dom0;
+};
+
+#ifndef APP_APPNAME_SIZE
+#define APP_APPNAME_SIZE 256
+#endif
+
+#ifndef APP_MAX_MEMPOOLS
+#define APP_MAX_MEMPOOLS 8
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_IN
+#define APP_LINK_MAX_HWQ_IN 64
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_OUT
+#define APP_LINK_MAX_HWQ_OUT 64
+#endif
+
+#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
+
+#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
+
+#ifndef APP_MAX_PKTQ_SWQ
+#define APP_MAX_PKTQ_SWQ 256
+#endif
+
+#define APP_MAX_PKTQ_TM APP_MAX_LINKS
+
+#ifndef APP_MAX_PKTQ_SOURCE
+#define APP_MAX_PKTQ_SOURCE 16
+#endif
+
+#ifndef APP_MAX_PKTQ_SINK
+#define APP_MAX_PKTQ_SINK 16
+#endif
+
+#ifndef APP_MAX_MSGQ
+#define APP_MAX_MSGQ 128
+#endif
+
+#ifndef APP_MAX_PIPELINES
+#define APP_MAX_PIPELINES 64
+#endif
+
+#ifndef APP_EAL_ARGC
+#define APP_EAL_ARGC 32
+#endif
+
+#ifndef APP_MAX_PIPELINE_TYPES
+#define APP_MAX_PIPELINE_TYPES 64
+#endif
+
+#ifndef APP_MAX_THREADS
+#define APP_MAX_THREADS RTE_MAX_LCORE
+#endif
+
+#ifndef APP_MAX_CMDS
+#define APP_MAX_CMDS 128
+#endif
+
+#ifndef APP_THREAD_HEADROOM_STATS_COLLECT
+#define APP_THREAD_HEADROOM_STATS_COLLECT 1
+#endif
+
+uint8_t enable_hwlb;
+uint8_t enable_flow_dir;
+
+#define APP_CORE_MASK_SIZE \
+ (RTE_MAX_LCORE / 64 + ((RTE_MAX_LCORE % 64) ? 1 : 0))
+
+struct app_params {
+ /* Config */
+ char app_name[APP_APPNAME_SIZE];
+ const char *config_file;
+ const char *script_file;
+ const char *parser_file;
+ const char *output_file;
+ const char *preproc;
+ const char *preproc_args;
+ uint64_t port_mask;
+ uint32_t log_level;
+
+ struct app_eal_params eal_params;
+ struct app_mempool_params mempool_params[APP_MAX_MEMPOOLS];
+ struct app_link_params link_params[APP_MAX_LINKS];
+ struct app_pktq_hwq_in_params hwq_in_params[APP_MAX_HWQ_IN];
+ struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
+ struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
+ struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
+ struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
+ struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
+ struct app_msgq_params msgq_params[APP_MAX_MSGQ];
+ struct app_pipeline_params pipeline_params[APP_MAX_PIPELINES];
+
+ uint32_t n_mempools;
+ uint32_t n_links;
+ uint32_t n_pktq_hwq_in;
+ uint32_t n_pktq_hwq_out;
+ uint32_t n_pktq_swq;
+ uint32_t n_pktq_tm;
+ uint32_t n_pktq_source;
+ uint32_t n_pktq_sink;
+ uint32_t n_msgq;
+ uint32_t n_pipelines;
+
+ uint32_t header_csum_req;
+ uint32_t n_hwlb_q;
+ /* Init */
+ char *eal_argv[1 + APP_EAL_ARGC];
+ struct cpu_core_map *core_map;
+ uint64_t core_mask[APP_CORE_MASK_SIZE];
+ struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
+ struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
+ struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
+ struct rte_ring *msgq[APP_MAX_MSGQ];
+ struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
+ struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
+ struct app_thread_data thread_data[APP_MAX_THREADS];
+ cmdline_parse_ctx_t cmds[APP_MAX_CMDS + 1];
+
+ int eal_argc;
+ uint32_t n_pipeline_types;
+ uint32_t n_cmds;
+};
+
+#define APP_PARAM_VALID(obj) ((obj)->name != NULL)
+
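+/*
+ * Helpers for the fixed-size parameter arrays in struct app_params:
+ * COUNT tallies the valid (named) entries, FIND and FIND_BY_ID look up
+ * an entry by name, and ADD returns an existing entry or claims the
+ * first free slot for a new name.
+ */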
+#define APP_PARAM_COUNT(obj_array, n_objs) \
+{ \
+ size_t i; \
+ \
+ n_objs = 0; \
+ for (i = 0; i < RTE_DIM(obj_array); i++) \
+ if (APP_PARAM_VALID(&((obj_array)[i]))) \
+ n_objs++; \
+}
+
+#define APP_PARAM_FIND(obj_array, key) \
+({ \
+ ssize_t obj_idx; \
+ const ssize_t obj_count = RTE_DIM(obj_array); \
+ \
+ for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
+ if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
+ continue; \
+ \
+ if (strcmp(key, (obj_array)[obj_idx].name) == 0) \
+ break; \
+ } \
+ obj_idx < obj_count ? obj_idx : -ENOENT; \
+})
+
+#define APP_PARAM_FIND_BY_ID(obj_array, prefix, id, obj) \
+do { \
+ char name[APP_PARAM_NAME_SIZE]; \
+ ssize_t pos; \
+ \
+ sprintf(name, prefix "%" PRIu32, id); \
+ pos = APP_PARAM_FIND(obj_array, name); \
+ obj = (pos < 0) ? NULL : &((obj_array)[pos]); \
+} while (0)
+
+#define APP_PARAM_GET_ID(obj, prefix, id) \
+do { \
+ sscanf(obj->name, prefix "%" SCNu32, &id); \
+} while (0)
+
+#define APP_PARAM_ADD(obj_array, obj_name) \
+({ \
+ ssize_t obj_idx; \
+ const ssize_t obj_count = RTE_DIM(obj_array); \
+ \
+ obj_idx = APP_PARAM_FIND(obj_array, obj_name); \
+ if (obj_idx < 0) { \
+ for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
+ if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
+ break; \
+ } \
+ \
+ if (obj_idx < obj_count) { \
+ (obj_array)[obj_idx].name = strdup(obj_name); \
+ if ((obj_array)[obj_idx].name == NULL) \
+ obj_idx = -EINVAL; \
+ } else \
+ obj_idx = -ENOMEM; \
+ } \
+ obj_idx; \
+})
+
+#define APP_CHECK(exp, fmt, ...) \
+do { \
+ if (!(exp)) { \
+ fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
+ abort(); \
+ } \
+} while (0)
+
+enum app_log_level {
+ APP_LOG_LEVEL_HIGH = 1,
+ APP_LOG_LEVEL_LOW,
+ APP_LOG_LEVELS
+};
+
+#define APP_LOG(app, level, fmt, ...) \
+do { \
+ if (app->log_level >= APP_LOG_LEVEL_ ## level) \
+ fprintf(stdout, "[APP] " fmt "\n", ## __VA_ARGS__); \
+} while (0)
+
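+/*
+ * Hardware queues are named by convention ("RXQ<link>.<queue>",
+ * "TXQ<link>.<queue>"), so the per-link queue counts below are obtained
+ * by scanning the queue parameter arrays for a matching link id.
+ */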
+static inline uint32_t
+app_link_get_n_rxq(struct app_params *app, struct app_link_params *link)
+{
+ uint32_t n_rxq = 0, link_id, i;
+ uint32_t n_pktq_hwq_in = RTE_MIN(app->n_pktq_hwq_in,
+ RTE_DIM(app->hwq_in_params));
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ for (i = 0; i < n_pktq_hwq_in; i++) {
+ struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ if (rxq_link_id == link_id)
+ n_rxq++;
+ }
+
+ return n_rxq;
+}
+
+static inline uint32_t
+app_link_get_n_txq(struct app_params *app, struct app_link_params *link)
+{
+ uint32_t n_txq = 0, link_id, i;
+ uint32_t n_pktq_hwq_out = RTE_MIN(app->n_pktq_hwq_out,
+ RTE_DIM(app->hwq_out_params));
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ for (i = 0; i < n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ if (txq_link_id == link_id)
+ n_txq++;
+ }
+
+ return n_txq;
+}
+
+static inline uint32_t
+app_rxq_get_readers(struct app_params *app, struct app_pktq_hwq_in_params *rxq)
+{
+ uint32_t pos = rxq - app->hwq_in_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_HWQ) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_swq_get_readers(struct app_params *app, struct app_pktq_swq_params *swq)
+{
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SWQ) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
+{
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TM) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_source_get_readers(struct app_params *app,
+struct app_pktq_source_params *source)
+{
+ uint32_t pos = source - app->source_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SOURCE) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_msgq_get_readers(struct app_params *app, struct app_msgq_params *msgq)
+{
+ uint32_t pos = msgq - app->msgq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_msgq_in = RTE_MIN(p->n_msgq_in, RTE_DIM(p->msgq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_msgq_in; j++)
+ if (p->msgq_in[j] == pos)
+ n_readers++;
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_txq_get_writers(struct app_params *app, struct app_pktq_hwq_out_params *txq)
+{
+ uint32_t pos = txq - app->hwq_out_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_HWQ) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_swq_get_writers(struct app_params *app, struct app_pktq_swq_params *swq)
+{
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
+{
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TM) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_sink_get_writers(struct app_params *app, struct app_pktq_sink_params *sink)
+{
+ uint32_t pos = sink - app->sink_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SINK) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_core_is_enabled(struct app_params *app, uint32_t lcore_id) {
+ return(app->core_mask[lcore_id / 64] &
+ (1LLU << (lcore_id % 64)));
+}
+
+static inline void
+app_core_enable_in_core_mask(struct app_params *app, int lcore_id) {
+ app->core_mask[lcore_id / 64] |= 1LLU << (lcore_id % 64);
+}
+
+static inline void
+app_core_build_core_mask_string(struct app_params *app, char
+*mask_buffer) {
+ int i;
+
+ mask_buffer[0] = '\0';
+ for (i = (int)RTE_DIM(app->core_mask); i > 0; i--) {
+ /* For Hex representation of bits in uint64_t */
+ char buffer[(64 / 8) * 2 + 1];
+ memset(buffer, 0, sizeof(buffer));
+ snprintf(buffer, sizeof(buffer), "%016" PRIx64,
+ app->core_mask[i-1]);
+ strcat(mask_buffer, buffer);
+ }
+}
+
+static inline uint32_t
+app_msgq_get_writers(struct app_params *app, struct app_msgq_params *msgq)
+{
+ uint32_t pos = msgq - app->msgq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_msgq_out = RTE_MIN(p->n_msgq_out,
+ RTE_DIM(p->msgq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_msgq_out; j++)
+ if (p->msgq_out[j] == pos)
+ n_writers++;
+ }
+
+ return n_writers;
+}
+
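+/*
+ * Map a RXQ/TXQ/TM parameter back to its owning LINK entry using the
+ * link id encoded in the parameter name; APP_CHECK aborts if no such
+ * link exists.
+ */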
+static inline struct app_link_params *
+app_get_link_for_rxq(struct app_params *app, struct app_pktq_hwq_in_params *p)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ ssize_t link_param_idx;
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ sprintf(link_name, "LINK%" PRIu32, rxq_link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_txq(struct app_params *app, struct app_pktq_hwq_out_params *p)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ ssize_t link_param_idx;
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ sprintf(link_name, "LINK%" PRIu32, txq_link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ uint32_t link_id;
+ ssize_t link_param_idx;
+
+ sscanf(p_tm->name, "TM%" PRIu32, &link_id);
+ sprintf(link_name, "LINK%" PRIu32, link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p_tm->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+int app_config_init(struct app_params *app);
+
+int app_config_args(struct app_params *app,
+ int argc, char **argv);
+
+int app_config_preproc(struct app_params *app);
+
+int app_config_parse(struct app_params *app,
+ const char *file_name);
+
+int app_config_parse_tm(struct app_params *app);
+
+void app_config_save(struct app_params *app,
+ const char *file_name);
+
+int app_config_check(struct app_params *app);
+
+int app_init(struct app_params *app);
+
+int app_thread(void *arg);
+
+int app_pipeline_type_register(struct app_params *app,
+ struct pipeline_type *ptype);
+
+struct pipeline_type *app_pipeline_type_find(struct app_params *app,
+ char *name);
+
+void app_link_up_internal(struct app_params *app,
+ struct app_link_params *cp);
+
+void app_link_down_internal(struct app_params *app,
+ struct app_link_params *cp);
+
+#endif
diff --git a/common/vnf_common/config_check.c b/common/vnf_common/config_check.c
new file mode 100644
index 00000000..09638c6d
--- /dev/null
+++ b/common/vnf_common/config_check.c
@@ -0,0 +1,443 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdio.h>
+
+#include <rte_ip.h>
+
+#include "app.h"
+//uint8_t g_n_hwq_in = N_RXQ;
+uint8_t g_n_hwq_in;
+static void
+check_mempools(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_mempools; i++) {
+ struct app_mempool_params *p = &app->mempool_params[i];
+
+ APP_CHECK((p->pool_size > 0),
+ "Mempool %s size is 0\n", p->name);
+
+ APP_CHECK((p->cache_size > 0),
+ "Mempool %s cache size is 0\n", p->name);
+
+ APP_CHECK(rte_is_power_of_2(p->cache_size),
+ "Mempool %s cache size not a power of 2\n", p->name);
+ }
+}
+
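+/*
+ * Per-link checks: the number of links must match PORT_MASK, the special
+ * RXQs (ARP, TCP SYN, local IP/TCP/UDP/SCTP) must form a contiguous range,
+ * and every link needs at least one RXQ and one TXQ.
+ */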
+static void
+check_links(struct app_params *app)
+{
+ uint32_t i;
+
+ /* Check that number of links matches the port mask */
+ if (app->port_mask) {
+ uint32_t n_links_port_mask =
+ __builtin_popcountll(app->port_mask);
+
+ APP_CHECK((app->n_links == n_links_port_mask),
+ "Not enough links provided in the PORT_MASK\n");
+ }
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+ uint32_t rxq_max, n_rxq, n_txq, link_id, i;
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ /* Check that link RXQs are contiguous */
+ rxq_max = 0;
+ if (link->arp_q > rxq_max)
+ rxq_max = link->arp_q;
+ if (link->tcp_syn_q > rxq_max)
+ rxq_max = link->tcp_syn_q;
+ if (link->ip_local_q > rxq_max)
+ rxq_max = link->ip_local_q;
+ if (link->tcp_local_q > rxq_max)
+ rxq_max = link->tcp_local_q;
+ if (link->udp_local_q > rxq_max)
+ rxq_max = link->udp_local_q;
+ if (link->sctp_local_q > rxq_max)
+ rxq_max = link->sctp_local_q;
+
+ if (enable_hwlb || enable_flow_dir) {
+ g_n_hwq_in = app->n_hwlb_q;
+ rxq_max = (g_n_hwq_in - 1);
+ for (i = g_n_hwq_in; i <= rxq_max; i++)
+ APP_CHECK(((link->arp_q == i) ||
+ (link->tcp_syn_q == i) ||
+ (link->ip_local_q == i) ||
+ (link->tcp_local_q == i) ||
+ (link->udp_local_q == i) ||
+ (link->sctp_local_q == i)),
+ "%s RXQs are not contiguous (A)\n", link->name);
+
+ } else {
+ for (i = 1; i <= rxq_max; i++)
+ APP_CHECK(((link->arp_q == i) ||
+ (link->tcp_syn_q == i) ||
+ (link->ip_local_q == i) ||
+ (link->tcp_local_q == i) ||
+ (link->udp_local_q == i) ||
+ (link->sctp_local_q == i)),
+ "%s RXQs are not contiguous (A)\n", link->name);
+ }
+ n_rxq = app_link_get_n_rxq(app, link);
+
+ APP_CHECK((n_rxq), "%s does not have any RXQ\n", link->name);
+ printf("n_rxq = %d\n",n_rxq);
+ printf("rxq_max = %d\n",rxq_max);
+ //APP_CHECK((n_rxq == rxq_max + 1),
+ // "%s RXQs are not contiguous (B)\n", link->name);
+
+ for (i = 0; i < n_rxq; i++) {
+ char name[APP_PARAM_NAME_SIZE];
+ int pos;
+
+ sprintf(name, "RXQ%" PRIu32 ".%" PRIu32,
+ link_id, i);
+ pos = APP_PARAM_FIND(app->hwq_in_params, name);
+ APP_CHECK((pos >= 0),
+ "%s RXQs are not contiguous (C)\n", link->name);
+ }
+
+ /* Check that link RXQs are contiguous */
+ n_txq = app_link_get_n_txq(app, link);
+
+ APP_CHECK((n_txq), "%s does not have any TXQ\n", link->name);
+
+ for (i = 0; i < n_txq; i++) {
+ char name[APP_PARAM_NAME_SIZE];
+ int pos;
+
+ sprintf(name, "TXQ%" PRIu32 ".%" PRIu32,
+ link_id, i);
+ pos = APP_PARAM_FIND(app->hwq_out_params, name);
+ APP_CHECK((pos >= 0),
+ "%s TXQs are not contiguous\n", link->name);
+ }
+ }
+}
+
+static void
+check_rxqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_hwq_in; i++) {
+ struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
+ uint32_t n_readers = app_rxq_get_readers(app, p);
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst > 0),
+ "%s burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst <= p->size),
+ "%s burst size is bigger than its size\n", p->name);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+ }
+}
+
+static void
+check_txqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
+ uint32_t n_writers = app_txq_get_writers(app, p);
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst > 0),
+ "%s burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst <= p->size),
+ "%s burst size is bigger than its size\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
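+/*
+ * SWQ checks: besides size/burst validation, at most one of IPv4/IPv6
+ * fragmentation or reassembly may be enabled per queue, such queues must
+ * have a single reader and writer, and fragmentation requires a sane MTU.
+ */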
+static void
+check_swqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+ uint32_t n_readers = app_swq_get_readers(app, p);
+ uint32_t n_writers = app_swq_get_writers(app, p);
+ uint32_t n_flags;
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst_read > 0),
+ "%s read burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_read <= p->size),
+ "%s read burst size is bigger than its size\n",
+ p->name);
+
+ APP_CHECK((p->burst_write > 0),
+ "%s write burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_write <= p->size),
+ "%s write burst size is bigger than its size\n",
+ p->name);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ if (n_readers > 1)
+ APP_LOG(app, LOW, "%s has more than one reader", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ if (n_writers > 1)
+ APP_LOG(app, LOW, "%s has more than one writer", p->name);
+
+ n_flags = p->ipv4_frag + p->ipv6_frag + p->ipv4_ras + p->ipv6_ras;
+
+ APP_CHECK((n_flags < 2),
+ "%s has more than one fragmentation or reassembly mode enabled\n",
+ p->name);
+
+ APP_CHECK((!((n_readers > 1) && (n_flags == 1))),
+ "%s has more than one reader when fragmentation or reassembly"
+ " mode enabled\n",
+ p->name);
+
+ APP_CHECK((!((n_writers > 1) && (n_flags == 1))),
+ "%s has more than one writer when fragmentation or reassembly"
+ " mode enabled\n",
+ p->name);
+
+ n_flags = p->ipv4_ras + p->ipv6_ras;
+
+ APP_CHECK((!((p->dropless == 1) && (n_flags == 1))),
+ "%s has dropless when reassembly mode enabled\n", p->name);
+
+ n_flags = p->ipv4_frag + p->ipv6_frag;
+
+ if (n_flags == 1) {
+ uint16_t ip_hdr_size = (p->ipv4_frag) ? sizeof(struct ipv4_hdr) :
+ sizeof(struct ipv6_hdr);
+
+ APP_CHECK((p->mtu > ip_hdr_size),
+ "%s mtu size is smaller than ip header\n", p->name);
+
+ APP_CHECK((!((p->mtu - ip_hdr_size) % 8)),
+ "%s mtu size is incorrect\n", p->name);
+ }
+ }
+}
+
+static void
+check_tms(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tm; i++) {
+ struct app_pktq_tm_params *p = &app->tm_params[i];
+ uint32_t n_readers = app_tm_get_readers(app, p);
+ uint32_t n_writers = app_tm_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_sources(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+ uint32_t n_readers = app_source_get_readers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+ }
+}
+
+static void
+check_sinks(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_sink; i++) {
+ struct app_pktq_sink_params *p = &app->sink_params[i];
+ uint32_t n_writers = app_sink_get_writers(app, p);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
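+/*
+ * MSGQ checks: generic queues must have exactly one reader and one writer;
+ * MSGQ-REQ-PIPELINE and MSGQ-RSP-PIPELINE queues must refer to an existing
+ * PIPELINE section.
+ */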
+static void
+check_msgqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_msgq; i++) {
+ struct app_msgq_params *p = &app->msgq_params[i];
+ uint32_t n_readers = app_msgq_get_readers(app, p);
+ uint32_t n_writers = app_msgq_get_writers(app, p);
+ uint32_t msgq_req_pipeline, msgq_rsp_pipeline;
+ uint32_t msgq_req_core, msgq_rsp_core;
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ msgq_req_pipeline = (strncmp(p->name, "MSGQ-REQ-PIPELINE",
+ strlen("MSGQ-REQ-PIPELINE")) == 0);
+
+ msgq_rsp_pipeline = (strncmp(p->name, "MSGQ-RSP-PIPELINE",
+ strlen("MSGQ-RSP-PIPELINE")) == 0);
+
+ msgq_req_core = (strncmp(p->name, "MSGQ-REQ-CORE",
+ strlen("MSGQ-REQ-CORE")) == 0);
+
+ msgq_rsp_core = (strncmp(p->name, "MSGQ-RSP-CORE",
+ strlen("MSGQ-RSP-CORE")) == 0);
+
+ if ((msgq_req_pipeline == 0) &&
+ (msgq_rsp_pipeline == 0) &&
+ (msgq_req_core == 0) &&
+ (msgq_rsp_core == 0)) {
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+
+ if (msgq_req_pipeline) {
+ struct app_pipeline_params *pipeline;
+ uint32_t pipeline_id;
+
+ APP_PARAM_GET_ID(p, "MSGQ-REQ-PIPELINE", pipeline_id);
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params,
+ "PIPELINE",
+ pipeline_id,
+ pipeline);
+
+ APP_CHECK((pipeline != NULL),
+ "%s is not associated with a valid pipeline\n",
+ p->name);
+ }
+
+ if (msgq_rsp_pipeline) {
+ struct app_pipeline_params *pipeline;
+ uint32_t pipeline_id;
+
+ APP_PARAM_GET_ID(p, "MSGQ-RSP-PIPELINE", pipeline_id);
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params,
+ "PIPELINE",
+ pipeline_id,
+ pipeline);
+
+ APP_CHECK((pipeline != NULL),
+ "%s is not associated with a valid pipeline\n",
+ p->name);
+ }
+ }
+}
+
+static void
+check_pipelines(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+
+ APP_CHECK((p->n_msgq_in == p->n_msgq_out),
+ "%s number of input MSGQs does not match "
+ "the number of output MSGQs\n", p->name);
+ }
+}
+
+int
+app_config_check(struct app_params *app)
+{
+ check_mempools(app);
+ check_links(app);
+ check_rxqs(app);
+ check_txqs(app);
+ check_swqs(app);
+ check_tms(app);
+ check_sources(app);
+ check_sinks(app);
+ check_msgqs(app);
+ check_pipelines(app);
+
+ return 0;
+}
diff --git a/common/vnf_common/config_parse.c b/common/vnf_common/config_parse.c
new file mode 100644
index 00000000..b4b99d1c
--- /dev/null
+++ b/common/vnf_common/config_parse.c
@@ -0,0 +1,3434 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+#include "parser.h"
+
+/**
+ * Default config values
+ **/
+
+static struct app_params app_params_default = {
+ .config_file = "./config/ip_pipeline.cfg",
+ .log_level = APP_LOG_LEVEL_HIGH,
+ .port_mask = 0,
+
+ .eal_params = {
+ .channels = 4,
+ },
+};
+
+static const struct app_mempool_params mempool_params_default = {
+ .parsed = 0,
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+};
+
+static const struct app_link_params link_params_default = {
+ .parsed = 0,
+ .pmd_id = 0,
+ .arp_q = 0,
+ .tcp_syn_q = 0,
+ .ip_local_q = 0,
+ .tcp_local_q = 0,
+ .udp_local_q = 0,
+ .sctp_local_q = 0,
+ .state = 0,
+ .ip = 0,
+ .ipv6 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .depth = 0,
+ .depth_ipv6 = 0,
+ .mac_addr = 0,
+ .pci_bdf = {0},
+
+ .conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 0, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ .lpbk_mode = 0,
+ #ifndef VNF_ACL
+ #ifdef LSC_GRARP
+ .intr_conf = {
+ .lsc = 1, /**< lsc interrupt feature enabled */
+ }
+ #endif
+ #endif
+ },
+
+ .promisc = 1,
+};
+
+static const struct app_pktq_hwq_in_params default_hwq_in_params = {
+ .parsed = 0,
+ .mempool_id = 0,
+ .size = 128,
+ .burst = 32,
+
+ .conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ }
+};
+
+static const struct app_pktq_hwq_out_params default_hwq_out_params = {
+ .parsed = 0,
+ .size = 512,
+ .burst = 32,
+ .dropless = 0,
+ .n_retries = 0,
+
+ .conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+};
+
+static const struct app_pktq_swq_params default_swq_params = {
+ .parsed = 0,
+ .size = 256,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+ .cpu_socket_id = 0,
+ .ipv4_frag = 0,
+ .ipv6_frag = 0,
+ .ipv4_ras = 0,
+ .ipv6_ras = 0,
+ .mtu = 0,
+ .metadata_size = 0,
+ .mempool_direct_id = 0,
+ .mempool_indirect_id = 0,
+};
+
+struct app_pktq_tm_params default_tm_params = {
+ .parsed = 0,
+ .file_name = "./config/tm_profile.cfg",
+ .burst_read = 64,
+ .burst_write = 32,
+};
+
+struct app_pktq_source_params default_source_params = {
+ .parsed = 0,
+ .mempool_id = 0,
+ .burst = 32,
+ .file_name = NULL,
+ .n_bytes_per_pkt = 0,
+};
+
+struct app_pktq_sink_params default_sink_params = {
+ .parsed = 0,
+ .file_name = NULL,
+ .n_pkts_to_dump = 0,
+};
+
+struct app_msgq_params default_msgq_params = {
+ .parsed = 0,
+ .size = 64,
+ .cpu_socket_id = 0,
+};
+
+struct app_pipeline_params default_pipeline_params = {
+ .parsed = 0,
+ .socket_id = 0,
+ .core_id = 0,
+ .hyper_th_id = 0,
+ .n_pktq_in = 0,
+ .n_pktq_out = 0,
+ .n_msgq_in = 0,
+ .n_msgq_out = 0,
+ .timer_period = 1,
+ .n_args = 0,
+};
+
+static const char app_usage[] =
+ "Usage: %s [-f CONFIG_FILE] [-s SCRIPT_FILE] [-p PORT_MASK] "
+ "[-l LOG_LEVEL] [--preproc PREPROCESSOR] [--preproc-args ARGS]\n"
+ "\n"
+ "Arguments:\n"
+ "\t-f CONFIG_FILE: Default config file is %s\n"
+ "\t-p PORT_MASK: Mask of NIC port IDs in hex format (generated from "
+ "config file when not provided)\n"
+ "\t-s SCRIPT_FILE: No CLI script file is run when not specified\n"
+ "\t-l LOG_LEVEL: 0 = NONE, 1 = HIGH PRIO (default), 2 = LOW PRIO\n"
+ "\t--disable-hw-csum Disable TCP/UDP HW checksum\n"
+ "\t--preproc PREPROCESSOR: Configuration file pre-processor\n"
+ "\t--preproc-args ARGS: Arguments to be passed to pre-processor\n"
+ "\n";
+
+static void
+app_print_usage(char *prgname)
+{
+ rte_exit(0, app_usage, prgname, app_params_default.config_file);
+}
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++); \
+ _p; \
+})
+
+#define PARSER_PARAM_ADD_CHECK(result, params_array, section_name) \
+do { \
+ APP_CHECK((result != -EINVAL), \
+ "Parse error: no free memory"); \
+ APP_CHECK((result != -ENOMEM), \
+ "Parse error: too many \"%s\" sections", section_name); \
+ APP_CHECK(((result >= 0) && (params_array)[result].parsed == 0),\
+ "Parse error: duplicate \"%s\" section", section_name); \
+ APP_CHECK((result >= 0), \
+ "Parse error in section \"%s\"", section_name); \
+} while (0)
+
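+/* Accepts yes/no/on/off (case-insensitive); returns 1, 0 or -EINVAL. */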
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+#define PARSE_ERROR(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"\n", section, entry)
+
+#define PARSE_ERROR_MESSAGE(exp, section, entry, message) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s\n", \
+ section, entry, message)
+
+
+#define PARSE_ERROR_MALLOC(exp) \
+APP_CHECK(exp, "Parse error: no free memory\n")
+
+#define PARSE_ERROR_SECTION(exp, section) \
+APP_CHECK(exp, "Parse error in section \"%s\"", section)
+
+#define PARSE_ERROR_SECTION_NO_ENTRIES(exp, section) \
+APP_CHECK(exp, "Parse error in section \"%s\": no entries\n", section)
+
+#define PARSE_WARNING_IGNORED(exp, section, entry) \
+do \
+if (!(exp)) \
+ fprintf(stderr, "Parse warning in section \"%s\": " \
+ "entry \"%s\" is ignored\n", section, entry); \
+while (0)
+
+#define PARSE_ERROR_INVALID(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"\n",\
+ section, entry)
+
+#define PARSE_ERROR_DUPLICATE(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"\n",\
+ section, entry)
+
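+/*
+ * Parse an unsigned decimal value with an optional K/M/G/T suffix
+ * (each step multiplies by 1024).
+ */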
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+ /* If everything is parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+ /* If it starts with a digit, it must be only the core id. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (num_len == RTE_DIM(num))
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+ if (num_len < sizeof(num))
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ *socket = s;
+ *core = c;
+ *ht = h;
+ return 0;
+}
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
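+/*
+ * Convert a string of hex digits into bytes; on success *size is updated
+ * to the number of bytes written into dst.
+ */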
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
+static size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++);
+
+ return i;
+}
+
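+/*
+ * Check that name is "<prefix>" (num == 0), "<prefix><id>" (num == 1) or
+ * "<prefix><id>.<id>" (num == 2).
+ */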
+static int
+validate_name(const char *name, const char *prefix, int num)
+{
+ size_t i, j;
+
+ for (i = 0; (name[i] != '\0') && (prefix[i] != '\0'); i++) {
+ if (name[i] != prefix[i])
+ return -1;
+ }
+
+ if (prefix[i] != '\0')
+ return -1;
+
+ if (!num) {
+ if (name[i] != '\0')
+ return -1;
+ else
+ return 0;
+ }
+
+ if (num == 2) {
+ j = skip_digits(&name[i]);
+ i += j;
+ if ((j == 0) || (name[i] != '.'))
+ return -1;
+ i++;
+ }
+
+ if (num == 1) {
+ j = skip_digits(&name[i]);
+ i += j;
+ if ((j == 0) || (name[i] != '\0'))
+ return -1;
+ }
+
+ return 0;
+}
+
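+/*
+ * [EAL] section: each recognized entry is copied into app->eal_params,
+ * duplicate entries are rejected, and the coremask/corelist ("c"/"l")
+ * entries are ignored with a warning.
+ */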
+static void
+parse_eal(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_eal_params *p = &app->eal_params;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *entry = &entries[i];
+
+ /* coremask */
+ if (strcmp(entry->name, "c") == 0) {
+ PARSE_WARNING_IGNORED(0, section_name, entry->name);
+ continue;
+ }
+
+ /* corelist */
+ if (strcmp(entry->name, "l") == 0) {
+ PARSE_WARNING_IGNORED(0, section_name, entry->name);
+ continue;
+ }
+
+ /* coremap */
+ if (strcmp(entry->name, "lcores") == 0) {
+ PARSE_ERROR_DUPLICATE((p->coremap == NULL),
+ section_name,
+ entry->name);
+ p->coremap = strdup(entry->value);
+ continue;
+ }
+
+ /* master_lcore */
+ if (strcmp(entry->name, "master_lcore") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->master_lcore_present == 0),
+ section_name,
+ entry->name);
+ p->master_lcore_present = 1;
+
+ status = parser_read_uint32(&p->master_lcore,
+ entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* channels */
+ if (strcmp(entry->name, "n") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->channels_present == 0),
+ section_name,
+ entry->name);
+ p->channels_present = 1;
+
+ status = parser_read_uint32(&p->channels, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* memory */
+ if (strcmp(entry->name, "m") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->memory_present == 0),
+ section_name,
+ entry->name);
+ p->memory_present = 1;
+
+ status = parser_read_uint32(&p->memory, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* ranks */
+ if (strcmp(entry->name, "r") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->ranks_present == 0),
+ section_name,
+ entry->name);
+ p->ranks_present = 1;
+
+ status = parser_read_uint32(&p->ranks, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* pci_blacklist */
+ if ((strcmp(entry->name, "pci_blacklist") == 0) ||
+ (strcmp(entry->name, "b") == 0)) {
+ uint32_t i;
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i])
+ continue;
+
+ p->pci_blacklist[i] =
+ strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->pci_blacklist[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* pci_whitelist */
+ if ((strcmp(entry->name, "pci_whitelist") == 0) ||
+ (strcmp(entry->name, "w") == 0)) {
+ uint32_t i;
+
+ PARSE_ERROR_MESSAGE((app->port_mask != 0),
+ section_name, entry->name, "entry to be "
+ "generated by the application (port_mask "
+ "not provided)");
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i])
+ continue;
+
+ p->pci_whitelist[i] = strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->pci_whitelist[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* vdev */
+ if (strcmp(entry->name, "vdev") == 0) {
+ uint32_t i;
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i])
+ continue;
+
+ p->vdev[i] = strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->vdev[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* vmware_tsc_map */
+ if (strcmp(entry->name, "vmware_tsc_map") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->vmware_tsc_map_present == 0),
+ section_name,
+ entry->name);
+ p->vmware_tsc_map_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->vmware_tsc_map = val;
+ continue;
+ }
+
+ /* proc_type */
+ if (strcmp(entry->name, "proc_type") == 0) {
+ PARSE_ERROR_DUPLICATE((p->proc_type == NULL),
+ section_name,
+ entry->name);
+ p->proc_type = strdup(entry->value);
+ continue;
+ }
+
+ /* syslog */
+ if (strcmp(entry->name, "syslog") == 0) {
+ PARSE_ERROR_DUPLICATE((p->syslog == NULL),
+ section_name,
+ entry->name);
+ p->syslog = strdup(entry->value);
+ continue;
+ }
+
+ /* log_level */
+ if (strcmp(entry->name, "log_level") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->log_level_present == 0),
+ section_name,
+ entry->name);
+ p->log_level_present = 1;
+
+ status = parser_read_uint32(&p->log_level,
+ entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* version */
+ if (strcmp(entry->name, "v") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->version_present == 0),
+ section_name,
+ entry->name);
+ p->version_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->version = val;
+ continue;
+ }
+
+ /* help */
+ if ((strcmp(entry->name, "help") == 0) ||
+ (strcmp(entry->name, "h") == 0)) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->help_present == 0),
+ section_name,
+ entry->name);
+ p->help_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->help = val;
+ continue;
+ }
+
+ /* no_huge */
+ if (strcmp(entry->name, "no_huge") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_huge_present == 0),
+ section_name,
+ entry->name);
+ p->no_huge_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_huge = val;
+ continue;
+ }
+
+ /* no_pci */
+ if (strcmp(entry->name, "no_pci") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_pci_present == 0),
+ section_name,
+ entry->name);
+ p->no_pci_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_pci = val;
+ continue;
+ }
+
+ /* no_hpet */
+ if (strcmp(entry->name, "no_hpet") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_hpet_present == 0),
+ section_name,
+ entry->name);
+ p->no_hpet_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_hpet = val;
+ continue;
+ }
+
+ /* no_shconf */
+ if (strcmp(entry->name, "no_shconf") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_shconf_present == 0),
+ section_name,
+ entry->name);
+ p->no_shconf_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_shconf = val;
+ continue;
+ }
+
+ /* add_driver */
+ if (strcmp(entry->name, "d") == 0) {
+ PARSE_ERROR_DUPLICATE((p->add_driver == NULL),
+ section_name,
+ entry->name);
+ p->add_driver = strdup(entry->value);
+ continue;
+ }
+
+ /* socket_mem */
+ if (strcmp(entry->name, "socket_mem") == 0) {
+ PARSE_ERROR_DUPLICATE((p->socket_mem == NULL),
+ section_name,
+ entry->name);
+ p->socket_mem = strdup(entry->value);
+ continue;
+ }
+
+ /* huge_dir */
+ if (strcmp(entry->name, "huge_dir") == 0) {
+ PARSE_ERROR_DUPLICATE((p->huge_dir == NULL),
+ section_name,
+ entry->name);
+ p->huge_dir = strdup(entry->value);
+ continue;
+ }
+
+ /* file_prefix */
+ if (strcmp(entry->name, "file_prefix") == 0) {
+ PARSE_ERROR_DUPLICATE((p->file_prefix == NULL),
+ section_name,
+ entry->name);
+ p->file_prefix = strdup(entry->value);
+ continue;
+ }
+
+ /* base_virtaddr */
+ if (strcmp(entry->name, "base_virtaddr") == 0) {
+ PARSE_ERROR_DUPLICATE((p->base_virtaddr == NULL),
+ section_name,
+ entry->name);
+ p->base_virtaddr = strdup(entry->value);
+ continue;
+ }
+
+ /* create_uio_dev */
+ if (strcmp(entry->name, "create_uio_dev") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->create_uio_dev_present == 0),
+ section_name,
+ entry->name);
+ p->create_uio_dev_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->create_uio_dev = val;
+ continue;
+ }
+
+ /* vfio_intr */
+ if (strcmp(entry->name, "vfio_intr") == 0) {
+ PARSE_ERROR_DUPLICATE((p->vfio_intr == NULL),
+ section_name,
+ entry->name);
+ p->vfio_intr = strdup(entry->value);
+ continue;
+ }
+
+ /* xen_dom0 */
+ if (strcmp(entry->name, "xen_dom0") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->xen_dom0_present == 0),
+ section_name,
+ entry->name);
+ p->xen_dom0_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->xen_dom0 = val;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, entry->name);
+ }
+
+ free(entries);
+}
+
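+/*
+ * Parse the per-pipeline PCAP source settings. Exactly one of file_name
+ * (pcap_file_rd) and cp_size (pcap_bytes_rd_per_pkt) must be non-NULL;
+ * the space-separated values are applied, in order, to the source
+ * parameters of the pipeline's input queues.
+ */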
+static int
+parse_pipeline_pcap_source(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *file_name, const char *cp_size)
+{
+ const char *next = NULL;
+ char *end;
+ uint32_t i;
+ int parse_file = 0;
+
+ if (file_name && !cp_size) {
+ next = file_name;
+ parse_file = 1; /* parse file path */
+ } else if (cp_size && !file_name) {
+ next = cp_size;
+ parse_file = 0; /* parse copy size */
+ } else
+ return -EINVAL;
+
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ if (p->n_pktq_in == 0)
+ return -EINVAL;
+
+ i = 0;
+ while (*next != '\0') {
+ uint32_t id;
+
+ if (i >= p->n_pktq_in)
+ return -EINVAL;
+
+ id = p->pktq_in[i].id;
+
+ end = strchr(next, ' ');
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+ if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (parse_file) {
+ app->source_params[id].file_name = strdup(name);
+ if (app->source_params[id].file_name == NULL)
+ return -ENOMEM;
+ } else {
+ if (parser_read_uint32(
+ &app->source_params[id].n_bytes_per_pkt,
+ name) != 0) {
+ if (app->source_params[id].
+ file_name != NULL)
+ free(app->source_params[id].
+ file_name);
+ return -EINVAL;
+ }
+ }
+
+ i++;
+
+ if (i == p->n_pktq_in)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
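+/*
+ * Parse the per-pipeline PCAP sink settings. Exactly one of file_name
+ * (pcap_file_wr) and n_pkts_to_dump (pcap_n_pkt_wr) must be non-NULL;
+ * the space-separated values are applied, in order, to the sink
+ * parameters of the pipeline's output queues.
+ */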
+static int
+parse_pipeline_pcap_sink(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *file_name, const char *n_pkts_to_dump)
+{
+ const char *next = NULL;
+ char *end;
+ uint32_t i;
+ int parse_file = 0;
+
+ if (file_name && !n_pkts_to_dump) {
+ next = file_name;
+ parse_file = 1; /* parse file path */
+ } else if (n_pkts_to_dump && !file_name) {
+ next = n_pkts_to_dump;
+ parse_file = 0; /* parse number of packets to dump */
+ } else
+ return -EINVAL;
+
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ if (p->n_pktq_out == 0)
+ return -EINVAL;
+
+ i = 0;
+ while (*next != '\0') {
+ uint32_t id;
+
+ if (i >= p->n_pktq_out)
+ return -EINVAL;
+
+ id = p->pktq_out[i].id;
+
+ end = strchr(next, ' ');
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+ if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (parse_file) {
+ app->sink_params[id].file_name = strdup(name);
+ if (app->sink_params[id].file_name == NULL)
+ return -ENOMEM;
+ } else {
+ if (parser_read_uint32(
+ &app->sink_params[id].n_pkts_to_dump,
+ name) != 0) {
+ if (app->sink_params[id].file_name !=
+ NULL)
+ free(app->sink_params[id].
+ file_name);
+ return -EINVAL;
+ }
+ }
+
+ i++;
+
+ if (i == p->n_pktq_out)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
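+/*
+ * Parse the "pktq_in" entry of a [PIPELINE] section: a whitespace-separated
+ * list of input queue names of the form RXQ<port>.<queue>, SWQ<n>, TM<n>
+ * or SOURCE<n>. Each name is registered in the corresponding application
+ * parameter table and appended to p->pktq_in.
+ */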
+static int
+parse_pipeline_pktq_in(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ if (next == NULL)
+ return -EINVAL;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ while (*next != '\0') {
+ enum app_pktq_in_type type;
+ int id;
+ char *end_space;
+ char *end_tab;
+ if (next != NULL)
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+ end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+ if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (validate_name(name, "RXQ", 2) == 0) {
+ type = APP_PKTQ_IN_HWQ;
+ id = APP_PARAM_ADD(app->hwq_in_params, name);
+ } else if (validate_name(name, "SWQ", 1) == 0) {
+ type = APP_PKTQ_IN_SWQ;
+ id = APP_PARAM_ADD(app->swq_params, name);
+ } else if (validate_name(name, "TM", 1) == 0) {
+ type = APP_PKTQ_IN_TM;
+ id = APP_PARAM_ADD(app->tm_params, name);
+ } else if (validate_name(name, "SOURCE", 1) == 0) {
+ type = APP_PKTQ_IN_SOURCE;
+ id = APP_PARAM_ADD(app->source_params, name);
+ } else
+ return -EINVAL;
+
+ if (id < 0)
+ return id;
+
+ p->pktq_in[p->n_pktq_in].type = type;
+ p->pktq_in[p->n_pktq_in].id = (uint32_t) id;
+ p->n_pktq_in++;
+ }
+
+ return 0;
+}
+
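+/*
+ * Parse the "pktq_out" entry of a [PIPELINE] section: a whitespace-separated
+ * list of output queue names of the form TXQ<port>.<queue>, SWQ<n>, TM<n>
+ * or SINK<n>, registered and appended to p->pktq_out.
+ */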
+static int
+parse_pipeline_pktq_out(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ if (next == NULL)
+ return -EINVAL;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+
+ while (*next != '\0') {
+ enum app_pktq_out_type type;
+ int id;
+ char *end_space;
+ char *end_tab;
+ if (next != NULL)
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+ end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+ if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+ if (validate_name(name, "TXQ", 2) == 0) {
+ type = APP_PKTQ_OUT_HWQ;
+ id = APP_PARAM_ADD(app->hwq_out_params, name);
+ } else if (validate_name(name, "SWQ", 1) == 0) {
+ type = APP_PKTQ_OUT_SWQ;
+ id = APP_PARAM_ADD(app->swq_params, name);
+ } else if (validate_name(name, "TM", 1) == 0) {
+ type = APP_PKTQ_OUT_TM;
+ id = APP_PARAM_ADD(app->tm_params, name);
+ } else if (validate_name(name, "SINK", 1) == 0) {
+ type = APP_PKTQ_OUT_SINK;
+ id = APP_PARAM_ADD(app->sink_params, name);
+ } else
+ return -EINVAL;
+
+ if (id < 0)
+ return id;
+
+ p->pktq_out[p->n_pktq_out].type = type;
+ p->pktq_out[p->n_pktq_out].id = id;
+ p->n_pktq_out++;
+ }
+
+ return 0;
+}
+
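+/*
+ * Parse the "msgq_in" entry of a [PIPELINE] section: a whitespace-separated
+ * list of MSGQ<n> names appended to p->msgq_in.
+ */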
+static int
+parse_pipeline_msgq_in(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ if (next == NULL)
+ return -EINVAL;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+ ssize_t idx;
+
+ while (*next != '\0') {
+ char *end_space;
+ char *end_tab;
+ if (next != NULL)
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+ end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+ if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (validate_name(name, "MSGQ", 1) != 0)
+ return -EINVAL;
+
+ idx = APP_PARAM_ADD(app->msgq_params, name);
+ if (idx < 0)
+ return idx;
+
+ p->msgq_in[p->n_msgq_in] = idx;
+ p->n_msgq_in++;
+ }
+
+ return 0;
+}
+
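+/*
+ * Parse the "msgq_out" entry of a [PIPELINE] section: a whitespace-separated
+ * list of MSGQ<n> names appended to p->msgq_out.
+ */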
+static int
+parse_pipeline_msgq_out(struct app_params *app,
+ struct app_pipeline_params *p,
+ const char *value)
+{
+ const char *next = value;
+ if (next == NULL)
+ return -EINVAL;
+ char *end;
+ char name[APP_PARAM_NAME_SIZE];
+ size_t name_len;
+ ssize_t idx;
+
+ while (*next != '\0') {
+ char *end_space;
+ char *end_tab;
+ if (next != NULL)
+ next = skip_white_spaces(next);
+ if (!next)
+ break;
+
+ end_space = strchr(next, ' ');
+ end_tab = strchr(next, '\t');
+
+ if (end_space && (!end_tab))
+ end = end_space;
+ else if ((!end_space) && end_tab)
+ end = end_tab;
+ else if (end_space && end_tab)
+ end = RTE_MIN(end_space, end_tab);
+ else
+ end = NULL;
+
+ if (!end)
+ name_len = strlen(next);
+ else
+ name_len = end - next;
+
+ if (name_len == 0 || name_len >= sizeof(name))
+ return -EINVAL;
+
+ strncpy(name, next, name_len);
+ name[name_len] = '\0';
+ next += name_len;
+ if (*next != '\0')
+ next++;
+
+ if (validate_name(name, "MSGQ", 1) != 0)
+ return -EINVAL;
+
+ idx = APP_PARAM_ADD(app->msgq_params, name);
+ if (idx < 0)
+ return idx;
+
+ p->msgq_out[p->n_msgq_out] = idx;
+ p->n_msgq_out++;
+ }
+
+ return 0;
+}
+
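+/*
+ * Parse one [PIPELINE<n>] section. Recognized entries are type, core,
+ * pktq_in, pktq_out, msgq_in, msgq_out, timer_period and the PCAP
+ * entries; any other entry is stored verbatim as a pipeline-specific
+ * argument. A request/response message queue pair is created implicitly
+ * for the pipeline and for the core it runs on.
+ *
+ * Illustrative section (entry values are application-dependent):
+ *
+ *   [PIPELINE1]
+ *   type = <pipeline type name>
+ *   core = s0c1
+ *   pktq_in = RXQ0.0 SWQ0
+ *   pktq_out = TXQ0.0 SINK0
+ *   timer_period = 10
+ */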
+static void
+parse_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ char name[CFG_NAME_LEN];
+ struct app_pipeline_params *param;
+ struct rte_cfgfile_entry *entries;
+ ssize_t param_idx;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->pipeline_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->pipeline_params, section_name);
+
+ param = &app->pipeline_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "type") == 0) {
+ int w_size = snprintf(param->type, RTE_DIM(param->type),
+ "%s", ent->value);
+
+ PARSE_ERROR(((w_size > 0) &&
+ (w_size < (int)RTE_DIM(param->type))),
+ section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "core") == 0) {
+ int status = parse_pipeline_core(
+ &param->socket_id, &param->core_id,
+ &param->hyper_th_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pktq_in") == 0) {
+ int status = parse_pipeline_pktq_in(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pktq_out") == 0) {
+ int status = parse_pipeline_pktq_out(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "msgq_in") == 0) {
+ int status = parse_pipeline_msgq_in(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "msgq_out") == 0) {
+ int status = parse_pipeline_msgq_out(app, param,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "timer_period") == 0) {
+ int status = parser_read_uint32(
+ &param->timer_period,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_file_rd") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_source(app,
+ param, ent->value, NULL);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_source(app,
+ param, NULL, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_file_wr") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_sink(app, param,
+ ent->value, NULL);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
+ int status;
+
+#ifndef RTE_PORT_PCAP
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+#endif
+
+ status = parse_pipeline_pcap_sink(app, param,
+ NULL, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* pipeline type specific items */
+ APP_CHECK((param->n_args < APP_MAX_PIPELINE_ARGS),
+ "Parse error in section \"%s\": too many "
+ "pipeline-specific parameters", section_name);
+
+ param->args_name[param->n_args] = strdup(ent->name);
+ param->args_value[param->n_args] = strdup(ent->value);
+
+ APP_CHECK((param->args_name[param->n_args] != NULL) &&
+ (param->args_value[param->n_args] != NULL),
+ "Parse error: no free memory");
+
+ param->n_args++;
+ }
+
+ param->parsed = 1;
+
+ snprintf(name, sizeof(name), "MSGQ-REQ-%s", section_name);
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+ param->msgq_in[param->n_msgq_in++] = param_idx;
+
+ snprintf(name, sizeof(name), "MSGQ-RSP-%s", section_name);
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+ param->msgq_out[param->n_msgq_out++] = param_idx;
+
+ snprintf(name, sizeof(name), "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ param->socket_id,
+ param->core_id,
+ (param->hyper_th_id) ? "h" : "");
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+
+ snprintf(name, sizeof(name), "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ param->socket_id,
+ param->core_id,
+ (param->hyper_th_id) ? "h" : "");
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+
+ free(entries);
+}
+
+static void
+parse_mempool(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_mempool_params *param;
+ struct rte_cfgfile_entry *entries;
+ ssize_t param_idx;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->mempool_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->mempool_params, section_name);
+
+ param = &app->mempool_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "buffer_size") == 0) {
+ int status = parser_read_uint32(
+ &param->buffer_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pool_size") == 0) {
+ int status = parser_read_uint32(
+ &param->pool_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cache_size") == 0) {
+ int status = parser_read_uint32(
+ &param->cache_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_link(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_link_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ int pci_bdf_present = 0;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->link_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->link_params, section_name);
+
+ param = &app->link_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "promisc") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->promisc = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "arp_q") == 0) {
+ int status = parser_read_uint32(&param->arp_q,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "tcp_syn_q") == 0) {
+ int status = parser_read_uint32(
+ &param->tcp_syn_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name, ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "ip_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->ip_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "tcp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->tcp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "udp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->udp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "sctp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->sctp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pci_bdf") == 0) {
+ PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
+ section_name, ent->name);
+
+ snprintf(param->pci_bdf, APP_LINK_PCI_BDF_SIZE,
+ "%s", ent->value);
+ pci_bdf_present = 1;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ /* Check for mandatory fields */
+ if (app->port_mask)
+ PARSE_ERROR_MESSAGE((pci_bdf_present == 0),
+ section_name, "pci_bdf",
+ "entry not allowed (port_mask is provided)");
+ else
+ PARSE_ERROR_MESSAGE((pci_bdf_present),
+ section_name, "pci_bdf",
+ "this entry is mandatory (port_mask is not "
+ "provided)");
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_rxq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_hwq_in_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->hwq_in_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_in_params, section_name);
+
+ param = &app->hwq_in_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_txq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_hwq_out_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->hwq_out_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->hwq_out_params, section_name);
+
+ param = &app->hwq_out_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_swq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_swq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ uint32_t mtu_present = 0;
+ uint32_t metadata_size_present = 0;
+ uint32_t mempool_direct_present = 0;
+ uint32_t mempool_indirect_present = 0;
+
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->swq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->swq_params, section_name);
+
+ param = &app->swq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name, ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv4_frag") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+
+ param->ipv4_frag = status;
+ if (param->mtu == 0)
+ param->mtu = 1500;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv6_frag") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv6_frag = status;
+ if (param->mtu == 0)
+ param->mtu = 1320;
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv4_ras") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv4_ras = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv6_ras") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv6_ras = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mtu") == 0) {
+ int status = parser_read_uint32(&param->mtu,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ mtu_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "metadata_size") == 0) {
+ int status = parser_read_uint32(
+ &param->metadata_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ metadata_size_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool_direct") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_direct_id = idx;
+ mempool_direct_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool_indirect") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_indirect_id = idx;
+ mempool_indirect_present = 1;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ APP_CHECK(((mtu_present == 0) ||
+ (param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"mtu\" is not allowed",
+ section_name);
+
+ APP_CHECK(((metadata_size_present == 0) ||
+ (param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"metadata_size\" is "
+ "not allowed", section_name);
+
+ APP_CHECK(((mempool_direct_present == 0) ||
+ (param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"mempool_direct\" is "
+ "not allowed", section_name);
+
+ APP_CHECK(((mempool_indirect_present == 0) ||
+ (param->ipv4_frag == 1) || (param->ipv6_frag == 1)),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"mempool_indirect\" is "
+ "not allowed", section_name);
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_tm(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_tm_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->tm_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->tm_params, section_name);
+
+ param = &app->tm_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "cfg") == 0) {
+ param->file_name = strdup(ent->value);
+ PARSE_ERROR_MALLOC(param->file_name != NULL);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_source(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_source_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+ uint32_t pcap_file_present = 0;
+ uint32_t pcap_size_present = 0;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->source_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->source_params, section_name);
+
+ param = &app->source_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ idx = APP_PARAM_ADD(app->mempool_params,
+ ent->value);
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params,
+ section_name);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_file_rd") == 0) {
+ PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
+ section_name, ent->name);
+
+ param->file_name = strdup(ent->value);
+
+ PARSE_ERROR_MALLOC(param->file_name != NULL);
+ pcap_file_present = 1;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((pcap_size_present == 0),
+ section_name, ent->name);
+
+ status = parser_read_uint32(
+ &param->n_bytes_per_pkt, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ pcap_size_present = 1;
+
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_sink(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_sink_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+ uint32_t pcap_file_present = 0;
+ uint32_t pcap_n_pkt_present = 0;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->sink_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->sink_params, section_name);
+
+ param = &app->sink_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "pcap_file_wr") == 0) {
+ PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
+ section_name, ent->name);
+
+ param->file_name = strdup(ent->value);
+
+ PARSE_ERROR_MALLOC((param->file_name != NULL));
+ pcap_file_present = 1;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((pcap_n_pkt_present == 0),
+ section_name, ent->name);
+
+ status = parser_read_uint32(
+ &param->n_pkts_to_dump, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ pcap_n_pkt_present = 1;
+
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_msgq_req_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
+
+ param = &app->msgq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+ free(entries);
+}
+
+static void
+parse_msgq_rsp_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
+
+ param = &app->msgq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
+static void
+parse_msgq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ PARSER_PARAM_ADD_CHECK(param_idx, app->msgq_params, section_name);
+
+ param = &app->msgq_params[param_idx];
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ param->parsed = 1;
+
+ free(entries);
+}
+
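+/*
+ * Configuration file scheme: each section name consists of one of the
+ * prefixes below, optionally followed by an index ("numbers" == 1, e.g.
+ * [SWQ0]) or by a two-part index ("numbers" == 2, e.g. [RXQ0.0]). The
+ * matching load callback is invoked for every section found in the file.
+ */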
+typedef void (*config_section_load)(struct app_params *p,
+ const char *section_name,
+ struct rte_cfgfile *cfg);
+
+struct config_section {
+ const char prefix[CFG_NAME_LEN];
+ int numbers;
+ config_section_load load;
+};
+
+static const struct config_section cfg_file_scheme[] = {
+ {"EAL", 0, parse_eal},
+ {"PIPELINE", 1, parse_pipeline},
+ {"MEMPOOL", 1, parse_mempool},
+ {"LINK", 1, parse_link},
+ {"RXQ", 2, parse_rxq},
+ {"TXQ", 2, parse_txq},
+ {"SWQ", 1, parse_swq},
+ {"TM", 1, parse_tm},
+ {"SOURCE", 1, parse_source},
+ {"SINK", 1, parse_sink},
+ {"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
+ {"MSGQ-RSP-PIPELINE", 1, parse_msgq_rsp_pipeline},
+ {"MSGQ", 1, parse_msgq},
+};
+
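+/* Always create the default mempool MEMPOOL0 before the file is parsed. */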
+static void
+create_implicit_mempools(struct app_params *app)
+{
+ ssize_t idx;
+
+ idx = APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
+ PARSER_PARAM_ADD_CHECK(idx, app->mempool_params, "start-up");
+}
+
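+/*
+ * Create one LINK<n> section per bit set in the port mask, assigning
+ * PMD ids in increasing port order.
+ */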
+static void
+create_implicit_links_from_port_mask(struct app_params *app,
+ uint64_t port_mask)
+{
+ uint32_t pmd_id, link_id;
+
+ link_id = 0;
+ for (pmd_id = 0; pmd_id < RTE_MAX_ETHPORTS; pmd_id++) {
+ char name[APP_PARAM_NAME_SIZE];
+ ssize_t idx;
+
+ if ((port_mask & (1LLU << pmd_id)) == 0)
+ continue;
+
+ snprintf(name, sizeof(name), "LINK%" PRIu32, link_id);
+ idx = APP_PARAM_ADD(app->link_params, name);
+ PARSER_PARAM_ADD_CHECK(idx, app->link_params, name);
+
+ app->link_params[idx].pmd_id = pmd_id;
+ link_id++;
+ }
+}
+
+static void
+assign_link_pmd_id_from_pci_bdf(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+
+ link->pmd_id = i;
+ }
+}
+
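+/*
+ * Top-level configuration parser: create the implicit mempool and links,
+ * load the configuration file, dispatch every section to its parser
+ * according to cfg_file_scheme, derive the parameter counts, validate the
+ * PCAP-related fields, save the resolved configuration to the output file
+ * and finally load the traffic manager (TM) configuration files.
+ */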
+int
+app_config_parse(struct app_params *app, const char *file_name)
+{
+ struct rte_cfgfile *cfg;
+ char **section_names;
+ int i, j, sect_count;
+
+ /* Implicit mempools */
+ create_implicit_mempools(app);
+
+ /* Port mask */
+ if (app->port_mask)
+ create_implicit_links_from_port_mask(app, app->port_mask);
+
+ /* Load application configuration file */
+ cfg = rte_cfgfile_load(file_name, 0);
+ APP_CHECK((cfg != NULL), "Parse error: Unable to load config "
+ "file %s", file_name);
+
+ sect_count = rte_cfgfile_num_sections(cfg, NULL, 0);
+ APP_CHECK((sect_count > 0), "Parse error: number of sections "
+ "in file \"%s\" is %d", file_name,
+ sect_count);
+
+ section_names = malloc(sect_count * sizeof(char *));
+ PARSE_ERROR_MALLOC(section_names != NULL);
+
+ for (i = 0; i < sect_count; i++) {
+ section_names[i] = malloc(CFG_NAME_LEN);
+ PARSE_ERROR_MALLOC(section_names[i] != NULL);
+ }
+
+ rte_cfgfile_sections(cfg, section_names, sect_count);
+
+ for (i = 0; i < sect_count; i++) {
+ const struct config_section *sch_s;
+ int len, cfg_name_len;
+
+ cfg_name_len = strlen(section_names[i]);
+
+ /* Find section type */
+ for (j = 0; j < (int)RTE_DIM(cfg_file_scheme); j++) {
+ sch_s = &cfg_file_scheme[j];
+ len = strlen(sch_s->prefix);
+
+ if (cfg_name_len < len)
+ continue;
+
+ /* After the section name we expect only '\0', a single number
+ * or a number, a dot and a second number, so protect against
+ * false matches: for example, prefix "ABC" should match section
+ * name "ABC0.0" but not "ABCDEF".
+ */
+ if ((section_names[i][len] != '\0') &&
+ !isdigit(section_names[i][len]))
+ continue;
+
+ if (strncmp(sch_s->prefix, section_names[i], len) == 0)
+ break;
+ }
+
+ APP_CHECK(j < (int)RTE_DIM(cfg_file_scheme),
+ "Parse error: unknown section %s",
+ section_names[i]);
+
+ APP_CHECK(validate_name(section_names[i],
+ sch_s->prefix,
+ sch_s->numbers) == 0,
+ "Parse error: invalid section name \"%s\"",
+ section_names[i]);
+
+ sch_s->load(app, section_names[i], cfg);
+ }
+
+ for (i = 0; i < sect_count; i++)
+ free(section_names[i]);
+
+ free(section_names);
+
+ rte_cfgfile_close(cfg);
+
+ APP_PARAM_COUNT(app->mempool_params, app->n_mempools);
+ APP_PARAM_COUNT(app->link_params, app->n_links);
+ APP_PARAM_COUNT(app->hwq_in_params, app->n_pktq_hwq_in);
+ APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
+ APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
+ APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
+ APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
+ APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
+ APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
+ APP_PARAM_COUNT(app->pipeline_params, app->n_pipelines);
+
+#ifdef RTE_PORT_PCAP
+ for (i = 0; i < (int)app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+
+ APP_CHECK((p->file_name), "Parse error: missing "
+ "mandatory field \"pcap_file_rd\" for \"%s\"",
+ p->name);
+ }
+#else
+ for (i = 0; i < (int)app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+
+ APP_CHECK((!p->file_name), "Parse error: invalid field "
+ "\"pcap_file_rd\" for \"%s\"", p->name);
+ }
+#endif
+
+ if (app->port_mask == 0)
+ assign_link_pmd_id_from_pci_bdf(app);
+
+ /* Save configuration to output file */
+ app_config_save(app, app->output_file);
+
+ /* Load TM configuration files */
+ app_config_parse_tm(app);
+
+ return 0;
+}
+
+static void
+save_eal_params(struct app_params *app, FILE *f)
+{
+ struct app_eal_params *p = &app->eal_params;
+ uint32_t i;
+
+ fprintf(f, "[EAL]\n");
+
+ if (p->coremap)
+ fprintf(f, "%s = %s\n", "lcores", p->coremap);
+
+ if (p->master_lcore_present)
+ fprintf(f, "%s = %" PRIu32 "\n",
+ "master_lcore", p->master_lcore);
+
+ if (p->channels_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "n", p->channels);
+
+ if (p->memory_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "m", p->memory);
+
+ if (p->ranks_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "r", p->ranks);
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "pci_blacklist",
+ p->pci_blacklist[i]);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "pci_whitelist",
+ p->pci_whitelist[i]);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "vdev",
+ p->vdev[i]);
+ }
+
+ if (p->vmware_tsc_map_present)
+ fprintf(f, "%s = %s\n", "vmware_tsc_map",
+ (p->vmware_tsc_map) ? "yes" : "no");
+
+ if (p->proc_type)
+ fprintf(f, "%s = %s\n", "proc_type", p->proc_type);
+
+ if (p->syslog)
+ fprintf(f, "%s = %s\n", "syslog", p->syslog);
+
+ if (p->log_level_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "log_level", p->log_level);
+
+ if (p->version_present)
+ fprintf(f, "%s = %s\n", "v", (p->version) ? "yes" : "no");
+
+ if (p->help_present)
+ fprintf(f, "%s = %s\n", "help", (p->help) ? "yes" : "no");
+
+ if (p->no_huge_present)
+ fprintf(f, "%s = %s\n", "no_huge", (p->no_huge) ? "yes" : "no");
+
+ if (p->no_pci_present)
+ fprintf(f, "%s = %s\n", "no_pci", (p->no_pci) ? "yes" : "no");
+
+ if (p->no_hpet_present)
+ fprintf(f, "%s = %s\n", "no_hpet", (p->no_hpet) ? "yes" : "no");
+
+ if (p->no_shconf_present)
+ fprintf(f, "%s = %s\n", "no_shconf",
+ (p->no_shconf) ? "yes" : "no");
+
+ if (p->add_driver)
+ fprintf(f, "%s = %s\n", "d", p->add_driver);
+
+ if (p->socket_mem)
+ fprintf(f, "%s = %s\n", "socket_mem", p->socket_mem);
+
+ if (p->huge_dir)
+ fprintf(f, "%s = %s\n", "huge_dir", p->huge_dir);
+
+ if (p->file_prefix)
+ fprintf(f, "%s = %s\n", "file_prefix", p->file_prefix);
+
+ if (p->base_virtaddr)
+ fprintf(f, "%s = %s\n", "base_virtaddr", p->base_virtaddr);
+
+ if (p->create_uio_dev_present)
+ fprintf(f, "%s = %s\n", "create_uio_dev",
+ (p->create_uio_dev) ? "yes" : "no");
+
+ if (p->vfio_intr)
+ fprintf(f, "%s = %s\n", "vfio_intr", p->vfio_intr);
+
+ if (p->xen_dom0_present)
+ fprintf(f, "%s = %s\n", "xen_dom0",
+ (p->xen_dom0) ? "yes" : "no");
+
+ fputc('\n', f);
+}
+
+static void
+save_mempool_params(struct app_params *app, FILE *f)
+{
+ struct app_mempool_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->mempool_params);
+ for (i = 0; i < count; i++) {
+ p = &app->mempool_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "buffer_size", p->buffer_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "pool_size", p->pool_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cache_size", p->cache_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_links_params(struct app_params *app, FILE *f)
+{
+ struct app_link_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->link_params);
+ for (i = 0; i < count; i++) {
+ p = &app->link_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "; %s = %" PRIu32 "\n", "pmd_id", p->pmd_id);
+ fprintf(f, "%s = %s\n", "promisc", p->promisc ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu32 "\n", "arp_q", p->arp_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "tcp_syn_q",
+ p->tcp_syn_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "ip_local_q", p->ip_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "tcp_local_q", p->tcp_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "udp_local_q", p->udp_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
+ p->sctp_local_q);
+
+ if (strlen(p->pci_bdf))
+ fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_rxq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_hwq_in_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->hwq_in_params);
+ for (i = 0; i < count; i++) {
+ p = &app->hwq_in_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n",
+ "mempool",
+ app->mempool_params[p->mempool_id].name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_txq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_hwq_out_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->hwq_out_params);
+ for (i = 0; i < count; i++) {
+ p = &app->hwq_out_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+ fprintf(f, "%s = %s\n",
+ "dropless",
+ p->dropless ? "yes" : "no");
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_swq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_swq_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->swq_params);
+ for (i = 0; i < count; i++) {
+ p = &app->swq_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+ fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+ fprintf(f, "%s = %s\n", "ipv4_frag", p->ipv4_frag ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv6_frag", p->ipv6_frag ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv4_ras", p->ipv4_ras ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv6_ras", p->ipv6_ras ? "yes" : "no");
+ if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1)) {
+ fprintf(f, "%s = %" PRIu32 "\n", "mtu", p->mtu);
+ fprintf(f, "%s = %" PRIu32 "\n", "metadata_size", p->metadata_size);
+ fprintf(f, "%s = %s\n",
+ "mempool_direct",
+ app->mempool_params[p->mempool_direct_id].name);
+ fprintf(f, "%s = %s\n",
+ "mempool_indirect",
+ app->mempool_params[p->mempool_indirect_id].name);
+ }
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_tm_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_tm_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->tm_params);
+ for (i = 0; i < count; i++) {
+ p = &app->tm_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n", "cfg", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_source_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_source_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->source_params);
+ for (i = 0; i < count; i++) {
+ p = &app->source_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n",
+ "mempool",
+ app->mempool_params[p->mempool_id].name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+ fprintf(f, "%s = %s\n", "pcap_file_rd", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n", "pcap_bytes_rd_per_pkt",
+ p->n_bytes_per_pkt);
+ fputc('\n', f);
+ }
+}
+
+static void
+save_sink_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_sink_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->sink_params);
+ for (i = 0; i < count; i++) {
+ p = &app->sink_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n", "pcap_file_wr", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n",
+ "pcap_n_pkt_wr", p->n_pkts_to_dump);
+ fputc('\n', f);
+ }
+}
+
+static void
+save_msgq_params(struct app_params *app, FILE *f)
+{
+ struct app_msgq_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->msgq_params);
+ for (i = 0; i < count; i++) {
+ p = &app->msgq_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_pipeline_params(struct app_params *app, FILE *f)
+{
+ size_t i, count;
+
+ count = RTE_DIM(app->pipeline_params);
+ for (i = 0; i < count; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ /* section name */
+ fprintf(f, "[%s]\n", p->name);
+
+ /* type */
+ fprintf(f, "type = %s\n", p->type);
+
+ /* core */
+ fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+ p->socket_id,
+ p->core_id,
+ (p->hyper_th_id) ? "h" : "");
+
+ /* pktq_in */
+ if (p->n_pktq_in) {
+ uint32_t j;
+
+ fprintf(f, "pktq_in =");
+ for (j = 0; j < p->n_pktq_in; j++) {
+ struct app_pktq_in_params *pp = &p->pktq_in[j];
+ char *name;
+
+ switch (pp->type) {
+ case APP_PKTQ_IN_HWQ:
+ name = app->hwq_in_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_SWQ:
+ name = app->swq_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_TM:
+ name = app->tm_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_SOURCE:
+ name = app->source_params[pp->id].name;
+ break;
+ default:
+ APP_CHECK(0, "System error "
+ "occurred while saving "
+ "parameter to file");
+ }
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* pktq_out */
+ if (p->n_pktq_out) {
+ uint32_t j;
+
+ fprintf(f, "pktq_out =");
+ for (j = 0; j < p->n_pktq_out; j++) {
+ struct app_pktq_out_params *pp =
+ &p->pktq_out[j];
+ char *name;
+
+ switch (pp->type) {
+ case APP_PKTQ_OUT_HWQ:
+ name = app->hwq_out_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_SWQ:
+ name = app->swq_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_TM:
+ name = app->tm_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_SINK:
+ name = app->sink_params[pp->id].name;
+ break;
+ default:
+ APP_CHECK(0, "System error "
+ "occurred while saving "
+ "parameter to file");
+ }
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* msgq_in */
+ if (p->n_msgq_in) {
+ uint32_t j;
+
+ fprintf(f, "msgq_in =");
+ for (j = 0; j < p->n_msgq_in; j++) {
+ uint32_t id = p->msgq_in[j];
+ char *name = app->msgq_params[id].name;
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* msgq_out */
+ if (p->n_msgq_out) {
+ uint32_t j;
+
+ fprintf(f, "msgq_out =");
+ for (j = 0; j < p->n_msgq_out; j++) {
+ uint32_t id = p->msgq_out[j];
+ char *name = app->msgq_params[id].name;
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* timer_period */
+ fprintf(f, "timer_period = %" PRIu32 "\n", p->timer_period);
+
+ /* args */
+ if (p->n_args) {
+ uint32_t j;
+
+ for (j = 0; j < p->n_args; j++)
+ fprintf(f, "%s = %s\n", p->args_name[j],
+ p->args_value[j]);
+ }
+
+ fprintf(f, "\n");
+ }
+}
+
+void
+app_config_save(struct app_params *app, const char *file_name)
+{
+ FILE *file;
+ char *name, *dir_name;
+ int status;
+
+ name = strdup(file_name);
+ dir_name = dirname(name);
+ status = access(dir_name, W_OK);
+ APP_CHECK((status == 0),
+ "Error: need write access privilege to directory "
+ "\"%s\" to save configuration\n", dir_name);
+
+ file = fopen(file_name, "w");
+ APP_CHECK((file != NULL),
+ "Error: failed to save configuration to file \"%s\"",
+ file_name);
+
+ save_eal_params(app, file);
+ save_pipeline_params(app, file);
+ save_mempool_params(app, file);
+ save_links_params(app, file);
+ save_rxq_params(app, file);
+ save_txq_params(app, file);
+ save_swq_params(app, file);
+ save_tm_params(app, file);
+ save_source_params(app, file);
+ save_sink_params(app, file);
+ save_msgq_params(app, file);
+
+ fclose(file);
+ free(name);
+}
+
+int
+app_config_init(struct app_params *app)
+{
+ size_t i;
+
+ memcpy(app, &app_params_default, sizeof(struct app_params));
+
+ for (i = 0; i < RTE_DIM(app->mempool_params); i++)
+ memcpy(&app->mempool_params[i],
+ &mempool_params_default,
+ sizeof(struct app_mempool_params));
+
+ for (i = 0; i < RTE_DIM(app->link_params); i++)
+ memcpy(&app->link_params[i],
+ &link_params_default,
+ sizeof(struct app_link_params));
+
+ for (i = 0; i < RTE_DIM(app->hwq_in_params); i++)
+ memcpy(&app->hwq_in_params[i],
+ &default_hwq_in_params,
+ sizeof(default_hwq_in_params));
+
+ for (i = 0; i < RTE_DIM(app->hwq_out_params); i++)
+ memcpy(&app->hwq_out_params[i],
+ &default_hwq_out_params,
+ sizeof(default_hwq_out_params));
+
+ for (i = 0; i < RTE_DIM(app->swq_params); i++)
+ memcpy(&app->swq_params[i],
+ &default_swq_params,
+ sizeof(default_swq_params));
+
+ for (i = 0; i < RTE_DIM(app->tm_params); i++)
+ memcpy(&app->tm_params[i],
+ &default_tm_params,
+ sizeof(default_tm_params));
+
+ for (i = 0; i < RTE_DIM(app->source_params); i++)
+ memcpy(&app->source_params[i],
+ &default_source_params,
+ sizeof(default_source_params));
+
+ for (i = 0; i < RTE_DIM(app->sink_params); i++)
+ memcpy(&app->sink_params[i],
+ &default_sink_params,
+ sizeof(default_sink_params));
+
+ for (i = 0; i < RTE_DIM(app->msgq_params); i++)
+ memcpy(&app->msgq_params[i],
+ &default_msgq_params,
+ sizeof(default_msgq_params));
+
+ for (i = 0; i < RTE_DIM(app->pipeline_params); i++)
+ memcpy(&app->pipeline_params[i],
+ &default_pipeline_params,
+ sizeof(default_pipeline_params));
+
+ return 0;
+}
+
+static char *
+filenamedup(const char *filename, const char *suffix)
+{
+ char *s = malloc(strlen(filename) + strlen(suffix) + 1);
+
+ if (!s)
+ return NULL;
+
+ sprintf(s, "%s%s", filename, suffix);
+ return s;
+}
+
+int
+app_config_args(struct app_params *app, int argc, char **argv)
+{
+ const char *optname;
+ int opt, option_index;
+ int f_present, s_present, p_present, l_present;
+ int preproc_present, preproc_params_present, disable_csum_present;
+ int hwlb_present;
+ int flow_dir_present;
+ int scanned = 0;
+
+ static struct option lgopts[] = {
+ { "disable-hw-csum", 0, 0, 0 },
+ { "preproc", 1, 0, 0 },
+ { "preproc-args", 1, 0, 0 },
+ { "hwlb", 1, 0, 0 },
+ { "flow_dir", 0, 0, 0 },
+ { NULL, 0, 0, 0 }
+ };
+
+ /* Copy application name */
+ strncpy(app->app_name, argv[0], APP_APPNAME_SIZE - 1);
+
+ f_present = 0;
+ s_present = 0;
+ p_present = 0;
+ l_present = 0;
+ disable_csum_present = 0;
+ preproc_present = 0;
+ preproc_params_present = 0;
+ app->header_csum_req = 1; /* header checksum enabled by default */
+ hwlb_present = 0;
+ flow_dir_present = 0;
+
+ while ((opt = getopt_long(argc, argv, "f:s:p:l:", lgopts,
+ &option_index)) != EOF)
+ switch (opt) {
+ case 'f':
+ if (f_present)
+ rte_panic("Error: Config file is provided "
+ "more than once\n");
+ f_present = 1;
+
+ if (!strlen(optarg))
+ rte_panic("Error: Config file name is null\n");
+
+ app->config_file = strdup(optarg);
+ if (app->config_file == NULL)
+ rte_panic("Error: Memory allocation failure\n");
+
+ break;
+
+ case 's':
+ if (s_present)
+ rte_panic("Error: Script file is provided "
+ "more than once\n");
+ s_present = 1;
+
+ if (!strlen(optarg))
+ rte_panic("Error: Script file name is null\n");
+
+ app->script_file = strdup(optarg);
+ if (app->script_file == NULL)
+ rte_panic("Error: Memory allocation failure\n");
+
+ break;
+
+ case 'p':
+ if (p_present)
+ rte_panic("Error: PORT_MASK is provided "
+ "more than once\n");
+ p_present = 1;
+
+ if ((sscanf(optarg, "%" SCNx64 "%n", &app->port_mask,
+ &scanned) != 1) ||
+ ((size_t) scanned != strlen(optarg)))
+ rte_panic("Error: PORT_MASK is not "
+ "a hexadecimal integer\n");
+
+ if (app->port_mask == 0)
+ rte_panic("Error: PORT_MASK is null\n");
+
+ break;
+
+ case 'l':
+ if (l_present)
+ rte_panic("Error: LOG_LEVEL is provided "
+ "more than once\n");
+ l_present = 1;
+
+ if ((sscanf(optarg, "%" SCNu32 "%n", &app->log_level,
+ &scanned) != 1) ||
+ ((size_t) scanned != strlen(optarg)) ||
+ (app->log_level >= APP_LOG_LEVELS))
+ rte_panic("Error: LOG_LEVEL invalid value\n");
+
+ break;
+
+ case 0:
+ optname = lgopts[option_index].name;
+
+ if (strcmp(optname, "hwlb") == 0) {
+ if (hwlb_present)
+ rte_panic("Error: hwlb argument "
+ "is provided more than once\n");
+ hwlb_present = 1;
+ printf(" HWLB is configured\n");
+
+ app->n_hwlb_q = atoi(optarg);
+
+ if (!app->n_hwlb_q)
+ rte_panic("Error: HWQs for HWLB must be at least 1\n");
+
+ printf("Number of HWQs for HWLB: %d\n", app->n_hwlb_q);
+ enable_hwlb = 1;
+ break;
+ }
+
+ if (strcmp(optname, "flow_dir") == 0) {
+ if (flow_dir_present)
+ rte_panic("Error: flow_dir argument "
+ "is provided more than once\n");
+ flow_dir_present = 1;
+ printf(" FLOW DIR is configured\n");
+
+ enable_flow_dir = 1;
+
+ break;
+ }
+
+ if (strcmp(optname, "disable-hw-csum") == 0) {
+ if (disable_csum_present)
+ rte_panic("Error: disable-hw-csum argument "
+ "is provided more than once\n");
+
+ printf("Disabling TCP/UDP HW checksum\n");
+ app->header_csum_req = 0;
+ disable_csum_present = 1;
+ break;
+ }
+
+ if (strcmp(optname, "preproc") == 0) {
+ if (preproc_present)
+ rte_panic("Error: Preprocessor argument "
+ "is provided more than once\n");
+ preproc_present = 1;
+
+ app->preproc = strdup(optarg);
+ break;
+ }
+
+ if (strcmp(optname, "preproc-args") == 0) {
+ if (preproc_params_present)
+ rte_panic("Error: Preprocessor args "
+ "are provided more than once\n");
+ preproc_params_present = 1;
+
+ app->preproc_args = strdup(optarg);
+ break;
+ }
+
+ app_print_usage(argv[0]);
+ break;
+
+ default:
+ app_print_usage(argv[0]);
+ }
+
+ optind = 0; /* reset getopt lib */
+
+ /* Check dependencies between args */
+ if (preproc_params_present && (preproc_present == 0))
+ rte_panic("Error: Preprocessor args specified while "
+ "preprocessor is not defined\n");
+
+ app->parser_file = preproc_present ?
+ filenamedup(app->config_file, ".preproc") :
+ strdup(app->config_file);
+ app->output_file = filenamedup(app->config_file, ".out");
+
+ return 0;
+}
+
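+/*
+ * Illustrative invocation handled by app_config_args() above; the binary
+ * name, file names and mask/level values are placeholders, not part of
+ * this code:
+ *
+ *   ./vnf_app -f config.cfg -s script.cli -p 0x3 -l 2 \
+ *           --hwlb 4 --flow_dir --disable-hw-csum
+ */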
+int
+app_config_preproc(struct app_params *app)
+{
+ char buffer[256];
+ int status;
+
+ if (app->preproc == NULL)
+ return 0;
+
+ status = access(app->config_file, F_OK | R_OK);
+ APP_CHECK((status == 0), "Error: Unable to open file %s",
+ app->config_file);
+
+ snprintf(buffer, sizeof(buffer), "%s %s %s > %s",
+ app->preproc,
+ app->preproc_args ? app->preproc_args : "",
+ app->config_file,
+ app->parser_file);
+
+ status = system(buffer);
+ APP_CHECK((WIFEXITED(status) && (WEXITSTATUS(status) == 0)),
+ "Error occurred while pre-processing file \"%s\"\n",
+ app->config_file);
+
+ return status;
+}
diff --git a/common/vnf_common/config_parse_tm.c b/common/vnf_common/config_parse_tm.c
new file mode 100644
index 00000000..fe7eb642
--- /dev/null
+++ b/common/vnf_common/config_parse_tm.c
@@ -0,0 +1,431 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+
+static int
+tm_cfgfile_load_sched_port(
+ struct rte_cfgfile *file,
+ struct rte_sched_port_params *port_params)
+{
+ const char *entry;
+ int j;
+
+ entry = rte_cfgfile_get_entry(file, "port", "frame overhead");
+ if (entry)
+ port_params->frame_overhead = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, "port", "mtu");
+ if (entry)
+ port_params->mtu = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ "port",
+ "number of subports per port");
+ if (entry)
+ port_params->n_subports_per_port = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ "port",
+ "number of pipes per subport");
+ if (entry)
+ port_params->n_pipes_per_subport = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, "port", "queue sizes");
+ if (entry) {
+ char *next;
+
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ port_params->qsize[j] = (uint16_t)
+ strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+#ifdef RTE_SCHED_RED
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ char str[32];
+
+ /* Parse WRED min thresholds */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred min", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].min_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED max thresholds */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred max", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].max_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED inverse mark probabilities */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred inv prob", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].maxp_inv
+ = (uint8_t)strtol(entry, &next, 10);
+
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED EWMA filter weights */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred weight", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].wq_log2
+ = (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ }
+#endif /* RTE_SCHED_RED */
+
+ return 0;
+}
+
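+/*
+ * Illustrative sketch of the traffic manager profile entries consumed by
+ * tm_cfgfile_load_sched_port() above; the entry names match the
+ * rte_cfgfile lookups in this function, while the numeric values are
+ * hypothetical:
+ *
+ *   [port]
+ *   frame overhead = 24
+ *   mtu = 1522
+ *   number of subports per port = 1
+ *   number of pipes per subport = 4096
+ *   queue sizes = 64 64 64 64
+ *
+ *   [red]                      ; parsed only when RTE_SCHED_RED is defined
+ *   tc 0 wred min = 48 40 32
+ *   tc 0 wred max = 64 64 64
+ *   tc 0 wred inv prob = 10 10 10
+ *   tc 0 wred weight = 9 9 9
+ */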
+static int
+tm_cfgfile_load_sched_pipe(
+ struct rte_cfgfile *file,
+ struct rte_sched_port_params *port_params,
+ struct rte_sched_pipe_params *pipe_params)
+{
+ int i, j;
+ char *next;
+ const char *entry;
+ int profiles;
+
+ profiles = rte_cfgfile_num_sections(file,
+ "pipe profile", sizeof("pipe profile") - 1);
+ port_params->n_pipe_profiles = profiles;
+
+ for (j = 0; j < profiles; j++) {
+ char pipe_name[32];
+
+ snprintf(pipe_name, sizeof(pipe_name),
+ "pipe profile %" PRId32, j);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tb rate");
+ if (entry)
+ pipe_params[j].tb_rate = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tb size");
+ if (entry)
+ pipe_params[j].tb_size = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc period");
+ if (entry)
+ pipe_params[j].tc_period = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 0 rate");
+ if (entry)
+ pipe_params[j].tc_rate[0] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 rate");
+ if (entry)
+ pipe_params[j].tc_rate[1] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 rate");
+ if (entry)
+ pipe_params[j].tc_rate[2] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 rate");
+ if (entry)
+ pipe_params[j].tc_rate[3] = (uint32_t) atoi(entry);
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ entry = rte_cfgfile_get_entry(file, pipe_name,
+ "tc 3 oversubscription weight");
+ if (entry)
+ pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
+#endif
+
+ entry = rte_cfgfile_get_entry(file,
+ pipe_name,
+ "tc 0 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ return 0;
+}
+
+static int
+tm_cfgfile_load_sched_subport(
+ struct rte_cfgfile *file,
+ struct rte_sched_subport_params *subport_params,
+ int *pipe_to_profile)
+{
+ const char *entry;
+ int i, j, k;
+
+ for (i = 0; i < APP_MAX_SCHED_SUBPORTS; i++) {
+ char sec_name[CFG_NAME_LEN];
+
+ snprintf(sec_name, sizeof(sec_name),
+ "subport %" PRId32, i);
+
+ if (rte_cfgfile_has_section(file, sec_name)) {
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tb rate");
+ if (entry)
+ subport_params[i].tb_rate =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tb size");
+ if (entry)
+ subport_params[i].tb_size =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc period");
+ if (entry)
+ subport_params[i].tc_period =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 0 rate");
+ if (entry)
+ subport_params[i].tc_rate[0] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 1 rate");
+ if (entry)
+ subport_params[i].tc_rate[1] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 2 rate");
+ if (entry)
+ subport_params[i].tc_rate[2] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 3 rate");
+ if (entry)
+ subport_params[i].tc_rate[3] =
+ (uint32_t) atoi(entry);
+
+ int n_entries = rte_cfgfile_section_num_entries(file,
+ sec_name);
+ struct rte_cfgfile_entry entries[n_entries];
+
+ rte_cfgfile_section_entries(file,
+ sec_name,
+ entries,
+ n_entries);
+
+ for (j = 0; j < n_entries; j++)
+ if (strncmp("pipe",
+ entries[j].name,
+ sizeof("pipe") - 1) == 0) {
+ int profile;
+ char *tokens[2] = {NULL, NULL};
+ int n_tokens;
+ int begin, end;
+ char name[CFG_NAME_LEN + 1];
+
+ profile = atoi(entries[j].value);
+ strncpy(name,
+ entries[j].name,
+ sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+ n_tokens = rte_strsplit(
+ &name[sizeof("pipe")],
+ strnlen(name, CFG_NAME_LEN),
+ tokens, 2, '-');
+
+ begin = atoi(tokens[0]);
+ if (n_tokens == 2)
+ end = atoi(tokens[1]);
+ else
+ end = begin;
+
+ if ((end >= APP_MAX_SCHED_PIPES) ||
+ (begin > end))
+ return -1;
+
+ for (k = begin; k <= end; k++) {
+ char profile_name[CFG_NAME_LEN];
+
+ snprintf(profile_name,
+ sizeof(profile_name),
+ "pipe profile %" PRId32,
+ profile);
+ if (rte_cfgfile_has_section(file, profile_name))
+ pipe_to_profile[i * APP_MAX_SCHED_PIPES + k] = profile;
+ else
+ rte_exit(EXIT_FAILURE,
+ "Wrong pipe profile %s\n",
+ entries[j].value);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
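+/*
+ * Companion sketch for the pipe profile and subport loaders above; the
+ * section and entry names follow the lookups in this file, the values are
+ * hypothetical:
+ *
+ *   [subport 0]
+ *   tb rate = 1250000000
+ *   tb size = 1000000
+ *   tc period = 10
+ *   tc 0 rate = 1250000000
+ *   pipe 0-4095 = 0            ; map pipes 0..4095 to pipe profile 0
+ *
+ *   [pipe profile 0]
+ *   tb rate = 305175
+ *   tb size = 1000000
+ *   tc period = 40
+ *   tc 0 rate = 305175
+ *   tc 0 wrr weights = 1 1 1 1
+ */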
+static int
+tm_cfgfile_load(struct app_pktq_tm_params *tm)
+{
+ struct rte_cfgfile *file;
+ uint32_t i;
+
+ memset(tm->sched_subport_params, 0, sizeof(tm->sched_subport_params));
+ memset(tm->sched_pipe_profiles, 0, sizeof(tm->sched_pipe_profiles));
+ memset(&tm->sched_port_params, 0, sizeof(tm->sched_port_params));
+ for (i = 0; i < APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES; i++)
+ tm->sched_pipe_to_profile[i] = -1;
+
+ tm->sched_port_params.pipe_profiles = &tm->sched_pipe_profiles[0];
+
+ if (tm->file_name[0] == '\0')
+ return -1;
+
+ file = rte_cfgfile_load(tm->file_name, 0);
+ if (file == NULL)
+ return -1;
+
+ tm_cfgfile_load_sched_port(file,
+ &tm->sched_port_params);
+ tm_cfgfile_load_sched_subport(file,
+ tm->sched_subport_params,
+ tm->sched_pipe_to_profile);
+ tm_cfgfile_load_sched_pipe(file,
+ &tm->sched_port_params,
+ tm->sched_pipe_profiles);
+
+ rte_cfgfile_close(file);
+ return 0;
+}
+
+int
+app_config_parse_tm(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(app->tm_params); i++) {
+ struct app_pktq_tm_params *p = &app->tm_params[i];
+ int status;
+
+ if (!APP_PARAM_VALID(p))
+ break;
+
+ status = tm_cfgfile_load(p);
+ APP_CHECK(status == 0,
+ "Parse error for %s configuration file \"%s\"\n",
+ p->name,
+ p->file_name);
+ }
+
+ return 0;
+}
diff --git a/common/vnf_common/cpu_core_map.c b/common/vnf_common/cpu_core_map.c
new file mode 100644
index 00000000..f0a08f40
--- /dev/null
+++ b/common/vnf_common/cpu_core_map.c
@@ -0,0 +1,475 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+
+#include "cpu_core_map.h"
+
+struct cpu_core_map {
+ uint32_t n_max_sockets;
+ uint32_t n_max_cores_per_socket;
+ uint32_t n_max_ht_per_core;
+ uint32_t n_sockets;
+ uint32_t n_cores_per_socket;
+ uint32_t n_ht_per_core;
+ int map[0];
+};
+
+static inline uint32_t
+cpu_core_map_pos(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id)
+{
+ return (socket_id * map->n_max_cores_per_socket + core_id) *
+ map->n_max_ht_per_core + ht_id;
+}
+
+static int
+cpu_core_map_compute_eal(struct cpu_core_map *map);
+
+static int
+cpu_core_map_compute_linux(struct cpu_core_map *map);
+
+static int
+cpu_core_map_compute_and_check(struct cpu_core_map *map);
+
+struct cpu_core_map *
+cpu_core_map_init(uint32_t n_max_sockets,
+ uint32_t n_max_cores_per_socket,
+ uint32_t n_max_ht_per_core,
+ uint32_t eal_initialized)
+{
+ uint32_t map_size, map_mem_size, i;
+ struct cpu_core_map *map;
+ int status;
+
+ /* Check input arguments */
+ if ((n_max_sockets == 0) ||
+ (n_max_cores_per_socket == 0) ||
+ (n_max_ht_per_core == 0))
+ return NULL;
+
+ /* Memory allocation */
+ map_size = n_max_sockets * n_max_cores_per_socket * n_max_ht_per_core;
+ map_mem_size = sizeof(struct cpu_core_map) + map_size * sizeof(int);
+ map = (struct cpu_core_map *) malloc(map_mem_size);
+ if (map == NULL)
+ return NULL;
+
+ /* Initialization */
+ map->n_max_sockets = n_max_sockets;
+ map->n_max_cores_per_socket = n_max_cores_per_socket;
+ map->n_max_ht_per_core = n_max_ht_per_core;
+ map->n_sockets = 0;
+ map->n_cores_per_socket = 0;
+ map->n_ht_per_core = 0;
+
+ for (i = 0; i < map_size; i++)
+ map->map[i] = -1;
+
+ status = (eal_initialized) ?
+ cpu_core_map_compute_eal(map) :
+ cpu_core_map_compute_linux(map);
+
+ if (status) {
+ free(map);
+ return NULL;
+ }
+
+ status = cpu_core_map_compute_and_check(map);
+ if (status) {
+ free(map);
+ return NULL;
+ }
+
+ return map;
+}
+
+int
+cpu_core_map_compute_eal(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ /* Compute map */
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t n_detected, core_id_contig;
+ int lcore_id;
+
+ n_detected = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ struct lcore_config *p = &lcore_config[lcore_id];
+
+ if ((p->detected) && (p->socket_id == socket_id))
+ n_detected++;
+ }
+
+ core_id_contig = 0;
+
+ for (core_id = 0; n_detected ; core_id++) {
+ ht_id = 0;
+
+ for (lcore_id = 0;
+ lcore_id < RTE_MAX_LCORE;
+ lcore_id++) {
+ struct lcore_config *p =
+ &lcore_config[lcore_id];
+
+ if ((p->detected) &&
+ (p->socket_id == socket_id) &&
+ (p->core_id == core_id)) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id_contig,
+ ht_id);
+
+ map->map[pos] = lcore_id;
+ ht_id++;
+ n_detected--;
+ }
+ }
+
+ if (ht_id) {
+ core_id_contig++;
+ if (core_id_contig ==
+ map->n_max_cores_per_socket)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cpu_core_map_compute_and_check(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ /* Compute n_ht_per_core, n_cores_per_socket, n_sockets */
+ for (ht_id = 0; ht_id < map->n_max_ht_per_core; ht_id++) {
+ if (map->map[ht_id] == -1)
+ break;
+
+ map->n_ht_per_core++;
+ }
+
+ if (map->n_ht_per_core == 0)
+ return -1;
+
+ for (core_id = 0; core_id < map->n_max_cores_per_socket; core_id++) {
+ uint32_t pos = core_id * map->n_max_ht_per_core;
+
+ if (map->map[pos] == -1)
+ break;
+
+ map->n_cores_per_socket++;
+ }
+
+ if (map->n_cores_per_socket == 0)
+ return -1;
+
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t pos = socket_id * map->n_max_cores_per_socket *
+ map->n_max_ht_per_core;
+
+ if (map->map[pos] == -1)
+ break;
+
+ map->n_sockets++;
+ }
+
+ if (map->n_sockets == 0)
+ return -1;
+
+ /* Check that each socket has exactly the same number of cores
+ and that each core has exactly the same number of hyper-threads */
+ for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
+ for (core_id = 0; core_id < map->n_cores_per_socket; core_id++)
+ for (ht_id = 0;
+ ht_id < map->n_max_ht_per_core;
+ ht_id++) {
+ uint32_t pos = (socket_id *
+ map->n_max_cores_per_socket + core_id) *
+ map->n_max_ht_per_core + ht_id;
+
+ if (((ht_id < map->n_ht_per_core) &&
+ (map->map[pos] == -1)) ||
+ ((ht_id >= map->n_ht_per_core) &&
+ (map->map[pos] != -1)))
+ return -1;
+ }
+
+ for ( ; core_id < map->n_max_cores_per_socket; core_id++)
+ for (ht_id = 0;
+ ht_id < map->n_max_ht_per_core;
+ ht_id++) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id,
+ ht_id);
+
+ if (map->map[pos] != -1)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define FILE_LINUX_CPU_N_LCORES \
+ "/sys/devices/system/cpu/present"
+
+static int
+cpu_core_map_get_n_lcores_linux(void)
+{
+ char buffer[64], *string;
+ FILE *fd;
+
+ fd = fopen(FILE_LINUX_CPU_N_LCORES, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ string = strchr(buffer, '-');
+ if (string == NULL)
+ return -1;
+
+ return atoi(++string) + 1;
+}
+
+#define FILE_LINUX_CPU_CORE_ID \
+ "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id"
+
+static int
+cpu_core_map_get_core_id_linux(int lcore_id)
+{
+ char buffer[64];
+ FILE *fd;
+ int core_id;
+
+ snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_CORE_ID, lcore_id);
+ fd = fopen(buffer, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ core_id = atoi(buffer);
+ return core_id;
+}
+
+#define FILE_LINUX_CPU_SOCKET_ID \
+ "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/physical_package_id"
+
+static int
+cpu_core_map_get_socket_id_linux(int lcore_id)
+{
+ char buffer[64];
+ FILE *fd;
+ int socket_id;
+
+ snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_SOCKET_ID, lcore_id);
+ fd = fopen(buffer, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ socket_id = atoi(buffer);
+ return socket_id;
+}
+
+int
+cpu_core_map_compute_linux(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+ int n_lcores;
+
+ n_lcores = cpu_core_map_get_n_lcores_linux();
+ if (n_lcores <= 0)
+ return -1;
+
+ /* Compute map */
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t n_detected, core_id_contig;
+ int lcore_id;
+
+ n_detected = 0;
+ for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
+ int lcore_socket_id =
+ cpu_core_map_get_socket_id_linux(lcore_id);
+
+ if (lcore_socket_id < 0)
+ return -1;
+
+ if (((uint32_t) lcore_socket_id) == socket_id)
+ n_detected++;
+ }
+
+ core_id_contig = 0;
+
+ for (core_id = 0; n_detected ; core_id++) {
+ ht_id = 0;
+
+ for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
+ int lcore_socket_id =
+ cpu_core_map_get_socket_id_linux(
+ lcore_id);
+
+ if (lcore_socket_id < 0)
+ return -1;
+
+ int lcore_core_id =
+ cpu_core_map_get_core_id_linux(
+ lcore_id);
+
+ if (lcore_core_id < 0)
+ return -1;
+
+ if (((uint32_t) lcore_socket_id == socket_id) &&
+ ((uint32_t) lcore_core_id == core_id)) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id_contig,
+ ht_id);
+
+ map->map[pos] = lcore_id;
+ ht_id++;
+ n_detected--;
+ }
+ }
+
+ if (ht_id) {
+ core_id_contig++;
+ if (core_id_contig ==
+ map->n_max_cores_per_socket)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+cpu_core_map_print(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ if (map == NULL)
+ return;
+
+ for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
+ printf("Socket %" PRIu32 ":\n", socket_id);
+
+ for (core_id = 0;
+ core_id < map->n_cores_per_socket;
+ core_id++) {
+ printf("[%" PRIu32 "] = [", core_id);
+
+ for (ht_id = 0; ht_id < map->n_ht_per_core; ht_id++) {
+ int lcore_id = cpu_core_map_get_lcore_id(map,
+ socket_id,
+ core_id,
+ ht_id);
+
+ uint32_t core_id_noncontig =
+ cpu_core_map_get_core_id_linux(
+ lcore_id);
+
+ printf(" %" PRId32 " (%" PRIu32 ") ",
+ lcore_id,
+ core_id_noncontig);
+ }
+
+ printf("]\n");
+ }
+ }
+}
+
+uint32_t
+cpu_core_map_get_n_sockets(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_sockets;
+}
+
+uint32_t
+cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_cores_per_socket;
+}
+
+uint32_t
+cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_ht_per_core;
+}
+
+int
+cpu_core_map_get_lcore_id(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id)
+{
+ uint32_t pos;
+
+ if ((map == NULL) ||
+ (socket_id >= map->n_sockets) ||
+ (core_id >= map->n_cores_per_socket) ||
+ (ht_id >= map->n_ht_per_core))
+ return -1;
+
+ pos = cpu_core_map_pos(map, socket_id, core_id, ht_id);
+
+ return map->map[pos];
+}
+
+void
+cpu_core_map_free(struct cpu_core_map *map)
+{
+ free(map);
+}
diff --git a/common/vnf_common/cpu_core_map.h b/common/vnf_common/cpu_core_map.h
new file mode 100644
index 00000000..03c00c72
--- /dev/null
+++ b/common/vnf_common/cpu_core_map.h
@@ -0,0 +1,52 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_CPU_CORE_MAP_H__
+#define __INCLUDE_CPU_CORE_MAP_H__
+
+#include <stdio.h>
+
+#include <rte_lcore.h>
+
+struct cpu_core_map;
+
+struct cpu_core_map *
+cpu_core_map_init(uint32_t n_max_sockets,
+ uint32_t n_max_cores_per_socket,
+ uint32_t n_max_ht_per_core,
+ uint32_t eal_initialized);
+
+uint32_t
+cpu_core_map_get_n_sockets(struct cpu_core_map *map);
+
+uint32_t
+cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map);
+
+uint32_t
+cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map);
+
+int
+cpu_core_map_get_lcore_id(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id);
+
+void cpu_core_map_print(struct cpu_core_map *map);
+
+void
+cpu_core_map_free(struct cpu_core_map *map);
+
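+/*
+ * Minimal usage sketch, assuming the EAL is already initialized; the
+ * maximum socket/core/hyper-thread bounds below are placeholders:
+ *
+ *   struct cpu_core_map *map;
+ *   int lcore_id;
+ *
+ *   map = cpu_core_map_init(4, 32, 4, 1);
+ *   if (map != NULL) {
+ *           lcore_id = cpu_core_map_get_lcore_id(map, 0, 0, 0);
+ *           cpu_core_map_print(map);
+ *           cpu_core_map_free(map);
+ *   }
+ */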
+#endif
diff --git a/common/vnf_common/hash_func.h b/common/vnf_common/hash_func.h
new file mode 100644
index 00000000..c2564910
--- /dev/null
+++ b/common/vnf_common/hash_func.h
@@ -0,0 +1,334 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef __INCLUDE_HASH_FUNC_H__
+#define __INCLUDE_HASH_FUNC_H__
+
+static inline uint64_t
+hash_xor_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = seed ^ k[0];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+
+ xor0 ^= k[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+
+ xor0 ^= xor1;
+ xor2 ^= k[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+ xor3 = k[6] ^ k[7];
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+#if defined(RTE_ARCH_X86_64) && defined(RTE_MACHINE_CPUFLAG_SSE4_2)
+
+#include <x86intrin.h>
+
+static inline uint64_t
+hash_crc_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t crc0;
+
+ crc0 = _mm_crc32_u64(seed, k[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, crc0, crc1;
+
+ k0 = k[0];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc0 = _mm_crc32_u64(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = k2 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, k5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6]);
+ crc5 = k5 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#else
+
+#define hash_default_key8 hash_xor_key8
+#define hash_default_key16 hash_xor_key16
+#define hash_default_key24 hash_xor_key24
+#define hash_default_key32 hash_xor_key32
+#define hash_default_key40 hash_xor_key40
+#define hash_default_key48 hash_xor_key48
+#define hash_default_key56 hash_xor_key56
+#define hash_default_key64 hash_xor_key64
+
+#endif
+
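+/*
+ * Usage sketch: each function reads the key as an array of uint64_t words
+ * (key_size is unused), so the buffer is expected to hold full 8-byte
+ * words. The key contents and seed below are hypothetical:
+ *
+ *   uint64_t key[2] = {0x0123456789abcdefULL, 0x0f1e2d3c4b5a6978ULL};
+ *   uint64_t sig = hash_default_key16(key, 16, 0);
+ */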
+#endif
diff --git a/common/vnf_common/parser.h b/common/vnf_common/parser.h
new file mode 100644
index 00000000..b104c168
--- /dev/null
+++ b/common/vnf_common/parser.h
@@ -0,0 +1,32 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PARSER_H__
+#define __INCLUDE_PARSER_H__
+
+int
+parser_read_arg_bool(const char *p);
+
+int
+parser_read_uint64(uint64_t *value, const char *p);
+
+int
+parser_read_uint32(uint32_t *value, const char *p);
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+#endif
diff --git a/common/vnf_common/pipeline.h b/common/vnf_common/pipeline.h
new file mode 100644
index 00000000..7bbc0aef
--- /dev/null
+++ b/common/vnf_common/pipeline.h
@@ -0,0 +1,76 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_H__
+#define __INCLUDE_PIPELINE_H__
+
+#include <cmdline_parse.h>
+
+#include "pipeline_be.h"
+
+/*
+ * Pipeline type front-end operations
+ */
+
+typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params, void *arg);
+
+typedef int (*pipeline_fe_op_free)(void *pipeline);
+
+struct pipeline_fe_ops {
+ pipeline_fe_op_init f_init;
+ pipeline_fe_op_free f_free;
+ cmdline_parse_ctx_t *cmds;
+};
+
+/*
+ * Pipeline type
+ */
+
+struct pipeline_type {
+ const char *name;
+
+ /* pipeline back-end */
+ struct pipeline_be_ops *be_ops;
+
+ /* pipeline front-end */
+ struct pipeline_fe_ops *fe_ops;
+};
+
+static inline uint32_t
+pipeline_type_cmds_count(struct pipeline_type *ptype)
+{
+ cmdline_parse_ctx_t *cmds;
+ uint32_t n_cmds;
+
+ if (ptype->fe_ops == NULL)
+ return 0;
+
+ cmds = ptype->fe_ops->cmds;
+ if (cmds == NULL)
+ return 0;
+
+ for (n_cmds = 0; cmds[n_cmds]; n_cmds++);
+
+ return n_cmds;
+}
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry);
+
+#endif
diff --git a/common/vnf_common/pipeline_actions_common.h b/common/vnf_common/pipeline_actions_common.h
new file mode 100644
index 00000000..c8c29917
--- /dev/null
+++ b/common/vnf_common/pipeline_actions_common.h
@@ -0,0 +1,214 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+#ifndef __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
+#define __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_pipeline.h>
+
+#define PIPELINE_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_PORT_IN_AH_HIJACK_ALL(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t); \
+ uint32_t i; \
+ \
+ rte_pipeline_ah_packet_hijack(p, pkt_mask); \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_in_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], &entries[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], entries[i], arg); \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ f_pkt_work(pkts[pos], entries[pos], arg); \
+ } \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_in_mask, \
+ struct rte_pipeline_table_entry *entry, \
+ void *arg) \
+{ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], entry, arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], entry, arg); \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ f_pkt_work(pkts[pos], entry, arg); \
+ } \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_HIT_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ &entries[i], arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], \
+ entries[i], arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entries[pos], arg, time); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_MISS_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry *entry, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ entry, arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], entry, arg, time);\
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entry, arg, time); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
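+/*
+ * Instantiation sketch for PIPELINE_PORT_IN_AH above; pkt_work_x() and
+ * pkt4_work_x() are hypothetical per-packet and 4-packet handlers supplied
+ * by the pipeline implementation:
+ *
+ *   static inline void pkt_work_x(struct rte_mbuf *pkt, void *arg);
+ *   static inline void pkt4_work_x(struct rte_mbuf **pkts, void *arg);
+ *
+ *   PIPELINE_PORT_IN_AH(port_in_ah_x, pkt_work_x, pkt4_work_x)
+ *
+ * The generated port_in_ah_x() can then be registered as the input port
+ * action handler when the rte_pipeline port is created.
+ */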
+#endif
diff --git a/common/vnf_common/pipeline_be.h b/common/vnf_common/pipeline_be.h
new file mode 100644
index 00000000..006a415e
--- /dev/null
+++ b/common/vnf_common/pipeline_be.h
@@ -0,0 +1,288 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_PIPELINE_BE_H__
+#define __INCLUDE_PIPELINE_BE_H__
+
+#include <rte_port_ethdev.h>
+#include <rte_port_ring.h>
+#include <rte_port_frag.h>
+#include <rte_port_ras.h>
+#include <rte_port_sched.h>
+#include <rte_port_source_sink.h>
+#include <rte_pipeline.h>
+
+enum pipeline_port_in_type {
+ PIPELINE_PORT_IN_ETHDEV_READER,
+ PIPELINE_PORT_IN_RING_READER,
+ PIPELINE_PORT_IN_RING_MULTI_READER,
+ PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
+ PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
+ PIPELINE_PORT_IN_SCHED_READER,
+ PIPELINE_PORT_IN_SOURCE,
+};
+
+struct pipeline_port_in_params {
+ enum pipeline_port_in_type type;
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_ring_multi_reader_params ring_multi;
+ struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
+ struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_source_params source;
+ } params;
+ uint32_t burst_size;
+};
+
+static inline void *
+pipeline_port_in_params_convert(struct pipeline_port_in_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_IN_ETHDEV_READER:
+ return (void *) &p->params.ethdev;
+ case PIPELINE_PORT_IN_RING_READER:
+ return (void *) &p->params.ring;
+ case PIPELINE_PORT_IN_RING_MULTI_READER:
+ return (void *) &p->params.ring_multi;
+ case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
+ return (void *) &p->params.ring_ipv4_frag;
+ case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
+ return (void *) &p->params.ring_ipv6_frag;
+ case PIPELINE_PORT_IN_SCHED_READER:
+ return (void *) &p->params.sched;
+ case PIPELINE_PORT_IN_SOURCE:
+ return (void *) &p->params.source;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct rte_port_in_ops *
+pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_IN_ETHDEV_READER:
+ return &rte_port_ethdev_reader_ops;
+ case PIPELINE_PORT_IN_RING_READER:
+ return &rte_port_ring_reader_ops;
+ case PIPELINE_PORT_IN_RING_MULTI_READER:
+ return &rte_port_ring_multi_reader_ops;
+ case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
+ return &rte_port_ring_reader_ipv4_frag_ops;
+ case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
+ return &rte_port_ring_reader_ipv6_frag_ops;
+ case PIPELINE_PORT_IN_SCHED_READER:
+ return &rte_port_sched_reader_ops;
+ case PIPELINE_PORT_IN_SOURCE:
+ return &rte_port_source_ops;
+ default:
+ return NULL;
+ }
+}
+
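+/*
+ * Sketch of how the two helpers above are typically combined when an
+ * rte_pipeline input port is created; "p" and its burst size are
+ * placeholders taken from the parsed configuration:
+ *
+ *   struct rte_pipeline_port_in_params port_params = {
+ *           .ops = pipeline_port_in_params_get_ops(p),
+ *           .arg_create = pipeline_port_in_params_convert(p),
+ *           .f_action = NULL,
+ *           .arg_ah = NULL,
+ *           .burst_size = p->burst_size,
+ *   };
+ */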
+enum pipeline_port_out_type {
+ PIPELINE_PORT_OUT_ETHDEV_WRITER,
+ PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_WRITER,
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER,
+ PIPELINE_PORT_OUT_RING_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
+ PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
+ PIPELINE_PORT_OUT_SCHED_WRITER,
+ PIPELINE_PORT_OUT_SINK,
+};
+
+struct pipeline_port_out_params {
+ enum pipeline_port_out_type type;
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ethdev_writer_nodrop_params ethdev_nodrop;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_ring_multi_writer_params ring_multi;
+ struct rte_port_ring_writer_nodrop_params ring_nodrop;
+ struct rte_port_ring_multi_writer_nodrop_params ring_multi_nodrop;
+ struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
+ struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_sink_params sink;
+ } params;
+};
+
+static inline void *
+pipeline_port_out_params_convert(struct pipeline_port_out_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER:
+ return (void *) &p->params.ethdev;
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
+ return (void *) &p->params.ethdev_nodrop;
+ case PIPELINE_PORT_OUT_RING_WRITER:
+ return (void *) &p->params.ring;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
+ return (void *) &p->params.ring_multi;
+ case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
+ return (void *) &p->params.ring_nodrop;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
+ return (void *) &p->params.ring_multi_nodrop;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
+ return (void *) &p->params.ring_ipv4_ras;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
+ return (void *) &p->params.ring_ipv6_ras;
+ case PIPELINE_PORT_OUT_SCHED_WRITER:
+ return (void *) &p->params.sched;
+ case PIPELINE_PORT_OUT_SINK:
+ return (void *) &p->params.sink;
+ default:
+ return NULL;
+ }
+}
+
+static inline void *
+pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER:
+ return &rte_port_ethdev_writer_ops;
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
+ return &rte_port_ethdev_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER:
+ return &rte_port_ring_writer_ops;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
+ return &rte_port_ring_multi_writer_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
+ return &rte_port_ring_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
+ return &rte_port_ring_multi_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
+ return &rte_port_ring_writer_ipv4_ras_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
+ return &rte_port_ring_writer_ipv6_ras_ops;
+ case PIPELINE_PORT_OUT_SCHED_WRITER:
+ return &rte_port_sched_writer_ops;
+ case PIPELINE_PORT_OUT_SINK:
+ return &rte_port_sink_ops;
+ default:
+ return NULL;
+ }
+}
+
+#ifndef PIPELINE_NAME_SIZE
+#define PIPELINE_NAME_SIZE 32
+#endif
+
+#ifndef PIPELINE_MAX_PORT_IN
+#define PIPELINE_MAX_PORT_IN 16
+#endif
+
+#ifndef PIPELINE_MAX_PORT_OUT
+#define PIPELINE_MAX_PORT_OUT 16
+#endif
+
+#ifndef PIPELINE_MAX_TABLES
+#define PIPELINE_MAX_TABLES 16
+#endif
+
+#ifndef PIPELINE_MAX_MSGQ_IN
+#define PIPELINE_MAX_MSGQ_IN 16
+#endif
+
+#ifndef PIPELINE_MAX_MSGQ_OUT
+#define PIPELINE_MAX_MSGQ_OUT 16
+#endif
+
+#ifndef PIPELINE_MAX_ARGS
+#define PIPELINE_MAX_ARGS 32
+#endif
+
+struct pipeline_params {
+ char name[PIPELINE_NAME_SIZE];
+
+ struct pipeline_port_in_params port_in[PIPELINE_MAX_PORT_IN];
+ struct pipeline_port_out_params port_out[PIPELINE_MAX_PORT_OUT];
+ struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
+ struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
+
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_msgq;
+
+ int socket_id;
+
+ char *args_name[PIPELINE_MAX_ARGS];
+ char *args_value[PIPELINE_MAX_ARGS];
+ uint32_t n_args;
+
+ uint32_t log_level;
+};
+
+/*
+ * Pipeline type back-end operations
+ */
+
+typedef void* (*pipeline_be_op_init)(struct pipeline_params *params,
+ void *arg);
+
+typedef int (*pipeline_be_op_free)(void *pipeline);
+
+typedef int (*pipeline_be_op_run)(void *pipeline);
+
+typedef int (*pipeline_be_op_timer)(void *pipeline);
+
+typedef int (*pipeline_be_op_track)(void *pipeline,
+ uint32_t port_in,
+ uint32_t *port_out);
+
+struct pipeline_be_ops {
+ pipeline_be_op_init f_init;
+ pipeline_be_op_free f_free;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ pipeline_be_op_track f_track;
+};
+
+/* Pipeline specific config parse error messages */
+#define PIPELINE_ARG_CHECK(exp, fmt, ...) \
+do { \
+ if (!(exp)) { \
+ fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
+ return -1; \
+ } \
+} while (0)
+
+#define PIPELINE_PARSE_ERR_INV_VAL(exp, section, entry, val) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
+ "has invalid value (\"%s\")", section, entry, val)
+
+#define PIPELINE_PARSE_ERR_OUT_RNG(exp, section, entry, val) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
+ "value is out of range (\"%s\")", section, entry, val)
+
+#define PIPELINE_PARSE_ERR_DUPLICATE(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": duplicated " \
+ "entry \"%s\"", section, entry)
+
+#define PIPELINE_PARSE_ERR_INV_ENT(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": invalid entry " \
+ "\"%s\"", section, entry)
+
+#define PIPELINE_PARSE_ERR_MANDATORY(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": mandatory " \
+ "entry \"%s\" is missing", section, entry)
+
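+/*
+ * Usage sketch inside a pipeline argument parser; the section name, entry
+ * name and value variables are placeholders:
+ *
+ *   PIPELINE_PARSE_ERR_MANDATORY(entry_present, params->name, "n_flows");
+ *   PIPELINE_PARSE_ERR_INV_VAL(status == 0, params->name, "n_flows", value);
+ */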
+#endif
diff --git a/common/vnf_common/thread.c b/common/vnf_common/thread.c
new file mode 100644
index 00000000..dcf272ff
--- /dev/null
+++ b/common/vnf_common/thread.c
@@ -0,0 +1,305 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_common_be.h"
+#include "app.h"
+#include "thread.h"
+
+#if APP_THREAD_HEADROOM_STATS_COLLECT
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline) \
+do { \
+ uint64_t t0 = rte_rdtsc_precise(); \
+ int n_pkts = rte_pipeline_run(pipeline->p); \
+ \
+ if (n_pkts == 0) { \
+ uint64_t t1 = rte_rdtsc_precise(); \
+ \
+ thread->headroom_cycles += t1 - t0; \
+ } \
+} while (0)
+
+
+#define PIPELINE_RUN_CUSTOM(thread, data) \
+do { \
+ uint64_t t0 = rte_rdtsc_precise(); \
+ int n_pkts = data->f_run(data->be); \
+ \
+ if (n_pkts == 0) { \
+ uint64_t t1 = rte_rdtsc_precise(); \
+ \
+ thread->headroom_cycles += t1 - t0; \
+ } \
+} while (0)
+
+#else
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline) \
+ rte_pipeline_run(pipeline->p)
+
+#define PIPELINE_RUN_CUSTOM(thread, data) \
+ data->f_run(data->be)
+
+#endif
+
+static inline void *
+thread_msg_recv(struct rte_ring *r)
+{
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void
+thread_msg_send(struct rte_ring *r,
+ void *msg)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
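+/*
+ * Attach a pipeline to this thread: pipelines without their own f_run are
+ * driven through rte_pipeline_run() ("regular"), while those that supply
+ * f_run are kept in the separate "custom" list.
+ */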
+static int
+thread_pipeline_enable(struct app_thread_data *t,
+ struct thread_pipeline_enable_msg_req *req)
+{
+ struct app_thread_pipeline_data *p;
+
+ if (req->f_run == NULL) {
+ if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
+ return -1;
+ } else {
+ if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
+ return -1;
+ }
+
+ p = (req->f_run == NULL) ?
+ &t->regular[t->n_regular] :
+ &t->custom[t->n_custom];
+
+ p->pipeline_id = req->pipeline_id;
+ p->be = req->be;
+ p->f_run = req->f_run;
+ p->f_timer = req->f_timer;
+ p->timer_period = req->timer_period;
+ p->deadline = 0;
+
+ if (req->f_run == NULL)
+ t->n_regular++;
+ else
+ t->n_custom++;
+
+ return 0;
+}
+
+static int
+thread_pipeline_disable(struct app_thread_data *t,
+ struct thread_pipeline_disable_msg_req *req)
+{
+ uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
+ uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+ uint32_t i;
+
+ /* search regular pipelines of current thread */
+ for (i = 0; i < n_regular; i++) {
+ if (t->regular[i].pipeline_id != req->pipeline_id)
+ continue;
+
+ if (i < n_regular - 1)
+ memmove(&t->regular[i],
+ &t->regular[i+1],
+ (n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
+
+ n_regular--;
+ t->n_regular = n_regular;
+
+ return 0;
+ }
+
+ /* search custom pipelines of current thread */
+ for (i = 0; i < n_custom; i++) {
+ if (t->custom[i].pipeline_id != req->pipeline_id)
+ continue;
+
+ if (i < n_custom - 1)
+ memmove(&t->custom[i],
+ &t->custom[i+1],
+ (n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data));
+
+ n_custom--;
+ t->n_custom = n_custom;
+
+ return 0;
+ }
+
+ /* return if pipeline not found */
+ return -1;
+}
+
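+/*
+ * Drain one message (if any) from the thread's input ring; the request
+ * buffer is reused in place to carry the response back on the output ring.
+ */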
+static int
+thread_msg_req_handle(struct app_thread_data *t)
+{
+ void *msg_ptr;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ msg_ptr = thread_msg_recv(t->msgq_in);
+ req = msg_ptr;
+ rsp = msg_ptr;
+
+ if (req != NULL)
+ switch (req->type) {
+ case THREAD_MSG_REQ_PIPELINE_ENABLE: {
+ rsp->status = thread_pipeline_enable(t,
+ (struct thread_pipeline_enable_msg_req *) req);
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+
+ case THREAD_MSG_REQ_PIPELINE_DISABLE: {
+ rsp->status = thread_pipeline_disable(t,
+ (struct thread_pipeline_disable_msg_req *) req);
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+
+ case THREAD_MSG_REQ_HEADROOM_READ: {
+ struct thread_headroom_read_msg_rsp *rsp =
+ (struct thread_headroom_read_msg_rsp *)
+ req;
+
+ rsp->headroom_ratio = t->headroom_ratio;
+ rsp->status = 0;
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
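+/*
+ * Convert the idle cycles accumulated since the last update into a ratio of
+ * the elapsed TSC interval, then reset the counters for the next window.
+ */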
+static void
+thread_headroom_update(struct app_thread_data *t, uint64_t time)
+{
+ uint64_t time_diff = time - t->headroom_time;
+
+ t->headroom_ratio =
+ ((double) t->headroom_cycles) / ((double) time_diff);
+
+ t->headroom_cycles = 0;
+ t->headroom_time = rte_rdtsc_precise();
+}
+
+int
+app_thread(void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ uint32_t core_id = rte_lcore_id(), i, j;
+ struct app_thread_data *t = &app->thread_data[core_id];
+
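+ /*
+ * Dispatch loop: run every enabled pipeline once per iteration; every
+ * 16th iteration service pipeline timers and the thread message queue.
+ */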
+ for (i = 0; ; i++) {
+ uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
+ uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+
+ /* Run regular pipelines */
+ for (j = 0; j < n_regular; j++) {
+ struct app_thread_pipeline_data *data = &t->regular[j];
+ struct pipeline *p = data->be;
+
+ PIPELINE_RUN_REGULAR(t, p);
+ }
+
+ /* Run custom pipelines */
+ for (j = 0; j < n_custom; j++) {
+ struct app_thread_pipeline_data *data = &t->custom[j];
+
+ PIPELINE_RUN_CUSTOM(t, data);
+ }
+
+ /* Timer */
+ if ((i & 0xF) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t t_deadline = UINT64_MAX;
+
+ if (time < t->deadline)
+ continue;
+
+ /* Timer for regular pipelines */
+ for (j = 0; j < n_regular; j++) {
+ struct app_thread_pipeline_data *data =
+ &t->regular[j];
+ uint64_t p_deadline = data->deadline;
+
+ if (p_deadline <= time) {
+ data->f_timer(data->be);
+ p_deadline = time + data->timer_period;
+ data->deadline = p_deadline;
+ }
+
+ if (p_deadline < t_deadline)
+ t_deadline = p_deadline;
+ }
+
+ /* Timer for custom pipelines */
+ for (j = 0; j < n_custom; j++) {
+ struct app_thread_pipeline_data *data =
+ &t->custom[j];
+ uint64_t p_deadline = data->deadline;
+
+ if (p_deadline <= time) {
+ data->f_timer(data->be);
+ p_deadline = time + data->timer_period;
+ data->deadline = p_deadline;
+ }
+
+ if (p_deadline < t_deadline)
+ t_deadline = p_deadline;
+ }
+
+ /* Timer for thread message request */
+ {
+ uint64_t deadline = t->thread_req_deadline;
+
+ if (deadline <= time) {
+ thread_msg_req_handle(t);
+ thread_headroom_update(t, time);
+ deadline = time + t->timer_period;
+ t->thread_req_deadline = deadline;
+ }
+
+ if (deadline < t_deadline)
+ t_deadline = deadline;
+ }
+
+
+ t->deadline = t_deadline;
+ }
+ }
+
+ return 0;
+}
diff --git a/common/vnf_common/thread.h b/common/vnf_common/thread.h
new file mode 100644
index 00000000..24bcd233
--- /dev/null
+++ b/common/vnf_common/thread.h
@@ -0,0 +1,81 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef THREAD_H_
+#define THREAD_H_
+
+#include "app.h"
+#include "pipeline_be.h"
+
+enum thread_msg_req_type {
+ THREAD_MSG_REQ_PIPELINE_ENABLE = 0,
+ THREAD_MSG_REQ_PIPELINE_DISABLE,
+ THREAD_MSG_REQ_HEADROOM_READ,
+ THREAD_MSG_REQS
+};
+
+struct thread_msg_req {
+ enum thread_msg_req_type type;
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/*
+ * PIPELINE ENABLE
+ */
+struct thread_pipeline_enable_msg_req {
+ enum thread_msg_req_type type;
+
+ uint32_t pipeline_id;
+ void *be;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ uint64_t timer_period;
+};
+
+struct thread_pipeline_enable_msg_rsp {
+ int status;
+};
+
+/*
+ * PIPELINE DISABLE
+ */
+struct thread_pipeline_disable_msg_req {
+ enum thread_msg_req_type type;
+
+ uint32_t pipeline_id;
+};
+
+struct thread_pipeline_disable_msg_rsp {
+ int status;
+};
+
+/*
+ * THREAD HEADROOM
+ */
+struct thread_headroom_read_msg_req {
+ enum thread_msg_req_type type;
+};
+
+struct thread_headroom_read_msg_rsp {
+ int status;
+
+ double headroom_ratio;
+};
+
+#endif /* THREAD_H_ */
diff --git a/common/vnf_common/thread_fe.c b/common/vnf_common/thread_fe.c
new file mode 100644
index 00000000..7029620d
--- /dev/null
+++ b/common/vnf_common/thread_fe.c
@@ -0,0 +1,480 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "thread.h"
+#include "thread_fe.h"
+#include "pipeline.h"
+#include "pipeline_common_fe.h"
+#include "app.h"
+
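+/*
+ * Post a request on the target thread's input ring and busy-wait for the
+ * response on its output ring; timeout_ms of 0 means wait forever. Returns
+ * the response message, or NULL if the rings cannot be found or the wait
+ * times out.
+ */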
+static inline void *
+thread_msg_send_recv(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id,
+ void *msg,
+ uint32_t timeout_ms)
+{
+ struct rte_ring *r_req = app_thread_msgq_in_get(app,
+ socket_id, core_id, ht_id);
+ if (r_req == NULL)
+ return NULL;
+ struct rte_ring *r_rsp = app_thread_msgq_out_get(app,
+ socket_id, core_id, ht_id);
+ if (r_rsp == NULL)
+ return NULL;
+ uint64_t hz = rte_get_tsc_hz();
+ void *msg_recv;
+ uint64_t deadline;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(r_req, (void *) msg);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ deadline = (timeout_ms) ?
+ (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
+ UINT64_MAX;
+
+ do {
+ if (rte_rdtsc() > deadline)
+ return NULL;
+
+ status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
+ } while (status != 0);
+
+ return msg_recv;
+}
+
+int
+app_pipeline_enable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id)
+{
+ struct thread_pipeline_enable_msg_req *req;
+ struct thread_pipeline_enable_msg_rsp *rsp;
+ int thread_id;
+ struct app_pipeline_data *p;
+ struct app_pipeline_params *p_params;
+ struct pipeline_type *p_type;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
+ return -1;
+
+ if (app_pipeline_data(app, pipeline_id) == NULL)
+ return -1;
+
+ p = &app->pipeline_data[pipeline_id];
+ p_params = &app->pipeline_params[pipeline_id];
+ p_type = app_pipeline_type_find(app, p_params->type);
+ if (p_type == NULL)
+ return -1;
+
+ if (p->enabled == 1)
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_PIPELINE_ENABLE;
+ req->pipeline_id = pipeline_id;
+ req->be = p->be;
+ req->f_run = p_type->be_ops->f_run;
+ req->f_timer = p_type->be_ops->f_timer;
+ req->timer_period = p->timer_period;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+ app_msg_free(app, rsp);
+
+ if (status != 0)
+ return -1;
+
+ p->enabled = 1;
+ return 0;
+}
+
+int
+app_pipeline_disable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id)
+{
+ struct thread_pipeline_disable_msg_req *req;
+ struct thread_pipeline_disable_msg_rsp *rsp;
+ int thread_id;
+ struct app_pipeline_data *p;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
+ return -1;
+
+ if (app_pipeline_data(app, pipeline_id) == NULL)
+ return -1;
+
+ p = &app->pipeline_data[pipeline_id];
+
+ if (p->enabled == 0)
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_PIPELINE_DISABLE;
+ req->pipeline_id = pipeline_id;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+ app_msg_free(app, rsp);
+
+ if (status != 0)
+ return -1;
+
+ p->enabled = 0;
+ return 0;
+}
+
+int
+app_thread_headroom(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id)
+{
+ struct thread_headroom_read_msg_req *req;
+ struct thread_headroom_read_msg_rsp *rsp;
+ int thread_id;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_HEADROOM_READ;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+
+ if (status != 0) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ printf("%.3f%%\n", rsp->headroom_ratio * 100);
+
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * pipeline enable: "t <thread> pipeline <pipeline_id> enable"
+ */
+
+struct cmd_pipeline_enable_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t pipeline_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t enable_string;
+};
+
+static void
+cmd_pipeline_enable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pipeline_enable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_pipeline_enable(app,
+ socket_id,
+ core_id,
+ hyper_th_id,
+ params->pipeline_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_string, "t");
+
+cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_id_string,
+ NULL);
+
+cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_string,
+ "pipeline");
+
+cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, enable_string,
+ "enable");
+
+cmdline_parse_inst_t cmd_pipeline_enable = {
+ .f = cmd_pipeline_enable_parsed,
+ .data = NULL,
+ .help_str = "Enable pipeline on specified core",
+ .tokens = {
+ (void *)&cmd_pipeline_enable_t_string,
+ (void *)&cmd_pipeline_enable_t_id_string,
+ (void *)&cmd_pipeline_enable_pipeline_string,
+ (void *)&cmd_pipeline_enable_pipeline_id,
+ (void *)&cmd_pipeline_enable_enable_string,
+ NULL,
+ },
+};
+
+/*
+ * pipeline disable: "t <thread> pipeline <pipeline_id> disable"
+ */
+
+struct cmd_pipeline_disable_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t pipeline_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t disable_string;
+};
+
+static void
+cmd_pipeline_disable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pipeline_disable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_pipeline_disable(app,
+ socket_id,
+ core_id,
+ hyper_th_id,
+ params->pipeline_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_string, "t");
+
+cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_id_string,
+ NULL);
+
+cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result,
+ pipeline_string, "pipeline");
+
+cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipeline_disable_result, pipeline_id,
+ UINT32);
+
+cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, disable_string,
+ "disable");
+
+cmdline_parse_inst_t cmd_pipeline_disable = {
+ .f = cmd_pipeline_disable_parsed,
+ .data = NULL,
+ .help_str = "Disable pipeline on specified core",
+ .tokens = {
+ (void *)&cmd_pipeline_disable_t_string,
+ (void *)&cmd_pipeline_disable_t_id_string,
+ (void *)&cmd_pipeline_disable_pipeline_string,
+ (void *)&cmd_pipeline_disable_pipeline_id,
+ (void *)&cmd_pipeline_disable_disable_string,
+ NULL,
+ },
+};
+
+
+/*
+ * thread headroom: "t <thread> headroom"
+ */
+
+struct cmd_thread_headroom_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t headroom_string;
+};
+
+static void
+cmd_thread_headroom_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_thread_headroom_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_thread_headroom(app,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+cmdline_parse_token_string_t cmd_thread_headroom_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ t_string, "t");
+
+cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ t_id_string, NULL);
+
+cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ headroom_string, "headroom");
+
+cmdline_parse_inst_t cmd_thread_headroom = {
+ .f = cmd_thread_headroom_parsed,
+ .data = NULL,
+ .help_str = "Display thread headroom",
+ .tokens = {
+ (void *)&cmd_thread_headroom_t_string,
+ (void *)&cmd_thread_headroom_t_id_string,
+ (void *)&cmd_thread_headroom_headroom_string,
+ NULL,
+ },
+};
+
+
+static cmdline_parse_ctx_t thread_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_pipeline_enable,
+ (cmdline_parse_inst_t *) &cmd_pipeline_disable,
+ (cmdline_parse_inst_t *) &cmd_thread_headroom,
+ NULL,
+};
+
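+/*
+ * Append the thread CLI commands to the application's command array and
+ * bind each of them to the app context.
+ */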
+int
+app_pipeline_thread_cmd_push(struct app_params *app)
+{
+ uint32_t n_cmds, i;
+
+ /* Check for available slots in the application commands array */
+ n_cmds = RTE_DIM(thread_cmds) - 1;
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push thread commands into the application */
+ memcpy(&app->cmds[app->n_cmds], thread_cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
diff --git a/common/vnf_common/thread_fe.h b/common/vnf_common/thread_fe.h
new file mode 100644
index 00000000..7499bffd
--- /dev/null
+++ b/common/vnf_common/thread_fe.h
@@ -0,0 +1,84 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef THREAD_FE_H_
+#define THREAD_FE_H_
+
+#include "app.h"
+
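+/*
+ * Look up the request ring of the thread running on the given
+ * socket/core/hyper-thread. The ring is found by name, e.g.
+ * "MSGQ-REQ-CORE-s0c1" for socket 0, core 1 (a trailing "h" selects the
+ * hyper-thread sibling).
+ */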
+static inline struct rte_ring *
+app_thread_msgq_in_get(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
+{
+ char msgq_name[32];
+ ssize_t param_idx;
+
+ snprintf(msgq_name, sizeof(msgq_name),
+ "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ socket_id,
+ core_id,
+ (ht_id) ? "h" : "");
+ param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
+
+ if (param_idx < 0)
+ return NULL;
+
+ return app->msgq[param_idx];
+}
+
+static inline struct rte_ring *
+app_thread_msgq_out_get(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
+{
+ char msgq_name[32];
+ ssize_t param_idx;
+
+ snprintf(msgq_name, sizeof(msgq_name),
+ "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ socket_id,
+ core_id,
+ (ht_id) ? "h" : "");
+ param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
+
+ if (param_idx < 0)
+ return NULL;
+
+ return app->msgq[param_idx];
+
+}
+
+int
+app_pipeline_thread_cmd_push(struct app_params *app);
+
+int
+app_pipeline_enable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id);
+
+int
+app_pipeline_disable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id);
+
+int
+app_thread_headroom(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id);
+
+#endif /* THREAD_FE_H_ */
diff --git a/common/vnf_common/vnf_common.c b/common/vnf_common/vnf_common.c
new file mode 100644
index 00000000..6ce815be
--- /dev/null
+++ b/common/vnf_common/vnf_common.c
@@ -0,0 +1,146 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include "vnf_common.h"
+#include "pipeline_arpicmp_be.h"
+#ifndef VNF_ACL
+#include "lib_arp.h"
+#endif
+
+uint8_t in_port_dir_a[PIPELINE_MAX_PORT_IN];
+uint8_t prv_to_pub_map[PIPELINE_MAX_PORT_IN];
+uint8_t pub_to_prv_map[PIPELINE_MAX_PORT_IN];
+uint8_t prv_in_port_a[PIPELINE_MAX_PORT_IN];
+uint8_t prv_que_port_index[PIPELINE_MAX_PORT_IN];
+uint8_t in_port_egress_prv[PIPELINE_MAX_PORT_IN];
+
+uint8_t get_in_port_dir(uint8_t in_port_id)
+{
+ return in_port_dir_a[in_port_id];
+}
+
+uint8_t is_phy_port_privte(uint16_t phy_port)
+{
+ return in_port_dir_a[phy_port];
+}
+
+uint8_t is_port_index_privte(uint16_t phy_port)
+{
+ return in_port_egress_prv[phy_port];
+}
+
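+/*
+ * Resolve the egress interface for a destination address via a next-hop
+ * lookup: type 4 uses the IPv4 table (get_nh), type 6 the IPv6 table
+ * (get_nh_ipv6). Returns the interface index, or 0xff when no route exists.
+ */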
+uint32_t get_prv_to_pub_port(uint32_t *ip_addr, uint8_t type)
+{
+ uint32_t dest_if = 0xff;
+
+ switch (type) {
+ case 4:
+ {
+ uint32_t nhip;
+ nhip = get_nh(ip_addr[0], &dest_if);
+
+ if (nhip)
+ return dest_if;
+ return 0xff;
+ }
+ break;
+ case 6:
+ {
+ uint8_t nhipv6[16];
+ get_nh_ipv6((uint8_t *)ip_addr, &dest_if, &nhipv6[0]);
+ if (dest_if != 0xff)
+ return dest_if;
+ return 0xff;
+ }
+ break;
+ }
+ return 0xff;
+}
+
+uint32_t get_pub_to_prv_port(uint32_t *ip_addr, uint8_t type)
+{
+ uint32_t dest_if = 0xff;
+
+ switch (type) {
+ case 4:
+ {
+ uint32_t nhip;
+ nhip = get_nh(ip_addr[0], &dest_if);
+
+ if (nhip)
+ return dest_if;
+ return 0xff;
+ }
+ break;
+ case 6:
+ {
+ uint8_t nhipv6[16];
+ get_nh_ipv6((uint8_t *)ip_addr, &dest_if, &nhipv6[0]);
+ if (dest_if != 0xff)
+ return dest_if;
+ return 0xff;
+ }
+ break;
+ }
+ return 0xff;
+}
+
+void show_ports_info(void)
+{
+ printf("\nin_port_dir_a: %d %d %d %d %d", in_port_dir_a[0],
+ in_port_dir_a[1], in_port_dir_a[2], in_port_dir_a[3],
+ in_port_dir_a[4]);
+
+ uint8_t i = 0, j = 0;
+
+ printf("\nprv_to_pub_map: ");
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ if (prv_to_pub_map[i] != 0xff)
+ printf("(%d,%d) ", i, prv_to_pub_map[i]);
+ }
+
+ printf("\npub_to_prv_map: ");
+ for (i = 0; i < PIPELINE_MAX_PORT_IN; i++) {
+ if (pub_to_prv_map[i] != 0xff)
+ printf("(%d,%d) ", i, pub_to_prv_map[i]);
+ }
+
+ printf("\n%d entries in Ports MAC List\n", link_hw_addr_array_idx);
+ for (j = 0; j < link_hw_addr_array_idx; j++) {
+ struct ether_addr *link_hw_addr = get_link_hw_addr(j);
+
+ for (i = 0; i < 6; i++)
+ printf(" %02x ", link_hw_addr->addr_bytes[i]);
+ printf("\n");
+ }
+}
+
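+/*
+ * Strip every whitespace character from the string in place (not just
+ * leading and trailing blanks).
+ */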
+void trim(char *input)
+{
+ int i, j = 0;
+ int len = strlen(input);
+ char result[len + 1];
+
+ memset(result, 0, sizeof(result));
+ for (i = 0; input[i] != '\0'; i++) {
+ if (!isspace(input[i]))
+ result[j++] = input[i];
+ }
+
+ strncpy(input, result, len);
+}
diff --git a/common/vnf_common/vnf_common.h b/common/vnf_common/vnf_common.h
new file mode 100644
index 00000000..a6b1aaa2
--- /dev/null
+++ b/common/vnf_common/vnf_common.h
@@ -0,0 +1,200 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_VNF_COMMON_H__
+#define __INCLUDE_VNF_COMMON_H__
+
+#include <rte_pipeline.h>
+#include <rte_ether.h>
+
+#define MBUF_HDR_ROOM 256
+#define ETH_HDR_SIZE 14
+#define IP_HDR_SRC_ADR_OFST 12
+#define IP_HDR_DST_ADR_OFST 16
+#define IP_HDR_PROTOCOL_OFST 9
+#define IP_HDR_SIZE 20
+#define IPV6_HDR_SRC_ADR_OFST 8
+#define IPV6_HDR_DST_ADR_OFST 24
+#define IPV6_HDR_PROTOCOL_OFST 6
+#define IPV6_HDR_SIZE 40
+
+#define ETH_TYPE_ARP 0x0806
+#define ETH_TYPE_IPV4 0x0800
+
+#define IP_PROTOCOL_ICMP 1
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 17
+
+#define ETH_TYPE_IPV6 0x86DD
+#define IP_PROTOCOL_ICMPV6 58
+
+#define PKT_ING_DIR 0
+#define PKT_EGR_DIR 1
+
+#ifndef PIPELINE_MAX_PORT_IN
+#define PIPELINE_MAX_PORT_IN 16
+#endif
+
+#define RTE_PIPELINE_MAX_NAME_SZ 124
+
+#define INVALID_DESTIF 255
+
+enum {
+ VNF_PRV_PORT_ID,
+ VNF_PUB_PORT_ID,
+};
+void show_ports_info(void);
+void trim(char *input);
+uint8_t get_in_port_dir(uint8_t in_port_id);
+uint8_t is_phy_port_privte(uint16_t phy_port);
+uint32_t get_prv_to_pub_port(uint32_t *ip_addr, uint8_t type);
+uint32_t get_pub_to_prv_port(uint32_t *ip_addr, uint8_t type);
+
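+/* Clear this packet's bit in the pipeline's valid-packet mask (XOR toggles
+ * the bit, so the packet is assumed to be currently marked valid).
+ */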
+static inline void drop_pkt(uint32_t pkt_num, uint64_t *mask)
+{
+ *mask ^= 1LLU << pkt_num;
+}
+
+extern uint8_t in_port_dir_a[PIPELINE_MAX_PORT_IN];
+extern uint8_t prv_to_pub_map[PIPELINE_MAX_PORT_IN];
+extern uint8_t pub_to_prv_map[PIPELINE_MAX_PORT_IN];
+extern uint8_t prv_in_port_a[PIPELINE_MAX_PORT_IN];
+
+extern uint32_t link_hw_addr_array_idx;
+
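+/*
+ * The structures below appear to mirror DPDK's internal librte_pipeline and
+ * librte_port layouts so that VNF code can inspect port, table and pipeline
+ * state directly; they must be kept in sync with the linked DPDK version.
+ */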
+struct rte_port_in {
+ /* Input parameters */
+ struct rte_port_in_ops ops;
+ rte_pipeline_port_in_action_handler f_action;
+ void *arg_ah;
+ uint32_t burst_size;
+
+ /* The table to which this port is connected */
+ uint32_t table_id;
+
+ /* Handle to low-level port */
+ void *h_port;
+
+ /* List of enabled ports */
+ struct rte_port_in *next;
+
+ /* Statistics */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+struct rte_port_out {
+ /* Input parameters */
+ struct rte_port_out_ops ops;
+ rte_pipeline_port_out_action_handler f_action;
+ void *arg_ah;
+
+ /* Handle to low-level port */
+ void *h_port;
+
+ /* Statistics */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+struct rte_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ rte_pipeline_table_action_handler_miss f_action_miss;
+ void *arg_ah;
+ struct rte_pipeline_table_entry *default_entry;
+ uint32_t entry_size;
+
+ uint32_t table_next_id;
+ uint32_t table_next_id_valid;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+
+ /* Statistics */
+ uint64_t n_pkts_dropped_by_lkp_hit_ah;
+ uint64_t n_pkts_dropped_by_lkp_miss_ah;
+ uint64_t n_pkts_dropped_lkp_hit;
+ uint64_t n_pkts_dropped_lkp_miss;
+};
+
+
+struct rte_pipeline {
+ /* Input parameters */
+ char name[RTE_PIPELINE_MAX_NAME_SZ];
+ int socket_id;
+ uint32_t offset_port_id;
+
+ /* Internal tables */
+ struct rte_port_in ports_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct rte_port_out ports_out[RTE_PIPELINE_PORT_OUT_MAX];
+ struct rte_table tables[RTE_PIPELINE_TABLE_MAX];
+
+ /* Occupancy of internal tables */
+ uint32_t num_ports_in;
+ uint32_t num_ports_out;
+ uint32_t num_tables;
+
+ /* List of enabled ports */
+ uint64_t enabled_port_in_mask;
+ struct rte_port_in *port_in_next;
+
+ /* Pipeline run structures */
+ struct rte_mbuf *pkts[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_pipeline_table_entry *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t action_mask0[RTE_PIPELINE_ACTIONS];
+ uint64_t action_mask1[RTE_PIPELINE_ACTIONS];
+ uint64_t pkts_mask;
+ uint64_t n_pkts_ah_drop;
+ uint64_t pkts_drop_mask;
+} __rte_cache_aligned;
+
+/* Mirrors of DPDK librte_port reader/writer structures, used to read HWQ and SWQ state */
+struct rte_port_ethdev_writer {
+ struct rte_port_out_stats stats;
+
+ struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
+ uint32_t tx_burst_sz;
+ uint16_t tx_buf_count;
+ uint64_t bsz_mask;
+ uint16_t queue_id;
+ uint8_t port_id;
+};
+struct rte_port_ethdev_reader {
+ struct rte_port_in_stats stats;
+
+ uint16_t queue_id;
+ uint8_t port_id;
+};
+struct rte_port_ring_writer {
+ struct rte_port_out_stats stats;
+
+ struct rte_mbuf *tx_buf[2 * RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_ring *ring;
+ uint32_t tx_burst_sz;
+ uint32_t tx_buf_count;
+ uint64_t bsz_mask;
+ uint32_t is_multi;
+};
+struct rte_port_ring_reader {
+ struct rte_port_in_stats stats;
+
+ struct rte_ring *ring;
+};
+
+uint8_t is_port_index_privte(uint16_t phy_port);
+#endif
diff --git a/common/vnf_common/vnf_define.h b/common/vnf_common/vnf_define.h
new file mode 100644
index 00000000..380ada5f
--- /dev/null
+++ b/common/vnf_common/vnf_define.h
@@ -0,0 +1,29 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef __INCLUDE_VNF_DEFINE_H__
+#define __INCLUDE_VNF_DEFINE_H__
+#define DEBUG_LEVEL_4 4
+#define PKT_BUFFER_SIZE 64
+#define PVT_PUB_MAP 2
+#define IPV6_ADD_SIZE 16
+#define TWO_BYTE_PRINT 3
+#define VERSION_NO_BYTE 4
+#define BIT_CARRY 16
+#define HW_ADDR_SIZE 20
+#define IPV6_ADD_CMP_MULTI 13
+#define DIV_CONV_HZ_SEC 1000
+#endif