summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--INFO16
-rw-r--r--INFO.yaml42
-rw-r--r--VNFs/DPPD-PROX/Makefile73
-rw-r--r--VNFs/DPPD-PROX/README118
-rw-r--r--VNFs/DPPD-PROX/acl_field_def.h12
-rw-r--r--VNFs/DPPD-PROX/arp.h87
-rw-r--r--VNFs/DPPD-PROX/bng_pkts.h35
-rw-r--r--VNFs/DPPD-PROX/cfgfile.c19
-rw-r--r--VNFs/DPPD-PROX/cfgfile.h3
-rw-r--r--VNFs/DPPD-PROX/clock.c3
-rw-r--r--VNFs/DPPD-PROX/cmd_parser.c727
-rw-r--r--VNFs/DPPD-PROX/commands.c221
-rw-r--r--VNFs/DPPD-PROX/commands.h3
-rw-r--r--VNFs/DPPD-PROX/config/cgnat.cfg2
-rw-r--r--VNFs/DPPD-PROX/config/ipv6.cfg85
-rw-r--r--VNFs/DPPD-PROX/config/l2fwd-4ports.cfg2
-rw-r--r--VNFs/DPPD-PROX/config/l3fwd-4ports.cfg2
-rw-r--r--VNFs/DPPD-PROX/config/mcast.cfg108
-rw-r--r--VNFs/DPPD-PROX/config/nop-rings.cfg2
-rw-r--r--VNFs/DPPD-PROX/config/nop.cfg2
-rw-r--r--VNFs/DPPD-PROX/config/nsh_acl.cfg2
-rw-r--r--VNFs/DPPD-PROX/config/nsh_nat.cfg2
-rw-r--r--VNFs/DPPD-PROX/defaults.c88
-rw-r--r--VNFs/DPPD-PROX/defaults.h29
-rw-r--r--VNFs/DPPD-PROX/defines.h9
-rw-r--r--VNFs/DPPD-PROX/display.c14
-rw-r--r--VNFs/DPPD-PROX/display.h6
-rw-r--r--VNFs/DPPD-PROX/display_latency.c18
-rw-r--r--VNFs/DPPD-PROX/display_latency_distr.c190
-rw-r--r--VNFs/DPPD-PROX/display_latency_distr.h (renamed from VNFs/DPPD-PROX/handle_swap.h)11
-rw-r--r--VNFs/DPPD-PROX/display_pkt_len.c2
-rw-r--r--VNFs/DPPD-PROX/display_ports.c34
-rw-r--r--VNFs/DPPD-PROX/display_rings.c2
-rw-r--r--VNFs/DPPD-PROX/display_tasks.c18
-rw-r--r--VNFs/DPPD-PROX/eld.h7
-rw-r--r--VNFs/DPPD-PROX/file_utils.c3
-rw-r--r--VNFs/DPPD-PROX/gen/gen_tap.cfg69
-rw-r--r--VNFs/DPPD-PROX/gen/l3-ipv4.lua29
-rw-r--r--VNFs/DPPD-PROX/genl4_stream.h4
-rw-r--r--VNFs/DPPD-PROX/genl4_stream_tcp.c123
-rw-r--r--VNFs/DPPD-PROX/genl4_stream_udp.c16
-rw-r--r--VNFs/DPPD-PROX/git_version.c.in1
-rw-r--r--VNFs/DPPD-PROX/handle_acl.c23
-rw-r--r--VNFs/DPPD-PROX/handle_aggregator.c14
-rw-r--r--VNFs/DPPD-PROX/handle_arp.c14
-rw-r--r--VNFs/DPPD-PROX/handle_blockudp.c4
-rw-r--r--VNFs/DPPD-PROX/handle_cgnat.c189
-rw-r--r--VNFs/DPPD-PROX/handle_classify.c11
-rw-r--r--VNFs/DPPD-PROX/handle_dump.c22
-rw-r--r--VNFs/DPPD-PROX/handle_esp.c1216
-rw-r--r--VNFs/DPPD-PROX/handle_fm.c19
-rw-r--r--VNFs/DPPD-PROX/handle_gen.c1173
-rw-r--r--VNFs/DPPD-PROX/handle_gen.h6
-rw-r--r--VNFs/DPPD-PROX/handle_genl4.c22
-rw-r--r--VNFs/DPPD-PROX/handle_gre_decap_encap.c64
-rw-r--r--VNFs/DPPD-PROX/handle_impair.c85
-rw-r--r--VNFs/DPPD-PROX/handle_impair.h4
-rw-r--r--VNFs/DPPD-PROX/handle_ipv6_tunnel.c72
-rw-r--r--VNFs/DPPD-PROX/handle_irq.c3
-rw-r--r--VNFs/DPPD-PROX/handle_l2fwd.c23
-rw-r--r--VNFs/DPPD-PROX/handle_lat.c339
-rw-r--r--VNFs/DPPD-PROX/handle_lat.h23
-rw-r--r--VNFs/DPPD-PROX/handle_lb_5tuple.c15
-rw-r--r--VNFs/DPPD-PROX/handle_lb_net.c30
-rw-r--r--VNFs/DPPD-PROX/handle_lb_pos.c6
-rw-r--r--VNFs/DPPD-PROX/handle_lb_qinq.c28
-rw-r--r--VNFs/DPPD-PROX/handle_master.c1092
-rw-r--r--VNFs/DPPD-PROX/handle_master.h91
-rw-r--r--VNFs/DPPD-PROX/handle_mirror.c49
-rw-r--r--VNFs/DPPD-PROX/handle_mplstag.c14
-rw-r--r--VNFs/DPPD-PROX/handle_nat.c16
-rw-r--r--VNFs/DPPD-PROX/handle_nop.c6
-rw-r--r--VNFs/DPPD-PROX/handle_nsh.c74
-rw-r--r--VNFs/DPPD-PROX/handle_police.c54
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_decap4.c39
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_decap6.c8
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_encap4.c16
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_encap4.h10
-rw-r--r--VNFs/DPPD-PROX/handle_qinq_encap6.c12
-rw-r--r--VNFs/DPPD-PROX/handle_qos.c15
-rw-r--r--VNFs/DPPD-PROX/handle_routing.c31
-rw-r--r--VNFs/DPPD-PROX/handle_sched.h45
-rw-r--r--VNFs/DPPD-PROX/handle_swap.c489
-rw-r--r--VNFs/DPPD-PROX/handle_tsc.c7
-rw-r--r--VNFs/DPPD-PROX/handle_untag.c12
-rw-r--r--VNFs/DPPD-PROX/hash_entry_types.h3
-rw-r--r--VNFs/DPPD-PROX/hash_utils.c5
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/README128
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py412
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test56
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py248
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms31
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py574
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test59
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile119
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/README183
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s94
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/centos.json52
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service12
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh78
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/config_file8
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg81
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg47
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg)40
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg)39
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg78
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg)27
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.cfg)15
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2gen.cfg)31
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg59
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2swap.cfg)19
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg)13
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg)14
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg10
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg)21
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg50
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg47
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py64
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py53
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh305
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh12
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh97
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml105
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua77
-rw-r--r--[-rwxr-xr-x]VNFs/DPPD-PROX/helper-scripts/rapid/machine.map (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/devbind.sh)29
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml168
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml10
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml33
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile42
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build101
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c70
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py293
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml94
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml82
-rw-r--r--[-rwxr-xr-x]VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh)22
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py93
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py90
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py36
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py326
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py181
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore23
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml6
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml26
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml36
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml8
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py108
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py106
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py236
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py264
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py140
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py259
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py193
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py83
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key49
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub1
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py164
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py441
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py52
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py56
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py199
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg16
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/setup.py9
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh31
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py177
-rwxr-xr-xVNFs/DPPD-PROX/helper-scripts/rapid/start.sh43
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/README194
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test54
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test61
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test51
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test65
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test73
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test63
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test31
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test70
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test)57
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test64
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test65
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test37
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2zeroloss.test)43
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test60
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test (renamed from VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test)53
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test32
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test60
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile28
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml13
-rw-r--r--VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml54
-rw-r--r--VNFs/DPPD-PROX/igmp.h59
-rw-r--r--VNFs/DPPD-PROX/input_conn.c17
-rw-r--r--VNFs/DPPD-PROX/input_conn.h1
-rw-r--r--VNFs/DPPD-PROX/input_curses.c10
-rw-r--r--VNFs/DPPD-PROX/ip6_addr.h4
-rw-r--r--VNFs/DPPD-PROX/lconf.c35
-rw-r--r--VNFs/DPPD-PROX/lconf.h10
-rw-r--r--VNFs/DPPD-PROX/log.c40
-rw-r--r--VNFs/DPPD-PROX/log.h4
-rw-r--r--VNFs/DPPD-PROX/main.c423
-rw-r--r--VNFs/DPPD-PROX/mbuf_utils.h5
-rw-r--r--VNFs/DPPD-PROX/meson.build206
-rw-r--r--VNFs/DPPD-PROX/meson_options.txt9
-rw-r--r--VNFs/DPPD-PROX/packet_utils.c829
-rw-r--r--VNFs/DPPD-PROX/packet_utils.h57
-rw-r--r--VNFs/DPPD-PROX/parse_utils.c107
-rw-r--r--VNFs/DPPD-PROX/parse_utils.h9
-rw-r--r--VNFs/DPPD-PROX/pkt_parser.h31
-rw-r--r--VNFs/DPPD-PROX/pkt_prototypes.h2
-rw-r--r--VNFs/DPPD-PROX/prox_args.c530
-rw-r--r--VNFs/DPPD-PROX/prox_cfg.h17
-rw-r--r--VNFs/DPPD-PROX/prox_cksum.c55
-rw-r--r--VNFs/DPPD-PROX/prox_cksum.h25
-rw-r--r--VNFs/DPPD-PROX/prox_compat.c30
-rw-r--r--VNFs/DPPD-PROX/prox_compat.h545
-rw-r--r--VNFs/DPPD-PROX/prox_globals.h1
-rw-r--r--VNFs/DPPD-PROX/prox_ipv6.c339
-rw-r--r--VNFs/DPPD-PROX/prox_ipv6.h141
-rw-r--r--VNFs/DPPD-PROX/prox_lua.c9
-rw-r--r--VNFs/DPPD-PROX/prox_lua_types.c27
-rw-r--r--VNFs/DPPD-PROX/prox_lua_types.h10
-rw-r--r--VNFs/DPPD-PROX/prox_port_cfg.c628
-rw-r--r--VNFs/DPPD-PROX/prox_port_cfg.h38
-rw-r--r--VNFs/DPPD-PROX/prox_shared.c12
-rw-r--r--VNFs/DPPD-PROX/qinq.h7
-rw-r--r--VNFs/DPPD-PROX/quit.h3
-rw-r--r--VNFs/DPPD-PROX/run.c19
-rw-r--r--VNFs/DPPD-PROX/rw_reg.c4
-rw-r--r--VNFs/DPPD-PROX/rx_pkt.c254
-rw-r--r--VNFs/DPPD-PROX/rx_pkt.h8
-rw-r--r--VNFs/DPPD-PROX/stats_irq.h2
-rw-r--r--VNFs/DPPD-PROX/stats_latency.c36
-rw-r--r--VNFs/DPPD-PROX/stats_latency.h8
-rw-r--r--VNFs/DPPD-PROX/stats_parser.c22
-rw-r--r--VNFs/DPPD-PROX/stats_port.c22
-rw-r--r--VNFs/DPPD-PROX/stats_task.c21
-rw-r--r--VNFs/DPPD-PROX/stats_task.h22
-rw-r--r--VNFs/DPPD-PROX/swap_tap.cfg50
-rw-r--r--VNFs/DPPD-PROX/task_base.h24
-rw-r--r--VNFs/DPPD-PROX/task_init.c48
-rw-r--r--VNFs/DPPD-PROX/task_init.h55
-rw-r--r--VNFs/DPPD-PROX/thread_generic.c25
-rw-r--r--VNFs/DPPD-PROX/toeplitz.c4
-rw-r--r--VNFs/DPPD-PROX/toeplitz.h2
-rw-r--r--VNFs/DPPD-PROX/tx_pkt.c258
-rw-r--r--VNFs/DPPD-PROX/tx_pkt.h133
-rw-r--r--VNFs/DPPD-PROX/version.h18
-rw-r--r--VNFs/DPPD-PROX/vxlangpe_nsh.h11
-rw-r--r--VNFs/vACL/Makefile2
-rw-r--r--VNFs/vACL/pipeline/pipeline_acl.h2
-rw-r--r--VNFs/vACL/pipeline/pipeline_acl_be.c15
-rw-r--r--VNFs/vCGNAPT/Makefile2
-rw-r--r--VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h2
-rw-r--r--VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c57
-rw-r--r--VNFs/vFW/Makefile2
-rw-r--r--VNFs/vFW/pipeline/pipeline_vfw.h2
-rw-r--r--VNFs/vFW/pipeline/pipeline_vfw_be.c17
-rw-r--r--common/VIL/gateway/gateway.c21
-rw-r--r--common/VIL/gateway/gateway.h46
-rw-r--r--common/VIL/l2l3_stack/lib_arp.c6
-rw-r--r--common/vnf_common/rest_api.c2
-rw-r--r--docs/conf.py4
-rw-r--r--docs/conf.yaml3
-rw-r--r--docs/index.rst17
-rw-r--r--docs/release/release-notes/release-notes.rst221
-rw-r--r--docs/release/results/overview.rst2
-rw-r--r--docs/release/results/results.rst4
-rw-r--r--docs/requirements.txt2
-rw-r--r--docs/testing/developer/design/02-Get_started_Guide.rst2
-rw-r--r--docs/testing/developer/design/04-SampleVNF_Design.rst77
-rw-r--r--docs/testing/developer/requirements/03-Requirements.rst2
-rw-r--r--[-rwxr-xr-x]docs/testing/user/userguide/01-introduction.rst48
-rw-r--r--docs/testing/user/userguide/01-prox_documentation.rst4
-rw-r--r--docs/testing/user/userguide/02-methodology.rst101
-rw-r--r--[-rwxr-xr-x]docs/testing/user/userguide/03-architecture.rst14
-rw-r--r--docs/testing/user/userguide/03-installation.rst162
-rw-r--r--docs/testing/user/userguide/04-installation.rst230
-rw-r--r--docs/testing/user/userguide/04-running_the_test.rst226
-rw-r--r--docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst26
-rw-r--r--docs/testing/user/userguide/06-How_to_use_REST_api.rst23
-rw-r--r--docs/testing/user/userguide/07-Config_files.rst2
-rw-r--r--docs/testing/user/userguide/images/rapid.pngbin0 -> 34588 bytes
-rw-r--r--docs/testing/user/userguide/index.rst15
-rw-r--r--docs/testing/user/userguide/references.rst6
-rw-r--r--rapidvm/README.rst38
-rwxr-xr-xrapidvm/dib/build-image.sh99
-rw-r--r--rapidvm/dib/elements/rapid/element-deps5
-rw-r--r--rapidvm/dib/elements/rapid/package-installs.yaml20
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/40-mlib30
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk38
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/60-compile-prox28
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/70-os-cfg50
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/80-change-permissions18
-rwxr-xr-xrapidvm/dib/elements/rapid/post-install.d/81-clean-rpms20
-rw-r--r--rapidvm/dib/elements/rapid/source-repository-dpdk1
-rw-r--r--rapidvm/dib/elements/rapid/source-repository-samplevnf1
-rwxr-xr-xtools/vnf_build.sh3
-rw-r--r--tox.ini19
299 files changed, 19048 insertions, 4903 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..71d7636c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.tox
+docs/_build/*
diff --git a/INFO b/INFO
index a1dcb92d..f27c106b 100644
--- a/INFO
+++ b/INFO
@@ -1,19 +1,19 @@
Project: Sample Virtual Network Function (SAMPLEVNF)
Project Creation Date:27/03/2017
Lifecycle State: Incubation
-Primary Contact: kannan.babu.ramia@intel.com
-Project Lead: deepak.s@intel.com
+Primary Contact: luc.provoost@gmail.com
+Project Lead: luc.provoost@gmail.com
Jira Name: Sample Virtual Network Function
Jira Prefix: [SAMPLEVNF]
mailing list tag:[samplevnf]
Repo: samplevnf
Committers:
-deepak.s@intel.com
-sonika.jindal@intel.com
-anand.b.jyoti@intel.com
-fbrockne@cisco.com
-shang.xiaodong@zte.com.cn
+luc.provoost@gmail.com
+acm@research.att.com
+trevor.cooper@intel.com
+simonartxavier@gmail.com
+patrice.buriez@chenapan.org
-Link to TSC approval: http://meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-02-07-15.00.html
+Link to TSC approval: https://wiki.opnfv.org/display/meetings/OPNFV+TSC+Meeting+2020-09-29
Link to approval of additional submitters:
diff --git a/INFO.yaml b/INFO.yaml
index 7091c004..a335375b 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -4,10 +4,10 @@ project_creation_date: '27/03/2017'
project_category: ''
lifecycle_state: 'Incubation'
project_lead: &opnfv_samplevnf_ptl
- name: 'Deepak S'
- email: 'deepak.s@linux.intel.com'
- company: 'linux.intel.com'
- id: 'ds2'
+ name: 'Luc Provoost'
+ email: 'luc.provoost@gmail.com'
+ company: '-'
+ id: 'LucProvoost'
timezone: 'Unknown'
primary_contact: *opnfv_samplevnf_ptl
issue_tracking:
@@ -34,23 +34,23 @@ repositories:
- 'samplevnf'
committers:
- <<: *opnfv_samplevnf_ptl
- - name: 'Frank Brockners'
- email: 'fbrockne@cisco.com'
- company: 'cisco.com'
- id: 'brockners'
- - name: 'xiaodong shang'
- email: 'shang.xiaodong@zte.com.cn'
- company: 'zte.com.cn'
- id: 'shangxdy'
- - name: 'Deepak S'
- email: 'deepak.s@linux.intel.com'
- company: 'linux.intel.com'
- id: 'ds2'
- - name: 'Sonika Jindal'
- email: 'sonijindal@gmail.com'
- company: 'gmail.com'
- id: 'sonika.jindal'
+ - name: 'Al Morton'
+ email: 'acm@research.att.com'
+ company: 'att.com'
+ id: 'acm'
+ - name: 'Trevor Cooper'
+ email: 'trevor.cooper@intel.com'
+ company: 'intel.com'
+ id: 'trev'
+ - name: 'Xavier Simonart'
+ email: 'simonartxavier@gmail.com'
+ company: '-'
+ id: 'xavier.simonart'
+ - name: 'Patrice Buriez'
+ email: 'patrice.buriez@chenapan.org'
+ company: '-'
+ id: 'pburiez'
tsc:
# yamllint disable rule:line-length
- approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2017/opnfv-meeting.2017-02-07-15.00.html'
+ approval: 'https://ircbot.wl.linuxfoundation.org/meetings/opnfv-meeting/2020/opnfv_tsc_09_29_20/opnfv-meeting-opnfv_tsc_09_29_20.2020-09-29-13.06.html'
# yamllint enable rule:line-length
diff --git a/VNFs/DPPD-PROX/Makefile b/VNFs/DPPD-PROX/Makefile
index 7109cb48..9a675ca0 100644
--- a/VNFs/DPPD-PROX/Makefile
+++ b/VNFs/DPPD-PROX/Makefile
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2010-2019 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -15,25 +15,49 @@
##
ifeq ($(RTE_SDK),)
-$(error "Please define RTE_SDK environment variable")
+define err_msg
+
+Please define RTE_SDK environment variable.
+If DPDK was built with Meson, please use meson to build Prox too.
+***
+endef
+$(error $(err_msg))
endif
# Default target, can be overriden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
+ifeq ($(wildcard $(RTE_SDK)/$(RTE_TARGET)/.),)
+define err_msg
+
+Could not find build target: $(RTE_TARGET)
+Perhaps DPDK was built using meson?
+***
+endef
+$(error $(err_msg))
+endif
+
rte_version_h := $(RTE_SDK)/$(RTE_TARGET)/include/rte_version.h
+rte_config_h := $(RTE_SDK)/$(RTE_TARGET)/include/rte_config.h
rte_ver_part = $(shell sed -n -e 's/^\#define\s*$1\s*\(.*\)$$/\1/p' $(rte_version_h))
+rte_config_part = $(shell sed -n -e 's/^\#define\s*$1\s*\(.*\)$$/\1/p' $(rte_config_h))
rte_ver_eval = $(shell printf '%u' $$(printf '0x%02x%02x%02x%02x' $1 $2 $3 $4))
rte_ver_MMLR = $(call rte_ver_eval,$(call \
rte_ver_part,RTE_VER_MAJOR),$(call \
rte_ver_part,RTE_VER_MINOR),$(call \
rte_ver_part,RTE_VER_PATCH_LEVEL),$(call \
rte_ver_part,RTE_VER_PATCH_RELEASE))
-rte_ver_YMMR = $(call rte_ver_eval,$(call \
+rte_version_YMMR = $(call rte_ver_eval,$(call \
rte_ver_part,RTE_VER_YEAR),$(call \
rte_ver_part,RTE_VER_MONTH),$(call \
rte_ver_part,RTE_VER_MINOR),$(call \
rte_ver_part,RTE_VER_RELEASE))
+rte_config_YMMR = $(call rte_ver_eval,$(call \
+ rte_config_part,RTE_VER_YEAR),$(call \
+ rte_config_part,RTE_VER_MONTH),$(call \
+ rte_config_part,RTE_VER_MINOR),$(call \
+ rte_config_part,RTE_VER_RELEASE))
+rte_ver_YMMR = $(if $(shell test $(rte_config_YMMR) -gt 0 && echo 'y'),$(rte_config_YMMR),$(rte_version_YMMR))
rte_ver_dpdk := $(if $(call rte_ver_part,RTE_VER_MAJOR),$(rte_ver_MMLR),$(rte_ver_YMMR))
rte_ver_comp = $(shell test $(rte_ver_dpdk) $5 $(call rte_ver_eval,$1,$2,$3,$4) && echo 'y')
rte_ver_EQ = $(call rte_ver_comp,$1,$2,$3,$4,-eq)
@@ -125,7 +149,7 @@ CFLAGS += -DPROX_PREFETCH_OFFSET=2
#CFLAGS += -DASSERT
#CFLAGS += -DENABLE_EXTRA_USER_STATISTICS
CFLAGS += -DLATENCY_PER_PACKET
-CFLAGS += -DLATENCY_DETAILS
+CFLAGS += -DLATENCY_HISTOGRAM
CFLAGS += -DGRE_TP
CFLAGS += -std=gnu99
CFLAGS += -D_GNU_SOURCE # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
@@ -172,14 +196,12 @@ SRCS-y += handle_mirror.c
SRCS-y += handle_genl4.c
SRCS-y += handle_ipv6_tunnel.c
SRCS-y += handle_read.c
-ifeq ($(call rte_ver_LT,17,8,0,0),y)
-ifeq ($(call rte_ver_GE,17,2,0,16),y)
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += handle_esp.c
ifneq ($(CONFIG_RTE_LIBRTE_PMD_AESNI_MB),y)
+ifeq ($(FIRST_PROX_MAKE),)
$(warning "Building w/o IPSEC support")
endif
endif
-endif
SRCS-y += handle_cgnat.c
SRCS-y += handle_nat.c
SRCS-y += handle_dump.c
@@ -198,7 +220,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread_pipeline.c
SRCS-y += prox_args.c prox_cfg.c prox_cksum.c prox_port_cfg.c
SRCS-y += cfgfile.c clock.c commands.c cqm.c msr.c defaults.c
-SRCS-y += display.c display_latency.c display_mempools.c
+SRCS-y += display.c display_latency.c display_latency_distr.c display_mempools.c
SRCS-y += display_ports.c display_rings.c display_priority.c display_pkt_len.c display_l4gen.c display_tasks.c display_irq.c
SRCS-y += log.c hash_utils.c main.c parse_utils.c file_utils.c
SRCS-y += run.c input_conn.c input_curses.c
@@ -207,17 +229,48 @@ SRCS-y += stats_port.c stats_mempool.c stats_ring.c stats_l4gen.c
SRCS-y += stats_latency.c stats_global.c stats_core.c stats_task.c stats_prio.c stats_irq.c
SRCS-y += cmd_parser.c input.c prox_shared.c prox_lua_types.c
SRCS-y += genl4_bundle.c heap.c genl4_stream_tcp.c genl4_stream_udp.c cdf.c
-SRCS-y += stats.c stats_cons_log.c stats_cons_cli.c stats_parser.c hash_set.c prox_lua.c prox_malloc.c
+SRCS-y += stats.c stats_cons_log.c stats_cons_cli.c stats_parser.c hash_set.c prox_lua.c prox_malloc.c prox_ipv6.c prox_compat.c
+SRCS-y += git_version.c
+
+GIT_VERSION := "$(shell git describe --abbrev=8 --dirty --always)"
ifeq ($(FIRST_PROX_MAKE),)
MAKEFLAGS += --no-print-directory
FIRST_PROX_MAKE = 1
export FIRST_PROX_MAKE
-all:
+all: libedit_autoconf.h git_version.c
@./helper-scripts/trailing.sh
@$(MAKE) $@
+clean:
+ $(Q) $(RM) -- 'libedit_autoconf.h'
+ @$(MAKE) $@
%::
@$(MAKE) $@
+
+ifeq ($(call rte_ver_LT,17,2,0,0),y)
+AUTO-CONFIG-SCRIPT = $(RTE_SDK)/scripts/auto-config-h.sh
+else
+AUTO-CONFIG-SCRIPT = $(RTE_SDK)/buildtools/auto-config-h.sh
+endif
+
+# DPDK CFLAGS prevents auto-conf program to properly compile
+export CFLAGS=
+# if el_rfunc_t exists, define HAVE_LIBEDIT_EL_RFUNC_T so that PROX knows it can use it
+libedit_autoconf.h: $(AUTO-CONFIG-SCRIPT)
+ $(Q) $(RM) -- '$@'
+ $(Q) sh -- '$(AUTO-CONFIG-SCRIPT)' '$@' \
+ HAVE_LIBEDIT_EL_RFUNC_T \
+ histedit.h \
+ type 'el_rfunc_t' \
+ > /dev/null
+# auto-conf adds empty line at the end of the file, considered as error by trailing.sh script
+ $(Q) sed -i '$$ d' '$@'
+
+git_version.c: force
+ @echo 'const char *git_version=$(GIT_VERSION);' | cmp -s - $@ || echo 'const char *git_version=$(GIT_VERSION);' > $@
+ @echo $@
+force:
+
else
include $(RTE_SDK)/mk/rte.extapp.mk
endif
diff --git a/VNFs/DPPD-PROX/README b/VNFs/DPPD-PROX/README
index 7527479b..1d7ad51f 100644
--- a/VNFs/DPPD-PROX/README
+++ b/VNFs/DPPD-PROX/README
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2010-2020 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -24,40 +24,104 @@ finer grained network functions like QoS, Routing, load-balancing...
Compiling and running this application
--------------------------------------
-This application supports DPDK 16.04, 16.11, 16.11.1, 17.02, 17.05 17.08 and 17.11.
-The following commands assume that the following variables have been set:
+This application supports DPDK 16.04, 16.07, 16.11, 17.02, 17.05, 17.08,
+17.11, 18.02, 18.05, 18.08, 18.11, 19.02, 19.05, 19.08, 19.11, 20.02, 20.05,
+20.08, 20.11, 21.02, 21.05, 21.08, 21.11, 22.03, 22.07, 22.11
-export RTE_SDK=/path/to/dpdk
-export RTE_TARGET=x86_64-native-linuxapp-gcc
+DPDK meson compilation
+----------------------
+Compilation with meson and ninja is supported since DPDK 18.02, while support
+for make has been removed in DPDK 20.11.
+
+Example: DPDK 20.11 installation with meson
+-------------------------------------------
+cd /your/path/for/dpdk/meson/compilation/
+git clone http://dpdk.org/git/dpdk-stable
+cd dpdk-stable/
+git checkout 20.11
+meson setup build
+# For DPDK 21.11 and above, please run:
+# meson setup -Denable_driver_sdk=true build # instead, or
+# meson configure build/ -Denable_driver_sdk=true # afterwards.
+cd build/
+ninja
+sudo ninja install
+sudo ldconfig
+
+PROX meson compilation
+----------------------
+Depending on the distribution in use the DPDK libraries will be installed in
+different locations. The PKG_CONFIG_PATH environment variable is used to
+point to the correct location.
+
+On RHEL/CentOS: export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig
+On Ubuntu: export PKG_CONFIG_PATH=/usr/local/lib/x86_64-linux-gnu/pkgconfig
+
+cd /the/path/where/you/cloned/this/repo/
+cd VNFs/DPPD-PROX/
+meson setup build
+# Additional options can be specified on the 'meson setup' command line, or
+# using 'meson configure' afterwards. See the meson_options.txt file for
+# possible options.
+ninja -C build/
+
+Legacy DPDK make compilation
+----------------------------
+Compilation with make has been supported until DPDK 20.08, and has been removed
+in DPDK 20.11.
-IPSec is only supported in PROX with DPDK 17.02 and DPDK 17.05
-It will only be compiled if CONFIG_RTE_LIBRTE_PMD_AESNI_MB is
-set in DPDK .config. This also requires AESNI_MULTI_BUFFER_LIB_PATH to point to
-the multi-buffer library which can be downloaded from
-<https://github.com/01org/intel-ipsec-mb>.
-See doc/guides/cryptodevs/aesni_mb.rst within dpdk for more details
+The following commands require that the following environment variables are
+properly defined, as shown in the examples below:
+- RTE_SDK: absolute path to the DPDK sources directory,
+- RTE_TARGET: target (arch-machine-execenv-toolchain format) for
+ which we are building DPDK,
+- RTE_DEVEL_BUILD: set it to 'n' to prevent warnings to be considered
+ as errors when building DPDK inside a git tree.
-Example: DPDK 17.05 installation
---------------------------------
+Configuration options can be defined, before building DPDK, by means of
+appending lines into the config/defconfig_$RTE_TARGET file.
+
+For example, IPSec is supported in PROX (handle_esp.c) since DPDK 17.11,
+although it has not been recently verified, but it only gets compiled when
+CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y has been defined in DPDK configuration. It also
+requires AESNI_MULTI_BUFFER_LIB_PATH environment variable to point to the
+"Multi-Buffer Crypto for IPsec" library, which can be downloaded from
+https://github.com/intel/intel-ipsec-mb. See doc/guides/cryptodevs/aesni_mb.rst
+within DPDK sources directory for more details.
+
+Example: DPDK 20.05 installation with make
+------------------------------------------
+cd /your/path/for/dpdk/make/compilation/
git clone http://dpdk.org/git/dpdk
-cd dpdk
-git checkout v17.05
-make install T=$RTE_TARGET
+cd dpdk/
+git checkout v20.05
+export RTE_SDK=$PWD
+export RTE_TARGET=x86_64-native-linuxapp-gcc
+export RTE_DEVEL_BUILD=n
+# Edit config/defconfig_$RTE_TARGET file to define options as needed.
+make config T=$RTE_TARGET O=$RTE_TARGET
+make O=$RTE_TARGET
-PROX compilation
-----------------
-The Makefile with this application expects RTE_SDK to point to the
-root directory of DPDK (e.g. export RTE_SDK=/root/dpdk). If RTE_TARGET
-has not been set, x86_64-native-linuxapp-gcc will be assumed.
+Legacy PROX make compilation
+----------------------------
+As explained above, PROX Makefile expects RTE_SDK to point to the DPDK sources
+directory. If RTE_TARGET is not set, it defaults to x86_64-native-linuxapp-gcc.
+
+cd /the/path/where/you/cloned/this/repo/
+cd VNFs/DPPD-PROX/
+export RTE_SDK=/your/path/for/dpdk/make/compilation/./dpdk/
+export RTE_TARGET=x86_64-native-linuxapp-gcc
+export RTE_DEVEL_BUILD=n
+make
Running PROX
------------
-After DPDK has been set up, run make from the directory where you have
-extracted this application. A build directory will be created
-containing the PROX executable. The usage of the application is shown
-below. Note that this application assumes that all required ports have
-been bound to the DPDK provided igb_uio driver. Refer to the "Getting
-Started Guide - DPDK" document for more details.
+After DPDK has been installed and PROX has been compiled, the build subdirectory
+has been created and contains the PROX executable. The usage of the application
+is shown below. Note that this application assumes that all required ports have
+been bound to the DPDK provided igb_uio driver. Refer to the "Getting Started
+Guide" (http://doc.dpdk.org/guides/linux_gsg/ or doc/guides/linux_gsg/*.rst in
+DPDK sources directory) for more details.
Usage: ./build/prox [-f CONFIG_FILE] [-l LOG_FILE] [-p] [-o DISPLAY] [-v] [-a|-e] \
[-m|-s|-i] [-n] [-w DEF] [-q] [-k] [-d] [-z] [-r VAL] [-u] [-t]
diff --git a/VNFs/DPPD-PROX/acl_field_def.h b/VNFs/DPPD-PROX/acl_field_def.h
index ede5bea7..da60e1c0 100644
--- a/VNFs/DPPD-PROX/acl_field_def.h
+++ b/VNFs/DPPD-PROX/acl_field_def.h
@@ -24,10 +24,10 @@
#include "qinq.h"
struct pkt_eth_ipv4_udp {
- struct ether_hdr ether_hdr;
- struct ipv4_hdr ipv4_hdr;
- struct udp_hdr udp_hdr;
-} __attribute__((packed));
+ prox_rte_ether_hdr ether_hdr;
+ prox_rte_ipv4_hdr ipv4_hdr;
+ prox_rte_udp_hdr udp_hdr;
+} __attribute__((packed)) __attribute__((__aligned__(2)));
static struct rte_acl_field_def pkt_eth_ipv4_udp_defs[] = {
/* first input field - always one byte long. */
@@ -73,8 +73,8 @@ static struct rte_acl_field_def pkt_eth_ipv4_udp_defs[] = {
struct pkt_qinq_ipv4_udp {
struct qinq_hdr qinq_hdr;
- struct ipv4_hdr ipv4_hdr;
- struct udp_hdr udp_hdr;
+ prox_rte_ipv4_hdr ipv4_hdr;
+ prox_rte_udp_hdr udp_hdr;
};
static struct rte_acl_field_def pkt_qinq_ipv4_udp_defs[] = {
diff --git a/VNFs/DPPD-PROX/arp.h b/VNFs/DPPD-PROX/arp.h
index 488008d7..ebf8a89e 100644
--- a/VNFs/DPPD-PROX/arp.h
+++ b/VNFs/DPPD-PROX/arp.h
@@ -18,6 +18,7 @@
#define _ARP_H_
#include <rte_ether.h>
+#include "prox_compat.h"
#include "etypes.h"
#include "mbuf_utils.h"
@@ -25,11 +26,11 @@
#define ARP_REPLY 0x200
struct _arp_ipv4 {
- struct ether_addr sha; /* Sender hardware address */
+ prox_rte_ether_addr sha; /* Sender hardware address */
uint32_t spa; /* Sender protocol address */
- struct ether_addr tha; /* Target hardware address */
+ prox_rte_ether_addr tha; /* Target hardware address */
uint32_t tpa; /* Target protocol address */
-} __attribute__((__packed__));
+} __attribute__((__packed__)) __attribute__((__aligned__(2)));
typedef struct _arp_ipv4 arp_ipv4_t;
struct my_arp_t {
@@ -39,61 +40,79 @@ struct my_arp_t {
uint8_t plen;
uint16_t oper;
arp_ipv4_t data;
-} __attribute__((__packed__));
+} __attribute__((__packed__)) __attribute__((__aligned__(2)));
struct ether_hdr_arp {
- struct ether_hdr ether_hdr;
+ prox_rte_ether_hdr ether_hdr;
struct my_arp_t arp;
};
-static int arp_is_gratuitous(struct ether_hdr_arp *hdr)
+static int arp_is_gratuitous(struct my_arp_t *arp)
{
- return hdr->arp.data.spa == hdr->arp.data.tpa;
+ return arp->data.spa == arp->data.tpa;
}
-static inline void build_arp_reply(struct ether_hdr_arp *hdr_arp, struct ether_addr *s_addr)
+// This builds an ARP reply based on an ARP request
+static inline void build_arp_reply(prox_rte_ether_hdr *ether_hdr, prox_rte_ether_addr *s_addr, struct my_arp_t *arp)
{
- uint32_t ip_source = hdr_arp->arp.data.spa;
+ uint32_t ip_source = arp->data.spa;
- memcpy(hdr_arp->ether_hdr.d_addr.addr_bytes, hdr_arp->ether_hdr.s_addr.addr_bytes, sizeof(struct ether_addr));
- memcpy(hdr_arp->ether_hdr.s_addr.addr_bytes, s_addr, sizeof(struct ether_addr));
+ memcpy(ether_hdr->d_addr.addr_bytes, ether_hdr->s_addr.addr_bytes, sizeof(prox_rte_ether_addr));
+ memcpy(ether_hdr->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
- hdr_arp->arp.data.spa = hdr_arp->arp.data.tpa;
- hdr_arp->arp.data.tpa = ip_source;
- hdr_arp->arp.oper = 0x200;
- memcpy(&hdr_arp->arp.data.tha, &hdr_arp->arp.data.sha, sizeof(struct ether_addr));
- memcpy(&hdr_arp->arp.data.sha, s_addr, sizeof(struct ether_addr));
+ arp->data.spa = arp->data.tpa;
+ arp->data.tpa = ip_source;
+ arp->oper = 0x200;
+ memcpy(&arp->data.tha, &arp->data.sha, sizeof(prox_rte_ether_addr));
+ memcpy(&arp->data.sha, s_addr, sizeof(prox_rte_ether_addr));
}
-static inline void build_arp_request(struct rte_mbuf *mbuf, struct ether_addr *src_mac, uint32_t ip_dst, uint32_t ip_src)
+static inline void build_arp_request(struct rte_mbuf *mbuf, prox_rte_ether_addr *src_mac, uint32_t ip_dst, uint32_t ip_src, uint16_t vlan)
{
- struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
+ struct ether_hdr_arp *hdr_arp;
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ether_hdr *ether_hdr;
+ struct my_arp_t *arp;
uint64_t mac_bcast = 0xFFFFFFFFFFFF;
- rte_pktmbuf_pkt_len(mbuf) = 42;
- rte_pktmbuf_data_len(mbuf) = 42;
init_mbuf_seg(mbuf);
- memcpy(&hdr_arp->ether_hdr.d_addr.addr_bytes, &mac_bcast, 6);
- memcpy(&hdr_arp->ether_hdr.s_addr.addr_bytes, src_mac, 6);
- hdr_arp->ether_hdr.ether_type = ETYPE_ARP;
- hdr_arp->arp.htype = 0x100,
- hdr_arp->arp.ptype = 0x0008;
- hdr_arp->arp.hlen = 6;
- hdr_arp->arp.plen = 4;
- hdr_arp->arp.oper = 0x100;
- hdr_arp->arp.data.spa = ip_src;
- hdr_arp->arp.data.tpa = ip_dst;
- memset(&hdr_arp->arp.data.tha, 0, sizeof(struct ether_addr));
- memcpy(&hdr_arp->arp.data.sha, src_mac, sizeof(struct ether_addr));
+ if (vlan) {
+ ether_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ vlan_hdr = (prox_rte_vlan_hdr *)(ether_hdr + 1);
+ arp = (struct my_arp_t *)(vlan_hdr + 1);
+ ether_hdr->ether_type = ETYPE_VLAN;
+ vlan_hdr->eth_proto = ETYPE_ARP;
+ vlan_hdr->vlan_tci = rte_cpu_to_be_16(vlan);
+ rte_pktmbuf_pkt_len(mbuf) = 42 + sizeof(prox_rte_vlan_hdr);
+ rte_pktmbuf_data_len(mbuf) = 42 + sizeof(prox_rte_vlan_hdr);
+ } else {
+ ether_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ arp = (struct my_arp_t *)(ether_hdr + 1);
+ ether_hdr->ether_type = ETYPE_ARP;
+ rte_pktmbuf_pkt_len(mbuf) = 42;
+ rte_pktmbuf_data_len(mbuf) = 42;
+ }
+
+ memcpy(&ether_hdr->d_addr.addr_bytes, &mac_bcast, 6);
+ memcpy(&ether_hdr->s_addr.addr_bytes, src_mac, 6);
+ arp->htype = 0x100,
+ arp->ptype = 0x0008;
+ arp->hlen = 6;
+ arp->plen = 4;
+ arp->oper = 0x100;
+ arp->data.spa = ip_src;
+ arp->data.tpa = ip_dst;
+ memset(&arp->data.tha, 0, sizeof(prox_rte_ether_addr));
+ memcpy(&arp->data.sha, src_mac, sizeof(prox_rte_ether_addr));
}
-static void create_mac(struct ether_hdr_arp *hdr, struct ether_addr *addr)
+static void create_mac(struct my_arp_t *arp, prox_rte_ether_addr *addr)
{
addr->addr_bytes[0] = 0x2;
addr->addr_bytes[1] = 0;
// Instead of sending a completely random MAC address, create the following MAC:
// 02:00:x1:x2:x3:x4 where x1:x2:x3:x4 is the IP address
- memcpy(addr->addr_bytes + 2, (uint32_t *)&hdr->arp.data.tpa, 4);
+ memcpy(addr->addr_bytes + 2, (uint32_t *)&arp->data.tpa, 4);
}
#endif /* _ARP_H_ */
diff --git a/VNFs/DPPD-PROX/bng_pkts.h b/VNFs/DPPD-PROX/bng_pkts.h
index 82e6199c..85114a0c 100644
--- a/VNFs/DPPD-PROX/bng_pkts.h
+++ b/VNFs/DPPD-PROX/bng_pkts.h
@@ -22,6 +22,7 @@
#include <rte_udp.h>
#include <rte_byteorder.h>
+#include "prox_compat.h"
#include "gre.h"
#include "mpls.h"
#include "qinq.h"
@@ -32,41 +33,41 @@ struct cpe_pkt {
#ifdef USE_QINQ
struct qinq_hdr qinq_hdr;
#else
- struct ether_hdr ether_hdr;
+ prox_rte_ether_hdr ether_hdr;
#endif
- struct ipv4_hdr ipv4_hdr;
- struct udp_hdr udp_hdr;
-} __attribute__((packed));
+ prox_rte_ipv4_hdr ipv4_hdr;
+ prox_rte_udp_hdr udp_hdr;
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct cpe_packet_arp {
struct qinq_hdr qinq_hdr;
struct my_arp_t arp;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
/* Struct used for setting all the values a packet
going to the core netwerk. Payload may follow
after the headers, but no need to touch that. */
struct core_net_pkt_m {
- struct ether_hdr ether_hdr;
+ prox_rte_ether_hdr ether_hdr;
#ifdef MPLS_ROUTING
union {
struct mpls_hdr mpls;
uint32_t mpls_bytes;
};
#endif
- struct ipv4_hdr tunnel_ip_hdr;
+ prox_rte_ipv4_hdr tunnel_ip_hdr;
struct gre_hdr gre_hdr;
- struct ipv4_hdr ip_hdr;
- struct udp_hdr udp_hdr;
-} __attribute__((packed));
+ prox_rte_ipv4_hdr ip_hdr;
+ prox_rte_udp_hdr udp_hdr;
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct core_net_pkt {
- struct ether_hdr ether_hdr;
- struct ipv4_hdr tunnel_ip_hdr;
+ prox_rte_ether_hdr ether_hdr;
+ prox_rte_ipv4_hdr tunnel_ip_hdr;
struct gre_hdr gre_hdr;
- struct ipv4_hdr ip_hdr;
- struct udp_hdr udp_hdr;
-} __attribute__((packed));
+ prox_rte_ipv4_hdr ip_hdr;
+ prox_rte_udp_hdr udp_hdr;
+} __attribute__((packed)) __attribute__((__aligned__(2)));
#define UPSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt) - sizeof(struct cpe_pkt)))
#define DOWNSTREAM_DELTA ((uint32_t)(sizeof(struct core_net_pkt_m) - sizeof(struct cpe_pkt)))
@@ -74,7 +75,7 @@ struct core_net_pkt {
struct cpe_pkt_delta {
uint8_t encap[DOWNSTREAM_DELTA];
struct cpe_pkt pkt;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
static inline void extract_key_cpe(struct rte_mbuf *mbuf, uint64_t* key)
{
@@ -86,7 +87,7 @@ static inline void extract_key_cpe(struct rte_mbuf *mbuf, uint64_t* key)
#endif
}
-static inline void key_core(struct gre_hdr* gre, __attribute__((unused)) struct ipv4_hdr* ip, uint64_t* key)
+static inline void key_core(struct gre_hdr* gre, __attribute__((unused)) prox_rte_ipv4_hdr* ip, uint64_t* key)
{
struct cpe_key *cpe_key = (struct cpe_key*)key;
diff --git a/VNFs/DPPD-PROX/cfgfile.c b/VNFs/DPPD-PROX/cfgfile.c
index 0c5950e4..2bc9e5f1 100644
--- a/VNFs/DPPD-PROX/cfgfile.c
+++ b/VNFs/DPPD-PROX/cfgfile.c
@@ -26,6 +26,7 @@
#include "parse_utils.h"
#include "log.h"
#include "quit.h"
+#include "prox_compat.h"
#define UINT32_MAX_STR "4294967295"
@@ -191,10 +192,11 @@ static struct cfg_section *cfg_check_section(char *buffer, struct cfg_section *p
/* only numeric characters are valid for section index */
char val[MAX_CFG_STRING_LEN];
- if (pend[0] == '$')
- parse_single_var(val, sizeof(val), pend);
- else
- strncpy(val, pend, sizeof(val));
+ if (pend[0] == '$') {
+ if (parse_vars(val, sizeof(val), pend))
+ return NULL;
+ } else
+ prox_strncpy(val, pend, sizeof(val));
for (len = 0; val[len] != '\0'; ++len) {
if (strchr(valid, val[len]) == NULL) {
@@ -272,9 +274,14 @@ int cfg_parse(struct cfg_file *pcfg, struct cfg_section *psec)
do {
ret = fgets(buffer, sizeof(buffer), pcfg->pfile);
+ /* remove comments */
+ if (*ret == ';') {
+ *ret = '\0';
+ }
+
if (ret && *ret != '[') {
size_t l = strlen(buffer);
- strncpy(lines, buffer, max_len);
+ prox_strncpy(lines, buffer, max_len);
max_len -= l;
lines += l;
}
@@ -298,7 +305,7 @@ int cfg_parse(struct cfg_file *pcfg, struct cfg_section *psec)
}
while (cfg_get_line(pcfg, buffer, MAX_CFG_STRING_LEN, psec->raw_lines) > 0) {
- strncpy(pcfg->cur_line, buffer, sizeof(pcfg->cur_line));
+ prox_strncpy(pcfg->cur_line, buffer, sizeof(pcfg->cur_line));
if (*buffer == '[') {
if (index_count + 1 < psec->nbindex) {
// Need to loop - go back to recorded postion in file
diff --git a/VNFs/DPPD-PROX/cfgfile.h b/VNFs/DPPD-PROX/cfgfile.h
index 41b474ee..c0de4f16 100644
--- a/VNFs/DPPD-PROX/cfgfile.h
+++ b/VNFs/DPPD-PROX/cfgfile.h
@@ -18,6 +18,7 @@
#define _CFG_FILE_H_
#include <stdio.h>
+#include "defaults.h"
#define DEFAULT_CONFIG_FILE "./prox.cfg"
@@ -38,7 +39,7 @@ struct cfg_section {
int error;
};
-#define MAX_CFG_STRING_LEN 8192
+#define MAX_CFG_STRING_LEN (3 * MAX_PKT_SIZE)
#define STRING_TERMINATOR_LEN 4
struct cfg_file {
diff --git a/VNFs/DPPD-PROX/clock.c b/VNFs/DPPD-PROX/clock.c
index 6e057101..43caccad 100644
--- a/VNFs/DPPD-PROX/clock.c
+++ b/VNFs/DPPD-PROX/clock.c
@@ -20,6 +20,7 @@
#include <string.h>
#include <rte_cycles.h>
+#include "prox_compat.h"
/* Calibrate TSC overhead by reading NB_READ times and take the smallest value.
Bigger values are caused by external influence and can be discarded. The best
@@ -92,7 +93,7 @@ uint64_t str_to_tsc(const char *from)
uint64_t ret;
char str[16];
- strncpy(str, from, sizeof(str));
+ prox_strncpy(str, from, sizeof(str));
char *frac = strchr(str, '.');
diff --git a/VNFs/DPPD-PROX/cmd_parser.c b/VNFs/DPPD-PROX/cmd_parser.c
index f88ee942..bc796b55 100644
--- a/VNFs/DPPD-PROX/cmd_parser.c
+++ b/VNFs/DPPD-PROX/cmd_parser.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -52,6 +52,8 @@
#include "handle_cgnat.h"
#include "handle_impair.h"
#include "rx_pkt.h"
+#include "prox_compat.h"
+#include "igmp.h"
static int core_task_is_valid(int lcore_id, int task_id)
{
@@ -76,24 +78,13 @@ static int cores_task_are_valid(unsigned int *lcores, int task_id, unsigned int
unsigned int lcore_id;
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if (lcore_id >= RTE_MAX_LCORE) {
- plog_err("Invalid core id %u (lcore ID above %d)\n", lcore_id, RTE_MAX_LCORE);
+ if (core_task_is_valid(lcore_id, task_id) == 0)
return 0;
- }
- else if (!prox_core_active(lcore_id, 0)) {
- plog_err("Invalid core id %u (lcore is not active)\n", lcore_id);
- return 0;
- }
- else if (task_id >= lcore_cfg[lcore_id].n_tasks_all) {
- plog_err("Invalid task id (valid task IDs for core %u are below %u)\n",
- lcore_id, lcore_cfg[lcore_id].n_tasks_all);
- return 0;
- }
}
return 1;
}
-static int parse_core_task(const char *str, uint32_t *lcore_id, uint32_t *task_id, unsigned int *nb_cores)
+static int parse_cores_task(const char *str, uint32_t *lcore_id, uint32_t *task_id, unsigned *nb_cores)
{
char str_lcore_id[128];
int ret;
@@ -110,6 +101,29 @@ static int parse_core_task(const char *str, uint32_t *lcore_id, uint32_t *task_i
return 0;
}
+static int parse_cores_tasks(const char *str, uint32_t *lcore_id, uint32_t *task_id, unsigned *nb_cores, unsigned *nb_tasks)
+{
+ char str_lcore_id[128], str_task_id[128];
+ int ret;
+
+ if (2 != sscanf(str, "%s %s", str_lcore_id, str_task_id))
+ return -1;
+
+ if ((ret = parse_list_set(lcore_id, str_lcore_id, RTE_MAX_LCORE)) <= 0) {
+ plog_err("Invalid core while parsing command (%s)\n", get_parse_err());
+ return -1;
+ }
+ *nb_cores = ret;
+
+ if ((ret = parse_list_set(task_id, str_task_id, MAX_TASKS_PER_CORE)) <= 0) {
+ plog_err("Invalid task while parsing command (%s)\n", get_parse_err());
+ return -1;
+ }
+ *nb_tasks = ret;
+
+ return 0;
+}
+
static const char *strchr_skip_twice(const char *str, int chr)
{
str = strchr(str, chr);
@@ -207,7 +221,7 @@ static int parse_cmd_trace(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], task_id, nb_packets, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -226,7 +240,7 @@ static int parse_cmd_dump_rx(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], task_id, nb_packets, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -273,7 +287,7 @@ static int parse_cmd_dump_tx(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], task_id, nb_packets, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -360,7 +374,7 @@ static int parse_cmd_count(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, count, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -370,7 +384,7 @@ static int parse_cmd_count(const char *str, struct input *input)
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
}
else {
@@ -384,26 +398,78 @@ static int parse_cmd_count(const char *str, struct input *input)
return 0;
}
-static int parse_cmd_set_probability(const char *str, struct input *input)
+static int parse_cmd_set_proba_no_drop(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- float probability;
+ float proba_no_drop;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
- if (sscanf(str, "%f", &probability) != 1)
+ if (sscanf(str, "%f", &proba_no_drop) != 1)
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "impair", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "impair", "l3"))){
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
} else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
- task_impair_set_proba(tbase, probability);
+ task_impair_set_proba_no_drop(tbase, proba_no_drop);
+ }
+ }
+ }
+ return 0;
+}
+
+static int parse_cmd_set_proba_delay(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ float proba_delay;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%f", &proba_delay) != 1)
+ return -1;
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
+ plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_impair_set_proba_delay(tbase, proba_delay);
+ }
+ }
+ }
+ return 0;
+}
+
+static int parse_cmd_set_proba_duplicate(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ float proba_duplicate;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%f", &proba_duplicate) != 1)
+ return -1;
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
+ plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_impair_set_proba_duplicate(tbase, proba_duplicate);
}
}
}
@@ -414,7 +480,7 @@ static int parse_cmd_delay_us(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, delay_us, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -424,7 +490,7 @@ static int parse_cmd_delay_us(const char *str, struct input *input)
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "impair", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "impair", "l3"))){
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
} else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
@@ -439,7 +505,7 @@ static int parse_cmd_random_delay_us(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, delay_us, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -449,7 +515,7 @@ static int parse_cmd_random_delay_us(const char *str, struct input *input)
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "impair", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "impair", "l3"))){
+ if (!task_is_mode(lcore_id, task_id, "impair")) {
plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
} else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
@@ -464,7 +530,7 @@ static int parse_cmd_bypass(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, pkt_size, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if ((prox_cfg.flags & DSF_ENABLE_BYPASS) == 0) {
plog_err("enable bypass not set => command not supported\n");
@@ -485,7 +551,7 @@ static int parse_cmd_reconnect(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, pkt_size, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
@@ -501,7 +567,7 @@ static int parse_cmd_pkt_size(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, pkt_size, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -511,7 +577,7 @@ static int parse_cmd_pkt_size(const char *str, struct input *input)
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
} else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
@@ -522,13 +588,54 @@ static int parse_cmd_pkt_size(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_imix(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ uint32_t pkt_sizes[MAX_IMIX_PKTS], tmp;
+ uint32_t pkt_index = 0;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ while (pkt_index < MAX_IMIX_PKTS) {
+ if (sscanf(str, "%d", &pkt_sizes[pkt_index]) != 1)
+ break;
+ pkt_index++;
+ if ((str = strchr(str, ',')) == NULL)
+ break;
+ str = str + 1;
+ }
+ if (pkt_index == 0) {
+ plog_err("No pkt size found\n");
+ return -1;
+ }
+ if ((pkt_index == MAX_IMIX_PKTS) && (str) && (sscanf(str, "%d", &tmp) == 1)) {
+ plog_err("Too many inputs - unexpected inputs starting at %s\n", str);
+ return -1;
+ }
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_gen_set_imix(tbase, pkt_index, pkt_sizes); /* error printed within function */
+ }
+ }
+ }
+ return 0;
+}
+
static int parse_cmd_speed(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], task_id, lcore_id, nb_cores;
float speed;
unsigned i;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -542,7 +649,7 @@ static int parse_cmd_speed(const char *str, struct input *input)
for (i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
}
else if (speed > 1000.0f || speed < 0.0f) { // Up to 100 Gbps
@@ -565,7 +672,7 @@ static int parse_cmd_speed_byte(const char *str, struct input *input)
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
uint64_t bps;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -576,7 +683,7 @@ static int parse_cmd_speed_byte(const char *str, struct input *input)
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
}
else if (bps > 12500000000) { // Up to 100Gbps
@@ -602,7 +709,7 @@ static int parse_cmd_reset_randoms_all(const char *str, struct input *input)
unsigned task_id, lcore_id = -1;
while (prox_core_next(&lcore_id, 0) == 0) {
for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) {
- if ((task_is_mode_and_submode(lcore_id, task_id, "gen", "")) || (task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (task_is_mode(lcore_id, task_id, "gen")) {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
uint32_t n_rands = task_gen_get_n_randoms(tbase);
@@ -614,6 +721,27 @@ static int parse_cmd_reset_randoms_all(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_reset_ranges_all(const char *str, struct input *input)
+{
+ if (strcmp(str, "") != 0) {
+ return -1;
+ }
+
+ unsigned task_id, lcore_id = -1;
+ while (prox_core_next(&lcore_id, 0) == 0) {
+ for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) {
+ if (task_is_mode(lcore_id, task_id, "gen")) {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ uint32_t n_ranges = task_gen_get_n_ranges(tbase);
+
+ plog_info("Resetting ranges on core %d task %d from %d ranges\n", lcore_id, task_id, n_ranges);
+ task_gen_reset_ranges(tbase);
+ }
+ }
+ }
+ return 0;
+}
+
static int parse_cmd_reset_values_all(const char *str, struct input *input)
{
if (strcmp(str, "") != 0) {
@@ -623,7 +751,7 @@ static int parse_cmd_reset_values_all(const char *str, struct input *input)
unsigned task_id, lcore_id = -1;
while (prox_core_next(&lcore_id, 0) == 0) {
for (task_id = 0; task_id < lcore_cfg[lcore_id].n_tasks_all; task_id++) {
- if ((task_is_mode_and_submode(lcore_id, task_id, "gen", "")) || (task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (task_is_mode(lcore_id, task_id, "gen")) {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
plog_info("Resetting values on core %d task %d\n", lcore_id, task_id);
@@ -638,13 +766,13 @@ static int parse_cmd_reset_values(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
}
else {
@@ -664,7 +792,7 @@ static int parse_cmd_set_value(const char *str, struct input *input)
unsigned short offset;
uint8_t value_len;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -675,12 +803,10 @@ static int parse_cmd_set_value(const char *str, struct input *input)
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
}
- else if (offset > ETHER_MAX_LEN) {
- plog_err("Offset out of range (must be less then %u)\n", ETHER_MAX_LEN);
- }
+ // do not check offset here - gen knows the maximum frame size better than we do
else if (value_len > 4) {
plog_err("Length out of range (must be less then 4)\n");
}
@@ -688,7 +814,7 @@ static int parse_cmd_set_value(const char *str, struct input *input)
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
if (task_gen_set_value(tbase, value, offset, value_len))
- plog_info("Unable to set Byte %"PRIu16" to %"PRIu8" - too many value set\n", offset, value);
+ plog_info("Unable to set Byte %"PRIu16" to %"PRIu8" - invalid offset/len\n", offset, value);
else
plog_info("Setting Byte %"PRIu16" to %"PRIu32"\n", offset, value);
}
@@ -705,7 +831,7 @@ static int parse_cmd_set_random(const char *str, struct input *input)
char rand_str[64];
int16_t rand_id = -1;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -716,11 +842,11 @@ static int parse_cmd_set_random(const char *str, struct input *input)
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if ((!task_is_mode_and_submode(lcore_id, task_id, "gen", "")) && (!task_is_mode_and_submode(lcore_id, task_id, "gen", "l3"))) {
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
}
- else if (offset > ETHER_MAX_LEN) {
- plog_err("Offset out of range (must be less then %u)\n", ETHER_MAX_LEN);
+ else if (offset > PROX_RTE_ETHER_MAX_LEN) {
+ plog_err("Offset out of range (must be less then %u)\n", PROX_RTE_ETHER_MAX_LEN);
}
else if (value_len > 4) {
plog_err("Length out of range (must be less then 4)\n");
@@ -736,11 +862,44 @@ static int parse_cmd_set_random(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_set_range(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ struct range range;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%u %u %u", &range.offset, &range.min, &range.max) != 3) {
+ return -1;
+ }
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+ if (!task_is_mode(lcore_id, task_id, "gen")) {
+ plog_err("Core %u task %u is not generating packets\n", lcore_id, task_id);
+ } else if (range.offset > PROX_RTE_ETHER_MAX_LEN) {
+ plog_err("Offset out of range (must be less then %u)\n", PROX_RTE_ETHER_MAX_LEN);
+ } else if (range.min > range.max) {
+ plog_err("Wrong range: end (%d) must be >= start (%d)\n", range.max, range.min);
+ } else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ if (task_gen_add_range(tbase, &range)) {
+ plog_warn("Range not added on core %u task %u\n", lcore_id, task_id);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
static int parse_cmd_thread_info(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
for (unsigned int i = 0; i < nb_cores; i++) {
cmd_thread_info(lcores[i], task_id);
@@ -769,7 +928,7 @@ static int parse_cmd_arp_add(const char *str, struct input *input)
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
struct rte_ring *ring;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -803,7 +962,7 @@ static int parse_cmd_rule_add(const char *str, struct input *input)
struct rte_ring *ring;
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -811,7 +970,7 @@ static int parse_cmd_rule_add(const char *str, struct input *input)
return -1;
char *fields[9];
char str_cpy[255];
- strncpy(str_cpy, str, 255);
+ prox_strncpy(str_cpy, str, 255);
// example add rule command: rule add 15 0 1&0x0fff 1&0x0fff 0&0 128.0.0.0/1 128.0.0.0/1 5000-5000 5000-5000 allow
int ret = rte_strsplit(str_cpy, 255, fields, 9, ' ');
if (ret != 8) {
@@ -847,7 +1006,7 @@ static int parse_cmd_gateway_ip(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, ip[4], nb_cores, i;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -876,7 +1035,7 @@ static int parse_cmd_local_ip(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, ip[4], nb_cores, i;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -890,12 +1049,7 @@ static int parse_cmd_local_ip(const char *str, struct input *input)
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
uint32_t local_ip = ((ip[3] & 0xFF) << 24) | ((ip[2] & 0xFF) << 16) | ((ip[1] & 0xFF) << 8) | ((ip[0] & 0xFF) << 0);
if (!task_is_mode_and_submode(lcore_id, task_id, "arp", "local")) {
- if (!task_is_sub_mode(lcore_id, task_id, "l3")) {
- plog_err("Core %u task %u is not in l3 mode\n", lcore_id, task_id);
- } else {
- plog_info("Setting local ip to %s\n", str);
- task_set_local_ip(tbase, local_ip);
- }
+ plog_err("Core %u task %u is not in arp mode\n", lcore_id, task_id);
} else {
plog_info("Setting local ip to %s\n", str);
task_arp_set_local_ip(tbase, local_ip);
@@ -908,7 +1062,7 @@ static int parse_cmd_route_add(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, prefix, next_hop_idx, ip[4], nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
@@ -1241,6 +1395,31 @@ static int parse_cmd_tot_imissed_tot(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_enable_multicast(const char *str, struct input *input)
+{
+ uint8_t port_id;
+ prox_rte_ether_addr mac;
+
+ if (sscanf(str, "%hhu %hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &port_id, mac.addr_bytes, mac.addr_bytes + 1, mac.addr_bytes + 2, mac.addr_bytes + 3, mac.addr_bytes + 4, mac.addr_bytes + 5 ) != 7) {
+ return -1;
+ }
+ cmd_multicast(port_id, 1, &mac);
+ return 0;
+}
+
+static int parse_cmd_disable_multicast(const char *str, struct input *input)
+{
+ uint8_t port_id;
+ prox_rte_ether_addr mac;
+
+ if (sscanf(str, "%hhu %hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &port_id, mac.addr_bytes, mac.addr_bytes + 1, mac.addr_bytes + 2, mac.addr_bytes + 3, mac.addr_bytes + 4, mac.addr_bytes + 5 ) != 7) {
+ return -1;
+ }
+
+ cmd_multicast(port_id, 0, &mac);
+ return 0;
+}
+
static int parse_cmd_reset_port(const char *str, struct input *input)
{
uint32_t port_id;
@@ -1479,7 +1658,7 @@ static int parse_cmd_stats(const char *str, struct input *input)
char *ret = ret2;
int list = 0;
- strncpy(buf, str, sizeof(buf) - 1);
+ prox_strncpy(buf, str, sizeof(buf) - 1);
char *tok;
uint64_t stat_val;
@@ -1541,7 +1720,7 @@ static int parse_cmd_ring_info(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
@@ -1582,11 +1761,54 @@ static int parse_cmd_port_stats(const char *str, struct input *input)
return 0;
}
+static int parse_cmd_multi_port_stats(const char *str, struct input *input)
+{
+ uint32_t ports[PROX_MAX_PORTS];
+ int nb_ports = parse_list_set(ports, str, PROX_MAX_PORTS);
+ if (nb_ports <= 0) {
+ return -1;
+ }
+
+ char buf[PROX_MAX_PORTS * (11+5*21) + 1], *pbuf = buf;
+ int left = sizeof(buf);
+ for (int i = 0; i < nb_ports; ++i) {
+ struct get_port_stats s;
+ if (stats_port(ports[i], &s)) {
+ plog_err("Invalid port %u\n", ports[i]);
+ return 0;
+ }
+
+ int len = snprintf(pbuf, left,
+ "%u,"
+ "%"PRIu64",%"PRIu64","
+ "%"PRIu64",%"PRIu64","
+ "%"PRIu64";",
+ //TODO: adjust buf size above when adding fields
+ ports[i],
+ s.rx_tot, s.tx_tot,
+ s.no_mbufs_tot, s.ierrors_tot + s.imissed_tot,
+ s.last_tsc);
+ if ((len < 0) || (len >= left)) {
+ plog_err("Cannot print stats for port %u\n", ports[i]);
+ return 0;
+ }
+ pbuf += len;
+ left -= len;
+ }
+ pbuf--;
+ *pbuf = '\n';
+
+ plog_info("%s", buf);
+ if (input->reply)
+ input->reply(input, buf, sizeof(buf) - left);
+ return 0;
+}
+
static int parse_cmd_core_stats(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
@@ -1613,54 +1835,212 @@ static int parse_cmd_core_stats(const char *str, struct input *input)
return 0;
}
-static int parse_cmd_lat_stats(const char *str, struct input *input)
+typedef void (*parser_handler)(unsigned, unsigned, struct input *);
+static int handle_cores_tasks(const char *str, struct input *input, const char *mode_str, const char *mode_name, parser_handler f)
{
- unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ // This function either outputs a single line, in case of syntax error on the lists of cores and/or tasks
+ // or outputs (nb_cores * nb_tasks) lines, one line for each core/task pair:
+ // - if the core/task pair is invalid, the output line reports an error
+ // - otherwise, the output line provides the latency statistics for the core/task pair
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ unsigned lcores[RTE_MAX_LCORE], tasks[MAX_TASKS_PER_CORE], lcore_id, task_id, nb_cores, nb_tasks;
+ if (parse_cores_tasks(str, lcores, tasks, &nb_cores, &nb_tasks)) {
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "error: invalid syntax\n");
+ input->reply(input, buf, strlen(buf));
+ }
return -1;
+ }
- if (cores_task_are_valid(lcores, task_id, nb_cores)) {
- for (unsigned int i = 0; i < nb_cores; i++) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ for (unsigned int j = 0; j < nb_tasks; j++) {
lcore_id = lcores[i];
- if (!task_is_mode(lcore_id, task_id, "lat")) {
- plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id);
- }
- else {
- struct stats_latency *stats = stats_latency_find(lcore_id, task_id);
- struct stats_latency *tot = stats_latency_tot_find(lcore_id, task_id);
-
- uint64_t last_tsc = stats_core_task_last_tsc(lcore_id, task_id);
- uint64_t lat_min_usec = time_unit_to_usec(&stats->min.time);
- uint64_t lat_max_usec = time_unit_to_usec(&stats->max.time);
- uint64_t tot_lat_min_usec = time_unit_to_usec(&tot->min.time);
- uint64_t tot_lat_max_usec = time_unit_to_usec(&tot->max.time);
- uint64_t lat_avg_usec = time_unit_to_usec(&stats->avg.time);
-
+ task_id = tasks[j];
+ if (core_task_is_valid(lcore_id, task_id) == 0) {
if (input->reply) {
char buf[128];
- snprintf(buf, sizeof(buf),
- "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64"\n",
- lat_min_usec,
- lat_max_usec,
- lat_avg_usec,
- tot_lat_min_usec,
- tot_lat_max_usec,
- last_tsc,
- rte_get_tsc_hz());
+ snprintf(buf, sizeof(buf), "error: invalid core %u, task %u\n", lcore_id, task_id);
input->reply(input, buf, strlen(buf));
+ } else {
+ plog_info("error: invalid core %u, task %u\n", lcore_id, task_id);
}
- else {
- plog_info("min: %"PRIu64", max: %"PRIu64", avg: %"PRIu64", min since reset: %"PRIu64", max since reset: %"PRIu64"\n",
- lat_min_usec,
- lat_max_usec,
- lat_avg_usec,
- tot_lat_min_usec,
- tot_lat_max_usec);
+ continue;
+ }
+ if ((mode_str) && (!task_is_mode(lcore_id, task_id, mode_str))) {
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "error: core %u task %u is not measuring %s\n", lcore_id, task_id, mode_name);
+ input->reply(input, buf, strlen(buf));
+ } else {
+ plog_info("error: core %u task %u is not measuring %s\n", lcore_id, task_id, mode_name);
}
+ continue;
}
+ f(lcore_id, task_id, input);
+ }
+ }
+ return 0;
+}
+
+static void handle_dp_core_stats(unsigned lcore_id, unsigned task_id, struct input *input)
+{
+ uint64_t tot_rx = stats_core_task_tot_rx(lcore_id, task_id);
+ uint64_t tot_tx = stats_core_task_tot_tx(lcore_id, task_id);
+ uint64_t tot_tx_fail = stats_core_task_tot_tx_fail(lcore_id, task_id);
+ uint64_t tot_rx_non_dp = stats_core_task_tot_rx_non_dp(lcore_id, task_id);
+ uint64_t tot_tx_non_dp = stats_core_task_tot_tx_non_dp(lcore_id, task_id);
+ uint64_t tot_drop = stats_core_task_tot_drop(lcore_id, task_id);
+ uint64_t last_tsc = stats_core_task_last_tsc(lcore_id, task_id);
+
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf),
+ "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%u,%u\n",
+ tot_rx, tot_tx, tot_rx_non_dp, tot_tx_non_dp, tot_drop, tot_tx_fail, last_tsc, rte_get_tsc_hz(), lcore_id, task_id);
+ input->reply(input, buf, strlen(buf));
+ }
+ else {
+ plog_info("core: %u, task: %u, RX: %"PRIu64", TX: %"PRIu64", RX_NON_DP: %"PRIu64", TX_NON_DP: %"PRIu64", DROP: %"PRIu64", TX_FAIL: %"PRIu64"\n",
+ lcore_id, task_id, tot_rx, tot_tx, tot_rx_non_dp, tot_tx_non_dp, tot_drop, tot_tx_fail);
+ }
+}
+
+static void handle_lat_stats(unsigned lcore_id, unsigned task_id, struct input *input)
+{
+ struct stats_latency *stats = stats_latency_find(lcore_id, task_id);
+ struct stats_latency *tot = stats_latency_tot_find(lcore_id, task_id);
+ if (!stats || !tot) {
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf),
+ "error: core %u task %u stats = %p tot = %p\n",
+ lcore_id, task_id, stats, tot);
+ input->reply(input, buf, strlen(buf));
+ } else {
+ plog_info("error: core %u task %u stats = %p tot = %p\n",
+ lcore_id, task_id, stats, tot);
+ }
+ return;
+ }
+
+ uint64_t last_tsc = stats_core_task_last_tsc(lcore_id, task_id);
+ uint64_t lat_min_usec = time_unit_to_usec(&stats->min.time);
+ uint64_t lat_max_usec = time_unit_to_usec(&stats->max.time);
+ uint64_t tot_lat_min_usec = time_unit_to_usec(&tot->min.time);
+ uint64_t tot_lat_max_usec = time_unit_to_usec(&tot->max.time);
+ uint64_t lat_avg_usec = time_unit_to_usec(&stats->avg.time);
+
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf),
+ "%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%"PRIu64",%u,%u,%"PRIu64",%"PRIu64",%"PRIu64"\n",
+ lat_min_usec,
+ lat_max_usec,
+ lat_avg_usec,
+ tot_lat_min_usec,
+ tot_lat_max_usec,
+ last_tsc,
+ rte_get_tsc_hz(),
+ lcore_id,
+ task_id,
+ stats->mis_ordered,
+ stats->extent,
+ stats->duplicate);
+ input->reply(input, buf, strlen(buf));
+ }
+ else {
+ plog_info("core: %u, task: %u, min: %"PRIu64", max: %"PRIu64", avg: %"PRIu64", min since reset: %"PRIu64", max since reset: %"PRIu64", mis_ordered: %"PRIu64", extent: %"PRIu64", duplicates: %"PRIu64"\n",
+ lcore_id,
+ task_id,
+ lat_min_usec,
+ lat_max_usec,
+ lat_avg_usec,
+ tot_lat_min_usec,
+ tot_lat_max_usec,
+ stats->mis_ordered,
+ stats->extent,
+ stats->duplicate);
+ }
+}
+
+#ifdef LATENCY_HISTOGRAM
+static void handle_latency_histogram(unsigned lcore_id, unsigned task_id, struct input *input)
+{
+ uint64_t *buckets;
+
+ stats_core_lat_histogram(lcore_id, task_id, &buckets);
+
+ if (buckets == NULL) {
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "error: unexpected NULL bucket\n");
+ input->reply(input, buf, strlen(buf));
}
+ return;
+ }
+
+ if (input->reply) {
+ char buf[4096] = {0};
+ for (size_t i = 0; i < LAT_BUCKET_COUNT; i++)
+ sprintf(buf+strlen(buf), "Bucket [%zu]: %"PRIu64"\n", i, buckets[i]);
+ input->reply(input, buf, strlen(buf));
+ }
+ else {
+ for (size_t i = 0; i < LAT_BUCKET_COUNT; i++)
+ if (buckets[i])
+ plog_info("Bucket [%zu]: %"PRIu64"\n", i, buckets[i]);
+ }
+}
+
+static void handle_stats_and_packets(unsigned lcore_id, unsigned task_id, struct input *input)
+{
+ handle_lat_stats(lcore_id, task_id, input);
+ handle_latency_histogram(lcore_id, task_id, input);
+}
+#endif
+
+static int parse_cmd_dp_core_stats(const char *str, struct input *input)
+{
+ handle_cores_tasks(str, input, NULL, NULL, handle_dp_core_stats);
+ return 0;
+}
+
+static int parse_cmd_lat_stats(const char *str, struct input *input)
+{
+ handle_cores_tasks(str, input, "lat", "latency", handle_lat_stats);
+ return 0;
+}
+
+static int parse_cmd_lat_packets(const char *str, struct input *input)
+{
+#ifdef LATENCY_HISTOGRAM
+ handle_cores_tasks(str, input, "lat", "latency", handle_latency_histogram);
+#else
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "error: invalid syntax (LATENCY_HISTOGRAM disabled)\n");
+ input->reply(input, buf, strlen(buf));
+ } else {
+ plog_info("LATENCY_HISTOGRAMS disabled\n");
+ }
+#endif
+ return 0;
+}
+
+static int parse_cmd_lat_stats_and_packets(const char *str, struct input *input)
+{
+#ifdef LATENCY_HISTOGRAM
+ handle_cores_tasks(str, input, "lat", "latency", handle_stats_and_packets);
+#else
+ if (input->reply) {
+ char buf[128];
+ snprintf(buf, sizeof(buf), "error: invalid syntax (LATENCY_HISTOGRAMS disabled)\n");
+ input->reply(input, buf, strlen(buf));
+ } else {
+ plog_info("LATENCY_HISTOGRAMS disabled\n");
}
+#endif
return 0;
}
@@ -1670,7 +2050,7 @@ static int parse_cmd_show_irq_buckets(const char *str, struct input *input)
unsigned int i, c;
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
@@ -1691,7 +2071,7 @@ static int parse_cmd_irq(const char *str, struct input *input)
unsigned int i, c;
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
@@ -1709,123 +2089,151 @@ static int parse_cmd_irq(const char *str, struct input *input)
return 0;
}
-static void task_lat_show_latency_histogram(uint8_t lcore_id, uint8_t task_id, struct input *input)
+static int parse_cmd_cgnat_public_hash(const char *str, struct input *input)
{
-#ifdef LATENCY_HISTOGRAM
- uint64_t *buckets;
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- stats_core_lat_histogram(lcore_id, task_id, &buckets);
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
- if (buckets == NULL)
- return;
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
- if (input->reply) {
- char buf[4096] = {0};
- for (size_t i = 0; i < 128; i++)
- sprintf(buf+strlen(buf), "Bucket [%zu]: %"PRIu64"\n", i, buckets[i]);
- input->reply(input, buf, strlen(buf));
- }
- else {
- for (size_t i = 0; i < 128; i++)
- if (buckets[i])
- plog_info("Bucket [%zu]: %"PRIu64"\n", i, buckets[i]);
+ if (!task_is_mode(lcore_id, task_id, "cgnat")) {
+ plog_err("Core %u task %u is not cgnat\n", lcore_id, task_id);
+ }
+ else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_cgnat_dump_public_hash((struct task_nat *)tbase);
+ }
+ }
}
-#else
- plog_info("LATENCY_DETAILS disabled\n");
-#endif
+ return 0;
}
-static int parse_cmd_lat_packets(const char *str, struct input *input)
+static int parse_cmd_cgnat_private_hash(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ uint32_t val;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if (!task_is_mode(lcore_id, task_id, "lat")) {
- plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id);
+
+ if (!task_is_mode(lcore_id, task_id, "cgnat")) {
+ plog_err("Core %u task %u is not cgnat\n", lcore_id, task_id);
}
else {
- task_lat_show_latency_histogram(lcore_id, task_id, input);
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ task_cgnat_dump_private_hash((struct task_nat *)tbase);
}
}
}
return 0;
}
-static int parse_cmd_cgnat_public_hash(const char *str, struct input *input)
+static int parse_cmd_accuracy(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+ uint32_t val;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+ if (!(str = strchr_skip_twice(str, ' ')))
+ return -1;
+ if (sscanf(str, "%"PRIu32"", &val) != 1)
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if (!task_is_mode(lcore_id, task_id, "cgnat")) {
- plog_err("Core %u task %u is not cgnat\n", lcore_id, task_id);
+ if (!task_is_mode(lcore_id, task_id, "lat")) {
+ plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id);
}
else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
- task_cgnat_dump_public_hash((struct task_nat *)tbase);
+
+ task_lat_set_accuracy_limit((struct task_lat *)tbase, val);
}
}
}
return 0;
}
-static int parse_cmd_cgnat_private_hash(const char *str, struct input *input)
+static int parse_cmd_leave_igmp(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- uint32_t val;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if (!task_is_mode(lcore_id, task_id, "cgnat")) {
- plog_err("Core %u task %u is not cgnat\n", lcore_id, task_id);
+ if (!task_is_mode(lcore_id, task_id, "swap")) {
+ plog_err("Core %u task %u is not running swap\n", lcore_id, task_id);
}
else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
- task_cgnat_dump_private_hash((struct task_nat *)tbase);
+ igmp_leave_group(tbase);
}
}
}
return 0;
}
-static int parse_cmd_accuracy(const char *str, struct input *input)
+static int parse_cmd_join_igmp(const char *str, struct input *input)
{
unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
- uint32_t val;
+ uint32_t igmp_ip;
+ uint8_t *igmp_bytes = (uint8_t *)&igmp_ip;
- if (parse_core_task(str, lcores, &task_id, &nb_cores))
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
return -1;
if (!(str = strchr_skip_twice(str, ' ')))
return -1;
- if (sscanf(str, "%"PRIu32"", &val) != 1)
+ if (sscanf(str, "%hhu.%hhu.%hhu.%hhu", igmp_bytes, igmp_bytes + 1, igmp_bytes + 2, igmp_bytes + 3) != 4) {
return -1;
-
+ }
if (cores_task_are_valid(lcores, task_id, nb_cores)) {
for (unsigned int i = 0; i < nb_cores; i++) {
lcore_id = lcores[i];
- if (!task_is_mode(lcore_id, task_id, "lat")) {
- plog_err("Core %u task %u is not measuring latency\n", lcore_id, task_id);
+ if (!task_is_mode(lcore_id, task_id, "swap")) {
+ plog_err("Core %u task %u is not running swap\n", lcore_id, task_id);
}
else {
struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ igmp_join_group(tbase, igmp_ip);
+ }
+ }
+ }
+ return 0;
+}
- task_lat_set_accuracy_limit((struct task_lat *)tbase, val);
+static int parse_cmd_send_unsollicited_na(const char *str, struct input *input)
+{
+ unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+
+ if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+ return -1;
+
+ if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+ for (unsigned int i = 0; i < nb_cores; i++) {
+ lcore_id = lcores[i];
+
+ if (!task_is_sub_mode(lcore_id, task_id, "ndp")) {
+ plog_err("Core %u task %u is not running ndp\n", lcore_id, task_id);
+ }
+ else {
+ struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+ send_unsollicited_neighbour_advertisement(tbase);
}
}
}
@@ -1906,12 +2314,15 @@ static struct cmd_str cmd_strings[] = {
{"bypass", "<core_id> <task_id>", "Bypass task", parse_cmd_bypass},
{"reconnect", "<core_id> <task_id>", "Reconnect task", parse_cmd_reconnect},
{"pkt_size", "<core_id> <task_id> <pkt_size>", "Set the packet size to <pkt_size>", parse_cmd_pkt_size},
+ {"imix", "<core_id> <task_id> <pkt_size,pkt_size ... >", "Set the packet sizes to <pkt_size>", parse_cmd_imix},
{"speed", "<core_id> <task_id> <speed percentage>", "Change the speed to <speed percentage> at which packets are being generated on core <core_id> in task <task_id>.", parse_cmd_speed},
{"speed_byte", "<core_id> <task_id> <speed>", "Change speed to <speed>. The speed is specified in units of bytes per second.", parse_cmd_speed_byte},
{"set value", "<core_id> <task_id> <offset> <value> <value_len>", "Set <value_len> bytes to <value> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_value},
{"set random", "<core_id> <task_id> <offset> <random_str> <value_len>", "Set <value_len> bytes to <rand_str> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_random},
+ {"set range", "<core_id> <task_id> <offset> <range_start> <range_end>", "Set bytes from <range_start> to <range_end> at offset <offset> in packets generated on <core_id> <task_id>", parse_cmd_set_range},
{"reset values all", "", "Undo all \"set value\" commands on all cores/tasks", parse_cmd_reset_values_all},
{"reset randoms all", "", "Undo all \"set random\" commands on all cores/tasks", parse_cmd_reset_randoms_all},
+ {"reset ranges all", "", "Undo all \"set range\" commands on all cores/tasks", parse_cmd_reset_ranges_all},
{"reset values", "<core id> <task id>", "Undo all \"set value\" commands on specified core/task", parse_cmd_reset_values},
{"arp add", "<core id> <task id> <port id> <gre id> <svlan> <cvlan> <ip addr> <mac addr> <user>", "Add a single ARP entry into a CPE table on <core id>/<task id>.", parse_cmd_arp_add},
@@ -1930,9 +2341,12 @@ static struct cmd_str cmd_strings[] = {
{"irq stats", "<core id> <task id>", "Print irq related infos", parse_cmd_irq},
{"show irq buckets", "<core id> <task id>", "Print irq buckets", parse_cmd_show_irq_buckets},
{"lat packets", "<core id> <task id>", "Print the latency for each of the last set of packets", parse_cmd_lat_packets},
+ {"lat all stats", "<core id> <task id>", "Print the latency for each of the last set of packets as well as latency distribution", parse_cmd_lat_stats_and_packets},
{"accuracy limit", "<core id> <task id> <nsec>", "Only consider latency of packets that were measured with an error no more than <nsec>", parse_cmd_accuracy},
{"core stats", "<core id> <task id>", "Print rx/tx/drop for task <task id> running on core <core id>", parse_cmd_core_stats},
+ {"dp core stats", "<core id> <task id>", "Print rx/tx/non_dp_rx/non_dp_tx/drop for task <task id> running on core <core id>", parse_cmd_dp_core_stats},
{"port_stats", "<port id>", "Print rate for no_mbufs, ierrors + imissed, rx_bytes, tx_bytes, rx_pkts, tx_pkts; totals for RX, TX, no_mbufs, ierrors + imissed for port <port id>", parse_cmd_port_stats},
+ {"multi port stats", "<port list>", "Get stats for multiple ports, semi-colon separated: port id, total for rx_pkts, tx_pkts, no_mbufs, ierrors + imissed, last_tsc", parse_cmd_multi_port_stats},
{"read reg", "", "Read register", parse_cmd_read_reg},
{"write reg", "", "Read register", parse_cmd_write_reg},
{"set vlan offload", "", "Set Vlan offload", parse_cmd_set_vlan_offload},
@@ -1943,7 +2357,9 @@ static struct cmd_str cmd_strings[] = {
{"set cache class", "<core id> <class>", "Set cache class", parse_cmd_set_cache_class},
{"get cache class", "<core id>", "Get cache class", parse_cmd_get_cache_class},
{"get cache mask", "<core id>", "Get cache mask", parse_cmd_get_cache_mask},
- {"reset port", "", "Reset port", parse_cmd_reset_port},
+ {"reset port", "<port id>", "Reset port", parse_cmd_reset_port},
+ {"enable multicast", "<port id> <MAC>", "Enable multicast", parse_cmd_enable_multicast},
+ {"disable multicast", "<port id> <MAC>", "Disable multicast", parse_cmd_disable_multicast},
{"ring info all", "", "Get information about ring, such as ring size and number of elements in the ring", parse_cmd_ring_info_all},
{"ring info", "<core id> <task id>", "Get information about ring on core <core id> in task <task id>, such as ring size and number of elements in the ring", parse_cmd_ring_info},
{"port info", "<port id> [brief?]", "Get port related information, such as MAC address, socket, number of descriptors..., . Adding \"brief\" after command prints short version of output.", parse_cmd_port_info},
@@ -1956,8 +2372,16 @@ static struct cmd_str cmd_strings[] = {
{"cgnat dump private hash", "<core id> <task id>", "Dump cgnat private hash table", parse_cmd_cgnat_private_hash},
{"delay_us", "<core_id> <task_id> <delay_us>", "Set the delay in usec for the impair mode to <delay_us>", parse_cmd_delay_us},
{"random delay_us", "<core_id> <task_id> <random delay_us>", "Set the delay in usec for the impair mode to <random delay_us>", parse_cmd_random_delay_us},
- {"probability", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_probability},
+ {"probability", "<core_id> <task_id> <probability>", "Old - Use <proba no drop> instead. Set the percent of forwarded packets for the impair mode", parse_cmd_set_proba_no_drop}, // old - backward compatibility
+ {"proba no drop", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_proba_no_drop},
+ {"proba delay", "<core_id> <task_id> <probability>", "Set the percent of delayed packets for the impair mode", parse_cmd_set_proba_delay},
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ {"proba duplicate", "<core_id> <task_id> <probability>", "Set the percent of duplicate packets for the impair mode", parse_cmd_set_proba_duplicate},
+#endif
{"version", "", "Show version", parse_cmd_version},
+ {"join igmp", "<core_id> <task_id> <ip>", "Send igmp membership report for group <ip>", parse_cmd_join_igmp},
+ {"leave igmp", "<core_id> <task_id>", "Send igmp leave group", parse_cmd_leave_igmp},
+ {"send unsollicited na", "<core_id> <task_id>", "Send Unsollicited Neighbor Advertisement", parse_cmd_send_unsollicited_na},
{0,0,0,0},
};
@@ -2001,7 +2425,7 @@ static int parse_cmd_help(const char *str, struct input *input)
if (strlen(cmd_strings[i].args)) {
char tmp[256] = {0};
- strncpy(tmp, cmd_strings[i].args, 128);
+ prox_strncpy(tmp, cmd_strings[i].args, 128);
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "Arguments: %s\n", tmp);
len2 = len;
if (strlen(cmd_strings[i].help)) {
@@ -2033,6 +2457,9 @@ static int parse_cmd_help(const char *str, struct input *input)
len3 = max_len;
}
+ // Use strncpy here and not prox_strncpy. The dest (tmp) has been initialized with 0.
+ // The fact that we are copying 80 characters potentially not null terminated is hence not an issue.
+ // Using prox_strncpy here might cause a PROX_PANIC
strncpy(tmp, h, len3);
h += len3;
while (h[0] == ' ' && strlen(h))
diff --git a/VNFs/DPPD-PROX/commands.c b/VNFs/DPPD-PROX/commands.c
index 9f0be145..a8953a68 100644
--- a/VNFs/DPPD-PROX/commands.c
+++ b/VNFs/DPPD-PROX/commands.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,6 +18,9 @@
#include <rte_table_hash.h>
#include <rte_version.h>
#include <rte_malloc.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+#include <rte_eal_memconfig.h>
+#endif
#include "prox_malloc.h"
#include "display.h"
@@ -110,8 +113,8 @@ static inline int wait_command_handled(struct lcore_cfg *lconf)
static inline void start_l3(struct task_args *targ)
{
if (!task_is_master(targ)) {
- if ((targ->nb_txrings != 0) || (targ->nb_txports != 0)) {
- if (targ->flags & TASK_ARG_L3)
+ if ((targ->nb_txports != 0)) {
+ if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP))
task_start_l3(targ->tbase, targ);
}
}
@@ -134,10 +137,14 @@ void start_cores(uint32_t *cores, int count, int task_id)
targ = &lconf->targs[tid];
start_l3(targ);
}
- } else {
+ } else if (task_id < lconf->n_tasks_all) {
targ = &lconf->targs[task_id];
start_l3(targ);
+ } else {
+ plog_warn("Invalid task id %d on core %u\n", task_id, cores[i]);
+ continue;
}
+ if (wait_command_handled(lconf) == -1) return;
lconf->msg.type = LCONF_MSG_START;
lconf->msg.task_id = task_id;
lconf_set_req(lconf);
@@ -174,6 +181,10 @@ void stop_cores(uint32_t *cores, int count, int task_id)
for (int i = 0; i < count; ++i) {
struct lcore_cfg *lconf = &lcore_cfg[cores[i]];
+ if (task_id >= lconf->n_tasks_all) {
+ plog_warn("Trying to stop invalid task id %d on core %u\n", task_id, cores[i]);
+ continue;
+ }
if (lconf->n_tasks_run) {
if (wait_command_handled(lconf) == -1) return;
@@ -235,6 +246,61 @@ static struct size_unit to_size_unit(uint64_t bytes)
return ret;
}
+static int add_multicast_addr(uint8_t port_id, prox_rte_ether_addr *addr)
+{
+ unsigned int i;
+ int rc = 0;
+
+ struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
+
+ if (port_cfg->nb_mc_addr >= NB_MCAST_ADDR) {
+ plog_err("Already reached maximum number (%d) of mcast addr on port %u\n", NB_MCAST_ADDR, port_id);
+ return -1;
+ }
+ for (i = 0; i < port_cfg->nb_mc_addr; i++) {
+ if (prox_rte_is_same_ether_addr(addr, &port_cfg->mc_addr[i])) {
+ plog_info("multicast address already added to port\n");
+ return -1;
+ }
+ }
+
+ prox_rte_ether_addr_copy(addr, &port_cfg->mc_addr[port_cfg->nb_mc_addr]);
+ if ((rc = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr + 1)) != 0) {
+ plog_err("rte_eth_dev_set_mc_addr_list returns %d on port %u\n", rc, port_id);
+ return rc;
+ }
+
+ port_cfg->nb_mc_addr++;
+ plog_info("rte_eth_dev_set_mc_addr_list(%d addr) on port %u\n", port_cfg->nb_mc_addr, port_id);
+ return rc;
+}
+
+static int del_multicast_addr(uint8_t port_id, prox_rte_ether_addr *addr)
+{
+ unsigned int i;
+ int rc = 0;
+
+ struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
+
+ for (i = 0; i < port_cfg->nb_mc_addr; i++) {
+ if (prox_rte_is_same_ether_addr(addr, &port_cfg->mc_addr[i])) {
+ // Copy last address to the slot to be deleted
+ prox_rte_ether_addr_copy(&port_cfg->mc_addr[port_cfg->nb_mc_addr-1], &port_cfg->mc_addr[i]);
+
+ if ((rc = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr - 1)) != 0) {
+ plog_err("rte_eth_dev_set_mc_addr_list returns %d on port %u\n", rc, port_id);
+ // When set failed, let restore the situation we were before calling the function...
+ prox_rte_ether_addr_copy(addr, &port_cfg->mc_addr[i]);
+ return rc;
+ }
+ port_cfg->nb_mc_addr--;
+ plog_info("rte_eth_dev_set_mc_addr_list(%d addr) on port %u\n", port_cfg->nb_mc_addr, port_id);
+ return 0;
+ }
+ }
+ plog_err("multicast address not found on port %u\n", port_id);
+ return -1;
+}
void cmd_mem_stats(void)
{
struct rte_malloc_socket_stats sock_stats;
@@ -259,8 +325,101 @@ void cmd_mem_stats(void)
}
}
+static void get_hp_sz_string(char *sz_str, uint64_t hp_sz)
+{
+ switch (hp_sz >> 20) {
+ case 0:
+ strcpy(sz_str, " 0 ");
+ break;
+ case 2:
+ strcpy(sz_str, "2MB");
+ break;
+ case 1024:
+ strcpy(sz_str, "1GB");
+ break;
+ default:
+ strcpy(sz_str, "??");
+ }
+}
+
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+// Print all segments, 1 by 1
+// Unused for now; kept for reference
+static int print_all_segments(const struct rte_memseg_list *memseg_list, const struct rte_memseg *memseg, void *arg)
+{
+ int memseg_list_idx = 0, memseg_idx;
+ int n = (*(int *)arg)++;
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ memseg_list_idx = memseg_list - mcfg->memsegs;
+ if ((memseg_list_idx < 0) || (memseg_list_idx >= RTE_MAX_MEMSEG_LISTS)) {
+ plog_err("Invalid memseg_list_idx = %d; memseg_list = %p, mcfg->memsegs = %p\n", memseg_list_idx, memseg_list, mcfg->memsegs);
+ return -1;
+ }
+#endif
+ memseg_idx = rte_fbarray_find_idx(&memseg_list->memseg_arr, memseg);
+ if (memseg_idx < 0) {
+ plog_err("Invalid memseg_idx = %d; memseg_list = %p, memseg = %p\n", memseg_idx, memseg_list, memseg);
+ return -1;
+ }
+
+ char sz_str[5];
+ get_hp_sz_string(sz_str, memseg->hugepage_sz);
+ plog_info("Segment %u (sock %d): [%i-%i] [%#lx-%#lx] at %p using %zu pages of %s\n",
+ n,
+ memseg->socket_id,
+ memseg_list_idx,
+ memseg_idx,
+ memseg->iova,
+ memseg->iova+memseg->len,
+ memseg->addr,
+ memseg->len/memseg->hugepage_sz, sz_str);
+
+ return 0;
+}
+
+// Print memory segments
+// Contiguous segments are shown as 1 big segment
+static int print_segments(const struct rte_memseg_list *memseg_list, const struct rte_memseg *memseg, size_t len, void *arg)
+{
+ int memseg_list_idx = 0, memseg_idx;
+ static int n = 0;
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ memseg_list_idx = memseg_list - mcfg->memsegs;
+ if ((memseg_list_idx < 0) || (memseg_list_idx >= RTE_MAX_MEMSEG_LISTS)) {
+ plog_err("Invalid memseg_list_idx = %d; memseg_list = %p, mcfg->memsegs = %p\n", memseg_list_idx, memseg_list, mcfg->memsegs);
+ return -1;
+ }
+#endif
+ memseg_idx = rte_fbarray_find_idx(&memseg_list->memseg_arr, memseg);
+ if (memseg_idx < 0) {
+ plog_err("Invalid memseg_idx = %d; memseg_list = %p, memseg = %p\n", memseg_idx, memseg_list, memseg);
+ return -1;
+ }
+
+ char sz_str[5];
+ get_hp_sz_string(sz_str, memseg->hugepage_sz);
+ plog_info("Segment %u (sock %d): [%i-%i] [%#lx-%#lx] at %p using %zu pages of %s\n",
+ n++,
+ memseg->socket_id,
+ memseg_list_idx,
+ memseg_idx,
+ memseg->iova,
+ memseg->iova+len,
+ memseg->addr,
+ memseg->hugepage_sz?len/memseg->hugepage_sz:0, sz_str);
+
+ return 0;
+}
+
+#endif
+
void cmd_mem_layout(void)
{
+#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
const struct rte_memseg* memseg = rte_eal_get_physmem_layout();
plog_info("Memory layout:\n");
@@ -268,17 +427,8 @@ void cmd_mem_layout(void)
if (memseg[i].addr == NULL)
break;
- const char *sz_str;
- switch (memseg[i].hugepage_sz >> 20) {
- case 2:
- sz_str = "2MB";
- break;
- case 1024:
- sz_str = "1GB";
- break;
- default:
- sz_str = "??";
- }
+ char sz_str[5];
+ get_hp_sz_string(sz_str, memseg[i].hugepage_sz);
plog_info("Segment %u: [%#lx-%#lx] at %p using %zu pages of %s\n",
i,
@@ -287,6 +437,11 @@ void cmd_mem_layout(void)
memseg[i].addr,
memseg[i].len/memseg[i].hugepage_sz, sz_str);
}
+#else
+ int segment_number = 0;
+ //rte_memseg_walk(print_all_segments, &segment_number);
+ rte_memseg_contig_walk(print_segments, &segment_number);
+#endif
}
void cmd_dump(uint8_t lcore_id, uint8_t task_id, uint32_t nb_packets, struct input *input, int rx, int tx)
@@ -740,7 +895,7 @@ void cmd_portinfo(int port_id, char *dst, size_t max_len)
dst += snprintf(dst, end - dst,
"%2d:%10s; "MAC_BYTES_FMT"; %s\n",
port_id,
- port_cfg->name,
+ port_cfg->names[0],
MAC_BYTES(port_cfg->eth_addr.addr_bytes),
port_cfg->pci_addr);
}
@@ -754,14 +909,18 @@ void cmd_portinfo(int port_id, char *dst, size_t max_len)
struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
dst += snprintf(dst, end - dst, "Port info for port %u\n", port_id);
- dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->name);
+ dst += snprintf(dst, end - dst, "\tName: %s\n", port_cfg->names[0]);
dst += snprintf(dst, end - dst, "\tDriver: %s\n", port_cfg->driver_name);
dst += snprintf(dst, end - dst, "\tMac address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
dst += snprintf(dst, end - dst, "\tLink speed: %u Mbps\n", port_cfg->link_speed);
+ dst += snprintf(dst, end - dst, "\tLink max speed: %u Mbps\n", port_cfg->max_link_speed);
dst += snprintf(dst, end - dst, "\tLink status: %s\n", port_cfg->link_up? "up" : "down");
dst += snprintf(dst, end - dst, "\tSocket: %u\n", port_cfg->socket);
dst += snprintf(dst, end - dst, "\tPCI address: %s\n", port_cfg->pci_addr);
dst += snprintf(dst, end - dst, "\tPromiscuous: %s\n", port_cfg->promiscuous? "yes" : "no");
+ for (unsigned int i = 0; i < port_cfg->nb_mc_addr; i++) {
+ dst += snprintf(dst, end - dst, "\tmcast address: "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->mc_addr[i].addr_bytes));
+ }
dst += snprintf(dst, end - dst, "\tNumber of RX/TX descriptors: %u/%u\n", port_cfg->n_rxd, port_cfg->n_txd);
dst += snprintf(dst, end - dst, "\tNumber of RX/TX queues: %u/%u (max: %u/%u)\n", port_cfg->n_rxq, port_cfg->n_txq, port_cfg->max_rxq, port_cfg->max_txq);
dst += snprintf(dst, end - dst, "\tMemory pools:\n");
@@ -802,6 +961,32 @@ void cmd_reset_port(uint8_t portid)
plog_warn("Failed to restart port %d\n", portid);
}
}
+
+void cmd_multicast(uint8_t port_id, unsigned int val, prox_rte_ether_addr *mac)
+{
+ if (!port_is_active(port_id)) {
+ return;
+ }
+ struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
+ if (val == 1) {
+ if (port_cfg->nb_mc_addr == 0) {
+ rte_eth_allmulticast_enable(port_id);
+ }
+ if (add_multicast_addr(port_id, mac) != 0) {
+ if (port_cfg->nb_mc_addr == 0)
+ rte_eth_allmulticast_disable(port_id);
+ }
+ } else if (val == 0) {
+ if (del_multicast_addr(port_id, mac) == 0) {
+ if (port_cfg->nb_mc_addr == 0) {
+ rte_eth_allmulticast_disable(port_id);
+ }
+ }
+ } else {
+ plog_err("Unexpected value in cmd_multicast on port %d\n", port_id);
+ }
+}
+
void cmd_write_reg(uint8_t port_id, unsigned int id, unsigned int val)
{
if (!port_is_active(port_id)) {
@@ -819,7 +1004,7 @@ void cmd_set_vlan_offload(uint8_t port_id, unsigned int val)
}
plog_info("setting vlan offload to %d\n", val);
- if (val & ~(ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD | ETH_VLAN_EXTEND_OFFLOAD)) {
+ if (val & ~(RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD | RTE_ETH_VLAN_EXTEND_OFFLOAD)) {
plog_info("wrong vlan offload value\n");
}
int ret = rte_eth_dev_set_vlan_offload(port_id, val);
diff --git a/VNFs/DPPD-PROX/commands.h b/VNFs/DPPD-PROX/commands.h
index 6c4a29a3..5ddb81e2 100644
--- a/VNFs/DPPD-PROX/commands.h
+++ b/VNFs/DPPD-PROX/commands.h
@@ -18,6 +18,8 @@
#define _COMMANDS_H_
#include <inttypes.h>
+#include <rte_ether.h>
+#include "prox_compat.h"
struct input;
@@ -64,6 +66,7 @@ void cmd_set_cache_class(uint32_t lcore_id, uint32_t set);
void cmd_cache_reset(void);
void cmd_reset_port(uint8_t port_id);
+void cmd_multicast(uint8_t port_id, unsigned int val, prox_rte_ether_addr *mac);
int reconnect_task(uint32_t lcore_id, uint32_t task_id);
int bypass_task(uint32_t lcore_id, uint32_t task_id);
diff --git a/VNFs/DPPD-PROX/config/cgnat.cfg b/VNFs/DPPD-PROX/config/cgnat.cfg
index 4015d3ab..cae02c7e 100644
--- a/VNFs/DPPD-PROX/config/cgnat.cfg
+++ b/VNFs/DPPD-PROX/config/cgnat.cfg
@@ -30,7 +30,7 @@ nat_table = dofile("cgnat_table.lua")
lpm4 = dofile("ipv4_1port.lua")
[defaults]
-mempool size=4K
+mempool size=8K
[global]
start time=5
diff --git a/VNFs/DPPD-PROX/config/ipv6.cfg b/VNFs/DPPD-PROX/config/ipv6.cfg
new file mode 100644
index 00000000..6ad4725a
--- /dev/null
+++ b/VNFs/DPPD-PROX/config/ipv6.cfg
@@ -0,0 +1,85 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=6 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[port 0]
+name=p0
+
+[port 2]
+name=p1
+
+[defaults]
+mempool size=8K
+
+[global]
+start time=5
+name=Basic IPv6
+
+[variables]
+$loc_gen_hex_ip1=fe 80 00 00 00 00 00 00 00 00 00 00 00 00 00 01
+$loc_swp_hex_ip1=fe 80 00 00 00 00 00 00 00 00 00 00 00 00 00 02
+$glob_gen_hex_ip1=20 01 db 80 00 00 00 00 00 00 00 00 00 00 00 01
+$glob_swp_hex_ip1=20 01 db 80 00 00 00 00 00 00 00 00 00 00 00 02
+$loc_gen_ip1=fe80::0001
+$glob_gen_ip1=2001:db80::0001
+$loc_swp_ip1=fe80::0002
+$glob_swp_ip1=2001:db80::0002
+
+[core 0s0]
+mode=master
+
+; 84 bytes packet to include latency related data.
+; for 64 bytes packets, comment out lat pos, packet id pos, signature pos, accuracy pos; set pkt size to 60 and
+; set payload & udp length to 8 (bytes 19 and 59 changed from 1a to 08)
+[core 1s0]
+name=TX0
+task=0
+mode=gen
+sub mode=ndp
+local ipv6=${loc_gen_ip1}
+global ipv6=${glob_gen_ip1}
+tx port=p0
+bps=1000
+pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 86 dd 60 00 00 00 00 1a 11 40 ${glob_gen_hex_ip1} ${glob_swp_hex_ip1} 13 88 13 88 00 1a 55 7b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+lat pos=62
+packet id pos=66
+signature pos=72
+accuracy pos=76
+pkt size=80
+
+task=1
+mode=lat
+sub mode=ndp
+rx port=p0
+local ipv6=${loc_gen_ip1}
+global ipv6=${glob_gen_ip1}
+lat pos=62
+packet id pos=66
+signature pos=72
+accuracy pos=76
+
+[core 2s0]
+name=SWAP
+task=0
+mode=swap
+sub mode=ndp
+rx port=p1
+tx port=p1
+local ipv6=${loc_swp_ip1}
+global ipv6=${glob_swp_ip1}
diff --git a/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg b/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg
index 27fd08e5..70496063 100644
--- a/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg
+++ b/VNFs/DPPD-PROX/config/l2fwd-4ports.cfg
@@ -32,7 +32,7 @@ name=if3
mac=50:00:00:00:00:04
[defaults]
-mempool size=4K
+mempool size=8K
[global]
start time=5
diff --git a/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg b/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg
index 3c452b0e..a8cae86f 100644
--- a/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg
+++ b/VNFs/DPPD-PROX/config/l3fwd-4ports.cfg
@@ -32,7 +32,7 @@ name=if3
mac=50:00:00:00:00:04
[defaults]
-mempool size=4K
+mempool size=8K
[lua]
lpm4 = dofile("ipv4.lua")
diff --git a/VNFs/DPPD-PROX/config/mcast.cfg b/VNFs/DPPD-PROX/config/mcast.cfg
new file mode 100644
index 00000000..9257c85a
--- /dev/null
+++ b/VNFs/DPPD-PROX/config/mcast.cfg
@@ -0,0 +1,108 @@
+;;
+;; Copyright (c) 2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=6 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+;; This example uses 2 physical ports, connected back to back or through a
+;; switch.
+;; The 1st physical port (DPDK port 0) generates (through tasks 2 and 3 on
+;; core 1) 2x IGMP membership query per second: one towards 224.0.0.3 and
+;; one towards 224.0.0.4.
+;; mcast packets can be generated using "speed 1 0 1" (to generate packets
+;; on core 1, task 0 at 100Mbps) and "speed 1 1 1" (to generate packets
+;; on core 1, task 1 at 100Mbps). They will resp. generate mcast packets
+;; towards 224.0.0.3 and 224.0.0.4;
+
+;; 2x VF are configured on the second physical port.
+;; They receive packets from the generator, each on their own mcast address.
+
+[port 0]
+name=gen_port
+
+[port 1]
+name=vf0
+multicast=01:00:5e:00:00:03
+
+[port 2]
+name=vf1
+multicast=01:00:5e:00:00:04
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=8K
+
+[global]
+start time=5
+name=Basic multicast
+
+[core 0s0]
+mode=master
+
+[core 1s0]
+name=gen
+task=0
+mode=gen
+tx port=gen_port
+bps=0
+pkt inline=01 00 5e 00 00 03 00 00 00 00 00 01 08 00 45 00 05 dc 00 00 00 00 ff 11 00 00 c0 a8 01 01 e0 00 00 03 0b b8 0b b8 05 c8 00 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 
9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7
+min bulk size=$mbs
+
+task=1
+mode=gen
+tx port=gen_port
+bps=0
+pkt inline=01 00 5e 00 00 04 00 00 00 00 00 01 08 00 45 00 05 dc 00 00 00 00 ff 11 00 00 c0 a8 01 01 e0 00 00 04 0b b8 0b b8 05 c8 00 00 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 
9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 da db dc dd de df e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 ea eb ec ed ee ef f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 fa fb fc fd fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10 11 12 13 14 15 16 17 18 19 2a 2b 2c 2d 2e 2f 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f 80 81 82 83 84 85 86 87 88 89 8a 8b 8c 8d 8e 8f 90 91 92 93 94 95 96 97 98 99 9a 9b 9c 9d 9e 9f a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 aa ab ac ad ae af b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 ba bb bc bd be bf c0 c1 c2 c3 c4 c5 c6 c7
+min bulk size=$mbs
+
+task=2
+mode=gen
+tx port=gen_port
+bps=125
+pkt inline=01 00 5e 00 00 03 00 00 01 00 00 01 08 00 45 00 00 1c 00 00 00 00 01 02 b0 dd c0 a8 01 02 e0 00 00 03 11 0a 0c f0 e0 00 00 03
+
+task=3
+mode=gen
+tx port=gen_port
+bps=125
+pkt inline=01 00 5e 00 00 04 00 00 01 00 00 01 08 00 45 00 00 1c 00 00 00 00 01 02 b0 dd c0 a8 01 02 e0 00 00 04 11 0a 0c f0 e0 00 00 04
+
+[core 2s0]
+name=RX gen
+task=0
+mode=nop
+rx port=gen_port
+
+[core 3s1]
+name=swap 0
+task=0
+mode=swap
+rx port=vf0
+tx port=vf0
+local ipv4=20.21.22.23
+igmp ipv4=224.0.0.3
+
+[core 4s1]
+name=swap 1
+task=0
+mode=swap
+rx port=vf1
+tx port=vf1
+local ipv4=20.21.22.24
+igmp ipv4=224.0.0.4
diff --git a/VNFs/DPPD-PROX/config/nop-rings.cfg b/VNFs/DPPD-PROX/config/nop-rings.cfg
index 000353ad..7513c433 100644
--- a/VNFs/DPPD-PROX/config/nop-rings.cfg
+++ b/VNFs/DPPD-PROX/config/nop-rings.cfg
@@ -67,7 +67,7 @@ rx_ring=dpdkr3_tx
tx_ring=dpdkr3_rx
[defaults]
-mempool size=4K
+mempool size=8K
[global]
start time=5
diff --git a/VNFs/DPPD-PROX/config/nop.cfg b/VNFs/DPPD-PROX/config/nop.cfg
index 757b1eda..2741b41c 100644
--- a/VNFs/DPPD-PROX/config/nop.cfg
+++ b/VNFs/DPPD-PROX/config/nop.cfg
@@ -44,7 +44,7 @@ name=if3
mac=hardware
[defaults]
-mempool size=2K
+mempool size=8K
[global]
start time=5
diff --git a/VNFs/DPPD-PROX/config/nsh_acl.cfg b/VNFs/DPPD-PROX/config/nsh_acl.cfg
index 2893bd4d..6ad1d5fc 100644
--- a/VNFs/DPPD-PROX/config/nsh_acl.cfg
+++ b/VNFs/DPPD-PROX/config/nsh_acl.cfg
@@ -28,7 +28,7 @@ mac=hardware
acl_table=dofile("acl_table.lua")
[defaults]
-mempool size=4K
+mempool size=8K
[global]
start time=5
diff --git a/VNFs/DPPD-PROX/config/nsh_nat.cfg b/VNFs/DPPD-PROX/config/nsh_nat.cfg
index bb3bf4bc..3ae82c47 100644
--- a/VNFs/DPPD-PROX/config/nsh_nat.cfg
+++ b/VNFs/DPPD-PROX/config/nsh_nat.cfg
@@ -28,7 +28,7 @@ mac=hardware
nat_table = dofile("nat_table.lua")
[defaults]
-mempool size=4K
+mempool size=8K
[global]
start time=5
diff --git a/VNFs/DPPD-PROX/defaults.c b/VNFs/DPPD-PROX/defaults.c
index 6688e8c6..f5624b97 100644
--- a/VNFs/DPPD-PROX/defaults.c
+++ b/VNFs/DPPD-PROX/defaults.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
#include <string.h>
#include <libgen.h>
#include <rte_sched.h>
+#include <rte_ether.h>
#include <rte_version.h>
#include "lconf.h"
@@ -27,14 +28,16 @@
#include "etypes.h"
#include "toeplitz.h"
#include "handle_master.h"
+#include "prox_compat.h"
+#include "prox_ipv6.h"
#define TEN_GIGABIT 1250000000
#define QUEUE_SIZES 128
#define NB_PIPES 32768
#define NB_MBUF 4096
#define RING_RX_SIZE 256
-#define NB_RX_RING_DESC 256
-#define NB_TX_RING_DESC 256
+#define NB_RX_RING_DESC 2048
+#define NB_TX_RING_DESC 2048
/* 1500000 milliseconds */
#define DEFAULT_CPE_TIMEOUT_MS 1500000
@@ -46,15 +49,12 @@
static const struct rte_eth_conf default_port_conf = {
.rxmode = {
- .split_hdr_size = 0,
- .header_split = 0, /* Header Split disabled */
- .hw_ip_checksum = 0, /* IP checksum offload disabled */
- .hw_vlan_filter = 0, /* VLAN filtering disabled */
- .hw_vlan_strip = 0, /* VLAN filtering disabled */
- .jumbo_frame = 0, /* Jumbo frame support disabled */
- .hw_strip_crc = 1, /* CRC stripped by hardware --- always set to 1 in VF */
- .hw_vlan_extend = 0,
- .mq_mode = 0
+ .mq_mode = 0,
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+ .max_rx_pkt_len = PROX_MTU + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN,
+#else
+ .mtu = PROX_MTU,
+#endif
},
.rx_adv_conf = {
.rss_conf = {
@@ -80,6 +80,16 @@ static struct rte_eth_txconf default_tx_conf = {
.tx_rs_thresh = 32, /* Use PMD default values */
};
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+static struct rte_sched_subport_profile_params subport_profile_params_default = {
+ .tb_rate = TEN_GIGABIT / NB_PIPES,
+ .tb_size = 4000000,
+
+ .tc_rate = {TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES},
+ .tc_period = 40,
+};
+#endif
+
static struct rte_sched_port_params port_params_default = {
.name = "port_0",
.socket = 0,
@@ -87,10 +97,15 @@ static struct rte_sched_port_params port_params_default = {
.rate = 0,
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ .subport_profiles = &subport_profile_params_default,
+#endif
.n_pipes_per_subport = NB_PIPES,
+#if RTE_VERSION < RTE_VERSION_NUM(19,11,0,0)
.qsize = {QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES},
.pipe_profiles = NULL,
.n_pipe_profiles = 1 /* only one profile */
+#endif
};
static struct rte_sched_pipe_params pipe_params_default = {
@@ -100,18 +115,37 @@ static struct rte_sched_pipe_params pipe_params_default = {
.tc_rate = {TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES, TEN_GIGABIT / NB_PIPES},
.tc_period = 40,
+#if RTE_VERSION >= RTE_VERSION_NUM(19,8,0,0)
+ .wrr_weights = {1, 1, 1, 1},
+#else
.wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+#endif
};
static struct rte_sched_subport_params subport_params_default = {
+#if RTE_VERSION < RTE_VERSION_NUM(20,11,0,0)
.tb_rate = TEN_GIGABIT,
.tb_size = 4000000,
.tc_rate = {TEN_GIGABIT, TEN_GIGABIT, TEN_GIGABIT, TEN_GIGABIT},
.tc_period = 40, /* default was 10 */
+#endif
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ .qsize = {QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES, QUEUE_SIZES},
+ .pipe_profiles = NULL,
+ .n_pipe_profiles = 1 /* only one profile */
+#endif
};
-void set_global_defaults(__attribute__((unused)) struct prox_cfg *prox_cfg)
+void set_global_defaults(struct prox_cfg *prox_cfg)
{
+ if (parse_ip6(&prox_cfg->all_routers_ipv6_mcast_addr, ALL_ROUTERS_IPV6_MCAST_ADDR) != 0)
+ plog_err("Failed to parse %s\n", ALL_ROUTERS_IPV6_MCAST_ADDR);
+ if (parse_ip6(&prox_cfg->all_nodes_ipv6_mcast_addr, ALL_NODES_IPV6_MCAST_ADDR) != 0)
+ plog_err("Failed to parse %s\n", ALL_NODES_IPV6_MCAST_ADDR);
+ if (parse_ip6(&prox_cfg->random_ip, RANDOM_IPV6) != 0)
+ plog_err("Failed to parse %s\n", RANDOM_IPV6);
+ set_mcast_mac_from_ipv6(&prox_cfg->all_routers_mac_addr, &prox_cfg->all_routers_ipv6_mcast_addr);
+ set_mcast_mac_from_ipv6(&prox_cfg->all_nodes_mac_addr, &prox_cfg->all_nodes_ipv6_mcast_addr);
}
void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_init)
@@ -139,7 +173,11 @@ void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_in
targ->qos_conf.port_params = port_params_default;
targ->qos_conf.pipe_params[0] = pipe_params_default;
targ->qos_conf.subport_params[0] = subport_params_default;
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ targ->qos_conf.subport_params[0].pipe_profiles = targ->qos_conf.pipe_params;
+#else
targ->qos_conf.port_params.pipe_profiles = targ->qos_conf.pipe_params;
+#endif
targ->qos_conf.port_params.rate = TEN_GIGABIT;
targ->qinq_tag = ETYPE_8021ad;
targ->n_concur_conn = 8192*2;
@@ -152,22 +190,24 @@ void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_in
targ->mapping[i] = i; // identity
}
- targ->cbs = ETHER_MAX_LEN;
- targ->ebs = ETHER_MAX_LEN;
- targ->pbs = ETHER_MAX_LEN;
+ targ->cbs = PROX_RTE_ETHER_MAX_LEN;
+ targ->ebs = PROX_RTE_ETHER_MAX_LEN;
+ targ->pbs = PROX_RTE_ETHER_MAX_LEN;
targ->n_max_rules = 1024;
targ->ring_size = RING_RX_SIZE;
targ->nb_cache_mbuf = MAX_PKT_BURST * 4;
- targ->overhead = ETHER_CRC_LEN + 20;
+ targ->overhead = PROX_RTE_ETHER_CRC_LEN + 20;
targ->tunnel_hop_limit = 3;
targ->ctrl_freq = 1000;
targ->lb_friend_core = 0xFF;
- targ->mbuf_size = MBUF_SIZE;
- targ->n_pkts = 1024*64;
+ targ->n_pkts = 0;
+
targ->runtime_flags |= TASK_TX_CRC;
targ->accuracy_limit_nsec = 5000;
+ targ->probability_delay = 1000000;
+ targ->probability_no_drop = 1000000;
}
}
}
@@ -176,6 +216,7 @@ void set_port_defaults(void)
{
for (uint8_t i = 0; i < PROX_MAX_PORTS; ++i ) {
prox_port_cfg[i].promiscuous = 1;
+ prox_port_cfg[i].nb_mc_addr = 0;
prox_port_cfg[i].n_rxd = NB_RX_RING_DESC;
prox_port_cfg[i].n_txd = NB_TX_RING_DESC;
prox_port_cfg[i].port_conf = default_port_conf;
@@ -184,5 +225,14 @@ void set_port_defaults(void)
prox_port_cfg[i].rx_ring[0] = '\0';
prox_port_cfg[i].tx_ring[0] = '\0';
prox_port_cfg[i].mtu = PROX_MTU;
+ prox_port_cfg[i].dpdk_mapping = NO_VDEV_PORT;
+ prox_port_cfg[i].v6_mask_length = 8;
+
+ // CRC_STRIP became the default behavior in DPDK 18.08, and the
+ // DEV_RX_OFFLOAD_CRC_STRIP define has been deleted
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ prox_port_cfg[i].requested_rx_offload = RTE_ETH_RX_OFFLOAD_CRC_STRIP;
+#endif
+ prox_port_cfg[i].requested_tx_offload = RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
}
}
diff --git a/VNFs/DPPD-PROX/defaults.h b/VNFs/DPPD-PROX/defaults.h
index 573cc9c5..69017710 100644
--- a/VNFs/DPPD-PROX/defaults.h
+++ b/VNFs/DPPD-PROX/defaults.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@
#define _DEFAULTS_H_
#include <rte_ether.h>
+#include "prox_compat.h"
struct prox_cfg;
struct lcore_cfg;
@@ -26,9 +27,11 @@ void set_global_defaults(struct prox_cfg* prox_cfg);
void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_init);
void set_port_defaults(void);
+#define MAX_PKT_SIZE 10000
#define MAX_PKT_BURST 64
#define MAX_RING_BURST 64
-#define DUMP_PKT_LEN 128
+#define DUMP_PKT_LEN MAX_PKT_SIZE
+#define MAX_IMIX_PKTS 128
#if MAX_RING_BURST < MAX_PKT_BURST
#error MAX_RING_BURST < MAX_PKT_BURST
@@ -40,8 +43,26 @@ void set_port_defaults(void);
#define MAX_RSS_QUEUE_BITS 9
#define PROX_VLAN_TAG_SIZE 4
-#define MBUF_SIZE (ETHER_MAX_LEN + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE)
-#define PROX_MTU ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN
+/* MBUF_SIZE can be configured based on the following:
+ - If only one segment is used ETH_TXQ_FLAGS_NOMULTSEGS can be used resulting
+ in vector mode used for transmission hence higher performance
+ - Only one segment is used by the rx function if the mbuf size is big enough
+ - Bigger mbufs result in more memory used, hence slightly lower performance (DTLB misses)
+ - Selecting the smaller mbuf is not obvious as pmds might behave slightly differently:
+ - on ixgbe a 1526 + 256 mbuf size will cause any packets bigger than 1024 bytes to be segmented
+ - on i40e a 1526 + 256 mbuf size will cause any packets bigger than 1408 bytes to be segmented
+ - other pmds might have additional requirements
+ As the performance decrease due to the usage of bigger mbuf is not very important, we prefer
+ here to use the same, bigger, mbuf size for all pmds, making the code easier to support.
+ An mbuf size of 2048 + 128 + 128 + 8 can hold a 2048-byte packet, and only one segment will be used
+ except if jumbo frames are enabled. +8 (VLAN) is needed for i40e (and maybe other pmds).
+ TX_MBUF_SIZE is used for when transmitting only: in this case the mbuf size can be smaller.
+*/
+#define MBUF_SIZE (2048 + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE)
+#define TX_MBUF_SIZE (PROX_RTE_ETHER_MAX_LEN + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM + 2 * PROX_VLAN_TAG_SIZE)
+
+#define PROX_MTU PROX_RTE_ETHER_MAX_LEN - PROX_RTE_ETHER_HDR_LEN - PROX_RTE_ETHER_CRC_LEN
+#define NO_VDEV_PORT 0xFF
#endif /* _DEFAULTS_H_ */
diff --git a/VNFs/DPPD-PROX/defines.h b/VNFs/DPPD-PROX/defines.h
index c2309be1..3be1a963 100644
--- a/VNFs/DPPD-PROX/defines.h
+++ b/VNFs/DPPD-PROX/defines.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -42,6 +42,13 @@
addr[12], addr[13], addr[14], addr[15]
#endif
+#ifndef IPv6_PREFIX
+#define IPv6_PREFIX_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#define IPv6_PREFIX(addr) \
+ addr[0], addr[1], addr[2], addr[3], \
+ addr[4], addr[5], addr[6], addr[7]
+#endif
+
#ifndef MAC_BYTES
#define MAC_BYTES_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
diff --git a/VNFs/DPPD-PROX/display.c b/VNFs/DPPD-PROX/display.c
index 2c52d448..d81a40e4 100644
--- a/VNFs/DPPD-PROX/display.c
+++ b/VNFs/DPPD-PROX/display.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -27,6 +27,7 @@
#include "display_ports.h"
#include "display_priority.h"
#include "display_irq.h"
+#include "display_latency_distr.h"
#include "display_rings.h"
#include "display_pkt_len.h"
#include "display_l4gen.h"
@@ -292,6 +293,9 @@ static void display_init_screens(void)
display_add_screen(display_ports());
display_add_screen(display_mempools());
display_add_screen(display_latency());
+#ifdef LATENCY_HISTOGRAM
+ display_add_screen(display_latency_distr());
+#endif
display_add_screen(display_rings());
display_add_screen(display_l4gen());
display_add_screen(display_pkt_len());
@@ -541,7 +545,7 @@ static void draw_title(void)
{
char title_str[128];
- snprintf(title_str, sizeof(title_str), "%s %s: %s", PROGRAM_NAME, VERSION_STR, prox_cfg.name);
+ snprintf(title_str, sizeof(title_str), "%s %s: %s", PROGRAM_NAME, VERSION_STR(), prox_cfg.name);
wbkgd(win_title, COLOR_PAIR(BLACK_ON_GREEN));
title_len = strlen(title_str);
@@ -665,6 +669,7 @@ void display_end(void)
if (scr != NULL) {
endwin();
}
+ scr = NULL;
}
static void pps_print(WINDOW *dst_scr, int y, int x, uint64_t val, int is_blue)
@@ -917,6 +922,11 @@ void display_refresh(void)
stats_display_layout(1);
}
+void display_renew(void)
+{
+ stats_display_layout(0);
+}
+
void display_stats(void)
{
display_lock();
diff --git a/VNFs/DPPD-PROX/display.h b/VNFs/DPPD-PROX/display.h
index 4b517546..4c9f9ba7 100644
--- a/VNFs/DPPD-PROX/display.h
+++ b/VNFs/DPPD-PROX/display.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
#ifndef _DISPLAY_H_
#define _DISPLAY_H_
+#define PROX_MAX_COLS 32
#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
@@ -33,7 +34,7 @@ struct display_column {
};
struct display_table {
- struct display_column cols[16];
+ struct display_column cols[PROX_MAX_COLS];
char title[32];
int n_cols;
int offset;
@@ -86,6 +87,7 @@ void display_init(void);
void display_end(void);
void display_stats(void);
void display_refresh(void);
+void display_renew(void);
void display_print(const char *str);
void display_cmd(const char *cmd, int cmd_len, int cursor_pos);
void display_screen(unsigned screen_id);
diff --git a/VNFs/DPPD-PROX/display_latency.c b/VNFs/DPPD-PROX/display_latency.c
index 04382e46..f43dd69d 100644
--- a/VNFs/DPPD-PROX/display_latency.c
+++ b/VNFs/DPPD-PROX/display_latency.c
@@ -26,6 +26,9 @@ static struct display_column *stddev_col;
static struct display_column *accuracy_limit_col;
static struct display_column *used_col;
static struct display_column *lost_col;
+static struct display_column *mis_ordered_col;
+static struct display_column *extent_col;
+static struct display_column *duplicate_col;
static struct display_page display_page_latency;
static void display_latency_draw_frame(struct screen_state *screen_state)
@@ -68,12 +71,18 @@ static void display_latency_draw_frame(struct screen_state *screen_state)
used_col = display_table_add_col(acc);
display_column_init(used_col, "Used Packets (%)", 16);
accuracy_limit_col = display_table_add_col(acc);
- display_column_init(accuracy_limit_col, "limit (us)", 16);
+ display_column_init(accuracy_limit_col, "limit (us)", 12);
display_table_init(other, "Other");
lost_col = display_table_add_col(other);
- display_column_init(lost_col, "Lost Packets", 16);
+ display_column_init(lost_col, "Lost", 12);
+ mis_ordered_col = display_table_add_col(other);
+ display_column_init(mis_ordered_col, "mis-ordered", 12);
+ extent_col = display_table_add_col(other);
+ display_column_init(extent_col, "extent", 12);
+ duplicate_col = display_table_add_col(other);
+ display_column_init(duplicate_col, "duplicate", 12);
display_page_draw_frame(&display_page_latency, n_latency);
@@ -117,8 +126,11 @@ static void display_stats_latency_entry(int row, struct stats_latency *stats_lat
}
display_column_print(accuracy_limit_col, row, "%s", print_time_unit_usec(dst, &accuracy_limit));
- display_column_print(lost_col, row, "%16"PRIu64"", stats_latency->lost_packets);
+ display_column_print(lost_col, row, "%12"PRIu64"", stats_latency->lost_packets);
display_column_print(used_col, row, "%3u.%06u", used / AFTER_POINT, used % AFTER_POINT);
+ display_column_print(mis_ordered_col, row, "%12"PRIu64"", stats_latency->mis_ordered);
+ display_column_print(extent_col, row, "%12"PRIu64"", stats_latency->extent);
+ display_column_print(duplicate_col, row, "%12"PRIu64"", stats_latency->duplicate);
}
static void display_latency_draw_stats(struct screen_state *screen_state)
diff --git a/VNFs/DPPD-PROX/display_latency_distr.c b/VNFs/DPPD-PROX/display_latency_distr.c
new file mode 100644
index 00000000..3e1cc38a
--- /dev/null
+++ b/VNFs/DPPD-PROX/display_latency_distr.c
@@ -0,0 +1,190 @@
+/*
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <math.h>
+#include "handle_lat.h"
+#include "display_latency_distr.h"
+#include "stats_latency.h"
+#include "display.h"
+#include "lconf.h"
+
+static struct display_page display_page_latency_distr;
+static struct display_column *stats_latency_distr[LAT_BUCKET_COUNT];
+static struct display_column *stats_max;
+static struct display_column *core_col;
+static struct display_column *name_col;
+static uint32_t global_min_bucket_id = 0, global_max_bucket_id = LAT_BUCKET_COUNT - 1;
+static const uint16_t global_nb_buckets_displayed = 15;
+static uint32_t group_size = 9; //LAT_BUCKET_COUNT / global_nb_buckets_displayed;
+
+#define UNIT_INT(i) (((i) * bucket_unit_nsec)/1000)
+#define UNIT_FRACT(i) ((((i) * bucket_unit_nsec) % 1000) / 100)
+
+static void display_latency_distr_draw_frame(struct screen_state *state)
+{
+ uint32_t n_tasks = stats_get_n_latency();
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ;
+ char name[32];
+ char *ptr;
+
+ display_page_init(&display_page_latency_distr);
+
+ struct display_table *core_name = display_page_add_table(&display_page_latency_distr);
+
+ display_table_init(core_name, "Core/task");
+ core_col = display_table_add_col(core_name);
+ name_col = display_table_add_col(core_name);
+ display_column_init(core_col, "Nb", 4);
+ display_column_init(name_col, "Name", 5);
+
+ uint32_t bucket_size = stats_get_latency_bucket_size();
+ struct display_table *stats = display_page_add_table(&display_page_latency_distr);
+ uint32_t bucket_unit_nsec = 1000000000 / (rte_get_tsc_hz() >> bucket_size);
+ if (state->toggle == 0) {
+ display_table_init(stats, "Statistics per second");
+ } else {
+ display_table_init(stats, "Total statistics");
+ }
+ char title[64];
+ stats_max = display_table_add_col(stats);
+ snprintf(title, sizeof(title), " MAXIMUM(mic)");
+ display_column_init(stats_max, title, 11);
+ plog_info("Bucket unit is %d nsec, bucket size is %d, freq is %ld\n", bucket_unit_nsec, bucket_size, rte_get_tsc_hz());
+
+ uint32_t i = global_min_bucket_id, first = i, k = 0;
+ while ((i < LAT_BUCKET_COUNT) && (i <= global_max_bucket_id)) {
+ stats_latency_distr[k] = display_table_add_col(stats);
+ if (i < LAT_BUCKET_COUNT - group_size) {
+ snprintf(title, sizeof(title), "%d.%01d-%d.%01d", UNIT_INT(i), UNIT_FRACT(i), UNIT_INT(i + group_size), UNIT_FRACT(i + group_size));
+ } else {
+ snprintf(title, sizeof(title), "> %d.%01d", UNIT_INT(i), UNIT_FRACT(i));
+ }
+ display_column_init(stats_latency_distr[k++], title, 9);
+ i += group_size;
+ }
+ display_page_draw_frame(&display_page_latency_distr, n_tasks);
+
+ uint32_t count = 0;
+ lconf = NULL;
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ if (strcmp(targ->task_init->mode_str, "lat") == 0) {
+ display_column_print_core_task(core_col, count, lconf, targ);
+ if (targ->id == 0)
+ display_column_print(name_col, count, "%s", lconf->name);
+ count++;
+ }
+ }
+}
+
+static void display_latency_distr_draw_stats(struct screen_state *state)
+{
+ const uint32_t n_latency = stats_get_n_latency();
+ uint64_t *bucket;
+ uint32_t bucket_id = 0, min_bucket_id = LAT_BUCKET_COUNT - 1, max_bucket_id = 0;
+ struct time_unit tu;
+
+ for (uint32_t count = 0; count < n_latency; ++count) {
+ if (state->toggle == 0)
+ tu = stats_latency_get(count)->max.time;
+ else
+ tu = stats_latency_tot_get(count)->max.time;
+ display_column_print(stats_max, count, "%9lu.%03lu", tu.sec * 1000000 + tu.nsec / 1000, tu.nsec % 1000);
+ }
+
+ // Calculate min_bucket_id: id of 1st bucket with data for any tasks
+ // Calculate max_bucket_id: id of last bucket with data for any tasks
+ for (uint i = 0; i < LAT_BUCKET_COUNT; ++i) {
+ for (uint32_t count = 0; count < n_latency; ++count) {
+ if (state->toggle == 0)
+ bucket = stats_latency_get_bucket(count);
+ else
+ bucket = stats_latency_get_tot_bucket(count);
+ if (bucket[i] != 0) {
+ min_bucket_id = i;
+ break;
+ }
+ }
+ if (min_bucket_id != LAT_BUCKET_COUNT - 1)
+ break;
+ }
+
+ for (uint i = LAT_BUCKET_COUNT; i > 0; i--) {
+ for (uint32_t count = 0; count < n_latency; ++count) {
+ if (state->toggle == 0)
+ bucket = stats_latency_get_bucket(count);
+ else
+ bucket = stats_latency_get_tot_bucket(count);
+ if (bucket[i - 1] != 0) {
+ max_bucket_id = i - 1;
+ break;
+ }
+ }
+ if (max_bucket_id)
+ break;
+ }
+
+ if (max_bucket_id - min_bucket_id + 1 < global_nb_buckets_displayed) {
+ max_bucket_id = global_nb_buckets_displayed + min_bucket_id - 1;
+ }
+
+ if ((global_min_bucket_id != min_bucket_id) || (global_max_bucket_id != max_bucket_id)) {
+ global_min_bucket_id = min_bucket_id;
+ global_max_bucket_id = max_bucket_id;
+ // Calculate how many buckets must be grouped together
+ if (max_bucket_id - min_bucket_id + 1 > global_nb_buckets_displayed)
+ group_size = ceil(1.0 * (max_bucket_id - min_bucket_id + 1) / global_nb_buckets_displayed);
+ else
+ group_size = 1;
+ display_latency_distr_draw_frame(state);
+ display_renew();
+ plog_info("min_bucket_id = %d, max_bucket_id = %d\n", min_bucket_id, max_bucket_id);
+ }
+
+ for (uint32_t count = 0; count < n_latency; ++count) {
+ if (state->toggle == 0)
+ bucket = stats_latency_get_bucket(count);
+ else
+ bucket = stats_latency_get_tot_bucket(count);
+ uint32_t i = min_bucket_id, k = 0;
+ uint64_t nb = 0;
+ while ((i < LAT_BUCKET_COUNT) && (i <= global_max_bucket_id)){
+ for (uint32_t j = 0; j <= group_size; j++)
+ if (i + j < LAT_BUCKET_COUNT)
+ nb += bucket[i+j];
+ display_column_print(stats_latency_distr[k++], count, "%9lu", nb);
+ nb = 0;
+ i += group_size;
+ }
+ }
+}
+
+static int display_latency_distr_get_height(void)
+{
+ return stats_get_n_latency();
+}
+
+static struct display_screen display_screen_latency_distr = {
+ .draw_frame = display_latency_distr_draw_frame,
+ .draw_stats = display_latency_distr_draw_stats,
+ .get_height = display_latency_distr_get_height,
+ .title = "latency_distr",
+};
+
+struct display_screen *display_latency_distr(void)
+{
+ return &display_screen_latency_distr;
+}
diff --git a/VNFs/DPPD-PROX/handle_swap.h b/VNFs/DPPD-PROX/display_latency_distr.h
index b589051d..d22f16a4 100644
--- a/VNFs/DPPD-PROX/handle_swap.h
+++ b/VNFs/DPPD-PROX/display_latency_distr.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,9 +14,10 @@
// limitations under the License.
*/
-#ifndef _HANDLE_SWAP_H_
-#define _HANDLE_SWAP_H_
+#ifndef DISPLAY_LATENCY_DISTR_H
+#define DISPLAY_LATENCY_DISTR_H
-struct task_base;
+struct display_screen;
+struct display_screen *display_latency_distr(void);
-#endif /* _HANDLE_SWAP_H_ */
+#endif /* DISPLAY_LATENCY_DISTR_H */
diff --git a/VNFs/DPPD-PROX/display_pkt_len.c b/VNFs/DPPD-PROX/display_pkt_len.c
index df34616a..83fbc655 100644
--- a/VNFs/DPPD-PROX/display_pkt_len.c
+++ b/VNFs/DPPD-PROX/display_pkt_len.c
@@ -81,7 +81,7 @@ static void display_pkt_len_draw_frame(struct screen_state *screen_state)
const uint32_t port_id = port_disp[i];
display_column_print(port_col, i, "%4u", port_id);
- display_column_print(name_col, i, "%8s", prox_port_cfg[port_id].name);
+ display_column_print(name_col, i, "%8s", prox_port_cfg[port_id].names[0]);
display_column_print(type_col, i, "%7s", prox_port_cfg[port_id].short_name);
}
}
diff --git a/VNFs/DPPD-PROX/display_ports.c b/VNFs/DPPD-PROX/display_ports.c
index b1027f93..d2140f1e 100644
--- a/VNFs/DPPD-PROX/display_ports.c
+++ b/VNFs/DPPD-PROX/display_ports.c
@@ -22,6 +22,7 @@
#include "stats_port.h"
#include "prox_globals.h"
#include "prox_port_cfg.h"
+#include "prox_compat.h"
static struct display_page display_page_ports;
static struct display_column *nb_col;
@@ -115,7 +116,7 @@ static void display_ports_draw_frame(struct screen_state *state)
const uint32_t port_id = port_disp[i];
display_column_print(nb_col, i, "%u", port_id);
- display_column_print(name_col, i, "%s", prox_port_cfg[port_id].name);
+ display_column_print(name_col, i, "%s", prox_port_cfg[port_id].names[0]);
display_column_print(type_col, i, "%s", prox_port_cfg[port_id].short_name);
}
}
@@ -178,16 +179,17 @@ static void display_ports_draw_per_sec_stats(void)
/* Take 20 bytes overhead (or 24 if crc strip is enabled) into accound */
struct percent rx_percent;
struct percent tx_percent;
- if (strcmp(prox_port_cfg[port_id].short_name, "i40e") == 0) {
- if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) {
- rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t);
+ if (strcmp(prox_port_cfg[port_id].short_name, "i40e_vf") == 0) {
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP) {
+ rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
} else {
rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
}
} else {
- if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1) {
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP) {
rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t);
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
} else {
@@ -195,6 +197,28 @@ static void display_ports_draw_per_sec_stats(void)
tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
}
}
+#else
+#if defined RTE_ETH_RX_OFFLOAD_KEEP_CRC
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_KEEP_CRC ) {
+ rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
+ tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
+ } else {
+ rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
+ tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
+ }
+ } else {
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_KEEP_CRC ) {
+ rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 20 * (last->rx_tot - prev->rx_tot), delta_t);
+ tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 20 * (last->tx_tot - prev->tx_tot), delta_t);
+ } else {
+ rx_percent = calc_percent(last->rx_bytes - prev->rx_bytes + 24 * (last->rx_tot - prev->rx_tot), delta_t);
+ tx_percent = calc_percent(last->tx_bytes - prev->tx_bytes + 24 * (last->tx_tot - prev->tx_tot), delta_t);
+ }
+ }
+#else
+#error neither RTE_ETH_RX_OFFLOAD_CRC_STRIP or RTE_ETH_RX_OFFLOAD_KEEP_CRC is defined
+#endif
+#endif
display_column_print(no_mbufs_col, i, "%lu", no_mbufs_rate);
display_column_print(ierrors_col, i, "%lu", ierrors_rate);
diff --git a/VNFs/DPPD-PROX/display_rings.c b/VNFs/DPPD-PROX/display_rings.c
index 618350e2..b3154237 100644
--- a/VNFs/DPPD-PROX/display_rings.c
+++ b/VNFs/DPPD-PROX/display_rings.c
@@ -68,7 +68,7 @@ static void display_rings_draw_frame(struct screen_state *state)
int offset = 0;
for (uint32_t j = 0; j < rs->nb_ports; j++)
- offset += sprintf(name + offset, "%s", rs->port[j]->name);
+ offset += sprintf(name + offset, "%s", rs->port[j]->names[0]);
}
sc_val = (rs->ring->flags & RING_F_SC_DEQ) ? 'y' : 'n';
diff --git a/VNFs/DPPD-PROX/display_tasks.c b/VNFs/DPPD-PROX/display_tasks.c
index 75075a10..f7520092 100644
--- a/VNFs/DPPD-PROX/display_tasks.c
+++ b/VNFs/DPPD-PROX/display_tasks.c
@@ -56,6 +56,8 @@ static struct display_column *class_col;
static struct display_column *mbm_tot_col;
static struct display_column *mbm_loc_col;
static struct display_column *frac_col;
+static struct display_column *rx_non_dp_col;
+static struct display_column *tx_non_dp_col;
static void stats_display_core_task_entry(struct lcore_cfg *lconf, struct task_args *targ, unsigned row)
{
@@ -115,6 +117,12 @@ static void display_tasks_draw_frame(struct screen_state *state)
handled_col = display_table_add_col(stats);
display_column_init(handled_col, "Handled (K)", 9);
+ rx_non_dp_col = display_table_add_col(stats);
+ display_column_init(rx_non_dp_col, "Rx non DP (K)", 9);
+
+ tx_non_dp_col = display_table_add_col(stats);
+ display_column_init(tx_non_dp_col, "Tx non DP (K)", 9);
+
if (stats_cpu_freq_enabled()) {
struct display_table *other = display_page_add_table(&display_page_tasks);
@@ -151,6 +159,12 @@ static void display_tasks_draw_frame(struct screen_state *state)
handled_col = display_table_add_col(stats);
display_column_init(handled_col, "Handled (K)", 14);
+ rx_non_dp_col = display_table_add_col(stats);
+ display_column_init(rx_non_dp_col, "RX non DP (K)", 14);
+
+ tx_non_dp_col = display_table_add_col(stats);
+ display_column_init(tx_non_dp_col, "TX non DP (K)", 14);
+
if (stats_cmt_enabled()) {
struct display_table *other = display_page_add_table(&display_page_tasks);
@@ -231,6 +245,8 @@ static void display_core_task_stats_per_sec(const struct task_stats_disp *t, str
print_kpps(tx_fail_col, row, last->drop_tx_fail - prev->drop_tx_fail, delta_t);
print_kpps(discard_col, row, last->drop_discard - prev->drop_discard, delta_t);
print_kpps(handled_col, row, last->drop_handled - prev->drop_handled, delta_t);
+ print_kpps(rx_non_dp_col, row, last->rx_non_dp - prev->rx_non_dp, delta_t);
+ print_kpps(tx_non_dp_col, row, last->tx_non_dp - prev->tx_non_dp, delta_t);
if (stats_cpu_freq_enabled()) {
uint8_t lcore_stat_id = t->lcore_stat_id;
@@ -285,6 +301,8 @@ static void display_core_task_stats_tot(const struct task_stats_disp *t, struct
display_column_print(tx_fail_col, row, "%lu", ts->tot_drop_tx_fail);
display_column_print(discard_col, row, "%lu", ts->tot_drop_discard);
display_column_print(handled_col, row, "%lu", ts->tot_drop_handled);
+ display_column_print(rx_non_dp_col, row, "%lu", ts->tot_rx_non_dp);
+ display_column_print(tx_non_dp_col, row, "%lu", ts->tot_tx_non_dp);
if (stats_cmt_enabled()) {
struct lcore_stats *c = stats_get_lcore_stats(t->lcore_stat_id);
diff --git a/VNFs/DPPD-PROX/eld.h b/VNFs/DPPD-PROX/eld.h
index b5de59d7..d3ec2f22 100644
--- a/VNFs/DPPD-PROX/eld.h
+++ b/VNFs/DPPD-PROX/eld.h
@@ -17,7 +17,7 @@
#ifndef _ELD_H_
#define _ELD_H_
-#define PACKET_QUEUE_BITS 14
+#define PACKET_QUEUE_BITS 20
#define PACKET_QUEUE_SIZE (1 << PACKET_QUEUE_BITS)
#define PACKET_QUEUE_MASK (PACKET_QUEUE_SIZE - 1)
@@ -76,7 +76,10 @@ static uint32_t early_loss_detect_add(struct early_loss_detect *eld, uint32_t pa
old_queue_id = eld->entries[queue_pos];
eld->entries[queue_pos] = packet_index >> PACKET_QUEUE_BITS;
- return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK;
+ if (eld->entries[queue_pos] != old_queue_id)
+ return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK;
+ else
+ return 0;
}
#endif /* _ELD_H_ */
diff --git a/VNFs/DPPD-PROX/file_utils.c b/VNFs/DPPD-PROX/file_utils.c
index b3cf0846..cf1d3444 100644
--- a/VNFs/DPPD-PROX/file_utils.c
+++ b/VNFs/DPPD-PROX/file_utils.c
@@ -22,6 +22,7 @@
#include "prox_args.h"
#include "file_utils.h"
+#include "prox_compat.h"
static char file_error_string[128] = {0};
@@ -44,7 +45,7 @@ static void resolve_path_cfg_dir(char *file_name, size_t len, const char *path)
if (path[0] != '/')
snprintf(file_name, len, "%s/%s", get_cfg_dir(), path);
else
- strncpy(file_name, path, len);
+ prox_strncpy(file_name, path, len);
}
long file_get_size(const char *path)
diff --git a/VNFs/DPPD-PROX/gen/gen_tap.cfg b/VNFs/DPPD-PROX/gen/gen_tap.cfg
new file mode 100644
index 00000000..60239681
--- /dev/null
+++ b/VNFs/DPPD-PROX/gen/gen_tap.cfg
@@ -0,0 +1,69 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[lua]
+lpm4 = dofile("l3-ipv4.lua")
+
+[port 0]
+name=p0
+vdev=gen_tap
+local ipv4=$ip1
+
+[defaults]
+mempool size=16K
+
+[global]
+start time=5
+name=Basic Gen
+
+[variables]
+$hex_ip1=c0 a8 7a 7e
+$hex_ip2=c0 a8 7b 7f
+$ip1=192.168.122.126
+$ip2=192.168.123.127
+
+[core 0s0]
+mode=master
+
+[core 1]
+name=p0
+task=0
+mode=gen
+sub mode=l3
+tx port=p0
+route table=lpm4
+bps=1250000000
+pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 08 00 45 00 00 1c 00 01 00 00 40 11 f7 7d ${hex_ip1} ${hex_ip2} 13 88 13 88 00 08 55 7b
+pkt size=60
+lat pos=42
+packet id pos=46
+min bulk size=8
+local ipv4=${ip1}/24
+
+[core 2]
+name=nop
+task=0
+mode=lat
+sub mode=l3
+rx port=p0
+drop=no
+lat pos=42
+packet id pos=46
+local ipv4=${ip1}/24
diff --git a/VNFs/DPPD-PROX/gen/l3-ipv4.lua b/VNFs/DPPD-PROX/gen/l3-ipv4.lua
new file mode 100644
index 00000000..1c988341
--- /dev/null
+++ b/VNFs/DPPD-PROX/gen/l3-ipv4.lua
@@ -0,0 +1,29 @@
+--
+-- Copyright (c) 2010-2017 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local lpm4 = {}
+lpm4.next_hops = {
+ {id = 0, port_id = 0, ip = ip("192.168.122.240")},
+ {id = 1, port_id = 0, ip = ip("192.168.122.246")},
+ {id = 2, port_id = 0, ip = ip("192.168.122.247")}
+}
+
+lpm4.routes = {
+ {cidr = {ip = ip("192.168.123.0"), depth = 24}, next_hop_id = 0},
+ {cidr = {ip = ip("192.168.124.0"), depth = 24}, next_hop_id = 1},
+ {cidr = {ip = ip("192.168.125.0"), depth = 24}, next_hop_id = 2},
+}
+return lpm4
diff --git a/VNFs/DPPD-PROX/genl4_stream.h b/VNFs/DPPD-PROX/genl4_stream.h
index b180765d..3f1b6c87 100644
--- a/VNFs/DPPD-PROX/genl4_stream.h
+++ b/VNFs/DPPD-PROX/genl4_stream.h
@@ -160,9 +160,9 @@ static void stream_ctx_reset_move(struct stream_ctx *ctx, struct stream_cfg *cfg
static int stream_cfg_calc_max_payload_len(struct stream_cfg *cfg, enum l4gen_peer peer)
{
const uint32_t l4_hdr_len = cfg->proto == IPPROTO_UDP?
- sizeof(struct udp_hdr) : sizeof(struct tcp_hdr);
+ sizeof(prox_rte_udp_hdr) : sizeof(prox_rte_tcp_hdr);
- return ETHER_MAX_LEN - ETHER_CRC_LEN - cfg->data[peer].hdr_len - l4_hdr_len;
+ return PROX_RTE_ETHER_MAX_LEN - PROX_RTE_ETHER_CRC_LEN - cfg->data[peer].hdr_len - l4_hdr_len;
}
static int stream_cfg_max_n_segments(struct stream_cfg *cfg)
diff --git a/VNFs/DPPD-PROX/genl4_stream_tcp.c b/VNFs/DPPD-PROX/genl4_stream_tcp.c
index d05455b7..4d92546b 100644
--- a/VNFs/DPPD-PROX/genl4_stream_tcp.c
+++ b/VNFs/DPPD-PROX/genl4_stream_tcp.c
@@ -16,6 +16,7 @@
#include <rte_cycles.h>
#include <rte_ether.h>
+#include <rte_ethdev.h> // required by rte_eth_ctrl.h in 19.05
#include <rte_eth_ctrl.h>
#include "log.h"
@@ -67,8 +68,8 @@ struct tcp_option {
void stream_tcp_create_rst(struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struct pkt_tuple *tuple)
{
- struct tcp_hdr *tcp = (struct tcp_hdr *)l4_meta->l4_hdr;
- struct ipv4_hdr *ip = ((struct ipv4_hdr *)tcp) - 1;
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)l4_meta->l4_hdr;
+ prox_rte_ipv4_hdr *ip = ((prox_rte_ipv4_hdr *)tcp) - 1;
ip->src_addr = tuple->dst_addr;
ip->dst_addr = tuple->src_addr;
@@ -76,9 +77,9 @@ void stream_tcp_create_rst(struct rte_mbuf *mbuf, struct l4_meta *l4_meta, struc
tcp->dst_port = tuple->src_port;
tcp->src_port = tuple->dst_port;
- ip->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr));
- tcp->tcp_flags = TCP_RST_FLAG;
- tcp->data_off = ((sizeof(struct tcp_hdr) / 4) << 4);
+ ip->total_length = rte_bswap16(sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_tcp_hdr));
+ tcp->tcp_flags = PROX_RTE_TCP_RST_FLAG;
+ tcp->data_off = ((sizeof(prox_rte_tcp_hdr) / 4) << 4);
rte_pktmbuf_pkt_len(mbuf) = l4_meta->payload - rte_pktmbuf_mtod(mbuf, uint8_t *);
rte_pktmbuf_data_len(mbuf) = l4_meta->payload - rte_pktmbuf_mtod(mbuf, uint8_t *);
}
@@ -93,8 +94,8 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_
pkt = rte_pktmbuf_mtod(mbuf, uint8_t *);
rte_memcpy(pkt, stream_cfg->data[act->peer].hdr, stream_cfg->data[act->peer].hdr_len);
- struct ipv4_hdr *l3_hdr = (struct ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(struct ipv4_hdr)];
- struct tcp_hdr *l4_hdr = (struct tcp_hdr *)&pkt[stream_cfg->data[act->peer].hdr_len];
+ prox_rte_ipv4_hdr *l3_hdr = (prox_rte_ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(prox_rte_ipv4_hdr)];
+ prox_rte_tcp_hdr *l4_hdr = (prox_rte_tcp_hdr *)&pkt[stream_cfg->data[act->peer].hdr_len];
l3_hdr->src_addr = ctx->tuple->dst_addr;
l3_hdr->dst_addr = ctx->tuple->src_addr;
@@ -103,17 +104,17 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_
l4_hdr->src_port = ctx->tuple->dst_port;
l4_hdr->dst_port = ctx->tuple->src_port;
- uint32_t tcp_len = sizeof(struct tcp_hdr);
+ uint32_t tcp_len = sizeof(prox_rte_tcp_hdr);
uint32_t tcp_payload_len = 0;
uint32_t seq_len = 0;
struct tcp_option *tcp_op;
- if (tcp_flags & TCP_RST_FLAG) {
- tcp_flags |= TCP_RST_FLAG;
+ if (tcp_flags & PROX_RTE_TCP_RST_FLAG) {
+ tcp_flags |= PROX_RTE_TCP_RST_FLAG;
seq_len = 1;
}
- else if (tcp_flags & TCP_SYN_FLAG) {
- tcp_flags |= TCP_SYN_FLAG;
+ else if (tcp_flags & PROX_RTE_TCP_SYN_FLAG) {
+ tcp_flags |= PROX_RTE_TCP_SYN_FLAG;
/* Window scaling */
/* TODO: make options come from the stream. */
@@ -128,14 +129,14 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_
ctx->seq_first_byte = ctx->ackd_seq + 1;
}
- else if (tcp_flags & TCP_FIN_FLAG) {
- tcp_flags |= TCP_FIN_FLAG;
+ else if (tcp_flags & PROX_RTE_TCP_FIN_FLAG) {
+ tcp_flags |= PROX_RTE_TCP_FIN_FLAG;
seq_len = 1;
}
- if (tcp_flags & TCP_ACK_FLAG) {
+ if (tcp_flags & PROX_RTE_TCP_ACK_FLAG) {
l4_hdr->recv_ack = rte_bswap32(ctx->recv_seq);
- tcp_flags |= TCP_ACK_FLAG;
+ tcp_flags |= PROX_RTE_TCP_ACK_FLAG;
}
else
l4_hdr->recv_ack = 0;
@@ -162,13 +163,13 @@ static void create_tcp_pkt(struct stream_ctx *ctx, struct rte_mbuf *mbuf, uint8_
rte_pktmbuf_pkt_len(mbuf) = l4_payload_offset + data_len;
rte_pktmbuf_data_len(mbuf) = l4_payload_offset + data_len;
- l3_hdr->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + tcp_len + data_len);
+ l3_hdr->total_length = rte_bswap16(sizeof(prox_rte_ipv4_hdr) + tcp_len + data_len);
plogdx_dbg(mbuf, NULL);
plogx_dbg("put tcp packet with flags: %s%s%s, (len = %d, seq = %d, ack =%d)\n",
- tcp_flags & TCP_SYN_FLAG? "SYN ":"",
- tcp_flags & TCP_ACK_FLAG? "ACK ":"",
- tcp_flags & TCP_FIN_FLAG? "FIN ":"",
+ tcp_flags & PROX_RTE_TCP_SYN_FLAG? "SYN ":"",
+ tcp_flags & PROX_RTE_TCP_ACK_FLAG? "ACK ":"",
+ tcp_flags & PROX_RTE_TCP_FIN_FLAG? "FIN ":"",
data_len, rte_bswap32(l4_hdr->sent_seq), rte_bswap32(l4_hdr->recv_ack));
}
@@ -186,9 +187,9 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx)
the current implementation this packet
contains the TCP option field to set the
MSS. For this, add 4 bytes. */
- return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + 4;
+ return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr) + 4;
}
- return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr);
+ return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr);
}
else if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer) {
/* The reply _could_ (due to races, still possibly
@@ -203,7 +204,7 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx)
if (remaining_len == 0) {
if (ctx->cur_action + 1 != ctx->stream_cfg->n_actions) {
if (ctx->stream_cfg->actions[ctx->cur_action + 1].peer == ctx->peer)
- return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr);
+ return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr);
else {
uint32_t seq_beg = ctx->recv_seq - ctx->other_seq_first_byte;
uint32_t end = ctx->stream_cfg->actions[ctx->cur_action + 1].beg +
@@ -211,15 +212,15 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx)
uint32_t remaining = end - seq_beg;
uint16_t data_len = remaining > 1460? 1460: remaining;
- return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + data_len;
+ return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr) + data_len;
}
}
else {
- return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr);
+ return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr);
}
}
else {
- return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr);
+ return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr);
}
}
else if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) {
@@ -229,10 +230,10 @@ uint16_t stream_tcp_reply_len(struct stream_ctx *ctx)
uint32_t remaining = end - seq_beg;
uint16_t data_len = remaining > 1460? 1460: remaining;
- return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(struct tcp_hdr) + data_len;
+ return ctx->stream_cfg->data[!ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr) + data_len;
}
else
- return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(struct tcp_hdr);
+ return ctx->stream_cfg->data[ctx->peer].hdr_len + sizeof(prox_rte_tcp_hdr);
}
static void stream_tcp_proc_in_order_data(struct stream_ctx *ctx, struct l4_meta *l4_meta, int *progress_seq)
@@ -293,18 +294,18 @@ static void stream_tcp_proc_in_order_data(struct stream_ctx *ctx, struct l4_meta
static int stream_tcp_proc_in(struct stream_ctx *ctx, struct l4_meta *l4_meta)
{
- struct tcp_hdr *tcp = NULL;
+ prox_rte_tcp_hdr *tcp = NULL;
int got_syn = 0;
int got_ack = 0;
int got_fin = 0;
int got_rst = 0;
- tcp = (struct tcp_hdr *)l4_meta->l4_hdr;
+ tcp = (prox_rte_tcp_hdr *)l4_meta->l4_hdr;
- got_syn = tcp->tcp_flags & TCP_SYN_FLAG;
- got_ack = tcp->tcp_flags & TCP_ACK_FLAG;
- got_fin = tcp->tcp_flags & TCP_FIN_FLAG;
- got_rst = tcp->tcp_flags & TCP_RST_FLAG;
+ got_syn = tcp->tcp_flags & PROX_RTE_TCP_SYN_FLAG;
+ got_ack = tcp->tcp_flags & PROX_RTE_TCP_ACK_FLAG;
+ got_fin = tcp->tcp_flags & PROX_RTE_TCP_FIN_FLAG;
+ got_rst = tcp->tcp_flags & PROX_RTE_TCP_RST_FLAG;
plogx_dbg("TCP, flags: %s%s%s, (len = %d, seq = %d, ack =%d)\n", got_syn? "SYN ":"", got_ack? "ACK ":"", got_fin? "FIN " : "", l4_meta->len, rte_bswap32(tcp->sent_seq), rte_bswap32(tcp->recv_ack));
if (got_syn)
@@ -399,7 +400,7 @@ static int stream_tcp_proc_in(struct stream_ctx *ctx, struct l4_meta *l4_meta)
}
/* parse options */
- if (((tcp->data_off >> 4)*4) > sizeof(struct tcp_hdr)) {
+ if (((tcp->data_off >> 4)*4) > sizeof(prox_rte_tcp_hdr)) {
struct tcp_option *tcp_op = (struct tcp_option *)(tcp + 1);
uint8_t *payload = (uint8_t *)tcp + ((tcp->data_off >> 4)*4);
@@ -439,7 +440,7 @@ static int stream_tcp_proc_out_closed(struct stream_ctx *ctx, struct rte_mbuf *m
ctx->next_seq = 99;
ctx->ackd_seq = 99;
- create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_SYN_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -460,7 +461,7 @@ static int stream_tcp_proc_out_listen(struct stream_ctx *ctx, struct rte_mbuf *m
pkt_tuple_debug(ctx->tuple);
ctx->flags |= STREAM_CTX_F_TCP_ENDED;
- create_tcp_pkt(ctx, mbuf, TCP_RST_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_RST_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -474,7 +475,7 @@ static int stream_tcp_proc_out_listen(struct stream_ctx *ctx, struct rte_mbuf *m
ctx->tcp_state = SYN_RECEIVED;
- create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG | TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_SYN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -516,7 +517,7 @@ static int stream_tcp_proc_out_syn_sent(struct stream_ctx *ctx, struct rte_mbuf
return -1;
}
else {
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
}
@@ -541,7 +542,7 @@ static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf
ctx->same_state = 0;
ctx->tcp_state = ESTABLISHED;
if (ctx->stream_cfg->actions[ctx->cur_action].peer != ctx->peer) {
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -561,7 +562,7 @@ static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf
data.
*/
- /* create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0); */
+ /* create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0); */
/* token_time_take(&ctx->token_time, mbuf_wire_size(mbuf)); */
*next_tsc = tcp_resched_timeout(ctx);
return -1;
@@ -575,7 +576,7 @@ static int stream_tcp_proc_out_syn_recv(struct stream_ctx *ctx, struct rte_mbuf
++ctx->same_state;
tcp_set_retransmit(ctx);
ctx->next_seq = ctx->ackd_seq;
- create_tcp_pkt(ctx, mbuf, TCP_SYN_FLAG | TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_SYN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -602,7 +603,7 @@ static int stream_tcp_proc_out_estab_tx(struct stream_ctx *ctx, struct rte_mbuf
plogx_dbg("Moving to FIN_WAIT\n");
ctx->tcp_state = FIN_WAIT;
ctx->same_state = 0;
- create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -681,7 +682,7 @@ static int stream_tcp_proc_out_estab_tx(struct stream_ctx *ctx, struct rte_mbuf
else
ctx->flags &= ~STREAM_CTX_F_MORE_DATA;
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, data_beg, data_len);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, data_beg, data_len);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
if (ctx->flags & STREAM_CTX_F_MORE_DATA)
*next_tsc = tcp_resched_timeout(ctx);
@@ -704,14 +705,14 @@ static int stream_tcp_proc_out_estab_rx(struct stream_ctx *ctx, struct rte_mbuf
plogx_dbg("Got fin!\n");
if (1) {
ctx->tcp_state = LAST_ACK;
- create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
}
else {
ctx->tcp_state = CLOSE_WAIT;
- create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_resched_timeout(ctx);
return 0;
@@ -726,7 +727,7 @@ static int stream_tcp_proc_out_estab_rx(struct stream_ctx *ctx, struct rte_mbuf
plogx_dbg("state++ (ack = %d)\n", ctx->recv_seq);
}
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -755,7 +756,7 @@ static int stream_tcp_proc_out_close_wait(struct stream_ctx *ctx, struct rte_mbu
when the FIN is sent after ACK'ing the incoming FIN. In any
case, it does not matter if there was a packet or not. */
ctx->tcp_state = LAST_ACK;
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG | TCP_FIN_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG | PROX_RTE_TCP_FIN_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -785,7 +786,7 @@ static int stream_tcp_proc_out_last_ack(struct stream_ctx *ctx, struct rte_mbuf
ctx->next_seq = ctx->ackd_seq;
ctx->same_state++;
tcp_set_retransmit(ctx);
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG | TCP_FIN_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG | PROX_RTE_TCP_FIN_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -807,7 +808,7 @@ static int stream_tcp_proc_out_fin_wait(struct stream_ctx *ctx, struct rte_mbuf
ctx->tcp_state = TIME_WAIT;
ctx->sched_tsc = rte_rdtsc() + ctx->stream_cfg->tsc_timeout_time_wait;
plogx_dbg("from FIN_WAIT to TIME_WAIT\n");
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = ctx->stream_cfg->tsc_timeout_time_wait;
return 0;
@@ -829,7 +830,7 @@ static int stream_tcp_proc_out_fin_wait(struct stream_ctx *ctx, struct rte_mbuf
ctx->same_state++;
tcp_set_retransmit(ctx);
ctx->next_seq = ctx->ackd_seq;
- create_tcp_pkt(ctx, mbuf, TCP_FIN_FLAG | TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_FIN_FLAG | PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = tcp_retx_timeout(ctx);
return 0;
@@ -852,7 +853,7 @@ static int stream_tcp_proc_out_time_wait(struct stream_ctx *ctx, struct rte_mbuf
plogx_dbg("Got packet while in TIME_WAIT (pkt ACK reTX)\n");
ctx->sched_tsc = rte_rdtsc() + ctx->stream_cfg->tsc_timeout_time_wait;
- create_tcp_pkt(ctx, mbuf, TCP_ACK_FLAG, 0, 0);
+ create_tcp_pkt(ctx, mbuf, PROX_RTE_TCP_ACK_FLAG, 0, 0);
token_time_take(&ctx->token_time, mbuf_wire_size(mbuf));
*next_tsc = ctx->stream_cfg->tsc_timeout_time_wait;
return 0;
@@ -916,7 +917,7 @@ int stream_tcp_is_ended(struct stream_ctx *ctx)
static void add_pkt_bytes(uint32_t *n_pkts, uint32_t *n_bytes, uint32_t len)
{
- len = (len < 60? 60 : len) + 20 + ETHER_CRC_LEN;
+ len = (len < 60? 60 : len) + 20 + PROX_RTE_ETHER_CRC_LEN;
(*n_pkts)++;
*n_bytes += len;
@@ -931,9 +932,9 @@ void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b
*n_bytes = 0;
/* Connection setup */
- add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(struct tcp_hdr) + 4); /* SYN */
- add_pkt_bytes(n_pkts, n_bytes, server_hdr_len + sizeof(struct tcp_hdr) + 4); /* SYN/ACK */
- add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(struct tcp_hdr)); /* ACK */
+ add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(prox_rte_tcp_hdr) + 4); /* SYN */
+ add_pkt_bytes(n_pkts, n_bytes, server_hdr_len + sizeof(prox_rte_tcp_hdr) + 4); /* SYN/ACK */
+ add_pkt_bytes(n_pkts, n_bytes, client_hdr_len + sizeof(prox_rte_tcp_hdr)); /* ACK */
for (uint32_t i = 0; i < cfg->n_actions; ++i) {
const uint32_t mss = 1440; /* TODO: should come from peer's own mss. */
@@ -946,11 +947,11 @@ void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b
while (remaining) {
uint32_t seg = remaining > mss? mss: remaining;
- add_pkt_bytes(n_pkts, n_bytes, send_hdr_len + sizeof(struct tcp_hdr) + seg);
+ add_pkt_bytes(n_pkts, n_bytes, send_hdr_len + sizeof(prox_rte_tcp_hdr) + seg);
remaining -= seg;
}
- add_pkt_bytes(n_pkts, n_bytes, reply_hdr_len + sizeof(struct tcp_hdr));
+ add_pkt_bytes(n_pkts, n_bytes, reply_hdr_len + sizeof(prox_rte_tcp_hdr));
}
/* Connection Tear-down */
@@ -959,7 +960,7 @@ void stream_tcp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b
const uint32_t init_hdr_len = last_peer == PEER_CLIENT? client_hdr_len : server_hdr_len;
const uint32_t resp_hdr_len = last_peer == PEER_CLIENT? server_hdr_len : client_hdr_len;
- add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(struct tcp_hdr)); /* FIN */
- add_pkt_bytes(n_pkts, n_bytes, resp_hdr_len + sizeof(struct tcp_hdr)); /* FIN/ACK */
- add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(struct tcp_hdr)); /* ACK */
+ add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(prox_rte_tcp_hdr)); /* FIN */
+ add_pkt_bytes(n_pkts, n_bytes, resp_hdr_len + sizeof(prox_rte_tcp_hdr)); /* FIN/ACK */
+ add_pkt_bytes(n_pkts, n_bytes, init_hdr_len + sizeof(prox_rte_tcp_hdr)); /* ACK */
}
diff --git a/VNFs/DPPD-PROX/genl4_stream_udp.c b/VNFs/DPPD-PROX/genl4_stream_udp.c
index 3de2db09..31661682 100644
--- a/VNFs/DPPD-PROX/genl4_stream_udp.c
+++ b/VNFs/DPPD-PROX/genl4_stream_udp.c
@@ -93,7 +93,7 @@ int stream_udp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_met
uint8_t *pkt = rte_pktmbuf_mtod(mbuf, uint8_t *);
const struct peer_action *act = &stream_cfg->actions[ctx->cur_action];
- uint16_t pkt_len = stream_cfg->data[act->peer].hdr_len + sizeof(struct udp_hdr) + act->len;
+ uint16_t pkt_len = stream_cfg->data[act->peer].hdr_len + sizeof(prox_rte_udp_hdr) + act->len;
rte_pktmbuf_pkt_len(mbuf) = pkt_len;
rte_pktmbuf_data_len(mbuf) = pkt_len;
@@ -101,19 +101,19 @@ int stream_udp_proc(struct stream_ctx *ctx, struct rte_mbuf *mbuf, struct l4_met
/* Construct the packet. The template is used up to L4 header,
a gap of sizeof(l4_hdr) is skipped, followed by the payload. */
rte_memcpy(pkt, stream_cfg->data[act->peer].hdr, stream_cfg->data[act->peer].hdr_len);
- rte_memcpy(pkt + stream_cfg->data[act->peer].hdr_len + sizeof(struct udp_hdr), stream_cfg->data[act->peer].content + act->beg, act->len);
+ rte_memcpy(pkt + stream_cfg->data[act->peer].hdr_len + sizeof(prox_rte_udp_hdr), stream_cfg->data[act->peer].content + act->beg, act->len);
- struct ipv4_hdr *l3_hdr = (struct ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(struct ipv4_hdr)];
- struct udp_hdr *l4_hdr = (struct udp_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len];
+ prox_rte_ipv4_hdr *l3_hdr = (prox_rte_ipv4_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len - sizeof(prox_rte_ipv4_hdr)];
+ prox_rte_udp_hdr *l4_hdr = (prox_rte_udp_hdr*)&pkt[stream_cfg->data[act->peer].hdr_len];
l3_hdr->src_addr = ctx->tuple->dst_addr;
l3_hdr->dst_addr = ctx->tuple->src_addr;
l3_hdr->next_proto_id = IPPROTO_UDP;
l4_hdr->src_port = ctx->tuple->dst_port;
l4_hdr->dst_port = ctx->tuple->src_port;
- l4_hdr->dgram_len = rte_bswap16(sizeof(struct udp_hdr) + act->len);
+ l4_hdr->dgram_len = rte_bswap16(sizeof(prox_rte_udp_hdr) + act->len);
/* TODO: UDP checksum calculation */
- l3_hdr->total_length = rte_bswap16(sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + act->len);
+ l3_hdr->total_length = rte_bswap16(sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr) + act->len);
ctx->cur_pos[ctx->peer] += act->len;
ctx->cur_action++;
@@ -144,7 +144,7 @@ uint16_t stream_udp_reply_len(struct stream_ctx *ctx)
else if (ctx->stream_cfg->actions[ctx->cur_action].peer == ctx->peer)
return 0;
else
- return ctx->stream_cfg->data[ctx->stream_cfg->actions[ctx->cur_action].peer].hdr_len + sizeof(struct udp_hdr) +
+ return ctx->stream_cfg->data[ctx->stream_cfg->actions[ctx->cur_action].peer].hdr_len + sizeof(prox_rte_udp_hdr) +
ctx->stream_cfg->actions[ctx->cur_action].len;
}
@@ -158,7 +158,7 @@ void stream_udp_calc_len(struct stream_cfg *cfg, uint32_t *n_pkts, uint32_t *n_b
for (uint32_t i = 0; i < cfg->n_actions; ++i) {
const uint32_t send_hdr_len = cfg->actions[i].peer == PEER_CLIENT? client_hdr_len : server_hdr_len;
- uint32_t len = send_hdr_len + sizeof(struct udp_hdr) + cfg->actions[i].len;
+ uint32_t len = send_hdr_len + sizeof(prox_rte_udp_hdr) + cfg->actions[i].len;
*n_bytes += (len < 60? 60 : len) + 24;
(*n_pkts)++;
}
diff --git a/VNFs/DPPD-PROX/git_version.c.in b/VNFs/DPPD-PROX/git_version.c.in
new file mode 100644
index 00000000..d151b589
--- /dev/null
+++ b/VNFs/DPPD-PROX/git_version.c.in
@@ -0,0 +1 @@
+const char *git_version="@GIT_VERSION@";
diff --git a/VNFs/DPPD-PROX/handle_acl.c b/VNFs/DPPD-PROX/handle_acl.c
index 03949360..57b476a7 100644
--- a/VNFs/DPPD-PROX/handle_acl.c
+++ b/VNFs/DPPD-PROX/handle_acl.c
@@ -34,6 +34,8 @@
#include "lconf.h"
#include "prefetch.h"
#include "etypes.h"
+#include "prox_compat.h"
+#include "handle_sched.h"
struct task_acl {
struct task_base base;
@@ -46,18 +48,20 @@ struct task_acl {
void *field_defs;
size_t field_defs_size;
uint32_t n_field_defs;
+ struct rte_sched_port *sched_port;
};
-static void set_tc(struct rte_mbuf *mbuf, uint32_t tc)
+static void set_tc(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t tc)
{
+ struct task_acl *task = (struct task_acl *)tbase;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
uint32_t subport, pipe, traffic_class, queue;
- enum rte_meter_color color;
+ enum prox_rte_color color;
- rte_sched_port_pkt_read_tree_path(mbuf, &subport, &pipe, &traffic_class, &queue);
+ prox_rte_sched_port_pkt_read_tree_path(task->sched_port, mbuf, &subport, &pipe, &traffic_class, &queue);
color = rte_sched_port_pkt_read_color(mbuf);
- rte_sched_port_pkt_write(mbuf, subport, pipe, tc, queue, color);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbuf, subport, pipe, tc, queue, color);
#else
struct rte_sched_port_hierarchy *sched =
(struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
@@ -108,8 +112,9 @@ static int handle_acl_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
break;
case ACL_ALLOW:
out[i] = 0;
+ // __attribute__ ((fallthrough));
case ACL_RATE_LIMIT:
- set_tc(mbufs[i], 3);
+ set_tc(tbase, mbufs[i], 3);
break;
};
}
@@ -209,6 +214,14 @@ static void init_task_acl(struct task_base *tbase, struct task_args *targ)
targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
targ->lconf->ctrl_func_m[targ->task] = acl_msg;
+
+ // If rate limiting is used tc will be set, sched_port must be initialized, and tc will be used by a following policing or qos task
+ int rc = init_port_sched(&task->sched_port, targ);
+
+ // ACL can be used to accept/drop packets and/or to set rate limiting. If using rate limiting, then sched_port must be defined
+ // TODO: check whether rate limiting is configured, and, if yes, check that QoS or policing task configures the qos_conf.params.
+ if (rc)
+ plog_info("Did not find any QoS or Policing task to transmit to => setting tc will not work\n");
}
int str_to_rule(struct acl4_rule *rule, char** fields, int n_rules, int use_qinq)
diff --git a/VNFs/DPPD-PROX/handle_aggregator.c b/VNFs/DPPD-PROX/handle_aggregator.c
index 6434d759..ccf8b8cc 100644
--- a/VNFs/DPPD-PROX/handle_aggregator.c
+++ b/VNFs/DPPD-PROX/handle_aggregator.c
@@ -44,10 +44,10 @@
(stats)->rx_prio[prio] += ntx; \
} while(0) \
-static inline uint8_t detect_l4_priority(uint8_t l3_priority, const struct ipv4_hdr *ipv4_hdr)
+static inline uint8_t detect_l4_priority(uint8_t l3_priority, const prox_rte_ipv4_hdr *ipv4_hdr)
{
if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
- const struct udp_hdr *udp = (const struct udp_hdr *)((const uint8_t *)ipv4_hdr + sizeof(struct ipv4_hdr));
+ const prox_rte_udp_hdr *udp = (const prox_rte_udp_hdr *)((const uint8_t *)ipv4_hdr + sizeof(prox_rte_ipv4_hdr));
if (((udp->src_port == 0x67) && (udp->dst_port == 0x68)) || ((udp->src_port == 0x68) && (udp->dst_port == 0x67))) {
return PRIORITY_DHCP;
}
@@ -55,7 +55,7 @@ static inline uint8_t detect_l4_priority(uint8_t l3_priority, const struct ipv4_
return l3_priority;
}
-static inline uint8_t detect_l3_priority(uint8_t l2_priority, const struct ipv4_hdr *ipv4_hdr)
+static inline uint8_t detect_l3_priority(uint8_t l2_priority, const prox_rte_ipv4_hdr *ipv4_hdr)
{
uint8_t dscp;
if ((ipv4_hdr->version_ihl >> 4) == 4) {
@@ -107,10 +107,10 @@ static inline void buffer_packet(struct task_aggregator *task, struct rte_mbuf *
static inline void handle_aggregator(struct task_aggregator *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
uint8_t priority = 0;
const struct qinq_hdr *pqinq;
- const struct ipv4_hdr *ipv4_hdr;
+ const prox_rte_ipv4_hdr *ipv4_hdr;
const uint16_t eth_type = peth->ether_type;
switch (eth_type) {
@@ -121,7 +121,7 @@ static inline void handle_aggregator(struct task_aggregator *task, struct rte_mb
pqinq = rte_pktmbuf_mtod(mbuf, const struct qinq_hdr *);
if ((priority = detect_l2_priority(pqinq)) == OUT_DISCARD)
break;
- ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1);
+ ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1);
if ((priority = detect_l3_priority(priority, ipv4_hdr)) == OUT_DISCARD)
break;
if ((priority = detect_l4_priority(priority, ipv4_hdr)) == OUT_DISCARD)
@@ -130,7 +130,7 @@ static inline void handle_aggregator(struct task_aggregator *task, struct rte_mb
case ETYPE_VLAN:
break;
case ETYPE_IPv4:
- ipv4_hdr = (const struct ipv4_hdr *)(peth+1);
+ ipv4_hdr = (const prox_rte_ipv4_hdr *)(peth+1);
if ((priority = detect_l3_priority(LOW_PRIORITY, ipv4_hdr)) == OUT_DISCARD)
break;
if ((priority = detect_l4_priority(priority, ipv4_hdr)) == OUT_DISCARD)
diff --git a/VNFs/DPPD-PROX/handle_arp.c b/VNFs/DPPD-PROX/handle_arp.c
index 767cee11..c0286d42 100644
--- a/VNFs/DPPD-PROX/handle_arp.c
+++ b/VNFs/DPPD-PROX/handle_arp.c
@@ -28,7 +28,7 @@
struct task_arp {
struct task_base base;
- struct ether_addr src_mac;
+ prox_rte_ether_addr src_mac;
uint32_t seed;
uint32_t flags;
uint32_t ip;
@@ -44,9 +44,9 @@ static void task_update_config(struct task_arp *task)
task->ip = task->tmp_ip;
}
-static void handle_arp(struct task_arp *task, struct ether_hdr_arp *hdr, struct ether_addr *s_addr)
+static void handle_arp(struct task_arp *task, struct ether_hdr_arp *hdr, prox_rte_ether_addr *s_addr)
{
- build_arp_reply(hdr, s_addr);
+ build_arp_reply((prox_rte_ether_hdr *)hdr, s_addr, &hdr->arp);
}
static int handle_arp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
@@ -56,12 +56,12 @@ static int handle_arp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
uint8_t out[MAX_PKT_BURST] = {0};
struct rte_mbuf *replies_mbufs[64] = {0}, *arp_pkt_mbufs[64] = {0};
int n_arp_reply_pkts = 0, n_other_pkts = 0,n_arp_pkts = 0;
- struct ether_addr s_addr;
+ prox_rte_ether_addr s_addr;
for (uint16_t j = 0; j < n_pkts; ++j) {
hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *);
if (hdr->ether_hdr.ether_type == ETYPE_ARP) {
- if (arp_is_gratuitous(hdr)) {
+ if (arp_is_gratuitous(&hdr->arp)) {
out[n_other_pkts] = OUT_DISCARD;
n_other_pkts++;
plog_info("Received gratuitous packet \n");
@@ -71,7 +71,7 @@ static int handle_arp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
out[n_arp_pkts] = task->arp_replies_ring;
n_arp_pkts++;
} else if (task->ip == 0) {
- create_mac(hdr, &s_addr);
+ create_mac(&hdr->arp, &s_addr);
handle_arp(task, hdr, &s_addr);
replies_mbufs[n_arp_reply_pkts] = mbufs[j];
out[n_arp_reply_pkts] = 0;
@@ -130,7 +130,7 @@ static void init_task_arp(struct task_base *tbase, struct task_args *targ)
task->arp_replies_ring = OUT_DISCARD;
task->seed = rte_rdtsc();
- memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw_sw.tx_port_queue.port].eth_addr, sizeof(struct ether_addr));
+ memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw_sw.tx_port_queue.port].eth_addr, sizeof(prox_rte_ether_addr));
task->ip = rte_cpu_to_be_32(targ->local_ipv4);
task->tmp_ip = task->ip;
diff --git a/VNFs/DPPD-PROX/handle_blockudp.c b/VNFs/DPPD-PROX/handle_blockudp.c
index 04c945e5..8dbfea8a 100644
--- a/VNFs/DPPD-PROX/handle_blockudp.c
+++ b/VNFs/DPPD-PROX/handle_blockudp.c
@@ -35,8 +35,8 @@ static int handle_blockudp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs
uint16_t j;
for (j = 0; j < n_pkts; ++j) {
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *);
- struct ipv4_hdr *pip = (struct ipv4_hdr *) (peth + 1);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *) (peth + 1);
out[j] = peth->ether_type == ETYPE_IPv4 && pip->next_proto_id == 0x11 ? OUT_DISCARD : 0;
}
diff --git a/VNFs/DPPD-PROX/handle_cgnat.c b/VNFs/DPPD-PROX/handle_cgnat.c
index 6f176c08..9ce63b20 100644
--- a/VNFs/DPPD-PROX/handle_cgnat.c
+++ b/VNFs/DPPD-PROX/handle_cgnat.c
@@ -45,8 +45,6 @@
#define BIT_8_TO_15 0x0000ff00
#define BIT_0_TO_15 0x0000ffff
-#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
-
struct private_key {
uint32_t ip_addr;
uint16_t l4_port;
@@ -113,10 +111,10 @@ struct task_nat {
static __m128i proto_ipsrc_portsrc_mask;
static __m128i proto_ipdst_portdst_mask;
struct pkt_eth_ipv4 {
- struct ether_hdr ether_hdr;
- struct ipv4_hdr ipv4_hdr;
- struct udp_hdr udp_hdr;
-} __attribute__((packed));
+ prox_rte_ether_hdr ether_hdr;
+ prox_rte_ipv4_hdr ipv4_hdr;
+ prox_rte_udp_hdr udp_hdr;
+} __attribute__((packed)) __attribute__((__aligned__(2)));
void task_cgnat_dump_public_hash(struct task_nat *task)
{
@@ -130,7 +128,7 @@ void task_cgnat_dump_private_hash(struct task_nat *task)
static void set_l2(struct task_nat *task, struct rte_mbuf *mbuf, uint8_t nh_idx)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
*((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes;
*((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx];
}
@@ -138,8 +136,8 @@ static void set_l2(struct task_nat *task, struct rte_mbuf *mbuf, uint8_t nh_idx)
static uint8_t route_ipv4(struct task_nat *task, struct rte_mbuf *mbuf)
{
struct pkt_eth_ipv4 *pkt = rte_pktmbuf_mtod(mbuf, struct pkt_eth_ipv4 *);
- struct ipv4_hdr *ip = &pkt->ipv4_hdr;
- struct ether_hdr *peth_out;
+ prox_rte_ipv4_hdr *ip = &pkt->ipv4_hdr;
+ prox_rte_ether_hdr *peth_out;
uint8_t tx_port;
uint32_t dst_ip;
@@ -150,7 +148,7 @@ static uint8_t route_ipv4(struct task_nat *task, struct rte_mbuf *mbuf)
break;
default:
/* Routing for other protocols is not implemented */
- plogx_info("Routing nit implemented for this protocol\n");
+ plogx_info("Routing not implemented for this protocol\n");
return OUT_DISCARD;
}
@@ -288,7 +286,7 @@ static int add_new_port_entry(struct task_nat *task, uint8_t proto, int public_i
task->private_flow_entries[ret].ip_addr = ip;
task->private_flow_entries[ret].l4_port = *port;
task->private_flow_entries[ret].flow_time = tsc;
- task->private_flow_entries[ret].private_ip_idx = private_ip_idx;
+ task->private_flow_entries[ret].private_ip_idx = private_ip_idx;
public_key.ip_addr = ip;
public_key.l4_port = *port;
@@ -305,15 +303,15 @@ static int add_new_port_entry(struct task_nat *task, uint8_t proto, int public_i
task->public_entries[ret].ip_addr = private_src_ip;
task->public_entries[ret].l4_port = private_udp_port;
task->public_entries[ret].dpdk_port = mbuf->port;
- task->public_entries[ret].private_ip_idx = private_ip_idx;
+ task->public_entries[ret].private_ip_idx = private_ip_idx;
return ret;
}
static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
- struct task_nat *task = (struct task_nat *)tbase;
- uint8_t out[MAX_PKT_BURST];
- uint16_t j;
+ struct task_nat *task = (struct task_nat *)tbase;
+ uint8_t out[MAX_PKT_BURST] = {0};
+ uint16_t j;
uint32_t *ip_addr, public_ip, private_ip;
uint16_t *udp_src_port, port, private_port, public_port;
struct pkt_eth_ipv4 *pkt[MAX_PKT_BURST];
@@ -324,6 +322,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
void *keys[MAX_PKT_BURST];
int32_t positions[MAX_PKT_BURST];
int map[MAX_PKT_BURST] = {0};
+ struct public_key null_key ={0};
if (unlikely(task->dump_public_hash)) {
const struct public_key *next_key;
@@ -350,24 +349,24 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
task->dump_private_hash = 0;
}
- for (j = 0; j < n_pkts; ++j) {
- PREFETCH0(mbufs[j]);
+ for (j = 0; j < n_pkts; ++j) {
+ PREFETCH0(mbufs[j]);
}
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
pkt[j] = rte_pktmbuf_mtod(mbufs[j], struct pkt_eth_ipv4 *);
- PREFETCH0(pkt[j]);
+ PREFETCH0(pkt[j]);
}
if (task->private) {
- struct private_key key[MAX_PKT_BURST];
- for (j = 0; j < n_pkts; ++j) {
+ struct private_key key[MAX_PKT_BURST];
+ for (j = 0; j < n_pkts; ++j) {
/* Currently, only support eth/ipv4 packets */
if (pkt[j]->ether_hdr.ether_type != ETYPE_IPv4) {
plogx_info("Currently, only support eth/ipv4 packets\n");
out[j] = OUT_DISCARD;
- keys[j] = (void *)NULL;
+ keys[j] = (void *)&null_key;
continue;
}
- key[j].ip_addr = pkt[j]->ipv4_hdr.src_addr;
+ key[j].ip_addr = pkt[j]->ipv4_hdr.src_addr;
key[j].l4_port = pkt[j]->udp_hdr.src_port;
keys[j] = &key[j];
}
@@ -377,27 +376,29 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
return -1;
}
int n_new_mapping = 0;
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
port_idx = positions[j];
- if (unlikely(port_idx < 0)) {
- plogx_dbg("ip %d.%d.%d.%d / port %x not found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
- map[n_new_mapping] = j;
- keys[n_new_mapping++] = (void *)&(pkt[j]->ipv4_hdr.src_addr);
- } else {
- ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
- udp_src_port = &(pkt[j]->udp_hdr.src_port);
- plogx_dbg("ip/port %d.%d.%d.%d / %x found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
- *ip_addr = task->private_flow_entries[port_idx].ip_addr;
- *udp_src_port = task->private_flow_entries[port_idx].l4_port;
- uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
- if (flow_time + tsc_hz < tsc) {
- task->private_flow_entries[port_idx].flow_time = tsc;
+ if (out[j] != OUT_DISCARD) {
+ if (unlikely(port_idx < 0)) {
+ plogx_dbg("ip %d.%d.%d.%d / port %x not found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
+ map[n_new_mapping] = j;
+ keys[n_new_mapping++] = (void *)&(pkt[j]->ipv4_hdr.src_addr);
+ } else {
+ ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
+ udp_src_port = &(pkt[j]->udp_hdr.src_port);
+ plogx_dbg("ip/port %d.%d.%d.%d / %x found in private ip/port hash\n", IP4(pkt[j]->ipv4_hdr.src_addr), pkt[j]->udp_hdr.src_port);
+ *ip_addr = task->private_flow_entries[port_idx].ip_addr;
+ *udp_src_port = task->private_flow_entries[port_idx].l4_port;
+ uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
+ if (flow_time + tsc_hz < tsc) {
+ task->private_flow_entries[port_idx].flow_time = tsc;
+ }
+ private_ip_idx = task->private_flow_entries[port_idx].private_ip_idx;
+ if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
+ task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
+ prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
+ out[j] = route_ipv4(task, mbufs[j]);
}
- private_ip_idx = task->private_flow_entries[port_idx].private_ip_idx;
- if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
- task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
- prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
- out[j] = route_ipv4(task, mbufs[j]);
}
}
@@ -412,7 +413,7 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
}
n_new_mapping = 0;
}
- for (int k = 0; k < n_new_mapping; ++k) {
+ for (int k = 0; k < n_new_mapping; ++k) {
private_ip_idx = positions[k];
j = map[k];
ip_addr = &(pkt[j]->ipv4_hdr.src_addr);
@@ -478,17 +479,17 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
private_port = *udp_src_port;
plogx_info("Added new ip/port: private ip/port = %d.%d.%d.%d/%x public ip/port = %d.%d.%d.%d/%x, index = %d\n", IP4(private_ip), private_port, IP4(public_ip), public_port, port_idx);
}
- // task->private_flow_entries[port_idx].ip_addr = task->private_ip_info[private_ip_idx].public_ip;
+ // task->private_flow_entries[port_idx].ip_addr = task->private_ip_info[private_ip_idx].public_ip;
plogx_info("Added new port: private ip/port = %d.%d.%d.%d/%x, public ip/port = %d.%d.%d.%d/%x\n", IP4(private_ip), private_port, IP4(task->private_ip_info[private_ip_idx].public_ip), public_port);
- *ip_addr = public_ip ;
- *udp_src_port = public_port;
+ *ip_addr = public_ip ;
+ *udp_src_port = public_port;
uint64_t flow_time = task->private_flow_entries[port_idx].flow_time;
if (flow_time + tsc_hz < tsc) {
task->private_flow_entries[port_idx].flow_time = tsc;
}
if (task->private_ip_info[private_ip_idx].mac_aging_time + tsc_hz < tsc)
task->private_ip_info[private_ip_idx].mac_aging_time = tsc;
- prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
// TODO: if route fails while just added new key in table, should we delete the key from the table?
out[j] = route_ipv4(task, mbufs[j]);
if (out[j] && new_entry) {
@@ -497,18 +498,18 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
}
}
}
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
} else {
struct public_key public_key[MAX_PKT_BURST];
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
/* Currently, only support eth/ipv4 packets */
if (pkt[j]->ether_hdr.ether_type != ETYPE_IPv4) {
plogx_info("Currently, only support eth/ipv4 packets\n");
out[j] = OUT_DISCARD;
- keys[j] = (void *)NULL;
+ keys[j] = (void *)&null_key;
continue;
}
- public_key[j].ip_addr = pkt[j]->ipv4_hdr.dst_addr;
+ public_key[j].ip_addr = pkt[j]->ipv4_hdr.dst_addr;
public_key[j].l4_port = pkt[j]->udp_hdr.dst_port;
keys[j] = &public_key[j];
}
@@ -517,26 +518,28 @@ static int handle_nat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
plogx_err("Failed lookup bulk public_ip_port_hash\n");
return -1;
}
- for (j = 0; j < n_pkts; ++j) {
+ for (j = 0; j < n_pkts; ++j) {
port_idx = positions[j];
- ip_addr = &(pkt[j]->ipv4_hdr.dst_addr);
+ ip_addr = &(pkt[j]->ipv4_hdr.dst_addr);
udp_src_port = &(pkt[j]->udp_hdr.dst_port);
- if (port_idx < 0) {
- plogx_err("Failed to find ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
- out[j] = OUT_DISCARD;
- } else {
- plogx_dbg("Found ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
- *ip_addr = task->public_entries[port_idx].ip_addr;
- *udp_src_port = task->public_entries[port_idx].l4_port;
- private_ip_idx = task->public_entries[port_idx].private_ip_idx;
- plogx_dbg("Found private IP info for ip %d.%d.%d.%d\n", IP4(*ip_addr));
- rte_memcpy(((uint8_t *)(pkt[j])) + 0, &task->private_ip_info[private_ip_idx].private_mac, 6);
- rte_memcpy(((uint8_t *)(pkt[j])) + 6, &task->src_mac_from_dpdk_port[task->public_entries[port_idx].dpdk_port], 6);
- out[j] = task->public_entries[port_idx].dpdk_port;
+ if (out[j] != OUT_DISCARD) {
+ if (port_idx < 0) {
+ plogx_err("Failed to find ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
+ out[j] = OUT_DISCARD;
+ } else {
+ plogx_dbg("Found ip/port %d.%d.%d.%d/%x in public_ip_port_hash\n", IP4(*ip_addr), *udp_src_port);
+ *ip_addr = task->public_entries[port_idx].ip_addr;
+ *udp_src_port = task->public_entries[port_idx].l4_port;
+ private_ip_idx = task->public_entries[port_idx].private_ip_idx;
+ plogx_dbg("Found private IP info for ip %d.%d.%d.%d\n", IP4(*ip_addr));
+ rte_memcpy(((uint8_t *)(pkt[j])) + 0, &task->private_ip_info[private_ip_idx].private_mac, 6);
+ rte_memcpy(((uint8_t *)(pkt[j])) + 6, &task->src_mac_from_dpdk_port[task->public_entries[port_idx].dpdk_port], 6);
+ out[j] = task->public_entries[port_idx].dpdk_port;
+ }
}
- prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_udp_cksum(mbufs[j], &pkt[j]->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
}
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
}
@@ -564,9 +567,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
}
struct tmp_public_ip {
- uint32_t ip_beg;
+ uint32_t ip_beg;
uint32_t ip_end;
- uint16_t port_beg;
+ uint16_t port_beg;
uint16_t port_end;
};
struct tmp_static_ip {
@@ -597,10 +600,10 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
plogx_info("No dynamic table found\n");
} else {
uint64_t n_ip, n_port;
- if (!lua_istable(L, -1)) {
- plogx_err("Can't read cgnat since data is not a table\n");
- return -1;
- }
+ if (!lua_istable(L, -1)) {
+ plogx_err("Can't read cgnat since data is not a table\n");
+ return -1;
+ }
lua_len(L, -1);
n_public_groups = lua_tointeger(L, -1);
plogx_info("%d groups of public IP\n", n_public_groups);
@@ -611,9 +614,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
while (lua_next(L, -2)) {
if (lua_to_ip(L, TABLE, "public_ip_range_start", &dst_ip1) ||
- lua_to_ip(L, TABLE, "public_ip_range_stop", &dst_ip2) ||
- lua_to_val_range(L, TABLE, "public_port", &dst_port))
- return -1;
+ lua_to_ip(L, TABLE, "public_ip_range_stop", &dst_ip2) ||
+ lua_to_val_range(L, TABLE, "public_port", &dst_port))
+ return -1;
PROX_PANIC(dst_ip2 < dst_ip1, "public_ip_range error: %d.%d.%d.%d < %d.%d.%d.%d\n", (dst_ip2 >> 24), (dst_ip2 >> 16) & 0xFF, (dst_ip2 >> 8) & 0xFF, dst_ip2 & 0xFF, dst_ip1 >> 24, (dst_ip1 >> 16) & 0xFF, (dst_ip1 >> 8) & 0xFF, dst_ip1 & 0xFF);
PROX_PANIC(dst_port.end < dst_port.beg, "public_port error: %d < %d\n", dst_port.end, dst_port.beg);
n_ip = dst_ip2 - dst_ip1 + 1;
@@ -634,9 +637,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
if ((pop2 = lua_getfrom(L, TABLE, "static_ip")) < 0) {
plogx_info("No static ip table found\n");
} else {
- if (!lua_istable(L, -1)) {
- plogx_err("Can't read cgnat since data is not a table\n");
- return -1;
+ if (!lua_istable(L, -1)) {
+ plogx_err("Can't read cgnat since data is not a table\n");
+ return -1;
}
lua_len(L, -1);
@@ -648,7 +651,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
lua_pushnil(L);
while (lua_next(L, -2)) {
if (lua_to_ip(L, TABLE, "src_ip", &ip_from) ||
- lua_to_ip(L, TABLE, "dst_ip", &ip_to))
+ lua_to_ip(L, TABLE, "dst_ip", &ip_to))
return -1;
ip_from = rte_bswap32(ip_from);
ip_to = rte_bswap32(ip_to);
@@ -669,9 +672,9 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
if ((pop2 = lua_getfrom(L, TABLE, "static_ip_port")) < 0) {
plogx_info("No static table found\n");
} else {
- if (!lua_istable(L, -1)) {
- plogx_err("Can't read cgnat since data is not a table\n");
- return -1;
+ if (!lua_istable(L, -1)) {
+ plogx_err("Can't read cgnat since data is not a table\n");
+ return -1;
}
lua_len(L, -1);
@@ -684,10 +687,10 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
while (lua_next(L, -2)) {
if (lua_to_ip(L, TABLE, "src_ip", &ip_from) ||
- lua_to_ip(L, TABLE, "dst_ip", &ip_to) ||
- lua_to_port(L, TABLE, "src_port", &port_from) ||
- lua_to_port(L, TABLE, "dst_port", &port_to))
- return -1;
+ lua_to_ip(L, TABLE, "dst_ip", &ip_to) ||
+ lua_to_port(L, TABLE, "src_port", &port_from) ||
+ lua_to_port(L, TABLE, "dst_port", &port_to))
+ return -1;
ip_from = rte_bswap32(ip_from);
ip_to = rte_bswap32(ip_to);
@@ -742,7 +745,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
ip_info = &tmp_public_ip_config_info[ip_free_count];
ip_info->public_ip = rte_bswap32(ip);
ip_info->port_list = (uint16_t *)prox_zmalloc((dst_port.end - dst_port.beg) * sizeof(uint16_t), socket);
- PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", ip);
+ PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", ip);
for (uint32_t port = tmp_public_ip[i].port_beg; port <= tmp_public_ip[i].port_end; port++) {
ip_info->port_list[ip_info->port_free_count] = rte_bswap16(port);
ip_info->port_free_count++;
@@ -765,7 +768,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
ip_info = &tmp_public_ip_config_info[ip_free_count];
ip_info->public_ip = tmp_static_ip_port[i].public_ip;
ip_info->port_list = (uint16_t *)prox_zmalloc(tmp_static_ip_port[i].n_ports * sizeof(uint16_t), socket);
- PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", tmp_static_ip_port[i].public_ip);
+ PROX_PANIC(ip_info->port_list == NULL, "Failed to allocate list of ports for ip %x\n", tmp_static_ip_port[i].public_ip);
ip_info->port_list[ip_info->port_free_count] = tmp_static_ip_port[i].public_port;
ip_info->port_free_count++;
ip_info->max_port_count = ip_info->port_free_count;
@@ -794,6 +797,7 @@ static int lua_to_hash_nat(struct task_args *targ, struct lua_State *L, enum lua
.key_len = sizeof(struct private_key),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket,
};
plogx_info("hash table name = %s\n", hash_params.name);
struct private_key private_key;
@@ -961,7 +965,7 @@ static void init_task_nat(struct task_base *tbase, struct task_args *targ)
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
}
@@ -973,12 +977,11 @@ static struct task_init task_init_nat = {
.init = init_task_nat,
.handle = handle_nat_bulk,
#ifdef SOFT_CRC
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_ROUTING|TASK_FEATURE_ZERO_RX,
+ .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_ROUTING|TASK_FEATURE_ZERO_RX,
#else
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_ROUTING|TASK_FEATURE_ZERO_RX,
+ .flag_features = TASK_FEATURE_ROUTING|TASK_FEATURE_ZERO_RX,
#endif
.size = sizeof(struct task_nat),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
__attribute__((constructor)) static void reg_task_nat(void)
diff --git a/VNFs/DPPD-PROX/handle_classify.c b/VNFs/DPPD-PROX/handle_classify.c
index f4f96aaf..96a14149 100644
--- a/VNFs/DPPD-PROX/handle_classify.c
+++ b/VNFs/DPPD-PROX/handle_classify.c
@@ -32,11 +32,14 @@
#include "log.h"
#include "quit.h"
#include "prox_shared.h"
+#include "handle_sched.h"
+#include "prox_compat.h"
struct task_classify {
struct task_base base;
uint16_t *user_table;
uint8_t *dscp;
+ struct rte_sched_port *sched_port;
};
static inline void handle_classify(struct task_classify *task, struct rte_mbuf *mbuf)
@@ -52,19 +55,19 @@ static inline void handle_classify(struct task_classify *task, struct rte_mbuf *
uint32_t prev_tc;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
uint32_t dummy;
- rte_sched_port_pkt_read_tree_path(mbuf, &dummy, &dummy, &prev_tc, &dummy);
+ prox_rte_sched_port_pkt_read_tree_path(task->sched_port, mbuf, &dummy, &dummy, &prev_tc, &dummy);
#else
struct rte_sched_port_hierarchy *sched = (struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
prev_tc = sched->traffic_class;
#endif
- const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1);
+ const prox_rte_ipv4_hdr *ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1);
uint8_t dscp = task->dscp[ipv4_hdr->type_of_service >> 2];
uint8_t queue = dscp & 0x3;
uint8_t tc = prev_tc? prev_tc : dscp >> 2;
- rte_sched_port_pkt_write(mbuf, 0, task->user_table[qinq], tc, queue, 0);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbuf, 0, task->user_table[qinq], tc, queue, 0);
}
static int handle_classify_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
@@ -117,6 +120,8 @@ static void init_task_classify(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(ret, "Failed to create dscp table from config\n");
prox_sh_add_socket(socket_id, targ->dscp, task->dscp);
}
+ int rc = init_port_sched(&task->sched_port, targ);
+ PROX_PANIC(rc, "Did not find any QoS task to transmit to => undefined sched_port parameters\n");
}
static struct task_init task_init_classify = {
diff --git a/VNFs/DPPD-PROX/handle_dump.c b/VNFs/DPPD-PROX/handle_dump.c
index c35a6e9e..8fbc514c 100644
--- a/VNFs/DPPD-PROX/handle_dump.c
+++ b/VNFs/DPPD-PROX/handle_dump.c
@@ -24,13 +24,14 @@
#include "task_init.h"
#include "task_base.h"
#include "stats.h"
+#include "prox_compat.h"
struct task_dump {
struct task_base base;
uint32_t n_mbufs;
struct rte_mbuf **mbufs;
uint32_t n_pkts;
- char pcap_file[128];
+ char pcap_file[256];
};
static uint16_t buffer_packets(struct task_dump *task, struct rte_mbuf **mbufs, uint16_t n_pkts)
@@ -41,7 +42,12 @@ static uint16_t buffer_packets(struct task_dump *task, struct rte_mbuf **mbufs,
return 0;
for (j = 0; j < n_pkts && task->n_mbufs < task->n_pkts; ++j) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ uint64_t rdtsc = rte_rdtsc();
+ memcpy(&mbufs[j]->dynfield1[0], &rdtsc, sizeof(rdtsc));
+#else
mbufs[j]->udata64 = rte_rdtsc();
+#endif
task->mbufs[task->n_mbufs++] = mbufs[j];
}
@@ -64,12 +70,14 @@ static void init_task_dump(struct task_base *tbase, __attribute__((unused)) stru
struct task_dump *task = (struct task_dump *)tbase;
const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
+ if (targ->n_pkts == 0)
+ targ->n_pkts = 64 * 1024;
task->mbufs = prox_zmalloc(sizeof(*task->mbufs) * targ->n_pkts, socket_id);
task->n_pkts = targ->n_pkts;
if (!strcmp(targ->pcap_file, "")) {
strcpy(targ->pcap_file, "out.pcap");
}
- strncpy(task->pcap_file, targ->pcap_file, sizeof(task->pcap_file));
+ prox_strncpy(task->pcap_file, targ->pcap_file, sizeof(task->pcap_file));
}
static void stop(struct task_base *tbase)
@@ -90,10 +98,20 @@ static void stop(struct task_base *tbase)
pcap_dump_handle = pcap_dump_open(handle, task->pcap_file);
if (task->n_mbufs) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ memcpy(&beg, &task->mbufs[0]->dynfield1[0], sizeof(beg));
+#else
beg = task->mbufs[0]->udata64;
+#endif
}
for (uint32_t j = 0; j < task->n_mbufs; ++j) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ uint64_t mbufs_beg;
+ memcpy(&mbufs_beg, &task->mbufs[j]->dynfield1[0], sizeof(mbufs_beg));
+ tsc = mbufs_beg - beg;
+#else
tsc = task->mbufs[j]->udata64 - beg;
+#endif
header.len = rte_pktmbuf_pkt_len(task->mbufs[j]);
header.caplen = header.len;
tsc_to_tv(&header.ts, tsc);
diff --git a/VNFs/DPPD-PROX/handle_esp.c b/VNFs/DPPD-PROX/handle_esp.c
index 15996d58..a78130bf 100644
--- a/VNFs/DPPD-PROX/handle_esp.c
+++ b/VNFs/DPPD-PROX/handle_esp.c
@@ -31,691 +31,711 @@
#include "cfgfile.h"
#include "log.h"
#include "prox_cksum.h"
+#include "defines.h"
#include <rte_ip.h>
#include <rte_cryptodev.h>
-#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
#include "prox_port_cfg.h"
+#include "prox_compat.h"
typedef unsigned int u32;
typedef unsigned char u8;
-#define MAX_ASYNC_SESSIONS 256
-#define BYTE_LENGTH(x) (x/8)
-#define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))
-
-//#define CIPHER_KEY_LENGTH_AES_CBC (32)
-#define CIPHER_KEY_LENGTH_AES_CBC (16)//==TEST
-#define CIPHER_IV_LENGTH_AES_CBC 16
-//#define SINGLE_VDEV 1
-
-static inline void *get_sym_cop(struct rte_crypto_op *cop)
-{
- //return (cop + 1);//makes no sense on dpdk_17.05.2; TODO: doublecheck
- return cop->sym;
-}
-
-struct task_esp_enc {
- struct task_base base;
- uint8_t crypto_dev_id;
- uint16_t qp_id;
- u8 iv[16];
- uint32_t local_ipv4;
- struct ether_addr local_mac;
- uint32_t remote_ipv4;
- u8 key[16];
- uint32_t ipaddr;
- struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_xform cipher_xform;
- struct rte_crypto_sym_xform auth_xform;
- uint8_t head;
- uint8_t nb_enc;
- struct rte_crypto_op *ops_rx_burst[MAX_ASYNC_SESSIONS];
- struct rte_crypto_op *ops_tx_burst[MAX_ASYNC_SESSIONS];
-};
-
-struct task_esp_dec {
- struct task_base base;
- uint8_t crypto_dev_id;
- uint16_t qp_id;
- u8 iv[16];
- uint32_t local_ipv4;
- struct ether_addr local_mac;
- u8 key[16];
- uint32_t ipaddr;
- struct rte_cryptodev_sym_session *sess;
- struct rte_crypto_sym_xform cipher_xform;
- struct rte_crypto_sym_xform auth_xform;
- struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
-};
-
-struct crypto_testsuite_params {
- struct rte_mempool *mbuf_ol_pool_enc;
- struct rte_mempool *mbuf_ol_pool_dec;
-
- struct rte_cryptodev_config conf;
- struct rte_cryptodev_qp_conf qp_conf;
+#define BYTE_LENGTH(x) (x/8)
+#define DIGEST_BYTE_LENGTH_SHA1 (BYTE_LENGTH(160))
+
+//#define CIPHER_KEY_LENGTH_AES_CBC (32)
+#define CIPHER_KEY_LENGTH_AES_CBC (16)//==TEST
+#define CIPHER_IV_LENGTH_AES_CBC 16
+
+#define MAXIMUM_IV_LENGTH 16
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
+
+#define MAX_SESSIONS 1024
+#define POOL_CACHE_SIZE 128
+
+//#define NUM_OPS 256
+#define NUM_OPS 128
+struct task_esp {
+ struct task_base base;
+ uint8_t cdev_id;
+ uint16_t qp_id;
+ uint32_t local_ipv4;
+ prox_rte_ether_addr local_mac;
+ uint32_t remote_ipv4;
+ prox_rte_ether_addr dst_mac;
+ struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *session_pool;
+ struct rte_cryptodev_sym_session *sess;
+ struct rte_crypto_op *ops_burst[NUM_OPS];
+ unsigned len; //number of ops ready to be enqueued
+ uint32_t pkts_in_flight; // difference between enqueued and dequeued
+ uint8_t (*handle_esp_finish)(struct task_esp *task,
+ struct rte_mbuf *mbuf, uint8_t status);
+ uint8_t (*handle_esp_ah)(struct task_esp *task, struct rte_mbuf *mbuf,
+ struct rte_crypto_op *cop);
};
-static struct crypto_testsuite_params testsuite_params = { NULL };
-static enum rte_cryptodev_type gbl_cryptodev_preftest_devtype = RTE_CRYPTODEV_AESNI_MB_PMD;
-
static uint8_t hmac_sha1_key[] = {
- 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
- 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
- 0xDE, 0xF4, 0xDE, 0xAD };
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD };
static uint8_t aes_cbc_key[] = {
- 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
- 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
- 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
- 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
static uint8_t aes_cbc_iv[] = {
- 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
- 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
-
-//RFC4303
-struct esp_hdr {
- uint32_t spi;
- uint32_t sn;
-};
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A };
-static void init_task_esp_common(struct task_base *tbase, struct task_args *targ)
+static void printf_cdev_info(uint8_t cdev_id)
{
- struct task_esp_enc *task = (struct task_esp_enc *)tbase;
- char name[30];
- static int vdev_initialized = 0;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
-#ifdef SINGLE_VDEV
- if (!vdev_initialized) {
- rte_vdev_init("crypto_aesni_mb", "max_nb_queue_pairs=16,max_nb_sessions=1024,socket_id=0");
- int nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
- PROX_PANIC(nb_devs < 1, "No crypto devices found?\n");
- vdev_initialized = 1;
- plog_info("%d crypto \n", nb_devs);
- task->crypto_dev_id = rte_cryptodev_get_dev_id("crypto_aesni_mb");
- } else {
- task->crypto_dev_id = 0;
+ struct rte_cryptodev_info dev_info;
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ plog_info("!!!numdevs:%d\n", rte_cryptodev_count());
+ //uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id);
+ plog_info("dev:%d name:%s nb_queue_pairs:%d max_nb_sessions:%d\n",
+ cdev_id, dev_info.driver_name, dev_info.max_nb_queue_pairs, dev_info.sym.max_nb_sessions);
+ const struct rte_cryptodev_capabilities *cap = &dev_info.capabilities[0];
+ int i=0;
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ //plog_info("cap->sym.xform_type:%d,");
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ plog_info("RTE_CRYPTO_SYM_XFORM_CIPHER: %d\n", cap->sym.cipher.algo);
+ cap = &dev_info.capabilities[++i];
}
-#else
- sprintf(name, "crypto_aesni_mb%02d", targ->lconf->id);
- rte_vdev_init(name, "max_nb_queue_pairs=4,max_nb_sessions=128,socket_id=0");
- int nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
- PROX_PANIC(nb_devs < 1, "No crypto devices found?\n");
- plog_info("%d crypto \n", nb_devs);
- task->crypto_dev_id = rte_cryptodev_get_dev_id(name);
-#endif
+}
-#if 1
- plog_info("cryptodev_count=%d\n", rte_cryptodev_count());
- plog_info("cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD)=%d\n",
- rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD));
-
- struct rte_cryptodev_info info;
- rte_cryptodev_info_get(task->crypto_dev_id, &info);
- plog_info("driver_name=%s pci_dev=? feature_flags=? capabilities=? max_nb_queue_pairs=%u, max_nb_sessions=%u max_nb_sessions_per_qp=%u\n",
- info.driver_name,
- info.max_nb_queue_pairs,
- info.sym.max_nb_sessions,
- info.sym.max_nb_sessions_per_qp
- );
-#endif
+static uint8_t get_cdev_id(void)
+{
+ static uint8_t last_unused_cdev_id=0;
+ char name[64]={0};
+ uint8_t cdev_count, cdev_id;
+
+ cdev_count = rte_cryptodev_count();
+ plog_info("crypto dev count: %d \n", cdev_count);
+ for (cdev_id = last_unused_cdev_id; cdev_id < cdev_count; cdev_id++) {
+ if (cdev_id != 1) {
+ printf_cdev_info(cdev_id);
+ last_unused_cdev_id = cdev_id + 1;
+ return cdev_id;
+ }
+ }
+ sprintf(name, "crypto_aesni_mb%d", cdev_count);
- ts_params->conf.socket_id = SOCKET_ID_ANY;
- ts_params->conf.session_mp.nb_objs = 2048;
-#ifdef SINGLE_VDEV
- ts_params->conf.nb_queue_pairs = 16;
- ts_params->qp_conf.nb_descriptors = 4096;
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
+ int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,max_nb_sessions=1024,socket_id=0");
#else
- ts_params->conf.nb_queue_pairs = 4;
- ts_params->qp_conf.nb_descriptors = 2048;
- ts_params->conf.session_mp.cache_size = 64;
+ int ret = rte_vdev_init(name, "max_nb_queue_pairs=8,socket_id=0");
#endif
+ PROX_PANIC(ret != 0, "Failed rte_vdev_init\n");
+ cdev_id = rte_cryptodev_get_dev_id(name);
- /*Now reconfigure queues to size we actually want to use in this testsuite.*/
- rte_cryptodev_configure(task->crypto_dev_id, &ts_params->conf);
- //TODO: move qp init here
- //rte_cryptodev_start(task->crypto_dev_id);//call after setup qp
- //to undo call rte_cryptodev_stop()
+ printf_cdev_info(cdev_id);
+ last_unused_cdev_id = cdev_id + 1;
+ return cdev_id;
}
-static uint16_t get_qp_id(void)
+static inline uint8_t handle_enc_finish(struct task_esp *task,
+ struct rte_mbuf *mbuf, uint8_t status)
{
- static uint16_t qp_id=0;
- PROX_PANIC(qp_id >= 16, "exceeded max_nb_queue_pairs\n");
- return qp_id++;
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ pip4->dst_addr = task->remote_ipv4;
+ pip4->src_addr = task->local_ipv4;
+ prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
+ sizeof(prox_rte_ipv4_hdr), 1);
+ return 0;
}
-static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
+static inline uint8_t handle_dec_finish(struct task_esp *task,
+ struct rte_mbuf *mbuf, uint8_t status)
{
- int i, nb_devs, valid_dev_id = 0;
- struct rte_cryptodev_info info;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- init_task_esp_common(tbase, targ);
- tbase->flags |= FLAG_NEVER_FLUSH;
-
- char name[30];
- sprintf(name, "crypto_op_pool_enc_%03d", targ->lconf->id);
-
-#ifdef SINGLE_VDEV
- ts_params->mbuf_ol_pool_enc = rte_crypto_op_pool_create("crypto_op_pool_enc",
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024), 128, 0,
- rte_socket_id());
-#else
- ts_params->mbuf_ol_pool_enc = rte_crypto_op_pool_create(name,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024/8), 128, 0,
- rte_socket_id());
-#endif
- PROX_PANIC(ts_params->mbuf_ol_pool_enc == NULL, "Can't create ENC CRYPTO_OP_POOL\n");
-
- struct task_esp_enc *task = (struct task_esp_enc *)tbase;
-
- /*
- * Since we can't free and re-allocate queue memory always set the queues
- * on this device up to max size first so enough memory is allocated for
- * any later re-configures needed by other tests
- */
+ if (likely(status == RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+ u8* m = rte_pktmbuf_mtod(mbuf, u8*);
+ rte_memcpy(m + sizeof(prox_rte_ipv4_hdr) +
+ sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC, m,
+ sizeof(prox_rte_ether_hdr));
+ m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(prox_rte_ipv4_hdr) +
+ sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(m +
+ sizeof(prox_rte_ether_hdr));
+
+ if (unlikely((pip4->version_ihl >> 4) != 4)) {
+ // plog_info("non IPv4 packet after esp dec %i\n",
+ // pip4->version_ihl);
+ // plogdx_info(mbuf, "DEC TX: ");
+ return OUT_DISCARD;
+ }
+ if (pip4->time_to_live) {
+ pip4->time_to_live--;
+ }
+ else {
+ plog_info("TTL = 0 => Dropping\n");
+ return OUT_DISCARD;
+ }
+ uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
+ int len = rte_pktmbuf_pkt_len(mbuf);
+ rte_pktmbuf_trim(mbuf, len - sizeof(prox_rte_ether_hdr) -
+ ipv4_length);
-#ifdef SINGLE_VDEV
- task->qp_id=get_qp_id();
+#if 0
+ do_ipv4_swap(task, mbuf);
#else
- task->qp_id=0;
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
+ prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
+ //rte_memcpy(peth, task->dst_mac, sizeof(task->dst_mac));
#endif
- plog_info("enc: task->qp_id=%u\n", task->qp_id);
- rte_cryptodev_queue_pair_setup(task->crypto_dev_id, task->qp_id,
- &ts_params->qp_conf, rte_cryptodev_socket_id(task->crypto_dev_id));
-
- struct rte_cryptodev *dev;
- dev = rte_cryptodev_pmd_get_dev(task->crypto_dev_id);
- PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No ENC cryptodev attached\n");
-
- /* Setup Cipher Parameters */
- task->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- task->cipher_xform.next = &(task->auth_xform);
-
- task->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- task->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- task->cipher_xform.cipher.key.data = aes_cbc_key;
- task->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- task->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- task->auth_xform.next = NULL;
- task->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- task->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- task->auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
- task->auth_xform.auth.key.data = hmac_sha1_key;
- task->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- task->sess = rte_cryptodev_sym_session_create(task->crypto_dev_id, &task->cipher_xform);
- PROX_PANIC(task->sess == NULL, "Failed to create ENC session\n");
-
- //TODO: doublecheck task->ops_burst lifecycle!
- if (rte_crypto_op_bulk_alloc(ts_params->mbuf_ol_pool_enc,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- task->ops_rx_burst, MAX_ASYNC_SESSIONS) != MAX_ASYNC_SESSIONS) {
- PROX_PANIC(1, "Failed to allocate ENC crypto operations\n");
- }
- //to clean up after rte_crypto_op_bulk_alloc:
- //for (j = 0; j < MAX_PKT_BURST; j++) {
- // rte_crypto_op_free(task->ops_burst[j]);
- //}
-
- // Read config file with SAs
- task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
- task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
- //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr));
- struct prox_port_cfg *port = find_reachable_port(targ);
- memcpy(&task->local_mac, &port->eth_addr, sizeof(struct ether_addr));
-
- for (i = 0; i < 16; i++) task->key[i] = i+2;
- for (i = 0; i < 16; i++) task->iv[i] = i;
+ pip4->dst_addr = task->remote_ipv4;
+ pip4->src_addr = task->local_ipv4;
+ prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
+ sizeof(prox_rte_ipv4_hdr), 1);
+ return 0;
+ }
+ else {
+ return OUT_DISCARD;
+ }
}
-static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
+static inline uint8_t handle_esp_ah_enc(struct task_esp *task,
+ struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
{
- int i, nb_devs;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- init_task_esp_common(tbase, targ);
-
- tbase->flags |= FLAG_NEVER_FLUSH;
- ts_params->mbuf_ol_pool_dec = rte_crypto_op_pool_create("crypto_op_pool_dec",
- RTE_CRYPTO_OP_TYPE_SYMMETRIC, (2*1024*1024), 128, 0,
- rte_socket_id());
- PROX_PANIC(ts_params->mbuf_ol_pool_dec == NULL, "Can't create DEC CRYPTO_OP_POOL\n");
-
- struct task_esp_dec *task = (struct task_esp_dec *)tbase;
-
- static struct rte_cryptodev_session *sess_dec = NULL;
- // Read config file with SAs
- task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
-
- task->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
- task->cipher_xform.next = NULL;
- task->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
- task->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
- task->cipher_xform.cipher.key.data = aes_cbc_key;
- task->cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
-
- /* Setup HMAC Parameters */
- task->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- task->auth_xform.next = &task->cipher_xform;
- task->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- task->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
- task->auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
- task->auth_xform.auth.key.data = hmac_sha1_key;
- task->auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
-
- task->qp_id=get_qp_id();
- plog_info("dec: task->qp_id=%u\n", task->qp_id);
- rte_cryptodev_queue_pair_setup(task->crypto_dev_id, task->qp_id,
- &ts_params->qp_conf, rte_cryptodev_socket_id(task->crypto_dev_id));
-
- struct rte_cryptodev *dev;
- dev = rte_cryptodev_pmd_get_dev(task->crypto_dev_id);
- PROX_PANIC(dev->attached != RTE_CRYPTODEV_ATTACHED, "No DEC cryptodev attached\n");
-
- ts_params->qp_conf.nb_descriptors = 128;
-
- task->sess = rte_cryptodev_sym_session_create(task->crypto_dev_id, &task->auth_xform);
- PROX_PANIC(task->sess == NULL, "Failed to create DEC session\n");
-
- if (rte_crypto_op_bulk_alloc(ts_params->mbuf_ol_pool_dec,
- RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- task->ops_burst, MAX_PKT_BURST) != MAX_PKT_BURST) {
- PROX_PANIC(1, "Failed to allocate DEC crypto operations\n");
- }
- //to clean up after rte_crypto_op_bulk_alloc:
- //for (int j = 0; j < MAX_PKT_BURST; j++) {
- // rte_crypto_op_free(task->ops_burst[j]);
- //}
-
- struct prox_port_cfg *port = find_reachable_port(targ);
- memcpy(&task->local_mac, &port->eth_addr, sizeof(struct ether_addr));
-
-// FIXME debug data
- for (i = 0; i < 16; i++) task->key[i] = i+2;
- for (i = 0; i < 16; i++) task->iv[i] = i;
-}
+ u8 *data;
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
+ struct rte_crypto_sym_op *sym_cop = cop->sym;
+
+ if (unlikely((pip4->version_ihl >> 4) != 4)) {
+ plog_info("Received non IPv4 packet at esp enc %i\n",
+ pip4->version_ihl);
+ return OUT_DISCARD;
+ }
+ if (pip4->time_to_live) {
+ pip4->time_to_live--;
+ }
+ else {
+ plog_info("TTL = 0 => Dropping\n");
+ return OUT_DISCARD;
+ }
-static inline struct rte_mbuf *get_mbuf(struct task_esp_enc *task, struct rte_crypto_op *cop)
-{
- struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
- return sym_cop->m_src;
-}
+ // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
+ int l1 = rte_pktmbuf_pkt_len(mbuf);
+ int padding = l1 - (ipv4_length + sizeof(prox_rte_ether_hdr));
+ if (unlikely(padding > 0)) {
+ rte_pktmbuf_trim(mbuf, padding);
+ }
-static inline uint8_t handle_esp_ah_enc(struct task_esp_enc *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
-{
- u8 *data;
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
- uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
- struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
-
- if (unlikely((pip4->version_ihl >> 4) != 4)) {
- plog_info("Received non IPv4 packet at esp enc %i\n", pip4->version_ihl);
- plogdx_info(mbuf, "ENC RX: ");
- return OUT_DISCARD;
- }
- if (pip4->time_to_live) {
- pip4->time_to_live--;
- }
- else {
- plog_info("TTL = 0 => Dropping\n");
- return OUT_DISCARD;
- }
-
- // Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
- int l1 = rte_pktmbuf_pkt_len(mbuf);
- int padding = l1 - (ipv4_length + sizeof(struct ether_hdr));
- if (unlikely(padding > 0)) {
- rte_pktmbuf_trim(mbuf, padding);
- }
-
- l1 = rte_pktmbuf_pkt_len(mbuf);
- int encrypt_len = l1 - sizeof(struct ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next header(1)
- padding = 0;
- if ((encrypt_len & 0xf) != 0)
- {
- padding = 16 - (encrypt_len % 16);
- encrypt_len += padding;
- }
-
- // Encapsulate, crypt in a separate buffer
- const int extra_space = sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC; // + new IP header, SPI, SN, IV
- struct ether_addr src_mac = peth->s_addr;
- struct ether_addr dst_mac = peth->d_addr;
- uint32_t src_addr = pip4->src_addr;
- uint32_t dst_addr = pip4->dst_addr;
- uint8_t ttl = pip4->time_to_live;
- uint8_t version_ihl = pip4->version_ihl;
-
- peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
- peth = (struct ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 + padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
- peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- l1 = rte_pktmbuf_pkt_len(mbuf);
- peth->ether_type = ETYPE_IPv4;
-#if 1
- //send it back
- ether_addr_copy(&dst_mac, &peth->s_addr);
- ether_addr_copy(&src_mac, &peth->d_addr);
+ l1 = rte_pktmbuf_pkt_len(mbuf);
+ int encrypt_len = l1 - sizeof(prox_rte_ether_hdr) + 2; // According to RFC4303 table 1, encrypt len is ip+tfc_pad(o)+pad+pad len(1) + next header(1)
+ padding = 0;
+ if ((encrypt_len & 0xf) != 0){
+ padding = 16 - (encrypt_len % 16);
+ encrypt_len += padding;
+ }
+
+ const int extra_space = sizeof(prox_rte_ipv4_hdr) +
+ sizeof(struct prox_esp_hdr) + CIPHER_IV_LENGTH_AES_CBC;
+
+ prox_rte_ether_addr src_mac = peth->s_addr;
+ prox_rte_ether_addr dst_mac = peth->d_addr;
+ uint32_t src_addr = pip4->src_addr;
+ uint32_t dst_addr = pip4->dst_addr;
+ uint8_t ttl = pip4->time_to_live;
+ uint8_t version_ihl = pip4->version_ihl;
+
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, extra_space); // encap + prefix
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_append(mbuf, 0 + 1 + 1 +
+ padding + 4 + DIGEST_BYTE_LENGTH_SHA1); // padding + pad_len + next_head + seqn + ICV pad + ICV
+ peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ l1 = rte_pktmbuf_pkt_len(mbuf);
+ peth->ether_type = ETYPE_IPv4;
+#if 0
+ //send it back
+ prox_rte_ether_addr_copy(&dst_mac, &peth->s_addr);
+ prox_rte_ether_addr_copy(&src_mac, &peth->d_addr);
#else
- ether_addr_copy(&task->local_mac, &peth->s_addr);
- ether_addr_copy(&dst_mac, &peth->d_addr);//IS: dstmac should be rewritten by arp
+ prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
+ //prox_rte_ether_addr_copy(&dst_mac, &peth->d_addr);//IS: dstmac should be rewritten by arp
+ prox_rte_ether_addr_copy(&task->dst_mac, &peth->d_addr);
#endif
- pip4 = (struct ipv4_hdr *)(peth + 1);
- pip4->src_addr = task->local_ipv4;
- pip4->dst_addr = task->remote_ipv4;
- pip4->time_to_live = ttl;
- pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip in ip next proto trailer
- pip4->version_ihl = version_ihl; // 20 bytes, ipv4
- pip4->total_length = rte_cpu_to_be_16(ipv4_length + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 + DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
- pip4->packet_id = 0x0101;
- pip4->type_of_service = 0;
- pip4->time_to_live = 64;
- prox_ip_cksum(mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), 1);
-
- //find the SA when there will be more than one
- if (task->ipaddr == pip4->src_addr)
- {
- }
- data = (u8*)(pip4 + 1);
+ pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ pip4->src_addr = task->local_ipv4;
+ pip4->dst_addr = task->remote_ipv4;
+ pip4->time_to_live = ttl;
+ pip4->next_proto_id = IPPROTO_ESP; // 50 for ESP, ip in ip next proto trailer
+ pip4->version_ihl = version_ihl; // 20 bytes, ipv4
+ pip4->total_length = rte_cpu_to_be_16(ipv4_length +
+ sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)
+ + CIPHER_IV_LENGTH_AES_CBC + padding + 1 + 1 +
+ DIGEST_BYTE_LENGTH_SHA1); // iphdr+SPI+SN+IV+payload+padding+padlen+next header + crc + auth
+ pip4->packet_id = 0x0101;
+ pip4->type_of_service = 0;
+ pip4->time_to_live = 64;
+ prox_ip_cksum(mbuf, pip4, sizeof(prox_rte_ether_hdr),
+ sizeof(prox_rte_ipv4_hdr), 1);
+
+ data = (u8*)(pip4 + 1);
#if 0
- *((u32*) data) = 0x2016; // FIXME SPI
- *((u32*) data + 1) = 0x2; // FIXME SN
+ *((u32*) data) = 0x2016; // FIXME SPI
+ *((u32*) data + 1) = 0x2; // FIXME SN
#else
- struct esp_hdr *pesp = (struct esp_hdr*)(pip4+1);
- pesp->spi = src_addr;//for simplicity assume 1 tunnel per source ip
- static u32 sn = 0;
- pesp->sn = ++sn;
+ struct prox_esp_hdr *pesp = (struct prox_esp_hdr*)(pip4+1);
+ pesp->spi = src_addr;//for simplicity assume 1 tunnel per source ip
+ static u32 sn = 0;
+ pesp->seq = ++sn;
+// pesp->spi=0xAAAAAAAA;//debug
+// pesp->seq =0xBBBBBBBB;//debug
#endif
- u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
- //padl += CIPHER_IV_LENGTH_AES_CBC;
- *padl = padding;
- *(padl + 1) = 4; // ipv4 in 4
+ u8 *padl = (u8*)data + (8 + encrypt_len - 2 + CIPHER_IV_LENGTH_AES_CBC); // No ESN yet. (-2 means NH is crypted)
+ //padl += CIPHER_IV_LENGTH_AES_CBC;
+ *padl = padding;
+ *(padl + 1) = 4; // ipv4 in 4
+
+ sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC +
+ encrypt_len;
+ //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)
+ + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
+ //sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
+
+ //sym_cop->cipher.iv.data = data + 8;
+ //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
+ //sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ //rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET);
+ rte_memcpy(iv_ptr, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+
+#if 0//old
+ sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
+ sym_cop->cipher.data.length = encrypt_len;
+
+ uint64_t *iv = (uint64_t *)(pesp + 1);
+ memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
+#else
+ //uint64_t *iv = (uint64_t *)(pesp + 1);
+ //memset(iv, 0, CIPHER_IV_LENGTH_AES_CBC);
+ sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr);
+ sym_cop->cipher.data.length = encrypt_len + CIPHER_IV_LENGTH_AES_CBC;
+#endif
+
+ sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr);
+ sym_cop->auth.data.length = sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC + encrypt_len;// + 4;// FIXME
- //one key for them all for now
- rte_crypto_op_attach_sym_session(cop, task->sess);
+ sym_cop->m_src = mbuf;
+ rte_crypto_op_attach_sym_session(cop, task->sess);
- sym_cop->auth.digest.data = data + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len;
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, (sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 8 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len));
- sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
+ //cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ //cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
- sym_cop->cipher.iv.data = data + 8;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4;
- sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+ return 0;
+}
+
+static inline uint8_t handle_esp_ah_dec(struct task_esp *task,
+ struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
+{
+ struct rte_crypto_sym_op *sym_cop = cop->sym;
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
+ u8 *data = (u8*)(pip4 + 1);
+
+ if (pip4->next_proto_id != IPPROTO_ESP){
+ plog_info("Received non ESP packet on esp dec\n");
+ plogdx_info(mbuf, "DEC RX: ");
+ return OUT_DISCARD;
+ }
+
+ rte_crypto_op_attach_sym_session(cop, task->sess);
- rte_memcpy(sym_cop->cipher.iv.data, aes_cbc_iv, CIPHER_IV_LENGTH_AES_CBC);
+ sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 +
+ ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
+ //sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr)); // FIXME
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr)
+ + sizeof(struct prox_esp_hdr));
+ //sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
- sym_cop->cipher.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
- sym_cop->cipher.data.length = encrypt_len;
+ //sym_cop->cipher.iv.data = (uint8_t *)data + 8;
+ //sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + 4 + 4;
+ //sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
- sym_cop->auth.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr);
- sym_cop->auth.data.length = 4 + 4 + CIPHER_IV_LENGTH_AES_CBC + encrypt_len ;// + 4;// FIXME
+#if 0
+ rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
+ aes_cbc_iv,
+ CIPHER_IV_LENGTH_AES_CBC);
+#else
+ uint8_t * iv = (uint8_t *)(pip4 + 1) + sizeof(struct prox_esp_hdr);
+ rte_memcpy(rte_crypto_op_ctod_offset(cop, uint8_t *, IV_OFFSET),
+ iv,
+ CIPHER_IV_LENGTH_AES_CBC);
+#endif
- sym_cop->m_src = mbuf;
- //cop->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
- //cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ sym_cop->auth.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr);
+ sym_cop->auth.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) - 4 -
+ CIPHER_IV_LENGTH_AES_CBC;
- return 0;
+ sym_cop->cipher.data.offset = sizeof(prox_rte_ether_hdr) +
+ sizeof(prox_rte_ipv4_hdr) + sizeof(struct prox_esp_hdr) +
+ CIPHER_IV_LENGTH_AES_CBC;
+ sym_cop->cipher.data.length = ipv4_length - sizeof(prox_rte_ipv4_hdr) -
+ CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
+
+ sym_cop->m_src = mbuf;
+ return 0;
}
-static inline uint8_t handle_esp_ah_dec(struct task_esp_dec *task, struct rte_mbuf *mbuf, struct rte_crypto_op *cop)
+static inline void do_ipv4_swap(struct task_esp *task, struct rte_mbuf *mbuf)
{
- struct rte_crypto_sym_op *sym_cop = get_sym_cop(cop);
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
- uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
- u8 *data = (u8*)(pip4 + 1);
- //find the SA
- if (pip4->next_proto_id != IPPROTO_ESP)
- {
- plog_info("Received non ESP packet on esp dec\n");
- plogdx_info(mbuf, "DEC RX: ");
- return OUT_DISCARD;
- }
- if (task->ipaddr == pip4->src_addr)
- {
- }
-
- rte_crypto_op_attach_sym_session(cop, task->sess);
-
- sym_cop->auth.digest.data = (unsigned char *)((unsigned char*)pip4 + ipv4_length - DIGEST_BYTE_LENGTH_SHA1);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(mbuf, sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4); // FIXME
- sym_cop->auth.digest.length = DIGEST_BYTE_LENGTH_SHA1;
-
- sym_cop->cipher.iv.data = (uint8_t *)data + 8;
- sym_cop->cipher.iv.phys_addr = rte_pktmbuf_mtophys(mbuf) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4;
- sym_cop->cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
-
- sym_cop->auth.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr);
- sym_cop->auth.data.length = ipv4_length - sizeof(struct ipv4_hdr) - 4 - CIPHER_IV_LENGTH_AES_CBC;
-
- sym_cop->cipher.data.offset = sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC;
- sym_cop->cipher.data.length = ipv4_length - sizeof(struct ipv4_hdr) - CIPHER_IV_LENGTH_AES_CBC - 28; // FIXME
-
- sym_cop->m_src = mbuf;
- return 0;
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf,
+ prox_rte_ether_hdr *);
+ prox_rte_ether_addr src_mac = peth->s_addr;
+ prox_rte_ether_addr dst_mac = peth->d_addr;
+ uint32_t src_ip, dst_ip;
+
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
+ src_ip = pip4->src_addr;
+ dst_ip = pip4->dst_addr;
+
+ //peth->s_addr = dst_mac;
+ peth->d_addr = src_mac;//should be replaced by arp
+ pip4->src_addr = dst_ip;
+ pip4->dst_addr = src_ip;
+ prox_rte_ether_addr_copy(&task->local_mac, &peth->s_addr);
}
-static inline void do_ipv4_swap(struct task_esp_dec *task, struct rte_mbuf *mbuf)
+
+static void init_task_esp_enc(struct task_base *tbase, struct task_args *targ)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ether_addr src_mac = peth->s_addr;
- struct ether_addr dst_mac = peth->d_addr;
- uint32_t src_ip, dst_ip;
-
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
- src_ip = pip4->src_addr;
- dst_ip = pip4->dst_addr;
-
- //peth->s_addr = dst_mac;
- peth->d_addr = src_mac;//should be replaced by arp
- pip4->src_addr = dst_ip;
- pip4->dst_addr = src_ip;
- ether_addr_copy(&task->local_mac, &peth->s_addr);
+ struct task_esp *task = (struct task_esp *)tbase;
+ unsigned int session_size;
+
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
+
+ uint8_t lcore_id = targ->lconf->id;
+ char name[64];
+ task->handle_esp_finish = handle_enc_finish;
+ task->handle_esp_ah = handle_esp_ah_enc;
+ task->len = 0;
+ task->pkts_in_flight = 0;
+ sprintf(name, "core_%03u_crypto_pool", lcore_id);
+ task->crypto_op_pool = rte_crypto_op_pool_create(name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, targ->nb_mbuf, 128,
+ MAXIMUM_IV_LENGTH, rte_socket_id());
+ plog_info("rte_crypto_op_pool_create nb_elements =%d\n",
+ targ->nb_mbuf);
+ PROX_PANIC(task->crypto_op_pool == NULL, "Can't create ENC \
+ CRYPTO_OP_POOL\n");
+
+ task->cdev_id = get_cdev_id();
+
+ struct rte_cryptodev_config cdev_conf;
+ cdev_conf.nb_queue_pairs = 2;
+ cdev_conf.socket_id = rte_socket_id();
+ rte_cryptodev_configure(task->cdev_id, &cdev_conf);
+
+ session_size = rte_cryptodev_sym_get_private_session_size(
+ task->cdev_id);
+ plog_info("rte_cryptodev_sym_get_private_session_size=%d\n",
+ session_size);
+ sprintf(name, "core_%03u_session_pool", lcore_id);
+ task->session_pool = rte_cryptodev_sym_session_pool_create(name,
+ MAX_SESSIONS,
+ session_size,
+ POOL_CACHE_SIZE,
+ 0, rte_socket_id());
+ PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
+
+ task->qp_id=0;
+ plog_info("enc: task->qp_id=%u\n", task->qp_id);
+ struct prox_rte_cryptodev_qp_conf qp_conf;
+ qp_conf.nb_descriptors = 2048;
+ qp_conf.mp_session = task->session_pool;
+ prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id,
+ &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
+
+ int ret = rte_cryptodev_start(task->cdev_id);
+ PROX_PANIC(ret < 0, "Failed to start device\n");
+
+ //Setup Cipher Parameters
+ struct rte_crypto_sym_xform cipher_xform = {0};
+ struct rte_crypto_sym_xform auth_xform = {0};
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+// cipher_xform.next = &auth_xform;
+ cipher_xform.next = NULL; //CRYPTO_ONLY
+
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ cipher_xform.cipher.key.data = aes_cbc_key;
+ cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ //Setup HMAC Parameters
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = NULL;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
+ auth_xform.auth.key.data = hmac_sha1_key;
+ auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
+
+ auth_xform.auth.iv.offset = 0;
+ auth_xform.auth.iv.length = 0;
+
+ task->sess = rte_cryptodev_sym_session_create(task->cdev_id,
+ &cipher_xform, task->session_pool);
+ PROX_PANIC(task->sess < 0, "Failed ENC sym_session_create\n");
+
+ task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
+ task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
+ //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
+ struct prox_port_cfg *port = find_reachable_port(targ);
+ memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
+
+ if (targ->flags & TASK_ARG_DST_MAC_SET){
+ memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
+ plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n",
+ MAC_BYTES(task->dst_mac.addr_bytes));
+ //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
+ //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
+ }
}
-static inline uint8_t handle_esp_ah_dec_finish(struct task_esp_dec *task, struct rte_mbuf *mbuf)
+static void init_task_esp_dec(struct task_base *tbase, struct task_args *targ)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- rte_memcpy(((u8*)peth) + sizeof (struct ether_hdr), ((u8*)peth) + sizeof (struct ether_hdr) +
- + sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, sizeof(struct ipv4_hdr));// next hdr, padding
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
-
- if (unlikely((pip4->version_ihl >> 4) != 4)) {
- plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
- plogdx_info(mbuf, "DEC TX: ");
- return OUT_DISCARD;
- }
- if (pip4->time_to_live) {
- pip4->time_to_live--;
- }
- else {
- plog_info("TTL = 0 => Dropping\n");
- return OUT_DISCARD;
- }
- uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
- rte_memcpy(((u8*)peth) + sizeof (struct ether_hdr) + sizeof(struct ipv4_hdr),
- ((u8*)peth) + sizeof (struct ether_hdr) +
- + 2 * sizeof(struct ipv4_hdr) + 4 + 4 + CIPHER_IV_LENGTH_AES_CBC, ipv4_length - sizeof(struct ipv4_hdr));
-
- int len = rte_pktmbuf_pkt_len(mbuf);
- rte_pktmbuf_trim(mbuf, len - sizeof (struct ether_hdr) - ipv4_length);
- peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
-
-#if 1
- do_ipv4_swap(task, mbuf);
-#endif
- prox_ip_cksum_sw(pip4);
-// one key for them all for now
-// set key
-// struct crypto_aes_ctx ctx;
-// ctx.iv = (u8*)&iv_onstack;
-// *((u32*)ctx.iv) = *((u32*)data + 2);
-// aes_set_key(&ctx, task->key, 16);//
-//
-// result = ctr_crypt(&ctx, dest, data + 12, len);//
-// memcpy(pip4, dest, len);
-
- return 0;
+ struct task_esp *task = (struct task_esp *)tbase;
+ unsigned int session_size;
+
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
+
+ uint8_t lcore_id = targ->lconf->id;
+ char name[64];
+ task->handle_esp_finish = handle_dec_finish;
+ task->handle_esp_ah = handle_esp_ah_dec;
+ task->len = 0;
+ task->pkts_in_flight = 0;
+ sprintf(name, "core_%03u_crypto_pool", lcore_id);
+ task->crypto_op_pool = rte_crypto_op_pool_create(name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, targ->nb_mbuf, 128,
+ MAXIMUM_IV_LENGTH, rte_socket_id());
+ PROX_PANIC(task->crypto_op_pool == NULL, "Can't create DEC \
+ CRYPTO_OP_POOL\n");
+
+ task->cdev_id = get_cdev_id();
+ struct rte_cryptodev_config cdev_conf;
+ cdev_conf.nb_queue_pairs = 2;
+ cdev_conf.socket_id = SOCKET_ID_ANY;
+ cdev_conf.socket_id = rte_socket_id();
+ rte_cryptodev_configure(task->cdev_id, &cdev_conf);
+
+ session_size = rte_cryptodev_sym_get_private_session_size(
+ task->cdev_id);
+ plog_info("rte_cryptodev_sym_get_private_session_size=%d\n",
+ session_size);
+ sprintf(name, "core_%03u_session_pool", lcore_id);
+ task->session_pool = rte_cryptodev_sym_session_pool_create(name,
+ MAX_SESSIONS,
+ session_size,
+ POOL_CACHE_SIZE,
+ 0, rte_socket_id());
+ PROX_PANIC(task->session_pool == NULL, "Failed rte_mempool_create\n");
+
+ task->qp_id=0;
+ plog_info("dec: task->qp_id=%u\n", task->qp_id);
+ struct prox_rte_cryptodev_qp_conf qp_conf;
+ qp_conf.nb_descriptors = 2048;
+ qp_conf.mp_session = task->session_pool;
+ prox_rte_cryptodev_queue_pair_setup(task->cdev_id, task->qp_id,
+ &qp_conf, rte_cryptodev_socket_id(task->cdev_id));
+
+ int ret = rte_cryptodev_start(task->cdev_id);
+ PROX_PANIC(ret < 0, "Failed to start device\n");
+
+ //Setup Cipher Parameters
+ struct rte_crypto_sym_xform cipher_xform = {0};
+ struct rte_crypto_sym_xform auth_xform = {0};
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ cipher_xform.cipher.key.data = aes_cbc_key;
+ cipher_xform.cipher.key.length = CIPHER_KEY_LENGTH_AES_CBC;
+
+ cipher_xform.cipher.iv.offset = IV_OFFSET;
+ cipher_xform.cipher.iv.length = CIPHER_IV_LENGTH_AES_CBC;
+
+ //Setup HMAC Parameters
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = &cipher_xform;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ auth_xform.auth.key.length = DIGEST_BYTE_LENGTH_SHA1;
+ auth_xform.auth.key.data = hmac_sha1_key;
+ auth_xform.auth.digest_length = DIGEST_BYTE_LENGTH_SHA1;
+
+ auth_xform.auth.iv.offset = 0;
+ auth_xform.auth.iv.length = 0;
+
+ task->sess = rte_cryptodev_sym_session_create(task->cdev_id, &cipher_xform,
+ task->session_pool);
+ PROX_PANIC(task->sess < 0, "Failed DEC sym_session_create\n");
+
+ task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
+ task->remote_ipv4 = rte_cpu_to_be_32(targ->remote_ipv4);
+ //memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
+ struct prox_port_cfg *port = find_reachable_port(targ);
+ memcpy(&task->local_mac, &port->eth_addr, sizeof(prox_rte_ether_addr));
+
+ if (targ->flags & TASK_ARG_DST_MAC_SET){
+ memcpy(&task->dst_mac, &targ->edaddr, sizeof(task->dst_mac));
+ plog_info("TASK_ARG_DST_MAC_SET ("MAC_BYTES_FMT")\n",
+ MAC_BYTES(task->dst_mac.addr_bytes));
+ //prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
+ //rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
+ }
}
-static inline uint8_t handle_esp_ah_dec_finish2(struct task_esp_dec *task, struct rte_mbuf *mbuf)
+static int crypto_send_burst(struct task_esp *task, uint16_t n)
{
- u8* m = rte_pktmbuf_mtod(mbuf, u8*);
- rte_memcpy(m+sizeof(struct ipv4_hdr)+sizeof(struct esp_hdr)+CIPHER_IV_LENGTH_AES_CBC,
- m, sizeof(struct ether_hdr));
- m = (u8*)rte_pktmbuf_adj(mbuf, sizeof(struct ipv4_hdr)+sizeof(struct esp_hdr)+CIPHER_IV_LENGTH_AES_CBC);
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(m+sizeof(struct ether_hdr));
-
- if (unlikely((pip4->version_ihl >> 4) != 4)) {
- plog_info("non IPv4 packet after esp dec %i\n", pip4->version_ihl);
- plogdx_info(mbuf, "DEC TX: ");
- return OUT_DISCARD;
- }
- if (pip4->time_to_live) {
- pip4->time_to_live--;
- }
- else {
- plog_info("TTL = 0 => Dropping\n");
- return OUT_DISCARD;
- }
- uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
- int len = rte_pktmbuf_pkt_len(mbuf);
- rte_pktmbuf_trim(mbuf, len - sizeof (struct ether_hdr) - ipv4_length);
-
-#if 1
- do_ipv4_swap(task, mbuf);
-#endif
- prox_ip_cksum_sw(pip4);
- return 0;
+ uint8_t out[MAX_PKT_BURST];
+ struct rte_mbuf *mbufs[MAX_PKT_BURST];
+ unsigned ret;
+ unsigned i = 0;
+ ret = rte_cryptodev_enqueue_burst(task->cdev_id,
+ task->qp_id, task->ops_burst, n);
+ task->pkts_in_flight += ret;
+ if (unlikely(ret < n)) {
+ for (i = 0; i < (n-ret); i++) {
+ mbufs[i] = task->ops_burst[ret + i]->sym->m_src;
+ out[i] = OUT_DISCARD;
+ rte_crypto_op_free(task->ops_burst[ret + i]);
+ }
+ return task->base.tx_pkt(&task->base, mbufs, i, out);
+ }
+ return 0;
}
-static int handle_esp_enc_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+static int handle_esp_bulk(struct task_base *tbase, struct rte_mbuf **mbufs,
+ uint16_t n_pkts)
{
- struct task_esp_enc *task = (struct task_esp_enc *)tbase;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
-
- uint8_t out[MAX_ASYNC_SESSIONS];
- uint16_t i = 0, nb_rx = 0, j = 0, nb_del = 0, n_fwd = 0, ret;
- uint8_t nb_enc = 0;
- uint8_t head = task->head;
- struct rte_mbuf *del_mbufs[MAX_PKT_BURST], *fwd_mbufs[MAX_ASYNC_SESSIONS];
-
- if (task->nb_enc + n_pkts >= MAX_ASYNC_SESSIONS) {
- // Discards all packets for now - TODO fine grain...
- for (uint16_t j = 0; j < n_pkts; ++j) {
- out[j] = OUT_DISCARD;
+ struct task_esp *task = (struct task_esp *)tbase;
+ uint8_t out[MAX_PKT_BURST];
+ uint8_t result = 0;
+ uint16_t nb_deq = 0, j, idx = 0;
+ struct rte_mbuf *drop_mbufs[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+ int nbr_tx_pkt = 0;
+
+ if (likely(n_pkts != 0)) {
+ if (rte_crypto_op_bulk_alloc(task->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, n_pkts) != n_pkts) {
+ plog_info("Failed to allocate crypto operations, discarding \
+ %d packets\n", n_pkts);
+ for (j = 0; j < n_pkts; j++) {
+ out[j] = OUT_DISCARD;
+ }
+ nbr_tx_pkt += task->base.tx_pkt(&task->base, mbufs, n_pkts,
+ out);
+ }
+ else {
+ for (j = 0; j < n_pkts; j++) {
+ result = task->handle_esp_ah(task, mbufs[j],
+ ops_burst[j]);
+ if (result == 0) {
+ task->ops_burst[task->len] = ops_burst[j];
+ task->len++;
+ /* enough ops to be sent */
+ if (task->len == MAX_PKT_BURST) {
+ nbr_tx_pkt += crypto_send_burst(task,
+ (uint16_t) MAX_PKT_BURST);
+ task->len = 0;
+ }
+ }
+ else {
+ drop_mbufs[idx] = mbufs[j];
+ out[idx] = result;
+ idx++;
+ rte_crypto_op_free(ops_burst[j]);
+ plog_info("Failed handle_esp_ah for 1 \
+ packet\n");
+ }
+ }
+ if (idx) nbr_tx_pkt += task->base.tx_pkt(&task->base,
+ drop_mbufs, idx, out);
}
- task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
- n_pkts = 0;
+ } else if (task->len) {
+ // No packets where received on the rx queue, but this handle
+ // function was called anyway since some packets where not yet
+ // enqueued. Hence they get enqueued here in order to minimize
+ // latency or in case no new packets will arrive
+ nbr_tx_pkt += crypto_send_burst(task, task->len);
+ task->len = 0;
}
-
- for (uint16_t j = 0; j < n_pkts; ++j) {
- ret = handle_esp_ah_enc(task, mbufs[j], task->ops_rx_burst[head]);
- if (ret != OUT_DISCARD) {
- ++nb_enc;
- head++;
- } else {
- out[nb_del] = ret;
- del_mbufs[nb_del++] = mbufs[j];
- }
- }
-
- if ((ret = rte_cryptodev_enqueue_burst(task->crypto_dev_id, task->qp_id, &task->ops_rx_burst[task->head], nb_enc)) != nb_enc) {
- for (uint16_t j = 0; j < nb_enc - ret; ++j) {
- out[nb_del] = OUT_DISCARD;
- del_mbufs[nb_del++] = get_mbuf(task, task->ops_rx_burst[task->head+ret]);
- }
- }
- task->head+=ret;
- if (nb_del)
- task->base.tx_pkt(&task->base, del_mbufs, nb_del, out);
- task->nb_enc += nb_enc;
-
- if (task->nb_enc == 0)
- return 0;
-
- ret = rte_cryptodev_dequeue_burst(task->crypto_dev_id, task->qp_id, task->ops_tx_burst, task->nb_enc);
- for (uint16_t j = 0; j < ret; ++j) {
- out[n_fwd] = 0;
- fwd_mbufs[n_fwd++] = get_mbuf(task, task->ops_tx_burst[j]);
+ if (task->pkts_in_flight) {
+ do {
+ nb_deq = rte_cryptodev_dequeue_burst(task->cdev_id,
+ task->qp_id, ops_burst, MAX_PKT_BURST);
+ task->pkts_in_flight -= nb_deq;
+ for (j = 0; j < nb_deq; j++) {
+ mbufs[j] = ops_burst[j]->sym->m_src;
+ out[j] = task->handle_esp_finish(task, mbufs[j],
+ ops_burst[j]->status);
+ rte_crypto_op_free(ops_burst[j]);
+ }
+ nbr_tx_pkt += task->base.tx_pkt(&task->base, mbufs, nb_deq,
+ out);
+ } while (nb_deq == MAX_PKT_BURST);
}
- task->nb_enc -= n_fwd;
- return task->base.tx_pkt(&task->base, fwd_mbufs, n_fwd, out);
-}
-
-static int handle_esp_dec_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
-{
- struct task_esp_dec *task = (struct task_esp_dec *)tbase;
- struct crypto_testsuite_params *ts_params = &testsuite_params;
- uint8_t out[MAX_PKT_BURST];
- uint16_t j, nb_dec=0, nb_rx=0;
-
- for (j = 0; j < n_pkts; ++j) {
- out[j] = handle_esp_ah_dec(task, mbufs[j], task->ops_burst[nb_dec]);
- if (out[j] != OUT_DISCARD)
- ++nb_dec;
- }
-
- if (rte_cryptodev_enqueue_burst(task->crypto_dev_id, task->qp_id, task->ops_burst, nb_dec) != nb_dec) {
- plog_info("Error dec enqueue_burst\n");
- return -1;
- }
-
- j=0;
- do {
- nb_rx = rte_cryptodev_dequeue_burst(task->crypto_dev_id, task->qp_id,
- task->ops_burst+j, nb_dec-j);
- j += nb_rx;
- } while (j < nb_dec);
-
- for (j = 0; j < nb_dec; ++j) {
- if (task->ops_burst[j]->status != RTE_CRYPTO_OP_STATUS_SUCCESS){
- plog_info("err: task->ops_burst[%d].status=%d\n", j, task->ops_burst[j]->status);
- //!!!TODO!!! find mbuf and discard it!!!
- //for now just send it further
- //plogdx_info(mbufs[j], "RX: ");
- }
- if (task->ops_burst[j]->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
- struct rte_mbuf *mbuf = task->ops_burst[j]->sym->m_src;
- handle_esp_ah_dec_finish2(task, mbuf);//TODO set out[j] properly
- }
- }
-
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
+ return nbr_tx_pkt;
}
struct task_init task_init_esp_enc = {
.mode = ESP_ENC,
.mode_str = "esp_enc",
.init = init_task_esp_enc,
- .handle = handle_esp_enc_bulk,
+ .handle = handle_esp_bulk,
.flag_features = TASK_FEATURE_ZERO_RX,
- .size = sizeof(struct task_esp_enc),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM
+ .size = sizeof(struct task_esp),
};
struct task_init task_init_esp_dec = {
- .mode = ESP_ENC,
+ .mode = ESP_DEC,
.mode_str = "esp_dec",
.init = init_task_esp_dec,
- .handle = handle_esp_dec_bulk,
+ .handle = handle_esp_bulk,
.flag_features = TASK_FEATURE_ZERO_RX,
- .size = sizeof(struct task_esp_dec),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM
+ .size = sizeof(struct task_esp),
};
__attribute__((constructor)) static void reg_task_esp_enc(void)
{
- reg_task(&task_init_esp_enc);
+ reg_task(&task_init_esp_enc);
}
__attribute__((constructor)) static void reg_task_esp_dec(void)
{
- reg_task(&task_init_esp_dec);
+ reg_task(&task_init_esp_dec);
}
diff --git a/VNFs/DPPD-PROX/handle_fm.c b/VNFs/DPPD-PROX/handle_fm.c
index c4a10e67..75d0cee1 100644
--- a/VNFs/DPPD-PROX/handle_fm.c
+++ b/VNFs/DPPD-PROX/handle_fm.c
@@ -21,6 +21,7 @@
#include <rte_tcp.h>
#include <rte_cycles.h>
#include <rte_ether.h>
+#include <rte_ethdev.h> // required by rte_eth_ctrl.h in 19.05
#include <rte_eth_ctrl.h>
#include "log.h"
@@ -50,13 +51,13 @@ struct task_fm {
};
struct eth_ip4_udp {
- struct ether_hdr l2;
- struct ipv4_hdr l3;
+ prox_rte_ether_hdr l2;
+ prox_rte_ipv4_hdr l3;
union {
- struct udp_hdr udp;
- struct tcp_hdr tcp;
+ prox_rte_udp_hdr udp;
+ prox_rte_tcp_hdr tcp;
} l4;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
union pkt_type {
struct {
@@ -103,8 +104,8 @@ static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct
fi_flipped->port_src = p->l4.udp.dst_port;
fi_flipped->port_dst = p->l4.udp.src_port;
- *len = rte_be_to_cpu_16(p->l4.udp.dgram_len) - sizeof(struct udp_hdr);
- *payload = (uint8_t*)(&p->l4.udp) + sizeof(struct udp_hdr);
+ *len = rte_be_to_cpu_16(p->l4.udp.dgram_len) - sizeof(prox_rte_udp_hdr);
+ *payload = (uint8_t*)(&p->l4.udp) + sizeof(prox_rte_udp_hdr);
return 0;
}
else if (pkt_type.val == pkt_type_tcp.val) {
@@ -120,7 +121,7 @@ static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct
fi_flipped->port_src = p->l4.tcp.dst_port;
fi_flipped->port_dst = p->l4.tcp.src_port;
- *len = rte_be_to_cpu_16(p->l3.total_length) - sizeof(struct ipv4_hdr) - ((p->l4.tcp.data_off >> 4)*4);
+ *len = rte_be_to_cpu_16(p->l3.total_length) - sizeof(prox_rte_ipv4_hdr) - ((p->l4.tcp.data_off >> 4)*4);
*payload = ((uint8_t*)&p->l4.tcp) + ((p->l4.tcp.data_off >> 4)*4);
return 0;
}
@@ -131,7 +132,7 @@ static int extract_flow_info(struct eth_ip4_udp *p, struct flow_info *fi, struct
static int is_flow_beg(const struct flow_info *fi, const struct eth_ip4_udp *p)
{
return fi->ip_proto == IPPROTO_UDP ||
- (fi->ip_proto == IPPROTO_TCP && p->l4.tcp.tcp_flags & TCP_SYN_FLAG);
+ (fi->ip_proto == IPPROTO_TCP && p->l4.tcp.tcp_flags & PROX_RTE_TCP_SYN_FLAG);
}
static void *lookup_flow(struct task_fm *task, struct flow_info *fi, uint64_t now_tsc)
diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c
index 89dbe9e4..2c8a65c7 100644
--- a/VNFs/DPPD-PROX/handle_gen.c
+++ b/VNFs/DPPD-PROX/handle_gen.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,6 +14,11 @@
// limitations under the License.
*/
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <rte_mbuf.h>
#include <pcap.h>
#include <string.h>
@@ -22,7 +27,9 @@
#include <rte_version.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
+#include <rte_hash.h>
#include <rte_hash_crc.h>
+#include <rte_malloc.h>
#include "prox_shared.h"
#include "random.h"
@@ -47,20 +54,36 @@
#include "arp.h"
#include "tx_pkt.h"
#include "handle_master.h"
+#include "defines.h"
+#include "prox_ipv6.h"
+#include "handle_lb_5tuple.h"
struct pkt_template {
uint16_t len;
uint16_t l2_len;
uint16_t l3_len;
- uint8_t buf[ETHER_MAX_LEN];
+ uint8_t *buf;
};
-#define MAX_TEMPLATE_INDEX 65536
-#define TEMPLATE_INDEX_MASK (MAX_TEMPLATE_INDEX - 1)
-#define MBUF_ARP MAX_TEMPLATE_INDEX
+#define MAX_STORE_PKT_SIZE 2048
+
+struct packet {
+ unsigned int len;
+ unsigned char buf[MAX_STORE_PKT_SIZE];
+};
#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
+#define DO_PANIC 1
+#define DO_NOT_PANIC 0
+
+#define FROM_PCAP 1
+#define NOT_FROM_PCAP 0
+
+#define MAX_RANGES 64
+
+#define TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC 1
+
static void pkt_template_init_mbuf(struct pkt_template *pkt_template, struct rte_mbuf *mbuf, uint8_t *pkt)
{
const uint32_t pkt_size = pkt_template->len;
@@ -81,12 +104,16 @@ struct task_gen_pcap {
uint32_t n_pkts;
uint64_t last_tsc;
uint64_t *proto_tsc;
+ uint32_t socket_id;
+};
+
+struct flows {
+ uint32_t packet_id;
};
struct task_gen {
struct task_base base;
uint64_t hz;
- uint64_t link_speed;
struct token_time token_time;
struct local_mbuf local_mbuf;
struct pkt_template *pkt_template; /* packet templates used at runtime */
@@ -95,16 +122,22 @@ struct task_gen {
uint64_t new_rate_bps;
uint64_t pkt_queue_index;
uint32_t n_pkts; /* number of packets in pcap */
+ uint32_t orig_n_pkts; /* number of packets in pcap */
uint32_t pkt_idx; /* current packet from pcap */
uint32_t pkt_count; /* how many packets to generate */
+ uint32_t max_frame_size;
uint32_t runtime_flags;
uint16_t lat_pos;
uint16_t packet_id_pos;
uint16_t accur_pos;
uint16_t sig_pos;
+ uint16_t flow_id_pos;
+ uint16_t packet_id_in_flow_pos;
uint32_t sig;
+ uint32_t socket_id;
uint8_t generator_id;
uint8_t n_rands; /* number of randoms */
+ uint8_t n_ranges; /* number of ranges */
uint8_t min_bulk_size;
uint8_t max_bulk_size;
uint8_t lat_enabled;
@@ -116,20 +149,37 @@ struct task_gen {
uint16_t rand_offset; /* each random has an offset*/
uint8_t rand_len; /* # bytes to take from random (no bias introduced) */
} rand[64];
- uint64_t accur[64];
+ struct range ranges[MAX_RANGES];
+ uint64_t accur[ACCURACY_WINDOW];
uint64_t pkt_tsc_offset[64];
struct pkt_template *pkt_template_orig; /* packet templates (from inline or from pcap) */
- struct ether_addr src_mac;
+ prox_rte_ether_addr src_mac;
uint8_t flags;
uint8_t cksum_offload;
struct prox_port_cfg *port;
+ uint64_t *bytes_to_tsc;
+ uint32_t imix_pkt_sizes[MAX_IMIX_PKTS];
+ uint32_t imix_nb_pkts;
+ uint32_t new_imix_nb_pkts;
+ uint32_t store_pkt_id;
+ uint32_t store_msk;
+ struct packet *store_buf;
+ FILE *fp;
+ struct rte_hash *flow_id_table;
+ struct flows*flows;
} __rte_cache_aligned;
-static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
+static void task_gen_set_pkt_templates_len(struct task_gen *task, uint32_t *pkt_sizes);
+static void task_gen_reset_pkt_templates_content(struct task_gen *task);
+static void task_gen_pkt_template_recalc_metadata(struct task_gen *task);
+static int check_all_pkt_size(struct task_gen *task, int do_panic);
+static int check_all_fields_in_bounds(struct task_gen *task, int do_panic);
+
+static inline uint8_t ipv4_get_hdr_len(prox_rte_ipv4_hdr *ip)
{
/* Optimize for common case of IPv4 header without options. */
if (ip->version_ihl == 0x45)
- return sizeof(struct ipv4_hdr);
+ return sizeof(prox_rte_ipv4_hdr);
if (unlikely(ip->version_ihl >> 4 != 4)) {
plog_warn("IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
return 0;
@@ -139,16 +189,16 @@ static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, uint16_t len)
{
- *l2_len = sizeof(struct ether_hdr);
+ *l2_len = sizeof(prox_rte_ether_hdr);
*l3_len = 0;
- struct vlan_hdr *vlan_hdr;
- struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
- struct ipv4_hdr *ip;
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt;
+ prox_rte_ipv4_hdr *ip;
uint16_t ether_type = eth_hdr->ether_type;
// Unstack VLAN tags
- while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(struct vlan_hdr) < len)) {
- vlan_hdr = (struct vlan_hdr *)(pkt + *l2_len);
+ while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (*l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
+ vlan_hdr = (prox_rte_vlan_hdr *)(pkt + *l2_len);
*l2_len +=4;
ether_type = vlan_hdr->eth_proto;
}
@@ -161,11 +211,11 @@ static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, ui
case ETYPE_MPLSM:
*l2_len +=4;
break;
+ case ETYPE_IPv6:
case ETYPE_IPv4:
break;
case ETYPE_EoGRE:
case ETYPE_ARP:
- case ETYPE_IPv6:
*l2_len = 0;
break;
default:
@@ -175,8 +225,9 @@ static void parse_l2_l3_len(uint8_t *pkt, uint16_t *l2_len, uint16_t *l3_len, ui
}
if (*l2_len) {
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + *l2_len);
- *l3_len = ipv4_get_hdr_len(ip);
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(pkt + *l2_len);
+ if (ip->version_ihl >> 4 == 4)
+ *l3_len = ipv4_get_hdr_len(ip);
}
}
@@ -185,9 +236,20 @@ static void checksum_packet(uint8_t *hdr, struct rte_mbuf *mbuf, struct pkt_temp
uint16_t l2_len = pkt_template->l2_len;
uint16_t l3_len = pkt_template->l3_len;
- if (l2_len) {
- struct ipv4_hdr *ip = (struct ipv4_hdr*)(hdr + l2_len);
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(hdr + l2_len);
+ if (l3_len) {
prox_ip_udp_cksum(mbuf, ip, l2_len, l3_len, cksum_offload);
+ } else if (ip->version_ihl >> 4 == 6) {
+ prox_rte_ipv6_hdr *ip6 = (prox_rte_ipv6_hdr *)(hdr + l2_len);
+ if (ip6->proto == IPPROTO_UDP) {
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(ip6 + 1);
+ udp->dgram_cksum = 0;
+ udp->dgram_cksum = rte_ipv6_udptcp_cksum(ip6, udp);
+ } else if (ip6->proto == IPPROTO_TCP) {
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(ip6 + 1);
+ tcp->cksum = 0;
+ tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
+ }
}
}
@@ -261,20 +323,14 @@ static int handle_gen_pcap_bulk(struct task_base *tbase, struct rte_mbuf **mbuf,
return task->base.tx_pkt(&task->base, new_pkts, send_bulk, NULL);
}
-static uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
+static inline uint64_t bytes_to_tsc(struct task_gen *task, uint32_t bytes)
{
- const uint64_t hz = task->hz;
- const uint64_t bytes_per_hz = task->link_speed;
-
- if (bytes_per_hz == UINT64_MAX)
- return 0;
-
- return hz * bytes / bytes_per_hz;
+ return task->bytes_to_tsc[bytes];
}
static uint32_t task_gen_next_pkt_idx(const struct task_gen *task, uint32_t pkt_idx)
{
- return pkt_idx + 1 == task->n_pkts? 0 : pkt_idx + 1;
+ return pkt_idx + 1 >= task->n_pkts? 0 : pkt_idx + 1;
}
static uint32_t task_gen_offset_pkt_idx(const struct task_gen *task, uint32_t offset)
@@ -354,41 +410,168 @@ static void task_gen_apply_all_random_fields(struct task_gen *task, uint8_t **pk
task_gen_apply_random_fields(task, pkt_hdr[i]);
}
-static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy)
+static void task_gen_apply_ranges(struct task_gen *task, uint8_t *pkt_hdr)
{
- *(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy;
+ uint32_t ret;
+ if (!task->n_ranges)
+ return;
+
+ for (uint16_t j = 0; j < task->n_ranges; ++j) {
+ if (unlikely(task->ranges[j].value == task->ranges[j].max))
+ task->ranges[j].value = task->ranges[j].min;
+ else
+ task->ranges[j].value++;
+ ret = rte_bswap32(task->ranges[j].value);
+ uint8_t *pret = (uint8_t*)&ret;
+ rte_memcpy(pkt_hdr + task->ranges[j].offset, pret + 4 - task->ranges[j].range_len, task->ranges[j].range_len);
+ }
+}
+
+static void task_gen_apply_all_ranges(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
+{
+ uint32_t ret;
+ if (!task->n_ranges)
+ return;
+
+ for (uint16_t i = 0; i < count; ++i) {
+ task_gen_apply_ranges(task, pkt_hdr[i]);
+ }
}
-static void task_gen_apply_sig(struct task_gen *task, uint8_t *pkt_hdr)
+static inline uint32_t gcd(uint32_t a, uint32_t b)
{
- *(uint32_t *)(pkt_hdr + task->sig_pos) = task->sig;
+ // Euclidean algorithm
+ uint32_t t;
+ while (b != 0) {
+ t = b;
+ b = a % b;
+ a = t;
+ }
+ return a;
}
-static void task_gen_apply_all_accur_pos(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
+static inline uint32_t lcm(uint32_t a, uint32_t b)
{
- if (!task->accur_pos)
- return;
+ return ((a / gcd(a, b)) * b);
+}
- /* The accuracy of task->pkt_queue_index - 64 is stored in
- packet task->pkt_queue_index. The ID modulo 64 is the
- same. */
- for (uint16_t j = 0; j < count; ++j) {
- if ((mbufs[j]->udata64 & MBUF_ARP) == 0) {
- uint32_t accuracy = task->accur[(task->pkt_queue_index + j) & 63];
- task_gen_apply_accur_pos(task, pkt_hdr[j], accuracy);
+static uint32_t get_n_range_flows(struct task_gen *task)
+{
+ uint32_t t = 1;
+ for (int i = 0; i < task->n_ranges; i++) {
+ t = lcm((task->ranges[i].max - task->ranges[i].min) + 1, t);
+ }
+ return t;
+}
+
+static uint32_t get_n_rand_flows(struct task_gen *task)
+{
+ uint32_t t = 0;
+ for (int i = 0; i < task->n_rands; i++) {
+ t += __builtin_popcount(task->rand[i].rand_mask);
+ }
+ PROX_PANIC(t > 31, "Too many random bits - maximum 31 supported\n");
+ return 1 << t;
+}
+
+//void add_to_hash_table(struct task_gen *task, uint32_t *buffer, uint32_t *idx, uint32_t mask, uint32_t bit_pos, uint32_t val, uint32_t fixed_bits, uint32_t rand_offset) {
+// uint32_t ret_tmp = val | fixed_bits;
+// ret_tmp = rte_bswap32(ret_tmp);
+// uint8_t *pret_tmp = (uint8_t*)&ret_tmp;
+// rte_memcpy(buf + rand_offset, pret_tmp + 4 - rand_len, rand_len);
+//
+// init idx
+// alloc buffer
+// init/alloc hash_table
+//void build_buffer(struct task_gen *task, uint32_t *buffer, uint32_t *idx, uint32_t mask, uint32_t bit_pos, uint32_t val)
+//{
+// if (mask == 0) {
+// buffer[*idx] = val;
+// *idx = (*idx) + 1;
+// return;
+// }
+// build_buffer(task, but, mask >> 1, bit_pos + 1, val);
+// if (mask & 1) {
+// build_buffer(task, but, mask >> 1, bit_pos + 1, val | (1 << bit_pos));
+//}
+
+static void build_flow_table(struct task_gen *task)
+{
+ uint8_t buf[2048], *key_fields;
+ union ipv4_5tuple_host key;
+ struct pkt_template *pkt_template;
+ uint32_t n_range_flows = get_n_range_flows(task);
+ // uint32_t n_rand_flows = get_n_rand_flows(task);
+ // uint32_t n_flows= n_range_flows * n_rand_flows * task->orig_n_pkts;
+ // for (int i = 0; i < task->n_rands; i++) {
+ // build_buffer(task, task->values_buf[i], &task->values_idx[i], task->rand[i].rand_mask, 0, 0);
+ // }
+
+ uint32_t n_flows = n_range_flows * task->orig_n_pkts;
+
+ for (uint32_t k = 0; k < task->orig_n_pkts; k++) {
+ memcpy(buf, task->pkt_template[k].buf, task->pkt_template[k].len);
+ for (uint32_t j = 0; j < n_range_flows; j++) {
+ task_gen_apply_ranges(task, buf);
+ key_fields = buf + sizeof(prox_rte_ether_hdr) + offsetof(prox_rte_ipv4_hdr, time_to_live);
+ key.xmm = _mm_loadu_si128((__m128i*)(key_fields));
+ key.pad0 = key.pad1 = 0;
+ int idx = rte_hash_add_key(task->flow_id_table, (const void *)&key);
+ PROX_PANIC(idx < 0, "Unable to add key in table\n");
+ if (idx >= 0)
+ plog_dbg("Added key %d, %x, %x, %x, %x\n", key.proto, key.ip_src, key.ip_dst, key.port_src, key.port_dst);
+ }
+ }
+}
+
+static int32_t task_gen_get_flow_id(struct task_gen *task, uint8_t *pkt_hdr)
+{
+ int ret = 0;
+ union ipv4_5tuple_host key;
+ uint8_t *hdr = pkt_hdr + sizeof(prox_rte_ether_hdr) + offsetof(prox_rte_ipv4_hdr, time_to_live);
+ // __m128i data = _mm_loadu_si128((__m128i*)(hdr));
+ // key.xmm = _mm_and_si128(data, mask0);
+ key.xmm = _mm_loadu_si128((__m128i*)(hdr));
+ key.pad0 = key.pad1 = 0;
+ ret = rte_hash_lookup(task->flow_id_table, (const void *)&key);
+ if (ret < 0) {
+ plog_err("Flow not found: %d, %x, %x, %x, %x\n", key.proto, key.ip_src, key.ip_dst, key.port_src, key.port_dst);
+ }
+ return ret;
+}
+
+static void task_gen_apply_all_flow_id(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count, int32_t *flow_id)
+{
+ if (task->flow_id_pos) {
+ for (uint16_t j = 0; j < count; ++j) {
+ flow_id[j] = task_gen_get_flow_id(task, pkt_hdr[j]);
+ *(int32_t *)(pkt_hdr[j] + task->flow_id_pos) = flow_id[j];
}
}
}
-static void task_gen_apply_all_sig(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
+static void task_gen_apply_accur_pos(struct task_gen *task, uint8_t *pkt_hdr, uint32_t accuracy)
{
- if (!task->sig_pos)
+ *(uint32_t *)(pkt_hdr + task->accur_pos) = accuracy;
+}
+
+static void task_gen_apply_sig(struct task_gen *task, struct pkt_template *dst)
+{
+ if (task->sig_pos)
+ *(uint32_t *)(dst->buf + task->sig_pos) = task->sig;
+}
+
+static void task_gen_apply_all_accur_pos(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
+{
+ if (!task->accur_pos)
return;
+ /* The accuracy of task->pkt_queue_index - ACCURACY_WINDOW is stored in
+ packet task->pkt_queue_index. The ID modulo ACCURACY_WINDOW is the
+ same. */
for (uint16_t j = 0; j < count; ++j) {
- if ((mbufs[j]->udata64 & MBUF_ARP) == 0) {
- task_gen_apply_sig(task, pkt_hdr[j]);
- }
+ uint32_t accuracy = task->accur[(task->pkt_queue_index + j) & (ACCURACY_WINDOW - 1)];
+ task_gen_apply_accur_pos(task, pkt_hdr[j], accuracy);
}
}
@@ -399,16 +582,34 @@ static void task_gen_apply_unique_id(struct task_gen *task, uint8_t *pkt_hdr, co
*dst = *id;
}
-static void task_gen_apply_all_unique_id(struct task_gen *task, struct rte_mbuf **mbufs, uint8_t **pkt_hdr, uint32_t count)
+static void task_gen_apply_all_unique_id(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count)
{
if (!task->packet_id_pos)
return;
for (uint16_t i = 0; i < count; ++i) {
- if ((mbufs[i]->udata64 & MBUF_ARP) == 0) {
- struct unique_id id;
- unique_id_init(&id, task->generator_id, task->pkt_queue_index++);
- task_gen_apply_unique_id(task, pkt_hdr[i], &id);
+ struct unique_id id;
+ unique_id_init(&id, task->generator_id, task->pkt_queue_index++);
+ task_gen_apply_unique_id(task, pkt_hdr[i], &id);
+ }
+}
+
+static void task_gen_apply_id_in_flows(struct task_gen *task, uint8_t *pkt_hdr, const struct unique_id *id)
+{
+ struct unique_id *dst = (struct unique_id *)(pkt_hdr + task->packet_id_in_flow_pos);
+ *dst = *id;
+}
+
+static void task_gen_apply_all_id_in_flows(struct task_gen *task, uint8_t **pkt_hdr, uint32_t count, int32_t *idx)
+{
+ if (!task->packet_id_in_flow_pos)
+ return;
+
+ for (uint16_t i = 0; i < count; ++i) {
+ struct unique_id id;
+ if (idx[i] >= 0 ) {
+ unique_id_init(&id, task->generator_id, task->flows[idx[i]].packet_id++);
+ task_gen_apply_id_in_flows(task, pkt_hdr[i], &id);
}
}
}
@@ -423,11 +624,9 @@ static void task_gen_checksum_packets(struct task_gen *task, struct rte_mbuf **m
uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - count);
for (uint16_t i = 0; i < count; ++i) {
- if ((mbufs[i]->udata64 & MBUF_ARP) == 0) {
- struct pkt_template *pkt_template = &task->pkt_template[pkt_idx];
- checksum_packet(pkt_hdr[i], mbufs[i], pkt_template, task->cksum_offload);
- pkt_idx = task_gen_next_pkt_idx(task, pkt_idx);
- }
+ struct pkt_template *pkt_template = &task->pkt_template[pkt_idx];
+ checksum_packet(pkt_hdr[i], mbufs[i], pkt_template, task->cksum_offload);
+ pkt_idx = task_gen_next_pkt_idx(task, pkt_idx);
}
}
@@ -447,8 +646,12 @@ static uint64_t task_gen_calc_bulk_duration(struct task_gen *task, uint32_t coun
uint32_t pkt_idx = task_gen_offset_pkt_idx(task, - 1);
struct pkt_template *last_pkt_template = &task->pkt_template[pkt_idx];
uint32_t last_pkt_len = pkt_len_to_wire_size(last_pkt_template->len);
+#ifdef NO_EXTRAPOLATION
+ uint64_t bulk_duration = task->pkt_tsc_offset[count - 1];
+#else
uint64_t last_pkt_duration = bytes_to_tsc(task, last_pkt_len);
uint64_t bulk_duration = task->pkt_tsc_offset[count - 1] + last_pkt_duration;
+#endif
return bulk_duration;
}
@@ -483,6 +686,14 @@ static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr,
simply sleeping until delta_t is zero would leave a period
of silence on the line. The error has been introduced
earlier, but the packets have already been sent. */
+
+ /* This happens typically if previous bulk was delayed
+ by an interrupt e.g. (with Time in nsec)
+ Time x: sleep 4 microsec
+ Time x+4000: send 64 packets (64 packets as 4000 nsec, w/ 10Gbps 64 bytes)
+ Time x+5000: send 16 packets (16 packets as 1000 nsec)
+ When we send the 16 packets, the 64 earlier packets are not yet
+ fully sent */
if (tx_tsc < task->earliest_tsc_next_pkt)
delta_t = task->earliest_tsc_next_pkt - tx_tsc;
else
@@ -491,12 +702,10 @@ static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr,
for (uint16_t i = 0; i < count; ++i) {
uint32_t *pos = (uint32_t *)(pkt_hdr[i] + task->lat_pos);
const uint64_t pkt_tsc = tx_tsc + delta_t + task->pkt_tsc_offset[i];
-
*pos = pkt_tsc >> LATENCY_ACCURACY;
}
uint64_t bulk_duration = task_gen_calc_bulk_duration(task, count);
-
task->earliest_tsc_next_pkt = tx_tsc + delta_t + bulk_duration;
write_tsc_after = rte_rdtsc();
task->write_duration_estimate = write_tsc_after - write_tsc_before;
@@ -506,6 +715,7 @@ static uint64_t task_gen_write_latency(struct task_gen *task, uint8_t **pkt_hdr,
do {
tsc_before_tx = rte_rdtsc();
} while (tsc_before_tx < tx_tsc);
+
return tsc_before_tx;
}
@@ -518,7 +728,7 @@ static void task_gen_store_accuracy(struct task_gen *task, uint32_t count, uint6
uint64_t first_accuracy_idx = task->pkt_queue_index - count;
for (uint32_t i = 0; i < count; ++i) {
- uint32_t accuracy_idx = (first_accuracy_idx + i) & 63;
+ uint32_t accuracy_idx = (first_accuracy_idx + i) & (ACCURACY_WINDOW - 1);
task->accur[accuracy_idx] = accur;
}
@@ -542,20 +752,218 @@ static void task_gen_build_packets(struct task_gen *task, struct rte_mbuf **mbuf
struct pkt_template *pktpl = &task->pkt_template[task->pkt_idx];
struct pkt_template *pkt_template = &task->pkt_template[task->pkt_idx];
pkt_template_init_mbuf(pkt_template, mbufs[i], pkt_hdr[i]);
- mbufs[i]->udata64 = task->pkt_idx & TEMPLATE_INDEX_MASK;
- struct ether_hdr *hdr = (struct ether_hdr *)pkt_hdr[i];
+ prox_rte_ether_hdr *hdr = (prox_rte_ether_hdr *)pkt_hdr[i];
if (task->lat_enabled) {
+#ifdef NO_EXTRAPOLATION
+ task->pkt_tsc_offset[i] = 0;
+#else
task->pkt_tsc_offset[i] = bytes_to_tsc(task, will_send_bytes);
+#endif
will_send_bytes += pkt_len_to_wire_size(pkt_template->len);
}
task->pkt_idx = task_gen_next_pkt_idx(task, task->pkt_idx);
}
}
+static int task_gen_allocate_templates(struct task_gen *task, uint32_t orig_nb_pkts, uint32_t nb_pkts, int do_panic, int pcap)
+{
+ size_t mem_size = nb_pkts * sizeof(*task->pkt_template);
+ size_t orig_mem_size = orig_nb_pkts * sizeof(*task->pkt_template);
+ task->pkt_template = prox_zmalloc(mem_size, task->socket_id);
+ task->pkt_template_orig = prox_zmalloc(orig_mem_size, task->socket_id);
+
+ if (task->pkt_template == NULL || task->pkt_template_orig == NULL) {
+ plog_err_or_panic(do_panic, "Failed to allocate %lu bytes (in huge pages) for %s\n", mem_size, pcap ? "pcap file":"packet template");
+ return -1;
+ }
+
+ for (size_t i = 0; i < orig_nb_pkts; i++) {
+ task->pkt_template_orig[i].buf = prox_zmalloc(task->max_frame_size, task->socket_id);
+ if (task->pkt_template_orig[i].buf == NULL) {
+ plog_err_or_panic(do_panic, "Failed to allocate %u bytes (in huge pages) for %s\n", task->max_frame_size, pcap ? "packet from pcap": "packet");
+ return -1;
+ }
+ }
+ for (size_t i = 0; i < nb_pkts; i++) {
+ task->pkt_template[i].buf = prox_zmalloc(task->max_frame_size, task->socket_id);
+ if (task->pkt_template[i].buf == NULL) {
+ plog_err_or_panic(do_panic, "Failed to allocate %u bytes (in huge pages) for %s\n", task->max_frame_size, pcap ? "packet from pcap": "packet");
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int task_gen_reallocate_templates(struct task_gen *task, uint32_t nb_pkts, int do_panic)
+{
+ // Need to free up bufs allocated in previous (longer) imix
+ for (size_t i = nb_pkts; i < task->n_pkts; i++) {
+ if (task->pkt_template[i].buf) {
+ rte_free(task->pkt_template[i].buf);
+ task->pkt_template[i].buf = NULL;
+ }
+ }
+
+ size_t mem_size = nb_pkts * sizeof(*task->pkt_template);
+ size_t old_mem_size = task->n_pkts * sizeof(*task->pkt_template);
+ if (old_mem_size > mem_size)
+ old_mem_size = mem_size;
+
+ struct pkt_template *ptr;
+
+ // re-allocate memory for new pkt_template (this might allocate additional memory or free up some...)
+ if ((ptr = rte_malloc_socket(NULL, mem_size, RTE_CACHE_LINE_SIZE, task->socket_id)) != NULL) {
+ memcpy(ptr, task->pkt_template, old_mem_size);
+ rte_free(task->pkt_template);
+ task->pkt_template = ptr;
+ } else {
+ plog_err_or_panic(do_panic, "Failed to allocate %lu bytes (in huge pages) for packet template for IMIX\n", mem_size);
+ return -1;
+ }
+
+ // Need to allocate bufs for new template but no need to reallocate for existing ones
+ for (size_t i = task->n_pkts; i < nb_pkts; ++i) {
+ task->pkt_template[i].buf = prox_zmalloc(task->max_frame_size, task->socket_id);
+ if (task->pkt_template[i].buf == NULL) {
+ plog_err_or_panic(do_panic, "Failed to allocate %u bytes (in huge pages) for packet %zd in IMIX\n", task->max_frame_size, i);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic)
+{
+ const uint16_t min_len = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr);
+ const uint16_t max_len = task->max_frame_size;
+
+ if (do_panic) {
+ PROX_PANIC(pkt_size == 0, "Invalid packet size length (no packet defined?)\n");
+ PROX_PANIC(pkt_size > max_len, "pkt_size out of range (must be <= %u)\n", max_len);
+ PROX_PANIC(pkt_size < min_len, "pkt_size out of range (must be >= %u)\n", min_len);
+ return 0;
+ } else {
+ if (pkt_size == 0) {
+ plog_err("Invalid packet size length (no packet defined?)\n");
+ return -1;
+ }
+ if (pkt_size > max_len) {
+ if (pkt_size > PROX_RTE_ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4)
+ plog_err("pkt_size too high and jumbo frames disabled\n");
+ else
+ plog_err("pkt_size out of range (must be <= (mtu=%u))\n", max_len);
+ return -1;
+ }
+ if (pkt_size < min_len) {
+ plog_err("pkt_size out of range (must be >= %u)\n", min_len);
+ return -1;
+ }
+ return 0;
+ }
+}
+
+static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int do_panic)
+{
+ if (task->lat_enabled) {
+ uint32_t pos_beg = task->lat_pos;
+ uint32_t pos_end = task->lat_pos + 3U;
+
+ if (do_panic)
+ PROX_PANIC(pkt_size <= pos_end, "Writing latency at %u-%u, but packet size is %u bytes\n",
+ pos_beg, pos_end, pkt_size);
+ else if (pkt_size <= pos_end) {
+ plog_err("Writing latency at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
+ return -1;
+ }
+ }
+ if (task->packet_id_pos) {
+ uint32_t pos_beg = task->packet_id_pos;
+ uint32_t pos_end = task->packet_id_pos + 4U;
+
+ if (do_panic)
+ PROX_PANIC(pkt_size <= pos_end, "Writing packet at %u-%u, but packet size is %u bytes\n",
+ pos_beg, pos_end, pkt_size);
+ else if (pkt_size <= pos_end) {
+ plog_err("Writing packet at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
+ return -1;
+ }
+ }
+ if (task->accur_pos) {
+ uint32_t pos_beg = task->accur_pos;
+ uint32_t pos_end = task->accur_pos + 3U;
+
+ if (do_panic)
+ PROX_PANIC(pkt_size <= pos_end, "Writing accuracy at %u-%u, but packet size is %u bytes\n",
+ pos_beg, pos_end, pkt_size);
+ else if (pkt_size <= pos_end) {
+ plog_err("Writing accuracy at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int task_gen_set_eth_ip_udp_sizes(struct task_gen *task, uint32_t orig_n_pkts, uint32_t nb_pkt_sizes, uint32_t *pkt_sizes)
+{
+ size_t k;
+ uint32_t l4_len;
+ prox_rte_ipv4_hdr *ip;
+ struct pkt_template *template;
+
+ for (size_t j = 0; j < nb_pkt_sizes; ++j) {
+ for (size_t i = 0; i < orig_n_pkts; ++i) {
+ k = j * orig_n_pkts + i;
+ template = &task->pkt_template[k];
+ if (template->l2_len == 0)
+ continue;
+ ip = (prox_rte_ipv4_hdr *)(template->buf + template->l2_len);
+ ip->total_length = rte_bswap16(pkt_sizes[j] - template->l2_len);
+ l4_len = pkt_sizes[j] - template->l2_len - template->l3_len;
+ ip->hdr_checksum = 0;
+ prox_ip_cksum_sw(ip);
+
+ if (ip->next_proto_id == IPPROTO_UDP) {
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t *)ip) + template->l3_len);
+ udp->dgram_len = rte_bswap16(l4_len);
+ prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr);
+ } else if (ip->next_proto_id == IPPROTO_TCP) {
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t *)ip) + template->l3_len);
+ prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr);
+ }
+ }
+ }
+ return 0;
+}
+
+static int task_gen_apply_imix(struct task_gen *task, int do_panic)
+{
+ struct pkt_template *ptr;
+ int rc;
+ task->imix_nb_pkts = task->new_imix_nb_pkts;
+ uint32_t n_pkts = task->imix_nb_pkts * task->orig_n_pkts;
+
+ if ((n_pkts != task->n_pkts) && ((rc = task_gen_reallocate_templates(task, n_pkts, do_panic)) < 0))
+ return rc;
+
+ task->n_pkts = n_pkts;
+ if (task->pkt_idx >= n_pkts)
+ task->pkt_idx = 0;
+ task_gen_set_pkt_templates_len(task, task->imix_pkt_sizes);
+ task_gen_reset_pkt_templates_content(task);
+ task_gen_pkt_template_recalc_metadata(task);
+ check_all_pkt_size(task, DO_NOT_PANIC);
+ check_all_fields_in_bounds(task, DO_NOT_PANIC);
+ task_gen_set_eth_ip_udp_sizes(task, task->orig_n_pkts, task->imix_nb_pkts, task->imix_pkt_sizes);
+ return 0;
+}
+
static void task_gen_update_config(struct task_gen *task)
{
if (task->token_time.cfg.bpp != task->new_rate_bps)
task_gen_reset_token_time(task);
+ if (task->new_imix_nb_pkts)
+ task_gen_apply_imix(task, DO_NOT_PANIC);
+ task->new_imix_nb_pkts = 0;
}
static inline void build_value(struct task_gen *task, uint32_t mask, int bit_pos, uint32_t val, uint32_t fixed_bits)
@@ -570,45 +978,86 @@ static inline void build_value(struct task_gen *task, uint32_t mask, int bit_pos
register_ip_to_ctrl_plane(tbase->l3.tmaster, rte_cpu_to_be_32(val | fixed_bits), tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
}
}
+
+static inline void build_value_ipv6(struct task_gen *task, uint32_t mask, int var_bit_pos, int init_var_bit_pos, struct ipv6_addr val, struct ipv6_addr fixed_bits)
+{
+ struct task_base *tbase = (struct task_base *)task;
+ if (var_bit_pos < 32) {
+ build_value_ipv6(task, mask >> 1, var_bit_pos + 1, init_var_bit_pos, val, fixed_bits);
+ if (mask & 1) {
+ int byte_pos = (var_bit_pos + init_var_bit_pos) / 8;
+ int bit_pos = (var_bit_pos + init_var_bit_pos) % 8;
+ val.bytes[byte_pos] = val.bytes[byte_pos] | (1 << bit_pos);
+ build_value_ipv6(task, mask >> 1, var_bit_pos + 1, init_var_bit_pos, val, fixed_bits);
+ }
+ } else {
+ for (uint i = 0; i < sizeof(struct ipv6_addr) / 8; i++)
+ val.bytes[i] = val.bytes[i] | fixed_bits.bytes[i];
+ register_node_to_ctrl_plane(tbase->l3.tmaster, &null_addr, &val, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
+ }
+}
+
static inline void register_all_ip_to_ctrl_plane(struct task_gen *task)
{
struct task_base *tbase = (struct task_base *)task;
int i, len, fixed;
unsigned int offset;
- uint32_t mask;
+ uint32_t mask, ip_len;
+ struct ipv6_addr *ip6_src = NULL;
+ uint32_t *ip_src;
for (uint32_t i = 0; i < task->n_pkts; ++i) {
struct pkt_template *pktpl = &task->pkt_template[i];
unsigned int ip_src_pos = 0;
- int maybe_ipv4 = 0;
- unsigned int l2_len = sizeof(struct ether_hdr);
+ int ipv4 = 0;
+ unsigned int l2_len = sizeof(prox_rte_ether_hdr);
uint8_t *pkt = pktpl->buf;
- struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
+ prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt;
uint16_t ether_type = eth_hdr->ether_type;
- struct vlan_hdr *vlan_hdr;
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ipv4_hdr *ip;
// Unstack VLAN tags
- while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < pktpl->len)) {
- vlan_hdr = (struct vlan_hdr *)(pkt + l2_len);
+ while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < pktpl->len)) {
+ vlan_hdr = (prox_rte_vlan_hdr *)(pkt + l2_len);
l2_len +=4;
ether_type = vlan_hdr->eth_proto;
}
if ((ether_type == ETYPE_MPLSU) || (ether_type == ETYPE_MPLSM)) {
l2_len +=4;
- maybe_ipv4 = 1;
- }
- if ((ether_type != ETYPE_IPv4) && !maybe_ipv4)
+ ip = (prox_rte_ipv4_hdr *)(pkt + l2_len);
+ if (ip->version_ihl >> 4 == 4)
+ ipv4 = 1;
+ else if (ip->version_ihl >> 4 != 6) // Version field at same location for IPv4 and IPv6
+ continue;
+ } else if (ether_type == ETYPE_IPv4) {
+ ip = (prox_rte_ipv4_hdr *)(pkt + l2_len);
+ PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4); // Invalid Packet
+ ipv4 = 1;
+ } else if (ether_type == ETYPE_IPv6) {
+ ip = (prox_rte_ipv4_hdr *)(pkt + l2_len);
+ PROX_PANIC(ip->version_ihl >> 4 != 6, "IPv6 ether_type but IP version = %d != 6", ip->version_ihl >> 4); // Invalid Packet
+ } else {
continue;
+ }
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(pkt + l2_len);
- PROX_PANIC(ip->version_ihl >> 4 != 4, "IPv4 ether_type but IP version = %d != 4", ip->version_ihl >> 4);
-
- // Even if IPv4 header contains options, options are after ip src and dst
- ip_src_pos = l2_len + sizeof(struct ipv4_hdr) - 2 * sizeof(uint32_t);
- uint32_t *ip_src = ((uint32_t *)(pktpl->buf + ip_src_pos));
- plog_info("\tip_src_pos = %d, ip_src = %x\n", ip_src_pos, *ip_src);
- register_ip_to_ctrl_plane(tbase->l3.tmaster, *ip_src, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
+ PROX_PANIC(ipv4 && ((prox_cfg.flags & DSF_L3_ENABLED) == 0), "Trying to generate an IPv4 packet in NDP mode => not supported\n");
+ PROX_PANIC((ipv4 == 0) && ((prox_cfg.flags & DSF_NDP_ENABLED) == 0), "Trying to generate an IPv6 packet in L3 (IPv4) mode => not supported\n");
+ if (ipv4) {
+ // Even if IPv4 header contains options, options are after ip src and dst
+ ip_src_pos = l2_len + sizeof(prox_rte_ipv4_hdr) - 2 * sizeof(uint32_t);
+ ip_src = ((uint32_t *)(pktpl->buf + ip_src_pos));
+ plog_info("\tip_src_pos = %d, ip_src = %x\n", ip_src_pos, *ip_src);
+ register_ip_to_ctrl_plane(tbase->l3.tmaster, *ip_src, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
+ ip_len = sizeof(uint32_t);
+ } else {
+ ip_src_pos = l2_len + sizeof(prox_rte_ipv6_hdr) - 2 * sizeof(struct ipv6_addr);
+ ip6_src = ((struct ipv6_addr *)(pktpl->buf + ip_src_pos));
+ plog_info("\tip_src_pos = %d, ip6_src = "IPv6_BYTES_FMT"\n", ip_src_pos, IPv6_BYTES(ip6_src->bytes));
+ register_node_to_ctrl_plane(tbase->l3.tmaster, ip6_src, &null_addr, tbase->l3.reachable_port_id, tbase->l3.core_id, tbase->l3.task_id);
+ ip_len = sizeof(struct ipv6_addr);
+ }
for (int j = 0; j < task->n_rands; j++) {
offset = task->rand[j].rand_offset;
@@ -616,7 +1065,12 @@ static inline void register_all_ip_to_ctrl_plane(struct task_gen *task)
mask = task->rand[j].rand_mask;
fixed = task->rand[j].fixed_bits;
plog_info("offset = %d, len = %d, mask = %x, fixed = %x\n", offset, len, mask, fixed);
- if ((offset < ip_src_pos + 4) && (offset + len >= ip_src_pos)) {
+ if (offset >= ip_src_pos + ip_len) // First random bit after IP
+ continue;
+ if (offset + len < ip_src_pos) // Last random bit before IP
+ continue;
+
+ if (ipv4) {
if (offset >= ip_src_pos) {
int32_t ip_src_mask = (1 << (4 + ip_src_pos - offset) * 8) - 1;
mask = mask & ip_src_mask;
@@ -628,6 +1082,28 @@ static inline void register_all_ip_to_ctrl_plane(struct task_gen *task)
fixed = (fixed << bits) | (rte_be_to_cpu_32(*ip_src) & ((1 << bits) - 1));
build_value(task, mask, 0, 0, fixed);
}
+ } else {
+ // We do not support when random partially covers IP - either starting before or finishing after
+ if (offset + len >= ip_src_pos + ip_len) { // len over the ip
+ plog_err("Not supported: random_offset = %d, random_len = %d, ip_src_pos = %d, ip_len = %d\n", offset, len, ip_src_pos, ip_len);
+ continue;
+ }
+ if (offset < ip_src_pos) {
+ plog_err("Not supported: random_offset = %d, random_len = %d, ip_src_pos = %d, ip_len = %d\n", offset, len, ip_src_pos, ip_len);
+ continue;
+ }
+ // Even for IPv6 the random mask supported by PROX are 32 bits only
+ struct ipv6_addr fixed_ipv6;
+ uint init_var_byte_pos = (offset - ip_src_pos);
+ for (uint i = 0; i < sizeof(struct ipv6_addr); i++) {
+ if (i < init_var_byte_pos)
+ fixed_ipv6.bytes[i] = ip6_src->bytes[i];
+ else if (i < init_var_byte_pos + len)
+ fixed_ipv6.bytes[i] = (fixed >> (i - init_var_byte_pos)) & 0xFF;
+ else
+ fixed_ipv6.bytes[i] = ip6_src->bytes[i];
+ }
+ build_value_ipv6(task, mask, 0, init_var_byte_pos * 8, null_addr, fixed_ipv6);
}
}
}
@@ -641,16 +1117,6 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
int i, j;
- // If link is down, link_speed is 0
- if (unlikely(task->link_speed == 0)) {
- if (task->port && task->port->link_speed != 0) {
- task->link_speed = task->port->link_speed * 125000L;
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- } else
- return 0;
- }
-
task_gen_update_config(task);
if (task->pkt_count == 0) {
@@ -674,20 +1140,47 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
if (new_pkts == NULL)
return 0;
uint8_t *pkt_hdr[MAX_RING_BURST];
-
+ int32_t flow_id[MAX_RING_BURST];
task_gen_load_and_prefetch(new_pkts, pkt_hdr, send_bulk);
task_gen_build_packets(task, new_pkts, pkt_hdr, send_bulk);
task_gen_apply_all_random_fields(task, pkt_hdr, send_bulk);
- task_gen_apply_all_accur_pos(task, new_pkts, pkt_hdr, send_bulk);
- task_gen_apply_all_sig(task, new_pkts, pkt_hdr, send_bulk);
- task_gen_apply_all_unique_id(task, new_pkts, pkt_hdr, send_bulk);
+ task_gen_apply_all_ranges(task, pkt_hdr, send_bulk);
+ task_gen_apply_all_accur_pos(task, pkt_hdr, send_bulk);
+ task_gen_apply_all_flow_id(task, pkt_hdr, send_bulk, flow_id);
+ task_gen_apply_all_unique_id(task, pkt_hdr, send_bulk);
+ task_gen_apply_all_id_in_flows(task, pkt_hdr, send_bulk, flow_id);
uint64_t tsc_before_tx;
tsc_before_tx = task_gen_write_latency(task, pkt_hdr, send_bulk);
task_gen_checksum_packets(task, new_pkts, pkt_hdr, send_bulk);
+ if (task->store_msk) {
+ for (uint32_t i = 0; i < send_bulk; i++) {
+ if (out[i] != OUT_DISCARD) {
+ uint8_t *hdr;
+ hdr = (uint8_t *)rte_pktmbuf_mtod(new_pkts[i], prox_rte_ether_hdr *);
+ memcpy(&task->store_buf[task->store_pkt_id & task->store_msk].buf, hdr, rte_pktmbuf_pkt_len(new_pkts[i]));
+ task->store_buf[task->store_pkt_id & task->store_msk].len = rte_pktmbuf_pkt_len(new_pkts[i]);
+ task->store_pkt_id++;
+ }
+ }
+ }
ret = task->base.tx_pkt(&task->base, new_pkts, send_bulk, out);
task_gen_store_accuracy(task, send_bulk, tsc_before_tx);
+
+ // If we failed to send some packets, we need to do some clean-up:
+
+ if (unlikely(ret)) {
+ // We need re-use the packets indexes not being sent
+ // Hence non-sent packets will not be considered as lost by the receiver when it looks at
+ // packet ids. This should also increase the percentage of packets used for latency measurements
+ task->pkt_queue_index -= ret;
+
+ // In case of failures, the estimate about when we can send next packet (earliest_tsc_next_pkt) is wrong
+ // This would result in under-estimated latency (up to 0 or negative)
+ uint64_t bulk_duration = task_gen_calc_bulk_duration(task, ret);
+ task->earliest_tsc_next_pkt -= bulk_duration;
+ }
return ret;
}
@@ -697,14 +1190,17 @@ static void init_task_gen_seeds(struct task_gen *task)
random_init_seed(&task->rand[i].state);
}
-static uint32_t pcap_count_pkts(pcap_t *handle)
+static uint32_t pcap_count_pkts(pcap_t *handle, uint32_t *max_frame_size)
{
struct pcap_pkthdr header;
const uint8_t *buf;
uint32_t ret = 0;
+ *max_frame_size = 0;
long pkt1_fpos = ftell(pcap_file(handle));
while ((buf = pcap_next(handle, &header))) {
+ if (header.len > *max_frame_size)
+ *max_frame_size = header.len;
ret++;
}
int ret2 = fseek(pcap_file(handle), pkt1_fpos, SEEK_SET);
@@ -721,7 +1217,7 @@ static uint64_t avg_time_stamp(uint64_t *time_stamp, uint32_t n)
return (tot_inter_pkt + n / 2)/n;
}
-static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts, struct pkt_template *proto, uint64_t *time_stamp)
+static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts, struct pkt_template *proto, uint64_t *time_stamp, uint32_t max_frame_size)
{
struct pcap_pkthdr header;
const uint8_t *buf;
@@ -732,7 +1228,7 @@ static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts
PROX_PANIC(buf == NULL, "Failed to read packet %d from pcap %s\n", i, file_name);
proto[i].len = header.len;
- len = RTE_MIN(header.len, sizeof(proto[i].buf));
+ len = RTE_MIN(header.len, max_frame_size);
if (header.len > len)
plogx_warn("Packet truncated from %u to %zu bytes\n", header.len, len);
@@ -764,33 +1260,6 @@ static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts
return 0;
}
-static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic)
-{
- const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
- const uint16_t max_len = ETHER_MAX_LEN - 4;
-
- if (do_panic) {
- PROX_PANIC(pkt_size == 0, "Invalid packet size length (no packet defined?)\n");
- PROX_PANIC(pkt_size > max_len, "pkt_size out of range (must be <= %u)\n", max_len);
- PROX_PANIC(pkt_size < min_len, "pkt_size out of range (must be >= %u)\n", min_len);
- return 0;
- } else {
- if (pkt_size == 0) {
- plog_err("Invalid packet size length (no packet defined?)\n");
- return -1;
- }
- if (pkt_size > max_len) {
- plog_err("pkt_size out of range (must be <= %u)\n", max_len);
- return -1;
- }
- if (pkt_size < min_len) {
- plog_err("pkt_size out of range (must be >= %u)\n", min_len);
- return -1;
- }
- return 0;
- }
-}
-
static int check_all_pkt_size(struct task_gen *task, int do_panic)
{
int rc;
@@ -801,43 +1270,12 @@ static int check_all_pkt_size(struct task_gen *task, int do_panic)
return 0;
}
-static int check_fields_in_bounds(struct task_gen *task, uint32_t pkt_size, int do_panic)
+static int check_all_fields_in_bounds(struct task_gen *task, int do_panic)
{
- if (task->lat_enabled) {
- uint32_t pos_beg = task->lat_pos;
- uint32_t pos_end = task->lat_pos + 3U;
-
- if (do_panic)
- PROX_PANIC(pkt_size <= pos_end, "Writing latency at %u-%u, but packet size is %u bytes\n",
- pos_beg, pos_end, pkt_size);
- else if (pkt_size <= pos_end) {
- plog_err("Writing latency at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
- return -1;
- }
- }
- if (task->packet_id_pos) {
- uint32_t pos_beg = task->packet_id_pos;
- uint32_t pos_end = task->packet_id_pos + 4U;
-
- if (do_panic)
- PROX_PANIC(pkt_size <= pos_end, "Writing packet at %u-%u, but packet size is %u bytes\n",
- pos_beg, pos_end, pkt_size);
- else if (pkt_size <= pos_end) {
- plog_err("Writing packet at %u-%u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
- return -1;
- }
- }
- if (task->accur_pos) {
- uint32_t pos_beg = task->accur_pos;
- uint32_t pos_end = task->accur_pos + 3U;
-
- if (do_panic)
- PROX_PANIC(pkt_size <= pos_end, "Writing accuracy at %u%-u, but packet size is %u bytes\n",
- pos_beg, pos_end, pkt_size);
- else if (pkt_size <= pos_end) {
- plog_err("Writing accuracy at %u%-u, but packet size is %u bytes\n", pos_beg, pos_end, pkt_size);
- return -1;
- }
+ int rc;
+ for (uint32_t i = 0; i < task->n_pkts;++i) {
+ if ((rc = check_fields_in_bounds(task, task->pkt_template[i].len, do_panic)) != 0)
+ return rc;
}
return 0;
}
@@ -855,25 +1293,37 @@ static void task_gen_pkt_template_recalc_metadata(struct task_gen *task)
static void task_gen_pkt_template_recalc_checksum(struct task_gen *task)
{
struct pkt_template *template;
- struct ipv4_hdr *ip;
+ prox_rte_ipv4_hdr *ip;
task->runtime_checksum_needed = 0;
for (size_t i = 0; i < task->n_pkts; ++i) {
template = &task->pkt_template[i];
if (template->l2_len == 0)
continue;
- ip = (struct ipv4_hdr *)(template->buf + template->l2_len);
-
- ip->hdr_checksum = 0;
- prox_ip_cksum_sw(ip);
- uint32_t l4_len = rte_bswap16(ip->total_length) - template->l3_len;
-
- if (ip->next_proto_id == IPPROTO_UDP) {
- struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t *)ip) + template->l3_len);
- prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr);
- } else if (ip->next_proto_id == IPPROTO_TCP) {
- struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t *)ip) + template->l3_len);
- prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr);
+ ip = (prox_rte_ipv4_hdr *)(template->buf + template->l2_len);
+ if (ip->version_ihl >> 4 == 4) {
+ ip->hdr_checksum = 0;
+ prox_ip_cksum_sw(ip);
+ uint32_t l4_len = rte_bswap16(ip->total_length) - template->l3_len;
+ if (ip->next_proto_id == IPPROTO_UDP) {
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t *)ip) + template->l3_len);
+ prox_udp_cksum_sw(udp, l4_len, ip->src_addr, ip->dst_addr);
+ } else if (ip->next_proto_id == IPPROTO_TCP) {
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t *)ip) + template->l3_len);
+ prox_tcp_cksum_sw(tcp, l4_len, ip->src_addr, ip->dst_addr);
+ }
+ } else if (ip->version_ihl >> 4 == 6) {
+ prox_rte_ipv6_hdr *ip6;
+ ip6 = (prox_rte_ipv6_hdr *)(template->buf + template->l2_len);
+ if (ip6->proto == IPPROTO_UDP) {
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(ip6 + 1);
+ udp->dgram_cksum = 0;
+ udp->dgram_cksum = rte_ipv6_udptcp_cksum(ip6, udp);
+ } else if (ip6->proto == IPPROTO_TCP) {
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(ip6 + 1);
+ tcp->cksum = 0;
+ tcp->cksum = rte_ipv6_udptcp_cksum(ip6, tcp);
+ }
}
/* The current implementation avoids checksum
@@ -895,14 +1345,28 @@ static void task_gen_pkt_template_recalc_all(struct task_gen *task)
task_gen_pkt_template_recalc_checksum(task);
}
+static void task_gen_set_pkt_templates_len(struct task_gen *task, uint32_t *pkt_sizes)
+{
+ struct pkt_template *src, *dst;
+
+ for (size_t j = 0; j < task->n_pkts / task->orig_n_pkts; ++j) {
+ for (size_t i = 0; i < task->orig_n_pkts; ++i) {
+ dst = &task->pkt_template[j * task->orig_n_pkts + i];
+ dst->len = pkt_sizes[j];
+ }
+ }
+}
+
static void task_gen_reset_pkt_templates_len(struct task_gen *task)
{
struct pkt_template *src, *dst;
- for (size_t i = 0; i < task->n_pkts; ++i) {
- src = &task->pkt_template_orig[i];
- dst = &task->pkt_template[i];
- dst->len = src->len;
+ for (size_t j = 0; j < task->n_pkts / task->orig_n_pkts; ++j) {
+ for (size_t i = 0; i < task->orig_n_pkts; ++i) {
+ src = &task->pkt_template_orig[i];
+ dst = &task->pkt_template[j * task->orig_n_pkts + i];
+ dst->len = src->len;
+ }
}
}
@@ -910,82 +1374,105 @@ static void task_gen_reset_pkt_templates_content(struct task_gen *task)
{
struct pkt_template *src, *dst;
- for (size_t i = 0; i < task->n_pkts; ++i) {
- src = &task->pkt_template_orig[i];
- dst = &task->pkt_template[i];
- memcpy(dst->buf, src->buf, dst->len);
+ for (size_t j = 0; j < task->n_pkts / task->orig_n_pkts; ++j) {
+ for (size_t i = 0; i < task->orig_n_pkts; ++i) {
+ src = &task->pkt_template_orig[i];
+ dst = &task->pkt_template[j * task->orig_n_pkts + i];
+ memcpy(dst->buf, src->buf, RTE_MAX(src->len, dst->len));
+ if (task->flags & TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC) {
+ rte_memcpy(&dst->buf[sizeof(prox_rte_ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr));
+ }
+ task_gen_apply_sig(task, dst);
+ }
}
}
static void task_gen_reset_pkt_templates(struct task_gen *task)
{
- task_gen_reset_pkt_templates_len(task);
+ if (task->imix_nb_pkts)
+ task_gen_set_pkt_templates_len(task, task->imix_pkt_sizes);
+ else
+ task_gen_reset_pkt_templates_len(task);
task_gen_reset_pkt_templates_content(task);
task_gen_pkt_template_recalc_all(task);
}
static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_args *targ)
{
- const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
-
- if (targ->pkt_size > sizeof(task->pkt_template[0].buf))
- targ->pkt_size = sizeof(task->pkt_template[0].buf);
- task->n_pkts = 1;
-
- size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
- task->pkt_template = prox_zmalloc(mem_size, socket_id);
- task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
+ int rc;
- PROX_PANIC(task->pkt_template == NULL ||
- task->pkt_template_orig == NULL,
- "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
+ task->orig_n_pkts = 1;
+ if (task->imix_nb_pkts == 0) {
+ task->n_pkts = 1;
+ task->imix_pkt_sizes[0] = targ->pkt_size;
+ } else {
+ task->n_pkts = task->imix_nb_pkts;
+ }
+ task_gen_allocate_templates(task, task->orig_n_pkts, task->n_pkts, DO_PANIC, NOT_FROM_PCAP);
- rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size);
- task->pkt_template_orig[0].len = targ->pkt_size;
+ rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, task->max_frame_size);
+ task->pkt_template_orig[0].len = task->imix_pkt_sizes[0];
task_gen_reset_pkt_templates(task);
- check_all_pkt_size(task, 1);
- check_fields_in_bounds(task, task->pkt_template[0].len, 1);
+ check_all_pkt_size(task, DO_PANIC);
+ check_all_fields_in_bounds(task, DO_PANIC);
+
+ // If IMIX was not specified then pkt_size is specified using pkt_size parameter or the length of pkt_inline
+ // In that case, for backward compatibility, we do NOT adapt the length of IP and UDP to the length of the packet
+ task_gen_set_eth_ip_udp_sizes(task, task->orig_n_pkts, task->imix_nb_pkts, task->imix_pkt_sizes);
}
static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *targ)
{
- const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
char err[PCAP_ERRBUF_SIZE];
+ uint32_t max_frame_size;
pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
- task->n_pkts = pcap_count_pkts(handle);
- plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
+ task->orig_n_pkts = pcap_count_pkts(handle, &max_frame_size);
+ plogx_info("%u packets in pcap file '%s'; max frame size=%d\n", task->orig_n_pkts, targ->pcap_file, max_frame_size);
+ PROX_PANIC(max_frame_size > task->max_frame_size,
+ max_frame_size > PROX_RTE_ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE -4 ?
+ "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
if (targ->n_pkts)
- task->n_pkts = RTE_MIN(task->n_pkts, targ->n_pkts);
- PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
+ task->orig_n_pkts = RTE_MIN(task->orig_n_pkts, targ->n_pkts);
+ if (task->imix_nb_pkts == 0) {
+ task->n_pkts = task->orig_n_pkts;
+ } else {
+ task->n_pkts = task->imix_nb_pkts * task->orig_n_pkts;
+ }
+ task_gen_allocate_templates(task, task->orig_n_pkts, task->n_pkts, DO_PANIC, FROM_PCAP);
plogx_info("Loading %u packets from pcap\n", task->n_pkts);
- size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
- task->pkt_template = prox_zmalloc(mem_size, socket_id);
- task->pkt_template_orig = prox_zmalloc(mem_size, socket_id);
- PROX_PANIC(task->pkt_template == NULL ||
- task->pkt_template_orig == NULL,
- "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
-
- pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->pkt_template_orig, NULL);
+
+ pcap_read_pkts(handle, targ->pcap_file, task->orig_n_pkts, task->pkt_template_orig, NULL, max_frame_size);
pcap_close(handle);
task_gen_reset_pkt_templates(task);
+ check_all_pkt_size(task, DO_PANIC);
+ check_all_fields_in_bounds(task, DO_PANIC);
+ task_gen_set_eth_ip_udp_sizes(task, task->orig_n_pkts, task->imix_nb_pkts, task->imix_pkt_sizes);
}
-static struct rte_mempool *task_gen_create_mempool(struct task_args *targ)
+static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint16_t max_frame_size)
{
static char name[] = "gen_pool";
struct rte_mempool *ret;
const int sock_id = rte_lcore_to_socket_id(targ->lconf->id);
name[0]++;
- ret = rte_mempool_create(name, targ->nb_mbuf - 1, MBUF_SIZE,
+ uint32_t mbuf_size = TX_MBUF_SIZE;
+ if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
+ mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+ plog_info("\t\tCreating mempool with name '%s'\n", name);
+ ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
sock_id, 0);
PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
sock_id, targ->nb_mbuf - 1);
+
+ plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
+ targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
+
return ret;
}
@@ -1001,12 +1488,33 @@ int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size)
struct task_gen *task = (struct task_gen *)tbase;
int rc;
- if ((rc = check_pkt_size(task, pkt_size, 0)) != 0)
- return rc;
- if ((rc = check_fields_in_bounds(task, pkt_size, 0)) != 0)
- return rc;
- task->pkt_template[0].len = pkt_size;
- return rc;
+ for (size_t i = 0; i < task->n_pkts; ++i) {
+ if ((rc = check_pkt_size(task, pkt_size, 0)) != 0)
+ return rc;
+ if ((rc = check_fields_in_bounds(task, pkt_size, 0)) != 0)
+ return rc;
+ }
+ for (size_t i = 0; i < task->n_pkts; ++i) {
+ task->pkt_template[i].len = pkt_size;
+ }
+ return 0;
+}
+
+int task_gen_set_imix(struct task_base *tbase, uint32_t nb_pkt_sizes, uint32_t *pkt_sizes)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+ int rc;
+
+ memcpy(task->imix_pkt_sizes, pkt_sizes, nb_pkt_sizes * sizeof(uint32_t));
+ for (size_t i = 0; i < nb_pkt_sizes; ++i) {
+ if ((rc = check_pkt_size(task, pkt_sizes[i], DO_NOT_PANIC)) != 0)
+ return rc;
+ if ((rc = check_fields_in_bounds(task, pkt_sizes[i], DO_NOT_PANIC)) != 0)
+ return rc;
+ }
+ // only set new_imix_nb_pkts if checks of pkt sizes succeeded
+ task->new_imix_nb_pkts = nb_pkt_sizes;
+ return 0;
}
void task_gen_set_rate(struct task_base *tbase, uint64_t bps)
@@ -1028,10 +1536,20 @@ void task_gen_reset_randoms(struct task_base *tbase)
task->n_rands = 0;
}
+void task_gen_reset_ranges(struct task_base *tbase)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+
+ memset(task->ranges, 0, task->n_ranges * sizeof(struct range));
+ task->n_ranges = 0;
+}
+
int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len)
{
struct task_gen *task = (struct task_gen *)tbase;
+ if (offset + len > task->max_frame_size)
+ return -1;
for (size_t i = 0; i < task->n_pkts; ++i) {
uint32_t to_write = rte_cpu_to_be_32(value) >> ((4 - len) * 8);
uint8_t *dst = task->pkt_template[i].buf;
@@ -1049,6 +1567,16 @@ void task_gen_reset_values(struct task_base *tbase)
struct task_gen *task = (struct task_gen *)tbase;
task_gen_reset_pkt_templates_content(task);
+ task_gen_pkt_template_recalc_metadata(task);
+ check_all_pkt_size(task, DO_NOT_PANIC);
+ check_all_fields_in_bounds(task, DO_NOT_PANIC);
+ task_gen_set_eth_ip_udp_sizes(task, task->orig_n_pkts, task->imix_nb_pkts, task->imix_pkt_sizes);
+
+ if (task->flags & TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC) {
+ for (uint32_t i = 0; i < task->n_pkts; ++i) {
+ rte_memcpy(&task->pkt_template[i].buf[sizeof(prox_rte_ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr));
+ }
+ }
}
uint32_t task_gen_get_n_randoms(struct task_base *tbase)
@@ -1058,43 +1586,54 @@ uint32_t task_gen_get_n_randoms(struct task_base *tbase)
return task->n_rands;
}
+uint32_t task_gen_get_n_ranges(struct task_base *tbase)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+
+ return task->n_ranges;
+}
+
static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
{
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
- const uint32_t sockid = rte_lcore_to_socket_id(targ->lconf->id);
+ task->socket_id = rte_lcore_to_socket_id(targ->lconf->id);
+ uint32_t max_frame_size;
task->loop = targ->loop;
task->pkt_idx = 0;
task->hz = rte_get_tsc_hz();
- task->local_mbuf.mempool = task_gen_create_mempool(targ);
-
- PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");
-
char err[PCAP_ERRBUF_SIZE];
pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
- task->n_pkts = pcap_count_pkts(handle);
+ task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
+ task->local_mbuf.mempool = task_gen_create_mempool(targ, max_frame_size);
+
+ PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");
+
if (targ->n_pkts) {
plogx_info("Configured to load %u packets\n", targ->n_pkts);
if (task->n_pkts > targ->n_pkts)
task->n_pkts = targ->n_pkts;
}
- PROX_PANIC(task->n_pkts > MAX_TEMPLATE_INDEX, "Too many packets specified in pcap - increase MAX_TEMPLATE_INDEX\n");
-
plogx_info("Loading %u packets from pcap\n", task->n_pkts);
size_t mem_size = task->n_pkts * (sizeof(*task->proto) + sizeof(*task->proto_tsc));
- uint8_t *mem = prox_zmalloc(mem_size, sockid);
+ uint8_t *mem = prox_zmalloc(mem_size, task->socket_id);
PROX_PANIC(mem == NULL, "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
task->proto = (struct pkt_template *) mem;
task->proto_tsc = (uint64_t *)(mem + task->n_pkts * sizeof(*task->proto));
- pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc);
+ for (uint i = 0; i < targ->n_pkts; i++) {
+ task->proto[i].buf = prox_zmalloc(max_frame_size, task->socket_id);
+ PROX_PANIC(task->proto[i].buf == NULL, "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
+ }
+
+ pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc, max_frame_size);
pcap_close(handle);
}
@@ -1109,6 +1648,26 @@ static int task_gen_find_random_with_offset(struct task_gen *task, uint32_t offs
return UINT32_MAX;
}
+int task_gen_add_range(struct task_base *tbase, struct range *range)
+{
+ struct task_gen *task = (struct task_gen *)tbase;
+ if (task->n_ranges == MAX_RANGES) {
+ plog_err("Too many ranges\n");
+ return -1;
+ }
+ task->ranges[task->n_ranges].min = range->min;
+ task->ranges[task->n_ranges].value = range->min;
+ uint32_t m = range->max;
+ task->ranges[task->n_ranges].range_len = 0;
+ while (m != 0) {
+ m >>= 8;
+ task->ranges[task->n_ranges].range_len++;
+ }
+ task->ranges[task->n_ranges].offset = range->offset;
+ task->ranges[task->n_ranges++].max = range->max;
+ return 0;
+}
+
int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id)
{
struct task_gen *task = (struct task_gen *)tbase;
@@ -1155,18 +1714,7 @@ static void start(struct task_base *tbase)
if (tbase->l3.tmaster) {
register_all_ip_to_ctrl_plane(task);
}
- if (task->port) {
- // task->port->link_speed reports the link speed in Mbps e.g. 40k for a 40 Gbps NIC.
- // task->link_speed reports link speed in Bytes per sec.
- // It can be 0 if link is down, and must hence be updated in fast path.
- task->link_speed = task->port->link_speed * 125000L;
- if (task->link_speed)
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- else
- plog_info("\tPort %u: link speed is %ld Mbps - link might be down\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- }
+
/* TODO
Handle the case when two tasks transmit to the same port
and one of them is stopped. In that case ARP (requests or replies)
@@ -1177,6 +1725,31 @@ static void start(struct task_base *tbase)
*/
}
+static void stop_gen(struct task_base *tbase)
+{
+ uint32_t i, j;
+ struct task_gen *task = (struct task_gen *)tbase;
+ if (task->store_msk) {
+ for (i = task->store_pkt_id & task->store_msk; i < task->store_msk + 1; i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ for (i = 0; i < (task->store_pkt_id & task->store_msk); i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ }
+}
static void start_pcap(struct task_base *tbase)
{
struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
@@ -1201,16 +1774,29 @@ static void init_task_gen_early(struct task_args *targ)
static void init_task_gen(struct task_base *tbase, struct task_args *targ)
{
struct task_gen *task = (struct task_gen *)tbase;
+ task->socket_id = rte_lcore_to_socket_id(targ->lconf->id);
task->packet_id_pos = targ->packet_id_pos;
- task->local_mbuf.mempool = task_gen_create_mempool(targ);
+ struct prox_port_cfg *port = find_reachable_port(targ);
+ // TODO: check that all reachable ports have the same mtu...
+ if (port) {
+ task->cksum_offload = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
+ task->port = port;
+ task->max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
+ } else {
+ // Not generating to any port...
+ task->max_frame_size = PROX_RTE_ETHER_MAX_LEN;
+ }
+ task->local_mbuf.mempool = task_gen_create_mempool(targ, task->max_frame_size);
PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n");
task->pkt_idx = 0;
task->hz = rte_get_tsc_hz();
task->lat_pos = targ->lat_pos;
task->accur_pos = targ->accur_pos;
task->sig_pos = targ->sig_pos;
+ task->flow_id_pos = targ->flow_id_pos;
+ task->packet_id_in_flow_pos = targ->packet_id_in_flow_pos;
task->sig = targ->sig;
task->new_rate_bps = targ->rate_bps;
@@ -1241,34 +1827,95 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat not enabled by lat pos or accur pos configured\n");
task->generator_id = targ->generator_id;
- plog_info("\tGenerator id = %d\n", task->generator_id);
- task->link_speed = UINT64_MAX;
+ plog_info("\t\tGenerator id = %d\n", task->generator_id);
+
+ // Allocate array holding bytes to tsc for supported frame sizes
+ task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), task->socket_id);
+ PROX_PANIC(task->bytes_to_tsc == NULL,
+ "Failed to allocate %u bytes (in huge pages) for bytes_to_tsc\n", task->max_frame_size);
+
+ // task->port->max_link_speed reports the maximum, non negotiated ink speed in Mbps e.g. 40k for a 40 Gbps NIC.
+ // It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
+ uint64_t bytes_per_hz = UINT64_MAX;
+ if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) {
+ bytes_per_hz = task->port->max_link_speed * 125000L;
+ plog_info("\t\tPort %u: max link speed is %ld Mbps\n",
+ (uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
+ }
+ // There are cases where hz estimate might be slighly over-estimated
+ // This results in too much extrapolation
+ // Only account for 99% of extrapolation to handle cases with up to 1% error clocks
+ for (unsigned int i = 0; i < task->max_frame_size * MAX_PKT_BURST ; i++) {
+ if (bytes_per_hz == UINT64_MAX)
+ task->bytes_to_tsc[i] = 0;
+ else
+ task->bytes_to_tsc[i] = (task->hz * i * 0.99) / bytes_per_hz;
+ }
+ task->imix_nb_pkts = targ->imix_nb_pkts;
+ for (uint32_t i = 0; i < targ->imix_nb_pkts; i++) {
+ task->imix_pkt_sizes[i] = targ->imix_pkt_sizes[i];
+ }
if (!strcmp(targ->pcap_file, "")) {
- plog_info("\tUsing inline definition of a packet\n");
+ plog_info("\t\tUsing inline definition of a packet\n");
task_init_gen_load_pkt_inline(task, targ);
} else {
- plog_info("Loading from pcap %s\n", targ->pcap_file);
+ plog_info("\t\tLoading from pcap %s\n", targ->pcap_file);
task_init_gen_load_pcap(task, targ);
}
- if ((targ->flags & DSF_KEEP_SRC_MAC) == 0 && (targ->nb_txrings || targ->nb_txports)) {
- uint8_t *src_addr = prox_port_cfg[tbase->tx_params_hw.tx_port_queue->port].eth_addr.addr_bytes;
+ PROX_PANIC(((targ->nb_txrings == 0) && (targ->nb_txports == 0)), "Gen mode requires a tx ring or a tx port");
+ if ((targ->flags & DSF_KEEP_SRC_MAC) == 0) {
+ task->flags |= TASK_OVERWRITE_SRC_MAC_WITH_PORT_MAC;
+ memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(prox_rte_ether_addr));
for (uint32_t i = 0; i < task->n_pkts; ++i) {
- rte_memcpy(&task->pkt_template[i].buf[6], src_addr, 6);
+ rte_memcpy(&task->pkt_template[i].buf[sizeof(prox_rte_ether_addr)], &task->src_mac, sizeof(prox_rte_ether_addr));
}
}
- memcpy(&task->src_mac, &prox_port_cfg[task->base.tx_params_hw.tx_port_queue->port].eth_addr, sizeof(struct ether_addr));
for (uint32_t i = 0; i < targ->n_rand_str; ++i) {
PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX),
"Failed to add random\n");
}
-
- struct prox_port_cfg *port = find_reachable_port(targ);
- if (port) {
- task->cksum_offload = port->capabilities.tx_offload_cksum;
- task->port = port;
+ for (uint32_t i = 0; i < targ->n_ranges; ++i) {
+ PROX_PANIC(task_gen_add_range(tbase, &targ->range[i]), "Failed to add range\n");
+ }
+ if (targ->store_max) {
+ char filename[256];
+ sprintf(filename, "gen_buf_%02d_%02d", targ->lconf->id, targ->task);
+
+ task->store_msk = targ->store_max - 1;
+ task->store_buf = (struct packet *)malloc(sizeof(struct packet) * targ->store_max);
+ task->fp = fopen(filename, "w+");
+ PROX_PANIC(task->fp == NULL, "Unable to open %s\n", filename);
+ } else {
+ task->store_msk = 0;
}
+ uint32_t n_entries = get_n_range_flows(task) * task->orig_n_pkts * 4;
+#ifndef RTE_HASH_BUCKET_ENTRIES
+#define RTE_HASH_BUCKET_ENTRIES 8
+#endif
+ // cuckoo hash requires at least RTE_HASH_BUCKET_ENTRIES (8) entries
+ if (n_entries < RTE_HASH_BUCKET_ENTRIES)
+ n_entries = RTE_HASH_BUCKET_ENTRIES;
+
+ static char hash_name[30];
+ sprintf(hash_name, "A%03d_hash_gen_table", targ->lconf->id);
+ struct rte_hash_parameters hash_params = {
+ .name = hash_name,
+ .entries = n_entries,
+ .key_len = sizeof(union ipv4_5tuple_host),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = task->socket_id,
+ };
+ plog_info("\t\thash table name = %s\n", hash_params.name);
+ task->flow_id_table = rte_hash_create(&hash_params);
+ PROX_PANIC(task->flow_id_table == NULL, "Failed to set up flow_id hash table for gen\n");
+ plog_info("\t\tflow_id hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+ build_flow_table(task);
+ task->flows = (struct flows *)prox_zmalloc(n_entries * sizeof(struct flows), task->socket_id);
+ PROX_PANIC(task->flows == NULL, "Failed to allocate flows\n");
+ plog_info("\t\t%d flows allocated\n", n_entries);
}
static struct task_init task_init_gen = {
@@ -1280,11 +1927,12 @@ static struct task_init task_init_gen = {
#ifdef SOFT_CRC
// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
// vector mode is used by DPDK, resulting (theoretically) in higher performance.
- .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
- .size = sizeof(struct task_gen)
+ .size = sizeof(struct task_gen),
+ .stop_last = stop_gen
};
static struct task_init task_init_gen_l3 = {
@@ -1297,13 +1945,14 @@ static struct task_init task_init_gen_l3 = {
#ifdef SOFT_CRC
// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
// vector mode is used by DPDK, resulting (theoretically) in higher performance.
- .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
.size = sizeof(struct task_gen)
};
+/* This mode uses time stamps in the pcap file */
static struct task_init task_init_gen_pcap = {
.mode_str = "gen",
.sub_mode_str = "pcap",
@@ -1312,7 +1961,7 @@ static struct task_init task_init_gen_pcap = {
.start = start_pcap,
.early_init = init_task_gen_early,
#ifdef SOFT_CRC
- .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
#endif
diff --git a/VNFs/DPPD-PROX/handle_gen.h b/VNFs/DPPD-PROX/handle_gen.h
index 5083fea9..bd8fae7b 100644
--- a/VNFs/DPPD-PROX/handle_gen.h
+++ b/VNFs/DPPD-PROX/handle_gen.h
@@ -17,6 +17,8 @@
#ifndef _HANDLE_GEN_H_
#define _HANDLE_GEN_H_
+#include "task_init.h"
+
struct unique_id {
uint8_t generator_id;
uint32_t packet_id;
@@ -38,13 +40,17 @@ struct task_base;
void task_gen_set_pkt_count(struct task_base *tbase, uint32_t count);
int task_gen_set_pkt_size(struct task_base *tbase, uint32_t pkt_size);
+int task_gen_set_imix(struct task_base *tbase, uint32_t nb_pkts, uint32_t *pkt_size);
void task_gen_set_rate(struct task_base *tbase, uint64_t bps);
void task_gen_reset_randoms(struct task_base *tbase);
+void task_gen_reset_ranges(struct task_base *tbase);
void task_gen_reset_values(struct task_base *tbase);
int task_gen_set_value(struct task_base *tbase, uint32_t value, uint32_t offset, uint32_t len);
int task_gen_add_rand(struct task_base *tbase, const char *rand_str, uint32_t offset, uint32_t rand_id);
+int task_gen_add_range(struct task_base *tbase, struct range *range);
uint32_t task_gen_get_n_randoms(struct task_base *tbase);
+uint32_t task_gen_get_n_ranges(struct task_base *tbase);
uint32_t task_gen_get_n_values(struct task_base *tbase);
#endif /* _HANDLE_GEN_H_ */
diff --git a/VNFs/DPPD-PROX/handle_genl4.c b/VNFs/DPPD-PROX/handle_genl4.c
index 4c62c641..49fde3fc 100644
--- a/VNFs/DPPD-PROX/handle_genl4.c
+++ b/VNFs/DPPD-PROX/handle_genl4.c
@@ -439,9 +439,9 @@ static int handle_gen_scheduled(struct task_gen_server *task)
}
else {
- struct ether_hdr *eth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv4_hdr *ip = (struct ipv4_hdr*)(eth + 1);
- struct tcp_hdr *tcp = (struct tcp_hdr*)(ip + 1);
+ prox_rte_ether_hdr *eth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(eth + 1);
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr*)(ip + 1);
task->out_saved = 0;
task->cancelled = 1;
@@ -732,8 +732,8 @@ static int lua_to_stream_cfg(struct lua_State *L, enum lua_place from, const cha
const uint64_t hz = rte_get_tsc_hz();
- ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, ETHER_MAX_LEN + 20);
- ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, ETHER_MAX_LEN + 20);
+ ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, PROX_RTE_ETHER_MAX_LEN + 20);
+ ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, PROX_RTE_ETHER_MAX_LEN + 20);
if (!strcmp(proto, "tcp")) {
ret->proto = IPPROTO_TCP;
@@ -845,7 +845,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
static char name[] = "server_mempool";
name[0]++;
task->mempool = rte_mempool_create(name,
- 4*1024 - 1, MBUF_SIZE,
+ 4*1024 - 1, TX_MBUF_SIZE,
targ->nb_cache_mbuf,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
@@ -946,7 +946,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
struct token_time_cfg tt_cfg = {
.bpp = targ->rate_bps,
.period = rte_get_tsc_hz(),
- .bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
+ .bytes_max = n_descriptors * (PROX_RTE_ETHER_MIN_LEN + 20),
};
token_time_init(&task->token_time, &tt_cfg);
@@ -959,7 +959,7 @@ static void init_task_gen_client(struct task_base *tbase, struct task_args *targ
const uint32_t socket = rte_lcore_to_socket_id(targ->lconf->id);
name[0]++;
task->mempool = rte_mempool_create(name,
- 4*1024 - 1, MBUF_SIZE,
+ 4*1024 - 1, TX_MBUF_SIZE,
targ->nb_cache_mbuf,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL,
@@ -1025,7 +1025,7 @@ static void init_task_gen_client(struct task_base *tbase, struct task_args *targ
task->heap = heap_create(targ->n_concur_conn, socket);
task->seed = rte_rdtsc();
- /* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */
+ /* task->token_time.bytes_max = MAX_PKT_BURST * (PROX_RTE_ETHER_MAX_LEN + 20); */
/* To avoid overflowing the tx descriptors, the token bucket
size needs to be limited. The descriptors are filled most
@@ -1037,7 +1037,7 @@ static void init_task_gen_client(struct task_base *tbase, struct task_args *targ
struct token_time_cfg tt_cfg = {
.bpp = targ->rate_bps,
.period = rte_get_tsc_hz(),
- .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
+ .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (PROX_RTE_ETHER_MIN_LEN + 20),
};
token_time_init(&task->token_time, &tt_cfg);
@@ -1118,7 +1118,6 @@ static struct task_init task_init_gen1 = {
.stop = stop_task_gen_server,
.flag_features = TASK_FEATURE_ZERO_RX,
.size = sizeof(struct task_gen_server),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
static struct task_init task_init_gen2 = {
@@ -1129,7 +1128,6 @@ static struct task_init task_init_gen2 = {
.stop = stop_task_gen_client,
.flag_features = TASK_FEATURE_ZERO_RX,
.size = sizeof(struct task_gen_client),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
__attribute__((constructor)) static void reg_task_gen(void)
diff --git a/VNFs/DPPD-PROX/handle_gre_decap_encap.c b/VNFs/DPPD-PROX/handle_gre_decap_encap.c
index 41f6dd33..83e430a6 100644
--- a/VNFs/DPPD-PROX/handle_gre_decap_encap.c
+++ b/VNFs/DPPD-PROX/handle_gre_decap_encap.c
@@ -37,7 +37,7 @@
#include "quit.h"
struct cpe_gre_key {
- struct ether_addr clt_mac;
+ prox_rte_ether_addr clt_mac;
uint16_t pad;
} __attribute__((__packed__));
@@ -156,7 +156,7 @@ static void init_task_gre_encap(struct task_base *tbase, struct task_args *targ)
struct port_cfg *port = find_reachable_task_sending_to_port(targ);
if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
+ task->offload_crc = port->requested_tx_offload & TX_OFFLOAD_CKSUM;
}
#ifdef GRE_TP
@@ -219,12 +219,12 @@ void handle_gre_decap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
}
struct gre_packet {
- struct ether_hdr eth;
- struct ipv4_hdr ip;
+ prox_rte_ether_hdr eth;
+ prox_rte_ipv4_hdr ip;
struct gre_hdr gre;
union {
- struct ether_hdr eth2;
- struct ipv4_hdr ip2;
+ prox_rte_ether_hdr eth2;
+ prox_rte_ipv4_hdr ip2;
};
} __attribute__((__packed__));
@@ -232,26 +232,26 @@ struct gre_packet {
GRE remove gre and ipv4 header and retain space for ethernet
header. In case of Eth over GRE remove external eth, gre and ipv4
headers and return pointer to payload */
-static inline struct ether_hdr *gre_decap(struct gre_hdr *pgre, struct rte_mbuf *mbuf)
+static inline prox_rte_ether_hdr *gre_decap(struct gre_hdr *pgre, struct rte_mbuf *mbuf)
{
int16_t hsize = 0;
if (pgre->type == ETYPE_EoGRE) {
- hsize = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
+ hsize = sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr);
}
else if (pgre->type == ETYPE_IPv4) {
- /* retain sizeof(struct ether_hdr) */
- hsize = sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
+ /* retain sizeof(prox_rte_ether_hdr) */
+ hsize = sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr);
}
else {
return NULL;
}
- return (struct ether_hdr *)rte_pktmbuf_adj(mbuf, hsize);
+ return (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, hsize);
}
static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_mbuf *mbuf)
{
- struct ipv4_hdr *pip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbuf, struct ether_hdr *) + 1);
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *) + 1);
if (pip->next_proto_id != IPPROTO_GRE) {
plog_warn("Invalid packet proto_id = 0x%x expect 0x%x\n",
@@ -265,15 +265,15 @@ static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_m
data.gre_id = pgre->gre_id;
data.cpe_ip = pip->src_addr;
- struct ether_hdr *peth = gre_decap(pgre, mbuf);
+ prox_rte_ether_hdr *peth = gre_decap(pgre, mbuf);
PROX_PANIC(peth != 0, "Failed to gre_decap");
- pip = (struct ipv4_hdr *)(peth + 1);
+ pip = (prox_rte_ipv4_hdr *)(peth + 1);
/* emulate client MAC for test purposes */
#if 1
if (pgre->type == ETYPE_IPv4) {
- struct ether_hdr eth = {
+ prox_rte_ether_hdr eth = {
.d_addr = {.addr_bytes =
{0x0A, 0x02, 0x0A, 0x0A, 0x00, 0x01}},
.s_addr = {.addr_bytes =
@@ -285,9 +285,9 @@ static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_m
eth.s_addr.addr_bytes[3] = (hip >> 16) & 0xFF;
eth.s_addr.addr_bytes[4] = (hip >> 8) & 0xFF;
eth.s_addr.addr_bytes[5] = (hip) & 0xFF;
- rte_memcpy(peth, &eth, sizeof(struct ether_hdr));
+ rte_memcpy(peth, &eth, sizeof(prox_rte_ether_hdr));
}
- ether_addr_copy(&peth->s_addr, &key.clt_mac);
+ prox_rte_ether_addr_copy(&peth->s_addr, &key.clt_mac);
#endif
data.tsc = rte_rdtsc() + task->cpe_timeout;
@@ -303,7 +303,7 @@ static inline uint8_t handle_gre_decap(struct task_gre_decap *task, struct rte_m
}
rte_memcpy(&task->cpe_gre_data[hash_index], &data, sizeof(data));
if (task->runtime_flags & TASK_TX_CRC) {
- prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
}
return 0;
@@ -333,8 +333,8 @@ void handle_gre_encap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
for (uint8_t i = 0; i < n_pkts; ++i) {
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *);
- ether_addr_copy(&peth->d_addr, &task->key[i].clt_mac);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ prox_rte_ether_addr_copy(&peth->d_addr, &task->key[i].clt_mac);
}
int32_t hash_index[16];
@@ -359,24 +359,24 @@ static inline void handle_gre_encap16(struct task_gre_decap *task, struct rte_mb
}
#ifdef DO_ENC_ETH_OVER_GRE
-#define PKT_PREPEND_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr))
+#define PKT_PREPEND_LEN (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr))
#elif DO_ENC_IP_OVER_GRE
-#define PKT_PREPEND_LEN (sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr))
+#define PKT_PREPEND_LEN (sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr))
#else
static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_mbuf *mbuf, struct cpe_gre_data *table)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv4_hdr *pip = (struct ipv4_hdr *)(peth + 1);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(peth + 1);
uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
struct cpe_gre_key key;
- ether_addr_copy(&peth->d_addr, &key.clt_mac);
+ prox_rte_ether_addr_copy(&peth->d_addr, &key.clt_mac);
#ifdef GRE_TP
/* policing enabled */
if (task->cycles_per_byte) {
- const uint16_t pkt_size = rte_pktmbuf_pkt_len(mbuf) + ETHER_CRC_LEN;
+ const uint16_t pkt_size = rte_pktmbuf_pkt_len(mbuf) + PROX_RTE_ETHER_CRC_LEN;
uint64_t tsc_now = rte_rdtsc();
if (table->tp_tbsize < pkt_size) {
uint64_t cycles_diff = tsc_now - table->tp_tsc;
@@ -399,19 +399,19 @@ static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_m
/* reuse ethernet header from payload, retain payload (ip) in
case of DO_ENC_IP_OVER_GRE */
- peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, PKT_PREPEND_LEN);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, PKT_PREPEND_LEN);
PREFETCH0(peth);
ip_len += PKT_PREPEND_LEN;
- pip = (struct ipv4_hdr *)(peth + 1);
+ pip = (prox_rte_ipv4_hdr *)(peth + 1);
struct gre_hdr *pgre = (struct gre_hdr *)(pip + 1);
- struct ether_hdr eth = {
+ prox_rte_ether_hdr eth = {
.d_addr = {.addr_bytes = {0x0A, 0x0A, 0x0A, 0xC8, 0x00, 0x02}},
.s_addr = {.addr_bytes = {0x0A, 0x0A, 0x0A, 0xC8, 0x00, 0x01}},
.ether_type = ETYPE_IPv4
};
- rte_memcpy(peth, &eth, sizeof(struct ether_hdr));
+ rte_memcpy(peth, &eth, sizeof(prox_rte_ether_hdr));
rte_memcpy(pgre, &gre_hdr_proto, sizeof(struct gre_hdr));
#if DO_ENC_ETH_OVER_GRE
@@ -421,13 +421,13 @@ static inline uint8_t handle_gre_encap(struct task_gre_decap *task, struct rte_m
#endif
pgre->gre_id = table->gre_id;
- rte_memcpy(pip, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
+ rte_memcpy(pip, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr));
pip->src_addr = 0x02010a0a; //emulate port ip
pip->dst_addr = table->cpe_ip;
pip->total_length = rte_cpu_to_be_16(ip_len);
if (task->runtime_flags & TASK_TX_CRC) {
- prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
}
return 0;
diff --git a/VNFs/DPPD-PROX/handle_impair.c b/VNFs/DPPD-PROX/handle_impair.c
index 805dedfc..a147d44d 100644
--- a/VNFs/DPPD-PROX/handle_impair.c
+++ b/VNFs/DPPD-PROX/handle_impair.c
@@ -55,7 +55,9 @@ struct task_impair {
unsigned queue_head;
unsigned queue_tail;
unsigned queue_mask;
- int tresh;
+ int tresh_no_drop;
+ int tresh_duplicate;
+ int tresh_delay;
unsigned int seed;
struct random state;
uint64_t last_idx;
@@ -72,10 +74,23 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
-void task_impair_set_proba(struct task_base *tbase, float proba)
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba_no_drop)
{
struct task_impair *task = (struct task_impair *)tbase;
- task->tresh = ((uint64_t) RAND_MAX) * (uint32_t)(proba * 10000) / 1000000;
+ task->tresh_no_drop = ((uint64_t) RAND_MAX) * (uint32_t)(proba_no_drop * 10000) / 1000000;
+}
+
+void task_impair_set_proba_delay(struct task_base *tbase, float proba_delay)
+{
+ struct task_impair *task = (struct task_impair *)tbase;
+ task->tresh_delay = ((uint64_t) RAND_MAX) * (uint32_t)(proba_delay * 10000) / 1000000;
+ task->flags |= IMPAIR_NEED_UPDATE;
+}
+
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba_dup)
+{
+ struct task_impair *task = (struct task_impair *)tbase;
+ task->tresh_duplicate = ((uint64_t) RAND_MAX) * (uint32_t)(proba_dup * 10000) / 1000000;
}
void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us)
@@ -118,7 +133,7 @@ static void task_impair_update(struct task_base *tbase)
uint16_t idx = 0;
while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
if (task->queue[task->queue_tail].tsc <= now) {
- out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[idx++] = task->queue[task->queue_tail].mbuf;
task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
}
@@ -140,7 +155,7 @@ static void task_impair_update(struct task_base *tbase)
while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
struct queue *queue = &task->buffer[task->last_idx];
while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
- out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[pkt_idx++] = queue->queue_elem[queue->queue_tail].mbuf;
queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask;
}
@@ -175,10 +190,10 @@ static void task_impair_update(struct task_base *tbase)
}
} else if (task->random_delay_us) {
size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
- plog_info("Allocating %zd bytes\n", size);
+ plog_info("\t\tAllocating %zd bytes\n", size);
task->buffer = prox_zmalloc(size, task->socket_id);
PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
- plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+ plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
task->buffer[i].queue_elem = prox_zmalloc(mem_size, task->socket_id);
@@ -192,23 +207,23 @@ static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mb
{
struct task_impair *task = (struct task_impair *)tbase;
uint8_t out[MAX_PKT_BURST];
- struct ether_hdr * hdr[MAX_PKT_BURST];
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
int ret = 0;
for (uint16_t i = 0; i < n_pkts; ++i) {
PREFETCH0(mbufs[i]);
}
for (uint16_t i = 0; i < n_pkts; ++i) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *);
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
PREFETCH0(hdr[i]);
}
if (task->flags & IMPAIR_SET_MAC) {
for (uint16_t i = 0; i < n_pkts; ++i) {
- ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
- out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
+ out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
}
} else {
for (uint16_t i = 0; i < n_pkts; ++i) {
- out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
}
}
ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
@@ -224,12 +239,12 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
uint16_t enqueue_failed;
uint16_t i;
int ret = 0;
- struct ether_hdr * hdr[MAX_PKT_BURST];
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
for (uint16_t i = 0; i < n_pkts; ++i) {
PREFETCH0(mbufs[i]);
}
for (uint16_t i = 0; i < n_pkts; ++i) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *);
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
PREFETCH0(hdr[i]);
}
@@ -238,7 +253,7 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
/* We know n_pkts fits, no need to check for every packet */
for (i = 0; i < n_pkts; ++i) {
if (task->flags & IMPAIR_SET_MAC)
- ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
task->queue[task->queue_head].tsc = now + task->delay_time;
task->queue[task->queue_head].mbuf = mbufs[i];
task->queue_head = (task->queue_head + 1) & task->queue_mask;
@@ -247,7 +262,7 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
for (i = 0; i < n_pkts; ++i) {
if (((task->queue_head + 1) & task->queue_mask) != task->queue_tail) {
if (task->flags & IMPAIR_SET_MAC)
- ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
task->queue[task->queue_head].tsc = now + task->delay_time;
task->queue[task->queue_head].mbuf = mbufs[i];
task->queue_head = (task->queue_head + 1) & task->queue_mask;
@@ -268,10 +283,10 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
uint16_t idx = 0;
- if (task->tresh != RAND_MAX) {
+ if (task->tresh_no_drop != RAND_MAX) {
while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
if (task->queue[task->queue_tail].tsc <= now) {
- out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[idx] = task->queue[task->queue_tail].mbuf;
PREFETCH0(new_mbufs[idx]);
PREFETCH0(&new_mbufs[idx]->cacheline1);
@@ -336,23 +351,26 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
int ret = 0;
uint64_t packet_time, idx;
uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK;
- struct ether_hdr * hdr[MAX_PKT_BURST];
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
for (uint16_t i = 0; i < n_pkts; ++i) {
PREFETCH0(mbufs[i]);
}
for (uint16_t i = 0; i < n_pkts; ++i) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr *);
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
PREFETCH0(hdr[i]);
}
for (i = 0; i < n_pkts; ++i) {
- packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+ if (rand_r(&task->seed) <= task->tresh_delay)
+ packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+ else
+ packet_time = now;
idx = (packet_time >> DELAY_ACCURACY) & DELAY_MAX_MASK;
while (idx != ((now_idx - 1) & DELAY_MAX_MASK)) {
struct queue *queue = &task->buffer[idx];
if (((queue->queue_head + 1) & task->queue_mask) != queue->queue_tail) {
if (task->flags & IMPAIR_SET_MAC)
- ether_addr_copy((struct ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
queue->queue_elem[queue->queue_head].mbuf = mbufs[i];
queue->queue_head = (queue->queue_head + 1) & task->queue_mask;
break;
@@ -366,6 +384,15 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
ret+= task->base.tx_pkt(&task->base, mbufs + i, 1, out);
plog_warn("Unexpectdly dropping packets\n");
}
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ if (rand_r(&task->seed) <= task->tresh_duplicate) {
+ mbufs[i] = rte_pktmbuf_copy(mbufs[i], mbufs[i]->pool, 0, UINT32_MAX);
+ if (mbufs[i] == NULL) {
+ plog_err("Failed to duplicate mbuf\n");
+ } else
+ i = i - 1;
+ }
+#endif
}
struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
@@ -374,7 +401,7 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
struct queue *queue = &task->buffer[task->last_idx];
while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
- out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[pkt_idx] = queue->queue_elem[queue->queue_tail].mbuf;
PREFETCH0(new_mbufs[pkt_idx]);
PREFETCH0(&new_mbufs[pkt_idx]->cacheline1);
@@ -399,10 +426,10 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
uint64_t delay_us = 0;
task->seed = rte_rdtsc();
- if (targ->probability == 0)
- targ->probability = 1000000;
- task->tresh = ((uint64_t) RAND_MAX) * targ->probability / 1000000;
+ task->tresh_no_drop = ((uint64_t) RAND_MAX) * targ->probability_no_drop / 1000000;
+ task->tresh_delay = ((uint64_t) RAND_MAX) * targ->probability_delay / 1000000;
+ task->tresh_duplicate = ((uint64_t) RAND_MAX) * targ->probability_duplicate / 1000000;
if ((targ->delay_us == 0) && (targ->random_delay_us == 0)) {
tbase->handle_bulk = handle_bulk_random_drop;
@@ -438,10 +465,10 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
task->queue_tail = 0;
} else if (targ->random_delay_us) {
size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
- plog_info("Allocating %zd bytes\n", size);
+ plog_info("\t\tAllocating %zd bytes\n", size);
task->buffer = prox_zmalloc(size, socket_id);
PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
- plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+ plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
task->buffer[i].queue_elem = prox_zmalloc(mem_size, socket_id);
@@ -450,7 +477,7 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
}
random_init_seed(&task->state);
if (targ->nb_txports) {
- memcpy(&task->src_mac[0], &prox_port_cfg[tbase->tx_params_hw.tx_port_queue[0].port].eth_addr, sizeof(struct ether_addr));
+ memcpy(&task->src_mac[0], &prox_port_cfg[tbase->tx_params_hw.tx_port_queue[0].port].eth_addr, sizeof(prox_rte_ether_addr));
task->flags = IMPAIR_SET_MAC;
} else {
task->flags = 0;
diff --git a/VNFs/DPPD-PROX/handle_impair.h b/VNFs/DPPD-PROX/handle_impair.h
index 162213ed..c2d10ab3 100644
--- a/VNFs/DPPD-PROX/handle_impair.h
+++ b/VNFs/DPPD-PROX/handle_impair.h
@@ -18,6 +18,8 @@
#define _HANDLE_IMPAIR_H_
void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us);
-void task_impair_set_proba(struct task_base *tbase, float proba);
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba);
+void task_impair_set_proba_delay(struct task_base *tbase, float proba);
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba);
#endif /* _HANDLE_IMPAIR_H_ */
diff --git a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c
index 13570b14..1c99eb84 100644
--- a/VNFs/DPPD-PROX/handle_ipv6_tunnel.c
+++ b/VNFs/DPPD-PROX/handle_ipv6_tunnel.c
@@ -49,7 +49,7 @@
struct ipv6_tun_dest {
struct ipv6_addr dst_addr;
- struct ether_addr dst_mac;
+ prox_rte_ether_addr dst_mac;
};
typedef enum ipv6_tun_dir_t {
@@ -59,7 +59,7 @@ typedef enum ipv6_tun_dir_t {
struct task_ipv6_tun_base {
struct task_base base;
- struct ether_addr src_mac;
+ prox_rte_ether_addr src_mac;
uint8_t core_nb;
uint64_t keys[64];
struct rte_mbuf* fake_packets[64];
@@ -71,7 +71,7 @@ struct task_ipv6_tun_base {
struct task_ipv6_decap {
struct task_ipv6_tun_base base;
- struct ether_addr dst_mac;
+ prox_rte_ether_addr dst_mac;
};
struct task_ipv6_encap {
@@ -131,7 +131,7 @@ static void init_lookup_table(struct task_ipv6_tun_base* ptask, struct task_args
struct ipv6_tun_binding_entry* entry = &table->entry[idx];
uint64_t key = MAKE_KEY_FROM_FIELDS(rte_cpu_to_be_32(entry->public_ipv4), entry->public_port, ptask->lookup_port_mask);
rte_memcpy(&data.dst_addr, &entry->endpoint_addr, sizeof(struct ipv6_addr));
- rte_memcpy(&data.dst_mac, &entry->next_hop_mac, sizeof(struct ether_addr));
+ rte_memcpy(&data.dst_mac, &entry->next_hop_mac, sizeof(prox_rte_ether_addr));
int ret = prox_rte_table_key8_add(ptask->lookup_table, &key, &data, &key_found, &entry_in_hash);
PROX_PANIC(ret, "Error adding entry (%d) to binding lookup table", idx);
@@ -167,7 +167,7 @@ static void init_task_ipv6_tun_base(struct task_ipv6_tun_base* tun_base, struct
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- tun_base->offload_crc = port->capabilities.tx_offload_cksum;
+ tun_base->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
}
@@ -221,16 +221,16 @@ __attribute__((constructor)) static void reg_task_ipv6_encap(void)
static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest);
static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rte_mbuf* rx_mbuf, struct ipv6_tun_dest* tun_dest);
-static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint32_t* pAddr, uint16_t* pPort)
+static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_tun_base* ptask, prox_rte_ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint32_t* pAddr, uint16_t* pPort)
{
*pAddr = (dir == TUNNEL_DIR_DECAP) ? pip4->src_addr : pip4->dst_addr;
if (pip4->next_proto_id == IPPROTO_UDP) {
- struct udp_hdr* pudp = (struct udp_hdr *)(pip4 + 1);
+ prox_rte_udp_hdr* pudp = (prox_rte_udp_hdr *)(pip4 + 1);
*pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? pudp->src_port : pudp->dst_port);
}
else if (pip4->next_proto_id == IPPROTO_TCP) {
- struct tcp_hdr* ptcp = (struct tcp_hdr *)(pip4 + 1);
+ prox_rte_tcp_hdr* ptcp = (prox_rte_tcp_hdr *)(pip4 + 1);
*pPort = rte_be_to_cpu_16((dir == TUNNEL_DIR_DECAP) ? ptcp->src_port : ptcp->dst_port);
}
else {
@@ -242,7 +242,7 @@ static inline int extract_key_fields( __attribute__((unused)) struct task_ipv6_t
return 0;
}
-static inline void extract_key(struct task_ipv6_tun_base* ptask, struct ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint64_t* pkey)
+static inline void extract_key(struct task_ipv6_tun_base* ptask, prox_rte_ipv4_hdr* pip4, ipv6_tun_dir_t dir, uint64_t* pkey)
{
uint32_t lookup_addr;
uint16_t lookup_port;
@@ -256,19 +256,19 @@ static inline void extract_key(struct task_ipv6_tun_base* ptask, struct ipv4_hdr
*pkey = MAKE_KEY_FROM_FIELDS(lookup_addr, lookup_port, ptask->lookup_port_mask);
}
-static inline struct ipv4_hdr* get_ipv4_decap(struct rte_mbuf *mbuf)
+static inline prox_rte_ipv4_hdr* get_ipv4_decap(struct rte_mbuf *mbuf)
{
- struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
- struct ipv4_hdr* pip4 = (struct ipv4_hdr*) (pip6 + 1); // TODO - Skip Option headers
+ prox_rte_ether_hdr* peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ipv6_hdr* pip6 = (prox_rte_ipv6_hdr *)(peth + 1);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr*) (pip6 + 1); // TODO - Skip Option headers
return pip4;
}
-static inline struct ipv4_hdr* get_ipv4_encap(struct rte_mbuf *mbuf)
+static inline prox_rte_ipv4_hdr* get_ipv4_encap(struct rte_mbuf *mbuf)
{
- struct ether_hdr* peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
+ prox_rte_ether_hdr* peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
return pip4;
}
@@ -303,7 +303,7 @@ __attribute__((cold)) static void handle_error(struct task_ipv6_tun_base* ptask,
uint16_t lookup_port;
uint64_t key;
- struct ipv4_hdr* pip4 = (dir == TUNNEL_DIR_DECAP) ? get_ipv4_decap(mbuf) : get_ipv4_encap(mbuf);
+ prox_rte_ipv4_hdr* pip4 = (dir == TUNNEL_DIR_DECAP) ? get_ipv4_decap(mbuf) : get_ipv4_encap(mbuf);
extract_key_fields(ptask, pip4, dir, &lookup_addr, &lookup_port);
extract_key(ptask, pip4, dir, &key);
@@ -381,9 +381,9 @@ static int handle_ipv6_encap_bulk(struct task_base* tbase, struct rte_mbuf** mbu
static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rte_mbuf* rx_mbuf, __attribute__((unused)) struct ipv6_tun_dest* tun_dest)
{
- struct ether_hdr* peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr* peth = rte_pktmbuf_mtod(rx_mbuf, prox_rte_ether_hdr *);
struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)ptask;
- struct ipv4_hdr* pip4 = NULL;
+ prox_rte_ipv4_hdr* pip4 = NULL;
if (unlikely(peth->ether_type != ETYPE_IPv6)) {
plog_warn("Received non IPv6 packet on ipv6 tunnel port\n");
@@ -391,8 +391,8 @@ static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rt
return OUT_DISCARD;
}
- struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
- int ipv6_hdr_len = sizeof(struct ipv6_hdr);
+ prox_rte_ipv6_hdr* pip6 = (prox_rte_ipv6_hdr *)(peth + 1);
+ int ipv6_hdr_len = sizeof(prox_rte_ipv6_hdr);
// TODO - Skip over any IPv6 Extension Header:
// If pip6->next_header is in (0, 43, 44, 50, 51, 60, 135), skip ahead pip->hdr_ext_len
@@ -406,18 +406,18 @@ static inline uint8_t handle_ipv6_decap(struct task_ipv6_decap* ptask, struct rt
// Discard IPv6 encapsulation
rte_pktmbuf_adj(rx_mbuf, ipv6_hdr_len);
- peth = rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *);
- pip4 = (struct ipv4_hdr *)(peth + 1);
+ peth = rte_pktmbuf_mtod(rx_mbuf, prox_rte_ether_hdr *);
+ pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
// Restore Ethernet header
- ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
- ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
+ prox_rte_ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
+ prox_rte_ether_addr_copy(&ptask->dst_mac, &peth->d_addr);
peth->ether_type = ETYPE_IPv4;
#ifdef GEN_DECAP_IPV6_TO_IPV4_CKSUM
// generate an IP checksum for ipv4 packet
if (tun_base->runtime_flags & TASK_TX_CRC) {
- prox_ip_cksum(rx_mbuf, pip4, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), ptask->base.offload_crc);
+ prox_ip_cksum(rx_mbuf, pip4, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), ptask->base.offload_crc);
}
#endif
@@ -428,8 +428,8 @@ static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rt
{
//plog_info("Found tunnel endpoint:"IPv6_BYTES_FMT" ("MAC_BYTES_FMT")\n", IPv6_BYTES(tun_dest->dst_addr), MAC_BYTES(tun_dest->dst_mac.addr_bytes));
- struct ether_hdr* peth = (struct ether_hdr *)(rte_pktmbuf_mtod(rx_mbuf, struct ether_hdr *));
- struct ipv4_hdr* pip4 = (struct ipv4_hdr *)(peth + 1);
+ prox_rte_ether_hdr* peth = (prox_rte_ether_hdr *)(rte_pktmbuf_mtod(rx_mbuf, prox_rte_ether_hdr *));
+ prox_rte_ipv4_hdr* pip4 = (prox_rte_ipv4_hdr *)(peth + 1);
uint16_t ipv4_length = rte_be_to_cpu_16(pip4->total_length);
struct task_ipv6_tun_base* tun_base = (struct task_ipv6_tun_base*)ptask;
@@ -449,22 +449,22 @@ static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rt
pip4->hdr_checksum = 0;
// Remove padding if any (we don't want to encapsulate garbage at end of IPv4 packet)
- int padding = rte_pktmbuf_pkt_len(rx_mbuf) - (ipv4_length + sizeof(struct ether_hdr));
+ int padding = rte_pktmbuf_pkt_len(rx_mbuf) - (ipv4_length + sizeof(prox_rte_ether_hdr));
if (unlikely(padding > 0)) {
rte_pktmbuf_trim(rx_mbuf, padding);
}
// Encapsulate
- const int extra_space = sizeof(struct ipv6_hdr);
- peth = (struct ether_hdr *)rte_pktmbuf_prepend(rx_mbuf, extra_space);
+ const int extra_space = sizeof(prox_rte_ipv6_hdr);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(rx_mbuf, extra_space);
// Ethernet Header
- ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
- ether_addr_copy(&tun_dest->dst_mac, &peth->d_addr);
+ prox_rte_ether_addr_copy(&ptask->base.src_mac, &peth->s_addr);
+ prox_rte_ether_addr_copy(&tun_dest->dst_mac, &peth->d_addr);
peth->ether_type = ETYPE_IPv6;
// Set up IPv6 Header
- struct ipv6_hdr* pip6 = (struct ipv6_hdr *)(peth + 1);
+ prox_rte_ipv6_hdr* pip6 = (prox_rte_ipv6_hdr *)(peth + 1);
pip6->vtc_flow = rte_cpu_to_be_32(IPv6_VERSION << 28);
pip6->proto = IPPROTO_IPIP;
pip6->payload_len = rte_cpu_to_be_16(ipv4_length);
@@ -474,8 +474,8 @@ static inline uint8_t handle_ipv6_encap(struct task_ipv6_encap* ptask, struct rt
if (tun_base->runtime_flags & TASK_TX_CRC) {
// We modified the TTL in the IPv4 header, hence have to recompute the IPv4 checksum
-#define TUNNEL_L2_LEN (sizeof(struct ether_hdr) + sizeof(struct ipv6_hdr))
- prox_ip_cksum(rx_mbuf, pip4, TUNNEL_L2_LEN, sizeof(struct ipv4_hdr), ptask->base.offload_crc);
+#define TUNNEL_L2_LEN (sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv6_hdr))
+ prox_ip_cksum(rx_mbuf, pip4, TUNNEL_L2_LEN, sizeof(prox_rte_ipv4_hdr), ptask->base.offload_crc);
}
return 0;
}
diff --git a/VNFs/DPPD-PROX/handle_irq.c b/VNFs/DPPD-PROX/handle_irq.c
index 00c192f6..36aa54e8 100644
--- a/VNFs/DPPD-PROX/handle_irq.c
+++ b/VNFs/DPPD-PROX/handle_irq.c
@@ -26,7 +26,10 @@
#include "input.h"
#define MAX_INTERRUPT_LENGTH 500000 /* Maximum length of an interrupt is (1 / MAX_INTERRUPT_LENGTH) seconds */
+
+uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
uint64_t irq_bucket_maxtime_micro[] = {1,5,10,50,100,500,1000,5000,10000,50000,100000,500000,UINT64_MAX};
+
/*
* This module is not handling any packets.
* It loops on rdtsc() and checks whether it has been interrupted
diff --git a/VNFs/DPPD-PROX/handle_l2fwd.c b/VNFs/DPPD-PROX/handle_l2fwd.c
index faebe6fb..35d331b6 100644
--- a/VNFs/DPPD-PROX/handle_l2fwd.c
+++ b/VNFs/DPPD-PROX/handle_l2fwd.c
@@ -31,32 +31,32 @@ struct task_l2fwd {
static int handle_l2fwd_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
struct task_l2fwd *task = (struct task_l2fwd *)tbase;
- struct ether_hdr *hdr;
- struct ether_addr mac;
+ prox_rte_ether_hdr *hdr;
+ prox_rte_ether_addr mac;
if ((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) {
/* Source and Destination mac hardcoded */
for (uint16_t j = 0; j < n_pkts; ++j) {
- hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *);
+ hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *);
rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
}
} else {
for (uint16_t j = 0; j < n_pkts; ++j) {
- hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *);
+ hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *);
if ((task->runtime_flags & (TASK_ARG_DO_NOT_SET_SRC_MAC|TASK_ARG_SRC_MAC_SET)) == 0) {
/* dst mac will be used as src mac */
- ether_addr_copy(&hdr->d_addr, &mac);
+ prox_rte_ether_addr_copy(&hdr->d_addr, &mac);
}
if (task->runtime_flags & TASK_ARG_DST_MAC_SET)
- ether_addr_copy((struct ether_addr *)&task->src_dst_mac[0], &hdr->d_addr);
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[0], &hdr->d_addr);
else if ((task->runtime_flags & TASK_ARG_DO_NOT_SET_DST_MAC) == 0)
- ether_addr_copy(&hdr->s_addr, &hdr->d_addr);
+ prox_rte_ether_addr_copy(&hdr->s_addr, &hdr->d_addr);
if (task->runtime_flags & TASK_ARG_SRC_MAC_SET) {
- ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
} else if ((task->runtime_flags & TASK_ARG_DO_NOT_SET_SRC_MAC) == 0) {
- ether_addr_copy(&mac, &hdr->s_addr);
+ prox_rte_ether_addr_copy(&mac, &hdr->s_addr);
}
}
}
@@ -66,7 +66,7 @@ static int handle_l2fwd_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, u
static void init_task_l2fwd(struct task_base *tbase, struct task_args *targ)
{
struct task_l2fwd *task = (struct task_l2fwd *)tbase;
- struct ether_addr *src_addr, *dst_addr;
+ prox_rte_ether_addr *src_addr, *dst_addr;
/*
* The destination MAC of the outgoing packet is based on the config file:
@@ -112,9 +112,8 @@ static struct task_init task_init_l2fwd = {
.mode_str = "l2fwd",
.init = init_task_l2fwd,
.handle = handle_l2fwd_bulk,
- .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
.size = sizeof(struct task_l2fwd),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
__attribute__((constructor)) static void reg_task_l2fwd(void)
diff --git a/VNFs/DPPD-PROX/handle_lat.c b/VNFs/DPPD-PROX/handle_lat.c
index 8cc5c32b..04a4848b 100644
--- a/VNFs/DPPD-PROX/handle_lat.c
+++ b/VNFs/DPPD-PROX/handle_lat.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -34,8 +34,8 @@
#include "prox_shared.h"
#include "prox_port_cfg.h"
-#define DEFAULT_BUCKET_SIZE 10
-#define ACCURACY_BUFFER_SIZE 64
+#define DEFAULT_BUCKET_SIZE 11
+#define ACCURACY_BUFFER_SIZE (2 * ACCURACY_WINDOW)
struct lat_info {
uint32_t rx_packet_index;
@@ -60,7 +60,7 @@ struct delayed_latency_entry {
uint32_t packet_id;
uint8_t generator_id;
uint64_t pkt_rx_time;
- uint64_t pkt_tx_time;
+ uint64_t pkt_tx_time; // Time written into packets by gen. Unit is TSC >> LATENCY_ACCURACY
uint64_t rx_time_err;
};
@@ -86,6 +86,15 @@ struct rx_pkt_meta_data {
uint32_t bytes_after_in_bulk;
};
+struct loss_buffer {
+ uint32_t packet_id;
+ uint32_t n;
+};
+
+struct flows {
+ uint32_t packet_id;
+};
+
struct task_lat {
struct task_base base;
uint64_t limit;
@@ -106,14 +115,24 @@ struct task_lat {
struct lat_test lt[2];
struct lat_test *lat_test;
uint32_t generator_count;
+ uint16_t min_pkt_len;
struct early_loss_detect *eld;
struct rx_pkt_meta_data *rx_pkt_meta;
- uint64_t link_speed;
// Following fields are only used when starting or stopping, not in general runtime
uint64_t *prev_tx_packet_index;
+ FILE *fp_loss;
FILE *fp_rx;
FILE *fp_tx;
struct prox_port_cfg *port;
+ uint64_t *bytes_to_tsc;
+ uint64_t *previous_packet;
+ uint32_t loss_buffer_size;
+ struct loss_buffer *loss_buffer;
+ uint32_t loss_id;
+ uint32_t packet_id_in_flow_pos;
+ int32_t flow_id_pos;
+ uint32_t flow_count;
+ struct flows *flows;
};
/* This function calculate the difference between rx and tx_time
* Both values are uint32_t (see handle_lat_bulk)
@@ -127,6 +146,11 @@ static uint32_t diff_time(uint32_t rx_time, uint32_t tx_time)
return rx_time - tx_time;
}
+uint32_t task_lat_get_latency_bucket_size(struct task_lat *task)
+{
+ return task->lat_test->bucket_size;
+}
+
struct lat_test *task_lat_get_latency_meassurement(struct task_lat *task)
{
if (task->use_lt == task->using_lt)
@@ -343,9 +367,9 @@ static void lat_write_latency_to_file(struct task_lat *task)
uint64_t rx_tsc = lat_info_get_rx_tsc(lat_info);
uint64_t tx_tsc = lat_info_get_tx_tsc(lat_info);
- /* Packet n + ACCURACY_BUFFER_SIZE delivers the TX error for packet n,
- hence the last ACCURACY_BUFFER_SIZE packets do no have TX error. */
- if (i + ACCURACY_BUFFER_SIZE >= task->latency_buffer_idx) {
+ /* Packet n + ACCURACY_WINDOW delivers the TX error for packet n,
+ hence the last ACCURACY_WINDOW packets do no have TX error. */
+ if (i + ACCURACY_WINDOW >= task->latency_buffer_idx) {
tx_err_tsc = 0;
}
@@ -393,7 +417,14 @@ static void lat_stop(struct task_base *tbase)
if (task->unique_id_pos) {
task_lat_count_remaining_lost_packets(task);
task_lat_reset_eld(task);
+ memset(task->previous_packet, 0, sizeof(task->previous_packet[0]) * task->generator_count);
}
+ if (task->loss_id && task->fp_loss) {
+ for (uint i = 0; i < task->loss_id; i++) {
+ fprintf(task->fp_loss, "packet %u: %u\n", task->loss_buffer[i].packet_id, task->loss_buffer[i].n);
+ }
+ }
+ task->lat_test->lost_packets = 0;
if (task->latency_buffer)
lat_write_latency_to_file(task);
}
@@ -428,25 +459,30 @@ static void task_lat_store_lat_buf(struct task_lat *task, uint64_t rx_packet_ind
lat_info->tx_err = tx_err;
}
-static uint32_t task_lat_early_loss_detect(struct task_lat *task, struct unique_id *unique_id)
+static uint32_t task_lat_early_loss_detect(struct task_lat *task, uint32_t packet_id, uint8_t generator_id)
{
- struct early_loss_detect *eld;
- uint8_t generator_id;
- uint32_t packet_index;
-
- unique_id_get(unique_id, &generator_id, &packet_index);
-
- if (generator_id >= task->generator_count)
- return 0;
+ struct early_loss_detect *eld = &task->eld[generator_id];
+ return early_loss_detect_add(eld, packet_id);
+}
- eld = &task->eld[generator_id];
+static void lat_test_check_duplicate(struct task_lat *task, struct lat_test *lat_test, uint32_t packet_id, uint8_t generator_id)
+{
+ struct early_loss_detect *eld = &task->eld[generator_id];
+ uint32_t old_queue_id, queue_pos;
- return early_loss_detect_add(eld, packet_index);
+ queue_pos = packet_id & PACKET_QUEUE_MASK;
+ old_queue_id = eld->entries[queue_pos];
+ if ((packet_id >> PACKET_QUEUE_BITS) == old_queue_id)
+ lat_test->duplicate++;
}
-static uint64_t tsc_extrapolate_backward(uint64_t link_speed, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
+static uint64_t tsc_extrapolate_backward(struct task_lat *task, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
{
- uint64_t tsc = tsc_from - (rte_get_tsc_hz()*bytes)/link_speed;
+#ifdef NO_LAT_EXTRAPOLATION
+ uint64_t tsc = tsc_from;
+#else
+ uint64_t tsc = tsc_from - task->bytes_to_tsc[bytes];
+#endif
if (likely(tsc > tsc_minimum))
return tsc;
else
@@ -458,10 +494,28 @@ static void lat_test_histogram_add(struct lat_test *lat_test, uint64_t lat_tsc)
uint64_t bucket_id = (lat_tsc >> lat_test->bucket_size);
size_t bucket_count = sizeof(lat_test->buckets)/sizeof(lat_test->buckets[0]);
- bucket_id = bucket_id < bucket_count? bucket_id : bucket_count;
+ bucket_id = bucket_id < bucket_count? bucket_id : (bucket_count - 1);
lat_test->buckets[bucket_id]++;
}
+static void lat_test_check_flow_ordering(struct task_lat *task, struct lat_test *lat_test, int32_t flow_id, uint32_t packet_id)
+{
+ if (packet_id < task->flows[flow_id].packet_id) {
+ lat_test->mis_ordered++;
+ lat_test->extent += task->flows[flow_id].packet_id - packet_id;
+ }
+ task->flows[flow_id].packet_id = packet_id;
+}
+
+static void lat_test_check_ordering(struct task_lat *task, struct lat_test *lat_test, uint32_t packet_id, uint8_t generator_id)
+{
+ if (packet_id < task->previous_packet[generator_id]) {
+ lat_test->mis_ordered++;
+ lat_test->extent += task->previous_packet[generator_id] - packet_id;
+ }
+ task->previous_packet[generator_id] = packet_id;
+}
+
static void lat_test_add_lost(struct lat_test *lat_test, uint64_t lost_packets)
{
lat_test->lost_packets += lost_packets;
@@ -502,8 +556,6 @@ static int task_lat_can_store_latency(struct task_lat *task)
static void task_lat_store_lat(struct task_lat *task, uint64_t rx_packet_index, uint64_t rx_time, uint64_t tx_time, uint64_t rx_error, uint64_t tx_error, uint32_t packet_id, uint8_t generator_id)
{
- if (tx_time == 0)
- return;
uint32_t lat_tsc = diff_time(rx_time, tx_time) << LATENCY_ACCURACY;
lat_test_add_latency(task->lat_test, lat_tsc, rx_error + tx_error);
@@ -516,19 +568,8 @@ static void task_lat_store_lat(struct task_lat *task, uint64_t rx_packet_index,
static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
struct task_lat *task = (struct task_lat *)tbase;
- uint64_t rx_time_err;
-
- uint32_t pkt_rx_time, pkt_tx_time;
-
- // If link is down, link_speed is 0
- if (unlikely(task->link_speed == 0)) {
- if (task->port && task->port->link_speed != 0) {
- task->link_speed = task->port->link_speed * 125000L;
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- } else
- return 0;
- }
+ static int max_flows_printed = 0;
+ int rc;
if (n_pkts == 0) {
task->begin = tbase->aux->tsc_rx.before;
@@ -537,8 +578,12 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
task_lat_update_lat_test(task);
- const uint64_t rx_tsc = tbase->aux->tsc_rx.after;
- uint32_t tx_time_err = 0;
+ // Remember those packets with bad length or bad signature
+ uint32_t non_dp_count = 0;
+ uint64_t pkt_bad_len_sig = 0;
+#define BIT64_SET(a64, bit) a64 |= (((uint64_t)1) << (bit & 63))
+#define BIT64_CLR(a64, bit) a64 &= ~(((uint64_t)1) << (bit & 63))
+#define BIT64_TEST(a64, bit) a64 & (((uint64_t)1) << (bit & 63))
/* Go once through all received packets and read them. If
packet has just been modified by another core, the cost of
@@ -546,17 +591,31 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
for (uint16_t j = 0; j < n_pkts; ++j) {
struct rte_mbuf *mbuf = mbufs[j];
task->rx_pkt_meta[j].hdr = rte_pktmbuf_mtod(mbuf, uint8_t *);
+
+ // Remember those packets which are too short to hold the values that we expect
+ if (unlikely(rte_pktmbuf_pkt_len(mbuf) < task->min_pkt_len)) {
+ BIT64_SET(pkt_bad_len_sig, j);
+ non_dp_count++;
+ } else
+ BIT64_CLR(pkt_bad_len_sig, j);
}
- if (task->sig) {
+ if (task->sig_pos) {
for (uint16_t j = 0; j < n_pkts; ++j) {
- if (*(uint32_t *)(task->rx_pkt_meta[j].hdr + task->sig_pos) == task->sig)
+ if (unlikely(BIT64_TEST(pkt_bad_len_sig, j)))
+ continue;
+ // Remember those packets with bad signature
+ if (likely(*(uint32_t *)(task->rx_pkt_meta[j].hdr + task->sig_pos) == task->sig))
task->rx_pkt_meta[j].pkt_tx_time = *(uint32_t *)(task->rx_pkt_meta[j].hdr + task->lat_pos);
- else
- task->rx_pkt_meta[j].pkt_tx_time = 0;
+ else {
+ BIT64_SET(pkt_bad_len_sig, j);
+ non_dp_count++;
+ }
}
} else {
for (uint16_t j = 0; j < n_pkts; ++j) {
+ if (unlikely(BIT64_TEST(pkt_bad_len_sig, j)))
+ continue;
task->rx_pkt_meta[j].pkt_tx_time = *(uint32_t *)(task->rx_pkt_meta[j].hdr + task->lat_pos);
}
}
@@ -570,47 +629,89 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
bytes_total_in_bulk += mbuf_wire_size(mbufs[flipped]);
}
- pkt_rx_time = tsc_extrapolate_backward(task->link_speed, rx_tsc, task->rx_pkt_meta[0].bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
- if ((uint32_t)((task->begin >> LATENCY_ACCURACY)) > pkt_rx_time) {
+ const uint64_t rx_tsc = tbase->aux->tsc_rx.after;
+
+ uint64_t rx_time_err;
+ uint64_t pkt_rx_time64 = tsc_extrapolate_backward(task, rx_tsc, task->rx_pkt_meta[0].bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
+ if (unlikely((task->begin >> LATENCY_ACCURACY) > pkt_rx_time64)) {
// Extrapolation went up to BEFORE begin => packets were stuck in the NIC but we were not seeing them
- rx_time_err = pkt_rx_time - (uint32_t)(task->last_pkts_tsc >> LATENCY_ACCURACY);
+ rx_time_err = pkt_rx_time64 - (task->last_pkts_tsc >> LATENCY_ACCURACY);
} else {
- rx_time_err = pkt_rx_time - (uint32_t)(task->begin >> LATENCY_ACCURACY);
+ rx_time_err = pkt_rx_time64 - (task->begin >> LATENCY_ACCURACY);
}
- struct unique_id *unique_id = NULL;
- struct delayed_latency_entry *delayed_latency_entry;
- uint32_t packet_id, generator_id;
-
+ TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, non_dp_count);
for (uint16_t j = 0; j < n_pkts; ++j) {
+ // Used to display % of packets within accuracy limit vs. total number of packets (used_col)
+ task->lat_test->tot_all_pkts++;
+
+ // Skip those packets with bad length or bad signature
+ if (unlikely(BIT64_TEST(pkt_bad_len_sig, j)))
+ continue;
+
struct rx_pkt_meta_data *rx_pkt_meta = &task->rx_pkt_meta[j];
uint8_t *hdr = rx_pkt_meta->hdr;
- pkt_rx_time = tsc_extrapolate_backward(task->link_speed, rx_tsc, rx_pkt_meta->bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
- pkt_tx_time = rx_pkt_meta->pkt_tx_time;
+ uint32_t pkt_rx_time = tsc_extrapolate_backward(task, rx_tsc, rx_pkt_meta->bytes_after_in_bulk, task->last_pkts_tsc) >> LATENCY_ACCURACY;
+ uint32_t pkt_tx_time = rx_pkt_meta->pkt_tx_time;
+
+ uint8_t generator_id;
+ uint32_t packet_id;
+ int32_t flow_id = -1;
+ if (task->flow_id_pos) {
+ flow_id = *(int32_t *)(hdr + task->flow_id_pos);
+ if (unlikely(flow_id >= (int32_t)(task->flow_count))) {
+ flow_id = -1;
+ if (!max_flows_printed) {
+ plog_info("Too many flows - increase flow count (only printed once)\n");
+ max_flows_printed = 1;
+ }
+ }
+ }
+ if (task->packet_id_in_flow_pos && (flow_id != -1)) {
+ uint32_t packet_id_in_flow;
+ struct unique_id *unique_id = (struct unique_id *)(hdr + task->packet_id_in_flow_pos);
+ unique_id_get(unique_id, &generator_id, &packet_id_in_flow);
+ lat_test_check_flow_ordering(task, task->lat_test, flow_id + generator_id * task->generator_count, packet_id_in_flow);
+ }
if (task->unique_id_pos) {
- unique_id = (struct unique_id *)(hdr + task->unique_id_pos);
-
- uint32_t n_loss = task_lat_early_loss_detect(task, unique_id);
- packet_id = unique_id->packet_id;
- generator_id = unique_id->generator_id;
- lat_test_add_lost(task->lat_test, n_loss);
+ struct unique_id *unique_id = (struct unique_id *)(hdr + task->unique_id_pos);
+ unique_id_get(unique_id, &generator_id, &packet_id);
+
+ if (unlikely(generator_id >= task->generator_count)) {
+ /* No need to remember unexpected packet at this stage
+ BIT64_SET(pkt_bad_len_sig, j);
+ */
+ // Skip unexpected packet
+ continue;
+ }
+ if (flow_id == -1) {
+ lat_test_check_ordering(task, task->lat_test, packet_id, generator_id);
+ }
+ lat_test_check_duplicate(task, task->lat_test, packet_id, generator_id);
+ uint32_t loss = task_lat_early_loss_detect(task, packet_id, generator_id);
+ if (loss) {
+ lat_test_add_lost(task->lat_test, loss);
+ if (task->loss_id < task->loss_buffer_size) {
+ task->loss_buffer[task->loss_id].packet_id = packet_id;
+ task->loss_buffer[task->loss_id++].n = loss;
+ }
+ }
} else {
- packet_id = task->rx_packet_index;
generator_id = 0;
+ packet_id = task->rx_packet_index;
}
- task->lat_test->tot_all_pkts++;
/* If accuracy is enabled, latency is reported with a
- delay of ACCURACY_BUFFER_SIZE packets since the generator puts the
- accuracy for packet N into packet N + ACCURACY_BUFFER_SIZE. The delay
+ delay of ACCURACY_WINDOW packets since the generator puts the
+ accuracy for packet N into packet N + ACCURACY_WINDOW. The delay
ensures that all reported latencies have both rx
and tx error. */
if (task->accur_pos) {
- tx_time_err = *(uint32_t *)(hdr + task->accur_pos);
+ uint32_t tx_time_err = *(uint32_t *)(hdr + task->accur_pos);
- delayed_latency_entry = delayed_latency_get(task->delayed_latency_entries, generator_id, packet_id - ACCURACY_BUFFER_SIZE);
+ struct delayed_latency_entry *delayed_latency_entry = delayed_latency_get(task->delayed_latency_entries, generator_id, packet_id - ACCURACY_WINDOW);
if (delayed_latency_entry) {
task_lat_store_lat(task,
@@ -633,13 +734,20 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
} else {
task_lat_store_lat(task, task->rx_packet_index, pkt_rx_time, pkt_tx_time, 0, 0, packet_id, generator_id);
}
+
+ // Bad/unexpected packets do not need to be indexed
task->rx_packet_index++;
}
- int ret;
- ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
- task->begin = tbase->aux->tsc_rx.before;
+
+ if (n_pkts < MAX_PKT_BURST)
+ task->begin = tbase->aux->tsc_rx.before;
task->last_pkts_tsc = tbase->aux->tsc_rx.after;
- return ret;
+
+ rc = task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
+ // non_dp_count should not be drop-handled, as there are all by definition considered as not handled
+ // RX = DISCARDED + HANDLED + NON_DP + (TX - TX_NON_DP) + TX_FAIL
+ TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, -non_dp_count);
+ return rc;
}
static void init_task_lat_latency_buffer(struct task_lat *task, uint32_t core_id)
@@ -677,7 +785,7 @@ static void task_init_generator_count(struct task_lat *task)
plog_info("\tNo generators found, hard-coding to %u generators\n", task->generator_count);
} else
task->generator_count = *generator_count;
- plog_info("\tLatency using %u generators\n", task->generator_count);
+ plog_info("\t\tLatency using %u generators\n", task->generator_count);
}
static void task_lat_init_eld(struct task_lat *task, uint8_t socket_id)
@@ -698,18 +806,6 @@ static void lat_start(struct task_base *tbase)
{
struct task_lat *task = (struct task_lat *)tbase;
- if (task->port) {
- // task->port->link_speed reports the link speed in Mbps e.g. 40k for a 40 Gbps NIC.
- // task->link_speed reports link speed in Bytes per sec.
- // It can be 0 if link is down, and must hence be updated in fast path.
- task->link_speed = task->port->link_speed * 125000L;
- if (task->link_speed)
- plog_info("\tPort %u: link speed is %ld Mbps\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- else
- plog_info("\tPort %u: link speed is %ld Mbps - link might be down\n",
- (uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
- }
}
static void init_task_lat(struct task_base *tbase, struct task_args *targ)
@@ -721,10 +817,25 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
task->accur_pos = targ->accur_pos;
task->sig_pos = targ->sig_pos;
task->sig = targ->sig;
+ task->packet_id_in_flow_pos = targ->packet_id_in_flow_pos;
+ task->flow_id_pos = targ->flow_id_pos;
task->unique_id_pos = targ->packet_id_pos;
task->latency_buffer_size = targ->latency_buffer_size;
+ PROX_PANIC(task->lat_pos == 0, "Missing 'lat pos' parameter in config file\n");
+ uint16_t min_pkt_len = task->lat_pos + sizeof(uint32_t);
+ if (task->unique_id_pos && (
+ min_pkt_len < task->unique_id_pos + sizeof(struct unique_id)))
+ min_pkt_len = task->unique_id_pos + sizeof(struct unique_id);
+ if (task->accur_pos && (
+ min_pkt_len < task->accur_pos + sizeof(uint32_t)))
+ min_pkt_len = task->accur_pos + sizeof(uint32_t);
+ if (task->sig_pos && (
+ min_pkt_len < task->sig_pos + sizeof(uint32_t)))
+ min_pkt_len = task->sig_pos + sizeof(uint32_t);
+ task->min_pkt_len = min_pkt_len;
+
task_init_generator_count(task);
if (task->latency_buffer_size) {
@@ -743,34 +854,76 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(task->delayed_latency_entries[i] == NULL, "Failed to allocate array for storing delayed latency entries\n");
}
if (task->unique_id_pos == 0) {
- /* When using accuracy feature, the accuracy from TX is written ACCURACY_BUFFER_SIZE packets later
+ /* When using accuracy feature, the accuracy from TX is written ACCURACY_WINDOW packets later
* We can only retrieve the good packet if a packet id is written to it.
- * Otherwise we will use the packet RECEIVED ACCURACY_BUFFER_SIZE packets ago which is OK if
+ * Otherwise we will use the packet RECEIVED ACCURACY_WINDOW packets ago which is OK if
* packets are not re-ordered. If packets are re-ordered, then the matching between
- * the tx accuracy znd the latency is wrong.
+ * the TX accuracy and the latency is wrong.
*/
plog_warn("\tWhen accuracy feature is used, a unique id should ideally also be used\n");
}
}
- task->lt[0].bucket_size = targ->bucket_size - LATENCY_ACCURACY;
- task->lt[1].bucket_size = targ->bucket_size - LATENCY_ACCURACY;
+ task->lt[0].min_lat = -1;
+ task->lt[1].min_lat = -1;
+ task->lt[0].bucket_size = targ->bucket_size;
+ task->lt[1].bucket_size = targ->bucket_size;
if (task->unique_id_pos) {
task_lat_init_eld(task, socket_id);
task_lat_reset_eld(task);
+ task->previous_packet = prox_zmalloc(sizeof(task->previous_packet[0]) * task->generator_count, socket_id);
+ PROX_PANIC(task->previous_packet == NULL, "Failed to allocate array for storing previous packet\n");
}
task->lat_test = &task->lt[task->using_lt];
task_lat_set_accuracy_limit(task, targ->accuracy_limit_nsec);
- task->rx_pkt_meta = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(*task->rx_pkt_meta), socket_id);
+ task->rx_pkt_meta = prox_zmalloc(MAX_PKT_BURST * sizeof(*task->rx_pkt_meta), socket_id);
PROX_PANIC(task->rx_pkt_meta == NULL, "unable to allocate memory to store RX packet meta data");
- task->link_speed = UINT64_MAX;
+ uint32_t max_frame_size = MAX_PKT_SIZE;
+ uint64_t bytes_per_hz = UINT64_MAX;
if (targ->nb_rxports) {
- // task->port structure is only used while starting handle_lat to get the link_speed.
- // link_speed can not be quiried at init as the port has not been initialized yet.
struct prox_port_cfg *port = &prox_port_cfg[targ->rx_port_queue[0].port];
- task->port = port;
+ max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+
+ // port->max_link_speed reports the maximum, non negotiated ink speed in Mbps e.g. 40k for a 40 Gbps NIC.
+ // It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
+ if (port->max_link_speed != UINT32_MAX) {
+ bytes_per_hz = port->max_link_speed * 125000L;
+ plog_info("\t\tPort %u: max link speed is %ld Mbps\n",
+ (uint8_t)(port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
+ }
+ }
+ task->loss_buffer_size = targ->loss_buffer_size;
+ if (task->loss_buffer_size) {
+ char name[256];
+ sprintf(name, "loss_%u.txt", targ->lconf->id);
+ task->fp_loss = fopen(name, "w+");
+ PROX_PANIC(task->fp_loss == NULL, "Failed to open %s\n", name);
+
+ task->loss_buffer = prox_zmalloc(task->loss_buffer_size * sizeof(struct loss_buffer), rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->loss_buffer == NULL,
+ "Failed to allocate %lu bytes (in huge pages) for loss_buffer\n", task->loss_buffer_size * sizeof(struct loss_buffer));
+ }
+ task->bytes_to_tsc = prox_zmalloc(max_frame_size * sizeof(task->bytes_to_tsc[0]) * MAX_PKT_BURST, rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->bytes_to_tsc == NULL,
+ "Failed to allocate %lu bytes (in huge pages) for bytes_to_tsc\n", max_frame_size * sizeof(task->bytes_to_tsc[0]) * MAX_PKT_BURST);
+
+ // There are cases where hz estimate might be slighly over-estimated
+ // This results in too much extrapolation
+ // Only account for 99% of extrapolation to handle cases with up to 1% error clocks
+ for (unsigned int i = 0; i < max_frame_size * MAX_PKT_BURST ; i++) {
+ if (bytes_per_hz == UINT64_MAX)
+ task->bytes_to_tsc[i] = 0;
+ else
+ task->bytes_to_tsc[i] = (rte_get_tsc_hz() * i * 0.99) / bytes_per_hz;
+ }
+ task->flow_count = targ->flow_count;
+ PROX_PANIC(task->flow_id_pos && (task->flow_count == 0), "flow_count must be configured when flow_id_pos is set\n");
+ if (task->flow_count) {
+ task->flows = prox_zmalloc(task->flow_count * sizeof(struct flows) * task->generator_count, rte_lcore_to_socket_id(targ->lconf->id));
+ PROX_PANIC(task->flows == NULL,
+ "Failed to allocate %lu bytes (in huge pages) for flows\n", task->flow_count * sizeof(struct flows) * task->generator_count);
}
}
@@ -780,7 +933,7 @@ static struct task_init task_init_lat = {
.handle = handle_lat_bulk,
.start = lat_start,
.stop = lat_stop,
- .flag_features = TASK_FEATURE_TSC_RX | TASK_FEATURE_RX_ALL | TASK_FEATURE_ZERO_RX | TASK_FEATURE_NEVER_DISCARDS,
+ .flag_features = TASK_FEATURE_TSC_RX | TASK_FEATURE_ZERO_RX | TASK_FEATURE_NEVER_DISCARDS,
.size = sizeof(struct task_lat)
};
diff --git a/VNFs/DPPD-PROX/handle_lat.h b/VNFs/DPPD-PROX/handle_lat.h
index 3cc80461..475682ce 100644
--- a/VNFs/DPPD-PROX/handle_lat.h
+++ b/VNFs/DPPD-PROX/handle_lat.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,6 +25,15 @@
#include "clock.h"
#define LATENCY_ACCURACY 1
+// If ACCURACY_WINDOW is too small, the accuracy for packet N can be received by lat BEFORE
+// packet N is received (re-ordering) resulting in accuracy being unused
+// 8192 packets is equivalent to 550 micro-seconds at 10Gbps for 64 bytes packets
+#define ACCURACY_WINDOW 8192
+#define LAT_BUCKET_COUNT 128
+
+// Number of buckets in the latency histogram (see buckets[] in struct lat_test below).
+// NOTE(review): two duplicated "#define LAT_BUCKET_COUNT 128" lines were garbled into this
+// hunk; replaced with comments of equal line count so the +25,15 hunk header stays valid.
struct lat_test {
uint64_t tot_all_pkts;
@@ -40,9 +49,12 @@ struct lat_test {
uint64_t tot_lat_error;
unsigned __int128 var_lat_error;
- uint64_t buckets[128];
+ uint64_t buckets[LAT_BUCKET_COUNT];
uint64_t bucket_size;
uint64_t lost_packets;
+ uint64_t mis_ordered;
+ uint64_t extent;
+ uint64_t duplicate;
};
static struct time_unit lat_test_get_accuracy_limit(struct lat_test *lat_test)
@@ -148,6 +160,9 @@ static void lat_test_combine(struct lat_test *dst, struct lat_test *src)
if (src->accuracy_limit_tsc > dst->accuracy_limit_tsc)
dst->accuracy_limit_tsc = src->accuracy_limit_tsc;
dst->lost_packets += src->lost_packets;
+ dst->mis_ordered += src->mis_ordered;
+ dst->extent += src->extent;
+ dst->duplicate += src->duplicate;
#ifdef LATENCY_HISTOGRAM
_lat_test_histogram_combine(dst, src);
@@ -169,6 +184,9 @@ static void lat_test_reset(struct lat_test *lat_test)
lat_test->accuracy_limit_tsc = 0;
lat_test->lost_packets = 0;
+ lat_test->mis_ordered = 0;
+ lat_test->extent = 0;
+ lat_test->duplicate = 0;
memset(lat_test->buckets, 0, sizeof(lat_test->buckets));
}
@@ -182,6 +200,7 @@ static void lat_test_copy(struct lat_test *dst, struct lat_test *src)
struct task_lat;
struct lat_test *task_lat_get_latency_meassurement(struct task_lat *task);
+uint32_t task_lat_get_latency_bucket_size(struct task_lat *task);
void task_lat_use_other_latency_meassurement(struct task_lat *task);
void task_lat_set_accuracy_limit(struct task_lat *task, uint32_t accuracy_limit_nsec);
diff --git a/VNFs/DPPD-PROX/handle_lb_5tuple.c b/VNFs/DPPD-PROX/handle_lb_5tuple.c
index 7aadf49a..ec229386 100644
--- a/VNFs/DPPD-PROX/handle_lb_5tuple.c
+++ b/VNFs/DPPD-PROX/handle_lb_5tuple.c
@@ -14,6 +14,11 @@
// limitations under the License.
*/
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <rte_hash.h>
#include <rte_ether.h>
#include <rte_memcpy.h>
@@ -58,7 +63,7 @@ static inline uint8_t get_ipv4_dst_port(struct task_lb_5tuple *task, void *ipv4_
int ret = 0;
union ipv4_5tuple_host key;
- ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(struct ipv4_hdr, time_to_live);
+ ipv4_hdr = (uint8_t *)ipv4_hdr + offsetof(prox_rte_ipv4_hdr, time_to_live);
__m128i data = _mm_loadu_si128((__m128i*)(ipv4_hdr));
/* Get 5 tuple: dst port, src port, dst IP address, src IP address and protocol */
key.xmm = _mm_and_si128(data, mask0);
@@ -76,15 +81,15 @@ static inline uint8_t get_ipv4_dst_port(struct task_lb_5tuple *task, void *ipv4_
static inline uint8_t handle_lb_5tuple(struct task_lb_5tuple *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
+ prox_rte_ether_hdr *eth_hdr;
+ prox_rte_ipv4_hdr *ipv4_hdr;
- eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
switch (eth_hdr->ether_type) {
case ETYPE_IPv4:
/* Handle IPv4 headers.*/
- ipv4_hdr = (struct ipv4_hdr *) (eth_hdr + 1);
+ ipv4_hdr = (prox_rte_ipv4_hdr *) (eth_hdr + 1);
return get_ipv4_dst_port(task, ipv4_hdr, OUT_DISCARD, task->lookup_hash);
default:
return OUT_DISCARD;
diff --git a/VNFs/DPPD-PROX/handle_lb_net.c b/VNFs/DPPD-PROX/handle_lb_net.c
index 46a7226e..1bfb6c3d 100644
--- a/VNFs/DPPD-PROX/handle_lb_net.c
+++ b/VNFs/DPPD-PROX/handle_lb_net.c
@@ -357,9 +357,9 @@ static inline uint8_t worker_from_mask(struct task_lb_net *task, uint32_t val)
static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, struct rte_mbuf *mbuf)
{
// For all packets, one by one, remove MPLS tag if any and fills in keys used by "fake" packets
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
// Check for MPLS TAG
- struct ipv4_hdr *ip;
+ prox_rte_ipv4_hdr *ip;
if (peth->ether_type == ETYPE_MPLSU) {
struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);
uint32_t mpls_len = 0;
@@ -368,12 +368,12 @@ static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, s
mpls_len += sizeof(struct mpls_hdr);
}
mpls_len += sizeof(struct mpls_hdr);
- ip = (struct ipv4_hdr *)(mpls + 1);
+ ip = (prox_rte_ipv4_hdr *)(mpls + 1);
switch (ip->version_ihl >> 4) {
case 4:
// Remove MPLS Tag if requested
if (task->runtime_flags & TASK_MPLS_TAGGING) {
- peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
peth->ether_type = ETYPE_IPv4;
}
break;
@@ -386,7 +386,7 @@ static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, s
}
}
else {
- ip = (struct ipv4_hdr *)(peth + 1);
+ ip = (prox_rte_ipv4_hdr *)(peth + 1);
}
// Entry point for the packet => check for packet validity
// => do not use extract_key_core(mbufs[j], &task->keys[j]);
@@ -416,7 +416,7 @@ static inline int extract_gre_key(struct task_lb_net_lut *task, uint32_t *key, s
return 0;
}
-static inline uint8_t lb_ip4(struct task_lb_net *task, struct ipv4_hdr *ip)
+static inline uint8_t lb_ip4(struct task_lb_net *task, prox_rte_ipv4_hdr *ip)
{
if (unlikely(ip->version_ihl >> 4 != 4)) {
plog_warn("Expected to receive IPv4 packet but IP version was %d\n",
@@ -453,7 +453,7 @@ static inline uint8_t lb_ip4(struct task_lb_net *task, struct ipv4_hdr *ip)
return OUT_DISCARD;
}
-static inline uint8_t lb_ip6(struct task_lb_net *task, struct ipv6_hdr *ip)
+static inline uint8_t lb_ip6(struct task_lb_net *task, prox_rte_ipv6_hdr *ip)
{
if (unlikely((*(uint8_t*)ip) >> 4 != 6)) {
plog_warn("Expected to receive IPv6 packet but IP version was %d\n",
@@ -465,7 +465,7 @@ static inline uint8_t lb_ip6(struct task_lb_net *task, struct ipv6_hdr *ip)
return worker + task->nb_worker_threads * IPV6;
}
-static inline uint8_t lb_mpls(struct task_lb_net *task, struct ether_hdr *peth, struct rte_mbuf *mbuf)
+static inline uint8_t lb_mpls(struct task_lb_net *task, prox_rte_ether_hdr *peth, struct rte_mbuf *mbuf)
{
struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);
uint32_t mpls_len = 0;
@@ -474,21 +474,21 @@ static inline uint8_t lb_mpls(struct task_lb_net *task, struct ether_hdr *peth,
mpls_len += sizeof(struct mpls_hdr);
}
mpls_len += sizeof(struct mpls_hdr);
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(mpls + 1);
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(mpls + 1);
switch (ip->version_ihl >> 4) {
case 4:
if (task->runtime_flags & TASK_MPLS_TAGGING) {
- peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
peth->ether_type = ETYPE_IPv4;
}
return lb_ip4(task, ip);
case 6:
if (task->runtime_flags & TASK_MPLS_TAGGING) {
- peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
peth->ether_type = ETYPE_IPv6;
}
- return lb_ip6(task, (struct ipv6_hdr *)ip);
+ return lb_ip6(task, (prox_rte_ipv6_hdr *)ip);
default:
plogd_warn(mbuf, "Failed Decoding MPLS Packet - neither IPv4 neither IPv6: version %u for packet : \n", ip->version_ihl);
return OUT_DISCARD;
@@ -507,7 +507,7 @@ static inline uint8_t lb_qinq(struct task_lb_net *task, struct qinq_hdr *qinq)
static inline uint8_t handle_lb_net(struct task_lb_net *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
const uint16_t len = rte_pktmbuf_pkt_len(mbuf);
if (len < 60) {
plogd_warn(mbuf, "Unexpected frame len = %d for packet : \n", len);
@@ -520,9 +520,9 @@ static inline uint8_t handle_lb_net(struct task_lb_net *task, struct rte_mbuf *m
case ETYPE_8021ad:
return lb_qinq(task, (struct qinq_hdr *)peth);
case ETYPE_IPv4:
- return lb_ip4(task, (struct ipv4_hdr *)(peth + 1));
+ return lb_ip4(task, (prox_rte_ipv4_hdr *)(peth + 1));
case ETYPE_IPv6:
- return lb_ip6(task, (struct ipv6_hdr *)(peth + 1));
+ return lb_ip6(task, (prox_rte_ipv6_hdr *)(peth + 1));
case ETYPE_LLDP:
return OUT_DISCARD;
default:
diff --git a/VNFs/DPPD-PROX/handle_lb_pos.c b/VNFs/DPPD-PROX/handle_lb_pos.c
index 4324e94d..3cf465ce 100644
--- a/VNFs/DPPD-PROX/handle_lb_pos.c
+++ b/VNFs/DPPD-PROX/handle_lb_pos.c
@@ -81,9 +81,9 @@ union ip_port {
};
struct pkt_ether_ipv4_udp {
- struct ether_hdr ether;
- struct ipv4_hdr ipv4;
- struct udp_hdr udp;
+ prox_rte_ether_hdr ether;
+ prox_rte_ipv4_hdr ipv4;
+ prox_rte_udp_hdr udp;
} __attribute__((unused));
static uint8_t handle_lb_ip_port(struct task_lb_pos *task, struct rte_mbuf *mbuf)
diff --git a/VNFs/DPPD-PROX/handle_lb_qinq.c b/VNFs/DPPD-PROX/handle_lb_qinq.c
index 18ff7df4..9726edda 100644
--- a/VNFs/DPPD-PROX/handle_lb_qinq.c
+++ b/VNFs/DPPD-PROX/handle_lb_qinq.c
@@ -104,9 +104,9 @@ static void init_task_lb_qinq(struct task_base *tbase, struct task_args *targ)
plog_info("\t\ttask_lb_qinq protocols_mask = 0x%x\n", task->protocols_mask);
if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_RSS)
- tbase->flags |= BASE_FLAG_LUT_QINQ_RSS;
+ tbase->flags |= TBASE_FLAG_LUT_QINQ_RSS;
if (targ->task_init->flag_features & TASK_FEATURE_LUT_QINQ_HASH)
- tbase->flags |= BASE_FLAG_LUT_QINQ_HASH;
+ tbase->flags |= TBASE_FLAG_LUT_QINQ_HASH;
plog_info("\t\ttask_lb_qinq flags = 0x%x\n", tbase->flags);
}
@@ -245,24 +245,24 @@ int handle_lb_qinq_bulk_set_port(struct task_base *tbase, struct rte_mbuf **mbuf
struct qinq_packet {
struct qinq_hdr qinq_hdr;
union {
- struct ipv4_hdr ipv4_hdr;
- struct ipv6_hdr ipv6_hdr;
+ prox_rte_ipv4_hdr ipv4_hdr;
+ prox_rte_ipv6_hdr ipv6_hdr;
};
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct qinq_packet_data {
- struct ether_addr d_addr;
- struct ether_addr s_addr;
+ prox_rte_ether_addr d_addr;
+ prox_rte_ether_addr s_addr;
uint64_t qinq;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct ether_packet {
- struct ether_hdr ether_hdr;
+ prox_rte_ether_hdr ether_hdr;
union {
- struct ipv4_hdr ipv4_hdr;
- struct ipv6_hdr ipv6_hdr;
+ prox_rte_ipv4_hdr ipv4_hdr;
+ prox_rte_ipv6_hdr ipv6_hdr;
};
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
struct cpe_packet {
union {
@@ -275,7 +275,7 @@ struct cpe_packet {
static inline uint8_t get_worker(struct task_lb_qinq *task, struct cpe_packet *packet)
{
uint8_t worker = 0;
- if (((struct task_base *)task)->flags & BASE_FLAG_LUT_QINQ_HASH) {
+ if (((struct task_base *)task)->flags & TBASE_FLAG_LUT_QINQ_HASH) {
// Load Balance on Hash of combination of cvlan and svlan
uint64_t qinq_net = packet->qd.qinq;
qinq_net = qinq_net & 0xFF0F0000FF0F0000; // Mask Proto and QoS bits
@@ -286,7 +286,7 @@ static inline uint8_t get_worker(struct task_lb_qinq *task, struct cpe_packet *p
worker = rte_hash_crc(&qinq_net,8,0) % task->nb_worker_threads;
}
plogx_dbg("Sending packet svlan=%x, cvlan=%x, pseudo_qinq=%lx to worker %d\n", rte_bswap16(0xFF0F & packet->qp.qinq_hdr.svlan.vlan_tci), rte_bswap16(0xFF0F & packet->qp.qinq_hdr.cvlan.vlan_tci), qinq_net, worker);
- } else if (((struct task_base *)task)->flags & BASE_FLAG_LUT_QINQ_RSS){
+ } else if (((struct task_base *)task)->flags & TBASE_FLAG_LUT_QINQ_RSS){
// Load Balance on rss of combination of cvlan and svlan
uint32_t qinq = (packet->qp.qinq_hdr.cvlan.vlan_tci & 0xFF0F) << 16;
uint32_t rss = toeplitz_hash((uint8_t *)&qinq, 4);
diff --git a/VNFs/DPPD-PROX/handle_master.c b/VNFs/DPPD-PROX/handle_master.c
index 074d7dd3..58240ba0 100644
--- a/VNFs/DPPD-PROX/handle_master.c
+++ b/VNFs/DPPD-PROX/handle_master.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,10 +14,19 @@
// limitations under the License.
*/
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <net/if.h>
+
#include <rte_hash.h>
#include <rte_hash_crc.h>
-#include "prox_cfg.h"
+#include <rte_ether.h>
+#include <rte_icmp.h>
+#include "prox_cfg.h"
#include "prox_globals.h"
#include "rx_pkt.h"
#include "arp.h"
@@ -26,7 +35,6 @@
#include "mbuf_utils.h"
#include "etypes.h"
#include "defaults.h"
-#include "prox_cfg.h"
#include "prox_malloc.h"
#include "quit.h"
#include "task_init.h"
@@ -35,11 +43,37 @@
#include "lconf.h"
#include "input.h"
#include "tx_pkt.h"
+#include "defines.h"
+#include "prox_ipv6.h"
+#include "packet_utils.h"
-#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
#define PROX_MAX_ARP_REQUESTS 32 // Maximum number of tasks requesting the same MAC address
+#define NETLINK_BUF_SIZE 16384
+
+static char netlink_buf[NETLINK_BUF_SIZE];
-const char *actions_string[] = {"UPDATE_FROM_CTRL", "SEND_ARP_REQUEST_FROM_CTRL", "SEND_ARP_REPLY_FROM_CTRL", "HANDLE_ARP_TO_CTRL", "REQ_MAC_TO_CTRL"};
+const char *actions_string[] = {
+ "MAC_INFO_FROM_MASTER", // Controlplane sending a MAC update to dataplane
+ "MAC_INFO_FROM_MASTER_FOR_IPV6",// Controlplane sending a MAC update to dataplane
+ "IPV6_INFO_FROM_MASTER", // Controlplane IPv6 Global IP info to dataplane
+ "ROUTE_ADD_FROM_MASTER", // Controlplane sending a new route to dataplane
+ "ROUTE_DEL_FROM_MASTER", // Controlplane deleting a new route from dataplane
+ "SEND_ARP_REQUEST_FROM_MASTER", // Controlplane requesting dataplane to send ARP request
+ "SEND_ARP_REPLY_FROM_MASTER", // Controlplane requesting dataplane to send ARP reply
+ "SEND_NDP_FROM_MASTER", // Controlplane requesting dataplane to send NDP
+ "SEND_ICMP_FROM_MASTER", // Controlplane requesting dataplane to send ICMP message
+ "SEND_BGP_FROM_MASTER", // Controlplane requesting dataplane to send BGP message
+ "ARP_PKT_FROM_NET_TO_MASTER", // ARP sent by dataplane to Controlplane for handling
+ "NDP_PKT_FROM_NET_TO_MASTER", // NDP sent by dataplane to Controlplane for handling
+ "ICMP_TO_MASTER", // ICMP sent by dataplane to Controlplane for handling
+ "BGP_TO_MASTER", // BGP sent by dataplane to Controlplane for handling
+ "IP4_REQ_MAC_TO_MASTER", // Dataplane requesting MAC resolution to Controlplane
+ "IP6_REQ_MAC_TO_MASTER", // Dataplane requesting MAC resolution to Controlplane
+ "PKT_FROM_TAP" // Packet received by Controlplane from kernel and forwarded to dataplane for sending
+
+};
+
+int (*handle_ctrl_plane)(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts) = NULL;
static struct my_arp_t arp_reply = {
.htype = 0x100,
@@ -56,78 +90,116 @@ static struct my_arp_t arp_request = {
.oper = 0x100
};
-struct ip_table {
- struct ether_addr mac;
- struct rte_ring *ring;
-};
-
-struct external_ip_table {
- struct ether_addr mac;
- struct rte_ring *rings[PROX_MAX_ARP_REQUESTS];
- uint16_t nb_requests;
-};
-
-struct port_table {
- struct ether_addr mac;
- struct rte_ring *ring;
- uint32_t ip;
- uint8_t port;
- uint8_t flags;
-};
-
-struct task_master {
- struct task_base base;
- struct rte_ring *ctrl_rx_ring;
- struct rte_ring **ctrl_tx_rings;
- struct ip_table *internal_ip_table;
- struct external_ip_table *external_ip_table;
- struct rte_hash *external_ip_hash;
- struct rte_hash *internal_ip_hash;
- struct port_table internal_port_table[PROX_MAX_PORTS];
-};
-
struct ip_port {
uint32_t ip;
uint8_t port;
} __attribute__((packed));
-static inline uint8_t get_command(struct rte_mbuf *mbuf)
-{
- return mbuf->udata64 & 0xFF;
-}
-static inline uint8_t get_task(struct rte_mbuf *mbuf)
-{
- return (mbuf->udata64 >> 8) & 0xFF;
-}
-static inline uint8_t get_core(struct rte_mbuf *mbuf)
+struct ip6_port {
+ struct ipv6_addr ip6;
+ uint8_t port;
+} __attribute__((packed));
+
+void register_router_to_ctrl_plane(struct task_base *tbase, uint8_t port_id, uint8_t core_id, uint8_t task_id, struct ipv6_addr *local_ipv6_addr, struct ipv6_addr *global_ipv6_addr, struct ipv6_addr *router_prefix)
{
- return (mbuf->udata64 >> 16) & 0xFF;
+ struct task_master *task = (struct task_master *)tbase;
+ task->internal_port_table[port_id].flags |= IPV6_ROUTER;
+ memcpy(&task->internal_port_table[port_id].router_prefix, router_prefix, sizeof(struct ipv6_addr));
+ register_node_to_ctrl_plane(tbase, local_ipv6_addr, global_ipv6_addr, port_id, core_id, task_id);
}
-static inline uint8_t get_port(struct rte_mbuf *mbuf)
+
+void register_node_to_ctrl_plane(struct task_base *tbase, struct ipv6_addr *local_ipv6_addr, struct ipv6_addr *global_ipv6_addr, uint8_t port_id, uint8_t core_id, uint8_t task_id)
{
- return mbuf->port;
+ struct task_master *task = (struct task_master *)tbase;
+ if (task->internal_port_table[port_id].flags & IPV6_ROUTER)
+ plogx_dbg("\tregistering router with port %d core %d and task %d\n", port_id, core_id, task_id);
+ else
+ plogx_dbg("\tregistering node with port %d core %d and task %d\n", port_id, core_id, task_id);
+
+ if (port_id >= PROX_MAX_PORTS) {
+ plog_err("Unable to register router, port %d\n", port_id);
+ return;
+ }
+ task->internal_port_table[port_id].ring = task->ctrl_tx_rings[core_id * MAX_TASKS_PER_CORE + task_id];
+ memcpy(&task->internal_port_table[port_id].mac, &prox_port_cfg[port_id].eth_addr, sizeof(prox_rte_ether_addr));
+ memcpy(&task->internal_port_table[port_id].local_ipv6_addr, local_ipv6_addr, sizeof(struct ipv6_addr));
+ if (memcmp(local_ipv6_addr, &prox_cfg.random_ip, sizeof(struct ipv6_addr)) == 0) {
+ task->internal_port_table[port_id].flags |= HANDLE_RANDOM_LOCAL_IP_FLAG;
+ return;
+ }
+ memcpy(&task->internal_port_table[port_id].global_ipv6_addr, global_ipv6_addr, sizeof(struct ipv6_addr));
+ if (memcmp(global_ipv6_addr, &prox_cfg.random_ip, sizeof(struct ipv6_addr)) == 0) {
+ task->internal_port_table[port_id].flags |= HANDLE_RANDOM_GLOBAL_IP_FLAG;
+ return;
+ }
+ struct ip6_port key;
+ memcpy(&key.ip6, local_ipv6_addr, sizeof(struct ipv6_addr));
+ key.port = port_id;
+ int ret = rte_hash_add_key(task->internal_ip6_hash, (const void *)&key);
+ if (unlikely(ret < 0)) {
+ plog_err("Unable to register ip "IPv6_BYTES_FMT"\n", IPv6_BYTES(local_ipv6_addr->bytes));
+ return;
+ }
+ memcpy(&key.ip6, global_ipv6_addr, sizeof(struct ipv6_addr));
+ ret = rte_hash_add_key(task->internal_ip6_hash, (const void *)&key);
+ if (unlikely(ret < 0)) {
+ plog_err("Unable to register ip "IPv6_BYTES_FMT"\n", IPv6_BYTES(global_ipv6_addr->bytes));
+ return;
+ }
+ memcpy(&task->internal_ip6_table[ret].mac, &prox_port_cfg[port_id].eth_addr, sizeof(prox_rte_ether_addr));
+ task->internal_ip6_table[ret].ring = task->ctrl_tx_rings[core_id * MAX_TASKS_PER_CORE + task_id];
}
-static inline uint32_t get_ip(struct rte_mbuf *mbuf)
+
+void master_init_vdev(struct task_base *tbase, uint8_t port_id, uint8_t core_id, uint8_t task_id)
{
- return (mbuf->udata64 >> 32) & 0xFFFFFFFF;
+ struct task_master *task = (struct task_master *)tbase;
+ uint8_t vdev_port = prox_port_cfg[port_id].dpdk_mapping;
+ int rc, i;
+ if (vdev_port != NO_VDEV_PORT) {
+ for (i = 0; i < task->max_vdev_id; i++) {
+ if (task->all_vdev[i].port_id == vdev_port)
+ break;
+ }
+ if (i < task->max_vdev_id) {
+ // Already initialized (e.g. by another core handling the same port).
+ return;
+ }
+ task->all_vdev[task->max_vdev_id].port_id = vdev_port;
+ task->all_vdev[task->max_vdev_id].ring = task->ctrl_tx_rings[core_id * MAX_TASKS_PER_CORE + task_id];
+
+ struct sockaddr_in dst, src;
+ src.sin_family = AF_INET;
+ src.sin_port = rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT);
+ for (int vlan_id = 0; vlan_id < prox_port_cfg[vdev_port].n_vlans; vlan_id++) {
+ src.sin_addr.s_addr = rte_be_to_cpu_32(prox_port_cfg[vdev_port].ip_addr[vlan_id].ip);
+ int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ PROX_PANIC(fd < 0, "Failed to open socket(AF_INET, SOCK_DGRAM, 0)\n");
+ prox_port_cfg[vdev_port].fds[vlan_id] = fd;
+ rc = bind(fd,(struct sockaddr *)&src, sizeof(struct sockaddr_in));
+ PROX_PANIC(rc, "Failed to bind("IPv4_BYTES_FMT":%d): errno = %d (%s)\n", IPv4_BYTES(((uint8_t*)&src.sin_addr.s_addr)), src.sin_port, errno, strerror(errno));
+ plog_info("DPDK port %d bound("IPv4_BYTES_FMT":%d) to fd %d\n", port_id, IPv4_BYTES(((uint8_t*)&src.sin_addr.s_addr)), src.sin_port, fd);
+ fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
+ }
+ task->max_vdev_id++;
+ }
}
void register_ip_to_ctrl_plane(struct task_base *tbase, uint32_t ip, uint8_t port_id, uint8_t core_id, uint8_t task_id)
{
struct task_master *task = (struct task_master *)tbase;
struct ip_port key;
- plogx_dbg("\tregistering IP %x.%x.%x.%x with port %d core %d and task %d\n", IP4(ip), port_id, core_id, task_id);
+ plogx_info("\tregistering IP "IPv4_BYTES_FMT" with port %d core %d and task %d\n", IP4(ip), port_id, core_id, task_id);
if (port_id >= PROX_MAX_PORTS) {
- plog_err("Unable to register ip %x, port %d\n", ip, port_id);
+ plog_err("Unable to register ip "IPv4_BYTES_FMT", port %d\n", IP4(ip), port_id);
return;
}
- /* TODO - stoe multiple rings if multiple cores able to handle IP
+ /* TODO - store multiple rings if multiple cores able to handle IP
Remove them when such cores are stopped and de-register IP
*/
task->internal_port_table[port_id].ring = task->ctrl_tx_rings[core_id * MAX_TASKS_PER_CORE + task_id];
- memcpy(&task->internal_port_table[port_id].mac, &prox_port_cfg[port_id].eth_addr, 6);
+ memcpy(&task->internal_port_table[port_id].mac, &prox_port_cfg[port_id].eth_addr, sizeof(prox_rte_ether_addr));
task->internal_port_table[port_id].ip = ip;
if (ip == RANDOM_IP) {
@@ -139,21 +211,19 @@ void register_ip_to_ctrl_plane(struct task_base *tbase, uint32_t ip, uint8_t por
key.port = port_id;
int ret = rte_hash_add_key(task->internal_ip_hash, (const void *)&key);
if (unlikely(ret < 0)) {
- plog_err("Unable to register ip %x\n", ip);
+ plog_err("Unable to register ip "IPv4_BYTES_FMT"\n", IP4(ip));
return;
}
- memcpy(&task->internal_ip_table[ret].mac, &prox_port_cfg[port_id].eth_addr, 6);
+ memcpy(&task->internal_ip_table[ret].mac, &prox_port_cfg[port_id].eth_addr, sizeof(prox_rte_ether_addr));
task->internal_ip_table[ret].ring = task->ctrl_tx_rings[core_id * MAX_TASKS_PER_CORE + task_id];
-
}
-static inline void handle_arp_reply(struct task_base *tbase, struct rte_mbuf *mbuf)
+static inline void handle_arp_reply(struct task_base *tbase, struct rte_mbuf *mbuf, struct my_arp_t *arp)
{
struct task_master *task = (struct task_master *)tbase;
- struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
int i, ret;
- uint32_t key = hdr_arp->arp.data.spa;
- plogx_dbg("\tMaster handling ARP reply for ip %x\n", key);
+ uint32_t key = arp->data.spa;
+ plogx_dbg("\tMaster handling ARP reply for ip "IPv4_BYTES_FMT"\n", IP4(key));
ret = rte_hash_lookup(task->external_ip_hash, (const void *)&key);
if (unlikely(ret < 0)) {
@@ -162,53 +232,85 @@ static inline void handle_arp_reply(struct task_base *tbase, struct rte_mbuf *mb
} else {
// entry found for this IP
uint16_t nb_requests = task->external_ip_table[ret].nb_requests;
- memcpy(&hdr_arp->ether_hdr.d_addr.addr_bytes, &task->external_ip_table[ret].mac, 6);
// If we receive a request from multiple task for the same IP, then we update all tasks
if (task->external_ip_table[ret].nb_requests) {
rte_mbuf_refcnt_set(mbuf, nb_requests);
for (int i = 0; i < nb_requests; i++) {
struct rte_ring *ring = task->external_ip_table[ret].rings[i];
- tx_ring_ip(tbase, ring, UPDATE_FROM_CTRL, mbuf, key);
+ tx_ring_ip(tbase, ring, MAC_INFO_FROM_MASTER, mbuf, key);
}
task->external_ip_table[ret].nb_requests = 0;
+ } else {
+ tx_drop(mbuf);
}
}
}
-static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf *mbuf)
+static inline void handle_arp_request(struct task_base *tbase, struct rte_mbuf *mbuf, struct my_arp_t *arp)
{
struct task_master *task = (struct task_master *)tbase;
- struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
+ prox_rte_ether_hdr *ether_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
int i, ret;
uint8_t port = get_port(mbuf);
struct ip_port key;
- key.ip = hdr_arp->arp.data.tpa;
+ key.ip = arp->data.tpa;
key.port = port;
if (task->internal_port_table[port].flags & HANDLE_RANDOM_IP_FLAG) {
- struct ether_addr mac;
- plogx_dbg("\tMaster handling ARP request for ip %x on port %d which supports random ip\n", key.ip, key.port);
+ prox_rte_ether_addr mac;
+ plogx_dbg("\tMaster handling ARP request for ip "IPv4_BYTES_FMT" on port %d which supports random ip\n", IP4(key.ip), key.port);
struct rte_ring *ring = task->internal_port_table[port].ring;
- create_mac(hdr_arp, &mac);
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
- build_arp_reply(hdr_arp, &mac);
- tx_ring(tbase, ring, ARP_REPLY_FROM_CTRL, mbuf);
+ create_mac(arp, &mac);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
+ build_arp_reply(ether_hdr, &mac, arp);
+ tx_ring(tbase, ring, SEND_ARP_REPLY_FROM_MASTER, mbuf);
return;
}
- plogx_dbg("\tMaster handling ARP request for ip %x\n", key.ip);
+ plogx_dbg("\tMaster handling ARP request for ip "IPv4_BYTES_FMT"\n", IP4(key.ip));
ret = rte_hash_lookup(task->internal_ip_hash, (const void *)&key);
if (unlikely(ret < 0)) {
// entry not found for this IP.
- plogx_dbg("Master ignoring ARP REQUEST received on un-registered IP %d.%d.%d.%d on port %d\n", IP4(hdr_arp->arp.data.tpa), port);
+ plogx_dbg("Master ignoring ARP REQUEST received on un-registered IP "IPv4_BYTES_FMT" on port %d\n", IP4(arp->data.tpa), port);
tx_drop(mbuf);
} else {
struct rte_ring *ring = task->internal_ip_table[ret].ring;
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
- build_arp_reply(hdr_arp, &task->internal_ip_table[ret].mac);
- tx_ring(tbase, ring, ARP_REPLY_FROM_CTRL, mbuf);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
+ build_arp_reply(ether_hdr, &task->internal_ip_table[ret].mac, arp);
+ tx_ring(tbase, ring, SEND_ARP_REPLY_FROM_MASTER, mbuf);
+ }
+}
+
+static inline int record_request(struct task_base *tbase, uint32_t ip_dst, uint8_t port, struct rte_ring *ring)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ int ret = rte_hash_add_key(task->external_ip_hash, (const void *)&ip_dst);
+ int i;
+
+ if (unlikely(ret < 0)) {
+ plogx_dbg("Unable to add IP "IPv4_BYTES_FMT" in external_ip_hash\n", IP4(ip_dst));
+ return -1;
+ }
+
+ // If multiple tasks requesting the same info, we will need to send a reply to all of them
+ // However if one task sends multiple requests to the same IP (e.g. because it is not answering)
+ // then we should not send multiple replies to the same task
+ if (task->external_ip_table[ret].nb_requests >= PROX_MAX_ARP_REQUESTS) {
+ // This can only happen if really many tasks requests the same IP
+ plogx_dbg("Unable to add request for IP "IPv4_BYTES_FMT" in external_ip_table\n", IP4(ip_dst));
+ return -1;
+ }
+ for (i = 0; i < task->external_ip_table[ret].nb_requests; i++) {
+ if (task->external_ip_table[ret].rings[i] == ring)
+ break;
}
+ if (i >= task->external_ip_table[ret].nb_requests) {
+ // If this is a new request i.e. a new task requesting a new IP
+ task->external_ip_table[ret].rings[task->external_ip_table[ret].nb_requests] = ring;
+ task->external_ip_table[ret].nb_requests++;
+ }
+ return 0;
}
static inline void handle_unknown_ip(struct task_base *tbase, struct rte_mbuf *mbuf)
@@ -217,9 +319,9 @@ static inline void handle_unknown_ip(struct task_base *tbase, struct rte_mbuf *m
struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
uint8_t port = get_port(mbuf);
uint32_t ip_dst = get_ip(mbuf);
- int ret1, ret2;
+ uint16_t vlan = ctrl_ring_get_vlan(mbuf);
- plogx_dbg("\tMaster handling unknown ip %x for port %d\n", ip_dst, port);
+ plogx_dbg("\tMaster handling unknown ip "IPv4_BYTES_FMT" for port %d\n", IP4(ip_dst), port);
if (unlikely(port >= PROX_MAX_PORTS)) {
plogx_dbg("Port %d not found", port);
tx_drop(mbuf);
@@ -234,54 +336,559 @@ static inline void handle_unknown_ip(struct task_base *tbase, struct rte_mbuf *m
return;
}
- ret2 = rte_hash_add_key(task->external_ip_hash, (const void *)&ip_dst);
+ if (record_request(tbase, ip_dst, port, ring) < 0) {
+ tx_drop(mbuf);
+ return;
+ }
+ // We send an ARP request even if one was just sent (and not yet answered) by another task
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
+ build_arp_request(mbuf, &task->internal_port_table[port].mac, ip_dst, ip_src, vlan);
+ tx_ring(tbase, ring, SEND_ARP_REQUEST_FROM_MASTER, mbuf);
+}
+
+static inline void build_icmp_reply_message(struct task_base *tbase, struct rte_mbuf *mbuf)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ struct ip_port key;
+ key.port = mbuf->port;
+ prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_addr dst_mac;
+ prox_rte_ether_addr_copy(&hdr->s_addr, &dst_mac);
+ prox_rte_ether_addr_copy(&hdr->d_addr, &hdr->s_addr);
+ prox_rte_ether_addr_copy(&dst_mac, &hdr->d_addr);
+ prox_rte_ipv4_hdr *ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1);
+ key.ip = ip_hdr->dst_addr;
+ ip_hdr->dst_addr = ip_hdr->src_addr;
+ ip_hdr->src_addr = key.ip;
+ prox_rte_icmp_hdr *picmp = (prox_rte_icmp_hdr *)(ip_hdr + 1);
+ picmp->icmp_type = PROX_RTE_IP_ICMP_ECHO_REPLY;
+
+ int ret = rte_hash_lookup(task->internal_ip_hash, (const void *)&key);
+ if (unlikely(ret < 0)) {
+ // entry not found for this IP.
+ plogx_dbg("Master ignoring ICMP received on un-registered IP "IPv4_BYTES_FMT" on port %d\n", IP4(key.ip), mbuf->port);
+ tx_drop(mbuf);
+ } else {
+ struct rte_ring *ring = task->internal_ip_table[ret].ring;
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM);
+ tx_ring(tbase, ring, SEND_ICMP_FROM_MASTER, mbuf);
+ }
+}
+
+static inline void handle_icmp(struct task_base *tbase, struct rte_mbuf *mbuf)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ uint8_t port_id = get_port(mbuf);
+ struct port_table *port = &task->internal_port_table[port_id];
+ prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ if (hdr->ether_type != ETYPE_IPv4) {
+ tx_drop(mbuf);
+ return;
+ }
+ prox_rte_ipv4_hdr *ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1);
+ if (ip_hdr->next_proto_id != IPPROTO_ICMP) {
+ tx_drop(mbuf);
+ return;
+ }
+ if (ip_hdr->dst_addr != port->ip) {
+ tx_drop(mbuf);
+ return;
+ }
+
+ prox_rte_icmp_hdr *picmp = (prox_rte_icmp_hdr *)(ip_hdr + 1);
+ uint8_t type = picmp->icmp_type;
+ if (type == PROX_RTE_IP_ICMP_ECHO_REQUEST) {
+ port->n_echo_req++;
+ if (rte_rdtsc() - port->last_echo_req_rcvd_tsc > rte_get_tsc_hz()) {
+ plog_dbg("Received %u Echo Request on IP "IPv4_BYTES_FMT" (last received from IP "IPv4_BYTES_FMT")\n", port->n_echo_req, IPv4_BYTES(((uint8_t*)&ip_hdr->dst_addr)), IPv4_BYTES(((uint8_t*)&ip_hdr->src_addr)));
+ port->n_echo_req = 0;
+ port->last_echo_req_rcvd_tsc = rte_rdtsc();
+ }
+ return build_icmp_reply_message(tbase, mbuf);
+ } else if (type == PROX_RTE_IP_ICMP_ECHO_REPLY) {
+ port->n_echo_rep++;
+ if (rte_rdtsc() - port->last_echo_rep_rcvd_tsc > rte_get_tsc_hz()) {
+ plog_info("Received %u Echo Reply on IP "IPv4_BYTES_FMT" (last received from IP "IPv4_BYTES_FMT")\n", port->n_echo_rep, IPv4_BYTES(((uint8_t*)&ip_hdr->dst_addr)), IPv4_BYTES(((uint8_t*)&ip_hdr->src_addr)));
+ port->n_echo_rep = 0;
+ port->last_echo_rep_rcvd_tsc = rte_rdtsc();
+ }
+ }
+ tx_drop(mbuf);
+ return;
+}
+
+static inline void handle_unknown_ip6(struct task_base *tbase, struct rte_mbuf *mbuf)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
+ uint8_t port_id = get_port(mbuf);
+ struct ipv6_addr *ip_dst = ctrl_ring_get_ipv6_addr(mbuf);
+ uint16_t vlan = ctrl_ring_get_vlan(mbuf);
+ int ret1, ret2, i;
+
+ plogx_dbg("\tMaster trying to find MAC of external IP "IPv6_BYTES_FMT" for port %d\n", IPv6_BYTES(ip_dst->bytes), port_id);
+ if (unlikely(port_id >= PROX_MAX_PORTS)) {
+ plogx_dbg("Port %d not found", port_id);
+ tx_drop(mbuf);
+ return;
+ }
+ struct ipv6_addr *local_ip_src = &task->internal_port_table[port_id].local_ipv6_addr;
+ struct ipv6_addr *global_ip_src = &task->internal_port_table[port_id].global_ipv6_addr;
+ struct ipv6_addr *ip_src;
+ if (memcmp(local_ip_src, ip_dst, prox_port_cfg[port_id].v6_mask_length) == 0)
+ ip_src = local_ip_src;
+ else if (memcmp(global_ip_src, &null_addr, 16))
+ ip_src = global_ip_src;
+ else {
+ plogx_dbg("Unable to find a src ip for dst ip "IPv6_BYTES_FMT"\n", IPv6_BYTES(ip_dst->bytes));
+ tx_drop(mbuf);
+ return;
+ }
+ struct rte_ring *ring = task->ctrl_tx_rings[get_core(mbuf) * MAX_TASKS_PER_CORE + get_task(mbuf)];
+
+ if (ring == NULL) {
+ plogx_dbg("Port %d not registered", port_id);
+ tx_drop(mbuf);
+ return;
+ }
+
+ ret2 = rte_hash_add_key(task->external_ip6_hash, (const void *)ip_dst);
if (unlikely(ret2 < 0)) {
- // entry not found for this IP: delete the reply
- plogx_dbg("Unable to add IP %x in external_ip_hash\n", rte_be_to_cpu_32(hdr_arp->arp.data.tpa));
+ plogx_dbg("Unable to add IP "IPv6_BYTES_FMT" in external_ip6_hash\n", IPv6_BYTES(ip_dst->bytes));
tx_drop(mbuf);
return;
}
- task->external_ip_table[ret2].rings[task->external_ip_table[ret2].nb_requests] = ring;
- task->external_ip_table[ret2].nb_requests++;
- memcpy(&task->external_ip_table[ret2].mac, &task->internal_port_table[port].mac, 6);
- // We send an ARP request even if one was just sent (and not yet answered) by another task
- mbuf->ol_flags &= ~(PKT_TX_IP_CKSUM|PKT_TX_UDP_CKSUM);
- build_arp_request(mbuf, &task->internal_port_table[port].mac, ip_dst, ip_src);
- tx_ring(tbase, ring, ARP_REQ_FROM_CTRL, mbuf);
+ // If multiple tasks requesting the same info, we will need to send a reply to all of them
+ // However if one task sends multiple requests to the same IP (e.g. because it is not answering)
+ // then we should not send multiple replies to the same task
+ if (task->external_ip6_table[ret2].nb_requests >= PROX_MAX_ARP_REQUESTS) {
+ // This can only happen if really many tasks requests the same IP
+ plogx_dbg("Unable to add request for IP "IPv6_BYTES_FMT" in external_ip6_table\n", IPv6_BYTES(ip_dst->bytes));
+ tx_drop(mbuf);
+ return;
+ }
+ for (i = 0; i < task->external_ip6_table[ret2].nb_requests; i++) {
+ if (task->external_ip6_table[ret2].rings[i] == ring)
+ break;
+ }
+ if (i >= task->external_ip6_table[ret2].nb_requests) {
+ // If this is a new request i.e. a new task requesting a new IP
+ task->external_ip6_table[ret2].rings[task->external_ip6_table[ret2].nb_requests] = ring;
+ task->external_ip6_table[ret2].nb_requests++;
+ // Only needed for first request - but avoid test and copy the same 6 bytes
+ // In most cases we will only have one request per IP.
+ //memcpy(&task->external_ip6_table[ret2].mac, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));
+ }
+
+ // As timers are not handled by master, we might send an NS request even if one was just sent
+ // (and not yet answered) by another task
+ build_neighbour_sollicitation(mbuf, &task->internal_port_table[port_id].mac, ip_dst, ip_src, vlan);
+ tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
}
-static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf, int ring_id)
+static inline void handle_rs(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ int i, ret;
+ uint8_t port = get_port(mbuf);
+
+ if (task->internal_port_table[port].flags & IPV6_ROUTER) {
+ plogx_dbg("\tMaster handling Router Solicitation from ip "IPv6_BYTES_FMT" on port %d\n", IPv6_BYTES(ipv6_hdr->src_addr), port);
+ struct rte_ring *ring = task->internal_port_table[port].ring;
+ build_router_advertisement(mbuf, &prox_port_cfg[port].eth_addr, &task->internal_port_table[port].local_ipv6_addr, &task->internal_port_table[port].router_prefix, vlan);
+ tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
+ return;
+ }
+}
+
+static inline void handle_ra(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ int i, ret, send = 0;
+ uint8_t port = get_port(mbuf);
+ struct rte_ring *ring = task->internal_port_table[port].ring;
+
+ plog_dbg("Master handling Router Advertisement from ip "IPv6_BYTES_FMT" on port %d - len = %d; payload_len = %d\n", IPv6_BYTES(ipv6_hdr->src_addr), port, rte_pktmbuf_pkt_len(mbuf), rte_be_to_cpu_16(ipv6_hdr->payload_len));
+ if (rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr) > rte_pktmbuf_pkt_len(mbuf)) {
+ plog_err("Unexpected length received: pkt_len = %d, ipv6 hdr length = %ld, ipv6 payload len = %d\n", rte_pktmbuf_pkt_len(mbuf), sizeof(prox_rte_ipv6_hdr), rte_be_to_cpu_16(ipv6_hdr->payload_len));
+ tx_drop(mbuf);
+ return;
+ }
+ if (ring == NULL) {
+ plog_info("TX side not initialized yet => dropping\n");
+ tx_drop(mbuf);
+ return;
+ }
+ int16_t option_len = rte_be_to_cpu_16(ipv6_hdr->payload_len) - sizeof(struct icmpv6_RA) + sizeof(struct icmpv6_option);
+ struct icmpv6_RA *router_advertisement = (struct icmpv6_RA *)(ipv6_hdr + 1);
+ struct icmpv6_option *option = (struct icmpv6_option *)&router_advertisement->options;
+ struct icmpv6_prefix_option *prefix_option;
+ while(option_len > 0) {
+ uint8_t type = option->type;
+ switch(type) {
+ case ICMPv6_source_link_layer_address:
+ plog_dbg("\tOption %d = Source Link Layer Address\n", type);
+ break;
+ case ICMPv6_prefix_information:
+ prefix_option = (struct icmpv6_prefix_option *)option;
+ plog_dbg("\tOption %d = Prefix Information = %s\n", type, IP6_Canonical(&prefix_option->prefix));
+ send = 1;
+ break;
+ case ICMPv6_mtu:
+ plog_dbg("\tOption %d = MTU\n", type);
+ break;
+ default:
+ plog_dbg("\tOption %d = Unknown Option\n", type);
+ break;
+ }
+ if ((option->length == 0) || (option->length *8 > option_len)) {
+ plog_err("Unexpected option length (%d) received in option %d: %d\n", option->length, option->type, option->length);
+ send = 0;
+ break;
+ }
+ option_len -=option->length * 8;
+ option = (struct icmpv6_option *)(((uint8_t *)option) + option->length * 8);
+ }
+ if (send) {
+ struct ipv6_addr global_ipv6;
+ memcpy(&global_ipv6, &prefix_option->prefix, sizeof(struct ipv6_addr));
+ set_EUI(&global_ipv6, &task->internal_port_table[port].mac);
+ tx_ring_ip6(tbase, ring, IPV6_INFO_FROM_MASTER, mbuf, &global_ipv6);
+ } else
+ tx_drop(mbuf);
+}
+
+static inline void handle_ns(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
{
+ struct task_master *task = (struct task_master *)tbase;
+ struct icmpv6_NS *neighbour_sollicitation = (struct icmpv6_NS *)(ipv6_hdr + 1);
+ int i, ret;
+ uint8_t port = get_port(mbuf);
+ struct rte_ring *ring = task->internal_port_table[port].ring;
+
+ plog_dbg("Master handling Neighbour Sollicitation for ip "IPv6_BYTES_FMT" on port %d - len = %d; payload_len = %d\n", IPv6_BYTES(neighbour_sollicitation->target_address.bytes), port, rte_pktmbuf_pkt_len(mbuf), rte_be_to_cpu_16(ipv6_hdr->payload_len));
+ if (rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr) > rte_pktmbuf_pkt_len(mbuf)) {
+ plog_err("Unexpected length received: pkt_len = %d, ipv6 hdr length = %ld, ipv6 payload len = %d\n", rte_pktmbuf_pkt_len(mbuf), sizeof(prox_rte_ipv6_hdr), rte_be_to_cpu_16(ipv6_hdr->payload_len));
+ tx_drop(mbuf);
+ return;
+ }
+ int16_t option_len = rte_be_to_cpu_16(ipv6_hdr->payload_len) - sizeof(struct icmpv6_NS) + sizeof(struct icmpv6_option);
+ struct icmpv6_option *option = (struct icmpv6_option *)&neighbour_sollicitation->options;
+ while(option_len > 0) {
+ uint8_t type = option->type;
+ switch(type) {
+ case ICMPv6_source_link_layer_address:
+ plog_dbg("Option %d = Source Link Layer Address\n", type);
+ break;
+ default:
+ plog_dbg("Option %d = Unknown Option\n", type);
+ break;
+ }
+ if ((option->length == 0) || (option->length *8 > option_len)) {
+ plog_err("Unexpected option length (%d) received in option %d: %d\n", option->length, option->type, option->length);
+ tx_drop(mbuf);
+ return;
+ }
+ option_len -=option->length * 8;
+ option = (struct icmpv6_option *)(((uint8_t *)option) + option->length * 8);
+ }
+ struct ip6_port key;
+ memcpy(&key.ip6, &neighbour_sollicitation->target_address, sizeof(struct ipv6_addr));
+ key.port = port;
+
+ if (memcmp(&neighbour_sollicitation->target_address, &task->internal_port_table[port].local_ipv6_addr, 8) == 0) {
+ // Local IP
+ if (task->internal_port_table[port].flags & HANDLE_RANDOM_LOCAL_IP_FLAG) {
+ prox_rte_ether_addr mac;
+ plogx_dbg("\tMaster handling NS request for ip "IPv6_BYTES_FMT" on port %d which supports random ip\n", IPv6_BYTES(key.ip6.bytes), key.port);
+ struct rte_ring *ring = task->internal_port_table[port].ring;
+ create_mac_from_EUI(&key.ip6, &mac);
+ build_neighbour_advertisement(tbase, mbuf, &mac, &task->internal_port_table[port].local_ipv6_addr, PROX_SOLLICITED, vlan);
+ tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
+ return;
+ }
+ } else {
+ if (task->internal_port_table[port].flags & HANDLE_RANDOM_GLOBAL_IP_FLAG) {
+ prox_rte_ether_addr mac;
+ plogx_dbg("\tMaster handling NS request for ip "IPv6_BYTES_FMT" on port %d which supports random ip\n", IPv6_BYTES(key.ip6.bytes), key.port);
+ struct rte_ring *ring = task->internal_port_table[port].ring;
+ create_mac_from_EUI(&key.ip6, &mac);
+ build_neighbour_advertisement(tbase, mbuf, &mac, &task->internal_port_table[port].global_ipv6_addr, PROX_SOLLICITED, vlan);
+ tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
+ return;
+ }
+ }
+
+ ret = rte_hash_lookup(task->internal_ip6_hash, (const void *)&key);
+ if (unlikely(ret < 0)) {
+ // entry not found for this IP.
+ plogx_dbg("Master ignoring Neighbour Sollicitation received on un-registered IP "IPv6_BYTES_FMT" on port %d\n", IPv6_BYTES(key.ip6.bytes), port);
+ tx_drop(mbuf);
+ } else {
+ struct rte_ring *ring = task->internal_ip6_table[ret].ring;
+ if (ring == NULL) return;
+ build_neighbour_advertisement(tbase, mbuf, &task->internal_ip6_table[ret].mac, &key.ip6, PROX_SOLLICITED, vlan);
+ tx_ring(tbase, ring, SEND_NDP_FROM_MASTER, mbuf);
+ }
+}
+
+static inline void handle_na(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ipv6_hdr *ipv6_hdr, uint16_t vlan)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ struct icmpv6_NA *neighbour_advertisement = (struct icmpv6_NA *)(ipv6_hdr + 1);
+ int i, ret;
+ uint8_t port = get_port(mbuf);
+ struct rte_ring *ring = task->internal_port_table[port].ring;
+
+ plog_dbg("Master handling Neighbour Advertisement for ip "IPv6_BYTES_FMT" on port %d - len = %d; payload_len = %d\n", IPv6_BYTES(neighbour_advertisement->destination_address.bytes), port, rte_pktmbuf_pkt_len(mbuf), rte_be_to_cpu_16(ipv6_hdr->payload_len));
+ if (rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr) > rte_pktmbuf_pkt_len(mbuf)) {
+ plog_err("Unexpected length received: pkt_len = %d, ipv6 hdr length = %ld, ipv6 payload len = %d\n", rte_pktmbuf_pkt_len(mbuf), sizeof(prox_rte_ipv6_hdr), rte_be_to_cpu_16(ipv6_hdr->payload_len));
+ tx_drop(mbuf);
+ return;
+ }
+ int16_t option_len = rte_be_to_cpu_16(ipv6_hdr->payload_len) - sizeof(struct icmpv6_NA) + sizeof(struct icmpv6_option);
+ struct icmpv6_option *option = (struct icmpv6_option *)&neighbour_advertisement->options;
+ uint8_t *target_address = NULL;
+ while(option_len > 0) {
+ uint8_t type = option->type;
+ switch(type) {
+ case ICMPv6_source_link_layer_address:
+ plog_dbg("Option %d = Source Link Layer Address\n", type);
+ break;
+ case ICMPv6_target_link_layer_address:
+ if (option->length != 1) {
+ plog_err("Unexpected option length = %u for Target Link Layer Address\n", option->length);
+ break;
+ }
+ target_address = option->data;
+ plog_dbg("Option %d = Target Link Layer Address = "MAC_BYTES_FMT"\n", type, MAC_BYTES(target_address));
+ break;
+ default:
+ plog_dbg("Option %d = Unknown Option\n", type);
+ break;
+ }
+ if ((option->length == 0) || (option->length *8 > option_len)) {
+ plog_err("Unexpected option length (%d) received in option %d: %d\n", option->length, option->type, option->length);
+ tx_drop(mbuf);
+ return;
+ }
+ option_len -=option->length * 8;
+ option = (struct icmpv6_option *)(((uint8_t *)option) + option->length * 8);
+ }
+
+ if (target_address == NULL) {
+ target_address = (uint8_t *)&neighbour_advertisement->destination_address;
+ }
struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
+ struct ipv6_addr *key = &neighbour_advertisement->destination_address;
+
+ ret = rte_hash_lookup(task->external_ip6_hash, (const void *)key);
+ if (unlikely(ret < 0)) {
+ // entry not found for this IP: we never sent a request, so drop the reply
+ plog_err("Unkown IP "IPv6_BYTES_FMT"", IPv6_BYTES(neighbour_advertisement->destination_address.bytes));
+ tx_drop(mbuf);
+ } else {
+ // entry found for this IP
+ uint16_t nb_requests = task->external_ip6_table[ret].nb_requests;
+ //memcpy(&hdr->d_addr.addr_bytes, &task->external_ip6_table[ret].mac, sizeof(prox_rte_ether_addr));
+ // If we received a request from multiple tasks for the same IP, then we update all tasks
+ if (task->external_ip6_table[ret].nb_requests) {
+ rte_mbuf_refcnt_set(mbuf, nb_requests);
+ for (int i = 0; i < nb_requests; i++) {
+ struct rte_ring *ring = task->external_ip6_table[ret].rings[i];
+ tx_ring_ip6_data(tbase, ring, MAC_INFO_FROM_MASTER_FOR_IPV6, mbuf, &neighbour_advertisement->destination_address, *(uint64_t *)target_address);
+ }
+ task->external_ip6_table[ret].nb_requests = 0;
+ } else {
+ plog_err("UNEXPECTED nb_requests == 0");
+ tx_drop(mbuf);
+ }
+ }
+}
+
+static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf, int ring_id)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ prox_rte_ether_hdr *ether_hdr;
+ struct icmpv6 *icmpv6;
int command = get_command(mbuf);
+ uint8_t port = get_port(mbuf);
uint32_t ip;
+ uint16_t vlan = 0, ether_type;
+ uint8_t vdev_port = prox_port_cfg[port].dpdk_mapping;
plogx_dbg("\tMaster received %s (%x) from mbuf %p\n", actions_string[command], command, mbuf);
+ struct my_arp_t *arp;
switch(command) {
- case ARP_TO_CTRL:
- if (hdr_arp->ether_hdr.ether_type != ETYPE_ARP) {
+ case BGP_TO_MASTER:
+ if (vdev_port != NO_VDEV_PORT) {
+ // If a virtual (net_tap) device is attached, send the (BGP) packet to this device
+ // The kernel will receive and handle it.
+ plogx_dbg("\tMaster forwarding BGP packet to TAP\n");
+ int n = rte_eth_tx_burst(prox_port_cfg[port].dpdk_mapping, 0, &mbuf, 1);
+ return;
+ }
+ tx_drop(mbuf);
+ break;
+ case ICMP_TO_MASTER:
+ if (vdev_port != NO_VDEV_PORT) {
+ // If a virtual (net_tap) device is attached, send the (PING) packet to this device
+ // The kernel will receive and handle it.
+ plogx_dbg("\tMaster forwarding packet to TAP\n");
+ int n = rte_eth_tx_burst(prox_port_cfg[port].dpdk_mapping, 0, &mbuf, 1);
+ return;
+ }
+ handle_icmp(tbase, mbuf);
+ break;
+ case ARP_PKT_FROM_NET_TO_MASTER:
+ if (vdev_port != NO_VDEV_PORT) {
+ // If a virtual (net_tap) device is attached, send the (ARP) packet to this device
+ // The kernel will receive and handle it.
+ plogx_dbg("\tMaster forwarding packet to TAP\n");
+ int n = rte_eth_tx_burst(prox_port_cfg[port].dpdk_mapping, 0, &mbuf, 1);
+ return;
+ }
+ ether_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ ether_type = ether_hdr->ether_type;
+ if (ether_type == ETYPE_VLAN) {
+ prox_rte_vlan_hdr *vlan_hdr = (prox_rte_vlan_hdr *)(ether_hdr + 1);
+ arp = (struct my_arp_t *)(vlan_hdr + 1);
+ ether_type = vlan_hdr->eth_proto;
+ } else {
+ arp = (struct my_arp_t *)(ether_hdr + 1);
+ }
+
+ if (ether_type != ETYPE_ARP) {
+ plog_err("\tUnexpected message received: ARP_PKT_FROM_NET_TO_MASTER with ether_type %x\n", ether_type);
tx_drop(mbuf);
- plog_err("\tUnexpected message received: ARP_TO_CTRL with ether_type %x\n", hdr_arp->ether_hdr.ether_type);
return;
- } else if (arp_is_gratuitous(hdr_arp)) {
+ }
+ if (arp_is_gratuitous(arp)) {
plog_info("\tReceived gratuitous packet \n");
tx_drop(mbuf);
return;
- } else if (memcmp(&hdr_arp->arp, &arp_reply, 8) == 0) {
- uint32_t ip = hdr_arp->arp.data.spa;
- handle_arp_reply(tbase, mbuf);
- } else if (memcmp(&hdr_arp->arp, &arp_request, 8) == 0) {
- handle_arp_request(tbase, mbuf);
+ } else if (memcmp(arp, &arp_reply, 8) == 0) {
+ // uint32_t ip = arp->data.spa;
+ handle_arp_reply(tbase, mbuf, arp);
+ } else if (memcmp(arp, &arp_request, 8) == 0) {
+ handle_arp_request(tbase, mbuf, arp);
} else {
- plog_info("\tReceived unexpected ARP operation %d\n", hdr_arp->arp.oper);
+ plog_info("\tReceived unexpected ARP operation %d\n", arp->oper);
tx_drop(mbuf);
return;
}
break;
- case REQ_MAC_TO_CTRL:
+ case IP4_REQ_MAC_TO_MASTER:
+ if (vdev_port != NO_VDEV_PORT) {
+ // We send a packet to the kernel with the proper destination IP address and our src IP address
+ // This means that if a generator sends packets from many sources all ARP will still
+ // be sent from the same IP src. This might be a limitation.
+ // This avoids having to open as many sockets as there are source MAC addresses
+ // We also always use the same UDP ports - as the packet will finally not leave the system anyhow
+
+ struct ether_hdr_arp *hdr_arp = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
+ uint32_t ip = get_ip(mbuf);
+ vlan = ctrl_ring_get_vlan(mbuf);
+ struct rte_ring *ring = task->ctrl_tx_rings[get_core(mbuf) * MAX_TASKS_PER_CORE + get_task(mbuf)];
+
+ // First check whether MAC address is not already in kernel MAC table.
+ // If present in our hash with a non-null MAC, then present in kernel. A null MAC
+ // might just mean that we sent a request.
+ // If MAC present in kernel, do not send a packet towards the kernel to try to generate
+ // an ARP request, as the kernel would not generate it.
+ int ret = rte_hash_lookup(task->external_ip_hash, (const void *)&ip);
+ if ((ret >= 0) && (!prox_rte_is_zero_ether_addr(&task->external_ip_table[ret].mac))) {
+ memcpy(&hdr_arp->arp.data.sha, &task->external_ip_table[ret].mac, sizeof(prox_rte_ether_addr));
+ plogx_dbg("\tMaster ready to send MAC_INFO_FROM_MASTER ip "IPv4_BYTES_FMT" with mac "MAC_BYTES_FMT"\n",
+ IP4(ip), MAC_BYTES(hdr_arp->arp.data.sha.addr_bytes));
+ tx_ring_ip(tbase, ring, MAC_INFO_FROM_MASTER, mbuf, ip);
+ return;
+ }
+
+ struct sockaddr_in dst;
+ dst.sin_family = AF_INET;
+ dst.sin_addr.s_addr = ip;
+ dst.sin_port = rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT);
+
+ int vlan_id;
+ for (vlan_id = 0; vlan_id < prox_port_cfg[vdev_port].n_vlans; vlan_id++) {
+ if (prox_port_cfg[vdev_port].vlan_tags[vlan_id] == vlan)
+ break;
+ }
+ if (vlan_id >= prox_port_cfg[vdev_port].n_vlans) {
+ // Tag not found
+ plogx_info("\tDid not send to TAP IP "IPv4_BYTES_FMT" as wrong VLAN %d\n", IPv4_BYTES(((uint8_t*)&ip)), vlan);
+ tx_drop(mbuf);
+ break;
+ }
+ int n = sendto(prox_port_cfg[vdev_port].fds[vlan_id], (char*)(&ip), 0, MSG_DONTROUTE, (struct sockaddr *)&dst, sizeof(struct sockaddr_in));
+ if (n < 0) {
+ plogx_info("\tFailed to send to TAP IP "IPv4_BYTES_FMT" using fd %d, error = %d (%s)\n", IPv4_BYTES(((uint8_t*)&ip)), prox_port_cfg[vdev_port].fds[vlan_id], errno, strerror(errno));
+ } else
+ plogx_dbg("\tSent %d bytes to TAP IP "IPv4_BYTES_FMT" using fd %d\n", n, IPv4_BYTES(((uint8_t*)&ip)), prox_port_cfg[vdev_port].fds[vlan_id]);
+
+ record_request(tbase, ip, port, ring);
+ tx_drop(mbuf);
+ break;
+ }
handle_unknown_ip(tbase, mbuf);
break;
+ case IP6_REQ_MAC_TO_MASTER:
+ handle_unknown_ip6(tbase, mbuf);
+ break;
+ case NDP_PKT_FROM_NET_TO_MASTER:
+ ether_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_get_ipv6_hdr(ether_hdr, rte_pktmbuf_pkt_len(mbuf), &vlan);
+ if (unlikely((!ipv6_hdr) || (ipv6_hdr->proto != ICMPv6))) {
+ // Should not happen
+ if (!ipv6_hdr)
+ plog_err("\tUnexpected message received: NDP_PKT_FROM_NET_TO_MASTER with ether_type %x\n", ether_hdr->ether_type);
+ else
+ plog_err("\tUnexpected message received: NDP_PKT_FROM_NET_TO_MASTER with ether_type %x and proto %x\n", ether_hdr->ether_type, ipv6_hdr->proto);
+ tx_drop(mbuf);
+ return;
+ }
+ icmpv6 = (struct icmpv6 *)(ipv6_hdr + 1);
+ switch (icmpv6->type) {
+ case ICMPv6_DU:
+ plog_err("IPV6 ICMPV6 Destination Unreachable\n");
+ tx_drop(mbuf);
+ break;
+ case ICMPv6_PTB:
+ plog_err("IPV6 ICMPV6 packet too big\n");
+ tx_drop(mbuf);
+ break;
+ case ICMPv6_TE:
+ plog_err("IPV6 ICMPV6 Time Exceeded\n");
+ tx_drop(mbuf);
+ break;
+ case ICMPv6_PaPr:
+ plog_err("IPV6 ICMPV6 Parameter Problem\n");
+ tx_drop(mbuf);
+ break;
+ case ICMPv6_RS:
+ handle_rs(tbase, mbuf, ipv6_hdr, vlan);
+ break;
+ case ICMPv6_RA:
+ handle_ra(tbase, mbuf, ipv6_hdr, vlan);
+ break;
+ case ICMPv6_NS:
+ handle_ns(tbase, mbuf, ipv6_hdr, vlan);
+ break;
+ case ICMPv6_NA:
+ handle_na(tbase, mbuf, ipv6_hdr, vlan);
+ break;
+ case ICMPv6_RE:
+ plog_err("IPV6 ICMPV6 Redirect not handled\n");
+ tx_drop(mbuf);
+ break;
+ default:
+ plog_err("Unexpected type %d in IPV6 ICMPV6\n", icmpv6->type);
+ tx_drop(mbuf);
+ break;
+ }
+ break;
default:
plogx_dbg("\tMaster received unexpected message\n");
tx_drop(mbuf);
@@ -291,39 +898,272 @@ static inline void handle_message(struct task_base *tbase, struct rte_mbuf *mbuf
void init_ctrl_plane(struct task_base *tbase)
{
- prox_cfg.flags |= DSF_CTRL_PLANE_ENABLED;
struct task_master *task = (struct task_master *)tbase;
- int socket = rte_lcore_to_socket_id(prox_cfg.master);
+ int socket_id = rte_lcore_to_socket_id(prox_cfg.master);
uint32_t n_entries = MAX_ARP_ENTRIES * 4;
static char hash_name[30];
+
sprintf(hash_name, "A%03d_hash_arp_table", prox_cfg.master);
struct rte_hash_parameters hash_params = {
.name = hash_name,
.entries = n_entries,
- .key_len = sizeof(uint32_t),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket_id,
};
- task->external_ip_hash = rte_hash_create(&hash_params);
- PROX_PANIC(task->external_ip_hash == NULL, "Failed to set up external ip hash\n");
- plog_info("\texternal ip hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
- task->external_ip_table = (struct external_ip_table *)prox_zmalloc(n_entries * sizeof(struct external_ip_table), socket);
- PROX_PANIC(task->external_ip_table == NULL, "Failed to allocate memory for %u entries in external ip table\n", n_entries);
- plog_info("\texternal ip table, with %d entries of size %ld\n", n_entries, sizeof(struct external_ip_table));
-
- hash_name[0]++;
- hash_params.key_len = sizeof(struct ip_port);
- task->internal_ip_hash = rte_hash_create(&hash_params);
- PROX_PANIC(task->internal_ip_hash == NULL, "Failed to set up internal ip hash\n");
- plog_info("\tinternal ip hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
- task->internal_ip_table = (struct ip_table *)prox_zmalloc(n_entries * sizeof(struct ip_table), socket);
- PROX_PANIC(task->internal_ip_table == NULL, "Failed to allocate memory for %u entries in internal ip table\n", n_entries);
- plog_info("\tinternal ip table, with %d entries of size %ld\n", n_entries, sizeof(struct ip_table));
+ if (prox_cfg.flags & DSF_L3_ENABLED) {
+ hash_params.key_len = sizeof(uint32_t);
+ task->external_ip_hash = rte_hash_create(&hash_params);
+ PROX_PANIC(task->external_ip_hash == NULL, "Failed to set up external ip hash\n");
+ plog_info("\texternal ip hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+ hash_name[0]++;
+
+ task->external_ip_table = (struct external_ip_table *)prox_zmalloc(n_entries * sizeof(struct external_ip_table), socket_id);
+ PROX_PANIC(task->external_ip_table == NULL, "Failed to allocate memory for %u entries in external ip table\n", n_entries);
+ plog_info("\texternal ip table, with %d entries of size %ld\n", n_entries, sizeof(struct external_ip_table));
+
+ hash_params.key_len = sizeof(struct ip_port);
+ task->internal_ip_hash = rte_hash_create(&hash_params);
+ PROX_PANIC(task->internal_ip_hash == NULL, "Failed to set up internal ip hash\n");
+ plog_info("\tinternal ip hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+ hash_name[0]++;
+
+ task->internal_ip_table = (struct ip_table *)prox_zmalloc(n_entries * sizeof(struct ip_table), socket_id);
+ PROX_PANIC(task->internal_ip_table == NULL, "Failed to allocate memory for %u entries in internal ip table\n", n_entries);
+ plog_info("\tinternal ip table, with %d entries of size %ld\n", n_entries, sizeof(struct ip_table));
+ }
+
+ if (prox_cfg.flags & DSF_NDP_ENABLED) {
+ hash_params.key_len = sizeof(struct ipv6_addr);
+ task->external_ip6_hash = rte_hash_create(&hash_params);
+ PROX_PANIC(task->external_ip6_hash == NULL, "Failed to set up external ip6 hash\n");
+ plog_info("\texternal ip6 hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+ hash_name[0]++;
+
+ task->external_ip6_table = (struct external_ip_table *)prox_zmalloc(n_entries * sizeof(struct external_ip_table), socket_id);
+ PROX_PANIC(task->external_ip6_table == NULL, "Failed to allocate memory for %u entries in external ip6 table\n", n_entries);
+ plog_info("\texternal ip6_table, with %d entries of size %ld\n", n_entries, sizeof(struct external_ip_table));
+
+ hash_params.key_len = sizeof(struct ip6_port);
+ task->internal_ip6_hash = rte_hash_create(&hash_params);
+ PROX_PANIC(task->internal_ip6_hash == NULL, "Failed to set up internal ip6 hash\n");
+ plog_info("\tinternal ip6 hash table allocated, with %d entries of size %d\n", hash_params.entries, hash_params.key_len);
+ hash_name[0]++;
+
+ task->internal_ip6_table = (struct ip_table *)prox_zmalloc(n_entries * sizeof(struct ip_table), socket_id);
+ PROX_PANIC(task->internal_ip6_table == NULL, "Failed to allocate memory for %u entries in internal ip6 table\n", n_entries);
+ plog_info("\tinternal ip6 table, with %d entries of size %ld\n", n_entries, sizeof(struct ip_table));
+ }
+
+ int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ PROX_PANIC(fd < 0, "Failed to open netlink socket: %d\n", errno);
+ fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
+
+ struct sockaddr_nl sockaddr;
+ memset(&sockaddr, 0, sizeof(struct sockaddr_nl));
+ sockaddr.nl_family = AF_NETLINK;
+ sockaddr.nl_groups = RTMGRP_NEIGH | RTMGRP_NOTIFY;
+ int rc = bind(fd, (struct sockaddr *)&sockaddr, sizeof(struct sockaddr_nl));
+ PROX_PANIC(rc < 0, "Failed to bind to RTMGRP_NEIGH netlink group\n");
+ task->arp_fds.fd = fd;
+ task->arp_fds.events = POLL_IN;
+ plog_info("\tRTMGRP_NEIGH netlink group bound; fd = %d\n", fd);
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ PROX_PANIC(fd < 0, "Failed to open netlink socket: %d\n", errno);
+ fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
+ struct sockaddr_nl sockaddr2;
+ memset(&sockaddr2, 0, sizeof(struct sockaddr_nl));
+ sockaddr2.nl_family = AF_NETLINK;
+ sockaddr2.nl_groups = RTMGRP_IPV4_ROUTE | RTMGRP_NOTIFY;
+ rc = bind(fd, (struct sockaddr *)&sockaddr2, sizeof(struct sockaddr_nl));
+ PROX_PANIC(rc < 0, "Failed to bind to RTMGRP_NEIGH netlink group\n");
+ task->route_fds.fd = fd;
+ task->route_fds.events = POLL_IN;
+ plog_info("\tRTMGRP_IPV4_ROUTE netlink group bound; fd = %d\n", fd);
+
+ static char name[] = "master_arp_nd_pool";
+ const int NB_ARP_MBUF = 1024;
+ const int ARP_MBUF_SIZE = 2048;
+ const int NB_CACHE_ARP_MBUF = 256;
+ struct rte_mempool *ret = rte_mempool_create(name, NB_ARP_MBUF, ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF,
+ sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
+ rte_socket_id(), 0);
+ PROX_PANIC(ret == NULL, "Failed to allocate ARP memory pool on socket %u with %u elements\n",
+ rte_socket_id(), NB_ARP_MBUF);
+ plog_info("\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
+ ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF, rte_socket_id());
+ tbase->l3.arp_nd_pool = ret;
+}
+
+static void handle_route_event(struct task_base *tbase)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ struct rte_mbuf *mbufs[MAX_RING_BURST];
+ int fd = task->route_fds.fd, interface_index, mask = -1;
+ char interface_name[IF_NAMESIZE] = {0};
+ int len = recv(fd, netlink_buf, sizeof(netlink_buf), 0);
+ uint32_t ip = 0, gw_ip = 0;
+ if (len < 0) {
+ plog_err("Failed to recv from netlink: %d\n", errno);
+ return;
+ }
+ struct nlmsghdr * nl_hdr = (struct nlmsghdr *)netlink_buf;
+ if (nl_hdr->nlmsg_flags & NLM_F_MULTI) {
+ plog_err("Unexpected multipart netlink message\n");
+ return;
+ }
+ if ((nl_hdr->nlmsg_type != RTM_NEWROUTE) && (nl_hdr->nlmsg_type != RTM_DELROUTE))
+ return;
+
+ struct rtmsg *rtmsg = (struct rtmsg *)NLMSG_DATA(nl_hdr);
+ int rtm_family = rtmsg->rtm_family;
+ if (rtm_family != AF_INET) {
+ plog_warn("Unhandled non IPV4 routing message\n");
+ return;
+ }
+ if ((rtmsg->rtm_table != RT_TABLE_MAIN) && (rtmsg->rtm_table != RT_TABLE_LOCAL))
+ return;
+ int dst_len = rtmsg->rtm_dst_len;
+
+ struct rtattr *rta = (struct rtattr *)RTM_RTA(rtmsg);
+ int rtl = RTM_PAYLOAD(nl_hdr);
+ for (; RTA_OK(rta, rtl); rta = RTA_NEXT(rta, rtl)) {
+ switch (rta->rta_type) {
+ case RTA_DST:
+ ip = *((uint32_t *)RTA_DATA(rta));
+ break;
+ case RTA_OIF:
+ interface_index = *((int *)RTA_DATA(rta));
+ if (if_indextoname(interface_index, interface_name) == NULL) {
+ plog_info("Unknown Interface Index %d\n", interface_index);
+ }
+ break;
+ case RTA_METRICS:
+ mask = *((int *)RTA_DATA(rta));
+ break;
+ case RTA_GATEWAY:
+ gw_ip = *((uint32_t *)RTA_DATA(rta));
+ break;
+ default:
+ break;
+ }
+ }
+ int dpdk_vdev_port = -1;
+ for (int i = 0; i< prox_rte_eth_dev_count_avail(); i++) {
+ for (int vlan_id = 0; vlan_id < prox_port_cfg[i].n_vlans; vlan_id++) {
+ if (strcmp(prox_port_cfg[i].names[vlan_id], interface_name) == 0) {
+ dpdk_vdev_port = i;
+ break;
+ }
+ }
+ if (dpdk_vdev_port != -1)
+ break;
+ }
+ if (dpdk_vdev_port != -1) {
+ plogx_info("Received netlink message on tap interface %s for IP "IPv4_BYTES_FMT"/%d, Gateway "IPv4_BYTES_FMT"\n", interface_name, IP4(ip), dst_len, IP4(gw_ip));
+ int ret1 = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)mbufs);
+ if (unlikely(ret1 != 0)) {
+ plog_err("Unable to allocate a mbuf for master to core communication\n");
+ return;
+ }
+ int dpdk_port = prox_port_cfg[dpdk_vdev_port].dpdk_mapping;
+ tx_ring_route(tbase, task->internal_port_table[dpdk_port].ring, (nl_hdr->nlmsg_type == RTM_NEWROUTE), mbufs[0], ip, gw_ip, dst_len);
+ } else
+ plog_info("Received netlink message on unknown interface %s for IP "IPv4_BYTES_FMT"/%d, Gateway "IPv4_BYTES_FMT"\n", interface_name[0] ? interface_name:"", IP4(ip), dst_len, IP4(gw_ip));
+ return;
+}
+
+static void handle_arp_event(struct task_base *tbase)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ struct rte_mbuf *mbufs[MAX_RING_BURST];
+ struct nlmsghdr * nl_hdr;
+ int fd = task->arp_fds.fd;
+ int len, ret;
+ uint32_t ip = 0;
+ prox_rte_ether_addr mac;
+ memset(&mac, 0, sizeof(mac));
+ len = recv(fd, netlink_buf, sizeof(netlink_buf), 0);
+ if (len < 0) {
+ plog_err("Failed to recv from netlink: %d\n", errno);
+ return;
+ }
+ nl_hdr = (struct nlmsghdr *)netlink_buf;
+ if (nl_hdr->nlmsg_flags & NLM_F_MULTI) {
+ plog_err("Unexpected multipart netlink message\n");
+ return;
+ }
+ if ((nl_hdr->nlmsg_type != RTM_NEWNEIGH) && (nl_hdr->nlmsg_type != RTM_DELNEIGH))
+ return;
+
+ struct ndmsg *ndmsg = (struct ndmsg *)NLMSG_DATA(nl_hdr);
+ int ndm_family = ndmsg->ndm_family;
+ struct rtattr *rta = (struct rtattr *)RTM_RTA(ndmsg);
+ int rtl = RTM_PAYLOAD(nl_hdr);
+ for (; RTA_OK(rta, rtl); rta = RTA_NEXT(rta, rtl)) {
+ switch (rta->rta_type) {
+ case NDA_DST:
+ ip = *((uint32_t *)RTA_DATA(rta));
+ break;
+ case NDA_LLADDR:
+ mac = *((prox_rte_ether_addr *)(uint64_t *)RTA_DATA(rta));
+ break;
+ default:
+ break;
+ }
+ }
+ plogx_info("Received netlink ip "IPv4_BYTES_FMT" with mac "MAC_BYTES_FMT"\n", IP4(ip), MAC_BYTES(mac.addr_bytes));
+ ret = rte_hash_lookup(task->external_ip_hash, (const void *)&ip);
+ if (unlikely(ret < 0)) {
+ // entry not found for this IP: we never sent a request.
+ // This can happen if the kernel updated the ARP table when receiving an ARP_REQUEST
+ // We must record this, as the ARP entry is now in the kernel table
+ if (prox_rte_is_zero_ether_addr(&mac)) {
+ // Timeout or MAC deleted from kernel MAC table
+ int ret = rte_hash_del_key(task->external_ip_hash, (const void *)&ip);
+ plogx_dbg("ip "IPv4_BYTES_FMT" removed from external_ip_hash\n", IP4(ip));
+ return;
+ }
+ int ret = rte_hash_add_key(task->external_ip_hash, (const void *)&ip);
+ if (unlikely(ret < 0)) {
+ plogx_dbg("IP "IPv4_BYTES_FMT" not found in external_ip_hash and unable to add it\n", IP4(ip));
+ return;
+ }
+ memcpy(&task->external_ip_table[ret].mac, &mac, sizeof(prox_rte_ether_addr));
+ plogx_dbg("ip "IPv4_BYTES_FMT" added in external_ip_hash with mac "MAC_BYTES_FMT"\n", IP4(ip), MAC_BYTES(mac.addr_bytes));
+ return;
+ }
+
+ // entry found for this IP
+ uint16_t nb_requests = task->external_ip_table[ret].nb_requests;
+ if (nb_requests == 0) {
+ return;
+ }
+
+ memcpy(&task->external_ip_table[ret].mac, &mac, sizeof(prox_rte_ether_addr));
+
+ // If we received a request from multiple tasks for the same IP, then we update all tasks
+ int ret1 = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)mbufs);
+ if (unlikely(ret1 != 0)) {
+ plog_err("Unable to allocate a mbuf for master to core communication\n");
+ return;
+ }
+ rte_mbuf_refcnt_set(mbufs[0], nb_requests);
+ for (int i = 0; i < nb_requests; i++) {
+ struct rte_ring *ring = task->external_ip_table[ret].rings[i];
+ struct ether_hdr_arp *hdr = rte_pktmbuf_mtod(mbufs[0], struct ether_hdr_arp *);
+ memcpy(&hdr->arp.data.sha, &mac, sizeof(prox_rte_ether_addr));
+ tx_ring_ip(tbase, ring, MAC_INFO_FROM_MASTER, mbufs[0], ip);
+ plog_dbg("MAC_INFO_FROM_MASTER ip "IPv4_BYTES_FMT" with mac "MAC_BYTES_FMT"\n", IP4(ip), MAC_BYTES(mac.addr_bytes));
+ }
+ task->external_ip_table[ret].nb_requests = 0;
+ return;
}
static int handle_ctrl_plane_f(struct task_base *tbase, __attribute__((unused)) struct rte_mbuf **mbuf, uint16_t n_pkts)
{
- int ring_id = 0, j, ret = 0;
+ int ring_id = 0, j, ret = 0, n = 0;
struct rte_mbuf *mbufs[MAX_RING_BURST];
struct task_master *task = (struct task_master *)tbase;
@@ -336,6 +1176,20 @@ static int handle_ctrl_plane_f(struct task_base *tbase, __attribute__((unused))
for (j = 0; j < ret; j++) {
handle_message(tbase, mbufs[j], ring_id);
}
+ for (int vdev_id = 0; vdev_id < task->max_vdev_id; vdev_id++) {
+ struct vdev *vdev = &task->all_vdev[vdev_id];
+ n = rte_eth_rx_burst(vdev->port_id, 0, mbufs, MAX_PKT_BURST);
+ for (j = 0; j < n; j++) {
+ tx_ring(tbase, vdev->ring, PKT_FROM_TAP, mbufs[j]);
+ }
+ ret +=n;
+ }
+ if ((task->max_vdev_id) && (poll(&task->arp_fds, 1, prox_cfg.poll_timeout) == POLL_IN)) {
+ handle_arp_event(tbase);
+ }
+ if (poll(&task->route_fds, 1, prox_cfg.poll_timeout) == POLL_IN) {
+ handle_route_event(tbase);
+ }
return ret;
}
diff --git a/VNFs/DPPD-PROX/handle_master.h b/VNFs/DPPD-PROX/handle_master.h
index bc32182d..dcd0a5f2 100644
--- a/VNFs/DPPD-PROX/handle_master.h
+++ b/VNFs/DPPD-PROX/handle_master.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,26 +14,96 @@
// limitations under the License.
*/
+#include <poll.h>
#include "task_base.h"
#include "task_init.h"
enum arp_actions {
- UPDATE_FROM_CTRL,
- ARP_REQ_FROM_CTRL,
- ARP_REPLY_FROM_CTRL,
- ARP_TO_CTRL,
- REQ_MAC_TO_CTRL,
+ MAC_INFO_FROM_MASTER,
+ MAC_INFO_FROM_MASTER_FOR_IPV6,
+ IPV6_INFO_FROM_MASTER,
+ ROUTE_ADD_FROM_MASTER,
+ ROUTE_DEL_FROM_MASTER,
+ SEND_ARP_REQUEST_FROM_MASTER,
+ SEND_ARP_REPLY_FROM_MASTER,
+ SEND_NDP_FROM_MASTER,
+ SEND_ICMP_FROM_MASTER,
+ SEND_BGP_FROM_MASTER,
+ ARP_PKT_FROM_NET_TO_MASTER,
+ NDP_PKT_FROM_NET_TO_MASTER,
+ ICMP_TO_MASTER,
+ BGP_TO_MASTER,
+ IP4_REQ_MAC_TO_MASTER,
+ IP6_REQ_MAC_TO_MASTER,
+ PKT_FROM_TAP,
MAX_ACTIONS
};
-#define HANDLE_RANDOM_IP_FLAG 1
+#define PROX_MAX_ARP_REQUESTS 32 // Maximum number of tasks requesting the same MAC address
+
+#define HANDLE_RANDOM_IP_FLAG 1
+#define HANDLE_RANDOM_LOCAL_IP_FLAG 2
+#define HANDLE_RANDOM_GLOBAL_IP_FLAG 4
+#define IPV6_ROUTER 8
#define RANDOM_IP 0xffffffff
-const char *actions_string[MAX_ACTIONS];
+#define PROX_PSEUDO_PKT_PORT 0xdead
+
+struct port_table {
+ prox_rte_ether_addr mac;
+ struct rte_ring *ring;
+ uint32_t ip;
+ uint8_t port;
+ uint8_t flags;
+ struct ipv6_addr local_ipv6_addr;
+ struct ipv6_addr global_ipv6_addr;
+ struct ipv6_addr router_prefix;
+ uint64_t last_echo_req_rcvd_tsc;
+ uint64_t last_echo_rep_rcvd_tsc;
+ uint32_t n_echo_req;
+ uint32_t n_echo_rep;
+};
+
+struct ip_table {
+ prox_rte_ether_addr mac;
+ struct rte_ring *ring;
+};
+
+struct external_ip_table {
+ prox_rte_ether_addr mac;
+ struct rte_ring *rings[PROX_MAX_ARP_REQUESTS];
+ uint16_t nb_requests;
+};
+
+struct vdev {
+ int port_id;
+ struct rte_ring *ring;
+};
+
+struct task_master {
+ struct task_base base;
+ struct rte_ring *ctrl_rx_ring;
+ struct rte_ring **ctrl_tx_rings;
+ struct ip_table *internal_ip_table; // Store mac address from our IP
+ struct external_ip_table *external_ip_table; // Store mac address from external systems
+ struct ip_table *internal_ip6_table; // Store mac address from our IP
+ struct external_ip_table *external_ip6_table; // Store mac address from external systems
+ struct rte_hash *external_ip_hash;
+ struct rte_hash *external_ip6_hash;
+ struct rte_hash *internal_ip_hash;
+ struct rte_hash *internal_ip6_hash;
+ struct port_table internal_port_table[PROX_MAX_PORTS];
+ struct vdev all_vdev[PROX_MAX_PORTS];
+ int max_vdev_id;
+ struct pollfd arp_fds;
+ struct pollfd route_fds;
+};
+
+extern const char *actions_string[MAX_ACTIONS];
void init_ctrl_plane(struct task_base *tbase);
-int (*handle_ctrl_plane)(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts);
+extern int (*handle_ctrl_plane)(struct task_base *tbase, struct rte_mbuf **mbuf, uint16_t n_pkts);
static inline void tx_drop(struct rte_mbuf *mbuf)
{
@@ -41,3 +111,6 @@ static inline void tx_drop(struct rte_mbuf *mbuf)
}
void register_ip_to_ctrl_plane(struct task_base *task, uint32_t ip, uint8_t port_id, uint8_t core_id, uint8_t task_id);
+void master_init_vdev(struct task_base *task, uint8_t port_id, uint8_t core_id, uint8_t task_id);
+void register_router_to_ctrl_plane(struct task_base *tbase, uint8_t port_id, uint8_t core_id, uint8_t task_id, struct ipv6_addr *local_ipv6_addr, struct ipv6_addr *global_ipv6_addr, struct ipv6_addr *router_prefix);
+void register_node_to_ctrl_plane(struct task_base *tbase, struct ipv6_addr *local_ipv6_addr, struct ipv6_addr *global_ipv6_addr, uint8_t port_id, uint8_t core_id, uint8_t task_id);
diff --git a/VNFs/DPPD-PROX/handle_mirror.c b/VNFs/DPPD-PROX/handle_mirror.c
index 0d764b4d..73a5242c 100644
--- a/VNFs/DPPD-PROX/handle_mirror.c
+++ b/VNFs/DPPD-PROX/handle_mirror.c
@@ -24,6 +24,8 @@
#include "log.h"
#include "prox_port_cfg.h"
#include "quit.h"
+#include "prox_cksum.h"
+#include "prefetch.h"
/* Task that sends packets to multiple outputs. Note that in case of n
outputs, the output packet rate is n times the input packet
@@ -34,7 +36,9 @@
way to resolve this is to create deep copies of the packet. */
struct task_mirror {
struct task_base base;
- uint32_t n_dests;
+ uint32_t n_dests;
+ uint32_t multiplier;
+ uint32_t mirror_size;
};
struct task_mirror_copy {
@@ -55,14 +59,40 @@ static int handle_mirror_bulk(struct task_base *tbase, struct rte_mbuf **mbufs,
multiple times, the pointers are copied first. This copy is
used in each call to tx_pkt below. */
rte_memcpy(mbufs2, mbufs, sizeof(mbufs[0]) * n_pkts);
-
+ /* prefetch for optimization */
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
+ for (uint16_t j = 0; j < n_pkts; ++j) {
+ PREFETCH0(mbufs2[j]);
+ }
for (uint16_t j = 0; j < n_pkts; ++j) {
- rte_pktmbuf_refcnt_update(mbufs2[j], task->n_dests - 1);
+ hdr[j] = rte_pktmbuf_mtod(mbufs2[j], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[j]);
+ }
+ for (uint16_t j = 0; j < n_pkts; ++j) {
+ rte_pktmbuf_refcnt_update(mbufs2[j], task->n_dests * task->multiplier - 1);
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *) (hdr[j] + 1);
+ if ((task->mirror_size != 0) && (hdr[j]->ether_type == ETYPE_IPv4) && ((pip->next_proto_id == IPPROTO_UDP) || (pip->next_proto_id == IPPROTO_TCP))) {
+ rte_pktmbuf_pkt_len(mbufs2[j]) = task->mirror_size;
+ rte_pktmbuf_data_len(mbufs2[j]) = task->mirror_size;
+ pip->total_length = rte_bswap16(task->mirror_size-sizeof(prox_rte_ether_hdr));
+ pip->hdr_checksum = 0;
+ prox_ip_cksum_sw(pip);
+ int l4_len = task->mirror_size - sizeof(prox_rte_ether_hdr) - sizeof(prox_rte_ipv4_hdr);
+ if (pip->next_proto_id == IPPROTO_UDP) {
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t *)pip) + sizeof(prox_rte_ipv4_hdr));
+ udp->dgram_len = rte_bswap16(l4_len);
+ prox_udp_cksum_sw(udp, l4_len, pip->src_addr, pip->dst_addr);
+ } else if (pip->next_proto_id == IPPROTO_TCP) {
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t *)pip) + sizeof(prox_rte_ipv4_hdr));
+ prox_tcp_cksum_sw(tcp, l4_len, pip->src_addr, pip->dst_addr);
+ }
+ }
}
for (uint16_t j = 0; j < task->n_dests; ++j) {
memset(out, j, n_pkts);
-
- ret+= task->base.tx_pkt(&task->base, mbufs2, n_pkts, out);
+ for (uint16_t i = 0; i < task->multiplier; ++i) {
+ ret += task->base.tx_pkt(&task->base, mbufs2, n_pkts, out);
+ }
}
return ret;
}
@@ -110,8 +140,9 @@ static int handle_mirror_bulk_copy(struct task_base *tbase, struct rte_mbuf **mb
static void init_task_mirror(struct task_base *tbase, struct task_args *targ)
{
struct task_mirror *task = (struct task_mirror *)tbase;
-
task->n_dests = targ->nb_txports? targ->nb_txports : targ->nb_txrings;
+ task->multiplier = targ->multiplier? targ->multiplier : 1;
+ task->mirror_size = targ->mirror_size > 63? targ->mirror_size: 0;
}
static void init_task_mirror_copy(struct task_base *tbase, struct task_args *targ)
@@ -139,9 +170,8 @@ static struct task_init task_init_mirror = {
.mode_str = "mirror",
.init = init_task_mirror,
.handle = handle_mirror_bulk,
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS | TASK_FEATURE_TXQ_FLAGS_REFCOUNT,
+ .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_REFCOUNT,
.size = sizeof(struct task_mirror),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
static struct task_init task_init_mirror2 = {
@@ -149,9 +179,8 @@ static struct task_init task_init_mirror2 = {
.sub_mode_str = "copy",
.init = init_task_mirror_copy,
.handle = handle_mirror_bulk_copy,
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
.size = sizeof(struct task_mirror),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
__attribute__((constructor)) static void reg_task_mirror(void)
diff --git a/VNFs/DPPD-PROX/handle_mplstag.c b/VNFs/DPPD-PROX/handle_mplstag.c
index ce5996eb..ed122a06 100644
--- a/VNFs/DPPD-PROX/handle_mplstag.c
+++ b/VNFs/DPPD-PROX/handle_mplstag.c
@@ -42,23 +42,23 @@ static void init_task_unmpls(__attribute__((unused)) struct task_base *tbase,
static inline uint8_t handle_unmpls(__attribute__((unused)) struct task_unmpls *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);
uint32_t mpls_len = sizeof(struct mpls_hdr);
while (!(mpls->bytes & 0x00010000)) {
mpls++;
mpls_len += sizeof(struct mpls_hdr);
}
- uint32_t tot_eth_addr_len = 2*sizeof(struct ether_addr);
+ uint32_t tot_eth_addr_len = 2*sizeof(prox_rte_ether_addr);
rte_memcpy(((uint8_t *)peth) + mpls_len, peth, tot_eth_addr_len);
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(mpls + 1);
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)(mpls + 1);
switch (ip->version_ihl >> 4) {
case 4:
- peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
peth->ether_type = ETYPE_IPv4;
return 0;
case 6:
- peth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
+ peth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, mpls_len);
peth->ether_type = ETYPE_IPv6;
return 0;
default:
@@ -109,12 +109,12 @@ static void init_task_tagmpls(__attribute__((unused)) struct task_base *tbase,
static inline uint8_t handle_tagmpls(__attribute__((unused)) struct task_tagmpls *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 4);
+ prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 4);
PROX_ASSERT(peth);
rte_prefetch0(peth);
uint32_t mpls = 0;
- uint32_t tot_eth_addr_len = 2*sizeof(struct ether_addr);
+ uint32_t tot_eth_addr_len = 2*sizeof(prox_rte_ether_addr);
rte_memcpy(peth, ((uint8_t *)peth) + sizeof(struct mpls_hdr), tot_eth_addr_len);
*((uint32_t *)(peth + 1)) = mpls | 0x00010000; // Set BoS to 1
peth->ether_type = ETYPE_MPLSU;
diff --git a/VNFs/DPPD-PROX/handle_nat.c b/VNFs/DPPD-PROX/handle_nat.c
index 23d7ad87..ad0fcf45 100644
--- a/VNFs/DPPD-PROX/handle_nat.c
+++ b/VNFs/DPPD-PROX/handle_nat.c
@@ -45,9 +45,9 @@ struct task_nat {
};
struct pkt_eth_ipv4 {
- struct ether_hdr ether_hdr;
- struct ipv4_hdr ipv4_hdr;
-} __attribute__((packed));
+ prox_rte_ether_hdr ether_hdr;
+ prox_rte_ipv4_hdr ipv4_hdr;
+} __attribute__((packed)) __attribute__((__aligned__(2)));
static int handle_nat(struct task_nat *task, struct rte_mbuf *mbuf)
{
@@ -71,7 +71,7 @@ static int handle_nat(struct task_nat *task, struct rte_mbuf *mbuf)
return OUT_DISCARD;
*ip_addr = task->entries[ret];
- prox_ip_udp_cksum(mbuf, &pkt->ipv4_hdr, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_udp_cksum(mbuf, &pkt->ipv4_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
return 0;
}
@@ -123,6 +123,7 @@ static int lua_to_hash_nat(struct lua_State *L, enum lua_place from, const char
.key_len = sizeof(ip_from),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket,
};
ret_hash = rte_hash_create(&hash_params);
@@ -171,7 +172,7 @@ static void init_task_nat(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(ret != 0, "Failed to load NAT table from lua:\n%s\n", get_lua_to_errors());
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
}
@@ -182,12 +183,11 @@ static struct task_init task_init_nat = {
.init = init_task_nat,
.handle = handle_nat_bulk,
#ifdef SOFT_CRC
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
#else
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = 0,
#endif
.size = sizeof(struct task_nat),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
__attribute__((constructor)) static void reg_task_nat(void)
diff --git a/VNFs/DPPD-PROX/handle_nop.c b/VNFs/DPPD-PROX/handle_nop.c
index b3eef54c..4d10a365 100644
--- a/VNFs/DPPD-PROX/handle_nop.c
+++ b/VNFs/DPPD-PROX/handle_nop.c
@@ -22,9 +22,8 @@ static struct task_init task_init_nop_thrpt_opt = {
.init = NULL,
.handle = handle_nop_bulk,
.thread_x = thread_nop,
- .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_THROUGHPUT_OPT|TASK_FEATURE_MULTI_RX,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_THROUGHPUT_OPT|TASK_FEATURE_MULTI_RX,
.size = sizeof(struct task_nop),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
static struct task_init task_init_nop_lat_opt = {
@@ -33,9 +32,8 @@ static struct task_init task_init_nop_lat_opt = {
.init = NULL,
.handle = handle_nop_bulk,
.thread_x = thread_nop,
- .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_MULTI_RX,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_MULTI_RX,
.size = sizeof(struct task_nop),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
static struct task_init task_init_none;
diff --git a/VNFs/DPPD-PROX/handle_nsh.c b/VNFs/DPPD-PROX/handle_nsh.c
index 65a80c3d..a1df22fc 100644
--- a/VNFs/DPPD-PROX/handle_nsh.c
+++ b/VNFs/DPPD-PROX/handle_nsh.c
@@ -18,6 +18,10 @@
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
+#include <rte_version.h>
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+#include <rte_vxlan.h>
+#endif
#include "vxlangpe_nsh.h"
#include "task_base.h"
@@ -27,7 +31,7 @@
#include "prefetch.h"
#include "log.h"
-#define VXLAN_GPE_HDR_SZ sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + sizeof(struct vxlan_gpe_hdr) + sizeof(struct nsh_hdr)
+#define VXLAN_GPE_HDR_SZ sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr) + sizeof(prox_rte_vxlan_gpe_hdr) + sizeof(struct nsh_hdr)
#define ETHER_NSH_TYPE 0x4F89 /* 0x894F in little endian */
#define VXLAN_GPE_NSH_TYPE 0xB612 /* 4790 in little endian */
#define VXLAN_GPE_NP 0x4
@@ -51,39 +55,56 @@ static void init_task_decap_nsh(__attribute__((unused)) struct task_base *tbase,
static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap_nsh *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *eth_hdr = NULL;
- struct udp_hdr *udp_hdr = NULL;
- struct vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;
+ prox_rte_ether_hdr *eth_hdr = NULL;
+ prox_rte_udp_hdr *udp_hdr = NULL;
+ prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;
uint16_t hdr_len;
- eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
/* "decapsulate" Ethernet + NSH header by moving packet pointer */
- hdr_len = sizeof(struct ether_hdr) + sizeof(struct nsh_hdr);
+ hdr_len = sizeof(prox_rte_ether_hdr) + sizeof(struct nsh_hdr);
mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
mbuf->data_off += hdr_len;
mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ /* save length of header in the dynfield1 of rte_mbuf */
+ mbuf->dynfield1[0] = hdr_len;
+#else
/* save length of header in reserved 16bits of rte_mbuf */
mbuf->udata64 = hdr_len;
+#endif
}
else {
if (mbuf->data_len < VXLAN_GPE_HDR_SZ) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ mbuf->dynfield1[0] = 0;
+#else
mbuf->udata64 = 0;
+#endif
return 0;
}
/* check the UDP destination port */
- udp_hdr = (struct udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr));
+ udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ mbuf->dynfield1[0] = 0;
+#else
mbuf->udata64 = 0;
+#endif
return 0;
}
/* check the Next Protocol field in VxLAN-GPE header */
- vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr));
- if (vxlan_gpe_hdr->next_proto != VXLAN_GPE_NP) {
+ vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
+ if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ mbuf->dynfield1[0] = 0;
+#else
mbuf->udata64 = 0;
+#endif
return 0;
}
@@ -93,8 +114,13 @@ static inline uint8_t handle_decap_nsh(__attribute__((unused)) struct task_decap
mbuf->data_len = (uint16_t)(mbuf->data_len - hdr_len);
mbuf->data_off += hdr_len;
mbuf->pkt_len = (uint32_t)(mbuf->pkt_len - hdr_len);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ /* save length of header in the dynfield1 of rte_mbuf */
+ mbuf->dynfield1[0] = hdr_len;
+#else
/* save length of header in reserved 16bits of rte_mbuf */
mbuf->udata64 = hdr_len;
+#endif
}
return 0;
@@ -131,26 +157,38 @@ static void init_task_encap_nsh(__attribute__((unused)) struct task_base *tbase,
static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap_nsh *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *eth_hdr = NULL;
+ prox_rte_ether_hdr *eth_hdr = NULL;
struct nsh_hdr *nsh_hdr = NULL;
- struct udp_hdr *udp_hdr = NULL;
- struct vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;
+ prox_rte_udp_hdr *udp_hdr = NULL;
+ prox_rte_vxlan_gpe_hdr *vxlan_gpe_hdr = NULL;
uint16_t hdr_len;
if (mbuf == NULL)
return 0;
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ if (mbuf->dynfield1[0] == 0)
+#else
if (mbuf->udata64 == 0)
+#endif
return 0;
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ /* use header length saved in dynfields1 of rte_mbuf to
+ "encapsulate" transport + NSH header by moving packet pointer */
+ mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->dynfield1[0]);
+ mbuf->data_off -= mbuf->dynfield1[0];
+ mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->dynfield1[0]);
+#else
/* use header length saved in reserved 16bits of rte_mbuf to
"encapsulate" transport + NSH header by moving packet pointer */
mbuf->data_len = (uint16_t)(mbuf->data_len + mbuf->udata64);
mbuf->data_off -= mbuf->udata64;
mbuf->pkt_len = (uint32_t)(mbuf->pkt_len + mbuf->udata64);
+#endif
- eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ eth_hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
if (eth_hdr->ether_type == ETHER_NSH_TYPE) {
- nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(struct ether_hdr));
+ nsh_hdr = (struct nsh_hdr *) (((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr));
/* decrement Service Index in NSH header */
if (nsh_hdr->sf_index > 0)
@@ -162,17 +200,17 @@ static inline uint8_t handle_encap_nsh(__attribute__((unused)) struct task_encap
return 0;
/* check the UDP destination port */
- udp_hdr = (struct udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr));
+ udp_hdr = (prox_rte_udp_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr));
if (udp_hdr->dst_port != VXLAN_GPE_NSH_TYPE)
return 0;
/* check the Next Protocol field in VxLAN-GPE header */
- vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr));
- if (vxlan_gpe_hdr->next_proto != VXLAN_GPE_NP)
+ vxlan_gpe_hdr = (prox_rte_vxlan_gpe_hdr *)(((unsigned char *)eth_hdr) + sizeof(prox_rte_ether_hdr) + sizeof(prox_rte_ipv4_hdr) + sizeof(prox_rte_udp_hdr));
+ if (vxlan_gpe_hdr->proto != VXLAN_GPE_NP)
return 0;
/* decrement Service Index in NSH header */
- nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(struct vxlan_gpe_hdr));
+ nsh_hdr = (struct nsh_hdr *)(((unsigned char *)vxlan_gpe_hdr) + sizeof(prox_rte_vxlan_gpe_hdr));
if (nsh_hdr->sf_index > 0)
nsh_hdr->sf_index -= 1;
}
diff --git a/VNFs/DPPD-PROX/handle_police.c b/VNFs/DPPD-PROX/handle_police.c
index 125e8c0a..0d46cc16 100644
--- a/VNFs/DPPD-PROX/handle_police.c
+++ b/VNFs/DPPD-PROX/handle_police.c
@@ -33,6 +33,7 @@
#include "qinq.h"
#include "prox_cfg.h"
#include "prox_shared.h"
+#include "prox_compat.h"
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
@@ -44,41 +45,55 @@ struct task_police {
struct rte_meter_srtcm *sr_flows;
struct rte_meter_trtcm *tr_flows;
};
-
+ union {
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+ struct rte_meter_srtcm_profile sr_profile;
+ struct rte_meter_trtcm_profile tr_profile;
+#endif
+ };
uint16_t *user_table;
enum police_action police_act[3][3];
uint16_t overhead;
uint8_t runtime_flags;
+ struct rte_sched_port *sched_port;
};
typedef uint8_t (*hp) (struct task_police *task, struct rte_mbuf *mbuf, uint64_t tsc, uint32_t user);
static uint8_t handle_police(struct task_police *task, struct rte_mbuf *mbuf, uint64_t tsc, uint32_t user)
{
- enum rte_meter_color in_color = e_RTE_METER_GREEN;
- enum rte_meter_color out_color;
+ enum prox_rte_color in_color = RTE_COLOR_GREEN;
+ enum prox_rte_color out_color;
uint32_t pkt_len = rte_pktmbuf_pkt_len(mbuf) + task->overhead;
- out_color = rte_meter_srtcm_color_aware_check(&task->sr_flows[user], tsc, pkt_len, in_color);
+#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
+ out_color = rte_meter_srtcm_color_aware_check(&task->sr_flows[user], tsc, pkt_len, in_color);
+#else
+ out_color = rte_meter_srtcm_color_aware_check(&task->sr_flows[user], &task->sr_profile, tsc, pkt_len, in_color);
+#endif
return task->police_act[in_color][out_color] == ACT_DROP? OUT_DISCARD : 0;
}
static uint8_t handle_police_tr(struct task_police *task, struct rte_mbuf *mbuf, uint64_t tsc, uint32_t user)
{
- enum rte_meter_color in_color = e_RTE_METER_GREEN;
- enum rte_meter_color out_color;
+ enum prox_rte_color in_color = RTE_COLOR_GREEN;
+ enum prox_rte_color out_color;
uint32_t pkt_len = rte_pktmbuf_pkt_len(mbuf) + task->overhead;
+#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
out_color = rte_meter_trtcm_color_aware_check(&task->tr_flows[user], tsc, pkt_len, in_color);
+#else
+ out_color = rte_meter_trtcm_color_aware_check(&task->tr_flows[user], &task->tr_profile, tsc, pkt_len, in_color);
+#endif
if (task->runtime_flags & TASK_MARK) {
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
uint32_t subport, pipe, traffic_class, queue;
- enum rte_meter_color color;
+ enum prox_rte_color color;
- rte_sched_port_pkt_read_tree_path(mbuf, &subport, &pipe, &traffic_class, &queue);
+ prox_rte_sched_port_pkt_read_tree_path(task->sched_port, mbuf, &subport, &pipe, &traffic_class, &queue);
color = task->police_act[in_color][out_color];
- rte_sched_port_pkt_write(mbuf, subport, pipe, traffic_class, queue, color);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbuf, subport, pipe, traffic_class, queue, color);
#else
struct rte_sched_port_hierarchy *sched =
(struct rte_sched_port_hierarchy *) &mbuf->pkt.hash.sched;
@@ -100,7 +115,7 @@ static inline int get_user(struct task_police *task, struct rte_mbuf *mbuf)
uint32_t dummy;
uint32_t pipe;
- rte_sched_port_pkt_read_tree_path(mbuf, &dummy, &pipe, &dummy, &dummy);
+ prox_rte_sched_port_pkt_read_tree_path(task->sched_port, mbuf, &dummy, &pipe, &dummy, &dummy);
return pipe;
#else
struct rte_sched_port_hierarchy *sched =
@@ -202,6 +217,8 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
prox_sh_add_socket(socket_id, "user_table", task->user_table);
}
+ task->sched_port = rte_sched_port_config(&targ->qos_conf.port_params);
+
if (strcmp(targ->task_init->sub_mode_str, "trtcm")) {
task->sr_flows = prox_zmalloc(targ->n_flows * sizeof(*task->sr_flows), socket_id);
PROX_PANIC(task->sr_flows == NULL, "Failed to allocate flow contexts\n");
@@ -214,10 +231,16 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
.cbs = targ->cbs,
.ebs = targ->ebs,
};
-
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+ PROX_PANIC(rte_meter_srtcm_profile_config(&task->sr_profile, &params) != 0, "Failed to rte_meter_srtcm_profile_config\n");
+ for (uint32_t i = 0; i < targ->n_flows; ++i) {
+ PROX_PANIC(rte_meter_srtcm_config(&task->sr_flows[i], &task->sr_profile) != 0, "Failed to rte_meter_srtcm_config");
+ }
+#else
for (uint32_t i = 0; i < targ->n_flows; ++i) {
rte_meter_srtcm_config(&task->sr_flows[i], &params);
}
+#endif
}
else {
task->tr_flows = prox_zmalloc(targ->n_flows * sizeof(*task->tr_flows), socket_id);
@@ -233,10 +256,17 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
.cir = targ->cir,
.cbs = targ->cbs,
};
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+ PROX_PANIC(rte_meter_trtcm_profile_config(&task->tr_profile, &params) != 0, "Failed to rte_meter_srtcm_profile_config\n");
+ for (uint32_t i = 0; i < targ->n_flows; ++i) {
+ PROX_PANIC(rte_meter_trtcm_config(&task->tr_flows[i], &task->tr_profile) != 0, "Failed to rte_meter_trtcm_config\n");
+ }
+#else
for (uint32_t i = 0; i < targ->n_flows; ++i) {
rte_meter_trtcm_config(&task->tr_flows[i], &params);
}
+#endif
}
for (uint32_t i = 0; i < 3; ++i) {
@@ -247,6 +277,7 @@ static void init_task_police(struct task_base *tbase, struct task_args *targ)
}
static struct task_init task_init_police = {
+ .mode = POLICE,
.mode_str = "police",
.init = init_task_police,
.handle = handle_police_bulk,
@@ -255,6 +286,7 @@ static struct task_init task_init_police = {
};
static struct task_init task_init_police2 = {
+ .mode = POLICE,
.mode_str = "police",
.sub_mode_str = "trtcm",
.init = init_task_police,
diff --git a/VNFs/DPPD-PROX/handle_qinq_decap4.c b/VNFs/DPPD-PROX/handle_qinq_decap4.c
index c1715800..2a5bfc7f 100644
--- a/VNFs/DPPD-PROX/handle_qinq_decap4.c
+++ b/VNFs/DPPD-PROX/handle_qinq_decap4.c
@@ -148,7 +148,7 @@ static void init_task_qinq_decap4(struct task_base *tbase, struct task_args *tar
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
// By default, calling this function 1K times per second => 64K ARP per second max
@@ -183,6 +183,10 @@ __attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
svlan = rte_be_to_cpu_16(svlan & 0xFF0F);
cvlan = rte_be_to_cpu_16(cvlan & 0xFF0F);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%x, L2Tag=%d type=%d\n",
+ key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->dynfield1[0], mbuf->vlan_tci_outer, mbuf->packet_type);
+#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->vlan_tci_outer, mbuf->packet_type);
@@ -195,6 +199,7 @@ __attribute__((cold)) static void handle_error(struct rte_mbuf *mbuf)
key, svlan, cvlan, svlan, cvlan, mbuf->ol_flags, mbuf->reserved);
#endif
#endif
+#endif
#else
plogx_err("Can't convert ip %x to gre_id\n", rte_bswap32(packet->ipv4_hdr.src_addr));
#endif
@@ -380,9 +385,9 @@ static int handle_qinq_decap4_bulk(struct task_base *tbase, struct rte_mbuf **mb
static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id)
{
#ifdef USE_QINQ
- struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *));
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *));
#else
- struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct ether_hdr *));
+ prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *));
#endif
uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - 20 - ip_len - sizeof(struct qinq_hdr);
@@ -391,15 +396,15 @@ static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, s
rte_pktmbuf_trim(mbuf, padlen);
}
- PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > ETHER_MAX_LEN,
+ PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > PROX_RTE_ETHER_MAX_LEN,
"Would need to fragment packet new size = %u - not implemented\n",
rte_pktmbuf_data_len(mbuf) - padlen + 20);
#ifdef USE_QINQ
/* prepend only 20 bytes instead of 28, 8 bytes are present from the QinQ */
- struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 20);
+ prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 20);
#else
- struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 28);
+ prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 28);
#endif
PROX_ASSERT(peth);
@@ -407,16 +412,16 @@ static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, s
if (task->runtime_flags & TASK_TX_CRC) {
/* calculate IP CRC here to avoid problems with -O3 flag with gcc */
#ifdef MPLS_ROUTING
- prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr) + sizeof(struct mpls_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#else
- prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#endif
}
/* new IP header */
- struct ipv4_hdr *p_tunnel_ip = (struct ipv4_hdr *)(peth + 1);
- rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
- ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
+ prox_rte_ipv4_hdr *p_tunnel_ip = (prox_rte_ipv4_hdr *)(peth + 1);
+ rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr));
+ ip_len += sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr);
p_tunnel_ip->total_length = rte_cpu_to_be_16(ip_len);
p_tunnel_ip->src_addr = src_ipv4;
@@ -435,7 +440,7 @@ static inline uint16_t calc_padlen(const struct rte_mbuf *mbuf, const uint16_t i
static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id, struct task_qinq_decap4 *task)
{
- PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > ETHER_MAX_LEN,
+ PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > PROX_RTE_ETHER_MAX_LEN,
"Would need to fragment packet new size = %u - not implemented\n",
rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA);
@@ -443,7 +448,7 @@ static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf,
PROX_ASSERT(packet);
PREFETCH0(packet);
- struct ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr;
+ prox_rte_ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr;
uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
/* returns 0 on success, returns -ENOENT of failure (or -EINVAL if first or last parameter is NULL) */
@@ -476,16 +481,16 @@ static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf,
#endif
/* New IP header */
- rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
- ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
+ rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr));
+ ip_len += sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr);
packet->tunnel_ip_hdr.total_length = rte_cpu_to_be_16(ip_len);
packet->tunnel_ip_hdr.src_addr = src_ipv4;
packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst;
if (task->runtime_flags & TASK_TX_CRC) {
#ifdef MPLS_ROUTING
- prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(prox_rte_ether_hdr) + sizeof(struct mpls_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#else
- prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#endif
}
diff --git a/VNFs/DPPD-PROX/handle_qinq_decap6.c b/VNFs/DPPD-PROX/handle_qinq_decap6.c
index 77bacb75..d26f312a 100644
--- a/VNFs/DPPD-PROX/handle_qinq_decap6.c
+++ b/VNFs/DPPD-PROX/handle_qinq_decap6.c
@@ -45,7 +45,7 @@ struct task_qinq_decap6 {
struct rte_table_hash *cpe_table;
uint16_t *user_table;
uint32_t bucket_index;
- struct ether_addr edaddr;
+ prox_rte_ether_addr edaddr;
struct rte_lpm6 *rte_lpm6;
void* period_data; /* used if using dual stack*/
void (*period_func)(void* data);
@@ -103,7 +103,7 @@ static void early_init(struct task_args *targ)
static inline uint8_t handle_qinq_decap6(struct task_qinq_decap6 *task, struct rte_mbuf *mbuf)
{
struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbuf, struct qinq_hdr *);
- struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1);
+ prox_rte_ipv6_hdr *pip6 = (prox_rte_ipv6_hdr *)(pqinq + 1);
uint16_t svlan = pqinq->svlan.vlan_tci & 0xFF0F;
uint16_t cvlan = pqinq->cvlan.vlan_tci & 0xFF0F;
@@ -124,11 +124,11 @@ static inline uint8_t handle_qinq_decap6(struct task_qinq_decap6 *task, struct r
return OUT_DISCARD;
}
- pqinq = (struct qinq_hdr *)rte_pktmbuf_adj(mbuf, 2 * sizeof(struct vlan_hdr));
+ pqinq = (struct qinq_hdr *)rte_pktmbuf_adj(mbuf, 2 * sizeof(prox_rte_vlan_hdr));
PROX_ASSERT(pqinq);
pqinq->ether_type = ETYPE_IPv6;
// Dest MAC addresses
- ether_addr_copy(&task->edaddr, &pqinq->d_addr);
+ prox_rte_ether_addr_copy(&task->edaddr, &pqinq->d_addr);
return 0;
}
diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.c b/VNFs/DPPD-PROX/handle_qinq_encap4.c
index 0b31660f..0b707b7a 100644
--- a/VNFs/DPPD-PROX/handle_qinq_encap4.c
+++ b/VNFs/DPPD-PROX/handle_qinq_encap4.c
@@ -152,7 +152,7 @@ static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *tar
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
/* TODO: check if it is not necessary to limit reverse mapping
@@ -163,6 +163,10 @@ static void init_task_qinq_encap4(struct task_base *tbase, struct task_args *tar
}
/* task->src_mac[entry->port_idx] = *(uint64_t*)&prox_port_cfg[entry->port_idx].eth_addr; */
+ if (targ->runtime_flags & TASK_CLASSIFY) {
+ int rc = init_port_sched(&task->sched_port, targ);
+ PROX_PANIC(rc, "Did not find any QoS task to transmit to => undefined sched_port parameters\n");
+ }
}
static void arp_msg(struct task_base *tbase, void **data, uint16_t n_msgs)
@@ -440,14 +444,14 @@ static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf *
prefetch_pkts(mbufs, n_pkts);
for (uint16_t j = 0; j < n_pkts; ++j) {
- struct ipv4_hdr* ip = (struct ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *) + 1);
+ prox_rte_ipv4_hdr* ip = (prox_rte_ipv4_hdr *)(rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *) + 1);
task->keys[j] = (uint64_t)ip->dst_addr;
}
prox_rte_table_key8_lookup(task->cpe_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
if (likely(lookup_hit_mask == pkts_mask)) {
for (uint16_t j = 0; j < n_pkts; ++j) {
- struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
+ struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr));
uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);
if (padlen) {
@@ -463,7 +467,7 @@ static int handle_qinq_encap4_bulk_pe(struct task_base *tbase, struct rte_mbuf *
out[j] = OUT_DISCARD;
continue;
}
- struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
+ struct cpe_pkt* cpe_pkt = (struct cpe_pkt*) rte_pktmbuf_prepend(mbufs[j], sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr));
uint16_t padlen = mbuf_calc_padlen(mbufs[j], cpe_pkt, &cpe_pkt->ipv4_hdr);
if (padlen) {
@@ -541,13 +545,13 @@ static inline uint8_t handle_qinq_encap4(struct task_qinq_encap4 *task, struct c
uint8_t queue = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] & 0x3;
uint8_t tc = task->dscp[cpe_pkt->ipv4_hdr.type_of_service >> 2] >> 2;
- rte_sched_port_pkt_write(mbuf, 0, entry->user, tc, queue, 0);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbuf, 0, entry->user, tc, queue, 0);
}
#ifdef ENABLE_EXTRA_USER_STATISTICS
task->stats_per_user[entry->user]++;
#endif
if (task->runtime_flags & TASK_TX_CRC) {
- prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_ip_cksum(mbuf, &cpe_pkt->ipv4_hdr, sizeof(struct qinq_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
}
return entry->mac_port.out_idx;
}
diff --git a/VNFs/DPPD-PROX/handle_qinq_encap4.h b/VNFs/DPPD-PROX/handle_qinq_encap4.h
index 639135e0..999abbd8 100644
--- a/VNFs/DPPD-PROX/handle_qinq_encap4.h
+++ b/VNFs/DPPD-PROX/handle_qinq_encap4.h
@@ -25,6 +25,7 @@
#include "etypes.h"
#include "mpls.h"
#include "task_init.h"
+#include "handle_sched.h"
struct task_qinq_encap4 {
struct task_base base;
@@ -41,6 +42,7 @@ struct task_qinq_encap4 {
uint32_t *stats_per_user;
uint32_t n_users;
#endif
+ struct rte_sched_port *sched_port;
};
struct qinq_gre_entry {
@@ -68,20 +70,20 @@ void init_cpe4_hash(struct task_args *targ);
static inline uint8_t mpls_untag(struct rte_mbuf *mbuf)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
const uint16_t eth_type = peth->ether_type;
if (eth_type == ETYPE_MPLSU) {
- struct ether_hdr *pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
+ prox_rte_ether_hdr *pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
const struct mpls_hdr *mpls = (const struct mpls_hdr *)(peth + 1);
if (mpls->bos == 0) {
// Double MPLS tag
- pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
+ pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
PROX_ASSERT(pneweth);
}
- const struct ipv4_hdr *pip = (const struct ipv4_hdr *)(pneweth + 1);
+ const prox_rte_ipv4_hdr *pip = (const prox_rte_ipv4_hdr *)(pneweth + 1);
if ((pip->version_ihl >> 4) == 4) {
pneweth->ether_type = ETYPE_IPv4;
return 1;
diff --git a/VNFs/DPPD-PROX/handle_qinq_encap6.c b/VNFs/DPPD-PROX/handle_qinq_encap6.c
index a46f30fb..c6538655 100644
--- a/VNFs/DPPD-PROX/handle_qinq_encap6.c
+++ b/VNFs/DPPD-PROX/handle_qinq_encap6.c
@@ -30,6 +30,7 @@
#include "hash_utils.h"
#include "quit.h"
#include "prox_compat.h"
+#include "handle_sched.h"
struct task_qinq_encap6 {
struct task_base base;
@@ -37,6 +38,7 @@ struct task_qinq_encap6 {
uint8_t tx_portid;
uint8_t runtime_flags;
struct rte_table_hash *cpe_table;
+ struct rte_sched_port *sched_port;
};
static void init_task_qinq_encap6(struct task_base *tbase, struct task_args *targ)
@@ -46,15 +48,19 @@ static void init_task_qinq_encap6(struct task_base *tbase, struct task_args *tar
task->qinq_tag = targ->qinq_tag;
task->cpe_table = targ->cpe_table;
task->runtime_flags = targ->runtime_flags;
+ if (task->runtime_flags & TASK_CLASSIFY) {
+ int rc = init_port_sched(&task->sched_port, targ);
+ PROX_PANIC(rc, "Did not find any QoS task to transmit to => undefined sched_port parameters\n");
+ }
}
/* Encapsulate IPv6 packet in QinQ where the QinQ is derived from the IPv6 address */
static inline uint8_t handle_qinq_encap6(struct rte_mbuf *mbuf, struct task_qinq_encap6 *task)
{
- struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(struct vlan_hdr));
+ struct qinq_hdr *pqinq = (struct qinq_hdr *)rte_pktmbuf_prepend(mbuf, 2 * sizeof(prox_rte_vlan_hdr));
PROX_ASSERT(pqinq);
- struct ipv6_hdr *pip6 = (struct ipv6_hdr *)(pqinq + 1);
+ prox_rte_ipv6_hdr *pip6 = (prox_rte_ipv6_hdr *)(pqinq + 1);
if (pip6->hop_limits) {
pip6->hop_limits--;
@@ -81,7 +87,7 @@ static inline uint8_t handle_qinq_encap6(struct rte_mbuf *mbuf, struct task_qinq
/* classification can only be done from this point */
if (task->runtime_flags & TASK_CLASSIFY) {
- rte_sched_port_pkt_write(mbuf, 0, entries[0]->user, 0, 0, 0);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbuf, 0, entries[0]->user, 0, 0, 0);
}
return 0;
}
diff --git a/VNFs/DPPD-PROX/handle_qos.c b/VNFs/DPPD-PROX/handle_qos.c
index 142143e7..de9548f6 100644
--- a/VNFs/DPPD-PROX/handle_qos.c
+++ b/VNFs/DPPD-PROX/handle_qos.c
@@ -34,6 +34,7 @@
#include "qinq.h"
#include "prox_cfg.h"
#include "prox_shared.h"
+#include "prox_compat.h"
struct task_qos {
struct task_base base;
@@ -75,7 +76,7 @@ static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbu
const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbufs[j], const struct qinq_hdr *);
uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci);
if (pqinq->ether_type == ETYPE_IPv4) {
- const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1);
+ const prox_rte_ipv4_hdr *ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1);
queue = task->dscp[ipv4_hdr->type_of_service >> 2] & 0x3;
tc = task->dscp[ipv4_hdr->type_of_service >> 2] >> 2;
} else {
@@ -83,8 +84,7 @@ static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbu
queue = 0;
tc = 0;
}
-
- rte_sched_port_pkt_write(mbufs[j], 0, task->user_table[qinq], tc, queue, 0);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbufs[j], 0, task->user_table[qinq], tc, queue, 0);
}
#ifdef PROX_PREFETCH_OFFSET
prefetch_nta(rte_pktmbuf_mtod(mbufs[n_pkts - 1], void *));
@@ -92,7 +92,7 @@ static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbu
const struct qinq_hdr *pqinq = rte_pktmbuf_mtod(mbufs[j], const struct qinq_hdr *);
uint32_t qinq = PKT_TO_LUTQINQ(pqinq->svlan.vlan_tci, pqinq->cvlan.vlan_tci);
if (pqinq->ether_type == ETYPE_IPv4) {
- const struct ipv4_hdr *ipv4_hdr = (const struct ipv4_hdr *)(pqinq + 1);
+ const prox_rte_ipv4_hdr *ipv4_hdr = (const prox_rte_ipv4_hdr *)(pqinq + 1);
queue = task->dscp[ipv4_hdr->type_of_service >> 2] & 0x3;
tc = task->dscp[ipv4_hdr->type_of_service >> 2] >> 2;
} else {
@@ -101,7 +101,7 @@ static inline int handle_qos_bulk(struct task_base *tbase, struct rte_mbuf **mbu
tc = 0;
}
- rte_sched_port_pkt_write(mbufs[j], 0, task->user_table[qinq], tc, queue, 0);
+ prox_rte_sched_port_pkt_write(task->sched_port, mbufs[j], 0, task->user_table[qinq], tc, queue, 0);
}
#endif
}
@@ -135,7 +135,11 @@ static void init_task_qos(struct task_base *tbase, struct task_args *targ)
PROX_PANIC(task->sched_port == NULL, "failed to create sched_port");
plog_info("number of pipes: %d\n\n", targ->qos_conf.port_params.n_pipes_per_subport);
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ int err = rte_sched_subport_config(task->sched_port, 0, targ->qos_conf.subport_params, 0);
+#else
int err = rte_sched_subport_config(task->sched_port, 0, targ->qos_conf.subport_params);
+#endif
PROX_PANIC(err != 0, "Failed setting up sched_port subport, error: %d", err);
/* only single subport and single pipe profile is supported */
@@ -166,6 +170,7 @@ static void init_task_qos(struct task_base *tbase, struct task_args *targ)
}
static struct task_init task_init_qos = {
+ .mode = QOS,
.mode_str = "qos",
.init = init_task_qos,
.handle = handle_qos_bulk,
diff --git a/VNFs/DPPD-PROX/handle_routing.c b/VNFs/DPPD-PROX/handle_routing.c
index 9dd45ed8..4683ede7 100644
--- a/VNFs/DPPD-PROX/handle_routing.c
+++ b/VNFs/DPPD-PROX/handle_routing.c
@@ -37,10 +37,10 @@
#include "mpls.h"
#include "qinq.h"
#include "prox_cfg.h"
-#include "ip6_addr.h"
#include "prox_shared.h"
#include "prox_cksum.h"
#include "mbuf_utils.h"
+#include "prox_compat.h"
struct task_routing {
struct task_base base;
@@ -48,7 +48,6 @@ struct task_routing {
struct lcore_cfg *lconf;
struct rte_lpm *ipv4_lpm;
struct next_hop *next_hops;
- int offload_crc;
uint32_t number_free_rules;
uint16_t qinq_tag;
uint32_t marking[4];
@@ -144,9 +143,6 @@ static void init_task_routing(struct task_base *tbase, struct task_args *targ)
}
struct prox_port_cfg *port = find_reachable_port(targ);
- if (port) {
- task->offload_crc = port->capabilities.tx_offload_cksum;
- }
targ->lconf->ctrl_func_m[targ->task] = routing_update;
targ->lconf->ctrl_timeout = freq_to_tsc(20);
@@ -181,16 +177,14 @@ static int handle_routing_bulk(struct task_base *tbase, struct rte_mbuf **mbufs,
static void set_l2(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
*((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes;
*((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx];
}
-static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx, uint16_t l2_len)
+static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_t nh_idx)
{
- struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, sizeof(struct mpls_hdr));
- l2_len += sizeof(struct mpls_hdr);
- prox_ip_cksum(mbuf, (struct ipv4_hdr *)((uint8_t *)peth + l2_len), l2_len, sizeof(struct ipv4_hdr), task->offload_crc);
+ prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, sizeof(struct mpls_hdr));
*((uint64_t *)(&peth->d_addr)) = task->next_hops[nh_idx].mac_port_8bytes;
*((uint64_t *)(&peth->s_addr)) = task->src_mac[task->next_hops[nh_idx].mac_port.out_idx];
@@ -198,8 +192,7 @@ static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_
struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);
if (task->runtime_flags & TASK_MARK) {
- enum rte_meter_color color = rte_sched_port_pkt_read_color(mbuf);
-
+ enum prox_rte_color color = rte_sched_port_pkt_read_color(mbuf);
*(uint32_t *)mpls = task->next_hops[nh_idx].mpls | task->marking[color] | 0x00010000; // Set BoS to 1
}
else {
@@ -209,8 +202,8 @@ static void set_l2_mpls(struct task_routing *task, struct rte_mbuf *mbuf, uint8_
static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_offset, struct rte_mbuf *mbuf)
{
- struct ipv4_hdr *ip = (struct ipv4_hdr*)(beg + ip_offset);
- struct ether_hdr *peth_out;
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(beg + ip_offset);
+ prox_rte_ether_hdr *peth_out;
uint8_t tx_port;
uint32_t dst_ip;
@@ -224,7 +217,7 @@ static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_o
switch(ip->next_proto_id) {
case IPPROTO_GRE: {
struct gre_hdr *pgre = (struct gre_hdr *)(ip + 1);
- dst_ip = ((struct ipv4_hdr *)(pgre + 1))->dst_addr;
+ dst_ip = ((prox_rte_ipv4_hdr *)(pgre + 1))->dst_addr;
break;
}
case IPPROTO_TCP:
@@ -255,7 +248,7 @@ static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_o
rte_pktmbuf_trim(mbuf, padlen);
}
- set_l2_mpls(task, mbuf, next_hop_index, ip_offset);
+ set_l2_mpls(task, mbuf, next_hop_index);
}
else {
set_l2(task, mbuf, next_hop_index);
@@ -266,7 +259,7 @@ static uint8_t route_ipv4(struct task_routing *task, uint8_t *beg, uint32_t ip_o
static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf *mbuf)
{
struct qinq_hdr *qinq;
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
switch (peth->ether_type) {
case ETYPE_8021ad: {
@@ -283,7 +276,7 @@ static inline uint8_t handle_routing(struct task_routing *task, struct rte_mbuf
case ETYPE_MPLSU: {
/* skip MPLS headers if any for routing */
struct mpls_hdr *mpls = (struct mpls_hdr *)(peth + 1);
- uint32_t count = sizeof(struct ether_hdr);
+ uint32_t count = sizeof(prox_rte_ether_hdr);
while (!(mpls->bytes & 0x00010000)) {
mpls++;
count += sizeof(struct mpls_hdr);
@@ -311,7 +304,7 @@ static struct task_init task_init_routing = {
.mode_str = "routing",
.init = init_task_routing,
.handle = handle_routing_bulk,
- .flag_features = TASK_FEATURE_ROUTING,
+ .flag_features = TASK_FEATURE_ROUTING|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
.size = sizeof(struct task_routing)
};
diff --git a/VNFs/DPPD-PROX/handle_sched.h b/VNFs/DPPD-PROX/handle_sched.h
new file mode 100644
index 00000000..966efbba
--- /dev/null
+++ b/VNFs/DPPD-PROX/handle_sched.h
@@ -0,0 +1,45 @@
+/*
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef _PROX_SCHED_H
+#define _PROX_SCHED_H
+
+#include "task_init.h"
+#include "lconf.h"
+
+static int init_port_sched(struct rte_sched_port **sched_port, struct task_args *targ)
+{
+ *sched_port = NULL;
+#if RTE_VERSION >= RTE_VERSION_NUM(19,2,0,0)
+ for (uint8_t idx = 0; idx < MAX_PROTOCOLS; ++idx) {
+ for (uint8_t ring_idx = 0; ring_idx < targ->core_task_set[idx].n_elems; ++ring_idx) {
+ struct core_task ct = targ->core_task_set[idx].core_task[ring_idx];
+ struct task_args *dtarg = core_targ_get(ct.core, ct.task);
+ enum task_mode dmode = dtarg->mode;
+ if ((dmode == QOS) || (dmode == POLICE)) {
+ // Next task is QOS or POLICE
+ // We use the same configuration as the QoS we are transmitting to
+ *sched_port = rte_sched_port_config(&dtarg->qos_conf.port_params);
+ plog_info("\tInitializing sched_port based on QoS config of core %d task %d\n", ct.core, ct.task);
+ return 0;
+ }
+ }
+ }
+ return -1;
+#endif
+ return 0;
+}
+#endif
diff --git a/VNFs/DPPD-PROX/handle_swap.c b/VNFs/DPPD-PROX/handle_swap.c
index 68dfe2b4..503af598 100644
--- a/VNFs/DPPD-PROX/handle_swap.c
+++ b/VNFs/DPPD-PROX/handle_swap.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,94 +16,226 @@
#include <rte_mbuf.h>
#include <rte_udp.h>
+#include <rte_icmp.h>
#include "task_init.h"
#include "task_base.h"
#include "lconf.h"
#include "log.h"
-#include "arp.h"
-#include "handle_swap.h"
#include "prox_port_cfg.h"
#include "mpls.h"
#include "qinq.h"
#include "gre.h"
#include "prefetch.h"
+#include "defines.h"
+#include "igmp.h"
+#include "prox_cksum.h"
+#include "prox_compat.h"
+
+#define MAX_STORE_PKT_SIZE 2048
+
+struct packet {
+ unsigned int len;
+ unsigned char buf[MAX_STORE_PKT_SIZE];
+};
struct task_swap {
struct task_base base;
- uint8_t src_dst_mac[12];
+ struct rte_mempool *igmp_pool;
+ uint32_t flags;
uint32_t runtime_flags;
+ uint32_t igmp_address;
+ uint8_t src_dst_mac[12];
+ uint32_t local_ipv4;
+ int offload_crc;
+ uint64_t last_echo_req_rcvd_tsc;
+ uint64_t last_echo_rep_rcvd_tsc;
+ uint32_t n_echo_req;
+ uint32_t n_echo_rep;
+ uint32_t store_pkt_id;
+ uint32_t store_msk;
+ struct packet *store_buf;
+ FILE *fp;
};
+#define NB_IGMP_MBUF 1024
+#define IGMP_MBUF_SIZE 2048
+#define NB_CACHE_IGMP_MBUF 256
+
+#define GENEVE_PORT 0xc117 // in be
+
static void write_src_and_dst_mac(struct task_swap *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *hdr;
- struct ether_addr mac;
+ prox_rte_ether_hdr *hdr;
+ prox_rte_ether_addr mac;
- if (unlikely((task->runtime_flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET))) {
+ if (unlikely((task->flags & (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET)) == (TASK_ARG_DST_MAC_SET|TASK_ARG_SRC_MAC_SET))) {
/* Source and Destination mac hardcoded */
- hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
rte_memcpy(hdr, task->src_dst_mac, sizeof(task->src_dst_mac));
} else {
- hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- if (likely((task->runtime_flags & TASK_ARG_SRC_MAC_SET) == 0)) {
+ hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ if (unlikely((task->flags & TASK_ARG_SRC_MAC_SET) == 0)) {
/* dst mac will be used as src mac */
- ether_addr_copy(&hdr->d_addr, &mac);
+ prox_rte_ether_addr_copy(&hdr->d_addr, &mac);
}
- if (unlikely(task->runtime_flags & TASK_ARG_DST_MAC_SET))
- ether_addr_copy((struct ether_addr *)&task->src_dst_mac[0], &hdr->d_addr);
+ if (unlikely(task->flags & TASK_ARG_DST_MAC_SET))
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[0], &hdr->d_addr);
else
- ether_addr_copy(&hdr->s_addr, &hdr->d_addr);
+ prox_rte_ether_addr_copy(&hdr->s_addr, &hdr->d_addr);
- if (unlikely(task->runtime_flags & TASK_ARG_SRC_MAC_SET)) {
- ether_addr_copy((struct ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
+ if (likely(task->flags & TASK_ARG_SRC_MAC_SET)) {
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
} else {
- ether_addr_copy(&mac, &hdr->s_addr);
+ prox_rte_ether_addr_copy(&mac, &hdr->s_addr);
}
}
}
-static inline int handle_arp_request(struct task_swap *task, struct ether_hdr_arp *hdr_arp, struct ether_addr *s_addr, uint32_t ip)
+static inline void build_mcast_mac(uint32_t ip, prox_rte_ether_addr *dst_mac)
{
- if ((hdr_arp->arp.data.tpa == ip) || (ip == 0)) {
- build_arp_reply(hdr_arp, s_addr);
- return 0;
- } else if (task->runtime_flags & TASK_MULTIPLE_MAC) {
- struct ether_addr tmp_s_addr;
- create_mac(hdr_arp, &tmp_s_addr);
- build_arp_reply(hdr_arp, &tmp_s_addr);
- return 0;
- } else {
- plogx_dbg("Received ARP on unexpected IP %x, expecting %x\n", rte_be_to_cpu_32(hdr_arp->arp.data.tpa), rte_be_to_cpu_32(ip));
- return OUT_DISCARD;
+ // MAC address is 01:00:5e followed by 23 LSB of IP address
+ uint64_t mac = 0x0000005e0001L | ((ip & 0xFFFF7F00L) << 16);
+ memcpy(dst_mac, &mac, sizeof(prox_rte_ether_addr));
+}
+
+static inline void build_icmp_reply_message(struct task_base *tbase, struct rte_mbuf *mbuf)
+{
+ struct task_swap *task = (struct task_swap *)tbase;
+ prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_addr dst_mac;
+ prox_rte_ether_addr_copy(&hdr->s_addr, &dst_mac);
+ prox_rte_ether_addr_copy(&hdr->d_addr, &hdr->s_addr);
+ prox_rte_ether_addr_copy(&dst_mac, &hdr->d_addr);
+ prox_rte_ipv4_hdr *ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1);
+ ip_hdr->dst_addr = ip_hdr->src_addr;
+ ip_hdr->src_addr = task->local_ipv4;
+ prox_rte_icmp_hdr *picmp = (prox_rte_icmp_hdr *)(ip_hdr + 1);
+ picmp->icmp_type = PROX_RTE_IP_ICMP_ECHO_REPLY;
+}
+
+static inline void build_igmp_message(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t ip, uint8_t igmp_message)
+{
+ struct task_swap *task = (struct task_swap *)tbase;
+ prox_rte_ether_hdr *hdr = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_addr dst_mac;
+ build_mcast_mac(ip, &dst_mac);
+
+ rte_pktmbuf_pkt_len(mbuf) = 46;
+ rte_pktmbuf_data_len(mbuf) = 46;
+ init_mbuf_seg(mbuf);
+
+ prox_rte_ether_addr_copy(&dst_mac, &hdr->d_addr);
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_dst_mac[6], &hdr->s_addr);
+ hdr->ether_type = ETYPE_IPv4;
+
+ prox_rte_ipv4_hdr *ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1);
+ ip_hdr->version_ihl = 0x45; /**< version and header length */
+ ip_hdr->type_of_service = 0; /**< type of service */
+ ip_hdr->total_length = rte_cpu_to_be_16(32); /**< length of packet */
+ ip_hdr->packet_id = 0; /**< packet ID */
+ ip_hdr->fragment_offset = 0; /**< fragmentation offset */
+ ip_hdr->time_to_live = 1; /**< time to live */
+ ip_hdr->next_proto_id = IPPROTO_IGMP; /**< protocol ID */
+ ip_hdr->hdr_checksum = 0; /**< header checksum */
+ ip_hdr->src_addr = task->local_ipv4; /**< source address */
+ ip_hdr->dst_addr = ip; /**< destination address */
+ struct igmpv2_hdr *pigmp = (struct igmpv2_hdr *)(ip_hdr + 1);
+ pigmp->type = igmp_message;
+ pigmp->max_resp_time = 0;
+ pigmp->checksum = 0;
+ pigmp->group_address = ip;
+ prox_ip_udp_cksum(mbuf, ip_hdr, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
+}
+
+static void stop_swap(struct task_base *tbase)
+{
+ uint32_t i, j;
+ struct task_swap *task = (struct task_swap *)tbase;
+
+ if (task->igmp_pool) {
+ rte_mempool_free(task->igmp_pool);
+ task->igmp_pool = NULL;
+ }
+
+ if (task->store_msk) {
+ for (i = task->store_pkt_id & task->store_msk; i < task->store_msk + 1; i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
+ for (i = 0; i < (task->store_pkt_id & task->store_msk); i++) {
+ if (task->store_buf[i].len) {
+ fprintf(task->fp, "%06d: ", i);
+ for (j = 0; j < task->store_buf[i].len; j++) {
+ fprintf(task->fp, "%02x ", task->store_buf[i].buf[j]);
+ }
+ fprintf(task->fp, "\n");
+ }
+ }
}
}
-/*
- * swap mode does not send arp requests, so does not expect arp replies
- * Need to understand later whether we must send arp requests
- */
-static inline int handle_arp_replies(struct task_swap *task, struct ether_hdr_arp *hdr_arp)
+static void handle_ipv6(struct task_swap *task, struct rte_mbuf *mbufs, prox_rte_ipv6_hdr *ipv6_hdr, uint8_t *out)
{
- return OUT_DISCARD;
+ __m128i ip = _mm_loadu_si128((__m128i*)&(ipv6_hdr->src_addr));
+ uint16_t port;
+ uint16_t payload_len;
+ prox_rte_udp_hdr *udp_hdr;
+
+ rte_mov16((uint8_t *)&(ipv6_hdr->src_addr), (uint8_t *)&(ipv6_hdr->dst_addr)); // Copy dst into src
+ rte_mov16((uint8_t *)&(ipv6_hdr->dst_addr), (uint8_t *)&ip); // Copy src into dst
+ switch(ipv6_hdr->proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ payload_len = ipv6_hdr->payload_len;
+ udp_hdr = (prox_rte_udp_hdr *)(ipv6_hdr + 1);
+ if (unlikely(udp_hdr->dgram_len < payload_len)) {
+ plog_warn("Unexpected L4 len (%u) versus L3 payload len (%u) in IPv6 packet\n", udp_hdr->dgram_len, payload_len);
+ *out = OUT_DISCARD;
+ break;
+ }
+ port = udp_hdr->dst_port;
+ udp_hdr->dst_port = udp_hdr->src_port;
+ udp_hdr->src_port = port;
+ write_src_and_dst_mac(task, mbufs);
+ *out = 0;
+ break;
+ default:
+ plog_warn("Unsupported next hop %u in IPv6 packet\n", ipv6_hdr->proto);
+ *out = OUT_DISCARD;
+ break;
+ }
}
static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
struct task_swap *task = (struct task_swap *)tbase;
- struct ether_hdr *hdr;
- struct ether_addr mac;
- struct ipv4_hdr *ip_hdr;
- struct udp_hdr *udp_hdr;
+ prox_rte_ether_hdr *hdr;
+ prox_rte_ether_addr mac;
+ prox_rte_ipv4_hdr *ip_hdr;
+ prox_rte_udp_hdr *udp_hdr;
+ prox_rte_ipv6_hdr *ipv6_hdr;
+ struct gre_hdr *pgre;
+ prox_rte_ipv4_hdr *inner_ip_hdr;
uint32_t ip;
uint16_t port;
uint8_t out[64] = {0};
struct mpls_hdr *mpls;
uint32_t mpls_len = 0;
struct qinq_hdr *qinq;
- struct vlan_hdr *vlan;
- struct ether_hdr_arp *hdr_arp;
+ prox_rte_vlan_hdr *vlan;
uint16_t j;
+ struct igmpv2_hdr *pigmp;
+ prox_rte_icmp_hdr *picmp;
+ uint8_t type;
+ static int llc_printed = 0;
+ static int lldp_printed = 0;
+ static int geneve_printed = 0;
for (j = 0; j < n_pkts; ++j) {
PREFETCH0(mbufs[j]);
@@ -112,17 +244,24 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
PREFETCH0(rte_pktmbuf_mtod(mbufs[j], void *));
}
+ // TODO 1: check packet is long enough for Ethernet + IP + UDP = 42 bytes
for (uint16_t j = 0; j < n_pkts; ++j) {
- hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr *);
+ hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *);
switch (hdr->ether_type) {
case ETYPE_MPLSU:
mpls = (struct mpls_hdr *)(hdr + 1);
while (!(mpls->bytes & 0x00010000)) {
+ // TODO: verify pcket length
mpls++;
mpls_len += sizeof(struct mpls_hdr);
}
mpls_len += sizeof(struct mpls_hdr);
- ip_hdr = (struct ipv4_hdr *)(mpls + 1);
+ ip_hdr = (prox_rte_ipv4_hdr *)(mpls + 1);
+ if (unlikely((ip_hdr->version_ihl >> 4) == 6)) {
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(ip_hdr);
+ handle_ipv6(task, mbufs[j], ipv6_hdr, &out[j]);
+ continue;
+ }
break;
case ETYPE_8021ad:
qinq = (struct qinq_hdr *)hdr;
@@ -131,20 +270,34 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
out[j] = OUT_DISCARD;
continue;
}
- ip_hdr = (struct ipv4_hdr *)(qinq + 1);
+ if (qinq->ether_type == ETYPE_IPv4) {
+ ip_hdr = (prox_rte_ipv4_hdr *)(qinq + 1);
+ } else if (qinq->ether_type == ETYPE_IPv6) {
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(qinq + 1);
+ handle_ipv6(task, mbufs[j], ipv6_hdr, &out[j]);
+ continue;
+ } else {
+ plog_warn("Unsupported packet type\n");
+ out[j] = OUT_DISCARD;
+ continue;
+ }
break;
case ETYPE_VLAN:
- vlan = (struct vlan_hdr *)(hdr + 1);
+ vlan = (prox_rte_vlan_hdr *)(hdr + 1);
if (vlan->eth_proto == ETYPE_IPv4) {
- ip_hdr = (struct ipv4_hdr *)(vlan + 1);
+ ip_hdr = (prox_rte_ipv4_hdr *)(vlan + 1);
+ } else if (vlan->eth_proto == ETYPE_IPv6) {
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan + 1);
+ handle_ipv6(task, mbufs[j], ipv6_hdr, &out[j]);
+ continue;
} else if (vlan->eth_proto == ETYPE_VLAN) {
- vlan = (struct vlan_hdr *)(vlan + 1);
+ vlan = (prox_rte_vlan_hdr *)(vlan + 1);
if (vlan->eth_proto == ETYPE_IPv4) {
- ip_hdr = (struct ipv4_hdr *)(vlan + 1);
+ ip_hdr = (prox_rte_ipv4_hdr *)(vlan + 1);
}
else if (vlan->eth_proto == ETYPE_IPv6) {
- plog_warn("Unsupported IPv6\n");
- out[j] = OUT_DISCARD;
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan + 1);
+ handle_ipv6(task, mbufs[j], ipv6_hdr, &out[j]);
continue;
}
else {
@@ -159,48 +312,209 @@ static int handle_swap_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, ui
}
break;
case ETYPE_IPv4:
- ip_hdr = (struct ipv4_hdr *)(hdr + 1);
+ ip_hdr = (prox_rte_ipv4_hdr *)(hdr + 1);
break;
case ETYPE_IPv6:
- plog_warn("Unsupported IPv6\n");
- out[j] = OUT_DISCARD;
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
+ handle_ipv6(task, mbufs[j], ipv6_hdr, &out[j]);
continue;
case ETYPE_LLDP:
+ if (!lldp_printed) {
+ plog_info("Discarding LLDP packets (only printed once)\n");
+ lldp_printed = 1;
+ }
out[j] = OUT_DISCARD;
continue;
default:
+ if ((rte_bswap16(hdr->ether_type) < 0x600) && (rte_bswap16(hdr->ether_type) >= 16)) {
+ // 802.3
+ struct prox_llc {
+ uint8_t dsap;
+ uint8_t lsap;
+ uint8_t control;
+ };
+ struct prox_llc *llc = (struct prox_llc *)(hdr + 1);
+ if ((llc->dsap == 0x42) && (llc->lsap == 0x42)) {
+ // STP Protocol
+ out[j] = OUT_DISCARD;
+ if (!llc_printed) {
+ plog_info("Discarding STP packets (only printed once)\n");
+ llc_printed = 1;
+ }
+ continue;
+ }
+ }
plog_warn("Unsupported ether_type 0x%x\n", hdr->ether_type);
out[j] = OUT_DISCARD;
continue;
}
- udp_hdr = (struct udp_hdr *)(ip_hdr + 1);
+ // TODO 2 : check packet is long enough for Ethernet + IP + UDP + extra header (VLAN, MPLS, ...)
+ // IPv4 packet
+
ip = ip_hdr->dst_addr;
- ip_hdr->dst_addr = ip_hdr->src_addr;
- ip_hdr->src_addr = ip;
- if (ip_hdr->next_proto_id == IPPROTO_GRE) {
- struct gre_hdr *pgre = (struct gre_hdr *)(ip_hdr + 1);
- struct ipv4_hdr *inner_ip_hdr = ((struct ipv4_hdr *)(pgre + 1));
+ if (unlikely((ip_hdr->version_ihl >> 4) != 4)) {
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+
+ switch (ip_hdr->next_proto_id) {
+ case IPPROTO_GRE:
+ ip_hdr->dst_addr = ip_hdr->src_addr;
+ ip_hdr->src_addr = ip;
+
+ pgre = (struct gre_hdr *)(ip_hdr + 1);
+ inner_ip_hdr = ((prox_rte_ipv4_hdr *)(pgre + 1));
ip = inner_ip_hdr->dst_addr;
inner_ip_hdr->dst_addr = inner_ip_hdr->src_addr;
inner_ip_hdr->src_addr = ip;
- udp_hdr = (struct udp_hdr *)(inner_ip_hdr + 1);
+
+ udp_hdr = (prox_rte_udp_hdr *)(inner_ip_hdr + 1);
+ // TODO 3.1 : verify proto is UDP or TCP
port = udp_hdr->dst_port;
udp_hdr->dst_port = udp_hdr->src_port;
udp_hdr->src_port = port;
- } else {
+ write_src_and_dst_mac(task, mbufs[j]);
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ if (unlikely(task->igmp_address && PROX_RTE_IS_IPV4_MCAST(rte_be_to_cpu_32(ip)))) {
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ udp_hdr = (prox_rte_udp_hdr *)(ip_hdr + 1);
port = udp_hdr->dst_port;
+ ip_hdr->dst_addr = ip_hdr->src_addr;
+ ip_hdr->src_addr = ip;
+
+ if ((port == GENEVE_PORT) && (task->runtime_flags & TASK_DO_NOT_FWD_GENEVE)) {
+ if (!geneve_printed) {
+ plog_info("Discarding geneve (only printed once)\n");
+ geneve_printed = 1;
+ }
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+
udp_hdr->dst_port = udp_hdr->src_port;
udp_hdr->src_port = port;
+ write_src_and_dst_mac(task, mbufs[j]);
+ break;
+ case IPPROTO_ICMP:
+ picmp = (prox_rte_icmp_hdr *)(ip_hdr + 1);
+ type = picmp->icmp_type;
+ if (type == PROX_RTE_IP_ICMP_ECHO_REQUEST) {
+ if (ip_hdr->dst_addr == task->local_ipv4) {
+ task->n_echo_req++;
+ if (rte_rdtsc() - task->last_echo_req_rcvd_tsc > rte_get_tsc_hz()) {
+ plog_info("Received %u Echo Request on IP "IPv4_BYTES_FMT" (last received from IP "IPv4_BYTES_FMT")\n", task->n_echo_req, IPv4_BYTES(((uint8_t*)&ip_hdr->dst_addr)), IPv4_BYTES(((uint8_t*)&ip_hdr->src_addr)));
+ task->n_echo_req = 0;
+ task->last_echo_req_rcvd_tsc = rte_rdtsc();
+ }
+ build_icmp_reply_message(tbase, mbufs[j]);
+ } else {
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ } else if (type == PROX_RTE_IP_ICMP_ECHO_REPLY) {
+ if (ip_hdr->dst_addr == task->local_ipv4) {
+ task->n_echo_rep++;
+ if (rte_rdtsc() - task->last_echo_rep_rcvd_tsc > rte_get_tsc_hz()) {
+ plog_info("Received %u Echo Reply on IP "IPv4_BYTES_FMT" (last received from IP "IPv4_BYTES_FMT")\n", task->n_echo_rep, IPv4_BYTES(((uint8_t*)&ip_hdr->dst_addr)), IPv4_BYTES(((uint8_t*)&ip_hdr->src_addr)));
+ task->n_echo_rep = 0;
+ task->last_echo_rep_rcvd_tsc = rte_rdtsc();
+ }
+ } else {
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ } else {
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ break;
+ case IPPROTO_IGMP:
+ pigmp = (struct igmpv2_hdr *)(ip_hdr + 1);
+ // TODO: check packet len
+ type = pigmp->type;
+ if (type == IGMP_MEMBERSHIP_QUERY) {
+ if (task->igmp_address) {
+ // We have an address registered
+ if ((task->igmp_address == pigmp->group_address) || (pigmp->group_address == 0)) {
+ // We get a request for the registered address, or to 0.0.0.0
+ build_igmp_message(tbase, mbufs[j], task->igmp_address, IGMP_MEMBERSHIP_REPORT); // replace Membership query packet with a response
+ } else {
+ // Discard as either we are not registered or this is a query for a different group
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ } else {
+ // Discard as we are not registered
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ } else {
+ // Do not forward other IGMP packets back
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ break;
+ default:
+ plog_warn("Unsupported IP protocol 0x%x\n", ip_hdr->next_proto_id);
+ out[j] = OUT_DISCARD;
+ continue;
+ }
+ }
+ if (task->store_msk) {
+ for (int i = 0; i < n_pkts; i++) {
+ if (out[i] != OUT_DISCARD) {
+ hdr = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ memcpy(&task->store_buf[task->store_pkt_id & task->store_msk].buf, hdr, rte_pktmbuf_pkt_len(mbufs[i]));
+ task->store_buf[task->store_pkt_id & task->store_msk].len = rte_pktmbuf_pkt_len(mbufs[i]);
+ task->store_pkt_id++;
+ }
}
- write_src_and_dst_mac(task, mbufs[j]);
}
return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
+void igmp_join_group(struct task_base *tbase, uint32_t igmp_address)
+{
+ struct task_swap *task = (struct task_swap *)tbase;
+ struct rte_mbuf *igmp_mbuf;
+ uint8_t out[64] = {0};
+ int ret;
+
+ task->igmp_address = igmp_address;
+ ret = rte_mempool_get(task->igmp_pool, (void **)&igmp_mbuf);
+ if (ret != 0) {
+ plog_err("Unable to allocate igmp mbuf\n");
+ return;
+ }
+ build_igmp_message(tbase, igmp_mbuf, task->igmp_address, IGMP_MEMBERSHIP_REPORT);
+ task->base.tx_pkt(&task->base, &igmp_mbuf, 1, out);
+}
+
+void igmp_leave_group(struct task_base *tbase)
+{
+ struct task_swap *task = (struct task_swap *)tbase;
+ struct rte_mbuf *igmp_mbuf;
+ uint8_t out[64] = {0};
+ int ret;
+
+ task->igmp_address = 0;
+ ret = rte_mempool_get(task->igmp_pool, (void **)&igmp_mbuf);
+ if (ret != 0) {
+ plog_err("Unable to allocate igmp mbuf\n");
+ return;
+ }
+ build_igmp_message(tbase, igmp_mbuf, task->igmp_address, IGMP_LEAVE_GROUP);
+ task->base.tx_pkt(&task->base, &igmp_mbuf, 1, out);
+}
+
static void init_task_swap(struct task_base *tbase, struct task_args *targ)
{
struct task_swap *task = (struct task_swap *)tbase;
- struct ether_addr *src_addr, *dst_addr;
+ prox_rte_ether_addr *src_addr, *dst_addr;
/*
* The destination MAC of the outgoing packet is based on the config file:
@@ -239,30 +553,51 @@ static void init_task_swap(struct task_base *tbase, struct task_args *targ)
plog_info("\t\tCore %d: src mac set from port\n", targ->lconf->id);
}
}
- task->runtime_flags = targ->flags;
+ task->flags = targ->flags;
+ task->runtime_flags = targ->runtime_flags;
+ task->igmp_address = rte_cpu_to_be_32(targ->igmp_address);
+ if (task->igmp_pool == NULL) {
+ static char name[] = "igmp0_pool";
+ name[4]++;
+ struct rte_mempool *ret = rte_mempool_create(name, NB_IGMP_MBUF, IGMP_MBUF_SIZE, NB_CACHE_IGMP_MBUF,
+ sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
+ rte_socket_id(), 0);
+ PROX_PANIC(ret == NULL, "Failed to allocate IGMP memory pool on socket %u with %u elements\n",
+ rte_socket_id(), NB_IGMP_MBUF);
+ plog_info("\t\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_IGMP_MBUF,
+ IGMP_MBUF_SIZE, NB_CACHE_IGMP_MBUF, rte_socket_id());
+ task->igmp_pool = ret;
+ }
+ task->local_ipv4 = rte_cpu_to_be_32(targ->local_ipv4);
+
+ struct prox_port_cfg *port = find_reachable_port(targ);
+ if (port) {
+ task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
+ }
+ task->store_pkt_id = 0;
+ if (targ->store_max) {
+ char filename[256];
+ sprintf(filename, "swap_buf_%02d_%02d", targ->lconf->id, targ->task);
+
+ task->store_msk = targ->store_max - 1;
+ task->store_buf = (struct packet *)malloc(sizeof(struct packet) * targ->store_max);
+ task->fp = fopen(filename, "w+");
+ PROX_PANIC(task->fp == NULL, "Unable to open %s\n", filename);
+ } else {
+ task->store_msk = 0;
+ }
}
static struct task_init task_init_swap = {
.mode_str = "swap",
.init = init_task_swap,
.handle = handle_swap_bulk,
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
- .size = sizeof(struct task_swap),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
-};
-
-static struct task_init task_init_swap_arp = {
- .mode_str = "swap",
- .sub_mode_str = "l3",
- .init = init_task_swap,
- .handle = handle_swap_bulk,
- .flag_features = TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+ .flag_features = 0,
.size = sizeof(struct task_swap),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
+ .stop_last = stop_swap
};
__attribute__((constructor)) static void reg_task_swap(void)
{
reg_task(&task_init_swap);
- reg_task(&task_init_swap_arp);
}
diff --git a/VNFs/DPPD-PROX/handle_tsc.c b/VNFs/DPPD-PROX/handle_tsc.c
index e686aaa2..da0afea7 100644
--- a/VNFs/DPPD-PROX/handle_tsc.c
+++ b/VNFs/DPPD-PROX/handle_tsc.c
@@ -31,7 +31,11 @@ static int handle_bulk_tsc(struct task_base *tbase, struct rte_mbuf **mbufs, uin
const uint64_t rx_tsc = rte_rdtsc();
for (uint16_t j = 0; j < n_pkts; ++j)
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ memcpy(&mbufs[j]->dynfield1[0], &rx_tsc, sizeof(rx_tsc));
+#else
mbufs[j]->udata64 = rx_tsc;
+#endif
return task->base.tx_pkt(&task->base, mbufs, n_pkts, NULL);
}
@@ -40,9 +44,8 @@ static struct task_init task_init = {
.mode_str = "tsc",
.init = NULL,
.handle = handle_bulk_tsc,
- .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS|TASK_FEATURE_THROUGHPUT_OPT,
+ .flag_features = TASK_FEATURE_NEVER_DISCARDS|TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS|TASK_FEATURE_THROUGHPUT_OPT,
.size = sizeof(struct task_tsc),
- .mbuf_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
};
__attribute__((constructor)) static void reg_task_nop(void)
diff --git a/VNFs/DPPD-PROX/handle_untag.c b/VNFs/DPPD-PROX/handle_untag.c
index 2fc8fe64..ba3c6251 100644
--- a/VNFs/DPPD-PROX/handle_untag.c
+++ b/VNFs/DPPD-PROX/handle_untag.c
@@ -65,16 +65,16 @@ static int handle_untag_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, u
return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
}
-static inline uint8_t untag_mpls(struct rte_mbuf *mbuf, struct ether_hdr *peth)
+static inline uint8_t untag_mpls(struct rte_mbuf *mbuf, prox_rte_ether_hdr *peth)
{
- struct ether_hdr *pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
+ prox_rte_ether_hdr *pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
const struct mpls_hdr *mpls = (const struct mpls_hdr *)(peth + 1);
- const struct ipv4_hdr *pip = (const struct ipv4_hdr *)(mpls + 1);
+ const prox_rte_ipv4_hdr *pip = (const prox_rte_ipv4_hdr *)(mpls + 1);
PROX_ASSERT(pneweth);
if (mpls->bos == 0) {
// Double MPLS tag
- pneweth = (struct ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
+ pneweth = (prox_rte_ether_hdr *)rte_pktmbuf_adj(mbuf, 4);
PROX_ASSERT(pneweth);
}
@@ -98,13 +98,13 @@ static uint8_t untag_qinq(struct rte_mbuf *mbuf, struct qinq_hdr *qinq)
return OUT_DISCARD;
}
- rte_pktmbuf_adj(mbuf, sizeof(struct qinq_hdr) - sizeof(struct ether_hdr));
+ rte_pktmbuf_adj(mbuf, sizeof(struct qinq_hdr) - sizeof(prox_rte_ether_hdr));
return 0;
}
static inline uint8_t handle_untag(struct task_untag *task, struct rte_mbuf *mbuf)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
const uint16_t etype = peth->ether_type;
if (etype != task->etype) {
diff --git a/VNFs/DPPD-PROX/hash_entry_types.h b/VNFs/DPPD-PROX/hash_entry_types.h
index e2cbcb3c..6288d5a9 100644
--- a/VNFs/DPPD-PROX/hash_entry_types.h
+++ b/VNFs/DPPD-PROX/hash_entry_types.h
@@ -18,9 +18,10 @@
#define _HASH_ENTRY_TYPES_H_
#include <rte_ether.h>
+#include "prox_compat.h"
struct ether_addr_port {
- struct ether_addr mac;
+ prox_rte_ether_addr mac;
uint8_t pad;
uint8_t out_idx;
};
diff --git a/VNFs/DPPD-PROX/hash_utils.c b/VNFs/DPPD-PROX/hash_utils.c
index ad746d5c..3922ef0f 100644
--- a/VNFs/DPPD-PROX/hash_utils.c
+++ b/VNFs/DPPD-PROX/hash_utils.c
@@ -14,6 +14,11 @@
// limitations under the License.
*/
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <string.h>
#include <rte_hash_crc.h>
#include <rte_table_hash.h>
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README
deleted file mode 100644
index 2dac5b69..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/README
+++ /dev/null
@@ -1,128 +0,0 @@
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-rapid (Rapid Automated Performance Indication for Dataplane)
-************************************************************
-
-rapid is a set of files offering an easy way to do a sanity check of the
-dataplane performance of an OpenStack environment.
-
-Copy the files in a directory on a machine that can run the OpenStack CLI
-commands and that can reach the OpenStack public network. Also create a qcow2
-image in the same directory with the following characteristics:
-* Name of the qcow2 file should be: rapidVM.qcow2
- This default name can be overruled on the rapid command line (--image_file)
-* Should have DPDK and PROX installed. PROX should be in /root/prox/ directory
-* Image should have cloud-init installed
-* /mnt/huge should exist to support a command that is executed at startup of the VM: 'mount -t hugetlbfs nodev /mnt/huge'
-* Compile prox with 'make crc=soft'. This is a workaround for some cases where the crc calculation offload is not working as expected.
-* Compile dpdk to support AESN-NI Multi Buffer Crypto Poll Mode Driver: http://dpdk.org/doc/guides/cryptodevs/aesni_mb.html
-
-Source the openrc file of the OpenStack environment so that the OpenStack CLI
-commands can be run:
- # source openrc
-Now you can run the createrapid.py file. Use help for more info on the usage:
- # ./createrapid.py --help
-
-createrapid.py will use the OpenStack CLI to create the flavor, key-pair, network, image,
-servers, ...
-It will create a <STACK>.env file containing all info that will be used by runrapid.py
-to actually run the tests. Logging can be found in the CREATE<STACK>.log file
-You can use floating IP addresses by specifying the floating IP network
---floating_network NETWORK
-or directly connect throught the INTERNAL_NETWORK by using the following parameter:
---floating_network NO
-
-Now you can run the runrapid.py file. Use help for more info on the usage:
- # ./runrapid.py --help
-The script will connect to all machines that have been instantiated and it will launch
-PROX in all machines. This will be done through the admin IP assigned to the machines.
-Once that is done it will connect to the PROX tcp socket and start sending
-commands to run the actual test.
-It will print test results on the screen while running.
-The actual test that is running is described in <TEST>.test.
-
-Notes about prox_user_data.sh script:
-- The script contains commands that will be executed using cloud-init at
- startup of the VMs.
-- The script also assumes some specific DPDK directory and tools which might
- change over different DPDK release. This release has been tested with DPDK-17.02.
-- huge pages are allocated for DPDK on node 0 (hard-coded) in the VM.
-
-Note on using SRIOV ports:
-Before running createrapid, make sure the network, subnet and ports are already created
-This can be done as follows (change the parameters to your needs):
-openstack network create --share --external --provider-network-type flat --provider-physical-network physnet2 fast-network
-openstack subnet create --network fast-network --subnet-range 20.20.20.0/24 --gateway none fast-subnet
-openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port1
-openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port2
-openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port3
-Make sure to use the network and subnet in the createrapid parameters list. Port1, Port2 and Port3
-are being used in the *.env file.
-
-Note when doing tests using the gateway functionality on OVS:
-When a GW VM is sending packets on behalf of another VM (e.g. the generator), we need to make sure the OVS
-will allow those packets to go through. Therefore you need to the IP address of the generator in the
-"allowed address pairs" of the GW VM.
-
-Note when doing tests using encryption on OVS:
-Your OVS configuration might block encrypted packets. To allow packets to go through,
-you can disable port_security. You can do this by using the following commands
-neutron port-update xxxxxx --no-security-groups
-neutron port-update xxxxxx --port_security_enabled=False
-
-An example of the env file generated by createrapid.py can be found below.
-Note that this file can be created manually in case the stack is created in a
-different way (not using the createrapid.py). This can be useful in case you are
-not using OpenStack as a VIM or when using special configurations that cannot be
-achieved using createrapid.py. Only the [Mx] sections are used as
-input for runrapid.py.
-[DEFAULT]
-admin_ip = none
-
-[M1]
-admin_ip = 192.168.4.130
-dp_ip = 10.10.10.6
-dp_mac = fa:16:3e:3c:1e:12
-
-[M2]
-admin_ip = 192.168.4.140
-dp_ip = 10.10.10.9
-dp_mac = fa:16:3e:2a:00:5d
-
-[M3]
-admin_ip = 192.168.4.138
-dp_ip = 10.10.10.11
-dp_mac = fa:16:3e:ae:fa:86
-
-[OpenStack]
-stack = rapid
-yaml = 3VMrapid.yaml
-key = prox
-flavor = prox_flavor
-image = rapidVM
-image_file = rapidVM.qcow2
-dataplane_network = dataplane-network
-subnet = dpdk-subnet
-subnet_cidr = 10.10.10.0/24
-internal_network = admin_internal_net
-floating_network = admin_floating_net
-
-[rapid]
-loglevel = DEBUG
-version = 17.10.25
-total_number_of_vms = 3
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py
deleted file mode 100755
index ffba5013..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/createrapid.py
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/usr/bin/python
-
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-from __future__ import print_function
-
-import os
-import stat
-import sys
-import time
-import subprocess
-import getopt
-import re
-import logging
-from logging.handlers import RotatingFileHandler
-from logging import handlers
-from prox_ctrl import prox_ctrl
-import ConfigParser
-
-version="18.3.27"
-stack = "rapid" #Default string for stack. This is not an OpenStack Heat stack, just a group of VMs
-vms = "rapidVMs" #Default string for vms file
-key = "prox" # default name for kay
-image = "rapidVM" # default name for the image
-image_file = "rapidVM.qcow2"
-dataplane_network = "dataplane-network" # default name for the dataplane network
-subnet = "dpdk-subnet" #subnet for dataplane
-subnet_cidr="10.10.10.0/24" # cidr for dataplane
-internal_network="admin_internal_net"
-floating_network="admin_floating_net"
-loglevel="DEBUG" # sets log level for writing to file
-runtime=10 # time in seconds for 1 test run
-
-def usage():
- print("usage: createrapid [--version] [-v]")
- print(" [--stack STACK_NAME]")
- print(" [--vms VMS_FILE]")
- print(" [--key KEY_NAME]")
- print(" [--image IMAGE_NAME]")
- print(" [--image_file IMAGE_FILE]")
- print(" [--dataplane_network DP_NETWORK]")
- print(" [--subnet DP_SUBNET]")
- print(" [--subnet_cidr SUBNET_CIDR]")
- print(" [--internal_network ADMIN_NETWORK]")
- print(" [--floating_network ADMIN_NETWORK]")
- print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
- print(" [-h] [--help]")
- print("")
- print("Command-line interface to createrapid")
- print("")
- print("optional arguments:")
- print(" -v, --version Show program's version number and exit")
- print(" --stack STACK_NAME Specify a name for the stack. Default is %s."%stack)
- print(" --vms VMS_FILE Specify the vms file to be used. Default is %s.vms."%vms)
- print(" --key KEY_NAME Specify the key to be used. Default is %s."%key)
- print(" --image IMAGE_NAME Specify the image to be used. Default is %s."%image)
- print(" --image_file IMAGE_FILE Specify the image qcow2 file to be used. Default is %s."%image_file)
- print(" --dataplane_network NETWORK Specify the network name to be used for the dataplane. Default is %s."%dataplane_network)
- print(" --subnet DP_SUBNET Specify the subnet name to be used for the dataplane. Default is %s."%subnet)
- print(" --subnet_cidr SUBNET_CIDR Specify the subnet CIDR to be used for the dataplane. Default is %s."%subnet_cidr)
- print(" --internal_network NETWORK Specify the network name to be used for the control plane. Default is %s."%internal_network)
- print(" --floating_network NETWORK Specify the external floating ip network name. Default is %s. NO if no floating ip used."%floating_network)
- print(" --log Specify logging level for log file output, screen output level is hard coded")
- print(" -h, --help Show help message and exit.")
- print("")
-
-try:
- opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "vms=","stack=","key=","image=","image_file=","dataplane_network=","subnet=","subnet_cidr=","internal_network=","floating_network=","log="])
-except getopt.GetoptError as err:
- print("===========================================")
- print(str(err))
- print("===========================================")
- usage()
- sys.exit(2)
-if args:
- usage()
- sys.exit(2)
-for opt, arg in opts:
- if opt in ("-h", "--help"):
- usage()
- sys.exit()
- if opt in ("-v", "--version"):
- print("Rapid Automated Performance Indication for Dataplane "+version)
- sys.exit()
- if opt in ("--stack"):
- stack = arg
- print ("Using '"+stack+"' as name for the stack")
- elif opt in ("--vms"):
- vms = arg
- print ("Using Virtual Machines Description: "+vms)
- elif opt in ("--key"):
- key = arg
- print ("Using key: "+key)
- elif opt in ("--image"):
- image = arg
- print ("Using image: "+image)
- elif opt in ("--image_file"):
- image_file = arg
- print ("Using qcow2 file: "+image_file)
- elif opt in ("--dataplane_network"):
- dataplane_network = arg
- print ("Using dataplane network: "+ dataplane_network)
- elif opt in ("--subnet"):
- subnet = arg
- print ("Using dataplane subnet: "+ subnet)
- elif opt in ("--subnet_cidr"):
- subnet_cidr = arg
- print ("Using dataplane subnet: "+ subnet_cidr)
- elif opt in ("--internal_network"):
- internal_network = arg
- print ("Using control plane network: "+ internal_network)
- elif opt in ("--floating_network"):
- floating_network = arg
- print ("Using floating ip network: "+ floating_network)
- elif opt in ("--log"):
- loglevel = arg
- print ("Log level: "+ loglevel)
-
-
-# create formatters
-screen_formatter = logging.Formatter("%(message)s")
-file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-
-# get a top-level logger,
-# set its log level,
-# BUT PREVENT IT from propagating messages to the root logger
-#
-log = logging.getLogger()
-numeric_level = getattr(logging, loglevel.upper(), None)
-if not isinstance(numeric_level, int):
- raise ValueError('Invalid log level: %s' % loglevel)
-log.setLevel(numeric_level)
-log.propagate = 0
-
-# create a console handler
-# and set its log level to the command-line option
-#
-console_handler = logging.StreamHandler(sys.stdout)
-console_handler.setLevel(logging.INFO)
-console_handler.setFormatter(screen_formatter)
-
-# create a file handler
-# and set its log level to DEBUG
-#
-log_file = 'CREATE' +stack +'.log'
-file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
-#file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
-file_handler.setLevel(numeric_level)
-file_handler.setFormatter(file_formatter)
-
-# add handlers to the logger
-#
-log.addHandler(file_handler)
-log.addHandler(console_handler)
-
-# Check if log exists and should therefore be rolled
-needRoll = os.path.isfile(log_file)
-
-
-# This is a stale log, so roll it
-if needRoll:
- # Add timestamp
- log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
-
- # Roll over on application start
- log.handlers[0].doRollover()
-
-# Add timestamp
-log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
-
-log.debug("createrapid.py version: "+version)
-# Checking if the control network already exists, if not, stop the script
-log.debug("Checking control plane network: "+internal_network)
-cmd = 'openstack network show '+internal_network
-log.debug (cmd)
-cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
-NetworkExist = subprocess.check_output(cmd , shell=True).strip()
-if NetworkExist == 'ACTIVE':
- log.info("Control plane network ("+internal_network+") already active")
-else:
- log.exception("Control plane network " + internal_network + " not existing")
- raise Exception("Control plane network " + internal_network + " not existing")
-
-# Checking if the floating ip network already exists, if not, stop the script
-if floating_network <>'NO':
- log.debug("Checking floating ip network: "+floating_network)
- cmd = 'openstack network show '+floating_network
- log.debug (cmd)
- cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
- NetworkExist = subprocess.check_output(cmd , shell=True).strip()
- if NetworkExist == 'ACTIVE':
- log.info("Floating ip network ("+floating_network+") already active")
- else:
- log.exception("Floating ip network " + floating_network + " not existing")
- raise Exception("Floating ip network " + floating_network + " not existing")
-
-# Checking if the image already exists, if not create it
-log.debug("Checking image: "+image)
-cmd = 'openstack image show '+image
-log.debug(cmd)
-cmd = cmd +' |grep "status " | tr -s " " | cut -d" " -f 4'
-ImageExist = subprocess.check_output(cmd , shell=True).strip()
-if ImageExist == 'active':
- log.info("Image ("+image+") already available")
-else:
- log.info('Creating image ...')
- cmd = 'openstack image create --disk-format qcow2 --container-format bare --public --file ./'+image_file+ ' ' +image
- log.debug(cmd)
- cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
- ImageExist = subprocess.check_output(cmd , shell=True).strip()
- if ImageExist == 'active':
- log.info('Image created and active')
- cmd = 'openstack image set --property hw_vif_multiqueue_enabled="true" ' +image
-# subprocess.check_call(cmd , shell=True)
- else :
- log.exception("Failed to create image")
- raise Exception("Failed to create image")
-
-# Checking if the key already exists, if not create it
-log.debug("Checking key: "+key)
-cmd = 'openstack keypair show '+key
-log.debug (cmd)
-cmd = cmd + ' |grep "name " | tr -s " " | cut -d" " -f 4'
-KeyExist = subprocess.check_output(cmd , shell=True).strip()
-if KeyExist == key:
- log.info("Key ("+key+") already installed")
-else:
- log.info('Creating key ...')
- cmd = 'openstack keypair create '+ key + '>' +key+'.pem'
- log.debug(cmd)
- subprocess.check_call(cmd , shell=True)
- cmd = 'chmod 600 ' +key+'.pem'
- subprocess.check_call(cmd , shell=True)
- cmd = 'openstack keypair show '+key
- log.debug(cmd)
- cmd = cmd + ' |grep "name " | tr -s " " | cut -d" " -f 4'
- KeyExist = subprocess.check_output(cmd , shell=True).strip()
- if KeyExist == key:
- log.info("Key created")
- else :
- log.exception("Failed to create key: " + key)
- raise Exception("Failed to create key: " + key)
-
-
-# Checking if the dataplane network already exists, if not create it
-log.debug("Checking dataplane network: "+dataplane_network)
-cmd = 'openstack network show '+dataplane_network
-log.debug (cmd)
-cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
-NetworkExist = subprocess.check_output(cmd , shell=True).strip()
-if NetworkExist == 'ACTIVE':
- log.info("Dataplane network ("+dataplane_network+") already active")
-else:
- log.info('Creating dataplane network ...')
- cmd = 'openstack network create '+dataplane_network
- log.debug(cmd)
- cmd = cmd + ' |grep "status " | tr -s " " | cut -d" " -f 4'
- NetworkExist = subprocess.check_output(cmd , shell=True).strip()
- if NetworkExist == 'ACTIVE':
- log.info("Dataplane network created")
- else :
- log.exception("Failed to create dataplane network: " + dataplane_network)
- raise Exception("Failed to create dataplane network: " + dataplane_network)
-
-# Checking if the dataplane subnet already exists, if not create it
-log.debug("Checking subnet: "+subnet)
-cmd = 'openstack subnet show '+ subnet
-log.debug (cmd)
-cmd = cmd +' |grep "name " | tr -s " " | cut -d"|" -f 3'
-SubnetExist = subprocess.check_output(cmd , shell=True).strip()
-if SubnetExist == subnet:
- log.info("Subnet (" +subnet+ ") already exists")
-else:
- log.info('Creating subnet ...')
- cmd = 'openstack subnet create --network ' + dataplane_network + ' --subnet-range ' + subnet_cidr +' --gateway none ' + subnet
- log.debug(cmd)
- cmd = cmd + ' |grep "name " | tr -s " " | cut -d"|" -f 3'
- SubnetExist = subprocess.check_output(cmd , shell=True).strip()
- if SubnetExist == subnet:
- log.info("Subnet created")
- else :
- log.exception("Failed to create subnet: " + subnet)
- raise Exception("Failed to create subnet: " + subnet)
-
-
-config = ConfigParser.RawConfigParser()
-vmconfig = ConfigParser.RawConfigParser()
-vmconfig.read(vms+'.vms')
-total_number_of_VMs = vmconfig.get('DEFAULT', 'total_number_of_vms')
-for vm in range(1, int(total_number_of_VMs)+1):
- flavor_info = vmconfig.get('VM%d'%vm, 'flavor_info')
- flavor_meta_data = vmconfig.get('VM%d'%vm, 'flavor_meta_data')
- boot_info = vmconfig.get('VM%d'%vm, 'boot_info')
- SRIOV_port = vmconfig.get('VM%d'%vm, 'SRIOV_port')
- server_name = '%s-VM%d'%(stack,vm)
- flavor_name = '%s-VM%d-flavor'%(stack,vm)
- log.debug("Checking server: "+server_name)
- cmd = 'openstack server show '+server_name
- log.debug (cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- ServerExist = subprocess.check_output(cmd , shell=True).strip()
- if ServerExist == server_name:
- log.info("Server ("+server_name+") already active")
- else:
- # Checking if the flavor already exists, if not create it
- log.debug("Checking flavor: "+flavor_name)
- cmd = 'openstack flavor show '+flavor_name
- log.debug (cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- FlavorExist = subprocess.check_output(cmd , shell=True).strip()
- if FlavorExist == flavor_name:
- log.info("Flavor ("+flavor_name+") already installed")
- else:
- log.info('Creating flavor ...')
- cmd = 'openstack flavor create %s %s'%(flavor_name,flavor_info)
- log.debug(cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- FlavorExist = subprocess.check_output(cmd , shell=True).strip()
- if FlavorExist == flavor_name:
- cmd = 'openstack flavor set %s %s'%(flavor_name, flavor_meta_data)
- log.debug(cmd)
- subprocess.check_call(cmd , shell=True)
- log.info("Flavor created")
- else :
- log.exception("Failed to create flavor: " + flavor_name)
- raise Exception("Failed to create flavor: " + flavor_name)
- if SRIOV_port == 'NO':
- nic_info = '--nic net-id=%s --nic net-id=%s'%(internal_network,dataplane_network)
- else:
- nic_info = '--nic net-id=%s'%(internal_network)
- for port in SRIOV_port.split(','):
- nic_info = nic_info + ' --nic port-id=%s'%(port)
- if vm==int(total_number_of_VMs):
- # For the last server, we want to wait for the server creation to complete, so the next operations will succeeed (e.g. IP allocation)
- # Note that this waiting is not bullet proof. Imagine, we loop through all the VMs, and the last VM was already running, while the previous
- # VMs still needed to be created. Or the previous server creations take much longer than the last one.
- # In that case, we might be to fast when we query for the IP & MAC addresses.
- wait = ' --wait '
- else:
- wait = ' '
- log.info("Creating server...")
- cmd = 'openstack server create --flavor %s --key-name %s --image %s %s %s%s%s'%(flavor_name,key,image,nic_info,boot_info,wait,server_name)
- log.debug(cmd)
- cmd = cmd + ' |grep "\sname\s" | tr -s " " | cut -d" " -f 4'
- ServerExist = subprocess.check_output(cmd , shell=True).strip()
- if floating_network <> 'NO':
- log.info('Creating floating IP ...')
- cmd = 'openstack floating ip create ' + floating_network
- log.debug(cmd)
- cmd = cmd + ' |grep "floating_ip_address " | tr -s " " | cut -d"|" -f 3'
- vmAdminIP = subprocess.check_output(cmd , shell=True).strip()
- log.info('Associating floating IP ...')
- cmd = 'openstack server add floating ip %s %s'%(server_name,vmAdminIP)
- log.debug(cmd)
- output = subprocess.check_output(cmd , shell=True).strip()
- print (output)
-for vm in range(1, int(total_number_of_VMs)+1):
- server_name = '%s-VM%d'%(stack,vm)
- cmd = 'openstack server show %s'%(server_name)
- log.debug(cmd)
- output = subprocess.check_output(cmd , shell=True).strip()
- searchString = '.*%s.*?([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*)' %(dataplane_network)
- matchObj = re.search(searchString, output, re.DOTALL)
- vmDPIP = matchObj.group(1)
- searchString = '.*%s=([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+),*\s*([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)*' %(internal_network)
- matchObj = re.search(searchString, output, re.DOTALL)
- vmAdminIP = matchObj.group(2)
- if vmAdminIP == None:
- vmAdminIP = matchObj.group(1)
- cmd = 'openstack port list |grep %s | tr -s " " | cut -d"|" -f 4'%(vmDPIP)
- log.debug(cmd)
- vmDPmac = subprocess.check_output(cmd , shell=True).strip()
- config.add_section('M%d'%vm)
- config.set('M%d'%vm, 'name', server_name)
- config.set('M%d'%vm, 'admin_ip', vmAdminIP)
- config.set('M%d'%vm, 'dp_ip', vmDPIP)
- config.set('M%d'%vm, 'dp_mac', vmDPmac)
- log.info('%s: (admin IP: %s), (dataplane IP: %s), (dataplane MAC: %s)' % (server_name,vmAdminIP,vmDPIP,vmDPmac))
-
-config.add_section('OpenStack')
-config.set('OpenStack', 'stack', stack)
-config.set('OpenStack', 'VMs', vms)
-config.set('OpenStack', 'key', key)
-config.set('OpenStack', 'image', image)
-config.set('OpenStack', 'image_file', image_file)
-config.set('OpenStack', 'dataplane_network', dataplane_network)
-config.set('OpenStack', 'subnet', subnet)
-config.set('OpenStack', 'subnet_cidr', subnet_cidr)
-config.set('OpenStack', 'internal_network', internal_network)
-config.set('OpenStack', 'floating_network', floating_network)
-config.add_section('rapid')
-config.set('rapid', 'loglevel', loglevel)
-config.set('rapid', 'version', version)
-config.set('rapid', 'total_number_of_machines', total_number_of_VMs)
-config.set('DEFAULT', 'admin_ip', 'none')
-# Writing the environment file
-with open(stack+'.env', 'wb') as envfile:
- config.write(envfile)
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test
deleted file mode 100644
index 3ad014d5..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.test
+++ /dev/null
@@ -1,56 +0,0 @@
-##
-## Copyright (c) 2010-2018 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-[DEFAULT]
-name = BasicSwapTesting
-number_of_tests = 2
-total_number_of_test_machines = 2
-init_code= not_used
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 1
-accuracy = 0.01
-
-[TestM1]
-name = InterruptTesting
-machine_index = 1
-config_file = irq.cfg
-group1cores = [1,2,3]
-
-[TestM2]
-name = InterruptTesting
-machine_index = 2
-config_file = irq.cfg
-group1cores = [1,2,3]
-
-[TestM3]
-name = InterruptTesting
-machine_index = 3
-config_file = irq.cfg
-group1cores = [1,2,3]
-
-
-[test1]
-cmd=run_irqtest(sock[0])
-[test2]
-cmd=run_irqtest(sock[1])
-[test3]
-cmd=run_irqtest(sock[2])
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
deleted file mode 100644
index 059cbf71..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_ctrl.py
+++ /dev/null
@@ -1,248 +0,0 @@
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-from __future__ import print_function
-
-import os
-import subprocess
-import socket
-
-class prox_ctrl(object):
- def __init__(self, ip, key=None, user=None):
- self._ip = ip
- self._key = key
- self._user = user
- self._children = []
- self._proxsock = []
-
- def ip(self):
- return self._ip
-
- def connect(self):
- """Simply try to run 'true' over ssh on remote system.
- On failure, raise RuntimeWarning exception when possibly worth
- retrying, and raise RuntimeError exception otherwise.
- """
- return self.run_cmd('true', True)
-
- def close(self):
- """Must be called before program termination."""
- for prox in self._proxsock:
- prox.quit()
- children = len(self._children)
- if children == 0:
- return
- if children > 1:
- print('Waiting for %d child processes to complete ...' % children)
- for child in self._children:
- ret = os.waitpid(child[0], os.WNOHANG)
- if ret[0] == 0:
- print("Waiting for child process '%s' to complete ..." % child[1])
- ret = os.waitpid(child[0], 0)
- rc = ret[1]
- if os.WIFEXITED(rc):
- if os.WEXITSTATUS(rc) == 0:
- print("Child process '%s' completed successfully" % child[1])
- else:
- print("Child process '%s' returned exit status %d" % (
- child[1], os.WEXITSTATUS(rc)))
- elif os.WIFSIGNALED(rc):
- print("Child process '%s' exited on signal %d" % (
- child[1], os.WTERMSIG(rc)))
- else:
- print("Wait status for child process '%s' is 0x%04x" % (
- child[1], rc))
-
- def run_cmd(self, command, _connect=False):
- """Execute command over ssh on remote system.
- Wait for remote command completion.
- Return command output (combined stdout and stderr).
- _connect argument is reserved for connect() method.
- """
- cmd = self._build_ssh(command)
- try:
- return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- if _connect and ex.returncode == 255:
- raise RuntimeWarning(ex.output.strip())
- raise RuntimeError('ssh returned exit status %d:\n%s'
- % (ex.returncode, ex.output.strip()))
-
- def fork_cmd(self, command, name=None):
- """Execute command over ssh on remote system, in a child process.
- Do not wait for remote command completion.
- Return child process id.
- """
- if name is None:
- name = command
- cmd = self._build_ssh(command)
- pid = os.fork()
- if (pid != 0):
- # In the parent process
- self._children.append((pid, name))
- return pid
- # In the child process: use os._exit to terminate
- try:
- # Actually ignore output on success, but capture stderr on failure
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- raise RuntimeError("Child process '%s' failed:\n"
- 'ssh returned exit status %d:\n%s'
- % (name, ex.returncode, ex.output.strip()))
- os._exit(0)
-
- def prox_sock(self, port=8474):
- """Connect to the PROX instance on remote system.
- Return a prox_sock object on success, None on failure.
- """
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- try:
- sock.connect((self._ip, port))
- prox = prox_sock(sock)
- self._proxsock.append(prox)
- return prox
- except:
- return None
-
- def scp_put(self, src, dst):
- """Copy src file from local system to dst on remote system."""
- cmd = [ 'scp',
- '-B',
- '-oStrictHostKeyChecking=no',
- '-oUserKnownHostsFile=/dev/null',
- '-oLogLevel=ERROR' ]
- if self._key is not None:
- cmd.extend(['-i', self._key])
- cmd.append(src)
- remote = ''
- if self._user is not None:
- remote += self._user + '@'
- remote += self._ip + ':' + dst
- cmd.append(remote)
- try:
- # Actually ignore output on success, but capture stderr on failure
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError as ex:
- raise RuntimeError('scp returned exit status %d:\n%s'
- % (ex.returncode, ex.output.strip()))
-
- def _build_ssh(self, command):
- cmd = [ 'ssh',
- '-oBatchMode=yes',
- '-oStrictHostKeyChecking=no',
- '-oUserKnownHostsFile=/dev/null',
- '-oLogLevel=ERROR' ]
- if self._key is not None:
- cmd.extend(['-i', self._key])
- remote = ''
- if self._user is not None:
- remote += self._user + '@'
- remote += self._ip
- cmd.append(remote)
- cmd.append(command)
- return cmd
-
-class prox_sock(object):
- def __init__(self, sock):
- self._sock = sock
- self._rcvd = b''
-
- def quit(self):
- if self._sock is not None:
- self._send('quit')
- self._sock.close()
- self._sock = None
-
- def start(self, cores):
- self._send('start %s' % ','.join(map(str, cores)))
-
- def stop(self, cores):
- self._send('stop %s' % ','.join(map(str, cores)))
-
- def speed(self, speed, cores, tasks=None):
- if tasks is None:
- tasks = [ 0 ] * len(cores)
- elif len(tasks) != len(cores):
- raise ValueError('cores and tasks must have the same len')
- for (core, task) in zip(cores, tasks):
- self._send('speed %s %s %s' % (core, task, speed))
-
- def reset_stats(self):
- self._send('reset stats')
-
- def lat_stats(self, cores, task=0):
- min_lat = 999999999
- max_lat = avg_lat = 0
- self._send('lat stats %s %s' % (','.join(map(str, cores)), task))
- for core in cores:
- stats = self._recv().split(',')
- min_lat = min(int(stats[0]),min_lat)
- max_lat = max(int(stats[1]),max_lat)
- avg_lat += int(stats[2])
- avg_lat = avg_lat/len(cores)
- return min_lat, max_lat, avg_lat
-
- def irq_stats(self, core, bucket, task=0):
- self._send('stats task.core(%s).task(%s).irq(%s)' % (core, task, bucket))
- stats = self._recv().split(',')
- return int(stats[0])
-
- def show_irq_buckets(self, core, task=0):
- rx = tx = drop = tsc = hz = 0
- self._send('show irq buckets %s %s' % (core,task))
- buckets = self._recv().split(';')
- buckets = buckets[:-1]
- return buckets
-
- def core_stats(self, cores, task=0):
- rx = tx = drop = tsc = hz = 0
- self._send('core stats %s %s' % (','.join(map(str, cores)), task))
- for core in cores:
- stats = self._recv().split(',')
- rx += int(stats[0])
- tx += int(stats[1])
- drop += int(stats[2])
- tsc = int(stats[3])
- hz = int(stats[4])
- return rx, tx, drop, tsc, hz
-
- def set_random(self, cores, task, offset, mask, length):
- self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, mask, length))
-
- def set_size(self, cores, task, pkt_size):
- self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task, pkt_size))
-
- def set_value(self, cores, task, offset, value, length):
- self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)), task, offset, value, length))
-
- def _send(self, cmd):
- """Append LF and send command to the PROX instance."""
- if self._sock is None:
- raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd)
- self._sock.sendall(cmd.encode() + b'\n')
-
- def _recv(self):
- """Receive response from PROX instance, and return it with LF removed."""
- if self._sock is None:
- raise RuntimeError("PROX socket closed, cannot receive anymore")
- pos = self._rcvd.find(b'\n')
- while pos == -1:
- self._rcvd += self._sock.recv(256)
- pos = self._rcvd.find(b'\n')
- rsp = self._rcvd[:pos]
- self._rcvd = self._rcvd[pos+1:]
- return rsp.decode()
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms
deleted file mode 100644
index cf7b2c8d..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/rapidVMs.vms
+++ /dev/null
@@ -1,31 +0,0 @@
-##
-## Copyright (c) 2010-2018 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-
-[DEFAULT]
-total_number_of_vms=3
-flavor_info=--ram 4096 --disk 20 --vcpus 4
-;flavor_meta_data=--property hw:mem_page_size=large --property hw:cpu_policy=dedicated --property hw:cpu_thread_policy=isolate --property hw:numa_nodes=1 --property hw:numa_cpus.0=0,1,2,3 --property hw:numa_mempolicy=strict --property hw:numa_mem.0=4096
-flavor_meta_data=--property hw:mem_page_size=large --property hw:cpu_policy=dedicated --property hw:cpu_thread_policy=isolate
-boot_info=--availability-zone nova --user-data prox_user_data.sh --security-group default
-SRIOV_port=NO
-
-[VM1]
-
-[VM2]
-
-[VM3]
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py
deleted file mode 100755
index 0f523cc0..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/runrapid.py
+++ /dev/null
@@ -1,574 +0,0 @@
-#!/usr/bin/python
-
-##
-## Copyright (c) 2010-2017 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-from __future__ import print_function
-
-import os
-import stat
-import sys
-import time
-import subprocess
-import getopt
-import re
-import logging
-from logging.handlers import RotatingFileHandler
-from logging import handlers
-from prox_ctrl import prox_ctrl
-import ConfigParser
-import ast
-
-version="18.3.27"
-env = "rapid" #Default string for environment
-test = "basicrapid" #Default string for test
-loglevel="DEBUG" # sets log level for writing to file
-runtime=10 # time in seconds for 1 test run
-configonly = False # IF True, the system will upload all the necessary config fiels to the VMs, but not start PROX and the actual testing
-
-def usage():
- print("usage: runrapid [--version] [-v]")
- print(" [--env ENVIRONMENT_NAME]")
- print(" [--test TEST_NAME]")
- print(" [--runtime TIME_FOR_TEST]")
- print(" [--configonly False|True]")
- print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
- print(" [-h] [--help]")
- print("")
- print("Command-line interface to runrapid")
- print("")
- print("optional arguments:")
- print(" -v, --version Show program's version number and exit")
- print(" --env ENVIRONMENT_NAME Parameters will be read from ENVIRONMENT_NAME.env Default is %s."%env)
- print(" --test TEST_NAME Test cases will be read from TEST_NAME.test Default is %s."%test)
- print(" --runtime Specify time in seconds for 1 test run")
- print(" --configonly If True, only upload all config files to the VMs, do not run the tests. Default is %s."%configonly)
- print(" --log Specify logging level for log file output, screen output level is hard coded")
- print(" -h, --help Show help message and exit.")
- print("")
-
-try:
- opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "env=", "test=","runtime=","configonly=","log="])
-except getopt.GetoptError as err:
- print("===========================================")
- print(str(err))
- print("===========================================")
- usage()
- sys.exit(2)
-if args:
- usage()
- sys.exit(2)
-for opt, arg in opts:
- if opt in ("-h", "--help"):
- usage()
- sys.exit()
- if opt in ("-v", "--version"):
- print("Rapid Automated Performance Indication for Dataplane "+version)
- sys.exit()
- if opt in ("--env"):
- env = arg
- print ("Using '"+env+"' as name for the environment")
- if opt in ("--test"):
- test = arg
- print ("Using '"+test+".test' for test case definition")
- if opt in ("--runtime"):
- runtime = arg
- print ("Runtime: "+ runtime)
- if opt in ("--configonly"):
- configonly = arg
- print ("configonly: "+ configonly)
- if opt in ("--log"):
- loglevel = arg
- print ("Log level: "+ loglevel)
-
-
-# create formatters
-screen_formatter = logging.Formatter("%(message)s")
-file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
-
-# get a top-level logger,
-# set its log level,
-# BUT PREVENT IT from propagating messages to the root logger
-#
-log = logging.getLogger()
-numeric_level = getattr(logging, loglevel.upper(), None)
-if not isinstance(numeric_level, int):
- raise ValueError('Invalid log level: %s' % loglevel)
-log.setLevel(numeric_level)
-log.propagate = 0
-
-# create a console handler
-# and set its log level to the command-line option
-#
-console_handler = logging.StreamHandler(sys.stdout)
-console_handler.setLevel(logging.INFO)
-console_handler.setFormatter(screen_formatter)
-
-# create a file handler
-# and set its log level to DEBUG
-#
-log_file = 'RUN' +env+'.'+test+'.log'
-file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
-#file_handler = log.handlers.TimedRotatingFileHandler(log_file, 'D', 1, 5)
-file_handler.setLevel(numeric_level)
-file_handler.setFormatter(file_formatter)
-
-# add handlers to the logger
-#
-log.addHandler(file_handler)
-log.addHandler(console_handler)
-
-# Check if log exists and should therefore be rolled
-needRoll = os.path.isfile(log_file)
-
-
-# This is a stale log, so roll it
-if needRoll:
- # Add timestamp
- log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
-
- # Roll over on application start
- log.handlers[0].doRollover()
-
-# Add timestamp
-log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
-
-log.debug("runrapid.py version: "+version)
-#========================================================================
-def connect_socket(client):
- attempts = 1
- log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
- sock = None
- while True:
- sock = client.prox_sock()
- if sock is not None:
- break
- attempts += 1
- if attempts > 20:
- log.exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
- raise Exception("Failed to connect to PROX on %s after %d attempts" % (client.ip(), attempts))
- time.sleep(2)
- log.debug("Trying to connect to PROX (just launched) on %s, attempt: %d" % (client.ip(), attempts))
- log.info("Connected to PROX on %s" % client.ip())
- return sock
-
-def connect_client(client):
- attempts = 1
- log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
- while True:
- try:
- client.connect()
- break
- except RuntimeWarning, ex:
- attempts += 1
- if attempts > 20:
- log.exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
- raise Exception("Failed to connect to VM after %d attempts:\n%s" % (attempts, ex))
- time.sleep(2)
- log.debug("Trying to connect to VM which was just launched on %s, attempt: %d" % (client.ip(), attempts))
- log.debug("Connected to VM on %s" % client.ip())
-
-def run_iteration(gensock,sutsock):
- sleep_time = 2
- # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
- time.sleep(sleep_time)
- abs_old_rx, abs_old_tx, abs_old_drop, abs_old_tsc, abs_tsc_hz = gensock.core_stats(genstatcores)
- gensock.start(gencores)
- time.sleep(sleep_time)
- if sutsock!='none':
- old_sut_rx, old_sut_tx, old_sut_drop, old_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores)
- old_rx, old_tx, old_drop, old_tsc, tsc_hz = gensock.core_stats(genstatcores)
- time.sleep(float(runtime))
- lat_min, lat_max, lat_avg = gensock.lat_stats(latcores)
- # Get statistics after some execution time
- new_rx, new_tx, new_drop, new_tsc, tsc_hz = gensock.core_stats(genstatcores)
- if sutsock!='none':
- new_sut_rx, new_sut_tx, new_sut_drop, new_sut_tsc, sut_tsc_hz = sutsock.core_stats(sutstatcores)
- #Stop generating
- gensock.stop(gencores)
- time.sleep(sleep_time)
- abs_new_rx, abs_new_tx, abs_new_drop, abs_new_tsc, abs_tsc_hz = gensock.core_stats(genstatcores)
- drop = new_drop-old_drop # drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
- rx = new_rx - old_rx # rx is all packets received by the nop task = all packets received in the gen VM
- tx = new_tx - old_tx # tx is all generated packets actually accepted by the interface
- abs_dropped = (abs_new_tx - abs_old_tx) - (abs_new_rx - abs_old_rx)
- tsc = new_tsc - old_tsc # time difference between the 2 measurements, expressed in cycles.
- pps_req_tx = (tx+drop-rx)*tsc_hz*1.0/(tsc*1000000)
- pps_tx = tx*tsc_hz*1.0/(tsc*1000000)
- pps_rx = rx*tsc_hz*1.0/(tsc*1000000)
- if sutsock!='none':
- sut_rx = new_sut_rx - old_sut_rx
- sut_tx = new_sut_tx - old_sut_tx
- sut_tsc = new_sut_tsc - old_sut_tsc
- pps_sut_tx = sut_tx*sut_tsc_hz*1.0/(sut_tsc*1000000)
- pps_sut_tx_str = '{:>9.3f}'.format(pps_sut_tx)
- else:
- pps_sut_tx = 0
- pps_sut_tx_str = 'NO MEAS.'
- if (tx == 0):
- log.critical("TX = 0. Test interrupted since no packet has been sent.")
- raise Exception("TX = 0")
- return(pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max,abs_dropped,(abs_new_tx - abs_old_tx))
-
-def new_speed(speed,minspeed,maxspeed,success):
- # Following calculates the ratio for the new speed to be applied
- # On the Y axis, we will find the ratio, a number between 0 and 1
- # On the x axis, we find the % of dropped packets, a number between 0 and 100
- # 2 lines are drawn and we take the minumun of these lines to calculate the ratio
- # One line goes through (0,y0) and (p,q)
- # The second line goes through (p,q) and (100,y100)
-# y0=0.99
-# y100=0.1
-# p=1
-# q=.99
-# ratio = min((q-y0)/p*drop_rate+y0,(q-y100)/(p-100)*drop_rate+q-p*(q-y100)/(p-100))
-# return (int(speed*ratio*100)+0.5)/100.0
- if success:
- minspeed = speed
- else:
- maxspeed = speed
- newspeed = (maxspeed+minspeed)/2.0
- return (newspeed,minspeed,maxspeed)
-
-def get_pps(speed,size):
- return (speed * 100.0 / (8*(size+24)))
-
-def run_speedtest(gensock,sutsock):
- log.info("+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| Generator is sending UDP (1 flow) packets (64 bytes) to SUT. SUT sends packets back |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- log.info("| Test | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio | Result |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- maxspeed = speed = 100
- minspeed = 0
- size=60
- attempts = 0
- endpps_sut_tx_str = 'NO_RESULTS'
- gensock.set_size(gencores,0,size) # This is setting the frame size
- gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
- gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
- # This will only work when using sending UDP packets. For different protocls and ehternet types, we would need a differnt calculation
- while (maxspeed-minspeed > ACCURACY):
- attempts += 1
- print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
- sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed, gencores)
- time.sleep(1)
- # Get statistics now that the generation is stable and NO ARP messages any more
- pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx = run_iteration(gensock,sutsock)
- drop_rate = 100.0*abs_dropped/abs_tx
- if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001 and ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)):
- log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% | SUCCESS |')
- endspeed = speed
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx_str = pps_sut_tx_str
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- success = True
- else:
- log.info('|{:>7}'.format(str(attempts))+" | " + '{:>5.1f}'.format(speed) + '% ' +'{:>6.3f}'.format(get_pps(speed,size)) + ' Mpps | '+ '{:>9.3f}'.format(pps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(pps_tx) +' Mpps | ' + '{:>9}'.format(pps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(pps_rx)+' Mpps | '+ '{:>9.0f}'.format(lat_avg)+' us | '+ '{:>9.0f}'.format(lat_max)+' us | '+ '{:>14d}'.format(abs_dropped)+ ' |''{:>9.2f}'.format(drop_rate)+ '% | FAILED |')
- success = False
- speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
- if endpps_sut_tx_str <> 'NO_RESULTS':
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- log.info('|{:>7}'.format('END')+" | " + '{:>5.1f}'.format(endspeed) + '% ' +'{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(endpps_rx)+' Mpps | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+ '{:>9.0f}'.format(endlat_max)+' us | '+'{:>14d}'.format(endabs_dropped)+ ' |''{:>9.2f}'.format(enddrop_rate)+ '% | SUCCESS |')
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+------------+")
- else:
- log.info('| Speed 0 or close to 0')
-
-def run_flowtest(gensock,sutsock):
- log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| UDP, 64 bytes, different number of flows by randomizing SRC & DST UDP port |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- log.info("| Flows | Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- size=60
- # To generate a desired number of flows, PROX will randomize the bits in source and destination ports, as specified by the bit masks in the flows variable.
- flows={128:['1000000000000XXX','100000000000XXXX'],1024:['10000000000XXXXX','10000000000XXXXX'],8192:['1000000000XXXXXX','100000000XXXXXXX'],65535:['10000000XXXXXXXX','10000000XXXXXXXX'],524280:['1000000XXXXXXXXX','100000XXXXXXXXXX']}
-# flows={524280:['1000000XXXXXXXXX','100000XXXXXXXXXX']}
- gensock.set_size(gencores,0,size) # This is setting the frame size
- gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
- gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
- # This will only work when using sending UDP packets. For different protocls and ehternet types, we would need a differnt calculation
- for flow_number in sorted(flows.iterkeys()):
- #speed = 100 Commented out: Not starting from 100% since we are trying more flows, so speed will not be higher than the speed achieved in previous loop
- gensock.reset_stats()
- if sutsock!='none':
- sutsock.reset_stats()
- source_port,destination_port = flows[flow_number]
- gensock.set_random(gencores,0,34,source_port,2)
- gensock.set_random(gencores,0,36,destination_port,2)
- endpps_sut_tx_str = 'NO_RESULTS'
- maxspeed = speed = 100
- minspeed = 0
- while (maxspeed-minspeed > ACCURACY):
- print(str(flow_number)+' flows: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
- sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed, gencores)
- time.sleep(1)
- # Get statistics now that the generation is stable and NO ARP messages any more
- pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx = run_iteration(gensock,sutsock)
- drop_rate = 100.0*abs_dropped/abs_tx
- if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001 and ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)):
- endspeed = speed
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx_str = pps_sut_tx_str
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- success = True
- else:
- success = False
- speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
- if endpps_sut_tx_str <> 'NO_RESULTS':
- log.info('|{:>7}'.format(str(flow_number))+" | " + '{:>5.1f}'.format(endspeed) + '% ' +'{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(endpps_rx)+' Mpps | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+ '{:>9.0f}'.format(endlat_max)+' us | '+ '{:>14d}'.format(endabs_dropped)+ ' |'+'{:>9.2f}'.format(enddrop_rate)+ '% |')
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- else:
- log.info('|{:>7}'.format(str(flow_number))+" | Speed 0 or close to 0")
-
-def run_sizetest(gensock,sutsock):
- log.info("+-----------------------------------------------------------------------------------------------------------------------------------------------------------------+")
- log.info("| UDP, 1 flow, different packet sizes |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- log.info("| Pktsize| Speed requested | Sent to NIC | Sent by Gen | Forward by SUT | Rec. by Gen | Avg. Latency | Max. Latency | Packets Lost | Loss Ratio |")
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- # PROX will use different packet sizes as defined in sizes[]
-# sizes=[1496,1020,508,252,124,60]
- sizes=[1020,508,252,124,60]
- for size in sizes:
- #speed = 100 Commented out: Not starting from 100% since we are trying smaller packets, so speed will not be higher than the speed achieved in previous loop
- gensock.reset_stats()
- if sutsock!='none':
- sutsock.reset_stats()
- gensock.set_size(gencores,0,size) # This is setting the frame size
- gensock.set_value(gencores,0,16,(size-14),2) # 18 is the difference between the frame size and IP size = size of (MAC addresses, ethertype and FCS)
- gensock.set_value(gencores,0,38,(size-34),2) # 38 is the difference between the frame size and UDP size = 18 + size of IP header (=20)
-        # This will only work when sending UDP packets. For different protocols and ethernet types, we would need a different calculation
- endpps_sut_tx_str = 'NO_RESULTS'
- maxspeed = speed = 100
- minspeed = 0
- while (maxspeed-minspeed > ACCURACY):
- print(str(size+4)+' bytes: Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
- sys.stdout.flush()
- # Start generating packets at requested speed (in % of a 10Gb/s link)
- gensock.speed(speed, gencores)
- # Get statistics now that the generation is stable and NO ARP messages any more
- pps_req_tx,pps_tx,pps_sut_tx_str,pps_rx,lat_avg,lat_max, abs_dropped, abs_tx = run_iteration(gensock,sutsock)
- drop_rate = 100.0*abs_dropped/abs_tx
- if ((get_pps(speed,size) - pps_tx)/get_pps(speed,size))<0.001 and ((drop_rate < DROP_RATE_TRESHOLD) or (abs_dropped==DROP_RATE_TRESHOLD ==0)):
- endspeed = speed
- endpps_req_tx = pps_req_tx
- endpps_tx = pps_tx
- endpps_sut_tx_str = pps_sut_tx_str
- endpps_rx = pps_rx
- endlat_avg = lat_avg
- endlat_max = lat_max
- endabs_dropped = abs_dropped
- enddrop_rate = drop_rate
- success = True
- else:
- success = False
- speed,minspeed,maxspeed = new_speed(speed,minspeed,maxspeed,success)
- if endpps_sut_tx_str <> 'NO_RESULTS':
- log.info('|{:>7}'.format(size+4)+" | " + '{:>5.1f}'.format(endspeed) + '% ' +'{:>6.3f}'.format(get_pps(endspeed,size)) + ' Mpps | '+ '{:>9.3f}'.format(endpps_req_tx)+' Mpps | '+ '{:>9.3f}'.format(endpps_tx) +' Mpps | ' + '{:>9}'.format(endpps_sut_tx_str) +' Mpps | '+ '{:>9.3f}'.format(endpps_rx)+' Mpps | '+ '{:>9.0f}'.format(endlat_avg)+' us | '+'{:>9.0f}'.format(endlat_max)+' us | '+ '{:>14d}'.format(endabs_dropped)+ ' |'+'{:>9.2f}'.format(enddrop_rate)+ '% |')
- log.info("+--------+--------------------+----------------+----------------+----------------+----------------+----------------+----------------+----------------+------------+")
- else:
- log.debug('|{:>7}'.format(str(size))+" | Speed 0 or close to 0")
-
-
-def run_irqtest(sock):
- log.info("+----------------------------------------------------------------------------------------------------------------------------")
- log.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic ")
- log.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and ")
- log.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was ")
- log.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout ")
- log.info("| the duration of the test. This is to avoid rounding errors in the case of 0.0 ")
- log.info("+----------------------------------------------------------------------------------------------------------------------------")
- sys.stdout.flush()
- buckets=sock.show_irq_buckets(1)
- print('Measurement ongoing ... ',end='\r')
- sock.stop(irqcores)
- old_irq = [[0 for x in range(len(buckets)+1)] for y in range(len(irqcores)+1)]
- irq = [[0 for x in range(len(buckets)+1)] for y in range(len(irqcores)+1)]
- irq[0][0] = 'bucket us'
- for j,bucket in enumerate(buckets,start=1):
- irq[0][j] = '<'+ bucket
- irq[0][-1] = '>'+ buckets [-2]
- for j,bucket in enumerate(buckets,start=1):
- for i,irqcore in enumerate(irqcores,start=1):
- old_irq[i][j] = sock.irq_stats(irqcore,j-1)
- sock.start(irqcores)
- time.sleep(float(runtime))
- sock.stop(irqcores)
- for i,irqcore in enumerate(irqcores,start=1):
- irq[i][0]='core %s '%irqcore
- for j,bucket in enumerate(buckets,start=1):
- diff = sock.irq_stats(irqcore,j-1) - old_irq[i][j]
- if diff == 0:
- irq[i][j] = '0'
- else:
- irq[i][j] = diff/float(runtime)
- log.info('\n'.join([''.join(['{:>12}'.format(item) for item in row]) for row in irq]))
-
-
-def init_test():
-# Running at low speed to make sure the ARP messages can get through.
-# If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
-# Note however that if we were to run the test steps for a very long time, the ARP entries would expire in the switch.
-# PROX will send a new ARP request every second so chances are very low that they will all fail to get through
- sock[0].speed(0.01, gencores)
- sock[0].start(genstatcores)
- time.sleep(2)
- sock[0].stop(gencores)
-
-global sutstatcores
-global genstatcores
-global latcores
-global gencores
-global irqcores
-global DROP_RATE_TRESHOLD
-global ACCURACY
-vmDPIP =[]
-vmAdminIP =[]
-vmDPmac =[]
-hexDPIP =[]
-config_file =[]
-script_control =[]
-
-testconfig = ConfigParser.RawConfigParser()
-testconfig.read(test+'.test')
-required_number_of_test_machines = testconfig.get('DEFAULT', 'total_number_of_test_machines')
-DROP_RATE_TRESHOLD = float(testconfig.get('DEFAULT', 'drop_rate_treshold'))
-ACCURACY = float(testconfig.get('DEFAULT', 'accuracy'))
-config = ConfigParser.RawConfigParser()
-config.read(env+'.env')
-key = config.get('OpenStack', 'key')
-total_number_of_machines = config.get('rapid', 'total_number_of_machines')
-if int(required_number_of_test_machines) > int(total_number_of_machines):
- log.exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
- raise Exception("Not enough VMs for this test: %s needed and only %s available" % (required_number_of_test_machines,total_number_of_machines))
-for vm in range(1, int(total_number_of_machines)+1):
- vmAdminIP.append(config.get('M%d'%vm, 'admin_ip'))
- vmDPmac.append(config.get('M%d'%vm, 'dp_mac'))
- vmDPIP.append(config.get('M%d'%vm, 'dp_ip'))
- ip = vmDPIP[-1].split('.')
- hexDPIP.append(hex(int(ip[0]))[2:].zfill(2) + ' ' + hex(int(ip[1]))[2:].zfill(2) + ' ' + hex(int(ip[2]))[2:].zfill(2) + ' ' + hex(int(ip[3]))[2:].zfill(2))
-machine_index = []
-for vm in range(1, int(required_number_of_test_machines)+1):
- machine_index.append(int(testconfig.get('TestM%d'%vm, 'machine_index'))-1)
-for vm in range(1, int(required_number_of_test_machines)+1):
- config_file.append(testconfig.get('TestM%d'%vm, 'config_file'))
- script_control.append(testconfig.get('TestM%d'%vm, 'script_control'))
- group1cores=testconfig.get('TestM%d'%vm, 'group1cores')
- if group1cores <> 'not_used':
- group1cores=ast.literal_eval(group1cores)
- group2cores=testconfig.get('TestM%d'%vm, 'group2cores')
- if group2cores <> 'not_used':
- group2cores=ast.literal_eval(group2cores)
- group3cores=testconfig.get('TestM%d'%vm, 'group3cores')
- if group3cores <> 'not_used':
- group3cores=ast.literal_eval(group3cores)
- with open("parameters%d.lua"%vm, "w") as f:
- f.write('name="%s"\n'% testconfig.get('TestM%d'%vm, 'name'))
- f.write('local_ip="%s"\n'% vmDPIP[machine_index[vm-1]])
- f.write('local_hex_ip="%s"\n'% hexDPIP[machine_index[vm-1]])
- gwVM = testconfig.get('TestM%d'%vm, 'gw_vm')
- if gwVM <> 'not_used':
- gwVMindex = int(gwVM)-1
- f.write('gw_ip="%s"\n'% vmDPIP[machine_index[gwVMindex]])
- f.write('gw_hex_ip="%s"\n'% hexDPIP[machine_index[gwVMindex]])
- destVM = testconfig.get('TestM%d'%vm, 'dest_vm')
- if destVM <> 'not_used':
- destVMindex = int(destVM)-1
- f.write('dest_ip="%s"\n'% vmDPIP[machine_index[destVMindex]])
- f.write('dest_hex_ip="%s"\n'% hexDPIP[machine_index[destVMindex]])
- f.write('dest_hex_mac="%s"\n'% vmDPmac[machine_index[destVMindex]].replace(':',' '))
- if group1cores <> 'not_used':
- f.write('group1="%s"\n'% ','.join(map(str, group1cores)))
- if group2cores <> 'not_used':
- f.write('group2="%s"\n'% ','.join(map(str, group2cores)))
- if group3cores <> 'not_used':
- f.write('group3="%s"\n'% ','.join(map(str, group3cores)))
- if re.match('(l2){0,1}gen.*\.cfg',config_file[-1]):
- gencores = group1cores
- latcores = group2cores
- genstatcores = group3cores
- elif config_file[-1] == 'gen_gw.cfg':
- gencores = group1cores
- latcores = group2cores
- genstatcores = group3cores
- elif re.match('(l2){0,1}swap.*\.cfg',config_file[-1]):
- sutstatcores = group1cores
- elif config_file[-1] == 'secgw2.cfg':
- sutstatcores = group1cores
- elif config_file[-1] == 'irq.cfg':
- irqcores = group1cores
- f.close
-#####################################################################################
-client =[]
-sock =[]
-
-for vm in range(0, int(required_number_of_test_machines)):
- client.append(prox_ctrl(vmAdminIP[machine_index[vm]], key+'.pem','root'))
- connect_client(client[-1])
-# Creating script to bind the right network interface to the poll mode driver
- devbindfile = "devbindvm%d.sh"%(vm+1)
- with open("devbind.sh") as f:
- newText=f.read().replace('MACADDRESS', vmDPmac[machine_index[vm]])
- with open(devbindfile, "w") as f:
- f.write(newText)
- st = os.stat(devbindfile)
- os.chmod(devbindfile, st.st_mode | stat.S_IEXEC)
- client[-1].scp_put('./%s'%devbindfile, '/root/devbind.sh')
- cmd = '/root/devbind.sh'
- client[-1].run_cmd(cmd)
- log.debug("devbind.sh running on VM%d"%(vm+1))
- client[-1].scp_put('./%s'%config_file[vm], '/root/%s'%config_file[vm])
- client[-1].scp_put('./parameters%d.lua'%(vm+1), '/root/parameters.lua')
- log.debug("Starting PROX on VM%d"%(vm+1))
- if script_control[vm] == 'true':
- cmd = '/root/prox/build/prox -e -t -o cli -f /root/%s'%config_file[vm]
- else:
- cmd = '/root/prox/build/prox -t -o cli -f /root/%s'%config_file[vm]
- if configonly == False:
- client[-1].fork_cmd(cmd, 'PROX Testing on TestM%d'%(vm+1))
- sock.append(connect_socket(client[-1]))
-if configonly:
- sys.exit()
-init_code = testconfig.get('DEFAULT', 'init_code')
-if init_code <> 'not_used':
- eval(init_code)
-####################################################
-# Run test cases
-# Best to run the flow test at the end since otherwise the tests coming after that might be influenced by the large number of entries in the switch flow tables
-####################################################
-number_of_tests = testconfig.get('DEFAULT', 'number_of_tests')
-for vm in range(1, int(number_of_tests)+1):
- cmd=testconfig.get('test%d'%vm,'cmd')
- eval(cmd)
-####################################################
-for vm in range(0, int(required_number_of_test_machines)):
- sock[vm].quit()
- client[vm].close()
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test b/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test
deleted file mode 100644
index 1ac171a6..00000000
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw.test
+++ /dev/null
@@ -1,59 +0,0 @@
-##
-## Copyright (c) 2010-2018 Intel Corporation
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## http://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
-
-[DEFAULT]
-name = GWTesting
-number_of_tests = 1
-total_number_of_test_machines = 3
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 0.01
-accuracy = 0.01
-
-[TestM1]
-name = Generator
-machine_index = 1
-config_file = gen_gw.cfg
-dest_vm = 3
-gw_vm = 2
-script_control = true
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
-
-[TestM2]
-name = GW1
-machine_index = 2
-config_file = secgw1.cfg
-dest_vm = 3
-group1cores = [1]
-
-[TestM3]
-name = GW2
-machine_index = 3
-config_file = secgw2.cfg
-group1cores = [1]
-
-[test1]
-cmd=run_speedtest(sock[0],sock[2])
-
-[test2]
-cmd=run_sizetest(sock[0],sock[2])
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile b/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
new file mode 100644
index 00000000..fef0fcaf
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/Dockerfile
@@ -0,0 +1,119 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+##################################################
+# Build all components in separate builder image #
+##################################################
+
+FROM ubuntu:20.04 as builder
+
+ARG DPDK_VERSION=22.07
+ENV DPDK_VERSION=${DPDK_VERSION}
+
+ARG BUILD_DIR="/opt/rapid"
+ENV BUILD_DIR=${BUILD_DIR}
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install Dependencies
+RUN apt update && apt -y install git wget gcc unzip libpcap-dev libncurses5-dev \
+ libedit-dev liblua5.3-dev linux-headers-generic iperf3 pciutils \
+ libnuma-dev vim tuna wireshark make driverctl openssh-server sudo \
+ meson python3-pyelftools pkg-config
+
+WORKDIR ${BUILD_DIR}
+
+# Install DPDK
+RUN wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz \
+ && tar -xf ./dpdk-${DPDK_VERSION}.tar.xz \
+ && cd dpdk-${DPDK_VERSION} \
+ && meson build -Dlibdir=lib/x86_64-linux-gnu -Denable_driver_sdk=true \
+ && ninja -C build install
+
+WORKDIR ${BUILD_DIR}
+
+# Install Prox
+RUN git clone https://gerrit.opnfv.org/gerrit/samplevnf \
+ && cd samplevnf/VNFs/DPPD-PROX \
+ && COMMIT_ID=$(git rev-parse HEAD) \
+ && echo "${COMMIT_ID}" > ${BUILD_DIR}/commit_id \
+ && meson build \
+ && ninja -C build \
+ && cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/prox ${BUILD_DIR}/prox
+
+# Build and copy port info app
+WORKDIR ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/port_info
+RUN meson build \
+ && ninja -C build \
+ && cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/build/port_info_app ${BUILD_DIR}/port_info_app
+
+RUN ldconfig && pkg-config --modversion libdpdk > ${BUILD_DIR}/dpdk_version
+# Create Minimal Install
+RUN ldd ${BUILD_DIR}/prox | awk '$2 ~ /=>/ {print $3}' >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/commit_id" >> ${BUILD_DIR}/list_of_install_components \
+ && echo "${BUILD_DIR}/dpdk_version" >> ${BUILD_DIR}/list_of_install_components \
+ && find /usr/local/lib/x86_64-linux-gnu -not -path '*/\.*' >> ${BUILD_DIR}/list_of_install_components \
+ && tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
+
+#############################
+# Create slim runtime image #
+#############################
+FROM ubuntu:20.04
+
+ARG BUILD_DIR="/opt/rapid"
+ENV BUILD_DIR=${BUILD_DIR}
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install Runtime Dependencies
+RUN apt update -y
+# Install required dynamically linked libraries + required packages
+RUN apt -y install sudo openssh-server libatomic1
+
+COPY --from=builder ${BUILD_DIR}/install_components.tgz ${BUILD_DIR}/install_components.tgz
+
+WORKDIR /
+RUN tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+RUN ldconfig
+RUN rm ${BUILD_DIR}/install_components.tgz
+
+# Expose SSH and PROX ports
+EXPOSE 22 8474
+
+RUN useradd -rm -d /home/rapid -s /bin/bash -g root -G sudo -u 1000 rapid \
+ && chmod 777 ${BUILD_DIR} \
+ && echo 'rapid:rapid' | chpasswd \
+ && mkdir /home/rapid/.ssh
+
+# Copy SSH keys
+COPY ./rapid_rsa_key.pub /home/rapid/.ssh/authorized_keys
+COPY ./rapid_rsa_key.pub /root/.ssh/authorized_keys
+
+RUN chown rapid:root /home/rapid/.ssh/authorized_keys \
+ && chmod 600 /home/rapid/.ssh/authorized_keys \
+ && chown root:root /root/.ssh/authorized_keys \
+ && chmod 600 /root/.ssh/authorized_keys
+
+#RUN apt-get clean && apt autoremove --purge
+RUN apt-get autoremove -y && apt-get clean all && rm -rf /var/cache/apt
+
+# Copy startup script
+COPY ./start.sh /start.sh
+RUN chmod +x /start.sh
+
+ENTRYPOINT ["/start.sh"]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/README b/VNFs/DPPD-PROX/helper-scripts/rapid/README
new file mode 100644
index 00000000..198b6db1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/README
@@ -0,0 +1,183 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+rapid (Rapid Automated Performance Indication for Dataplane)
+************************************************************
+
+rapid is a set of files offering an easy way to do a sanity check of the
+dataplane performance of an OpenStack or container environment.
+
+Most of the information below is now available on wiki.opnfv.org/display/SAM/Rapid+scripting
+
+In case of OpenStack, copy the files in a directory on a machine that can run the OpenStack CLI
+commands and that can reach the networks to connect to the VMs.
+
+You will need an image that has the PROX tool installed.
+A good way to do this is to use the packer tool to build an image for a target of your choice.
+You can also build this image manually by executing all the commands described in the deploycentostools.sh.
+The default name of the qcow2 file is rapidVM.qcow2
+
+When using the packer tool, the first step is to upload an
+existing CentOS cloud image from the internet into OpenStack.
+Check out: https://cloud.centos.org/centos/7/images/
+You should now create proper clouds.yaml file so Packer can connect to your OpenStack.
+Sample clouds.yaml could look like this:
+
+client:
+ force_ipv4: true
+clouds:
+ overcloud:
+ verify: False
+ interface: "public"
+ auth:
+ username: "admin"
+ password: "your_password"
+ project_name: "admin"
+ tenant_name: "admin"
+ auth_url: "https://192.168.1.1:5000/v3"
+ user_domain_name: "Default"
+ domain_name: "Default"
+ identity_api_version: "3"
+
+Packer could be run from docker image, you will need to create following alias:
+
+alias packer='docker run -it --env OS_CLOUD=$OS_CLOUD -v "$PWD":/root/project -w /root/project hashicorp/packer:light $@'
+and make sure the OS_CLOUD variable is set to the correct cloud: in the clouds.yaml example above, you would first
+export OS_CLOUD=overcloud
+
+There are 2 files: centos.json and deploycentostools.sh, allowing you to create
+an image automatically. Run
+ # packer build centos.json
+Edit centos.json to reflect the settings of your environment: The following fields need to populated
+with the values of your system:
+ - "source_image_name": Needs to be the name of the Centos cloud image
+ - "flavor": Needs to be the ID or name of the flavor existing in your OpenStack environment that will be used
+ to start the VM in which we will install all tools
+ - "network_discovery_cidrs": Should contain the CIDR of the network you want to use e.g. "10.6.6.0/24"
+ - "floating_ip_network": ID or name of the floating ip network in case floating ip are being used
+ - "security_groups": ID or name of the security group being used
+
+Refer to Packer docs for more details:
+https://www.packer.io/docs/builders/openstack.html
+
+Note that this procedure is not only installing the necessary tools to run PROX,
+but also does some system optimizations (tuned). Check deploycentostools.sh for more details.
+
+Now you need to create a stack, that will deploy the PROX VMs using the PROX
+image built in the previous step. The stack needs to have an output section
+with the following outputs:
+outputs:
+ number_of_servers:
+ value:
+ - <NUMBER_OF_SERVERS> # A list of <NUMBER_OF_SERVERS>
+ server_name:
+ value:
+ - - <SERVER_NAME> # A list containing a list of <SERVER_NAME>
+ data_plane_ips:
+ value:
+ - - <DATA_PLANE_IPS> # A list containing a list of <DATA_PLANE_IPS>
+ data_plane_macs:
+ value:
+ - - <DATA_PLANE_MACS> # A list containing a list of <DATA_PLANE_MACS>
+ mngmt_ips:
+ value:
+ - - <MNGMT_IP> # A list containing a list of <MNGMT_IP>
+where
+ * <NUMBER_OF_SERVERS> is an int
+ * <SERVER_NAME> is a string
+ * <DATA_PLANE_IPS> is a list of strings
+ * <DATA_PLANE_MACS> is a list of strings
+ * <MNGMT_IP> is a string
+
+createrapid.py will take the input from config_file, to create an ssh keypair
+and stack (if not already existing). The tool will use the yaml files as
+specified in the config_file and create a <STACK>.env file, containing
+input used for runrapid.py.
+
+Now you can run the runrapid.py file. Use help for more info on the usage:
+ # ./runrapid.py --help
+The script will connect to all machines that have been instantiated and it will launch
+PROX in all machines. This will be done through the admin IP assigned to the machines.
+Once that is done it will connect to the PROX tcp socket and start sending
+commands to run the actual test.
+Make sure the security groups allow for tcp access (ssh & prox port).
+It will print test results on the screen while running.
+The actual test that is running is described in <TEST>.test.
+
+Notes about prox_user_data.sh script:
+- The script contains commands that will be executed using cloud-init at
+ startup of the VMs.
+- huge pages are allocated for DPDK on node 0 (hard-coded) in the VM.
+
+Note on using SRIOV ports:
+Before running createrapid, make sure the network, subnet and ports are already created
+This can be done as follows (change the parameters to your needs):
+openstack network create --share --external --provider-network-type flat --provider-physical-network physnet2 fast-network
+openstack subnet create --network fast-network --subnet-range 20.20.20.0/24 --gateway none fast-subnet
+openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port1
+openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port2
+openstack port create --network fast-network --vnic-type direct --fixed-ip subnet=fast-subnet Port3
+
+Note when doing tests using the gateway functionality on OVS:
+When a GW VM is sending packets on behalf of another VM (e.g. the generator), we need to make sure the OVS
+will allow those packets to go through. Therefore you need to add the IP address of the generator to the
+"allowed address pairs" of the GW VM.
+
+Note when doing tests using encryption on OVS:
+Your OVS configuration might block encrypted packets. To allow packets to go through,
+you can disable port_security. You can do this by using the following commands
+neutron port-update xxxxxx --no-security-groups
+neutron port-update xxxxxx --port_security_enabled=False
+
+An example of the env file generated by createrapid.py can be found below.
+Note that this file can be created manually in case the stack is created in a
+different way than what is described in this text. This can be useful in case
+you are not using OpenStack as a VIM or when using special configurations that
+cannot be achieved using createrapid.py. Fields needed for runrapid are:
+* all info in the [Mx] sections
+* the key information in the [ssh] section
+* the total_number_of_vms information in the [rapid] section
+
+[rapid]
+loglevel = DEBUG
+version = 19.6.30
+total_number_of_machines = 3
+
+[M1]
+name = rapid-VM1
+admin_ip = 10.25.1.109
+dp_ip1 = 10.10.10.4
+dp_mac1 = fa:16:3e:25:be:25
+
+[M2]
+name = rapid-VM2
+admin_ip = 10.25.1.110
+dp_ip1 = 10.10.10.7
+dp_mac1 = fa:16:3e:72:bf:e8
+
+[M3]
+name = rapid-VM3
+admin_ip = 10.25.1.125
+dp_ip1 = 10.10.10.15
+dp_mac1 = fa:16:3e:69:f3:e7
+
+[ssh]
+key = prox.pem
+user = centos
+
+[Varia]
+vim = OpenStack
+stack = rapid
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s b/VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s
new file mode 100644
index 00000000..e1abbe75
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/README.k8s
@@ -0,0 +1,94 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+###############################################################################
+# REQUIREMENTS #
+###############################################################################
+1. Working Kubernetes cluster. It can be set up using Intel Container Bare
+Metal Reference Architecture https://github.com/intel/container-experience-kits
+
+2. 1024x 2M hugepages must be configured on the nodes
+
+3. SRIOV Network Device Plugin for Kubernetes installed
+https://github.com/intel/sriov-network-device-plugin.
+
+4. SRIOV VFs configured and rebind to the vfio-pci module
+As an example, SRIOV VFs (rebind to the vfio-pci driver) pool is named as
+intel.com/intel_sriov_vfio.
+
+Network attachment definition is named as
+k8s.v1.cni.cncf.io/networks: intel-sriov-vfio.
+
+5. PROX image created and pushed to the local registry or distributed and
+loaded on all of the testing nodes.
+
+###############################################################################
+# PROX IMAGE BUILD #
+###############################################################################
+Run
+# dockerimage.sh build
+to build PROX image.
+
+After the successful build, prox.tar will be created and can be used to load
+image on the k8s nodes or it can be pushed to the local repository using
+# dockerimage.sh push
+
+###############################################################################
+# TESTING #
+###############################################################################
+1. Edit rapidpods file and set the right name (nodeSelector_hostname) for the
+nodes on which you want to execute test PODs.
+
+# kubectl get nodes -o wide
+NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+k8s-master1 Ready master 7d13h v1.13.5 10.10.0.10 <none> CentOS Linux 7 (Core) 3.10.0-1062.4.1.el7.x86_64 docker://18.6.2
+k8s-node1 Ready node 7d13h v1.13.5 10.10.0.12 <none> CentOS Linux 7 (Core) 3.10.0-1062.4.1.el7.x86_64 docker://18.6.2
+k8s-node2 Ready node 7d13h v1.13.5 10.10.0.13 <none> CentOS Linux 7 (Core) 3.10.0-1062.4.1.el7.x86_64 docker://18.6.2
+
+Set the right IP addresses (dp_ip) to use by the PODs for the Dataplane network.
+
+2. Edit pod-rapid.yaml file and set correct
+ - image name (image: localhost:5000/prox:latest)
+ - network attachment definition in metadata->annotation section
+ (k8s.v1.cni.cncf.io/networks: intel-sriov-vfio)
+ - SRIOV VFs resources attached to the vfio-pci driver
+ (intel.com/intel_sriov_vfio: '1')
+
+3. Copy SSH private key in the rapid_rsa_key file
+
+4. Run createrapidk8s.py to create test PODs according to the configuration from
+rapid.pods file.
+
+# ./createrapidk8s.py
+
+Check for rapid PODs. They should be up and running.
+
+# kubectl get pods -o wide
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+pod-rapid-1 1/1 Running 0 18h 10.244.2.87 k8s-node1 <none> <none>
+pod-rapid-2 1/1 Running 0 18h 10.244.1.40 k8s-node2 <none> <none>
+pod-rapid-3 1/1 Running 0 18h 10.244.1.39 k8s-node2 <none> <none>
+
+5. Run test case.
+
+# ./runrapid.py --test basicrapid.test
+
+###############################################################################
+# NOTES #
+###############################################################################
+If layer 2 tests are planned to be executed, MAC addresses must be
+preconfigured for the SRIOV VFs to avoid issues with randomly generated MACs
+each time when the PROX starts.
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/centos.json b/VNFs/DPPD-PROX/helper-scripts/rapid/centos.json
new file mode 100644
index 00000000..51784c0e
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/centos.json
@@ -0,0 +1,52 @@
+{
+"_Copyright": "Copyright (c) 2010-2020 Intel Corporation",
+"_License": "SPDX-License-Identifier: Apache-2.0",
+"builders": [
+ {
+"type": "openstack",
+"ssh_username": "centos",
+"image_name": "rapidVM",
+"source_image_name": "CentOS",
+"flavor": "packer_flavor",
+"network_discovery_cidrs":"10.6.6.0/24",
+"floating_ip_network": "admin_floating_net",
+"security_groups": "prox_security_group",
+"ssh_timeout":"1000s",
+"ssh_pty":"true"
+ }
+],
+"provisioners": [
+ {
+ "type": "shell",
+ "inline": [
+ "sudo mkdir -p /opt/rapid",
+ "sudo chmod 0777 /opt/rapid" ]
+ },
+ {
+ "type": "file",
+ "source": "./check_prox_system_setup.sh",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "file",
+ "source": "./check-prox-system-setup.service",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "file",
+ "source": "./sharkproxlog.sh",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "file",
+ "source": "./deploycentostools.sh",
+ "destination": "/opt/rapid/"
+ },
+ {
+ "type": "shell",
+ "inline": [
+ "chmod a+x /opt/rapid/deploycentostools.sh",
+ "/opt/rapid/deploycentostools.sh -u deploy" ]
+ }
+]
+}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service b/VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service
new file mode 100644
index 00000000..f52055e7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/check-prox-system-setup.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=Check PROX system setup (isolated_cores, vfio)
+DefaultDependencies=no
+After=multi-user.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/libexec/check_prox_system_setup.sh
+
+[Install]
+WantedBy=multi-user.target
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
new file mode 100755
index 00000000..3cf1113d
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/check_prox_system_setup.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+## This script should run after booting: see check-prox-system-setup.service
+
+NCPUS="$(lscpu | egrep '^CPU\(s\):' | awk '{ print $2 }')"
+MAXCOREID="$((NCPUS-1))"
+
+tuned_config="/etc/tuned/realtime-virtual-guest-variables.conf"
+log_file="/opt/rapid/prox_system_setup.log"
+system_ready="/opt/rapid/system_ready_for_rapid"
+tuned_done="/opt/rapid/tuned_done"
+after_boot_file="/opt/rapid/after_boot.sh"
+
+tuned_and_reboot () {
+ echo "Applying tuned profile">>$log_file
+ tuned-adm profile realtime-virtual-guest
+ touch "$tuned_done"
+ echo "Rebooting...">>$log_file
+ reboot
+ exit 0
+}
+
+if [ -f "$tuned_config" ]
+then
+ while read -r line
+ do
+ case $line in
+ isolated_cores=1-$MAXCOREID*)
+ if test ! -f "$tuned_done"; then
+ tuned_and_reboot
+ fi
+ if test -f "$after_boot_file"; then
+ echo "Executing: $after_boot_file">>$log_file
+ ("$after_boot_file")
+ fi
+ echo "Isolated CPU(s) OK, no reboot: $line">>$log_file
+ ## rapid scripts will wait for the system_ready file to exist
+ ## Only then, they will be able to connect to the PROX instance
+ ## and start the testing
+ touch "$system_ready"
+ ## On some systems, we still need to use the igb_uio driver.
+ ## Example: good performance on AWS with the ENA interface.
+ ## Make sure that you change devbind.sh to use the preferred
+ ## driver. vfio is the default.
+ modprobe uio
+ insmod /opt/rapid/dpdk/build/kmod/igb_uio.ko wc_activate=1
+ exit 0
+ ;;
+ isolated_cores=*)
+ echo "Isolated CPU(s) NOK: $line">>$log_file
+ sed -i "/^isolated_cores=.*/c\isolated_cores=1-$MAXCOREID" $tuned_config
+ tuned_and_reboot
+ ;;
+ *)
+ echo "$line"
+ ;;
+ esac
+ done < "$tuned_config"
+ echo "isolated_cores=1-$MAXCOREID" >> $tuned_config
+ echo "No Isolated CPU(s) defined in config, line added: isolated_cores=1-$MAXCOREID">>$log_file
+ tuned_and_reboot
+else
+ echo "$tuned_config not found.">>$log_file
+fi
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/config_file b/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
new file mode 100644
index 00000000..b5aeb3a9
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/config_file
@@ -0,0 +1,8 @@
+[OpenStack]
+cloud_name = openstackL6
+stack_name = rapid
+heat_template= openstack-rapid.yaml
+heat_param = params_rapid.yaml
+user = centos
+dataplane_subnet_mask = 24
+;push_gateway = http://192.168.36.61:9091/metrics/job/
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg
new file mode 100644
index 00000000..75267f35
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/cgnat.cfg
@@ -0,0 +1,81 @@
+;;
+;; Copyright (c) 2021 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+public_start_ip = string.match(dest_ip1,"%d+\.%d+\.%d+\.")..2
+public_stop_ip = string.match(dest_ip1,"%d+\.%d+\.%d+\.")..20
+cgnat_table = {}
+cgnat_table.dynamic = {
+ {public_ip_range_start = ip(public_start_ip),public_ip_range_stop = ip(public_stop_ip), public_port = val_range(10,20000)},
+}
+lpm4 = {}
+lpm4.next_hops = {
+ {id = 0, port_id = 0, ip = ip("1.1.1.1"), mac = mac("00:00:00:00:00:01"), mpls = 0x212},
+}
+lpm4.routes = {};
+lpm4.routes[1] = {
+ cidr = {ip = ip(0), depth = 1},
+ next_hop_id = 0,
+}
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=internal_tap
+local ipv4=${local_ip1}
+
+[port 1]
+name=if1
+mac=hardware
+vlan=yes
+vdev=external_tap
+local ipv4=${local_ip2}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=nat
+task=0
+mode=cgnat
+sub mode=l3
+private=yes
+nat table=cgnat_table
+route table=lpm4
+rx port=if0
+tx ports from routing table=if1
+
+task=1
+mode=cgnat
+sub mode=l3
+private=no
+nat table=cgnat_table
+route table=lpm4
+rx port=if1
+tx ports from routing table=if0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg
new file mode 100644
index 00000000..31728daf
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/esp.cfg
@@ -0,0 +1,47 @@
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=esp_tap
+local ipv4=$local_ip1
+
+[defaults]
+mempool size=64K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=enc
+task=0
+mode=esp_enc
+sub mode=l3
+remote ipv4=$dest_ip1
+rx port=if0
+tx cores=$altcores task=0
+drop=yes
+
+
+[core $altcores]
+name=dec
+task=0
+mode=esp_dec
+sub mode=l3
+remote ipv4=$dest_ip1
+rx ring=yes
+tx port=if0
+drop=yes
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg
index a87ce758..8d3f8581 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2020 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,49 +14,56 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=gen_tap
+local ipv4=${local_ip1}
+
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
+heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $gencores]
name=p0
task=0
mode=gen
sub mode=l3
tx port=p0
-bps=1250000000
-pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 1a 55 7b
+bps=1250000
+pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-;gateway ipv4=${gw_ip}
-local ipv4=${local_ip}
min bulk size=$mbs
max bulk size=16
-drop=no
+drop=yes
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+;arp update time=1
-[core ${group2}]
+[core $latcores]
name=lat
task=0
mode=lat
@@ -65,6 +72,9 @@ rx port=p0
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+accuracy limit nsec=1000000
+latency bucket size=${bucket_size_exp}
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg
index 7feaa7fd..8a477e5f 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/gen_gw.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/gen_gw.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2020 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,49 +14,56 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=gen_tap
+local ipv4=${local_ip1}
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
+heartbeat timeout=${heartbeat}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $gencores]
name=p0
task=0
mode=gen
sub mode=l3
tx port=p0
-bps=1250000000
-pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 1a 55 7b
+bps=1250000
+pkt inline=00 00 00 00 00 00 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-gateway ipv4=${gw_ip}
-local ipv4=${local_ip}
+gateway ipv4=${gw_ip1}
min bulk size=$mbs
max bulk size=16
-drop=no
+drop=yes
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+;arp update time=1
-[core ${group2}]
+[core $latcores]
name=lat
task=0
mode=lat
@@ -65,5 +72,7 @@ rx port=p0
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+latency bucket size=${bucket_size_exp}
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg
new file mode 100644
index 00000000..32fadbc7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/genv6.cfg
@@ -0,0 +1,78 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+heartbeat timeout=${heartbeat}
+
+[core $mcore]
+mode=master
+
+[core $gencores]
+name=gen
+task=0
+mode=gen
+sub mode=ndp
+tx port=p0
+bps=1000
+pkt inline=00 00 01 00 00 01 00 00 02 00 00 02 86 dd 60 00 00 00 00 1a 11 40 ${local_hex_ip1} ${dest_hex_ip1} 13 88 13 88 00 1a 55 7b 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+global ipv6=${local_ip1}
+min bulk size=$mbs
+max bulk size=16
+drop=yes
+lat pos=62
+packet id pos=66
+signature pos=72
+signature=0x98765432
+accuracy pos=76
+pkt size=80
+
+
+
+[core $latcores]
+name=lat
+task=0
+mode=lat
+sub mode=ndp
+rx port=p0
+lat pos=62
+accuracy pos=76
+packet id pos=66
+signature=0x98765432
+signature pos=72
+accuracy limit nsec=1000000
+latency bucket size=${bucket_size_exp}
+global ipv6=${local_ip1}
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg
index e8b3801d..3eaf80e7 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/impair.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,34 +14,39 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if0
mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
+vdev=impair_tap
+local ipv4=${local_ip1}
[defaults]
-mempool size=2K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=impair
task=0
mode=impair
sub mode=l3
rx port=if0
tx port=if0
-delay us=10
-probability=100
-local ipv4=${local_ip}
-
+delay us=1000
+proba delay=50
+proba no drop=100
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg
index 3ae539c5..0f26e6eb 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/irq.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/irq.cfg
@@ -14,29 +14,30 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
-[lua]
-dofile("parameters.lua")
-
-[port 0]
+[;port 0]
name=p0
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=irq
task=0
mode=irq
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2gen.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg
index 9e7bf90e..3af0ac99 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2gen.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,48 +14,50 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
[variables]
$mbs=8
[defaults]
-mempool size=4K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $gencores]
name=p0
task=0
mode=gen
tx port=p0
bps=1250000000
-pkt inline=${dest_hex_mac} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip} ${dest_hex_ip} 0b b8 0b b9 00 1a 55 7b
+pkt inline=${dest_hex_mac1} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
pkt size=60
-;gateway ipv4=${gw_ip}
-local ipv4=${local_ip}
min bulk size=$mbs
max bulk size=16
-drop=no
+drop=yes
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
-[core ${group2}]
+[core $latcores]
name=lat
task=0
mode=lat
@@ -63,5 +65,6 @@ rx port=p0
lat pos=42
accuracy pos=46
packet id pos=50
-signature=0x6789abcd
+signature=0x98765432
signature pos=56
+latency bucket size=${bucket_size_exp}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg
new file mode 100644
index 00000000..dc988969
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2gen_bare.cfg
@@ -0,0 +1,59 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=p0
+rx desc=2048
+tx desc=2048
+vlan=yes
+
+[variables]
+$mbs=8
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $gencores]
+name=p0
+task=0
+mode=gen
+tx port=p0
+bps=1250000000
+pkt inline=${dest_hex_mac1} 00 00 00 00 00 00 08 00 45 00 00 2e 00 01 00 00 40 11 f7 7d ${local_hex_ip1} ${dest_hex_ip1} 0b b8 0b b9 00 1a 55 7b
+pkt size=60
+min bulk size=$mbs
+max bulk size=64
+drop=yes
+
+[core $latcores]
+name=drop
+task=0
+mode=none
+rx port=p0
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg
index c02556d9..0ce3a1a3 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/l2swap.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,31 +14,34 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if0
mac=hardware
+rx desc=2048
+tx desc=2048
+vlan=yes
[defaults]
-mempool size=2K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=swap
task=0
mode=swap
rx port=if0
tx port=if0
drop=no
-
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg
new file mode 100644
index 00000000..9ffd6e8f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/public_server.cfg
@@ -0,0 +1,57 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=public_tap
+local ipv4=${local_ip1}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=PublicServer
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx cores=${self}t1
+drop=no
+
+task=1
+mode=mirror
+sub mode=l3
+multiplier=2
+mirror size=300
+rx ring=yes
+tx port=if0
+drop=no
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg
index 30abb8f7..d941e5eb 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw1.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw1.cfg
@@ -18,12 +18,13 @@
; This is sample ESP config.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if
@@ -40,15 +41,15 @@ mempool size=16K
start time=20
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=esp_enc
task=0
mode=esp_enc
sub mode=l3
-local ipv4=${local_ip}
+local ipv4=${local_ip1}
remote ipv4=${dest_ip}
rx port=if
tx port=if
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg
index a361e875..9aedc85d 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/secgw2.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/secgw2.cfg
@@ -17,13 +17,15 @@
;;
; This is sample ESP config.
;;
-[eal options]
--n=4 ; force number of memory channels
-no-output=no ; disable DPDK debug output
[lua]
dofile("parameters.lua")
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
[port 0]
name=if
mac=hardware
@@ -39,15 +41,15 @@ mempool size=16K
start time=20
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=esp_dec
task=0
mode=esp_dec
sub mode=l3
-local ipv4=${local_ip}
+local ipv4=${local_ip1}
rx port=if
tx port=if
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg
new file mode 100644
index 00000000..f5ff5447
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/setup.cfg
@@ -0,0 +1,10 @@
+[metadata]
+name = rapidxt
+version = 1
+
+[files]
+packages = .
+
+[entry_points]
+xtesting.testcase =
+ rapidxt = rapidxt:RapidXt
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg
index 4229c207..f66322a9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/swap.cfg
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap.cfg
@@ -1,5 +1,5 @@
;;
-;; Copyright (c) 2010-2017 Intel Corporation
+;; Copyright (c) 2010-2019 Intel Corporation
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
@@ -14,33 +14,36 @@
;; limitations under the License.
;;
+[lua]
+dofile("parameters.lua")
+
[eal options]
-n=4 ; force number of memory channels
no-output=no ; disable DPDK debug output
-
-[lua]
-dofile("parameters.lua")
+eal=--proc-type auto ${eal}
[port 0]
name=if0
mac=hardware
+vlan=yes
+vdev=swap_tap
+local ipv4=${local_ip1}
[defaults]
-mempool size=2K
+mempool size=8K
[global]
name=${name}
-[core 0]
+[core $mcore]
mode=master
-[core ${group1}]
+[core $cores]
name=swap
task=0
mode=swap
sub mode=l3
rx port=if0
tx port=if0
-local ipv4=${local_ip}
drop=no
-
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg
new file mode 100644
index 00000000..abadfa64
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swap_gw.cfg
@@ -0,0 +1,50 @@
+;;
+;; Copyright (c) 2010-2019 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+vdev=swap_tap
+local ipv4=${local_ip1}
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=swap
+task=0
+mode=swap
+sub mode=l3
+rx port=if0
+tx port=if0
+gateway ipv4=${gw_ip1}
+drop=no
+;arp update time=1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg
new file mode 100644
index 00000000..61c8a594
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/configs/swapv6.cfg
@@ -0,0 +1,47 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[lua]
+dofile("parameters.lua")
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+eal=--proc-type auto ${eal}
+
+[port 0]
+name=if0
+mac=hardware
+vlan=yes
+
+[defaults]
+mempool size=8K
+
+[global]
+name=${name}
+
+[core $mcore]
+mode=master
+
+[core $cores]
+name=swap
+task=0
+mode=swap
+sub mode=ndp
+rx port=if0
+tx port=if0
+global ipv6=${local_ip1}
+drop=no
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
new file mode 100755
index 00000000..af1da307
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapid.py
@@ -0,0 +1,64 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+from rapid_log import RapidLog
+from stackdeployment import StackDeployment
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
+
+class RapidStackManager(object):
+ @staticmethod
+ def parse_config(rapid_stack_params):
+ config = configparser.RawConfigParser()
+ config.read('config_file')
+ section = 'OpenStack'
+ options = config.options(section)
+ for option in options:
+ rapid_stack_params[option] = config.get(section, option)
+ if 'dataplane_subnet_mask' not in rapid_stack_params.keys():
+ rapid_stack_params['dataplane_subnet_mask'] = 24
+ return (rapid_stack_params)
+
+ @staticmethod
+ def deploy_stack(rapid_stack_params):
+ cloud_name = rapid_stack_params['cloud_name']
+ stack_name = rapid_stack_params['stack_name']
+ heat_template = rapid_stack_params['heat_template']
+ heat_param = rapid_stack_params['heat_param']
+ user = rapid_stack_params['user']
+ dataplane_subnet_mask = rapid_stack_params['dataplane_subnet_mask']
+ deployment = StackDeployment(cloud_name)
+ deployment.deploy(stack_name, heat_template, heat_param)
+ deployment.generate_env_file(user, dataplane_subnet_mask)
+
+def main():
+ rapid_stack_params = {}
+ RapidStackManager.parse_config(rapid_stack_params)
+ log_file = 'CREATE{}.log'.format(rapid_stack_params['stack_name'])
+ RapidLog.log_init(log_file, 'DEBUG', 'INFO', '2021.03.15')
+ #cloud_name = 'openstackL6'
+ #stack_name = 'rapid'
+ #heat_template = 'openstack-rapid.yaml'
+ #heat_param = 'params_rapid.yaml'
+ #user = 'centos'
+ RapidStackManager.deploy_stack(rapid_stack_params)
+
+if __name__ == "__main__":
+ main()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
new file mode 100755
index 00000000..c4667f1f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/createrapidk8s.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import argparse
+from rapid_k8s_deployment import K8sDeployment
+
+# Config file name for deployment creation
+CREATE_CONFIG_FILE_NAME = "rapid.pods"
+
+# Config file name for runrapid script
+RUN_CONFIG_FILE_NAME = "rapid.env"
+
+def main():
+ # Parse command line arguments
+ argparser = argparse.ArgumentParser()
+ argparser.add_argument("-c", "--clean", action = "store_true",
+ help = "Terminate pod-rapid-* PODs. "
+ "Clean up cluster before or after the testing.")
+ args = argparser.parse_args()
+
+ # Create a new deployment
+ deployment = K8sDeployment()
+
+ # Load config file with test environment description
+ deployment.load_create_config(CREATE_CONFIG_FILE_NAME)
+
+ if args.clean:
+ deployment.delete_pods()
+ return
+
+ # Create PODs for test
+ deployment.create_pods()
+
+ # Save config file for runrapid script
+ deployment.save_runtime_config(RUN_CONFIG_FILE_NAME)
+
+if __name__ == "__main__":
+ main()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
new file mode 100644
index 00000000..a0fe7cb2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/deploycentostools.sh
@@ -0,0 +1,305 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# Directory for package build
+BUILD_DIR="/opt/rapid"
+DPDK_VERSION="20.05"
+MULTI_BUFFER_LIB_VER="0.52"
+export RTE_SDK="${BUILD_DIR}/dpdk-${DPDK_VERSION}"
+export RTE_TARGET="x86_64-native-linuxapp-gcc"
+
+# By default, do not update OS
+OS_UPDATE="n"
+# By default, assuming that we are in the VM
+K8S_ENV="n"
+
+# If already running from root, no need for sudo
+SUDO=""
+[ $(id -u) -ne 0 ] && SUDO="sudo"
+
+function os_pkgs_install()
+{
+ ${SUDO} yum install -y deltarpm yum-utils
+
+ # NASM repository for AESNI MB library
+ #${SUDO} yum-config-manager --add-repo http://www.nasm.us/nasm.repo
+
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+ ${SUDO} yum install -y git wget gcc unzip libpcap-devel ncurses-devel \
+ libedit-devel lua-devel kernel-devel iperf3 pciutils \
+ numactl-devel vim tuna openssl-devel wireshark \
+ make driverctl
+
+ ${SUDO} wget --no-check-certificate \
+ https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+ ${SUDO} rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
+}
+
+function k8s_os_pkgs_runtime_install()
+{
+ [ "${OS_UPDATE}" == "y" ] && ${SUDO} yum update -y
+
+ # Install required dynamically linked libraries + required packages
+ ${SUDO} yum install -y numactl-libs libpcap openssh openssh-server \
+ openssh-clients sudo
+
+ # Install additional packets for universal image
+ ${SUDO} yum install -y epel-release python3 kubernetes-client
+ ${SUDO} yum install -y python3-paramiko python3-future
+ ${SUDO} python3 -m pip install --upgrade pip
+ ${SUDO} pip3 install scp kubernetes
+}
+
+function os_cfg()
+{
+ # huge pages to be used by DPDK
+ ${SUDO} sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
+
+ ${SUDO} sh -c '(echo "options vfio enable_unsafe_noiommu_mode=1") > /etc/modprobe.d/vfio.conf'
+ ${SUDO} sh -c '(echo "vfio") > /etc/modules-load.d/vfio.conf'
+ ${SUDO} sh -c '(echo "vfio-pci") > /etc/modules-load.d/vfio.conf'
+ # Enabling tuned with the realtime-virtual-guest profile
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm
+ wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm
+ # Install with --nodeps. The latest CentOS cloud images come with a tuned version higher than 2.8. These 2 packages however
+ # do not depend on v2.8 and also work with tuned 2.9. Need to be careful in the future
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-realtime-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+ ${SUDO} rpm -ivh ${BUILD_DIR}/tuned-profiles-nfv-guest-2.8.0-5.el7_4.2.noarch.rpm --nodeps
+    # Although we do not know how many cores the VM will have when being deployed for real testing, we already put a number for the
+ # isolated CPUs so we can start the realtime-virtual-guest profile. If we don't, that command will fail.
+ # When the VM will be instantiated, the check_kernel_params service will check for the real number of cores available to this VM
+ # and update the realtime-virtual-guest-variables.conf accordingly.
+ echo "isolated_cores=1-3" | ${SUDO} tee -a /etc/tuned/realtime-virtual-guest-variables.conf
+ ${SUDO} tuned-adm profile realtime-virtual-guest
+
+ # Install the check_tuned_params service to make sure that the grub cmd line has the right cpus in isolcpu. The actual number of cpu's
+ # assigned to this VM depends on the flavor used. We don't know at this time what that will be.
+ ${SUDO} chmod +x ${BUILD_DIR}/check_prox_system_setup.sh
+ ${SUDO} mv ${BUILD_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+ ${SUDO} mv ${BUILD_DIR}/check-prox-system-setup.service /etc/systemd/system/
+ ${SUDO} systemctl daemon-reload
+ ${SUDO} systemctl enable check-prox-system-setup.service
+ popd > /dev/null 2>&1
+}
+
+function k8s_os_cfg()
+{
+ [ ! -f /etc/ssh/ssh_host_rsa_key ] && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ecdsa_key ] && ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N ''
+ [ ! -f /etc/ssh/ssh_host_ed25519_key ] && ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
+
+ [ ! -d /var/run/sshd ] && mkdir -p /var/run/sshd
+
+ USER_NAME="centos"
+ USER_PWD="centos"
+
+ useradd -m -d /home/${USER_NAME} -s /bin/bash -U ${USER_NAME}
+ echo "${USER_NAME}:${USER_PWD}" | chpasswd
+ usermod -aG wheel ${USER_NAME}
+
+ echo "%wheel ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/wheelnopass
+}
+
+function mblib_install()
+{
+ export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
+
+ # Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+ unzip v${MULTI_BUFFER_LIB_VER}.zip
+ pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} make install
+ popd > /dev/null 2>&1
+ popd > /dev/null 2>&1
+}
+
+function dpdk_install()
+{
+ # Build DPDK for the latest kernel installed
+ LATEST_KERNEL_INSTALLED=`ls -v1 /lib/modules/ | tail -1`
+ export RTE_KERNELDIR="/lib/modules/${LATEST_KERNEL_INSTALLED}/build"
+
+ # Get and compile DPDK
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ wget http://fast.dpdk.org/rel/dpdk-${DPDK_VERSION}.tar.xz
+ tar -xf ./dpdk-${DPDK_VERSION}.tar.xz
+ popd > /dev/null 2>&1
+
+ ${SUDO} ln -s ${RTE_SDK} ${BUILD_DIR}/dpdk
+
+ pushd ${RTE_SDK} > /dev/null 2>&1
+ make config T=${RTE_TARGET}
+ # Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
+ # Uncomment the sed command to enable the driver compilation
+ #${SUDO} sed -i 's/CONFIG_RTE_EAL_IGB_UIO=n/c\/CONFIG_RTE_EAL_IGB_UIO=y' ${RTE_SDK}/build/.config
+
+ # For Kubernetes environment we use host vfio module
+ if [ "${K8S_ENV}" == "y" ]; then
+ sed -i 's/CONFIG_RTE_EAL_IGB_UIO=y/CONFIG_RTE_EAL_IGB_UIO=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_LIBRTE_KNI=y/CONFIG_RTE_LIBRTE_KNI=n/g' ${RTE_SDK}/build/.config
+ sed -i 's/CONFIG_RTE_KNI_KMOD=y/CONFIG_RTE_KNI_KMOD=n/g' ${RTE_SDK}/build/.config
+ fi
+
+ # Compile with MB library
+ sed -i '/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/c\CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y' ${RTE_SDK}/build/.config
+ make -j`getconf _NPROCESSORS_ONLN`
+ ln -s ${RTE_SDK}/build ${RTE_SDK}/${RTE_TARGET}
+ popd > /dev/null 2>&1
+}
+
+function prox_compile()
+{
+ # Compile PROX
+ pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX
+ COMMIT_ID=$(git rev-parse HEAD)
+ echo "${COMMIT_ID}" > ${BUILD_DIR}/commit_id
+ make -j`getconf _NPROCESSORS_ONLN`
+ ${SUDO} cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+ popd > /dev/null 2>&1
+}
+
+function prox_install()
+{
+ # Clone PROX
+ pushd ${BUILD_DIR} > /dev/null 2>&1
+ git clone https://git.opnfv.org/samplevnf
+ cp -R ./samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid ./src
+ popd > /dev/null 2>&1
+ prox_compile
+
+ # Clean build folder
+ rm -rf ${BUILD_DIR}/samplevnf
+}
+
+function port_info_build()
+{
+ [ ! -d ${BUILD_DIR}/port_info ] && echo "Skipping port_info compilation..." && return
+
+ pushd ${BUILD_DIR}/port_info > /dev/null 2>&1
+ make
+ ${SUDO} cp ${BUILD_DIR}/port_info/build/app/port_info_app ${BUILD_DIR}/port_info_app
+ popd > /dev/null 2>&1
+}
+
+function create_minimal_install()
+{
+ ldd ${BUILD_DIR}/prox | awk '{ if ($(NF-1) != "=>") print $(NF-1) }' >> ${BUILD_DIR}/list_of_install_components
+
+ echo "${BUILD_DIR}/prox" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/port_info_app" >> ${BUILD_DIR}/list_of_install_components
+ echo "${BUILD_DIR}/commit_id" >> ${BUILD_DIR}/list_of_install_components
+
+ tar -czvhf ${BUILD_DIR}/install_components.tgz -T ${BUILD_DIR}/list_of_install_components
+}
+
+function cleanup()
+{
+ ${SUDO} yum autoremove -y
+ ${SUDO} yum clean all
+ ${SUDO} rm -rf /var/cache/yum
+}
+
+function k8s_runtime_image()
+{
+ k8s_os_pkgs_runtime_install
+ k8s_os_cfg
+ cleanup
+
+ pushd / > /dev/null 2>&1
+ tar -xvf ${BUILD_DIR}/install_components.tgz --skip-old-files
+ popd > /dev/null 2>&1
+
+ ldconfig
+
+ rm -rf ${BUILD_DIR}/install_components.tgz
+}
+
+function print_usage()
+{
+ echo "Usage: ${0} [OPTIONS] [COMMAND]"
+ echo "Options:"
+ echo " -u, --update Full OS update"
+ echo " -k, --kubernetes Build for Kubernetes environment"
+ echo "Commands:"
+ echo " deploy Run through all deployment steps"
+ echo " compile PROX compile only"
+ echo " runtime_image Apply runtime configuration only"
+}
+
+COMMAND=""
+# Parse options and command
+for opt in "$@"; do
+ case ${opt} in
+ -u|--update)
+ echo 'Full OS update will be done!'
+ OS_UPDATE="y"
+ ;;
+ -k|--kubernetes)
+ echo "Kubernetes environment is set!"
+ K8S_ENV="y"
+ ;;
+ compile)
+ COMMAND="compile"
+ ;;
+ runtime_image)
+ COMMAND="runtime_image"
+ ;;
+ deploy)
+ COMMAND="deploy"
+ ;;
+ *)
+ echo "Unknown option/command ${opt}"
+ print_usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ "${COMMAND}" == "compile" ]; then
+ echo "PROX compile only..."
+ prox_compile
+elif [ "${COMMAND}" == "runtime_image" ]; then
+    echo "Runtime image installation and configuration..."
+ k8s_runtime_image
+elif [ "${COMMAND}" == "deploy" ]; then
+ [ ! -d ${BUILD_DIR} ] && ${SUDO} mkdir -p ${BUILD_DIR}
+ ${SUDO} chmod 0777 ${BUILD_DIR}
+
+ os_pkgs_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ k8s_os_cfg
+ else
+ os_cfg
+ fi
+
+ mblib_install
+ dpdk_install
+ prox_install
+
+ if [ "${K8S_ENV}" == "y" ]; then
+ port_info_build
+ create_minimal_install
+ fi
+
+ cleanup
+else
+ print_usage
+fi
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
new file mode 100755
index 00000000..0bde3cc2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/devbind.sh
@@ -0,0 +1,12 @@
+link="$(sudo ip -o link | grep MACADDRESS |cut -d":" -f 2)"
+if [ -n "$link" ];
+then
+ echo Need to bind
+ # Uncomment one of the following lines, depending on which driver
+ # you want to use: vfio-pci or igb_uio
+ #sudo /opt/rapid/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $(sudo /opt/rapid/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)
+ sudo driverctl set-override $(sudo ethtool -i $link |grep bus-info | cut -d" " -f 2) vfio-pci
+else
+ echo Assuming port is already bound to DPDK poll mode driver
+fi
+exit 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
new file mode 100755
index 00000000..e2266e58
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/dockerimage.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+PROX_DEPLOY_DIR="."
+PROX_IMAGE_NAME="rapid"
+RSA_KEY_FILE_NAME="rapid_rsa_key"
+
+DOCKERFILE="Dockerfile"
+DOCKER_REGISTRY="localhost:5000"
+
+USE_DOCKER_CACHE="n"
+
+IMAGE_BUILD_LOG="dockerimage-build.log"
+
+function create_ssh_key()
+{
+ if [ -f ./${RSA_KEY_FILE_NAME} ]; then
+ read -p "RSA key already exist! Do you want to remove it (yYnN)?" -n 1 -r
+
+ if [ "${REPLY}" == "y" ] || [ "${REPLY}" == "Y" ]; then
+ echo "Removing existing key..."
+ sleep 3
+
+ [ -f "./${RSA_KEY_FILE_NAME}" ] && rm -rf ./${RSA_KEY_FILE_NAME}
+ [ -f "./${RSA_KEY_FILE_NAME}.pub" ] && rm -rf ./${RSA_KEY_FILE_NAME}.pub
+ else
+ echo "Using existing key..."
+ return
+ fi
+ fi
+
+ echo "Generating new RSA key..."
+ ssh-keygen -t rsa -b 4096 -N "" -f ./${RSA_KEY_FILE_NAME}
+}
+
+function build_prox_image()
+{
+ if [ "${USE_DOCKER_CACHE}" == "y" ]; then
+ echo "Building image using cache..."
+ docker build --rm -t ${PROX_IMAGE_NAME}:latest -f ${DOCKERFILE} ${PROX_DEPLOY_DIR} 2>&1 | tee ./${IMAGE_BUILD_LOG}
+ else
+ echo "Building image without cache..."
+ docker build --no-cache --rm -t ${PROX_IMAGE_NAME}:latest -f ${DOCKERFILE} ${PROX_DEPLOY_DIR} 2>&1 | tee ./${IMAGE_BUILD_LOG}
+ fi
+}
+
+function save_prox_image()
+{
+ echo "Saving image ${PROX_IMAGE_NAME}:latest to ./${PROX_IMAGE_NAME}.tar"
+ docker save -o ./${PROX_IMAGE_NAME}.tar ${PROX_IMAGE_NAME}:latest
+}
+
+function load_prox_image()
+{
+ echo "Loading image ./${PROX_IMAGE_NAME}.tar"
+ docker load -i ./${PROX_IMAGE_NAME}.tar
+}
+
+function push_prox_image()
+{
+ docker tag ${PROX_IMAGE_NAME}:latest ${DOCKER_REGISTRY}/${PROX_IMAGE_NAME}
+ docker push ${DOCKER_REGISTRY}/${PROX_IMAGE_NAME}
+}
+
+function print_help()
+{
+ echo "${0}: [build|load|push]"
+ echo " build: build and save image ${PROX_IMAGE_NAME}:latest using ${DOCKERFILE}"
+ echo " load: load saved image from ${PROX_IMAGE_NAME}.tar file in the local registry"
+ echo " push: tag and push local ${PROX_IMAGE_NAME}:latest image in the ${DOCKER_REGISTRY}/${PROX_IMAGE_NAME} registry"
+}
+
+if [ "$1" == "build" ]; then
+ create_ssh_key
+ build_prox_image
+ save_prox_image
+elif [ "$1" == "load" ]; then
+ load_prox_image
+elif [ "$1" == "push" ]; then
+ push_prox_image
+else
+ print_help
+fi
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
new file mode 100644
index 00000000..8dcb09ba
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/format.yaml
@@ -0,0 +1,105 @@
+;Format: PushGateway
+;Format: Xtesting
+;URL:
+ part1: http://testresults.opnfv.org/test/api/v1/results
+;URL:
+ part1: http://192.168.36.61:9091/metrics/job/
+ part2: test
+ part3: /instance/
+ part4: environment_file
+;FlowsizeTest:
+ Flows: Flows
+ Size: Size
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+FlowSizeTest:
+ Environment: environment_file
+ Test: test
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ Latency (usec):
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Distribution:
+ bucket_size: bucket_size
+ buckets: buckets
+ Absolute Packet Count:
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Re-ordering:
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+IrqTest:
+ Environment: environment_file
+ Test: test
+ Buckets: buckets
+ Machine_data: machine_data
+ImpairTest:
+ Environment: environment_file
+ Test: test
+ Flows: Flows
+ Size: Size
+ Speed (Mpps):
+ RequestedSpeed: RequestedSpeed
+ CoreGenerated: pps_req_tx
+ SentByNIC: pps_tx
+ FwdBySUT: pps_sut_tx
+ RevByCore: pps_rx
+ Latency (usec):
+ AvgLatency: lat_avg
+ PCTLatency: lat_perc
+ MinLatency: lat_min
+ MaxLatency: lat_max
+ Distribution:
+ bucket_size: bucket_size
+ buckets: buckets
+ Absolute Packet Count:
+ Sent: abs_tx
+ Received: abs_rx
+ Lost: abs_dropped
+ Re-ordering:
+ Misordered: mis_ordered
+ Extent: extent
+ Duplicated: duplicate
+CoreStatsTest:
+ Environment: environment_file
+ Test: test
+ PROXID: PROXID
+ StepSize: StepSize
+ Received: Received
+ Sent: Sent
+ NonDPReceived: NonDPReceived
+ NonDPSent: NonDPSent
+ Dropped: Dropped
+PortStatsTest:
+ Environment: environment_file
+ Test: test
+ PROXID: PROXID
+ StepSize: StepSize
+ Received: Received
+ Sent: Sent
+ NoMbufs: NoMbufs
+ iErrMiss: iErrMiss
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua b/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
new file mode 100644
index 00000000..a5633409
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/helper.lua
@@ -0,0 +1,77 @@
+--
+-- Copyright (c) 2020 Intel Corporation
+--
+-- Licensed under the Apache License, Version 2.0 (the "License");
+-- you may not use this file except in compliance with the License.
+-- You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+function convertIPToHex(ip)
+ local address_chunks = {}
+ if type(ip) ~= "string" then
+ print ("IP ADDRESS ERROR: ", ip)
+ return "IP ADDRESS ERROR"
+ end
+
+ local chunks = {ip:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)(/%d+)$")}
+ if #chunks == 5 then
+ for i,v in ipairs(chunks) do
+ if i < 5 then
+ if tonumber(v) > 255 then
+ print ("IPV4 ADDRESS ERROR: ", ip)
+ return "IPV4 ADDRESS ERROR"
+ end
+ address_chunks[#address_chunks + 1] = string.format ("%02x", v)
+ end
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV4: ", result)
+ return result
+ end
+
+ local chunks = {ip:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)$")}
+ if #chunks == 4 then
+ for i,v in ipairs(chunks) do
+ if tonumber(v) > 255 then
+ print ("IPV4 ADDRESS ERROR: ", ip)
+ return "IPV4 ADDRESS ERROR"
+ end
+ address_chunks[#address_chunks + 1] = string.format ("%02x", v)
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV4: ", result)
+ return result
+ end
+
+ delimiter = ":"
+ for match in (ip..delimiter):gmatch("(.-)"..delimiter) do
+ if match ~= "" then
+ number = tonumber(match, 16)
+ if number <= 65535 then
+ table.insert(address_chunks, string.format("%02x %02x",number/256,number % 256))
+ end
+ else
+ table.insert(address_chunks, "")
+ end
+ end
+ for i, chunk in ipairs(address_chunks) do
+ if chunk =="" then
+ table.remove(address_chunks, i)
+ for j = 1,(8-#address_chunks) do
+ table.insert(address_chunks, i, "00 00")
+ end
+ break
+ end
+ end
+ result = table.concat(address_chunks, " ")
+ print ("Hex IPV6: ", result)
+ return result
+end
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/devbind.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
index adc184e3..38bc5a7e 100755..100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/devbind.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/machine.map
@@ -1,7 +1,5 @@
-#!/bin/bash
-
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2010-2019 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -15,12 +13,21 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+## This file contains the mapping for each test machine. The test machine will
+## be deployed on a machine defined in the *.env file, as defined by the
+## machine_index
+
+[DEFAULT]
+machine_index=0
+
+[TestM1]
+machine_index=1
+
+[TestM2]
+machine_index=2
+
+[TestM3]
+machine_index=3
-link="$(ip -o link | grep MACADDRESS |cut -d":" -f 2)"
-if [ -n "$link" ];
-then
- echo Need to bind
- /root/dpdk/usertools/dpdk-devbind.py --force --bind igb_uio $(/root/dpdk/usertools/dpdk-devbind.py --status |grep $link | cut -d" " -f 1)
-else
- echo Assuming port is already bound to DPDK
-fi
+[TestM4]
+machine_index=4
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
new file mode 100644
index 00000000..1cc11e04
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/openstack-rapid.yaml
@@ -0,0 +1,168 @@
+heat_template_version: 2015-10-15
+
+description: >
+ Template for deploying n PROX instances. The template allows for deploying
+ multiple groups of PROX VMs. You can create a first group with certain
+ flavors, availability groups, etc... Another group can be created with
+ different characteristics.
+
+parameters:
+  public_net_name: {description: Public network to allocate (floating) IPs to VMs, type: string, default: admin_floating_net}
+ mgmt_net_name: {description: Name of PROX mgmt network to be created, type: string, default: admin_internal_net}
+ PROX_image: {description: Image name to use for PROX, type: string, default: rapidVM}
+ PROX_key: {description: DO NOT CHANGE THIS DEFAULT KEY NAME, type: string, default: rapid_rsa_key}
+ my_availability_zone: {description: availability_zone for Hosting VMs, type: string, default: nova}
+ security_group: {description: Security Group to use, type: string, default: prox_security_group}
+ PROXType1VM_count: {description: Total number of testVMs to create, type: number, default: 2}
+ PROXType2VM_count: {description: Total number of testVMs type 2 to create, type: number, default: 1}
+ PROXType3VM_count: {description: Total number of testVMs type 3 to create, type: number, default: 1}
+
+# The following parameters are not used, but are here in case you want to also
+# create the management and dataplane networks in this template
+ mgmt_net_cidr: {description: PROX mgmt network CIDR, type: string, default: 20.20.1.0/24}
+ mgmt_net_gw: {description: PROX mgmt network gateway address, type: string, default: 20.20.1.1}
+ mgmt_net_pool_start: {description: Start of mgmt network IP address allocation pool, type: string, default: 20.20.1.100}
+ mgmt_net_pool_end: {description: End of mgmt network IP address allocation pool, type: string, default: 20.20.1.200}
+ data_net_name: {description: Name of PROX private network to be created, type: string, default: dataplane-network}
+ data_net_cidr: {description: PROX private network CIDR,type: string, default: 30.30.1.0/24}
+ data_net_pool_start: {description: Start of private network IP address allocation pool, type: string, default: 30.30.1.100}
+ data_net_pool_end: {description: End of private network IP address allocation pool, type: string, default: 30.30.1.200}
+ data2_net_name: {description: Name of PROX private network 2 to be created, type: string, default: data2}
+ dns:
+ type: comma_delimited_list
+ label: DNS nameservers
+ description: Comma separated list of DNS nameservers for the management network.
+ default: '8.8.8.8'
+
+resources:
+ PROXType1VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType1VM_count }
+ resource_def:
+ type: rapid-openstack-server.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidVM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ PROXType2VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType2VM_count }
+ resource_def:
+ type: rapid-openstack-server-2ports.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidType2VM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data_net_name}
+ PROX_data2_net_id: {get_param: data2_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ PROXType3VMs:
+ type: OS::Heat::ResourceGroup
+ description: Group of PROX VMs according to specs described in this section
+ properties:
+ count: { get_param: PROXType3VM_count }
+ resource_def:
+ type: rapid-openstack-server.yaml
+ properties:
+ PROX_availability_zone : {get_param: my_availability_zone}
+ PROX_security_group : {get_param: security_group}
+ PROX_image: {get_param: PROX_image}
+ PROX_key: {get_param: PROX_key}
+ PROX_server_name: rapidType3VM-%index%
+ PROX_public_net: {get_param: public_net_name}
+ PROX_mgmt_net_id: {get_param: mgmt_net_name}
+ PROX_data_net_id: {get_param: data2_net_name}
+ PROX_config: {get_resource: MyConfig}
+ depends_on:
+ - MyConfig
+
+ MyConfig:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ users:
+ - default
+ - name: rapid
+ groups: "users,root"
+ lock-passwd: false
+ passwd: 'test'
+ shell: "/bin/bash"
+ sudo: "ALL=(ALL) NOPASSWD:ALL"
+ ssh_pwauth: true
+ chpasswd:
+ list: |
+ rapid:rapid
+ expire: False
+ write_files:
+ - path: /opt/rapid/after_boot_do_not_run.sh
+ # - path: /opt/rapid/after_boot.sh
+ # after_boot.sh is ran by check_prox_system_setup.sh, if it exists
+ # This can be used to fix some issues, like in the example below
+ # Remove this section or rename the file, if you do not want to run
+ # this after booting
+ # The code below is just an example of what could be ran after boot
+ content: |
+ OLDIFS="${IFS}"
+ IFS=$'\n'
+ list="$(ip route | grep via | grep -v 'dev eth0')"
+ # Delete all routes using gateway on other interfaces than eth0
+ for item in ${list}
+ do /bin/bash -c "sudo ip route del ${item}"
+ done
+ # Make sure to replace the IP address with your gateway
+ /bin/bash -c "sudo ip route add default via 10.6.6.1 dev eth0"
+ /bin/bash -c "echo nameserver 8.8.8.8 > /etc/resolv.conf"
+ IFS="${OLDIFS}"
+ permissions: '0777'
+
+outputs:
+ number_of_servers:
+    description: List of numbers of PROX instances
+ value:
+ - {get_param: PROXType1VM_count}
+ - {get_param: PROXType2VM_count}
+ - {get_param: PROXType3VM_count}
+ server_name:
+ description: List of list of names of the PROX instances
+ value:
+ - {get_attr: [PROXType1VMs, name]}
+ - {get_attr: [PROXType2VMs, name]}
+ - {get_attr: [PROXType3VMs, name]}
+ mngmt_ips:
+ description: List of list of Management IPs of the VMs
+ value:
+ - {get_attr: [PROXType1VMs, mngmt_ip]}
+ - {get_attr: [PROXType2VMs, mngmt_ip]}
+ - {get_attr: [PROXType3VMs, mngmt_ip]}
+ data_plane_ips:
+ description: List of list of list of DataPlane IPs of the VMs
+ value:
+ - {get_attr: [PROXType1VMs, data_plane_ips]}
+ - {get_attr: [PROXType2VMs, data_plane_ips]}
+ - {get_attr: [PROXType3VMs, data_plane_ips]}
+ data_plane_macs:
+ description: List of list of list of DataPlane MACs of the VMs
+ value:
+ - {get_attr: [PROXType1VMs, data_plane_mac]}
+ - {get_attr: [PROXType2VMs, data_plane_mac]}
+ - {get_attr: [PROXType3VMs, data_plane_mac]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
new file mode 100644
index 00000000..fbef2f54
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/params_rapid.yaml
@@ -0,0 +1,10 @@
+parameters:
+ public_net_name: admin_floating_net
+ data_net_name: dataplane-network
+ PROX_image: rapidVM
+ PROX_key: rapid_rsa_key
+ my_availability_zone: nova
+ security_group: prox_security_group
+ PROXType1VM_count: 3
+ PROXType2VM_count: 0
+ PROXType3VM_count: 0
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
new file mode 100644
index 00000000..9e269f60
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/pod-rapid.yaml
@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-rapid-
+ annotations:
+ k8s.v1.cni.cncf.io/networks: intel-sriov-vfio
+spec:
+ containers:
+ - name: pod-rapid
+ image: opnfv/rapid:latest
+ imagePullPolicy: Always
+ securityContext:
+ capabilities:
+ add: ["IPC_LOCK", "NET_ADMIN"]
+ volumeMounts:
+ - mountPath: /dev/hugepages
+ name: hugepages
+ resources:
+ requests:
+ hugepages-2Mi: 1Gi
+ memory: 1Gi
+ cpu: 8
+ intel.com/intel_sriov_vfio: '1'
+ limits:
+ hugepages-2Mi: 1Gi
+ memory: 1Gi
+ cpu: 8
+ intel.com/intel_sriov_vfio: '1'
+ volumes:
+ - name: hugepages
+ emptyDir:
+ medium: HugePages
+ restartPolicy: Never
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile
new file mode 100644
index 00000000..f91cf156
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/Makefile
@@ -0,0 +1,42 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = port_info_app
+
+# all source are stored in SRCS-y
+SRCS-y := port_info.c
+
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+EXTRA_CFLAGS += -O3 -g -Wfatal-errors
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build
new file mode 100644
index 00000000..f2efd667
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/meson.build
@@ -0,0 +1,101 @@
+##
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+project('port-info', 'C',
+ version:
+ run_command(['git', 'describe',
+ '--abbrev=8', '--dirty', '--always']).stdout().strip(),
+ license: 'Apache',
+ default_options: ['buildtype=release', 'c_std=gnu99'],
+ meson_version: '>= 0.47'
+)
+
+cc = meson.get_compiler('c')
+
+# Configure options for prox
+# Grab the DPDK version here "manually" as it is not available in the dpdk_dep
+# object
+dpdk_version = run_command('pkg-config', '--modversion', 'libdpdk').stdout()
+
+
+cflags = [
+ '-DPROGRAM_NAME="port_info_app"',
+ '-fno-stack-protector',
+ '-DGRE_TP',
+ '-D_GNU_SOURCE'] # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+# Add configured cflags to arguments
+foreach arg: cflags
+ add_project_arguments(arg, language: 'c')
+endforeach
+
+# enable warning flags if they are supported by the compiler
+warning_flags = [
+ '-Wno-unused',
+ '-Wno-unused-parameter',
+ '-Wno-unused-result',
+ '-Wno-deprecated-declarations']
+
+foreach arg: warning_flags
+ if cc.has_argument(arg)
+ add_project_arguments(arg, language: 'c')
+ endif
+endforeach
+
+has_sym_args = [
+ [ 'HAVE_LIBEDIT_EL_RFUNC_T', 'histedit.h',
+ 'el_rfunc_t' ],
+]
+config = configuration_data()
+foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+endforeach
+configure_file(output : 'libedit_autoconf.h', configuration : config)
+
+# All other dependencies
+dpdk_dep = dependency('libdpdk', required: true)
+tinfo_dep = dependency('tinfo', required: false)
+threads_dep = dependency('threads', required: true)
+pcap_dep = dependency('pcap', required: true)
+libedit_dep = dependency('libedit', required: true)
+math_dep = cc.find_library('m', required : false)
+dl_dep = cc.find_library('dl', required : true)
+
+deps = [dpdk_dep,
+ tinfo_dep,
+ threads_dep,
+ pcap_dep,
+ libedit_dep,
+ math_dep,
+ dl_dep]
+
+# Explicitly add these to the dependency list
+deps += [cc.find_library('rte_bus_pci', required: true)]
+deps += [cc.find_library('rte_bus_vdev', required: true)]
+
+if dpdk_version.version_compare('<20.11.0')
+deps += [cc.find_library('rte_pmd_ring', required: true)]
+else
+deps += [cc.find_library('rte_net_ring', required: true)]
+endif
+
+sources = files(
+ 'port_info.c')
+
+executable('port_info_app',
+ sources,
+ c_args: cflags,
+ dependencies: deps,
+ install: true)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
new file mode 100644
index 00000000..917c0636
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/port_info/port_info.c
@@ -0,0 +1,70 @@
+/*
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_version.h>
+
+static const uint16_t rx_rings = 1, tx_rings = 1;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+static const struct rte_eth_conf port_conf = { .link_speeds = ETH_LINK_SPEED_AUTONEG };
+#else
+static const struct rte_eth_conf port_conf = { .link_speeds = RTE_ETH_LINK_SPEED_AUTONEG };
+#endif
+
+static inline int
+port_info(void)
+{
+ uint8_t port_id;
+ int ret_val;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ ret_val = rte_eth_dev_configure(port_id, rx_rings, tx_rings, &port_conf);
+ if (ret_val != 0)
+ return ret_val;
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
+ struct ether_addr addr;
+#else
+ struct rte_ether_addr addr;
+#endif
+ rte_eth_macaddr_get(port_id, &addr);
+ printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8
+ ":%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
+ (unsigned) port_id,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+ }
+
+ return 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+ /* Initialize the Environment Abstraction Layer (EAL). */
+ int ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+ argc -= ret;
+ argv += ret;
+
+ return port_info();
+}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
new file mode 100644
index 00000000..8754ebc4
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/prox_ctrl.py
@@ -0,0 +1,293 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from __future__ import print_function
+from __future__ import division
+
+from builtins import map
+from builtins import range
+from past.utils import old_div
+from builtins import object
+import os
+import time
+import subprocess
+import socket
+from rapid_log import RapidLog
+from rapid_sshclient import SSHClient
+
+class prox_ctrl(object):
+ def __init__(self, ip, key=None, user=None, password = None):
+ self._ip = ip
+ self._key = key
+ self._user = user
+ self._password = password
+ self._proxsock = []
+ self._sshclient = SSHClient(ip = ip, user = user, password = password,
+ rsa_private_key = key, timeout = None)
+
+ def ip(self):
+ return self._ip
+
+ def test_connection(self):
+ attempts = 1
+ RapidLog.debug("Trying to connect to machine \
+ on %s, attempt: %d" % (self._ip, attempts))
+ while True:
+ try:
+ if (self.run_cmd('test -e /opt/rapid/system_ready_for_rapid \
+ && echo exists')):
+ break
+ time.sleep(2)
+ except RuntimeWarning as ex:
+ RapidLog.debug("RuntimeWarning %d:\n%s"
+ % (ex.returncode, ex.output.strip()))
+ attempts += 1
+ if attempts > 20:
+ RapidLog.exception("Failed to connect to instance after %d\
+ attempts:\n%s" % (attempts, ex))
+ time.sleep(2)
+ RapidLog.debug("Trying to connect to machine \
+ on %s, attempt: %d" % (self._ip, attempts))
+ RapidLog.debug("Connected to machine on %s" % self._ip)
+
+ def connect_socket(self):
+ attempts = 1
+ RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
+ attempt: %d" % (self._ip, attempts))
+ sock = None
+ while True:
+ sock = self.prox_sock()
+ if sock is not None:
+ break
+ attempts += 1
+ if attempts > 20:
+ RapidLog.exception("Failed to connect to PROX on %s after %d \
+ attempts" % (self._ip, attempts))
+ time.sleep(2)
+ RapidLog.debug("Trying to connect to PROX (just launched) on %s, \
+ attempt: %d" % (self._ip, attempts))
+ RapidLog.info("Connected to PROX on %s" % self._ip)
+ return sock
+
+ def close(self):
+ for sock in self._proxsock:
+ sock.quit()
+
+ def run_cmd(self, command):
+ self._sshclient.run_cmd(command)
+ return self._sshclient.get_output()
+
+ def prox_sock(self, port=8474):
+ """Connect to the PROX instance on remote system.
+ Return a prox_sock object on success, None on failure.
+ """
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ sock.connect((self._ip, port))
+ prox = prox_sock(sock)
+ self._proxsock.append(prox)
+ return prox
+ except:
+ return None
+
+ def scp_put(self, src, dst):
+ self._sshclient.scp_put(src, dst)
+ RapidLog.info("Copying from {} to {}:{}".format(src, self._ip, dst))
+
+ def scp_get(self, src, dst):
+ self._sshclient.scp_get('/home/' + self._user + src, dst)
+ RapidLog.info("Copying from {}:/home/{}{} to {}".format(self._ip,
+ self._user, src, dst))
+
+class prox_sock(object):
+ def __init__(self, sock):
+ self._sock = sock
+ self._rcvd = b''
+
+ def __del__(self):
+ if self._sock is not None:
+ self._sock.close()
+ self._sock = None
+
+ def start(self, cores):
+ self._send('start %s' % ','.join(map(str, cores)))
+
+ def stop(self, cores):
+ self._send('stop %s' % ','.join(map(str, cores)))
+
+ def speed(self, speed, cores, tasks=[0]):
+ for core in cores:
+ for task in tasks:
+ self._send('speed %s %s %s' % (core, task, speed))
+
+ def reset_stats(self):
+ self._send('reset stats')
+
+ def lat_stats(self, cores, tasks=[0]):
+ result = {}
+ result['lat_min'] = 999999999
+ result['lat_max'] = result['lat_avg'] = 0
+ result['buckets'] = [0] * 128
+ result['mis_ordered'] = 0
+ result['extent'] = 0
+ result['duplicate'] = 0
+ number_tasks_returning_stats = 0
+ self._send('lat all stats %s %s' % (','.join(map(str, cores)),
+ ','.join(map(str, tasks))))
+ for core in cores:
+ for task in tasks:
+ stats = self._recv().split(',')
+ if 'is not measuring' in stats[0]:
+ continue
+ if stats[0].startswith('error'):
+ RapidLog.critical("lat stats error: unexpected reply from PROX\
+ (potential incompatibility between scripts and PROX)")
+ raise Exception("lat stats error")
+ number_tasks_returning_stats += 1
+ result['lat_min'] = min(int(stats[0]),result['lat_min'])
+ result['lat_max'] = max(int(stats[1]),result['lat_max'])
+ result['lat_avg'] += int(stats[2])
+ #min_since begin = int(stats[3])
+ #max_since_begin = int(stats[4])
+ result['lat_tsc'] = int(stats[5])
+ # Taking the last tsc as the timestamp since
+ # PROX will return the same tsc for each
+ # core/task combination
+ result['lat_hz'] = int(stats[6])
+ #coreid = int(stats[7])
+ #taskid = int(stats[8])
+ result['mis_ordered'] += int(stats[9])
+ result['extent'] += int(stats[10])
+ result['duplicate'] += int(stats[11])
+ stats = self._recv().split(':')
+ if stats[0].startswith('error'):
+ RapidLog.critical("lat stats error: unexpected lat bucket \
+ reply (potential incompatibility between scripts \
+ and PROX)")
+ raise Exception("lat bucket reply error")
+ result['buckets'][0] = int(stats[1])
+ for i in range(1, 128):
+ stats = self._recv().split(':')
+ result['buckets'][i] += int(stats[1])
+ result['lat_avg'] = old_div(result['lat_avg'],
+ number_tasks_returning_stats)
+ self._send('stats latency(0).used')
+ used = float(self._recv())
+ self._send('stats latency(0).total')
+ total = float(self._recv())
+ result['lat_used'] = old_div(used,total)
+ return (result)
+
+ def irq_stats(self, core, bucket, task=0):
+ self._send('stats task.core(%s).task(%s).irq(%s)' %
+ (core, task, bucket))
+ stats = self._recv().split(',')
+ return int(stats[0])
+
+ def show_irq_buckets(self, core, task=0):
+ rx = tx = drop = tsc = hz = 0
+ self._send('show irq buckets %s %s' % (core,task))
+ buckets = self._recv().split(';')
+ buckets = buckets[:-1]
+ return buckets
+
+ def core_stats(self, cores, tasks=[0]):
+ rx = tx = drop = tsc = hz = rx_non_dp = tx_non_dp = tx_fail = 0
+ self._send('dp core stats %s %s' % (','.join(map(str, cores)),
+ ','.join(map(str, tasks))))
+ for core in cores:
+ for task in tasks:
+ stats = self._recv().split(',')
+ if stats[0].startswith('error'):
+ if stats[0].startswith('error: invalid syntax'):
+ RapidLog.critical("dp core stats error: unexpected \
+ invalid syntax (potential incompatibility \
+ between scripts and PROX)")
+ raise Exception("dp core stats error")
+ continue
+ rx += int(stats[0])
+ tx += int(stats[1])
+ rx_non_dp += int(stats[2])
+ tx_non_dp += int(stats[3])
+ drop += int(stats[4])
+ tx_fail += int(stats[5])
+ tsc = int(stats[6])
+ hz = int(stats[7])
+ return rx, rx_non_dp, tx, tx_non_dp, drop, tx_fail, tsc, hz
+
+ def multi_port_stats(self, ports=[0]):
+ rx = tx = port_id = tsc = no_mbufs = errors = 0
+ self._send('multi port stats %s' % (','.join(map(str, ports))))
+ result = self._recv().split(';')
+ if result[0].startswith('error'):
+ RapidLog.critical("multi port stats error: unexpected invalid \
+ syntax (potential incompatibility between scripts and \
+ PROX)")
+ raise Exception("multi port stats error")
+ for statistics in result:
+ stats = statistics.split(',')
+ port_id = int(stats[0])
+ rx += int(stats[1])
+ tx += int(stats[2])
+ no_mbufs += int(stats[3])
+ errors += int(stats[4])
+ tsc = int(stats[5])
+ return rx, tx, no_mbufs, errors, tsc
+
+ def set_random(self, cores, task, offset, mask, length):
+ self._send('set random %s %s %s %s %s' % (','.join(map(str, cores)),
+ task, offset, mask, length))
+
+ def set_size(self, cores, task, pkt_size):
+ self._send('pkt_size %s %s %s' % (','.join(map(str, cores)), task,
+ pkt_size))
+
+ def set_imix(self, cores, task, imix):
+ self._send('imix %s %s %s' % (','.join(map(str, cores)), task,
+ ','.join(map(str,imix))))
+
+ def set_value(self, cores, task, offset, value, length):
+ self._send('set value %s %s %s %s %s' % (','.join(map(str, cores)),
+ task, offset, value, length))
+
+ def quit_prox(self):
+ self._send('quit')
+
+ def _send(self, cmd):
+ """Append LF and send command to the PROX instance."""
+ if self._sock is None:
+ raise RuntimeError("PROX socket closed, cannot send '%s'" % cmd)
+ try:
+ self._sock.sendall(cmd.encode() + b'\n')
+ except ConnectionResetError as e:
+ RapidLog.error('Pipe reset by Prox instance: traffic too high?')
+ raise
+
+ def _recv(self):
+ """Receive response from PROX instance, return it with LF removed."""
+ if self._sock is None:
+ raise RuntimeError("PROX socket closed, cannot receive anymore")
+ try:
+ pos = self._rcvd.find(b'\n')
+ while pos == -1:
+ self._rcvd += self._sock.recv(256)
+ pos = self._rcvd.find(b'\n')
+ rsp = self._rcvd[:pos]
+ self._rcvd = self._rcvd[pos+1:]
+ except ConnectionResetError as e:
+ RapidLog.error('Pipe reset by Prox instance: traffic too high?')
+ raise
+ return rsp.decode()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml b/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml
new file mode 100644
index 00000000..374b58cb
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools>=42",
+ "wheel"
+]
+build-backend = "setuptools.build_meta"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml
new file mode 100644
index 00000000..e1095fbd
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server-2ports.yaml
@@ -0,0 +1,94 @@
+heat_template_version: 2014-10-16
+
+description: single server resource with 2 dataplane ports used by resource groups.
+
+parameters:
+ PROX_public_net:
+ type: string
+ PROX_mgmt_net_id:
+ type: string
+ PROX_data_net_id:
+ type: string
+ PROX_data2_net_id:
+ type: string
+ PROX_server_name:
+ type: string
+ PROX_availability_zone:
+ type: string
+ PROX_security_group:
+ type: string
+ PROX_image:
+ type: string
+ PROX_key:
+ type: string
+ PROX_config:
+ type: string
+
+resources:
+ PROX_instance:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: PROX_server_name }
+ availability_zone : {get_param: PROX_availability_zone}
+ flavor: {get_resource: PROX_flavor}
+ image: {get_param: PROX_image}
+ key_name: {get_param: PROX_key}
+ networks:
+ - port: {get_resource: mgmt_port }
+ - port: {get_resource: data_port }
+ - port: {get_resource: data2_port }
+ user_data: {get_param: PROX_config}
+ user_data_format: RAW
+
+ PROX_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 4096
+ vcpus: 4
+ disk: 80
+ extra_specs: {"hw:mem_page_size": "large","hw:cpu_policy": "dedicated","hw:cpu_thread_policy":"isolate"}
+
+ mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_mgmt_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: PROX_public_net}
+ port_id: {get_resource: mgmt_port}
+
+ data_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ data2_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data2_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+outputs:
+ name:
+ description: Name of the PROX instance
+ value: {get_attr: [PROX_instance, name]}
+ mngmt_ip:
+ description: Management IP of the VM
+ value: {get_attr: [floating_ip, floating_ip_address ]}
+ data_plane_ips:
+ description: List of DataPlane IPs of the VM
+ value:
+ - {get_attr: [data_port, fixed_ips, 0, ip_address]}
+ - {get_attr: [data2_port, fixed_ips, 0, ip_address]}
+ data_plane_mac:
+ description: List of DataPlane MACs of the VM
+ value:
+ - {get_attr: [data_port, mac_address]}
+ - {get_attr: [data2_port, mac_address]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml
new file mode 100644
index 00000000..84311e25
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid-openstack-server.yaml
@@ -0,0 +1,82 @@
+heat_template_version: 2014-10-16
+
+description: single server resource used by resource groups.
+
+parameters:
+ PROX_public_net:
+ type: string
+ PROX_mgmt_net_id:
+ type: string
+ PROX_data_net_id:
+ type: string
+ PROX_server_name:
+ type: string
+ PROX_availability_zone:
+ type: string
+ PROX_security_group:
+ type: string
+ PROX_image:
+ type: string
+ PROX_key:
+ type: string
+ PROX_config:
+ type: string
+
+resources:
+ PROX_instance:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: PROX_server_name }
+ availability_zone : {get_param: PROX_availability_zone}
+ flavor: {get_resource: PROX_flavor}
+ image: {get_param: PROX_image}
+ key_name: {get_param: PROX_key}
+ networks:
+ - port: {get_resource: mgmt_port }
+ - port: {get_resource: data_port }
+ user_data: {get_param: PROX_config}
+ user_data_format: RAW
+
+ PROX_flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 4096
+ vcpus: 4
+ disk: 80
+ extra_specs: {"hw:mem_page_size": "large","hw:cpu_policy": "dedicated","hw:cpu_thread_policy":"isolate"}
+
+ mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_mgmt_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+ floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network: {get_param: PROX_public_net}
+ port_id: {get_resource: mgmt_port}
+
+ data_port:
+ type: OS::Neutron::Port
+ properties:
+ network_id: { get_param: PROX_data_net_id }
+ security_groups:
+ - {get_param: PROX_security_group}
+
+outputs:
+ name:
+ description: Name of the PROX instance
+ value: {get_attr: [PROX_instance, name]}
+ mngmt_ip:
+ description: Management IP of the VM
+ value: {get_attr: [floating_ip, floating_ip_address ]}
+ data_plane_ips:
+ description: List of DataPlane IPs of the VM
+ value:
+ - {get_attr: [data_port, fixed_ips, 0, ip_address]}
+ data_plane_mac:
+ description: List of DataPlane MACs of the VM
+ value:
+ - {get_attr: [data_port, mac_address]}
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
index f211934a..cd54d507 100755..100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/prox_user_data.sh
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid.pods
@@ -1,7 +1,5 @@
-#!/bin/bash
-
##
-## Copyright (c) 2010-2017 Intel Corporation
+## Copyright (c) 2019 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -16,8 +14,16 @@
## limitations under the License.
##
-echo 1024 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
-mount -t hugetlbfs nodev /mnt/huge
-modprobe uio
-insmod /root/dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-iptables -F
+[DEFAULT]
+total_number_of_pods=2
+namespace=rapid-testing
+
+[POD1]
+nodeSelector_hostname=k8s-node1
+dp_ip=192.168.30.11
+dp_subnet=24
+
+[POD2]
+nodeSelector_hostname=k8s-node2
+dp_ip=192.168.30.12
+dp_subnet=24
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
new file mode 100644
index 00000000..d103deba
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_cli.py
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import getopt
+import sys
+
+class RapidCli(object):
+ """
+ Class to deal with runrapid cli
+ """
+ @staticmethod
+ def usage(test_params):
+ print("usage: runrapid [--version] [-v]")
+ print(" [--env ENVIRONMENT_NAME]")
+ print(" [--test TEST_NAME]")
+ print(" [--map MACHINE_MAP_FILE]")
+ print(" [--runtime TIME_FOR_TEST]")
+ print(" [--configonly False|True]")
+ print(" [--log DEBUG|INFO|WARNING|ERROR|CRITICAL]")
+ print(" [-h] [--help]")
+ print("")
+ print("Command-line interface to runrapid")
+ print("")
+ print("optional arguments:")
+ print(" -v, --version Show program's version number and exit")
+ print(" --env ENVIRONMENT_NAME Parameters will be read from ENVIRONMENT_NAME. Default is %s."%test_params['environment_file'])
+ print(" --test TEST_NAME Test cases will be read from TEST_NAME. Default is %s."%test_params['test_file'])
+ print(" --map MACHINE_MAP_FILE Machine mapping will be read from MACHINE_MAP_FILE. Default is %s."%test_params['machine_map_file'])
+ print(" --map INDEX_LIST This parameter can also be a list of indices, e.g. [2,3]")
+ print(" --runtime Specify time in seconds for 1 test run")
+ print(" --configonly If this option is specified, only upload all config files to the VMs, do not run the tests")
+ print(" --log Specify logging level for log file output, default is DEBUG")
+ print(" --screenlog Specify logging level for screen output, default is INFO")
+ print(" -h, --help Show help message and exit.")
+ print("")
+
+ @staticmethod
+ def process_cli(test_params):
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "vh", ["version","help", "env=", "test=", "map=", "runtime=","configonly","log=","screenlog="])
+ except getopt.GetoptError as err:
+ print("===========================================")
+ print(str(err))
+ print("===========================================")
+ RapidCli.usage(test_params)
+ sys.exit(2)
+ if args:
+ RapidCli.usage(test_params)
+ sys.exit(2)
+ for opt, arg in opts:
+ if opt in ["-h", "--help"]:
+ RapidCli.usage(test_params)
+ sys.exit()
+ if opt in ["-v", "--version"]:
+ print("Rapid Automated Performance Indication for Dataplane "+test_params['version'])
+ sys.exit()
+ if opt in ["--env"]:
+ test_params['environment_file'] = arg
+ if opt in ["--test"]:
+ test_params['test_file'] = arg
+ if opt in ["--map"]:
+ test_params['machine_map_file'] = arg
+ if opt in ["--runtime"]:
+ test_params['runtime'] = int(arg)
+ if opt in ["--configonly"]:
+ test_params['configonly'] = True
+ print('No actual runs, only uploading configuration files')
+ if opt in ["--log"]:
+ test_params['loglevel'] = arg
+ print ("Log level: "+ test_params['loglevel'])
+ if opt in ["--screenlog"]:
+ test_params['screenloglevel'] = arg
+ print ("Screen Log level: "+ test_params['screenloglevel'])
+ print ("Using '"+test_params['environment_file']+"' as name for the environment")
+ print ("Using '"+test_params['test_file']+"' for test case definition")
+ print ("Using '"+test_params['machine_map_file']+"' for machine mapping")
+ print ("Runtime: "+ str(test_params['runtime']))
+ return(test_params)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
new file mode 100644
index 00000000..e6a7f517
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_corestatstest.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class CoreStatsTest(RapidTest):
+ """
+ Class to manage the corestatstesting
+ """
+ def __init__(self, test_param, runtime, testname, environment_file,
+ machines):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.machines = machines
+
+ def run(self):
+ result_details = {'Details': 'Nothing'}
+ RapidLog.info("+------------------------------------------------------------------------------------------------------------------+")
+ RapidLog.info("| Measuring core statistics on 1 or more PROX instances |")
+ RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ RapidLog.info("| PROX ID | Time | RX | TX | non DP RX | non DP TX | TX - RX | nonDP TX-RX| DROP TOT |")
+ RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ duration = self.test['runtime']
+ tot_drop = []
+ old_rx = []; old_non_dp_rx = []; old_tx = []; old_non_dp_tx = []; old_drop = []; old_tx_fail = []; old_tsc = []
+ new_rx = []; new_non_dp_rx = []; new_tx = []; new_non_dp_tx = []; new_drop = []; new_tx_fail = []; new_tsc = []
+ machines_to_go = len (self.machines)
+ for machine in self.machines:
+ machine.reset_stats()
+ tot_drop.append(0)
+ old_rx.append(0); old_non_dp_rx.append(0); old_tx.append(0); old_non_dp_tx.append(0); old_drop.append(0); old_tx_fail.append(0); old_tsc.append(0)
+ old_rx[-1], old_non_dp_rx[-1], old_tx[-1], old_non_dp_tx[-1], old_drop[-1], old_tx_fail[-1], old_tsc[-1], tsc_hz = machine.core_stats()
+ new_rx.append(0); new_non_dp_rx.append(0); new_tx.append(0); new_non_dp_tx.append(0); new_drop.append(0); new_tx_fail.append(0); new_tsc.append(0)
+ while (duration > 0):
+ time.sleep(0.5)
+ # Get statistics after some execution time
+ for i, machine in enumerate(self.machines, start=0):
+ new_rx[i], new_non_dp_rx[i], new_tx[i], new_non_dp_tx[i], new_drop[i], new_tx_fail[i], new_tsc[i], tsc_hz = machine.core_stats()
+ drop = new_drop[i]-old_drop[i]
+ rx = new_rx[i] - old_rx[i]
+ tx = new_tx[i] - old_tx[i]
+ non_dp_rx = new_non_dp_rx[i] - old_non_dp_rx[i]
+ non_dp_tx = new_non_dp_tx[i] - old_non_dp_tx[i]
+ tsc = new_tsc[i] - old_tsc[i]
+ if tsc == 0 :
+ continue
+ machines_to_go -= 1
+ old_drop[i] = new_drop[i]
+ old_rx[i] = new_rx[i]
+ old_tx[i] = new_tx[i]
+ old_non_dp_rx[i] = new_non_dp_rx[i]
+ old_non_dp_tx[i] = new_non_dp_tx[i]
+ old_tsc[i] = new_tsc[i]
+ tot_drop[i] = tot_drop[i] + tx - rx
+ RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(non_dp_rx)+' | '+'{:>10.0f}'.format(non_dp_tx)+' | ' + '{:>10.0f}'.format(tx-rx) + ' | '+ '{:>10.0f}'.format(non_dp_tx-non_dp_rx) + ' | '+'{:>10.0f}'.format(tot_drop[i]) +' |')
+ result_details = {'test': self.test['test'],
+ 'environment_file': self.test['environment_file'],
+ 'PROXID': i,
+ 'StepSize': duration,
+ 'Received': rx,
+ 'Sent': tx,
+ 'NonDPReceived': non_dp_rx,
+ 'NonDPSent': non_dp_tx,
+ 'Dropped': tot_drop[i]}
+ result_details = self.post_data(result_details)
+ if machines_to_go == 0:
+ duration = duration - 1
+ machines_to_go = len (self.machines)
+ RapidLog.info("+-----------+-----------+------------+------------+------------+------------+------------+------------+------------+")
+ return (True, result_details)
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
new file mode 100644
index 00000000..27d2430d
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_defaults.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+class RapidDefaults(object):
+ """
+ Class to define the test defaults
+ """
+ test_params = {
+ 'version' : '2023.01.16', # Please do NOT change, used for debugging
+ 'environment_file' : 'rapid.env', #Default string for environment
+ 'test_file' : 'tests/basicrapid.test', #Default string for test
+ 'machine_map_file' : 'machine.map', #Default string for machine map file
+ 'loglevel' : 'DEBUG', # sets log level for writing to file
+ 'screenloglevel' : 'INFO', # sets log level for writing to screen
+ 'runtime' : 10, # time in seconds for 1 test run
+ 'configonly' : False, # If True, the system will upload all the necessary config fiels to the VMs, but not start PROX and the actual testing
+ 'rundir' : '/opt/rapid', # Directory where to find the tools in the machines running PROX
+ 'resultsdir' : '.', # Directory where to store log files
+ 'sleep_time' : 2, # Sleep time between two loop iteration. Minimum is 2 seconds. Might be useful to let SUT clean caches
+ 'lat_percentile' : 0.99
+ }
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
new file mode 100644
index 00000000..ea42fc9a
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_flowsizetest.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+import sys
+import time
+import copy
+from math import ceil
+from statistics import mean
+from past.utils import old_div
+from rapid_log import RapidLog
+from rapid_log import bcolors
+from rapid_test import RapidTest
+inf = float("inf")
+
+class FlowSizeTest(RapidTest):
+ """
+ Class to manage the flowsizetesting
+ """
+ def __init__(self, test_param, lat_percentile, runtime, testname,
+ environment_file, gen_machine, sut_machine, background_machines, sleep_time):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.gen_machine = gen_machine
+ self.sut_machine = sut_machine
+ self.background_machines = background_machines
+ self.test['lat_percentile'] = lat_percentile
+ self.test['sleep_time'] = sleep_time
+ if self.test['test'] == 'TST009test':
+ # This test implements some of the testing as defined in
+ # https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
+ self.test['TST009_n'] = int(ceil(old_div(
+ self.test['maxframespersecondallingress'],
+ self.test['stepsize'])))
+ self.test['TST009'] = True
+ self.test['TST009_L'] = 0
+ self.test['TST009_R'] = self.test['TST009_n'] - 1
+ self.test['TST009_S']= []
+ for m in range(0, self.test['TST009_n']):
+ self.test['TST009_S'].append((m+1) * self.test['stepsize'])
+ elif self.test['test'] == 'fixed_rate':
+ for key in['drop_rate_threshold','lat_avg_threshold',
+ 'lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']:
+ self.test[key] = inf
+
+ def new_speed(self, speed,size,success):
+ if self.test['test'] == 'fixed_rate':
+ return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (speed + self.test['step'])
+ elif 'TST009' in self.test.keys():
+ if success:
+ self.test['TST009_L'] = self.test['TST009_m'] + 1
+ else:
+ self.test['TST009_R'] = max(self.test['TST009_m'] - 1,
+ self.test['TST009_L'])
+ self.test['TST009_m'] = int (old_div((self.test['TST009_L'] +
+ self.test['TST009_R']),2))
+ return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
+ else:
+ if success:
+ self.test['minspeed'] = speed
+ else:
+ self.test['maxspeed'] = speed
+ return (old_div((self.test['minspeed'] + self.test['maxspeed']),2.0))
+
+ def get_start_speed_and_init(self, size):
+ if self.test['test'] == 'fixed_rate':
+ return (self.test['startspeed'])
+ elif self.test['test'] == 'increment_till_fail':
+ return (self.test['startspeed'])
+ elif 'TST009' in self.test.keys():
+ self.test['TST009_L'] = 0
+ self.test['TST009_R'] = self.test['TST009_n'] - 1
+ self.test['TST009_m'] = int(old_div((self.test['TST009_L'] +
+ self.test['TST009_R']), 2))
+ return (self.get_percentageof10Gbps(self.test['TST009_S'][self.test['TST009_m']],size))
+ else:
+ self.test['minspeed'] = 0
+ self.test['maxspeed'] = self.test['startspeed']
+ return (self.test['startspeed'])
+
+ def resolution_achieved(self):
+ if self.test['test'] == 'fixed_rate':
+ return (True)
+ elif 'TST009' in self.test.keys():
+ return (self.test['TST009_L'] == self.test['TST009_R'])
+ else:
+ return ((self.test['maxspeed'] - self.test['minspeed']) <= self.test['accuracy'])
+
+ def warm_up(self):
+ # Running at low speed to make sure the ARP messages can get through.
+ # If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
+ # Note however that if we would run the test steps during a very long time, the ARP would expire in the switch.
+        # PROX will send a new ARP request every second so chances are very low that they will all fail to get through
+ imix = self.test['warmupimix']
+ FLOWSIZE = self.test['warmupflowsize']
+ WARMUPSPEED = self.test['warmupspeed']
+ WARMUPTIME = self.test['warmuptime']
+
+ if WARMUPTIME == 0:
+ RapidLog.info(("Not Warming up"))
+ return
+
+ RapidLog.info(("Warming up during {} seconds..., packet size = {},"
+ " flows = {}, speed = {}").format(WARMUPTIME, imix, FLOWSIZE,
+ WARMUPSPEED))
+ self.gen_machine.set_generator_speed(WARMUPSPEED)
+ self.set_background_speed(self.background_machines, WARMUPSPEED)
+ self.gen_machine.set_udp_packet_size(imix)
+ self.set_background_size(self.background_machines, imix)
+ if FLOWSIZE:
+ _ = self.gen_machine.set_flows(FLOWSIZE)
+ self.set_background_flows(self.background_machines, FLOWSIZE)
+ self.gen_machine.start()
+ self.start_background_traffic(self.background_machines)
+ time.sleep(WARMUPTIME)
+ self.stop_background_traffic(self.background_machines)
+ self.gen_machine.stop()
+
+ def run(self):
+ result_details = {'Details': 'Nothing'}
+ TestResult = 0
+ end_data = {}
+ iteration_prefix = {}
+ self.warm_up()
+ for imix in self.test['imixs']:
+ size = mean(imix)
+ self.gen_machine.set_udp_packet_size(imix)
+ if self.background_machines:
+ backgroundinfo = ('{}Running {} x background traffic not '
+ 'represented in the table{}').format(bcolors.FLASH,
+ len(self.background_machines),bcolors.ENDC)
+ else:
+ backgroundinfo = '{}{}'.format(bcolors.FLASH,bcolors.ENDC)
+ self.set_background_size(self.background_machines, imix)
+ RapidLog.info('+' + '-' * 200 + '+')
+ RapidLog.info(("| UDP, {:>5} bytes, different number of flows by "
+ "randomizing SRC & DST UDP port. {:128.128}|").
+ format(round(size), backgroundinfo))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 11 + '+' + '-' * 4 + '+')
+ RapidLog.info(('| Flows | Speed requested | Gen by core | Sent by'
+ ' NIC | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f}'
+ ' Pcentil| Max. Lat.| Sent | Received | Lost | Total'
+ ' Lost|L.Ratio|Mis-ordered|Time').format(self.test['lat_percentile']*100))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 11 + '+' + '-' * 4 + '+')
+ for flow_number in self.test['flows']:
+ attempts = 0
+ self.gen_machine.reset_stats()
+ if self.sut_machine:
+ self.sut_machine.reset_stats()
+ if flow_number != 0:
+ flow_number = self.gen_machine.set_flows(flow_number)
+ self.set_background_flows(self.background_machines, flow_number)
+ end_data['speed'] = None
+ speed = self.get_start_speed_and_init(size)
+ while True:
+ attempts += 1
+ endwarning = False
+ print('{} flows: Measurement ongoing at speed: {}%'.format(
+ str(flow_number), str(round(speed, 2))), end=' \r')
+ sys.stdout.flush()
+ iteration_data = self.run_iteration(
+ float(self.test['runtime']),flow_number,size,speed)
+ if iteration_data['r'] > 1:
+ retry_warning = '{} {:1} retries needed{}'.format(
+ bcolors.WARNING, iteration_data['r'],
+ bcolors.ENDC)
+ else:
+ retry_warning = ''
+ # Drop rate is expressed in percentage. lat_used is a ratio
+ # (0 to 1). The sum of these 2 should be 100%.
+ # If the sum is lower than 95, it means that more than 5%
+                    # of the latency measurements were dropped for accuracy
+ # reasons.
+ if (iteration_data['drop_rate'] +
+ iteration_data['lat_used'] * 100) < 95:
+ lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
+ '{}').format(bcolors.WARNING,
+ iteration_data['lat_used'] * 100,
+ bcolors.ENDC)
+ else:
+ lat_warning = ''
+ iteration_prefix = {'speed' : bcolors.ENDC,
+ 'lat_avg' : bcolors.ENDC,
+ 'lat_perc' : bcolors.ENDC,
+ 'lat_max' : bcolors.ENDC,
+ 'abs_drop_rate' : bcolors.ENDC,
+ 'mis_ordered' : bcolors.ENDC,
+ 'drop_rate' : bcolors.ENDC}
+ if self.test['test'] == 'fixed_rate':
+ end_data = copy.deepcopy(iteration_data)
+ end_prefix = copy.deepcopy(iteration_prefix)
+ if lat_warning or retry_warning:
+ endwarning = '| | {:177.177} |'.format(
+ retry_warning + lat_warning)
+ success = True
+ # TestResult = TestResult + iteration_data['pps_rx']
+ # fixed rate testing result is strange: we just report
+ # the pps received
+ # The following if statement is testing if we pass the
+ # success criteria of a certain drop rate, average latency
+ # and maximum latency below the threshold.
+ # The drop rate success can be achieved in 2 ways: either
+                    # the drop rate is below a threshold, or we want that no
+ # packet has been lost during the test.
+ # This can be specified by putting 0 in the .test file
+ elif ((self.get_pps(speed,size) - iteration_data['pps_tx']) / self.get_pps(speed,size)) \
+ < self.test['generator_threshold'] and \
+ ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or \
+ (iteration_data['abs_dropped']==self.test['drop_rate_threshold']==0)) and \
+ (iteration_data['lat_avg']< self.test['lat_avg_threshold']) and \
+ (iteration_data['lat_perc']< self.test['lat_perc_threshold']) and \
+ (iteration_data['lat_max'] < self.test['lat_max_threshold'] and \
+ iteration_data['mis_ordered'] <= self.test['mis_ordered_threshold']):
+ end_data = copy.deepcopy(iteration_data)
+ end_prefix = copy.deepcopy(iteration_prefix)
+ success = True
+ success_message=' SUCCESS'
+ if (old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))>0.01:
+ iteration_prefix['speed'] = bcolors.WARNING
+ if iteration_data['abs_tx_fail'] > 0:
+ gen_warning = bcolors.WARNING + ' Network limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps - {} failed to be transmitted'.format(self.get_pps(speed,size), iteration_data['pps_tx'], iteration_data['abs_tx_fail']) + bcolors.ENDC
+ else:
+ gen_warning = bcolors.WARNING + ' Generator limit?: requesting {:<.3f} Mpps and getting {:<.3f} Mpps'.format(self.get_pps(speed,size), iteration_data['pps_tx']) + bcolors.ENDC
+ endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning + gen_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) + success_message +
+ retry_warning + lat_warning + gen_warning)
+ break
+ else:
+ iteration_prefix['speed'] = bcolors.ENDC
+ gen_warning = ''
+ if lat_warning or retry_warning:
+ endwarning = '| | {:186.186} |'.format(retry_warning + lat_warning)
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) + success_message +
+ retry_warning + lat_warning + gen_warning)
+ else:
+ success_message=' FAILED'
+ if ((iteration_data['abs_dropped']>0) and (self.test['drop_rate_threshold'] ==0)):
+ iteration_prefix['abs_drop_rate'] = bcolors.FAIL
+ if (iteration_data['drop_rate'] <= self.test['drop_rate_threshold']):
+ iteration_prefix['drop_rate'] = bcolors.ENDC
+ else:
+ iteration_prefix['drop_rate'] = bcolors.FAIL
+ if (iteration_data['lat_avg']< self.test['lat_avg_threshold']):
+ iteration_prefix['lat_avg'] = bcolors.ENDC
+ else:
+ iteration_prefix['lat_avg'] = bcolors.FAIL
+ if (iteration_data['lat_perc']< self.test['lat_perc_threshold']):
+ iteration_prefix['lat_perc'] = bcolors.ENDC
+ else:
+ iteration_prefix['lat_perc'] = bcolors.FAIL
+ if (iteration_data['lat_max']< self.test['lat_max_threshold']):
+ iteration_prefix['lat_max'] = bcolors.ENDC
+ else:
+ iteration_prefix['lat_max'] = bcolors.FAIL
+ if ((old_div((self.get_pps(speed,size) - iteration_data['pps_tx']),self.get_pps(speed,size)))<0.001):
+ iteration_prefix['speed'] = bcolors.ENDC
+ else:
+ iteration_prefix['speed'] = bcolors.FAIL
+ if (iteration_data['mis_ordered']< self.test['mis_ordered_threshold']):
+ iteration_prefix['mis_ordered'] = bcolors.ENDC
+ else:
+ iteration_prefix['mis_ordered'] = bcolors.FAIL
+
+ success = False
+ RapidLog.debug(self.report_result(-attempts, size,
+ iteration_data, iteration_prefix) +
+ success_message + retry_warning + lat_warning)
+ speed = self.new_speed(speed, size, success)
+ if self.test['test'] == 'increment_till_fail':
+ if not success:
+ break
+ elif self.resolution_achieved():
+ break
+ if end_data['speed'] is None:
+ end_data = iteration_data
+ end_prefix = iteration_prefix
+ RapidLog.info('|{:>7} | {:<177} |'.format("FAILED","Speed 0 or close to 0, data for last failed step below:"))
+ RapidLog.info(self.report_result(flow_number, size,
+ end_data, end_prefix))
+ if end_data['avg_bg_rate']:
+ tot_avg_rx_rate = end_data['pps_rx'] + (end_data['avg_bg_rate'] * len(self.background_machines))
+ endtotaltrafficrate = '| | Total amount of traffic received by all generators during this test: {:>4.3f} Gb/s {:7.3f} Mpps {} |'.format(RapidTest.get_speed(tot_avg_rx_rate,size) , tot_avg_rx_rate, ' '*84)
+ RapidLog.info (endtotaltrafficrate)
+ if endwarning:
+ RapidLog.info (endwarning)
+ if self.test['test'] != 'fixed_rate':
+ TestResult = TestResult + end_data['pps_rx']
+ end_data['test'] = self.test['testname']
+ end_data['environment_file'] = self.test['environment_file']
+ end_data['Flows'] = flow_number
+ end_data['Size'] = size
+ end_data['RequestedSpeed'] = RapidTest.get_pps(end_data['speed'] ,size)
+ result_details = self.post_data(end_data)
+ RapidLog.debug(result_details)
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ return (TestResult, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
new file mode 100644
index 00000000..e52b17db
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_generator_machine.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from rapid_log import RapidLog
+from rapid_machine import RapidMachine
+from math import ceil, log2
+
+
+class RandomPortBits(object):
+ """
+ Class to generate PROX bitmaps for random bit generation
+    in source & dst UDP ports to emulate multiple flows
+ """
+ @staticmethod
+ def get_bitmap(flow_number):
+ number_of_random_bits = ceil(log2(flow_number))
+ if number_of_random_bits > 30:
+ raise Exception("Not able to support that many flows")
+        # throw exception since we need the first bit to be 1
+        # Otherwise, the randomization could result in all 0's
+ # and that might be an invalid UDP port and result in
+ # packets being discarded
+ src_number_of_random_bits = number_of_random_bits // 2
+ dst_number_of_random_bits = (number_of_random_bits -
+ src_number_of_random_bits)
+ src_port_bitmap = '1000000000000000'.replace ('0','X',
+ src_number_of_random_bits)
+ dst_port_bitmap = '1000000000000000'.replace ('0','X',
+ dst_number_of_random_bits)
+ return [src_port_bitmap, dst_port_bitmap, 1 << number_of_random_bits]
+
+class RapidGeneratorMachine(RapidMachine):
+ """
+ Class to deal with a generator PROX instance (VM, bare metal, container)
+ """
+ def __init__(self, key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly, ipv6):
+ mac_address_size = 6
+ ethertype_size = 2
+ FCS_size = 4
+ if ipv6:
+ ip_header_size = 40
+ self.ip_length_offset = 18
+ # In IPV6, the IP size is the size of the IP content
+ self.frame_size_minus_ip_size = (2 * mac_address_size +
+ ethertype_size + ip_header_size + FCS_size)
+ else:
+ ip_header_size = 20
+ self.ip_length_offset = 16
+ # In IPV4, the IP size is the size of the IP header + IP content
+ self.frame_size_minus_ip_size = (2 * mac_address_size +
+ ethertype_size + FCS_size)
+ self.frame_size_minus_udp_header_and_content = (2 * mac_address_size +
+ ethertype_size + ip_header_size + FCS_size )
+ udp_header_start_offset = (2 * mac_address_size + ethertype_size +
+ ip_header_size)
+ self.udp_source_port_offset = udp_header_start_offset
+ self.udp_dest_port_offset = udp_header_start_offset + 2
+ self.udp_length_offset = udp_header_start_offset + 4
+ self.ipv6 = ipv6
+ if 'bucket_size_exp' in machine_params.keys():
+ self.bucket_size_exp = machine_params['bucket_size_exp']
+ else:
+ self.bucket_size_exp = 11
+ super().__init__(key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly)
+
+ def get_cores(self):
+ return (self.machine_params['gencores'] +
+ self.machine_params['latcores'])
+
+ def remap_all_cpus(self):
+ """Convert relative cpu ids for different parameters (gencores, latcores)
+ """
+ super().remap_all_cpus()
+
+ if self.cpu_mapping is None:
+ return
+
+ if 'gencores' in self.machine_params.keys():
+ cpus_remapped = super().remap_cpus(self.machine_params['gencores'])
+ RapidLog.debug('{} ({}): gencores {} remapped to {}'.format(self.name, self.ip, self.machine_params['gencores'], cpus_remapped))
+ self.machine_params['gencores'] = cpus_remapped
+
+ if 'latcores' in self.machine_params.keys():
+ cpus_remapped = super().remap_cpus(self.machine_params['latcores'])
+ RapidLog.debug('{} ({}): latcores {} remapped to {}'.format(self.name, self.ip, self.machine_params['latcores'], cpus_remapped))
+ self.machine_params['latcores'] = cpus_remapped
+
+ def generate_lua(self):
+ appendix = 'gencores="%s"\n'% ','.join(map(str,
+ self.machine_params['gencores']))
+ appendix = appendix + 'latcores="%s"\n'% ','.join(map(str,
+ self.machine_params['latcores']))
+ appendix = (appendix +
+ 'bucket_size_exp="{}"\n'.format(self.bucket_size_exp))
+ if 'heartbeat' in self.machine_params.keys():
+ appendix = (appendix +
+ 'heartbeat="%s"\n'% self.machine_params['heartbeat'])
+ else:
+ appendix = appendix + 'heartbeat="60"\n'
+ super().generate_lua(appendix)
+
+ def start_prox(self):
+ # Start the generator with the -e option so that the cores don't
+ # start automatically
+ super().start_prox('-e')
+
+ def set_generator_speed(self, speed):
+ # The assumption is that we only use task 0 for generating
+ # We should check the gen.cfg file to make sure there is only task=0
+ speed_per_gen_core = speed / len(self.machine_params['gencores'])
+ self.socket.speed(speed_per_gen_core, self.machine_params['gencores'])
+
+ def set_udp_packet_size(self, imix_frame_sizes):
+ # We should check the gen.cfg to make sure we only send UDP packets
+ # If only 1 packet size, still using the 'old' way of setting the
+ # packet sizes in PROX. Otherwise, using the 'new' way which
+ # automatically sets IP and UDP sizes. We should switch to the new way
+ # eventually for all cases.
+ if len(imix_frame_sizes) == 1:
+ # Frame size = PROX pkt size + 4 bytes CRC
+ # The set_size function takes the PROX packet size as a parameter
+ self.socket.set_size(self.machine_params['gencores'], 0,
+ imix_frame_sizes[0] - 4)
+ # Writing length in the ip header
+ self.socket.set_value(self.machine_params['gencores'], 0,
+ self.ip_length_offset, imix_frame_sizes[0] -
+ self.frame_size_minus_ip_size, 2)
+ # Writing length in the udp header
+ self.socket.set_value(self.machine_params['gencores'], 0,
+ self.udp_length_offset, imix_frame_sizes[0] -
+ self.frame_size_minus_udp_header_and_content, 2)
+ else:
+ if self.ipv6:
+ RapidLog.critical('IMIX not supported for IPV6')
+ prox_sizes = [frame_size - 4 for frame_size in imix_frame_sizes]
+ self.socket.set_imix(self.machine_params['gencores'], 0,
+ prox_sizes)
+
+ def set_flows(self, number_of_flows):
+ source_port, destination_port, actualflows = RandomPortBits.get_bitmap(
+ number_of_flows)
+ self.socket.set_random(self.machine_params['gencores'],0,
+ self.udp_source_port_offset, source_port,2)
+ self.socket.set_random(self.machine_params['gencores'],0,
+ self.udp_dest_port_offset, destination_port,2)
+ return actualflows
+
+ def start_gen_cores(self):
+ self.socket.start(self.machine_params['gencores'])
+
+ def stop_gen_cores(self):
+ self.socket.stop(self.machine_params['gencores'])
+
+ def start_latency_cores(self):
+ self.socket.start(self.machine_params['latcores'])
+
+ def stop_latency_cores(self):
+ self.socket.stop(self.machine_params['latcores'])
+
+ def lat_stats(self):
+ # Checking all tasks in the cfg file. In this way, we can have more
+ # latency tasks on the same core
+ return (self.socket.lat_stats(self.machine_params['latcores'],
+ self.all_tasks_for_this_cfg))
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml
new file mode 100644
index 00000000..4d210409
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: rapid
+description: A Helm chart for deploying RAPID test scripts and environment
+type: application
+version: 0.0.1
+appVersion: "1.0.0"
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml
new file mode 100644
index 00000000..74fc6297
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/deployment.yaml
@@ -0,0 +1,26 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rapid-testing
+ namespace: {{ .Values.namespace }}
+ labels:
+ app: rapid-testing
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rapid-testing
+ template:
+ metadata:
+ labels:
+ app: rapid-testing
+ spec:
+ serviceAccountName: rapid-testing-sa
+ containers:
+ - name: rapid-mgmt
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml
new file mode 100644
index 00000000..7886ade3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/templates/serviceaccount.yaml
@@ -0,0 +1,36 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Values.namespace }}
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rapid-testing-sa
+ namespace: {{ .Values.namespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: rapid-testing-cr
+rules:
+- apiGroups: [""]
+ resources: ["pods", "pods/exec", "pods/status"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: rapid-testing-crb
+subjects:
+- kind: ServiceAccount
+ name: rapid-testing-sa
+ namespace: {{ .Values.namespace }}
+roleRef:
+ kind: ClusterRole
+ name: rapid-testing-cr
+ apiGroup: rbac.authorization.k8s.io
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml
new file mode 100644
index 00000000..76b8037a
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_helm_chart/values.yaml
@@ -0,0 +1,8 @@
+namespace: rapid-testing
+
+image:
+ repository: opnfv/rapid
+ tag: "latest"
+ pullPolicy: IfNotPresent
+
+nodeSelector: {}
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
new file mode 100644
index 00000000..3945cd8e
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_impairtest.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_log import bcolors
+from rapid_test import RapidTest
+from statistics import mean
+
+class ImpairTest(RapidTest):
+ """
+ Class to manage the impair testing
+ """
+ def __init__(self, test_param, lat_percentile, runtime, testname,
+ environment_file, gen_machine, sut_machine, background_machines):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.gen_machine = gen_machine
+ self.sut_machine = sut_machine
+ self.background_machines = background_machines
+ self.test['lat_percentile'] = lat_percentile
+
+ def run(self):
+ result_details = {'Details': 'Nothing'}
+ imix = self.test['imix']
+ size = mean (imix)
+ flow_number = self.test['flowsize']
+ attempts = self.test['steps']
+ self.gen_machine.set_udp_packet_size(imix)
+ flow_number = self.gen_machine.set_flows(flow_number)
+ self.gen_machine.start_latency_cores()
+ RapidLog.info('+' + '-' * 188 + '+')
+ RapidLog.info(("| Generator is sending UDP ({:>5} flow) packets ({:>5}"
+ " bytes) to SUT via GW dropping and delaying packets. SUT sends "
+ "packets back.{:>60}").format(flow_number,round(size),'|'))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ RapidLog.info(('| Test | Speed requested | Gen by core | Sent by NIC'
+ ' | Fwrd by SUT | Rec. by core | Avg. Lat.|{:.0f} Pcentil'
+ '| Max. Lat.| Sent | Received | Lost | Total Lost|'
+ 'L.Ratio|Time|').format(self.test['lat_percentile']*100))
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ speed = self.test['startspeed']
+ self.gen_machine.set_generator_speed(speed)
+ while attempts:
+ attempts -= 1
+ print('Measurement ongoing at speed: ' + str(round(speed,2)) + '% ',end='\r')
+ sys.stdout.flush()
+ time.sleep(1)
+ # Get statistics now that the generation is stable and NO ARP messages any more
+ iteration_data = self.run_iteration(float(self.test['runtime']),flow_number,size,speed)
+ iteration_data['speed'] = speed
+ # Drop rate is expressed in percentage. lat_used is a ratio (0 to 1). The sum of these 2 should be 100%.
+            # If the sum is lower than 95, it means that more than 5% of the latency measurements were dropped for accuracy reasons.
+ if (iteration_data['drop_rate'] +
+ iteration_data['lat_used'] * 100) < 95:
+ lat_warning = ('{} Latency accuracy issue?: {:>3.0f}%'
+ '{}').format(bcolors.WARNING,
+ iteration_data['lat_used']*100, bcolors.ENDC)
+ else:
+ lat_warning = ''
+ iteration_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(attempts, size, iteration_data,
+ iteration_prefix))
+ iteration_data['test'] = self.test['testname']
+ iteration_data['environment_file'] = self.test['environment_file']
+ iteration_data['Flows'] = flow_number
+ iteration_data['Size'] = size
+ iteration_data['RequestedSpeed'] = RapidTest.get_pps(
+ iteration_data['speed'] ,size)
+ result_details = self.post_data(iteration_data)
+ RapidLog.debug(result_details)
+ RapidLog.info('+' + '-' * 8 + '+' + '-' * 18 + '+' + '-' * 13 +
+ '+' + '-' * 13 + '+' + '-' * 13 + '+' + '-' * 24 + '+' +
+ '-' * 10 + '+' + '-' * 10 + '+' + '-' * 10 + '+' + '-' * 11
+ + '+' + '-' * 11 + '+' + '-' * 11 + '+' + '-' * 11 + '+'
+ + '-' * 7 + '+' + '-' * 4 + '+')
+ self.gen_machine.stop_latency_cores()
+ return (True, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
new file mode 100644
index 00000000..de7e6ae3
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_irqtest.py
@@ -0,0 +1,106 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from past.utils import old_div
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class IrqTest(RapidTest):
+ """
+ Class to manage the irq testing
+ """
+ def __init__(self, test_param, runtime, testname, environment_file,
+ machines):
+ super().__init__(test_param, runtime, testname, environment_file)
+ self.machines = machines
+
+ def run(self):
+ RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
+ RapidLog.info("| Measuring time probably spent dealing with an interrupt. Interrupting DPDK cores for more than 50us might be problematic |")
+ RapidLog.info("| and result in packet loss. The first row shows the interrupted time buckets: first number is the bucket between 0us and |")
+ RapidLog.info("| that number expressed in us and so on. The numbers in the other rows show how many times per second, the program was |")
+ RapidLog.info("| interrupted for a time as specified by its bucket. '0' is printed when there are no interrupts in this bucket throughout |")
+ RapidLog.info("| the duration of the test. 0.00 means there were interrupts in this bucket but very few. Due to rounding this shows as 0.00 |")
+ RapidLog.info("+----------------------------------------------------------------------------------------------------------------------------+")
+ sys.stdout.flush()
+ max_loop_duration = 0
+ machine_details = {}
+ for machine in self.machines:
+ buckets=machine.socket.show_irq_buckets(machine.get_cores()[0])
+ if max_loop_duration == 0:
+ # First time we go through the loop, we need to initialize
+ # result_details
+ result_details = {'test': self.test['testname'],
+ 'environment_file': self.test['environment_file'],
+ 'buckets': buckets}
+ print('Measurement ongoing ... ',end='\r')
+ machine.start() # PROX cores will be started within 0 to 1 seconds
+ # That is why we sleep a bit over 1 second to make sure all cores
+ # are started
+ time.sleep(1.2)
+ old_irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ irq = [[0 for x in range(len(buckets))] for y in range(len(machine.get_cores()))]
+ column_names = []
+ for bucket in buckets:
+ column_names.append('<{}'.format(bucket))
+ column_names[-1] = '>{}'.format(buckets[-2])
+ for j,bucket in enumerate(buckets):
+ for i,irqcore in enumerate(machine.get_cores()):
+ old_irq[i][j] = machine.socket.irq_stats(irqcore,j)
+ # Measurements in the loop above, are updated by PROX every second
+ # This means that taking the same measurement 0.5 second later
+ # might result in the same data or data from the next 1s window
+ time.sleep(float(self.test['runtime']))
+ row_names = []
+ for i,irqcore in enumerate(machine.get_cores()):
+ row_names.append(irqcore)
+ for j,bucket in enumerate(buckets):
+ diff = machine.socket.irq_stats(irqcore,j) - old_irq[i][j]
+ if diff == 0:
+ irq[i][j] = '0'
+ else:
+ irq[i][j] = str(round(old_div(diff,
+ float(self.test['runtime'])), 2))
+ if max_loop_duration < int(bucket):
+ max_loop_duration = int(bucket)
+ # Measurements in the loop above, are updated by PROX every second
+ # This means that taking the same measurement 0.5 second later
+ # might result in the same data or data from the next 1s window
+ # Conclusion: we don't know the exact window size.
+ # Real measurement windows might be wrong by 1 second
+ # This could be fixed in this script by checking this data every
+ # 0.5 seconds Not implemented since we can also run this test for
+ # a longer time and decrease the error. The absolute number of
+ # interrupts is not so important.
+ machine.stop()
+ core_details = {}
+ RapidLog.info('Results for PROX instance %s'%machine.name)
+ RapidLog.info('{:>12}'.format('bucket us') +
+ ''.join(['{:>12}'.format(item) for item in column_names]))
+ for j, row in enumerate(irq):
+ RapidLog.info('Core {:>7}'.format(row_names[j]) +
+ ''.join(['{:>12}'.format(item) for item in row]))
+ core_details['Core {}'.format(row_names[j])] = row
+ machine_details[machine.name] = core_details
+ result_details['machine_data'] = machine_details
+ result_details = self.post_data(result_details)
+ return (500000 - max_loop_duration, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py
new file mode 100644
index 00000000..1d1112f7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_deployment.py
@@ -0,0 +1,236 @@
+##
+## Copyright (c) 2019-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+from kubernetes import client, config
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
+import logging
+from logging import handlers
+
+from rapid_k8s_pod import Pod
+
+class K8sDeployment:
+ """Deployment class to create containers for test execution in Kubernetes
+ environment.
+ """
+ LOG_FILE_NAME = "createrapidk8s.log"
+ SSH_PRIVATE_KEY = "./rapid_rsa_key"
+ SSH_USER = "rapid"
+
+ POD_YAML_TEMPLATE_FILE_NAME = "pod-rapid.yaml"
+
+ _log = None
+ _create_config = None
+ _runtime_config = None
+ _total_number_of_pods = 0
+ _namespace = "rapid-testing"
+ _pods = []
+
+ def __init__(self):
+ # Configure logger
+ self._log = logging.getLogger("k8srapid")
+ self._log.setLevel(logging.DEBUG)
+
+ console_formatter = logging.Formatter("%(message)s")
+ console_handler = logging.StreamHandler(sys.stdout)
+ console_handler.setLevel(logging.DEBUG)
+ console_handler.setFormatter(console_formatter)
+
+ file_formatter = logging.Formatter("%(asctime)s - "
+ "%(levelname)s - "
+ "%(message)s")
+ file_handler = logging.handlers.RotatingFileHandler(self.LOG_FILE_NAME,
+ backupCount=10)
+ file_handler.setLevel(logging.DEBUG)
+ file_handler.setFormatter(file_formatter)
+
+ self._log.addHandler(file_handler)
+ self._log.addHandler(console_handler)
+
+ # Initialize k8s plugin
+ try:
+ config.load_kube_config()
+ except:
+ config.load_incluster_config()
+
+ Pod.k8s_CoreV1Api = client.CoreV1Api()
+
def load_create_config(self, config_file_name):
    """Read and parse the configuration file for the test environment.

    Populates self._total_number_of_pods, self._namespace and self._pods
    from the [DEFAULT] and [PODx] sections.
    Returns 0 on success, -1 when the file cannot be read or when a
    mandatory option is missing from the [DEFAULT] section.
    """
    self._log.info("Loading configuration file %s", config_file_name)
    self._create_config = configparser.RawConfigParser()
    try:
        self._create_config.read(config_file_name)
    except Exception as e:
        self._log.error("Failed to read config file!\n%s\n" % e)
        return -1

    # Parse [DEFAULT] section: both options below are mandatory
    if self._create_config.has_option("DEFAULT", "total_number_of_pods"):
        self._total_number_of_pods = self._create_config.getint(
            "DEFAULT", "total_number_of_pods")
    else:
        self._log.error("No option total_number_of_pods in DEFAULT section")
        return -1

    self._log.debug("Total number of pods %d" % self._total_number_of_pods)

    if self._create_config.has_option("DEFAULT", "namespace"):
        self._namespace = self._create_config.get("DEFAULT", "namespace")
    else:
        self._log.error("No option namespace in DEFAULT section")
        return -1

    # BUG FIX: the original logged self._total_number_of_pods here, so
    # the configured namespace value never appeared in the debug log.
    self._log.debug("Using namespace %s" % self._namespace)

    # Parse [PODx] sections; every option falls back to a default
    for i in range(1, int(self._total_number_of_pods) + 1):
        section = "POD%d" % i

        # POD name, defaults to a generated name
        if self._create_config.has_option(section, "name"):
            pod_name = self._create_config.get(section, "name")
        else:
            pod_name = "prox-pod-%d" % i

        # Hostname of the node the POD should be scheduled on (optional)
        if self._create_config.has_option(section, "nodeSelector_hostname"):
            pod_nodeselector_hostname = self._create_config.get(
                section, "nodeSelector_hostname")
        else:
            pod_nodeselector_hostname = None

        # POD spec yaml file, defaults to the template file
        if self._create_config.has_option(section, "spec_file_name"):
            pod_spec_file_name = self._create_config.get(
                section, "spec_file_name")
        else:
            pod_spec_file_name = K8sDeployment.POD_YAML_TEMPLATE_FILE_NAME

        # POD dataplane static IP (optional)
        if self._create_config.has_option(section, "dp_ip"):
            pod_dp_ip = self._create_config.get(section, "dp_ip")
        else:
            pod_dp_ip = None

        # POD dataplane subnet prefix length, defaults to /24
        if self._create_config.has_option(section, "dp_subnet"):
            pod_dp_subnet = self._create_config.get(section, "dp_subnet")
        else:
            pod_dp_subnet = "24"

        pod = Pod(pod_name, self._namespace)
        pod.set_nodeselector(pod_nodeselector_hostname)
        pod.set_spec_file_name(pod_spec_file_name)
        pod.set_dp_ip(pod_dp_ip)
        pod.set_dp_subnet(pod_dp_subnet)
        pod.set_id(i)

        # Add POD to the list of PODs which need to be created
        self._pods.append(pod)

    return 0
+
+ def create_pods(self):
+ """ Create test PODs and wait for them to start.
+ Collect information for tests to run.
+ """
+ self._log.info("Creating PODs...")
+
+ # Create PODs using template from yaml file
+ for pod in self._pods:
+ self._log.info("Creating POD %s...", pod.get_name())
+ pod.create_from_yaml()
+
+ # Wait for PODs to start
+ for pod in self._pods:
+ pod.wait_for_start()
+
+ # Collect information from started PODs for test execution
+ for pod in self._pods:
+ pod.set_ssh_credentials(K8sDeployment.SSH_USER, K8sDeployment.SSH_PRIVATE_KEY)
+ pod.get_sriov_dev_mac()
+ pod.get_qat_dev()
+
+ def save_runtime_config(self, config_file_name):
+ self._log.info("Saving config %s for runrapid script...",
+ config_file_name)
+ self._runtime_config = configparser.RawConfigParser()
+
+ # Section [DEFAULT]
+# self._runtime_config.set("DEFAULT",
+# "total_number_of_test_machines",
+# self._total_number_of_pods)
+
+ # Section [ssh]
+ self._runtime_config.add_section("ssh")
+ self._runtime_config.set("ssh",
+ "key",
+ K8sDeployment.SSH_PRIVATE_KEY)
+ self._runtime_config.set("ssh",
+ "user",
+ K8sDeployment.SSH_USER)
+
+ # Section [rapid]
+ self._runtime_config.add_section("rapid")
+ self._runtime_config.set("rapid",
+ "total_number_of_machines",
+ self._total_number_of_pods)
+
+ # Export information about each pod
+ # Sections [Mx]
+ for pod in self._pods:
+ self._runtime_config.add_section("M%d" % pod.get_id())
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "admin_ip", pod.get_admin_ip())
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "dp_mac1", pod.get_dp_mac())
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "dp_pci_dev", pod.get_dp_pci_dev())
+ if (pod.get_qat_pci_dev()):
+ for qat_index, qat_device in enumerate(pod.get_qat_pci_dev()):
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "qat_pci_dev%d" % qat_index, qat_device)
+ self._runtime_config.set("M%d" % pod.get_id(),
+ "dp_ip1", pod.get_dp_ip() + "/" +
+ pod.get_dp_subnet())
+
+ # Section [Varia]
+ self._runtime_config.add_section("Varia")
+ self._runtime_config.set("Varia",
+ "vim",
+ "kubernetes")
+
+ # Write runtime config file
+ with open(config_file_name, "w") as file:
+ self._runtime_config.write(file)
+
+ def delete_pods(self):
+ for pod in self._pods:
+ pod.terminate()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py
new file mode 100644
index 00000000..beaedd69
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_k8s_pod.py
@@ -0,0 +1,264 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from os import path
+import time, yaml
+import logging
+from kubernetes import client, config
+
+from rapid_sshclient import SSHClient
+
+class Pod:
+ """Class which represents test pods.
+ For example with traffic gen, forward/swap applications, etc
+ """
+ k8s_CoreV1Api = None
+
+ _log = None
+
+ _name = "pod"
+ _namespace = "default"
+ _nodeSelector_hostname = None
+ _spec_filename = None
+ _last_status = None
+ _id = None
+ _admin_ip = None
+ _dp_ip = None
+ _dp_subnet = None
+
+ _ssh_client = None
+
+ _sriov_vf = None
+ _sriov_vf_mac = None
+
+ def __init__(self, name, namespace = "default", logger_name = "k8srapid"):
+ self._log = logging.getLogger(logger_name)
+
+ self._name = name
+ self._namespace = namespace
+ self._ssh_client = SSHClient(logger_name = logger_name)
+ self.qat_vf = []
+
+ def __del__(self):
+ """Destroy POD. Do a cleanup.
+ """
+ if self._ssh_client is not None:
+ self._ssh_client.disconnect()
+
+ def create_from_yaml(self):
+ """Load POD description from yaml file.
+ """
+ with open(path.join(path.dirname(__file__),
+ self._spec_filename)) as yaml_file:
+ self.body = yaml.safe_load(yaml_file)
+
+ self.body["metadata"]["name"] = self._name
+
+ if (self._nodeSelector_hostname is not None):
+ if ("nodeSelector" not in self.body["spec"]):
+ self.body["spec"]["nodeSelector"] = {}
+ self.body["spec"]["nodeSelector"]["kubernetes.io/hostname"] = \
+ self._nodeSelector_hostname
+ self._log.debug("Creating POD, body:\n%s" % self.body)
+
+ try:
+ self.k8s_CoreV1Api.create_namespaced_pod(body = self.body,
+ namespace = self._namespace)
+ except client.rest.ApiException as e:
+ self._log.error("Couldn't create POD %s!\n%s\n" % (self._name,
+ e))
+
+ def terminate(self):
+ """Terminate POD. Close SSH connection.
+ """
+ if self._ssh_client is not None:
+ self._ssh_client.disconnect()
+
+ try:
+ self.k8s_CoreV1Api.delete_namespaced_pod(name = self._name,
+ namespace = self._namespace)
+ except client.rest.ApiException as e:
+ if e.reason != "Not Found":
+ self._log.error("Couldn't delete POD %s!\n%s\n" % (self._name, e.reason))
+
+ def update_admin_ip(self):
+ """Check for admin IP address assigned by k8s.
+ """
+ try:
+ pod = self.k8s_CoreV1Api.read_namespaced_pod_status(name = self._name, namespace = self._namespace)
+ self._admin_ip = pod.status.pod_ip
+ except client.rest.ApiException as e:
+ self._log.error("Couldn't update POD %s admin IP!\n%s\n" % (self._name, e))
+
+ def wait_for_start(self):
+ """Wait for POD to start.
+ """
+ self._log.info("Waiting for POD %s to start..." % self._name)
+ while True:
+ self.get_status()
+ if (self._last_status == "Running" or self._last_status == "Failed"
+ or self._last_status == "Unknown"):
+ break
+ else:
+ time.sleep(3)
+
+ self.update_admin_ip()
+
+ return self._last_status
+
+ def ssh_run_cmd(self, cmd):
+ """Execute command for POD via SSH connection.
+ SSH credentials should be configured before use of this function.
+ """
+ self._ssh_client.run_cmd(cmd)
+
+ def get_name(self):
+ return self._name
+
+ def get_admin_ip(self):
+ return self._admin_ip
+
+ def get_dp_ip(self):
+ return self._dp_ip
+
+ def get_dp_subnet(self):
+ return self._dp_subnet
+
+ def get_dp_mac(self):
+ return self._sriov_vf_mac
+
+ def get_dp_pci_dev(self):
+ return self._sriov_vf
+
+ def get_qat_pci_dev(self):
+ return self.qat_vf
+
+ def get_id(self):
+ return self._id
+
def get_status(self):
    """Get the current status for the pod.

    On API failure the previously cached status is returned instead of
    raising, so callers polling in a loop (wait_for_start) keep working.
    """
    try:
        pod = self.k8s_CoreV1Api.read_namespaced_pod_status(
            name = self._name, namespace = self._namespace)
    except client.rest.ApiException as e:
        self._log.error("Couldn't read POD %s status!\n%s\n"
                        % (self._name, e))
        # BUG FIX: after an exception 'pod' is unbound; the original
        # fell through and raised NameError on pod.status.phase.
        # Return the last known status instead.
        return self._last_status

    self._last_status = pod.status.phase
    return self._last_status
+
+ def get_qat_dev(self):
+ """Get qat devices if any, assigned by k8s QAT device plugin.
+ """
+ self._log.info("Checking assigned QAT VF for POD %s" % self._name)
+ ret = self._ssh_client.run_cmd("cat /opt/rapid/k8s_qat_device_plugin_envs")
+ if ret != 0:
+ self._log.error("Failed to check assigned QAT VF!"
+ "Error %s" % self._ssh_client.get_error())
+ return -1
+
+ cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
+
+ if cmd_output:
+ self._log.debug("Before: Using QAT VF %s" % self.qat_vf)
+ self._log.debug("Environment variable %s" % cmd_output)
+ for line in cmd_output.splitlines():
+ self.qat_vf.append(line.split("=")[1])
+ self._log.debug("Using QAT VF %s" % self.qat_vf)
+ else:
+ self._log.debug("No QAT devices for this pod")
+ self.qat_vf = None
+
def get_sriov_dev_mac(self):
    """Get SRIOV VF devices assigned by the k8s SRIOV network device plugin.
    Return 0 in case of successful configuration.
    Otherwise return -1.
    """
    self._log.info("Checking assigned SRIOV VF for POD %s" % self._name)
    ret = self._ssh_client.run_cmd(
        "cat /opt/rapid/k8s_sriov_device_plugin_envs")
    if ret != 0:
        self._log.error("Failed to check assigned SRIOV VF!"
                        "Error %s" % self._ssh_client.get_error())
        return -1

    cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
    self._log.debug("Environment variable %s" % cmd_output)

    # Parse environment variable: use the first VF from the list
    cmd_output = cmd_output.split("=")[1]
    self._sriov_vf = cmd_output.split(",")[0]
    self._log.debug("Using first SRIOV VF %s" % self._sriov_vf)

    # Find the DPDK version to pick the right EAL device-list option
    self._log.info("Checking DPDK version for POD %s" % self._name)
    ret = self._ssh_client.run_cmd("cat /opt/rapid/dpdk_version")
    if ret != 0:
        self._log.error("Failed to check DPDK version"
                        "Error %s" % self._ssh_client.get_error())
        return -1
    dpdk_version = self._ssh_client.get_output().decode("utf-8").rstrip()
    self._log.debug("DPDK version %s" % dpdk_version)
    # NOTE(review): lexical string comparison of versions; fine for
    # current DPDK releases but '9.x' would compare above '20.11.0'.
    if (dpdk_version >= '20.11.0'):
        allow_parameter = 'allow'
    else:
        allow_parameter = 'pci-whitelist'

    self._log.info("Getting MAC address for assigned SRIOV VF %s" %
                   self._sriov_vf)
    # BUG FIX: the original discarded this return value, so the error
    # check below re-tested the status of the previous command.
    ret = self._ssh_client.run_cmd("sudo /opt/rapid/port_info_app -n 4 \
        --{} {}".format(allow_parameter, self._sriov_vf))
    if ret != 0:
        self._log.error("Failed to get MAC address!"
                        "Error %s" % self._ssh_client.get_error())
        return -1

    # Parse the MAC address out of the port_info_app output
    cmd_output = self._ssh_client.get_output().decode("utf-8").rstrip()
    self._log.debug(cmd_output)
    for line in cmd_output.splitlines():
        if line.startswith("Port 0 MAC: "):
            self._sriov_vf_mac = line[12:]

    self._log.debug("MAC %s" % self._sriov_vf_mac)
+
+ def set_dp_ip(self, dp_ip):
+ self._dp_ip = dp_ip
+
+ def set_dp_subnet(self, dp_subnet):
+ self._dp_subnet = dp_subnet
+
+ def set_id(self, pod_id):
+ self._id = pod_id
+
+ def set_nodeselector(self, hostname):
+ """Set hostname on which POD will be executed.
+ """
+ self._nodeSelector_hostname = hostname
+
+ def set_spec_file_name(self, file_name):
+ """Set pod spec filename.
+ """
+ self._spec_filename = file_name
+
+ def set_ssh_credentials(self, user, rsa_private_key):
+ """Set SSH credentials for the SSH connection to the POD.
+ """
+ self.update_admin_ip()
+ self._ssh_client.set_credentials(ip = self._admin_ip,
+ user = user,
+ rsa_private_key = rsa_private_key)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
new file mode 100644
index 00000000..1ad54273
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_log.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import logging
+from logging.handlers import RotatingFileHandler
+from logging import handlers
+import os
+import sys
+import time
+
+class bcolors(object):
+ HEADER = '\033[95m'
+ OKBLUE = '\033[94m'
+ OKGREEN = '\033[92m'
+ WARNING = '\033[93m'
+ FAIL = '\033[91m'
+ ENDC = '\033[0m'
+ BOLD = '\033[1m'
+ UNDERLINE = '\033[4m'
+ FLASH = '\033[5m'
+
+class RapidLog(object):
+ """
+ Class to deal with rapid logging
+ """
+ log = None
+
+ @staticmethod
+ def log_init(log_file, loglevel, screenloglevel, version):
+ log = logging.getLogger(__name__)
+ makeFileHandler = True
+ makeStreamHandler = True
+ if len(log.handlers) > 0:
+ for handler in log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ makeFileHandler = False
+ elif isinstance(handler, logging.StreamHandler):
+ makeStreamHandler = False
+ if makeStreamHandler:
+ # create formatters
+ screen_formatter = logging.Formatter("%(message)s")
+ # create a console handler
+ # and set its log level to the command-line option
+ #
+ console_handler = logging.StreamHandler(sys.stdout)
+ #console_handler.setLevel(logging.INFO)
+ numeric_screenlevel = getattr(logging, screenloglevel.upper(), None)
+ if not isinstance(numeric_screenlevel, int):
+ raise ValueError('Invalid screenlog level: %s' % screenloglevel)
+ console_handler.setLevel(numeric_screenlevel)
+ console_handler.setFormatter(screen_formatter)
+ # add handler to the logger
+ #
+ log.addHandler(console_handler)
+ if makeFileHandler:
+ # create formatters
+ file_formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
+ # get a top-level logger,
+ # set its log level,
+ # BUT PREVENT IT from propagating messages to the root logger
+ #
+ numeric_level = getattr(logging, loglevel.upper(), None)
+ if not isinstance(numeric_level, int):
+ raise ValueError('Invalid log level: %s' % loglevel)
+ log.setLevel(numeric_level)
+ log.propagate = 0
+
+
+ # create a file handler
+ # and set its log level
+ #
+ file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=10)
+ file_handler.setLevel(numeric_level)
+ file_handler.setFormatter(file_formatter)
+
+ # add handler to the logger
+ #
+ log.addHandler(file_handler)
+
+ # Check if log exists and should therefore be rolled
+ needRoll = os.path.isfile(log_file)
+
+
+ # This is a stale log, so roll it
+ if needRoll:
+ # Add timestamp
+ log.debug('\n---------\nLog closed on %s.\n---------\n' % time.asctime())
+
+ # Roll over on application start
+ file_handler.doRollover()
+
+ # Add timestamp
+ log.debug('\n---------\nLog started on %s.\n---------\n' % time.asctime())
+
+ log.debug("rapid version: " + version)
+ RapidLog.log = log
+
+ @staticmethod
+ def log_close():
+ for handler in RapidLog.log.handlers:
+ if isinstance(handler, logging.FileHandler):
+ handler.close()
+ RapidLog.log.removeHandler(handler)
+
+ @staticmethod
+ def exception(exception_info):
+ RapidLog.log.exception(exception_info)
+ exit(1)
+
+ @staticmethod
+ def critical(critical_info):
+ RapidLog.log.critical(critical_info)
+ exit(1)
+
+ @staticmethod
+ def error(error_info):
+ RapidLog.log.error(error_info)
+
+ @staticmethod
+ def debug(debug_info):
+ RapidLog.log.debug(debug_info)
+
+ @staticmethod
+ def info(info):
+ RapidLog.log.info(info)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
new file mode 100644
index 00000000..47f858d0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_machine.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from rapid_log import RapidLog
+from prox_ctrl import prox_ctrl
+import os
+import re
+import uuid
+
+class RapidMachine(object):
+ """
+ Class to deal with a PROX instance (VM, bare metal, container)
+ """
+ def __init__(self, key, user, password, vim, rundir, resultsdir,
+ machine_params, configonly):
+ self.name = machine_params['name']
+ self.ip = machine_params['admin_ip']
+ self.key = key
+ self.user = user
+ self.password = password
+ self.rundir = rundir
+ self.resultsdir = resultsdir
+ self.dp_ports = []
+ self.dpdk_port_index = []
+ self.configonly = configonly
+ index = 1
+ while True:
+ ip_key = 'dp_ip{}'.format(index)
+ mac_key = 'dp_mac{}'.format(index)
+ if ip_key in machine_params.keys():
+ if mac_key in machine_params.keys():
+ dp_port = {'ip': machine_params[ip_key], 'mac' : machine_params[mac_key]}
+ else:
+ dp_port = {'ip': machine_params[ip_key], 'mac' : None}
+ self.dp_ports.append(dict(dp_port))
+ self.dpdk_port_index.append(index - 1)
+ index += 1
+ else:
+ break
+ self.machine_params = machine_params
+ self.vim = vim
+ self.cpu_mapping = None
+ if 'config_file' in self.machine_params.keys():
+ PROXConfigfile = open (self.machine_params['config_file'], 'r')
+ PROXConfig = PROXConfigfile.read()
+ PROXConfigfile.close()
+ self.all_tasks_for_this_cfg = set(re.findall("task\s*=\s*(\d+)",PROXConfig))
+
+ def get_cores(self):
+ return (self.machine_params['cores'])
+
def expand_list_format(self, list):
    """Expand a cpuset list-format string into a list of integers.

    The input is a comma-separated mix of single numbers and inclusive
    ranges, e.g. "0,3-5,7" -> [0, 3, 4, 5, 7]. For more information see
    https://man7.org/linux/man-pages/man7/cpuset.7.html
    """
    expanded = []
    for token in list.split(','):
        if '-' in token:
            lo, hi = token.split('-')
            expanded.extend(range(int(lo), int(hi) + 1))
        else:
            expanded.append(int(token))
    return expanded
+
def read_cpuset(self):
    """Read the list of cpus on which we are allowed to execute.

    Checks the cgroup v2 location first, then falls back to cgroup v1.
    Stores the expanded cpu list in self.cpu_mapping.
    """
    cpu_set_file = '/sys/fs/cgroup/cpuset.cpus'
    cmd = 'test -e {0} && echo exists'.format(cpu_set_file)
    if (self._client.run_cmd(cmd).decode().rstrip()):
        cmd = 'cat {}'.format(cpu_set_file)
    else:
        cpu_set_file = '/sys/fs/cgroup/cpuset/cpuset.cpus'
        cmd = 'test -e {0} && echo exists'.format(cpu_set_file)
        if (self._client.run_cmd(cmd).decode().rstrip()):
            cmd = 'cat {}'.format(cpu_set_file)
        else:
            # BUG FIX: the original message contained a stray '{'
            # ('{Cannot determine cpuset'), leftover of an unfinished
            # str.format() call.
            RapidLog.critical('Cannot determine cpuset')
    cpuset_cpus = self._client.run_cmd(cmd).decode().rstrip()
    RapidLog.debug('{} ({}): Allocated cpuset: {}'.format(
        self.name, self.ip, cpuset_cpus))
    self.cpu_mapping = self.expand_list_format(cpuset_cpus)
    RapidLog.debug('{} ({}): Expanded cpuset: {}'.format(
        self.name, self.ip, self.cpu_mapping))

    # Log the relative->physical CPU core mapping for user information
    cpu_mapping_str = ', '.join('[{}->{}]'.format(i, cpu)
                                for i, cpu in enumerate(self.cpu_mapping))
    RapidLog.debug('{} ({}): CPU mapping: {}'.format(
        self.name, self.ip, cpu_mapping_str))
+
+ def remap_cpus(self, cpus):
+ """Convert relative cpu ids provided as function parameter to match
+ cpu ids from allocated list
+ """
+ cpus_remapped = []
+ for cpu in cpus:
+ cpus_remapped.append(self.cpu_mapping[cpu])
+ return cpus_remapped
+
+ def remap_all_cpus(self):
+ """Convert relative cpu ids for different parameters (mcore, cores)
+ """
+ if self.cpu_mapping is None:
+ RapidLog.debug('{} ({}): cpu mapping is not defined! Please check the configuration!'.format(self.name, self.ip))
+ return
+
+ if 'mcore' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['mcore'])
+ RapidLog.debug('{} ({}): mcore {} remapped to {}'.format(self.name, self.ip, self.machine_params['mcore'], cpus_remapped))
+ self.machine_params['mcore'] = cpus_remapped
+
+ if 'cores' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['cores'])
+ RapidLog.debug('{} ({}): cores {} remapped to {}'.format(self.name, self.ip, self.machine_params['cores'], cpus_remapped))
+ self.machine_params['cores'] = cpus_remapped
+
+ if 'altcores' in self.machine_params.keys():
+ cpus_remapped = self.remap_cpus(self.machine_params['altcores'])
+ RapidLog.debug('{} ({}): altcores {} remapped to {}'.format(self.name, self.ip, self.machine_params['altcores'], cpus_remapped))
+ self.machine_params['altcores'] = cpus_remapped
+
+ def devbind(self):
+ # Script to bind the right network interface to the poll mode driver
+ for index, dp_port in enumerate(self.dp_ports, start = 1):
+ DevBindFileName = self.rundir + '/devbind-{}-port{}.sh'.format(self.ip, index)
+ self._client.scp_put('./devbind.sh', DevBindFileName)
+ cmd = 'sed -i \'s/MACADDRESS/' + dp_port['mac'] + '/\' ' + DevBindFileName
+ result = self._client.run_cmd(cmd)
+ RapidLog.debug('devbind.sh MAC updated for port {} on {} {}'.format(index, self.name, result))
+ if ((not self.configonly) and self.machine_params['prox_launch_exit']):
+ result = self._client.run_cmd(DevBindFileName)
+ RapidLog.debug('devbind.sh running for port {} on {} {}'.format(index, self.name, result))
+
+ def generate_lua(self, appendix = ''):
+ self.LuaFileName = 'parameters-{}.lua'.format(self.ip)
+ with open(self.LuaFileName, "w") as LuaFile:
+ LuaFile.write('require "helper"\n')
+ LuaFile.write('name="%s"\n'% self.name)
+ for index, dp_port in enumerate(self.dp_ports, start = 1):
+ LuaFile.write('local_ip{}="{}"\n'.format(index, dp_port['ip']))
+ LuaFile.write('local_hex_ip{}=convertIPToHex(local_ip{})\n'.format(index, index))
+ if self.vim in ['kubernetes']:
+ cmd = 'cat /opt/rapid/dpdk_version'
+ dpdk_version = self._client.run_cmd(cmd).decode().rstrip()
+ if (dpdk_version >= '20.11.0'):
+ allow_parameter = 'allow'
+ else:
+ allow_parameter = 'pci-whitelist'
+ eal_line = 'eal=\"--file-prefix {}{} --{} {} --force-max-simd-bitwidth=512'.format(
+ self.name, str(uuid.uuid4()), allow_parameter,
+ self.machine_params['dp_pci_dev'])
+ looking_for_qat = True
+ index = 0
+ while (looking_for_qat):
+ if 'qat_pci_dev{}'.format(index) in self.machine_params:
+ eal_line += ' --{} {}'.format(allow_parameter,
+ self.machine_params['qat_pci_dev{}'.format(index)])
+ index += 1
+ else:
+ looking_for_qat = False
+ eal_line += '"\n'
+ LuaFile.write(eal_line)
+ else:
+ LuaFile.write("eal=\"\"\n")
+ if 'mcore' in self.machine_params.keys():
+ LuaFile.write('mcore="%s"\n'% ','.join(map(str,
+ self.machine_params['mcore'])))
+ if 'cores' in self.machine_params.keys():
+ LuaFile.write('cores="%s"\n'% ','.join(map(str,
+ self.machine_params['cores'])))
+ if 'altcores' in self.machine_params.keys():
+ LuaFile.write('altcores="%s"\n'% ','.join(map(str,
+ self.machine_params['altcores'])))
+ if 'ports' in self.machine_params.keys():
+ LuaFile.write('ports="%s"\n'% ','.join(map(str,
+ self.machine_params['ports'])))
+ if 'dest_ports' in self.machine_params.keys():
+ for index, dest_port in enumerate(self.machine_params['dest_ports'], start = 1):
+ LuaFile.write('dest_ip{}="{}"\n'.format(index, dest_port['ip']))
+ LuaFile.write('dest_hex_ip{}=convertIPToHex(dest_ip{})\n'.format(index, index))
+ if dest_port['mac']:
+ LuaFile.write('dest_hex_mac{}="{}"\n'.format(index ,
+ dest_port['mac'].replace(':',' ')))
+ if 'gw_vm' in self.machine_params.keys():
+ for index, gw_ip in enumerate(self.machine_params['gw_ips'],
+ start = 1):
+ LuaFile.write('gw_ip{}="{}"\n'.format(index, gw_ip))
+ LuaFile.write('gw_hex_ip{}=convertIPToHex(gw_ip{})\n'.
+ format(index, index))
+ LuaFile.write(appendix)
+ self._client.scp_put(self.LuaFileName, self.rundir + '/parameters.lua')
+ self._client.scp_put('helper.lua', self.rundir + '/helper.lua')
+
+ def start_prox(self, autostart=''):
+ if self.machine_params['prox_socket']:
+ self._client = prox_ctrl(self.ip, self.key, self.user,
+ self.password)
+ self._client.test_connection()
+ if self.vim in ['OpenStack']:
+ self.devbind()
+ if self.vim in ['kubernetes']:
+ self.read_cpuset()
+ self.remap_all_cpus()
+ _, prox_config_file_name = os.path.split(self.
+ machine_params['config_file'])
+ if self.machine_params['prox_launch_exit']:
+ self.generate_lua()
+ self._client.scp_put(self.machine_params['config_file'], '{}/{}'.
+ format(self.rundir, prox_config_file_name))
+ if not self.configonly:
+ cmd = 'sudo {}/prox {} -t -o cli -f {}/{}'.format(self.rundir,
+ autostart, self.rundir, prox_config_file_name)
+ RapidLog.debug("Starting PROX on {}: {}".format(self.name,
+ cmd))
+ result = self._client.run_cmd(cmd)
+ RapidLog.debug("Finished PROX on {}: {}".format(self.name,
+ cmd))
+
+ def close_prox(self):
+ if (not self.configonly) and self.machine_params[
+ 'prox_socket'] and self.machine_params['prox_launch_exit']:
+ self.socket.quit_prox()
+ self._client.scp_get('/prox.log', '{}/{}.prox.log'.format(
+ self.resultsdir, self.name))
+
+ def connect_prox(self):
+ if self.machine_params['prox_socket']:
+ self.socket = self._client.connect_socket()
+
+ def start(self):
+ self.socket.start(self.get_cores())
+
+ def stop(self):
+ self.socket.stop(self.get_cores())
+
+ def reset_stats(self):
+ self.socket.reset_stats()
+
+ def core_stats(self):
+ return (self.socket.core_stats(self.get_cores(), self.all_tasks_for_this_cfg))
+
+ def multi_port_stats(self):
+ return (self.socket.multi_port_stats(self.dpdk_port_index))
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
new file mode 100644
index 00000000..143323b8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_parser.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+from rapid_log import RapidLog
+from past.utils import old_div
+try:
+ import configparser
+except ImportError:
+ # Python 2.x fallback
+ import ConfigParser as configparser
+import ast
+inf = float("inf")
+
+class RapidConfigParser(object):
+ """
+ Class to deal with rapid configuration files
+ """
+ @staticmethod
+ def parse_config(test_params):
+ testconfig = configparser.RawConfigParser()
+ testconfig.read(test_params['test_file'])
+ test_params['required_number_of_test_machines'] = int(testconfig.get(
+ 'TestParameters', 'total_number_of_test_machines'))
+ test_params['number_of_tests'] = int(testconfig.get('TestParameters',
+ 'number_of_tests'))
+ test_params['TestName'] = testconfig.get('TestParameters', 'name')
+ if testconfig.has_option('TestParameters', 'lat_percentile'):
+ test_params['lat_percentile'] = old_div(float(
+ testconfig.get('TestParameters', 'lat_percentile')),100.0)
+ else:
+ test_params['lat_percentile'] = 0.99
+ RapidLog.info('Latency percentile at {:.0f}%'.format(
+ test_params['lat_percentile']*100))
+ if testconfig.has_option('TestParameters', 'sleep_time'):
+ test_params['sleep_time'] = int(testconfig.get('TestParameters', 'sleep_time'))
+ if test_params['sleep_time'] < 2:
+ test_params['sleep_time'] = 2
+ else:
+ test_params['sleep_time'] = 2
+
+ if testconfig.has_option('TestParameters', 'ipv6'):
+ test_params['ipv6'] = testconfig.getboolean('TestParameters','ipv6')
+ else:
+ test_params['ipv6'] = False
+ config = configparser.RawConfigParser()
+ config.read(test_params['environment_file'])
+ test_params['vim_type'] = config.get('Varia', 'vim')
+ test_params['user'] = config.get('ssh', 'user')
+ if config.has_option('ssh', 'key'):
+ test_params['key'] = config.get('ssh', 'key')
+ else:
+ test_params['key'] = None
+ if config.has_option('ssh', 'password'):
+ test_params['password'] = config.get('ssh', 'password')
+ else:
+ test_params['password'] = None
+ test_params['total_number_of_machines'] = int(config.get('rapid',
+ 'total_number_of_machines'))
+ tests = []
+ test = {}
+ for test_index in range(1, test_params['number_of_tests']+1):
+ test.clear()
+ section = 'test%d'%test_index
+ options = testconfig.options(section)
+ for option in options:
+ if option in ['imix','imixs','flows', 'warmupimix']:
+ test[option] = ast.literal_eval(testconfig.get(section,
+ option))
+ elif option in ['maxframespersecondallingress','stepsize',
+ 'flowsize','warmupflowsize','warmuptime', 'steps']:
+ test[option] = int(testconfig.get(section, option))
+ elif option in ['startspeed', 'step', 'drop_rate_threshold',
+ 'generator_threshold','lat_avg_threshold','lat_perc_threshold',
+ 'lat_max_threshold','accuracy','maxr','maxz',
+ 'ramp_step','warmupspeed','mis_ordered_threshold']:
+ test[option] = float(testconfig.get(section, option))
+ else:
+ test[option] = testconfig.get(section, option)
+ tests.append(dict(test))
+ for test in tests:
+ if test['test'] in ['flowsizetest', 'TST009test', 'increment_till_fail']:
+ if 'drop_rate_threshold' not in test.keys():
+ test['drop_rate_threshold'] = 0
+ thresholds = ['generator_threshold','lat_avg_threshold', \
+ 'lat_perc_threshold','lat_max_threshold','mis_ordered_threshold']
+ for threshold in thresholds:
+ if threshold not in test.keys():
+ test[threshold] = inf
+ test_params['tests'] = tests
+ if test_params['required_number_of_test_machines'] > test_params[
+ 'total_number_of_machines']:
+ RapidLog.exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
+ raise Exception("Not enough VMs for this test: %d needed and only %d available" % (required_number_of_test_machines,total_number_of_machines))
+ map_info = test_params['machine_map_file'].strip('[]').split(',')
+ map_info_length = len(map_info)
+ # If map_info is a list where the first entry is numeric, we assume we
+ # are dealing with a list of machines and NOT the machine.map file
+ if map_info[0].isnumeric():
+ if map_info_length < test_params[
+ 'required_number_of_test_machines']:
+ RapidLog.exception('Not enough machine indices in --map \
+ parameter: {}. Needing {} entries'.format(map_info,
+ test_params['required_number_of_test_machines']))
+ machine_index = list(map(int,map_info))
+ else:
+ machine_map = configparser.RawConfigParser()
+ machine_map.read(test_params['machine_map_file'])
+ machine_index = []
+ for test_machine in range(1,
+ test_params['required_number_of_test_machines']+1):
+ machine_index.append(int(machine_map.get(
+ 'TestM%d'%test_machine, 'machine_index')))
+ machine_map = configparser.RawConfigParser()
+ machine_map.read(test_params['machine_map_file'])
+ machines = []
+ machine = {}
+ for test_machine in range(1, test_params[
+ 'required_number_of_test_machines']+1):
+ machine.clear()
+ section = 'TestM%d'%test_machine
+ options = testconfig.options(section)
+ for option in options:
+ if option in ['prox_socket','prox_launch_exit','monitor']:
+ machine[option] = testconfig.getboolean(section, option)
+ elif option in ['mcore', 'cores', 'gencores', 'latcores',
+ 'altcores']:
+ machine[option] = ast.literal_eval(testconfig.get(
+ section, option))
+ elif option in ['bucket_size_exp']:
+ machine[option] = int(testconfig.get(section, option))
+ if machine[option] < 11:
+ RapidLog.exception(
+ "Minimum Value for bucket_size_exp is 11")
+ else:
+ machine[option] = testconfig.get(section, option)
+ for key in ['prox_socket','prox_launch_exit']:
+ if key not in machine.keys():
+ machine[key] = True
+ if 'monitor' not in machine.keys():
+ machine['monitor'] = True
+ section = 'M%d'%machine_index[test_machine-1]
+ options = config.options(section)
+ for option in options:
+ machine[option] = config.get(section, option)
+ machines.append(dict(machine))
+ for machine in machines:
+ dp_ports = []
+ if 'dest_vm' in machine.keys():
+ index = 1
+ while True:
+ dp_ip_key = 'dp_ip{}'.format(index)
+ dp_mac_key = 'dp_mac{}'.format(index)
+ if dp_ip_key in machines[int(machine['dest_vm'])-1].keys():
+ if dp_mac_key in machines[int(machine['dest_vm'])-1].keys():
+ dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
+ 'mac' : machines[int(machine['dest_vm'])-1][dp_mac_key]}
+ else:
+ dp_port = {'ip': machines[int(machine['dest_vm'])-1][dp_ip_key],
+ 'mac' : None}
+ dp_ports.append(dict(dp_port))
+ index += 1
+ else:
+ break
+ machine['dest_ports'] = list(dp_ports)
+ gw_ips = []
+ if 'gw_vm' in machine.keys():
+ index = 1
+ while True:
+ gw_ip_key = 'dp_ip{}'.format(index)
+ if gw_ip_key in machines[int(machine['gw_vm'])-1].keys():
+ gw_ip = machines[int(machine['gw_vm'])-1][gw_ip_key]
+ gw_ips.append(gw_ip)
+ index += 1
+ else:
+ break
+ machine['gw_ips'] = list(gw_ips)
+ test_params['machines'] = machines
+ return (test_params)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
new file mode 100644
index 00000000..8157ddf2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_portstatstest.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+import requests
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class PortStatsTest(RapidTest):
+    """
+    Class to manage the portstatstesting
+    """
+    def __init__(self, test_param, runtime, testname, environment_file,
+            machines):
+        super().__init__(test_param, runtime, testname, environment_file)
+        # machines: list of machine objects to poll for port statistics
+        self.machines = machines
+
+    def run(self):
+        # Poll port statistics on every machine twice per second and print
+        # one report line per machine each time its tsc has advanced. The
+        # remaining duration is decreased only once all machines reported.
+        result_details = {'Details': 'Nothing'}
+        RapidLog.info("+---------------------------------------------------------------------------+")
+        RapidLog.info("| Measuring port statistics on 1 or more PROX instances |")
+        RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
+        RapidLog.info("| PROX ID | Time | RX | TX | no MBUFS | ierr&imiss |")
+        RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
+        duration = float(self.test['runtime'])
+        # Parallel lists indexed like self.machines: previous and current
+        # counter snapshots used to compute per-interval deltas.
+        old_rx = []; old_tx = []; old_no_mbufs = []; old_errors = []; old_tsc = []
+        new_rx = []; new_tx = []; new_no_mbufs = []; new_errors = []; new_tsc = []
+        machines_to_go = len (self.machines)
+        for machine in self.machines:
+            machine.reset_stats()
+            old_rx.append(0); old_tx.append(0); old_no_mbufs.append(0); old_errors.append(0); old_tsc.append(0)
+            old_rx[-1], old_tx[-1], old_no_mbufs[-1], old_errors[-1], old_tsc[-1] = machine.multi_port_stats()
+            new_rx.append(0); new_tx.append(0); new_no_mbufs.append(0); new_errors.append(0); new_tsc.append(0)
+        while (duration > 0):
+            time.sleep(0.5)
+            # Get statistics after some execution time
+            for i, machine in enumerate(self.machines, start=0):
+                new_rx[i], new_tx[i], new_no_mbufs[i], new_errors[i], new_tsc[i] = machine.multi_port_stats()
+                rx = new_rx[i] - old_rx[i]
+                tx = new_tx[i] - old_tx[i]
+                no_mbufs = new_no_mbufs[i] - old_no_mbufs[i]
+                errors = new_errors[i] - old_errors[i]
+                tsc = new_tsc[i] - old_tsc[i]
+                if tsc == 0 :
+                    # No new measurement from this machine yet; skip it
+                    continue
+                machines_to_go -= 1
+                old_rx[i] = new_rx[i]
+                old_tx[i] = new_tx[i]
+                old_no_mbufs[i] = new_no_mbufs[i]
+                old_errors[i] = new_errors[i]
+                old_tsc[i] = new_tsc[i]
+                RapidLog.info('|{:>10.0f}'.format(i)+ ' |{:>10.0f}'.format(duration)+' | ' + '{:>10.0f}'.format(rx) + ' | ' +'{:>10.0f}'.format(tx) + ' | '+'{:>10.0f}'.format(no_mbufs)+' | '+'{:>10.0f}'.format(errors)+' |')
+                result_details = {'test': self.test['test'],
+                        'environment_file': self.test['environment_file'],
+                        'PROXID': i,
+                        'StepSize': duration,
+                        'Received': rx,
+                        'Sent': tx,
+                        'NoMbufs': no_mbufs,
+                        'iErrMiss': errors}
+                result_details = self.post_data(result_details)
+            if machines_to_go == 0:
+                duration = duration - 1
+                machines_to_go = len (self.machines)
+        RapidLog.info("+-----------+-----------+------------+------------+------------+------------+")
+        return (True, result_details)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
new file mode 100644
index 00000000..6ecdb277
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
@@ -0,0 +1,49 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
+NhAAAAAwEAAQAAAgEArNsWTFD70ljjL+WnXc0GblN7KliciiuGS2Cg/tcP8zZHvzk8/lkR
+85EcXGpvYrHkTF1daZCbQUy3is0KvP27OholrxVv9HAn4BkA2ugWxp2FaePHKp0FBkMgup
+GHFVhzeg4hA4oFtjpaM95ATMcWTB++7nul6dW+f5/vhxzya5ypEg19ywtZmDooiXz6fWoa
+WgSqjy0NiLFoJEoNE5JYjz2XHTgBDKZ7Sr+oAto9/cOe3G5JsCyMFvCIIhrm/YIs8pwkqJ
+sPMEPg6DbG6P6S1YbnL6rM/BswVjp1IoWpPVbmZhDbhlNSk/4ZDIrMtbKBQPHP90Ku+C5i
+jY6ZNJ4gD7Cwm+ZLp4qdIqJoNoezmG8C0YvO8WvfMLRoyUChwSL3PmUGl02JdWJgYG/B37
+fJQbm80d6HOvAE5rvO5Z9dbwBvzZC0Yp5dX130OtNajpOhfBRN1qbIYYGgpIuLEgQUKC39
+/i1hGMNTOVDjJ4GNbiSUhUkbc64j0k2B+uYs947tfuwrotNumJIuDmwtqxUHwCuKNThUVh
+A3U1tblCWMS6ExVY4zawElXBT/preiAYaFlzFuYoHjzuWXN0WOv08tiRJL1lrfMis8Z9so
+fYc3qBSqlLgAsW5dtB5PMIy3JxXWqjFQIdgjlxWZ54Bu9t5fqPSggS+dNjDacl0v1e6ByB
+kAAAdQW2kXgltpF4IAAAAHc3NoLXJzYQAAAgEArNsWTFD70ljjL+WnXc0GblN7KliciiuG
+S2Cg/tcP8zZHvzk8/lkR85EcXGpvYrHkTF1daZCbQUy3is0KvP27OholrxVv9HAn4BkA2u
+gWxp2FaePHKp0FBkMgupGHFVhzeg4hA4oFtjpaM95ATMcWTB++7nul6dW+f5/vhxzya5yp
+Eg19ywtZmDooiXz6fWoaWgSqjy0NiLFoJEoNE5JYjz2XHTgBDKZ7Sr+oAto9/cOe3G5JsC
+yMFvCIIhrm/YIs8pwkqJsPMEPg6DbG6P6S1YbnL6rM/BswVjp1IoWpPVbmZhDbhlNSk/4Z
+DIrMtbKBQPHP90Ku+C5ijY6ZNJ4gD7Cwm+ZLp4qdIqJoNoezmG8C0YvO8WvfMLRoyUChwS
+L3PmUGl02JdWJgYG/B37fJQbm80d6HOvAE5rvO5Z9dbwBvzZC0Yp5dX130OtNajpOhfBRN
+1qbIYYGgpIuLEgQUKC39/i1hGMNTOVDjJ4GNbiSUhUkbc64j0k2B+uYs947tfuwrotNumJ
+IuDmwtqxUHwCuKNThUVhA3U1tblCWMS6ExVY4zawElXBT/preiAYaFlzFuYoHjzuWXN0WO
+v08tiRJL1lrfMis8Z9sofYc3qBSqlLgAsW5dtB5PMIy3JxXWqjFQIdgjlxWZ54Bu9t5fqP
+SggS+dNjDacl0v1e6ByBkAAAADAQABAAACABLHepSv96vSnFwHxzcZnyk9SJRBLECWmfB2
+fwcwtjrmGsVbopS/eIPNsBcaOR+v0+239v4RB80AWLBrtk7yAfU+AfoTiiY0SSC/lqgxrs
+fFNUlbxbeLd5BGmreqN9LJ2UHZZxzLUfOKQ2J/Mt0kg/ehO00Ngej1n8ydw5gaPPwT+QpN
+DO2SPhmbt+u3+D7H2DUPbLhBXMcM/xNyOBl4PMbTGifCfdqx+5MTX11v+GwpZIjuMnNBY7
+baSu/pnE7OZbO14wWuUugbd8PCr7mAbtNj5Jn5JGv/SDEWCMPHYauYVU+hZTgitUX+xRnn
+unXC/uffXYivZfLwlyRp6Zsd0r2z3dY+bjhZ/SBheAmP3FaKy4ZA1ggn7VHCM/RWywJJlP
+/xdKHWQs2j/kF+s84Z5+eb6r1p3xBS7Dv3Lt9KQPN/nLciJNWYwUHiVXo3BtFw4IRosP+k
+W4Km3bfmfs0yrgrAdypUeLHbD9fyYu/BjhdcDqCj9ntlxUnDfo4WQga1J1kY/5zUDOpVCV
+LYit6y4SCvFM1H8mIHX9n3jxEfs1fdx52OhcahfGc7Qg8EbMJFt3CqXcc4ErVkUxC61sWX
+7mfFqzp0eho1QrGU5a+1l9UaVTJhN1B0ruhEfdBm1FahcQ91ZEn2m6Wf1P0+RImI7m0cH1
+FZ0WDdX+DETUWNHr0BAAABAGEBn6UfyzTYtk/HWW8Px+ae60U4BJCcQ8m/ARSMGGLds2f3
+5NJjm6KliZJ+b7sdN4UYj2hm9zxjef+kwFXUEYmYVm16NufQRR1svF7YqLzNnOQ7eXluZS
+S3SEj1siziCveQ6kyLYrfedNtX/TErdR5SFqcbuanMzd7mqw1vMpejoEGKriSpYOSohsZW
+7Rkcej3XSR4jt5pzxfzUObcKrm5mWAYddINbflAYVswpT/LxNl7jduUsQd3Ul6fOBX4sBK
+rWYMv3Qo4z25oShqvWOJbvvQ1voTOiDF8LTOu60/YbbOfF116J6BcWTHbwe8z+Du8SxdVi
+1N4tFcadL7HqsZEAAAEBAN4ma7nbSI0fA3QM1IK9h5cN/h0qMk91Syh7+vFyNfe/DILFnJ
+0TGNaYhAow1jNMOQKeyEJOfuZkeMdR9/ohtfwSvzSJml/k0JV9aIZHehncZOMt93Gi6WtC
++Os2owyhcXMJN7MbKo1e3Ln21OyaAJi6TAdwSDivFSytvNCKoX8NncQu/UIPzNQVJcrvJn
+SZ+0AHFeuZVl9HgxZY1fUvIs24m9QnYH3HpMiYc2p8UT1hEOqq1bJpgKx9WHhj0fNCBsZ1
+6zTnCDa/HiDADHmlif6pyEu7nD+3MHAeGxS7LJjmMSvtbH/ltrYaz6wFSowlr/RiX7Z8pT
+Ib1lf7KPYulYUAAAEBAMcxzoKSEZt/eYz5w4h9Bs6tdBEBnmSzwni8P0DTv1q0sDan1g4Q
++Mcuo42lSXS9aTmfI+hJDRSuRraLE9xzmxUJ+R2bQkpOLgG6QOF1uU36ZtMoxtptII8pXT
+yQtIW2sHSz9Kgv16PFp98EaEfwzmdk/C8A6NxoGW7EpzAXzXZYLRSwgAr6wVE83jUsbIu5
+lAN6DG6vIm62PLsxmpDZuS5idQwxP8DP4itHMMRh2jE0+msQAWHRQ514nCTqeuy/ORbNSO
+4A1yMy1KxXBH6hQ/oE8ZXqtBqJ3CbINPEyuLK9PYj9e2zABoEOcXTaJcvmVve97xhhw6om
+zVgd4qw70oUAAAAVeWt5bHVsaW5AMGJkODI0NDk5MTYwAQIDBAUG
+-----END OPENSSH PRIVATE KEY-----
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub
new file mode 100644
index 00000000..c735d178
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCs2xZMUPvSWOMv5addzQZuU3sqWJyKK4ZLYKD+1w/zNke/OTz+WRHzkRxcam9iseRMXV1pkJtBTLeKzQq8/bs6GiWvFW/0cCfgGQDa6BbGnYVp48cqnQUGQyC6kYcVWHN6DiEDigW2Oloz3kBMxxZMH77ue6Xp1b5/n++HHPJrnKkSDX3LC1mYOiiJfPp9ahpaBKqPLQ2IsWgkSg0TkliPPZcdOAEMpntKv6gC2j39w57cbkmwLIwW8IgiGub9gizynCSomw8wQ+DoNsbo/pLVhucvqsz8GzBWOnUihak9VuZmENuGU1KT/hkMisy1soFA8c/3Qq74LmKNjpk0niAPsLCb5kunip0iomg2h7OYbwLRi87xa98wtGjJQKHBIvc+ZQaXTYl1YmBgb8Hft8lBubzR3oc68ATmu87ln11vAG/NkLRinl1fXfQ601qOk6F8FE3WpshhgaCki4sSBBQoLf3+LWEYw1M5UOMngY1uJJSFSRtzriPSTYH65iz3ju1+7Cui026Yki4ObC2rFQfAK4o1OFRWEDdTW1uUJYxLoTFVjjNrASVcFP+mt6IBhoWXMW5igePO5Zc3RY6/Ty2JEkvWWt8yKzxn2yh9hzeoFKqUuACxbl20Hk8wjLcnFdaqMVAh2COXFZnngG723l+o9KCBL502MNpyXS/V7oHIGQ== default@default
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py
new file mode 100644
index 00000000..d8aeacc1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_sshclient.py
@@ -0,0 +1,164 @@
+##
+## Copyright (c) 2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import paramiko
+from scp import SCPClient
+import logging
+
+class SSHClient:
+ """Wrapper class for paramiko module to connect via SSH
+ """
+ _log = None
+
+ _ip = None
+ _user = None
+ _rsa_private_key = None
+ _timeout = None
+ _ssh = None
+ _connected = False
+
+ _output = None
+ _error = None
+
+ def __init__(self, ip=None, user=None, rsa_private_key=None, timeout=15,
+ logger_name=None, password = None):
+ self._ip = ip
+ self._user = user
+ self._password = password
+ self._rsa_private_key = rsa_private_key
+ self._timeout = timeout
+
+ if (logger_name is not None):
+ self._log = logging.getLogger(logger_name)
+
+ self._connected = False
+
+ def set_credentials(self, ip, user, rsa_private_key, password = None):
+ self._ip = ip
+ self._user = user
+ self._password = password
+ self._rsa_private_key = rsa_private_key
+
+ def connect(self):
+
+ if self._connected:
+ if (self._log is not None):
+ self._log.debug("Already connected!")
+ return
+ if ((self._ip is None) or (self._user is None) or
+ ((self._rsa_private_key is None) ==
+ (self._password is None))):
+ if (self._log is not None):
+ self._log.error("Wrong parameter! IP %s, user %s, RSA private key %s"
+ % (self._ip, self._user, self._rsa_private_key))
+ self._connected = False
+ return
+
+ self._ssh = paramiko.SSHClient()
+ self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ if (self._rsa_private_key is not None):
+ private_key = paramiko.RSAKey.from_private_key_file(self._rsa_private_key)
+ else:
+ private_key = None
+
+ try:
+ self._ssh.connect(hostname = self._ip, username = self._user,
+ password = self._password, pkey = private_key)
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to connect to the host! IP %s, user %s, RSA private key %s\n%s"
+ % (self._ip, self._user, self._rsa_private_key, e))
+ self._connected = False
+ self._ssh.close()
+ return
+
+ self._connected = True
+
+ def disconnect(self):
+ if self._connected:
+ self._connected = False
+ self._ssh.close()
+
+ def run_cmd(self, cmd):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ _stdin, stdout, stderr = self._ssh.exec_command(cmd, timeout = self._timeout)
+ self._output = stdout.read()
+ self._error = stderr.read()
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def scp_put(self, src, dst):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ scp = SCPClient(self._ssh.get_transport())
+ scp.put(src, dst)
+ self._output = stdout.read()
+ self._error = stderr.read()
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def scp_get(self, src, dst):
+ self.connect()
+
+ if self._connected is not True:
+ return -1
+
+ try:
+ ret = 0
+ scp = SCPClient(self._ssh.get_transport())
+ scp.get(src, dst)
+ self._output = stdout.read()
+ self._error = stderr.read()
+ except Exception as e:
+ if (self._log is not None):
+ self._log.error("Failed to execute command! IP %s, cmd %s\n%s"
+ % (self._ip, cmd, e))
+ ret = -1
+
+ self.disconnect()
+
+ return ret
+
+ def get_output(self):
+ return self._output
+
+ def get_error(self):
+ return self._error
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
new file mode 100644
index 00000000..deba695f
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_test.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import yaml
+import requests
+import time
+import os
+import copy
+from past.utils import old_div
+from rapid_log import RapidLog
+from rapid_log import bcolors
+inf = float("inf")
+from datetime import datetime as dt
+
+_CURR_DIR = os.path.dirname(os.path.realpath(__file__))
+
+class RapidTest(object):
+ """
+ Class to manage the testing
+ """
+    def __init__(self, test_param, runtime, testname, environment_file ):
+        # Store the test description and load the result reporting template
+        # (format.yaml, located next to this file).
+        self.test = test_param
+        self.test['runtime'] = runtime
+        self.test['testname'] = testname
+        self.test['environment_file'] = environment_file
+        if 'maxr' not in self.test.keys():
+            # Default: a single run per iteration
+            self.test['maxr'] = 1
+        if 'maxz' not in self.test.keys():
+            self.test['maxz'] = inf
+        with open(os.path.join(_CURR_DIR,'format.yaml')) as f:
+            self.data_format = yaml.load(f, Loader=yaml.FullLoader)
+
+    @staticmethod
+    def get_percentageof10Gbps(pps_speed,size):
+        # speed is given in pps, returning % of 10Gb/s
+        # 12 bytes is the inter packet gap
+        # pre-amble is 7 bytes
+        # SFD (start of frame delimiter) is 1 byte
+        # Total of 20 bytes overhead per packet
+        # size: frame size in bytes (without the 20-byte overhead)
+        return (pps_speed / 1000000.0 * 0.08 * (size+20))
+
+    @staticmethod
+    def get_pps(speed,size):
+        # speed is given in % of 10Gb/s, returning Mpps
+        # 12 bytes is the inter packet gap
+        # pre-amble is 7 bytes
+        # SFD (start of frame delimiter) is 1 byte
+        # Total of 20 bytes overhead per packet
+        return (speed * 100.0 / (8*(size+20)))
+
+    @staticmethod
+    def get_speed(packet_speed,size):
+        # return speed in Gb/s
+        # packet_speed is given in Mpps
+        # 12 bytes is the inter packet gap
+        # pre-amble is 7 bytes
+        # SFD (start of frame delimiter) is 1 byte
+        # Total of 20 bytes overhead per packet
+        return (packet_speed / 1000.0 * (8*(size+20)))
+
+    @staticmethod
+    def set_background_flows(background_machines, number_of_flows):
+        # Configure the flow count on all background traffic generators
+        for machine in background_machines:
+            _ = machine.set_flows(number_of_flows)
+
+    @staticmethod
+    def set_background_speed(background_machines, speed):
+        # Apply the generator speed (presumably % of line rate, as elsewhere
+        # in this class — confirm against set_generator_speed) to every
+        # background generator
+        for machine in background_machines:
+            machine.set_generator_speed(speed)
+
+    @staticmethod
+    def set_background_size(background_machines, imix):
+        # imixs is a list of packet sizes
+        for machine in background_machines:
+            machine.set_udp_packet_size(imix)
+
+    @staticmethod
+    def start_background_traffic(background_machines):
+        # Start traffic on all background generators
+        for machine in background_machines:
+            machine.start()
+
+    @staticmethod
+    def stop_background_traffic(background_machines):
+        # Stop traffic on all background generators
+        for machine in background_machines:
+            machine.stop()
+
+    @staticmethod
+    def parse_data_format_dict(data_format, variables):
+        # Recursively replace, in place, every leaf value of data_format
+        # that names a key in variables by that variable's value.
+        for k, v in data_format.items():
+            if type(v) is dict:
+                RapidTest.parse_data_format_dict(v, variables)
+            else:
+                if v in variables.keys():
+                    data_format[k] = variables[v]
+
+ def post_data(self, variables):
+ test_type = type(self).__name__
+ var = copy.deepcopy(self.data_format)
+ self.parse_data_format_dict(var, variables)
+ if var.keys() >= {'URL', test_type, 'Format'}:
+ URL=''
+ for value in var['URL'].values():
+ URL = URL + value
+ HEADERS = {'X-Requested-With': 'Python requests', 'Content-type': 'application/rapid'}
+ if var['Format'] == 'PushGateway':
+ data = "\n".join("{} {}".format(k, v) for k, v in var[test_type].items()) + "\n"
+ response = requests.post(url=URL, data=data,headers=HEADERS)
+ elif var['Format'] == 'Xtesting':
+ data = var[test_type]
+ response = requests.post(url=URL, json=data)
+ if (response.status_code >= 300):
+ RapidLog.info('Cannot send metrics to {}'.format(URL))
+ RapidLog.info(data)
+ return (var[test_type])
+
+    @staticmethod
+    def report_result(flow_number, size, data, prefix):
+        # Build one console report line from the measurement dict `data`.
+        # A negative flow_number is rendered between parentheses; None
+        # values in data are rendered as 'NA'. `prefix` holds per-column
+        # colour/marker strings (from bcolors) keyed by metric name.
+        if flow_number < 0:
+            flow_number_str = '| ({:>4}) |'.format(abs(flow_number))
+        else:
+            flow_number_str = '|{:>7} |'.format(flow_number)
+        if data['pps_req_tx'] is None:
+            pps_req_tx_str = '{0: >14}'.format(' NA |')
+        else:
+            pps_req_tx_str = '{:>7.3f} Mpps |'.format(data['pps_req_tx'])
+        if data['pps_tx'] is None:
+            pps_tx_str = '{0: >14}'.format(' NA |')
+        else:
+            pps_tx_str = '{:>7.3f} Mpps |'.format(data['pps_tx'])
+        if data['pps_sut_tx'] is None:
+            pps_sut_tx_str = '{0: >14}'.format(' NA |')
+        else:
+            pps_sut_tx_str = '{:>7.3f} Mpps |'.format(data['pps_sut_tx'])
+        if data['pps_rx'] is None:
+            pps_rx_str = '{0: >25}'.format('NA |')
+        else:
+            pps_rx_str = bcolors.OKBLUE + '{:>4.1f} Gb/s |{:7.3f} Mpps {}|'.format(
+                RapidTest.get_speed(data['pps_rx'],size),data['pps_rx'],bcolors.ENDC)
+        if data['abs_dropped'] is None:
+            tot_drop_str = ' | NA | '
+        else:
+            tot_drop_str = ' | {:>9.0f} | '.format(data['abs_dropped'])
+        if data['lat_perc'] is None:
+            lat_perc_str = '|{:^10.10}|'.format('NA')
+        elif data['lat_perc_max'] == True:
+            # Percentile hit the last histogram bucket: value is a lower bound
+            lat_perc_str = '|>{}{:>5.0f} us{} |'.format(prefix['lat_perc'],
+                float(data['lat_perc']), bcolors.ENDC)
+        else:
+            lat_perc_str = '| {}{:>5.0f} us{} |'.format(prefix['lat_perc'],
+                float(data['lat_perc']), bcolors.ENDC)
+        if data['actual_duration'] is None:
+            elapsed_time_str = ' NA |'
+        else:
+            elapsed_time_str = '{:>3.0f} |'.format(data['actual_duration'])
+        if data['mis_ordered'] is None:
+            mis_ordered_str = ' NA '
+        else:
+            mis_ordered_str = '{:>9.0f} '.format(data['mis_ordered'])
+        # Assemble the full line: flows, speed, tx/rx rates, latencies,
+        # absolute counters, drop rates, mis-ordered count, elapsed time.
+        return(flow_number_str + '{:>5.1f}'.format(data['speed']) + '% ' + prefix['speed']
+            + '{:>6.3f}'.format(RapidTest.get_pps(data['speed'],size)) + ' Mpps|' +
+            pps_req_tx_str + pps_tx_str + bcolors.ENDC + pps_sut_tx_str +
+            pps_rx_str + prefix['lat_avg'] + ' {:>6.0f}'.format(data['lat_avg']) +
+            ' us' + lat_perc_str +prefix['lat_max']+'{:>6.0f}'.format(data['lat_max'])
+            + ' us | ' + '{:>9.0f}'.format(data['abs_tx']) + ' | {:>9.0f}'.format(data['abs_rx']) +
+            ' | '+ prefix['abs_drop_rate']+ '{:>9.0f}'.format(data['abs_tx']-data['abs_rx']) +
+            tot_drop_str + prefix['drop_rate'] +
+            '{:>5.2f}'.format(100*old_div(float(data['abs_tx']-data['abs_rx']),data['abs_tx'])) + ' |' +
+            prefix['mis_ordered'] + mis_ordered_str + bcolors.ENDC +
+            ' |' + elapsed_time_str)
+
+ def run_iteration(self, requested_duration, flow_number, size, speed):
+ BUCKET_SIZE_EXP = self.gen_machine.bucket_size_exp
+ sleep_time = self.test['sleep_time']
+ LAT_PERCENTILE = self.test['lat_percentile']
+ iteration_data= {}
+ time_loop_data= {}
+ iteration_data['r'] = 0;
+
+ while (iteration_data['r'] < self.test['maxr']):
+ self.gen_machine.start_latency_cores()
+ time.sleep(sleep_time)
+ # Sleep_time is needed to be able to do accurate measurements to check for packet loss. We need to make this time large enough so that we do not take the first measurement while some packets from the previous tests migth still be in flight
+ t1_rx, t1_non_dp_rx, t1_tx, t1_non_dp_tx, t1_drop, t1_tx_fail, t1_tsc, abs_tsc_hz = self.gen_machine.core_stats()
+ t1_dp_rx = t1_rx - t1_non_dp_rx
+ t1_dp_tx = t1_tx - t1_non_dp_tx
+ self.gen_machine.set_generator_speed(0)
+ self.gen_machine.start_gen_cores()
+ self.set_background_speed(self.background_machines, 0)
+ self.start_background_traffic(self.background_machines)
+ if 'ramp_step' in self.test.keys():
+ ramp_speed = self.test['ramp_step']
+ else:
+ ramp_speed = speed
+ while ramp_speed < speed:
+ self.gen_machine.set_generator_speed(ramp_speed)
+ self.set_background_speed(self.background_machines, ramp_speed)
+ time.sleep(2)
+ ramp_speed = ramp_speed + self.test['ramp_step']
+ self.gen_machine.set_generator_speed(speed)
+ self.set_background_speed(self.background_machines, speed)
+ iteration_data['speed'] = speed
+ time_loop_data['speed'] = speed
+ time.sleep(2) ## Needs to be 2 seconds since this 1 sec is the time that PROX uses to refresh the stats. Note that this can be changed in PROX!! Don't do it.
+ start_bg_gen_stats = []
+ for bg_gen_machine in self.background_machines:
+ bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, _ = bg_gen_machine.core_stats()
+ bg_gen_stat = {
+ "bg_dp_rx" : bg_rx - bg_non_dp_rx,
+ "bg_dp_tx" : bg_tx - bg_non_dp_tx,
+ "bg_tsc" : bg_tsc
+ }
+ start_bg_gen_stats.append(dict(bg_gen_stat))
+ if self.sut_machine!= None:
+ t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
+ t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc, tsc_hz = self.gen_machine.core_stats()
+ tx = t2_tx - t1_tx
+ iteration_data['abs_tx'] = tx - (t2_non_dp_tx - t1_non_dp_tx )
+ iteration_data['abs_rx'] = t2_rx - t1_rx - (t2_non_dp_rx - t1_non_dp_rx)
+ iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
+ if tx == 0:
+ RapidLog.critical("TX = 0. Test interrupted since no packet has been sent.")
+ if iteration_data['abs_tx'] == 0:
+ RapidLog.critical("Only non-dataplane packets (e.g. ARP) sent. Test interrupted since no packet has been sent.")
+ # Ask PROX to calibrate the bucket size once we have a PROX function to do this.
+ # Measure latency statistics per second
+ iteration_data.update(self.gen_machine.lat_stats())
+ t2_lat_tsc = iteration_data['lat_tsc']
+ sample_count = 0
+ for sample_percentile, bucket in enumerate(iteration_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
+ break
+ iteration_data['lat_perc_max'] = (sample_percentile == len(iteration_data['buckets']))
+ iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) / (old_div(float(iteration_data['lat_hz']),float(10**6)))
+ time_loop_data['bucket_size'] = iteration_data['bucket_size']
+ iteration_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
+ if self.test['test'] == 'fixed_rate':
+ iteration_data['pps_req_tx'] = None
+ iteration_data['pps_tx'] = None
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['pps_rx'] = None
+ iteration_data['lat_perc'] = None
+ iteration_data['actual_duration'] = None
+ iteration_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'mis_ordered' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(flow_number, size,
+ iteration_data, iteration_prefix ))
+ tot_rx = tot_non_dp_rx = tot_tx = tot_non_dp_tx = tot_drop = 0
+ iteration_data['lat_avg'] = iteration_data['lat_used'] = 0
+ tot_lat_measurement_duration = float(0)
+ iteration_data['actual_duration'] = float(0)
+ tot_sut_core_measurement_duration = float(0)
+ tot_sut_rx = tot_sut_non_dp_rx = tot_sut_tx = tot_sut_non_dp_tx = tot_sut_drop = tot_sut_tx_fail = tot_sut_tsc = 0
+ lat_avail = core_avail = sut_avail = False
+ while (iteration_data['actual_duration'] - float(requested_duration) <= 0.1) or (tot_lat_measurement_duration - float(requested_duration) <= 0.1):
+ time.sleep(0.5)
+ time_loop_data.update(self.gen_machine.lat_stats())
+ # Get statistics after some execution time
+ if time_loop_data['lat_tsc'] != t2_lat_tsc:
+ single_lat_measurement_duration = (time_loop_data['lat_tsc'] - t2_lat_tsc) * 1.0 / time_loop_data['lat_hz'] # time difference between the 2 measurements, expressed in seconds.
+ # A second has passed in between to lat_stats requests. Hence we need to process the results
+ tot_lat_measurement_duration = tot_lat_measurement_duration + single_lat_measurement_duration
+ if iteration_data['lat_min'] > time_loop_data['lat_min']:
+ iteration_data['lat_min'] = time_loop_data['lat_min']
+ if iteration_data['lat_max'] < time_loop_data['lat_max']:
+ iteration_data['lat_max'] = time_loop_data['lat_max']
+ iteration_data['lat_avg'] = iteration_data['lat_avg'] + time_loop_data['lat_avg'] * single_lat_measurement_duration # Sometimes, There is more than 1 second between 2 lat_stats. Hence we will take the latest measurement
+                        iteration_data['lat_used'] = iteration_data['lat_used'] + time_loop_data['lat_used'] * single_lat_measurement_duration # and give it more weight.
+ sample_count = 0
+ for sample_percentile, bucket in enumerate(time_loop_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(time_loop_data['buckets']) * LAT_PERCENTILE:
+ break
+ time_loop_data['lat_perc_max'] = (sample_percentile == len(time_loop_data['buckets']))
+ time_loop_data['lat_perc'] = sample_percentile * iteration_data['bucket_size']
+ iteration_data['buckets'] = [iteration_data['buckets'][i] + time_loop_data['buckets'][i] for i in range(len(iteration_data['buckets']))]
+ t2_lat_tsc = time_loop_data['lat_tsc']
+ lat_avail = True
+ t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc, tsc_hz = self.gen_machine.core_stats()
+ if t3_tsc != t2_tsc:
+ time_loop_data['actual_duration'] = (t3_tsc - t2_tsc) * 1.0 / tsc_hz # time difference between the 2 measurements, expressed in seconds.
+ iteration_data['actual_duration'] = iteration_data['actual_duration'] + time_loop_data['actual_duration']
+ delta_rx = t3_rx - t2_rx
+ tot_rx += delta_rx
+ delta_non_dp_rx = t3_non_dp_rx - t2_non_dp_rx
+ tot_non_dp_rx += delta_non_dp_rx
+ delta_tx = t3_tx - t2_tx
+ tot_tx += delta_tx
+ delta_non_dp_tx = t3_non_dp_tx - t2_non_dp_tx
+ tot_non_dp_tx += delta_non_dp_tx
+ delta_dp_tx = delta_tx -delta_non_dp_tx
+ delta_dp_rx = delta_rx -delta_non_dp_rx
+ time_loop_data['abs_dropped'] = delta_dp_tx - delta_dp_rx
+ iteration_data['abs_dropped'] += time_loop_data['abs_dropped']
+ delta_drop = t3_drop - t2_drop
+ tot_drop += delta_drop
+ t2_rx, t2_non_dp_rx, t2_tx, t2_non_dp_tx, t2_drop, t2_tx_fail, t2_tsc = t3_rx, t3_non_dp_rx, t3_tx, t3_non_dp_tx, t3_drop, t3_tx_fail, t3_tsc
+ core_avail = True
+ if self.sut_machine!=None:
+ t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc, sut_tsc_hz = self.sut_machine.core_stats()
+ if t3_sut_tsc != t2_sut_tsc:
+ single_sut_core_measurement_duration = (t3_sut_tsc - t2_sut_tsc) * 1.0 / sut_tsc_hz # time difference between the 2 measurements, expressed in seconds.
+ tot_sut_core_measurement_duration = tot_sut_core_measurement_duration + single_sut_core_measurement_duration
+ tot_sut_rx += t3_sut_rx - t2_sut_rx
+ tot_sut_non_dp_rx += t3_sut_non_dp_rx - t2_sut_non_dp_rx
+ delta_sut_tx = t3_sut_tx - t2_sut_tx
+ tot_sut_tx += delta_sut_tx
+ delta_sut_non_dp_tx = t3_sut_non_dp_tx - t2_sut_non_dp_tx
+ tot_sut_non_dp_tx += delta_sut_non_dp_tx
+ t2_sut_rx, t2_sut_non_dp_rx, t2_sut_tx, t2_sut_non_dp_tx, t2_sut_drop, t2_sut_tx_fail, t2_sut_tsc = t3_sut_rx, t3_sut_non_dp_rx, t3_sut_tx, t3_sut_non_dp_tx, t3_sut_drop, t3_sut_tx_fail, t3_sut_tsc
+ sut_avail = True
+ if self.test['test'] == 'fixed_rate':
+ if lat_avail == core_avail == True:
+ lat_avail = core_avail = False
+ time_loop_data['pps_req_tx'] = (delta_tx + delta_drop - delta_rx)/time_loop_data['actual_duration']/1000000
+ time_loop_data['pps_tx'] = delta_tx/time_loop_data['actual_duration']/1000000
+ if self.sut_machine != None and sut_avail:
+ time_loop_data['pps_sut_tx'] = delta_sut_tx/single_sut_core_measurement_duration/1000000
+ sut_avail = False
+ else:
+ time_loop_data['pps_sut_tx'] = None
+ time_loop_data['pps_rx'] = delta_rx/time_loop_data['actual_duration']/1000000
+ time_loop_data['abs_tx'] = delta_dp_tx
+ time_loop_data['abs_rx'] = delta_dp_rx
+ time_loop_prefix = {'speed' : '',
+ 'lat_avg' : '',
+ 'lat_perc' : '',
+ 'lat_max' : '',
+ 'abs_drop_rate' : '',
+ 'mis_ordered' : '',
+ 'drop_rate' : ''}
+ RapidLog.info(self.report_result(flow_number, size, time_loop_data,
+ time_loop_prefix))
+ time_loop_data['test'] = self.test['testname']
+ time_loop_data['environment_file'] = self.test['environment_file']
+ time_loop_data['Flows'] = flow_number
+ time_loop_data['Size'] = size
+ time_loop_data['RequestedSpeed'] = RapidTest.get_pps(speed, size)
+ _ = self.post_data(time_loop_data)
+ end_bg_gen_stats = []
+ for bg_gen_machine in self.background_machines:
+ bg_rx, bg_non_dp_rx, bg_tx, bg_non_dp_tx, _, _, bg_tsc, bg_hz = bg_gen_machine.core_stats()
+ bg_gen_stat = {"bg_dp_rx" : bg_rx - bg_non_dp_rx,
+ "bg_dp_tx" : bg_tx - bg_non_dp_tx,
+ "bg_tsc" : bg_tsc,
+ "bg_hz" : bg_hz
+ }
+ end_bg_gen_stats.append(dict(bg_gen_stat))
+ self.stop_background_traffic(self.background_machines)
+ i = 0
+ bg_rates =[]
+ while i < len(end_bg_gen_stats):
+ bg_rates.append(0.000001*(end_bg_gen_stats[i]['bg_dp_rx'] -
+ start_bg_gen_stats[i]['bg_dp_rx']) / ((end_bg_gen_stats[i]['bg_tsc'] -
+ start_bg_gen_stats[i]['bg_tsc']) * 1.0 / end_bg_gen_stats[i]['bg_hz']))
+ i += 1
+ if len(bg_rates):
+ iteration_data['avg_bg_rate'] = sum(bg_rates) / len(bg_rates)
+ RapidLog.debug('Average Background traffic rate: {:>7.3f} Mpps'.format(iteration_data['avg_bg_rate']))
+ else:
+ iteration_data['avg_bg_rate'] = None
+ #Stop generating
+ self.gen_machine.stop_gen_cores()
+ time.sleep(3.5)
+ self.gen_machine.stop_latency_cores()
+ iteration_data['r'] += 1
+ iteration_data['lat_avg'] = old_div(iteration_data['lat_avg'], float(tot_lat_measurement_duration))
+ iteration_data['lat_used'] = old_div(iteration_data['lat_used'], float(tot_lat_measurement_duration))
+ t4_tsc = t2_tsc
+ while t4_tsc == t2_tsc:
+ t4_rx, t4_non_dp_rx, t4_tx, t4_non_dp_tx, t4_drop, t4_tx_fail, t4_tsc, abs_tsc_hz = self.gen_machine.core_stats()
+ if self.test['test'] == 'fixed_rate':
+ iteration_data['lat_tsc'] = t2_lat_tsc
+ while iteration_data['lat_tsc'] == t2_lat_tsc:
+ iteration_data.update(self.gen_machine.lat_stats())
+ sample_count = 0
+ for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
+ break
+ iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
+ iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
+ delta_rx = t4_rx - t2_rx
+ delta_non_dp_rx = t4_non_dp_rx - t2_non_dp_rx
+ delta_tx = t4_tx - t2_tx
+ delta_non_dp_tx = t4_non_dp_tx - t2_non_dp_tx
+ delta_dp_tx = delta_tx -delta_non_dp_tx
+ delta_dp_rx = delta_rx -delta_non_dp_rx
+ iteration_data['abs_tx'] = delta_dp_tx
+ iteration_data['abs_rx'] = delta_dp_rx
+ iteration_data['abs_dropped'] += delta_dp_tx - delta_dp_rx
+ iteration_data['pps_req_tx'] = None
+ iteration_data['pps_tx'] = None
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['drop_rate'] = 100.0*(iteration_data['abs_tx']-iteration_data['abs_rx'])/iteration_data['abs_tx']
+ iteration_data['actual_duration'] = None
+ break ## Not really needed since the while loop will stop when evaluating the value of r
+ else:
+ sample_count = 0
+ for percentile, bucket in enumerate(iteration_data['buckets'],start=1):
+ sample_count += bucket
+ if sample_count > sum(iteration_data['buckets']) * LAT_PERCENTILE:
+ break
+ iteration_data['lat_perc_max'] = (percentile == len(iteration_data['buckets']))
+ iteration_data['lat_perc'] = percentile * iteration_data['bucket_size']
+ iteration_data['pps_req_tx'] = (tot_tx + tot_drop - tot_rx)/iteration_data['actual_duration']/1000000.0 # tot_drop is all packets dropped by all tasks. This includes packets dropped at the generator task + packets dropped by the nop task. In steady state, this equals to the number of packets received by this VM
+ iteration_data['pps_tx'] = tot_tx/iteration_data['actual_duration']/1000000.0 # tot_tx is all generated packets actually accepted by the interface
+ iteration_data['pps_rx'] = tot_rx/iteration_data['actual_duration']/1000000.0 # tot_rx is all packets received by the nop task = all packets received in the gen VM
+ if self.sut_machine != None and sut_avail:
+ iteration_data['pps_sut_tx'] = tot_sut_tx / tot_sut_core_measurement_duration / 1000000.0
+ else:
+ iteration_data['pps_sut_tx'] = None
+ iteration_data['abs_tx'] = (t4_tx - t1_tx) - (t4_non_dp_tx - t1_non_dp_tx)
+ iteration_data['abs_rx'] = (t4_rx - t1_rx) - (t4_non_dp_rx - t1_non_dp_rx)
+ iteration_data['abs_dropped'] = iteration_data['abs_tx'] - iteration_data['abs_rx']
+ iteration_data['drop_rate'] = 100.0*iteration_data['abs_dropped']/iteration_data['abs_tx']
+ if ((iteration_data['drop_rate'] < self.test['drop_rate_threshold']) or (iteration_data['abs_dropped'] == self.test['drop_rate_threshold'] ==0) or (iteration_data['abs_dropped'] > self.test['maxz'])):
+ break
+ self.gen_machine.stop_latency_cores()
+ iteration_data['abs_tx_fail'] = t4_tx_fail - t1_tx_fail
+ return (iteration_data)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
new file mode 100644
index 00000000..a86ce806
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_warmuptest.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import sys
+import time
+from rapid_log import RapidLog
+from rapid_test import RapidTest
+
+class WarmupTest(RapidTest):
+ """
+ Class to manage the warmup testing
+ """
+ def __init__(self, test_param, gen_machine):
+ self.test = test_param
+ self.gen_machine = gen_machine
+
+ def run(self):
+ # Running at low speed to make sure the ARP messages can get through.
+ # If not doing this, the ARP message could be dropped by a switch in overload and then the test will not give proper results
+        # Note however that if we were to run the test steps for a very long time, the ARP would expire in the switch.
+        # PROX will send a new ARP request every second so chances are very low that they will all fail to get through
+ imix = self.test['warmupimix']
+ FLOWSIZE = self.test['warmupflowsize']
+ WARMUPSPEED = self.test['warmupspeed']
+ WARMUPTIME = self.test['warmuptime']
+ self.gen_machine.set_generator_speed(WARMUPSPEED)
+ self.gen_machine.set_udp_packet_size(imix)
+ # gen_machine['socket'].set_value(gencores,0,56,1,1)
+ if FLOWSIZE:
+ _ = self.gen_machine.set_flows(FLOWSIZE)
+ self.gen_machine.start()
+ time.sleep(WARMUPTIME)
+ self.gen_machine.stop()
+ # gen_machine['socket'].set_value(gencores,0,56,50,1)
+ time.sleep(WARMUPTIME)
+ return (True, None)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py b/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py
new file mode 100644
index 00000000..2f6b9443
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/rapidxt.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python3
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+# pylint: disable=missing-docstring
+
+import json
+import os
+import sys
+import time
+
+from xtesting.core import testcase
+from runrapid import RapidTestManager
+from rapid_cli import RapidCli
+from rapid_log import RapidLog
+
+class RapidXt(testcase.TestCase):
+
+ def run(self, **kwargs):
+ try:
+ test_params = RapidTestManager.get_defaults()
+ for key in kwargs:
+ test_params[key] = kwargs[key]
+ os.makedirs(self.res_dir, exist_ok=True)
+ test_params['resultsdir'] = self.res_dir
+ _, test_file_name = os.path.split(test_params['test_file'])
+ _, environment_file_name = os.path.split(
+ test_params['environment_file'])
+ log_file = '{}/RUN{}.{}.log'.format(self.res_dir,
+ environment_file_name, test_file_name)
+ RapidLog.log_init(log_file, test_params['loglevel'],
+ test_params['screenloglevel'] , test_params['version'] )
+ test_manager = RapidTestManager()
+ self.start_time = time.time()
+ self.result, self.details = test_manager.run_tests(test_params)
+ self.stop_time = time.time()
+ RapidLog.log_close()
+
+ except Exception: # pylint: disable=broad-except
+ print("Unexpected error:", sys.exc_info()[0])
+ self.result = 0
+ self.stop_time = time.time()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
new file mode 100755
index 00000000..7ec270a1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/runrapid.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python3
+
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+from __future__ import print_function
+from __future__ import print_function
+from __future__ import division
+
+from future import standard_library
+standard_library.install_aliases()
+from builtins import object
+import os
+import sys
+import concurrent.futures
+from concurrent.futures import ALL_COMPLETED
+from rapid_cli import RapidCli
+from rapid_log import RapidLog
+from rapid_parser import RapidConfigParser
+from rapid_defaults import RapidDefaults
+from rapid_machine import RapidMachine
+from rapid_generator_machine import RapidGeneratorMachine
+from rapid_flowsizetest import FlowSizeTest
+from rapid_corestatstest import CoreStatsTest
+from rapid_portstatstest import PortStatsTest
+from rapid_impairtest import ImpairTest
+from rapid_irqtest import IrqTest
+from rapid_warmuptest import WarmupTest
+
+class RapidTestManager(object):
+ """
+ RapidTestManager Class
+ """
+ def __init__(self):
+ """
+ Init Function
+ """
+ self.machines = []
+
+ def __del__(self):
+ for machine in self.machines:
+ machine.close_prox()
+
+ @staticmethod
+ def get_defaults():
+ return (RapidDefaults.test_params)
+
+ def run_tests(self, test_params):
+ test_params = RapidConfigParser.parse_config(test_params)
+ monitor_gen = monitor_sut = False
+ background_machines = []
+ sut_machine = gen_machine = None
+ configonly = test_params['configonly']
+ machine_names = []
+ machine_counter = {}
+ for machine_params in test_params['machines']:
+ if machine_params['name'] not in machine_names:
+ machine_names.append(machine_params['name'])
+ machine_counter[machine_params['name']] = 1
+ else:
+ machine_counter[machine_params['name']] += 1
+ machine_params['name'] = '{}_{}'.format(machine_params['name'],
+ machine_counter[machine_params['name']])
+ if 'gencores' in machine_params.keys():
+ machine = RapidGeneratorMachine(test_params['key'],
+ test_params['user'], test_params['password'],
+ test_params['vim_type'], test_params['rundir'],
+ test_params['resultsdir'], machine_params, configonly,
+ test_params['ipv6'])
+ if machine_params['monitor']:
+ if monitor_gen:
+ RapidLog.exception("Can only monitor 1 generator")
+ raise Exception("Can only monitor 1 generator")
+ else:
+ monitor_gen = True
+ gen_machine = machine
+ else:
+ background_machines.append(machine)
+ else:
+ machine = RapidMachine(test_params['key'], test_params['user'],
+ test_params['password'], test_params['vim_type'],
+ test_params['rundir'], test_params['resultsdir'],
+ machine_params, configonly)
+ if machine_params['monitor']:
+ if monitor_sut:
+ RapidLog.exception("Can only monitor 1 sut")
+ raise Exception("Can only monitor 1 sut")
+ else:
+ monitor_sut = True
+ if machine_params['prox_socket']:
+ sut_machine = machine
+ self.machines.append(machine)
+ RapidLog.debug(test_params)
+ try:
+ prox_executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.machines))
+ self.future_to_prox = {prox_executor.submit(machine.start_prox): machine for machine in self.machines}
+ if configonly:
+ concurrent.futures.wait(self.future_to_prox,return_when=ALL_COMPLETED)
+ sys.exit()
+ socket_executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.machines))
+ future_to_connect_prox = {socket_executor.submit(machine.connect_prox): machine for machine in self.machines}
+ concurrent.futures.wait(future_to_connect_prox,return_when=ALL_COMPLETED)
+ result = 0
+ for test_param in test_params['tests']:
+ RapidLog.info(test_param['test'])
+ if test_param['test'] in ['flowsizetest', 'TST009test',
+ 'fixed_rate', 'increment_till_fail']:
+ test = FlowSizeTest(test_param,
+ test_params['lat_percentile'],
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ gen_machine,
+ sut_machine, background_machines,
+ test_params['sleep_time'])
+ elif test_param['test'] in ['corestatstest']:
+ test = CoreStatsTest(test_param,
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ self.machines)
+ elif test_param['test'] in ['portstatstest']:
+ test = PortStatsTest(test_param,
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ self.machines)
+ elif test_param['test'] in ['impairtest']:
+ test = ImpairTest(test_param,
+ test_params['lat_percentile'],
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ gen_machine,
+ sut_machine, background_machines)
+ elif test_param['test'] in ['irqtest']:
+ test = IrqTest(test_param,
+ test_params['runtime'],
+ test_params['TestName'],
+ test_params['environment_file'],
+ self.machines)
+ elif test_param['test'] in ['warmuptest']:
+ test = WarmupTest(test_param,
+ gen_machine)
+ else:
+ RapidLog.debug('Test name ({}) is not valid:'.format(
+ test_param['test']))
+ single_test_result, result_details = test.run()
+ result = result + single_test_result
+ for machine in self.machines:
+ machine.close_prox()
+ concurrent.futures.wait(self.future_to_prox,
+ return_when=ALL_COMPLETED)
+ except (ConnectionError, KeyboardInterrupt) as e:
+ result = result_details = None
+ socket_executor.shutdown(wait=False)
+ socket_executor._threads.clear()
+ prox_executor.shutdown(wait=False)
+ prox_executor._threads.clear()
+ concurrent.futures.thread._threads_queues.clear()
+ RapidLog.error("Test interrupted: {} {}".format(
+ type(e).__name__,e))
+ return (result, result_details)
+
+def main():
+ """Main function.
+ """
+ test_params = RapidTestManager.get_defaults()
+ # When no cli is used, the process_cli can be replaced by code modifying
+ # test_params
+ test_params = RapidCli.process_cli(test_params)
+ _, test_file_name = os.path.split(test_params['test_file'])
+ _, environment_file_name = os.path.split(test_params['environment_file'])
+ if 'resultsdir' in test_params:
+ res_dir = test_params['resultsdir']
+ log_file = '{}/RUN{}.{}.log'.format(res_dir,environment_file_name,
+ test_file_name)
+ else:
+ log_file = 'RUN{}.{}.log'.format(environment_file_name, test_file_name)
+ RapidLog.log_init(log_file, test_params['loglevel'],
+ test_params['screenloglevel'] , test_params['version'] )
+ test_manager = RapidTestManager()
+ test_result, _ = test_manager.run_tests(test_params)
+ RapidLog.log_close()
+
+if __name__ == "__main__":
+ main()
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg
new file mode 100644
index 00000000..bac49bd5
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.cfg
@@ -0,0 +1,16 @@
+[metadata]
+name = rapidxt
+version = 1
+
+[files]
+packages = .
+package_dir = .
+
+[options.data_files]
+. = format.yaml
+
+[entry_points]
+xtesting.testcase =
+ rapidxt = rapidxt:RapidXt
+[options.packages.find]
+where = .
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py
new file mode 100644
index 00000000..fa9d59ac
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/setup.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+# pylint: disable=missing-docstring
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh
new file mode 100755
index 00000000..c2c4ab07
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/sharkproxlog.sh
@@ -0,0 +1,31 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+## This code will help in using tshark to decode packets that were dumped
+## in the prox.log file as a result of dump, dump_tx or dump_rx commands
+
+#egrep '^[0-9]{4}|^[0-9]+\.' prox.log | text2pcap -q - - | tshark -r -
+while read -r line ; do
+ if [[ $line =~ (^[0-9]{4}\s.*) ]] ;
+ then
+ echo "$line" >> tempshark.log
+ fi
+ if [[ $line =~ (^[0-9]+\.[0-9]+)(.*) ]] ;
+ then
+ date -d@"${BASH_REMATCH[1]}" -u +%H:%M:%S.%N >> tempshark.log
+ fi
+done < <(cat prox.log)
+text2pcap -t "%H:%M:%S." -q tempshark.log - | tshark -r -
+rm tempshark.log
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
new file mode 100755
index 00000000..7038ab66
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/stackdeployment.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+
+##
+## Copyright (c) 2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+import os_client_config
+import heatclient
+from heatclient.client import Client as Heat_Client
+from keystoneclient.v3 import Client as Keystone_Client
+from heatclient.common import template_utils
+from novaclient import client as NovaClient
+import yaml
+import os
+import time
+import sys
+from collections import OrderedDict
+from rapid_log import RapidLog
+
+class StackDeployment(object):
+ """Deployment class to create VMs for test execution in OpenStack
+ environment.
+ """
+ def __init__(self, cloud_name):
+# RapidLog.log_init('CREATEStack.log', 'DEBUG', 'INFO', '2020.05.05')
+ self.dp_ips = []
+ self.dp_macs = []
+ self.mngmt_ips = []
+ self.names = []
+ self.number_of_servers = 0
+ self.cloud_name = cloud_name
+ self.heat_template = 'L6_heat_template.yaml'
+ self.heat_param = 'params_rapid.yaml'
+ self.cloud_config = os_client_config.OpenStackConfig().get_all_clouds()
+ ks_client = None
+ for cloud in self.cloud_config:
+ if cloud.name == self.cloud_name:
+ ks_client = Keystone_Client(**cloud.config['auth'])
+ break
+ if ks_client == None:
+ sys.exit()
+ heat_endpoint = ks_client.service_catalog.url_for(service_type='orchestration',
+ endpoint_type='publicURL')
+ self.heatclient = Heat_Client('1', heat_endpoint, token=ks_client.auth_token)
+ self.nova_client = NovaClient.Client(2, **cloud.config['auth'])
+
+ def generate_paramDict(self):
+ for output in self.stack.output_list()['outputs']:
+ output_value = self.stack.output_show(output['output_key'])['output']['output_value']
+ for server_group_output in output_value:
+ if (output['output_key'] == 'number_of_servers'):
+ self.number_of_servers += int (server_group_output)
+ elif (output['output_key'] == 'mngmt_ips'):
+ for ip in server_group_output:
+ self.mngmt_ips.append(ip)
+ elif (output['output_key'] == 'data_plane_ips'):
+ for dps in server_group_output:
+ self.dp_ips.append(dps)
+ elif (output['output_key'] == 'data_plane_macs'):
+ for mac in server_group_output:
+ self.dp_macs.append(mac)
+ elif (output['output_key'] == 'server_name'):
+ for name in server_group_output:
+ self.names.append(name)
+
+ def print_paramDict(self, user, dataplane_subnet_mask):
+ if not(len(self.dp_ips) == len(self.dp_macs) == len(self.mngmt_ips)):
+ sys.exit()
+ _ENV_FILE_DIR = os.path.dirname(os.path.realpath(__file__))
+ env_file = os.path.join(_ENV_FILE_DIR, self.stack.stack_name)+ '.env'
+ with open(env_file, 'w') as env_file:
+ env_file.write('[rapid]\n')
+ env_file.write('total_number_of_machines = {}\n'.format(str(self.number_of_servers)))
+ env_file.write('\n')
+ for count in range(self.number_of_servers):
+ env_file.write('[M' + str(count+1) + ']\n')
+ env_file.write('name = {}\n'.format(str(self.names[count])))
+ env_file.write('admin_ip = {}\n'.format(str(self.mngmt_ips[count])))
+ if type(self.dp_ips[count]) == list:
+ for i, dp_ip in enumerate(self.dp_ips[count], start = 1):
+ env_file.write('dp_ip{} = {}/{}\n'.format(i, str(dp_ip),
+ dataplane_subnet_mask))
+ else:
+ env_file.write('dp_ip1 = {}/{}\n'.format(str(self.dp_ips[count]),
+ dataplane_subnet_mask))
+ if type(self.dp_macs[count]) == list:
+ for i, dp_mac in enumerate(self.dp_macs[count], start = 1):
+ env_file.write('dp_mac{} = {}\n'.format(i, str(dp_mac)))
+ else:
+ env_file.write('dp_mac1 = {}\n'.format(str(self.dp_macs[count])))
+ env_file.write('\n')
+ env_file.write('[ssh]\n')
+ env_file.write('key = {}\n'.format(self.key_name))
+ env_file.write('user = {}\n'.format(user))
+ env_file.write('\n')
+ env_file.write('[Varia]\n')
+ env_file.write('vim = OpenStack\n')
+ env_file.write('stack = {}\n'.format(self.stack.stack_name))
+
+ def create_stack(self, stack_name, stack_file_path, heat_parameters):
+ files, template = template_utils.process_template_path(stack_file_path)
+ stack_created = self.heatclient.stacks.create(stack_name = stack_name,
+ template = template, parameters = heat_parameters,
+ files = files)
+ stack = self.heatclient.stacks.get(stack_created['stack']['id'],
+ resolve_outputs=True)
+        # Poll at 5 second intervals, until the status is no longer 'CREATE_IN_PROGRESS'
+ while stack.stack_status == 'CREATE_IN_PROGRESS':
+ print('waiting..')
+ time.sleep(5)
+ stack = self.heatclient.stacks.get(stack_created['stack']['id'], resolve_outputs=True)
+ if stack.stack_status == 'CREATE_COMPLETE':
+ return stack
+ else:
+ RapidLog.exception('Error in stack deployment')
+
+ def create_key(self):
+ if os.path.exists(self.key_name):
+ public_key_file = "{}.pub".format(self.key_name)
+ if not os.path.exists(public_key_file):
+ RapidLog.critical('Keypair {}.pub does not exist'.format(
+ self.key_name))
+ with open(public_key_file, mode='rb') as public_file:
+ public_key = public_file.read()
+ else:
+ public_key = None
+ keypair = self.nova_client.keypairs.create(name = self.key_name,
+ public_key = public_key)
+ # Create a file for writing that can only be read and written by owner
+ if not os.path.exists(self.key_name):
+ fp = os.open(self.key_name, os.O_WRONLY | os.O_CREAT, 0o600)
+ with os.fdopen(fp, 'w') as f:
+ f.write(keypair.private_key)
+ RapidLog.info('Keypair {} created'.format(self.key_name))
+
+ def IsDeployed(self, stack_name):
+ for stack in self.heatclient.stacks.list():
+ if stack.stack_name == stack_name:
+ RapidLog.info('Stack already existing: {}'.format(stack_name))
+ self.stack = stack
+ return True
+ return False
+
+ def IsKey(self):
+ keypairs = self.nova_client.keypairs.list()
+ if next((x for x in keypairs if x.name == self.key_name), None):
+ RapidLog.info('Keypair {} already exists'.format(self.key_name))
+ return True
+ return False
+
+ def deploy(self, stack_name, heat_template, heat_param):
+ heat_parameters_file = open(heat_param)
+ heat_parameters = yaml.load(heat_parameters_file,
+ Loader=yaml.BaseLoader)['parameters']
+ heat_parameters_file.close()
+ self.key_name = heat_parameters['PROX_key']
+ if not self.IsDeployed(stack_name):
+ if not self.IsKey():
+ self.create_key()
+ self.stack = self.create_stack(stack_name, heat_template,
+ heat_parameters)
+
+ def generate_env_file(self, user = 'centos', dataplane_subnet_mask = '24'):
+ self.generate_paramDict()
+ self.print_paramDict(user, dataplane_subnet_mask)
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh b/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
new file mode 100755
index 00000000..78772dd2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/start.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+##
+## Copyright (c) 2010-2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+function save_k8s_envs()
+{
+ printenv | grep "PCIDEVICE" > /opt/rapid/k8s_sriov_device_plugin_envs
+ printenv | grep "QAT[0-9]" > /opt/rapid/k8s_qat_device_plugin_envs
+}
+
+function create_tun()
+{
+ mkdir -p /dev/net
+ mknod /dev/net/tun c 10 200
+ chmod 600 /dev/net/tun
+}
+
+save_k8s_envs
+create_tun
+
+# Ready for testing
+touch /opt/rapid/system_ready_for_rapid
+
+# Start SSH server in background
+echo "mkdir -p /var/run/sshd" >> /etc/rc.local
+service ssh start
+
+echo "rapid ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
+
+sleep infinity
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README
new file mode 100644
index 00000000..9e26fdb1
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/README
@@ -0,0 +1,194 @@
+##
+## Copyright (c) 2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# This README is describing the format of all the rapid test files that you can
+# find in this directory.
+# These files can be specified as a parameter for the runrapid.py script, using
+# the --test [testfile] option. The default file name is specified in
+# rapid_defaults.py and is basicrapid.test.
+#
+# There are 3 types of sections in this config file:
+# - the [TestParameters] section, which defines how many [TestMx] sections and
+# how many [testy] sections need to be present in this file.
+# - at least one TestMachine section [TestMx], where x is the index of the Test
+# machines starting at index 1
+# - at least one tests definition section [testy], where y is the index of the
+# test to be run. Index starts at 1.
+
+[TestParameters]
+# The name of this test. Can be chosen freely to describe this test
+name = BasicSwapTesting
+
+# Defines how many different tests will be executed when running this test file.
+# This is usually set to 1. You need to define as many [testy] sections as
+# defined in this parameter.
+number_of_tests = 1
+
+# The next parameter defines how many PROX instances are needed to run this test.
+# You need to define as many [TestMx] sections as defined in this parameter.
+total_number_of_test_machines = 2
+
+# Some rapid tests are reporting the latency percentile statistics. This
+# parameter defines which latency percentile will be used for this test.
+lat_percentile = 99
+
+# When doing ipv6 testing, this parameter needs to be set to True, default is
+# False. This is used by the generator code to calculate the proper packet
+# header length offsets.
+ipv6 = True
+
+# The following section describes the role of the first Test Machine. Note that
+# the connection details for each PROX instance are defined in the environment
+# file (default: rapid.env). There is a --map parameter for runrapid.py that
+# specifies how the Test machines are mapped onto the available PROX instances.
+[TestM1]
+# Name can be freely chosen
+name = Generator
+# the PROX configuration files that will be used to start PROX on this Test
+# machine. This configuration file will define the role that PROX will play in
+# this Test machine.
+config_file = configs/gen.cfg
+# The values of the remaining parameters in this section are passed on to the
+# PROX configuration file through a file called parameters.lua
+#
+# The next parameter defines the destination Test machine index. This will be
+# used by a generator to define which destination MAC or IP addresses should be
+# used in the generated packets. The fact that we use MAC or IP addresses is
+# defined by the use of l2 or l3.
+dest_vm = 2
+# The next parameter defines the GW Test machine index. This will be
+# used by a generator to define which GW MAC or IP addresses should be
+# used in the generated packets. The fact that we use MAC or IP addresses is
+# defined by the use of l2 or l3.
+#gw_vm = 2
+# mcore defines which master core PROX will use. It is not advised to change
+# this. The PROX instances are optimized to use core 0 for the master and all
+# other cores for DPDK usage.
+mcore = [0]
+# gencores defines which cores will be used to generate packets. If the
+# generator is not able to generate enough packets, you might want to assign
+# more cores to the generator. Make sure not to use more cores in these
+# variables than you have available in your PROX instance.
+gencores = [1]
+# latcores defines the cores that will do the task of measuring latency,
+# reordering and other statistics.
+latcores = [3]
+# Non generator Test machines only require the cores parameter to find out on
+# which cores they need to place the PROX tasks.
+# cores = [1-3]
+# cores = [1,2,3]
+# The bucket_size_exp parameter is only needed for generator machines when
+# collecting percentile latency statistics. PROX is assigning every packet to
+# one of the 128 latency buckets. The size of the latency buckets depends on
+# the processor frequency and this parameter using some complicated formula.
+# iteration_data['bucket_size'] = float(2 ** BUCKET_SIZE_EXP) /
+# (old_div(float(iteration_data['lat_hz']),float(10**6)))
+# The result expresses the width of each bucket in micro-seconds.
+# The minimum value (which is also the default value) for this parameter is 11.
+# For a processor with a frequency of 2Ghz, and a parameter of 11, this results
+# in a bucket size of 1.024 us. Since we have 128 buckets, the maximum latency
+# that can be stored in the buckets is in theory 128 * 1.024 = 131.072 us. We
+# will however place every measurement with a latency higher than 131.072 us in
+# the last bucket. When you are dealing with higher latency, you will have to
+# increase this parameter. Each time you increase this parameter by 1, you will
+# double the bucket size.
+#bucket_size_exp = 12
+# We can only monitor one generator and one reflector (swap) Test machine.
+# Monitoring means that we will use the statistics coming from these Test
+# machines to report statistics and make decisions on the success of a test.
+# Test machines not playing a role in this process, need to have the monitor
+# parameter set to false. You can only have 1 generator machine and 1 SUT Test
+# machine. The parameter can be set to false for background traffic Test
+# machines, GW Test machines, etc... Default is true
+#monitor = false
+# The prox_socket parameter instructs the rapid scripts to connect to the PROX
+# instance and collect statistics. Default is true. If set to none, we will not
+# collect any statistics from this machine
+#prox_socket = false
+# The prox_launch_exit parameter instructs the script to actually start PROX at
+# the beginning of a test, and to stop it at the end. The default is true. You
+# can set this parameter to false in case you want to start PROX manually and
+# inspect the PROX UI, while the rapid scripts are driving the testing.
+#prox_launch_exit = false
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = false
+#prox_launch_exit = false
+
+# The following section describes the first test that will run. You need at
+# least 1 test section. In most cases, you will only have one.
+[test1]
+# The test that we will run. A limited set of tests are available: you need to
+# select from the available tests as you can see in the runrapid.py code.
+# At the moment of the writing of this text, we have the following tests
+# available: flowsizetest, TST009test, fixed_rate, increment_till_fail,
+# corestatstest, portstatstest, impairtest, irqtest, warmuptest
+test=flowsizetest
+# The next warmup parameters, are used to warm up the system before the actual
+# test is started. This is to make sure ARP is being resolved in PROX and in the
+# underlying infrastructure so that this does not influence the results.
+# warmupflowsize instructs how many parallel flows need to be generated during
+# warmup
+warmupflowsize=512
+# Give the imix packet size that will be used during warmup. It is a list of
+# packet sizes
+warmupimix=[64, 300, 250, 250, 80]
+# The speed at which we will generate packets during the warmup phase. The speed
+# is expressed as a percentage of 10Gb/s. You could say this is expressed in
+# units of 100Mb/s.
+warmupspeed=1
+# warmuptime is the time this warmup phase will run. It is expressed in seconds.
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+# Each element in this list will result in a separate test.
+flows=[64,500000]
+# The drop_rate_threshold defines the maximum amount of packets that can be
+# dropped without declaring the test as failed. This number is expressed as a
+# percentage of the total amount of packets being sent by the generator. If this
+# number is set to 0, the test will only be declared successful, if zero packets
+# were dropped during this test
+drop_rate_threshold = 0.1
+# Setting one of the following thresholds to infinity (inf), results in the
+# criterion not being evaluated to rate the test as successful. The latency
+# thresholds are expressed in micro-seconds.
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+# When we run binary searches, we are always trying at a new speed, halfway
+# between the last failed speed and the last successful speed (initially, we
+# consider 0 as that last successful speed). We stop doing this binary search
+# when the difference between the last speed and the new speed is less than
+# what is defined by accuracy, expressed in percentages.
+accuracy = 1
+# Speed at which we will start the binary search, expressed in percentage of
+# 10Gb/s.
+startspeed = 50
+# When using ramp_step, we will at the beginning of each measurement, increase
+# the traffic slowly, till we reach the requested speed. Can be used with
+# certain soft switches that are reconfiguring the resource usage, based on the
+# actual traffic. In order not to influence the measurement, we then slowly go
+# to the requested traffic rate.
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test
new file mode 100644
index 00000000..8b765e7d
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput.test
@@ -0,0 +1,54 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[8,1024]
+drop_rate_threshold = 0
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
new file mode 100644
index 00000000..27794a12
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[64]
+drop_rate_threshold = 0
+lat_avg_threshold = inf
+lat_perc_threshold = inf
+lat_max_threshold = inf
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test
new file mode 100644
index 00000000..69e4ebc7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_acaeab_16384F.test
@@ -0,0 +1,57 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = Rapid_ETSINFV_TST009
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64,256,64,1024,64,128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[16384]
+drop_rate_threshold = 0
+lat_avg_threshold = 120
+lat_perc_threshold = 220
+lat_max_threshold = inf
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test
new file mode 100644
index 00000000..ff902de6
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009ipV6.test
@@ -0,0 +1,61 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+ipv6 = True
+
+[TestM1]
+name = Generator
+config_file = configs/genv6.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swapv6.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=TST009test
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# DO NOT USE IMIX FOR IPV6 TESTING. THE LIST OF IMIXS CAN ONLY CONTAIN LISTS
+# WITH ONE ELEMENT!!!
+# PACKET SIZE NEEDS TO BE AT LEAST 84 (66 + 18) FOR IPV6
+# 18 bytes needed for UDP LATENCY AND COUNTER CONTENT
+imixs=[[84],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[8,1024]
+drop_rate_threshold = 0
+MAXr = 3
+MAXz = 5000
+MAXFramesPerSecondAllIngress = 12000000
+StepSize = 10000
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test
new file mode 100644
index 00000000..803c65e7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/bare.test
@@ -0,0 +1,51 @@
+##
+## Copyright (c) 2010-2020 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+[TestParameters]
+name = BareTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+
+[TestM1]
+name = Generator
+config_file = configs/l2gen_bare.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = Swap
+config_file = configs/l2swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=10
+warmuptime=2
+imixs=[[64],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512,1]
+drop_rate_threshold = 0
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test
new file mode 100644
index 00000000..9874de47
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid.test
@@ -0,0 +1,65 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test
new file mode 100644
index 00000000..a876a049
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/basicrapid_gw.test
@@ -0,0 +1,73 @@
+##
+## Copyright (c) 2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapWithGatewayTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+gw_vm = 2
+dest_vm = 3
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Gateway
+monitor = false
+prox_socket = false
+prox_launch_exit = false
+
+[TestM3]
+name = Swap
+config_file = configs/swap_gw.cfg
+gw_vm = 2
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
+#ramp_step = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test
new file mode 100644
index 00000000..927ecf35
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/cgnat.test
@@ -0,0 +1,63 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = CGNATTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+dest_vm = 3
+gw_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = CGNAT
+config_file = configs/cgnat.cfg
+dest_vm = 3
+mcore = [0]
+cores = [1]
+monitor = false
+prox_socket = true
+prox_launch_exit = true
+
+[TestM3]
+name = PublicSide
+config_file = configs/public_server.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512]
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test
new file mode 100644
index 00000000..660f79b0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/corestats.test
@@ -0,0 +1,31 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = CoreStatistics
+number_of_tests = 1
+total_number_of_test_machines = 1
+
+[TestM1]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=corestatstest
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test
new file mode 100644
index 00000000..bc5e96b8
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/encrypt.test
@@ -0,0 +1,70 @@
+##
+## Copyright (c) 2023 luc.provoost@gmail.com
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = EncryptionDecryption
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+bucket_size_exp = 16
+#prox_launch_exit = false
+
+[TestM2]
+name = Encrypt
+config_file = configs/esp.cfg
+dest_vm = 1
+mcore = [0]
+cores = [1]
+altcores=[2]
+#prox_socket = true
+#prox_launch_exit = false
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+#imixs=[[64],[64,250,800,800]]
+imixs=[[1500],[512],[256],[128]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.5
+lat_avg_threshold = inf
+lat_perc_threshold = inf
+lat_max_threshold = inf
+accuracy = 5
+startspeed = 250
+#ramp_step = 1
+
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test
index 3042e722..898062c9 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/impair.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/impair.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2018 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,43 +13,50 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
-[DEFAULT]
+[TestParameters]
name = impairTesting
-number_of_tests = 1
+number_of_tests = 2
total_number_of_test_machines = 3
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 1
-accuracy = 0.01
-
[TestM1]
name = Generator
-machine_index = 1
-config_file = gen_gw.cfg
+config_file = configs/gen_gw.cfg
gw_vm = 2
dest_vm = 3
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
+mcore = [0]
+gencores = [1]
+latcores = [3]
[TestM2]
name = ImpairGW
-machine_index = 2
-config_file = impair.cfg
-group1cores = [1]
+config_file = configs/impair.cfg
+mcore = [0]
+cores = [1]
+monitor = False
[TestM3]
name = Swap
-machine_index = 3
-config_file = swap.cfg
-group1cores = [1]
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
[test1]
-cmd=run_speedtest(sock[0],sock[2])
+test=warmuptest
+warmupflowsize=1024
+warmupimix=[64]
+warmupspeed=10
+warmuptime=2
+
+[test2]
+test=impairtest
+steps=5
+imix=[64]
+flowsize=64
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 5
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test
new file mode 100644
index 00000000..cb673de2
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/increment_till_fail.test
@@ -0,0 +1,64 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = IncrementTillFailTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+
+[TestM1]
+name = Generator
+config_file = configs/gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=increment_till_fail
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64],[64,250,800,800]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+step = 0.5
+startspeed = 1
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test
new file mode 100644
index 00000000..f0330589
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/ipv6.test
@@ -0,0 +1,65 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+lat_percentile = 99
+ipv6 = True
+
+[TestM1]
+name = Generator
+config_file = configs/genv6.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+#bucket_size_exp = 12
+
+[TestM2]
+name = Swap
+config_file = configs/swapv6.cfg
+mcore = [0]
+cores = [1]
+#prox_socket = true
+#prox_launch_exit = true
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[84]
+warmupspeed=1
+warmuptime=2
+# DO NOT USE IMIX FOR IPV6 TESTING. THE LIST OF IMIXS CAN ONLY CONTAIN LISTS
+# WITH ONE ELEMENT!!!
+# PACKET SIZE NEEDS TO BE AT LEAST 84 (66 + 18) FOR IPV6
+# 18 bytes needed for UDP LATENCY AND COUNTER CONTENT
+imixs=[[84],[250]]
+# Number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[64,500000]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0.1
+lat_avg_threshold = 50
+lat_perc_threshold = 80
+lat_max_threshold = inf
+accuracy = 1
+startspeed = 50
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test
new file mode 100644
index 00000000..77c9cbec
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/irq.test
@@ -0,0 +1,37 @@
+##
+## Copyright (c) 2010-2019 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+[TestParameters]
+name = IRQTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+
+[TestM1]
+name = InterruptTestMachine1
+config_file = configs/irq.cfg
+mcore = [0]
+cores = [1,2,3]
+monitor = False
+
+[TestM2]
+name = InterruptTestMachine2
+config_file = configs/irq.cfg
+mcore = [0]
+cores = [1,2,3]
+monitor = False
+
+[test1]
+test=irqtest
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2zeroloss.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test
index 1ea7f0a2..542fe634 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/l2zeroloss.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2framerate.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2018 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,41 +13,30 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
-[DEFAULT]
+[TestParameters]
name = L2BasicSwapTesting
-number_of_tests = 3
+number_of_tests = 1
total_number_of_test_machines = 2
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 0
-accuracy = 0.1
[TestM1]
name = Generator
-machine_index = 1
-config_file = l2gen.cfg
+config_file = configs/l2gen.cfg
dest_vm = 2
-script_control = true
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
+mcore = [0]
+gencores = [1]
+latcores = [3]
[TestM2]
name = Swap
-machine_index = 2
-config_file = l2swap.cfg
-group1cores = [1]
+config_file = configs/l2swap.cfg
+mcore = [0]
+cores = [1]
[test1]
-cmd=run_speedtest(sock[0],sock[1])
-[test2]
-cmd=run_sizetest(sock[0],sock[1])
-[test3]
-cmd=run_flowtest(sock[0],sock[1])
-
+test=fixed_rate
+startspeed = 10
+imixs=[[256]]
+flows=[64]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test
new file mode 100644
index 00000000..d3a2ba7c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l2zeroloss.test
@@ -0,0 +1,60 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = L2BasicSwapTesting
+number_of_tests = 1
+total_number_of_test_machines = 2
+
+[TestM1]
+name = Generator
+config_file = configs/l2gen.cfg
+dest_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = Swap
+config_file = configs/l2swap.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+# Each element in the imix list will result in a separate test. Each element
+# is on its turn a list of packet sizes which will be used during one test
+# execution. If you only want to test 1 size, define a list with only one
+# element.
+imixs=[[64]]
+# the number of flows in the list need to be powers of 2, max 2^30
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[512]
+# Setting one of the following thresholds to infinity (inf)
+# results in the criterion not being evaluated to rate the test as successful
+drop_rate_threshold = 0
+lat_avg_threshold = 500
+lat_perc_threshold = 800
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test
index 6a9998c6..f0db6b28 100644
--- a/VNFs/DPPD-PROX/helper-scripts/openstackrapid/basicrapid.test
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/l3framerate.test
@@ -1,5 +1,5 @@
##
-## Copyright (c) 2010-2018 Intel Corporation
+## Copyright (c) 2010-2021 Intel Corporation
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
@@ -13,42 +13,37 @@
## See the License for the specific language governing permissions and
## limitations under the License.
##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
-[DEFAULT]
-name = BasicSwapTesting
-number_of_tests = 3
+[TestParameters]
+name = L3FrameRateTesting
+number_of_tests = 1
total_number_of_test_machines = 2
-init_code=init_test()
-dest_vm = not_used
-gw_vm = not_used
-script_control = false
-group1cores = not_used
-group2cores = not_used
-group3cores = not_used
-drop_rate_treshold = 0.1
-accuracy = 0.01
-
[TestM1]
name = Generator
-machine_index = 1
-config_file = gen.cfg
+config_file = configs/gen.cfg
dest_vm = 2
-script_control = true
-group1cores = [1]
-group2cores = [3]
-group3cores = [1,3]
+mcore = [0]
+gencores = [1]
+latcores = [3]
[TestM2]
name = Swap
-machine_index = 2
-config_file = swap.cfg
-group1cores = [1]
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
[test1]
-cmd=run_speedtest(sock[0],sock[1])
-[test2]
-cmd=run_sizetest(sock[0],sock[1])
-[test3]
-cmd=run_flowtest(sock[0],sock[1])
-
+test=fixed_rate
+warmupflowsize=64
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64],[128]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# If not a power of 2, we will use the lowest power of 2 that is larger than
+# the requested number of flows. e.g. 9 will result in 16 flows
+flows=[1,64]
+startspeed=5
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test
new file mode 100644
index 00000000..20d66209
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/portstats.test
@@ -0,0 +1,32 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = PortStats
+number_of_tests = 1
+total_number_of_test_machines = 1
+
+[TestM1]
+name = Swap
+config_file = configs/swap.cfg
+mcore = [0]
+cores = [1]
+ports = [0]
+
+[test1]
+test=portstatstest
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test
new file mode 100644
index 00000000..e4bddad0
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/tests/secgw.test
@@ -0,0 +1,60 @@
+##
+## Copyright (c) 2010-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+# CHECK README IN THIS DIRECTORY FOR MORE EXPLANATION
+# ON PARAMETERS IN THIS FILE
+
+[TestParameters]
+name = GWTesting
+number_of_tests = 1
+total_number_of_test_machines = 3
+
+[TestM1]
+name = Generator
+config_file = configs/gen_gw.cfg
+dest_vm = 3
+gw_vm = 2
+mcore = [0]
+gencores = [1]
+latcores = [3]
+
+[TestM2]
+name = GW1
+config_file = configs/secgw1.cfg
+dest_vm = 3
+mcore = [0]
+cores = [1]
+
+[TestM3]
+name = GW2
+config_file = configs/secgw2.cfg
+mcore = [0]
+cores = [1]
+
+[test1]
+test=flowsizetest
+warmupflowsize=512
+warmupimix=[64]
+warmupspeed=1
+warmuptime=2
+imixs=[[64]]
+# the number of flows in the list need to be powers of 2, max 2^20
+# Select from following numbers: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576
+flows=[512]
+drop_rate_threshold = 0.1
+lat_avg_threshold = 500
+lat_max_threshold = 1000
+accuracy = 0.1
+startspeed = 10
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile
new file mode 100644
index 00000000..8a092def
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/Dockerfile
@@ -0,0 +1,28 @@
+##
+## Copyright (c) 2020-2021 Intel Corporation
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+FROM opnfv/xtesting
+
+RUN apk upgrade --update
+
+ENV RAPID_TEST=rapid_tst009_throughput
+
+RUN git clone https://git.opnfv.org/samplevnf /samplevnf
+WORKDIR /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid
+RUN chmod 400 /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/rapid_rsa_key
+COPY testcases.yaml /usr/lib/python3.8/site-packages/xtesting/ci/testcases.yaml
+RUN apk add python3-dev openssh-client && cd /samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/ && git init && pip3 install .
+CMD ["run_tests", "-t", "all"]
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml
new file mode 100644
index 00000000..92fc7b4c
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/site.yaml
@@ -0,0 +1,13 @@
+---
+- hosts:
+ - 127.0.0.1
+ roles:
+ - role: collivier.xtesting
+ project: rapidxt
+ repo: 127.0.0.1
+ dport: 5000
+ gerrit:
+ suites:
+ - container: rapidxt
+ tests:
+ - rapid_tst009
diff --git a/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml
new file mode 100644
index 00000000..3cdda7d7
--- /dev/null
+++ b/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting/testcases.yaml
@@ -0,0 +1,54 @@
+---
+tiers:
+ -
+ name: IRQ_rapid_benchmarking
+ order: 1
+ description: 'IRQ Rapid Testing'
+ testcases:
+ -
+ case_name: rapid_irq
+ project_name: rapidxt
+ criteria: 499500
+ # Criterion for irq is defined as 500000 - the maximal allowed interrupt time per PMD loop (in us)
+ blocking: true
+ clean_flag: false
+ description: 'IRQ test'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/irq.test
+ runtime: 5
+ environment_file: config/rapid.env
+ -
+ name: TST009_rapid_benchmarking
+ order: 2
+ description: 'TST009 Rapid Testing'
+ testcases:
+ -
+ case_name: rapid_tst009_64b_64f
+ project_name: rapidxt
+ criteria: 0.5
+ # Criterion for TST009 testing is defined as the minimum packets per second received in the generator, expressed in Mpps
+ blocking: true
+ clean_flag: false
+ description: 'TST009 test, 64 byte packets, 64 flows'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/TST009_Throughput_64B_64F.test
+ runtime: 5
+ environment_file: config/rapid.env
+ -
+ case_name: rapid_tst009_acaeab_16384f
+ project_name: rapidxt
+ criteria: 0.2
+ # Criterion for TST009 testing is defined as the minimum packets per second received in the generator, expressed in Mpps
+ blocking: true
+ clean_flag: false
+ description: 'TST009 test, imix acaeab, 16384 flows'
+ run:
+ name: rapidxt
+ args:
+ test_file: tests/TST009_Throughput_acaeab_16384F.test
+ runtime: 5
+ environment_file: config/rapid.env
diff --git a/VNFs/DPPD-PROX/igmp.h b/VNFs/DPPD-PROX/igmp.h
new file mode 100644
index 00000000..7b868aae
--- /dev/null
+++ b/VNFs/DPPD-PROX/igmp.h
@@ -0,0 +1,59 @@
+/*
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef _IGMP_H_
+#define _IGMP_H_
+
+#define IGMP_MEMBERSHIP_QUERY 0x11
+#define IGMP_MEMBERSHIP_REPORT_V1 0x12
+#define IGMP_MEMBERSHIP_REPORT 0x16
+#define IGMP_LEAVE_GROUP 0x17
+
+struct igmpv1_hdr {
+ uint8_t type: 4; /* type */
+ uint8_t version: 4; /* version */
+ uint8_t unused; /* unused */
+ uint16_t checksum; /* checksum */
+ uint32_t group_address; /* group address */
+} __attribute__((__packed__));
+
+struct igmpv2_hdr {
+ uint8_t type; /* type */
+ uint8_t max_resp_time; /* maximum response time */
+ uint16_t checksum; /* checksum */
+ uint32_t group_address; /* group address */
+} __attribute__((__packed__));
+
+struct igmpv3_hdr {
+ uint8_t type; /* type */
+ uint8_t max_resp_time; /* maximum response time */
+ uint16_t checksum; /* checksum */
+ uint32_t group_address; /* group address */
+ uint8_t bits: 4; /* S(suppress router-side processing)QRV(Querier's Robustness Variable) bits */
+ uint8_t reserved: 4; /* reserved */
+ uint8_t QQIC; /* Querier's Query Interval Code */
+ uint16_t n_src; /* Number of source addresses */
+} __attribute__((__packed__));
+
+struct task_base;
+
+// igmp_join and leave functions are so far implemented within handle_swap.
+// Only swap task can use them right now, as they use igmp_pool
+// only defined in swap task.
+void igmp_join_group(struct task_base *tbase, uint32_t igmp_address);
+void igmp_leave_group(struct task_base *tbase);
+
+#endif /* _IGMP_H_ */
diff --git a/VNFs/DPPD-PROX/input_conn.c b/VNFs/DPPD-PROX/input_conn.c
index 63e6511e..13d6110e 100644
--- a/VNFs/DPPD-PROX/input_conn.c
+++ b/VNFs/DPPD-PROX/input_conn.c
@@ -20,10 +20,13 @@
#include <sys/un.h>
#include <unistd.h>
+#include <rte_cycles.h>
#include "input_conn.h"
#include "input.h"
+#include "log.h"
#include "run.h"
#include "cmd_parser.h"
+#include "prox_cfg.h"
static struct input tcp_server;
int tcp_server_started;
@@ -132,6 +135,8 @@ static void handle_client(struct input* client_input)
return ;
}
+ prox_cfg.heartbeat_tsc = rte_rdtsc() + prox_cfg.heartbeat_timeout * rte_get_tsc_hz();
+
/* Scan in data until \n (\r skipped if followed by \n) */
for (int i = 0; i < ret; ++i) {
if (cur[i] == '\r' && i + 1 < ret && cur[i + 1] == '\n')
@@ -150,6 +155,18 @@ static void handle_client(struct input* client_input)
}
}
+void stop_handling_client(void)
+{
+ size_t i;
+ for (i = 0; i < sizeof(clients)/sizeof(clients[0]); ++i) {
+ if (clients[i].enabled) {
+ close(clients[i].input.fd);
+ clients[i].enabled = 0;
+ unreg_input(&clients[i].input);
+ }
+ }
+}
+
static void handle_new_client(struct input* server)
{
size_t i;
diff --git a/VNFs/DPPD-PROX/input_conn.h b/VNFs/DPPD-PROX/input_conn.h
index 98e9af45..9e39c808 100644
--- a/VNFs/DPPD-PROX/input_conn.h
+++ b/VNFs/DPPD-PROX/input_conn.h
@@ -23,5 +23,6 @@ int reg_input_uds(void);
void unreg_input_tcp(void);
void unreg_input_uds(void);
+void stop_handling_client(void);
#endif /* _INPUT_CONN_H_ */
diff --git a/VNFs/DPPD-PROX/input_curses.c b/VNFs/DPPD-PROX/input_curses.c
index 6f79869b..346b0e31 100644
--- a/VNFs/DPPD-PROX/input_curses.c
+++ b/VNFs/DPPD-PROX/input_curses.c
@@ -28,6 +28,8 @@
#include "input_curses.h"
#include "histedit.h"
+#include "libedit_autoconf.h"
+
static EditLine *el;
static History *hist;
@@ -124,7 +126,11 @@ static int peek_stdin(void)
return FD_ISSET(fileno(stdin), &in_fd);
}
+#ifdef HAVE_LIBEDIT_EL_RFUNC_T
+static int do_get_char(EditLine *e, wchar_t *c)
+#else
static int get_char(EditLine *e, char *c)
+#endif
{
*c = display_getch();
@@ -167,6 +173,10 @@ static int get_char(EditLine *e, char *c)
return 1;
}
+#ifdef HAVE_LIBEDIT_EL_RFUNC_T
+static el_rfunc_t get_char = &do_get_char;
+#endif
+
static void proc_keyboard(struct input *input)
{
const char *line;
diff --git a/VNFs/DPPD-PROX/ip6_addr.h b/VNFs/DPPD-PROX/ip6_addr.h
index f9b56c19..3279ded1 100644
--- a/VNFs/DPPD-PROX/ip6_addr.h
+++ b/VNFs/DPPD-PROX/ip6_addr.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,8 +17,6 @@
#ifndef _IP6_ADDR_H_
#define _IP6_ADDR_H_
-#include <inttypes.h>
-
struct ipv6_addr {
uint8_t bytes[16];
};
diff --git a/VNFs/DPPD-PROX/lconf.c b/VNFs/DPPD-PROX/lconf.c
index 935bac5d..be2486e7 100644
--- a/VNFs/DPPD-PROX/lconf.c
+++ b/VNFs/DPPD-PROX/lconf.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -127,6 +127,10 @@ static void msg_stop(struct lcore_cfg *lconf)
idx++;
}
}
+ // Check that task id is valid and running
+ if (idx == -1)
+ return;
+
lconf->task_is_running[lconf->msg.task_id] = 0;
t = lconf->tasks_all[lconf->msg.task_id];
@@ -155,8 +159,14 @@ static void msg_start(struct lcore_cfg *lconf)
t->aux->start(t);
}
lconf->n_tasks_run = lconf->n_tasks_all;
+ return;
}
- else if (lconf->n_tasks_run == 0) {
+
+ // Check that task id is valid
+ if (lconf->msg.task_id >= lconf->n_tasks_all)
+ return;
+
+ if (lconf->n_tasks_run == 0) {
t = lconf->tasks_run[0] = lconf->tasks_all[lconf->msg.task_id];
lconf->n_tasks_run = 1;
lconf->task_is_running[lconf->msg.task_id] = 1;
@@ -167,9 +177,13 @@ static void msg_start(struct lcore_cfg *lconf)
t->aux->start(t);
}
else {
+ if (lconf->task_is_running[lconf->msg.task_id])
+ return;
for (int i = lconf->n_tasks_run - 1; i >= 0; --i) {
idx = lconf_get_task_id(lconf, lconf->tasks_run[i]);
if (idx == lconf->msg.task_id) {
+ // We should not come here as checking earlier if task id is running...
+ plog_warn("Unexpectedly get request to start task %d already running\n", idx);
break;
}
else if (idx > lconf->msg.task_id) {
@@ -232,7 +246,7 @@ int lconf_do_flags(struct lcore_cfg *lconf)
if (lconf->msg.type == LCONF_MSG_DUMP ||
lconf->msg.type == LCONF_MSG_DUMP_TX) {
t->aux->task_rt_dump.n_print_tx = lconf->msg.val;
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
if (t->aux->tx_pkt_orig)
t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
@@ -250,11 +264,10 @@ int lconf_do_flags(struct lcore_cfg *lconf)
t = lconf->tasks_all[lconf->msg.task_id];
if (lconf->msg.val) {
- t->aux->task_rt_dump.n_trace = lconf->msg.val;
-
if (task_base_get_original_rx_pkt_function(t) != rx_pkt_dummy) {
+ t->aux->task_rt_dump.n_trace = lconf->msg.val;
task_base_add_rx_pkt_function(t, rx_pkt_trace);
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
if (t->aux->tx_pkt_orig)
t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
@@ -267,7 +280,7 @@ int lconf_do_flags(struct lcore_cfg *lconf)
}
} else {
t->aux->task_rt_dump.n_print_tx = lconf->msg.val;
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
if (t->aux->tx_pkt_orig)
t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
@@ -293,7 +306,7 @@ int lconf_do_flags(struct lcore_cfg *lconf)
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
t = lconf->tasks_all[task_id];
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
t->aux->tx_pkt_l2 = tx_pkt_distr;
} else {
@@ -315,7 +328,7 @@ int lconf_do_flags(struct lcore_cfg *lconf)
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
t = lconf->tasks_all[task_id];
if (t->aux->tx_pkt_orig) {
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
t->aux->tx_pkt_orig = NULL;
} else {
@@ -358,7 +371,7 @@ int lconf_do_flags(struct lcore_cfg *lconf)
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
t = lconf->tasks_all[task_id];
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
t->aux->tx_pkt_orig = t->aux->tx_pkt_l2;
t->aux->tx_pkt_l2 = tx_pkt_bw;
} else {
@@ -372,7 +385,7 @@ int lconf_do_flags(struct lcore_cfg *lconf)
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
t = lconf->tasks_all[task_id];
if (t->aux->tx_pkt_orig) {
- if (t->tx_pkt == tx_pkt_l3) {
+ if ((t->tx_pkt == tx_pkt_l3) || (t->tx_pkt == tx_pkt_ndp)) {
t->aux->tx_pkt_l2 = t->aux->tx_pkt_orig;
t->aux->tx_pkt_orig = NULL;
} else {
diff --git a/VNFs/DPPD-PROX/lconf.h b/VNFs/DPPD-PROX/lconf.h
index 4bfa705d..897e6b37 100644
--- a/VNFs/DPPD-PROX/lconf.h
+++ b/VNFs/DPPD-PROX/lconf.h
@@ -17,6 +17,11 @@
#ifndef _LCONF_H_
#define _LCONF_H_
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include "task_init.h"
#include "stats.h"
@@ -52,6 +57,7 @@ struct lconf_msg {
#define LCONF_FLAG_TX_DISTR_ACTIVE 0x00000004
#define LCONF_FLAG_RX_BW_ACTIVE 0x00000008
#define LCONF_FLAG_TX_BW_ACTIVE 0x00000010
+#define LCONF_FLAG_SCHED_RR 0x00000020
struct lcore_cfg {
/* All tasks running at the moment. This is empty when the core is stopped. */
@@ -99,8 +105,8 @@ static inline void lconf_flush_all_queues(struct lcore_cfg *lconf)
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
task = lconf->tasks_all[task_id];
- if (!(task->flags & FLAG_TX_FLUSH) || (task->flags & FLAG_NEVER_FLUSH)) {
- task->flags |= FLAG_TX_FLUSH;
+ if (!(task->flags & TBASE_FLAG_TX_FLUSH) || (task->flags & TBASE_FLAG_NEVER_FLUSH)) {
+ task->flags |= TBASE_FLAG_TX_FLUSH;
continue;
}
lconf->flush_queues[task_id](task);
diff --git a/VNFs/DPPD-PROX/log.c b/VNFs/DPPD-PROX/log.c
index 7049a5e3..2fa63f34 100644
--- a/VNFs/DPPD-PROX/log.c
+++ b/VNFs/DPPD-PROX/log.c
@@ -25,10 +25,12 @@
#include <rte_mbuf.h>
#include "log.h"
+#include "quit.h"
#include "display.h"
#include "defaults.h"
#include "etypes.h"
#include "prox_cfg.h"
+#include "prox_compat.h"
static pthread_mutex_t file_mtx = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
int log_lvl = PROX_MAX_LOG_LVL;
@@ -56,7 +58,7 @@ const char *get_warning(int i)
static void store_warning(const char *warning)
{
- strncpy(last_warn[n_warnings % 5], warning, sizeof(last_warn[0]));
+ prox_strncpy(last_warn[n_warnings % 5], warning, sizeof(last_warn[0]));
n_warnings++;
}
@@ -69,10 +71,10 @@ void plog_init(const char *log_name, int log_name_pid)
if (log_name_pid)
snprintf(buf, sizeof(buf), "%s-%u.log", "prox", getpid());
else
- strncpy(buf, "prox.log", sizeof(buf));
+ prox_strncpy(buf, "prox.log", sizeof(buf));
}
else {
- strncpy(buf, log_name, sizeof(buf));
+ prox_strncpy(buf, log_name, sizeof(buf));
}
fp = fopen(buf, "w");
@@ -80,6 +82,13 @@ void plog_init(const char *log_name, int log_name_pid)
tsc_off = rte_rdtsc() + 2500000000;
}
+void plog_end(void)
+{
+ if (fp)
+ fclose(fp);
+ fp = NULL;
+}
+
int plog_set_lvl(int lvl)
{
if (lvl <= PROX_MAX_LOG_LVL) {
@@ -143,8 +152,8 @@ static const char* lvl_to_str(int lvl, int always)
static int dump_pkt(char *dst, size_t dst_size, const struct rte_mbuf *mbuf)
{
- const struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, const struct ether_hdr *);
- const struct ipv4_hdr *dpip = (const struct ipv4_hdr *)(peth + 1);
+ const prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, const prox_rte_ether_hdr *);
+ const prox_rte_ipv4_hdr *dpip = (const prox_rte_ipv4_hdr *)(peth + 1);
const uint8_t *pkt_bytes = (const uint8_t *)peth;
const uint16_t len = rte_pktmbuf_pkt_len(mbuf);
size_t str_len = 0;
@@ -204,6 +213,10 @@ static int vplog(int lvl, const char *format, va_list ap, const struct rte_mbuf
ret--;
ret += dump_pkt(buf + ret, sizeof(buf) - ret, mbuf);
}
+
+ if (lvl == PROX_LOG_PANIC)
+ PROX_PANIC(1, "%s", buf);
+
plog_buf(buf);
if (lvl == PROX_LOG_WARN) {
@@ -270,6 +283,23 @@ int plog_err(const char *fmt, ...)
return ret;
}
+int plog_err_or_panic(int do_panic, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, fmt);
+ if (do_panic) {
+ ret = vplog(PROX_LOG_PANIC, fmt, ap, NULL, 0);
+ va_end(ap);
+ return ret;
+ } else {
+ ret = vplog(PROX_LOG_ERR, fmt, ap, NULL, 0);
+ va_end(ap);
+ return ret;
+ }
+}
+
int plogx_err(const char *fmt, ...)
{
va_list ap;
diff --git a/VNFs/DPPD-PROX/log.h b/VNFs/DPPD-PROX/log.h
index a5dcf47a..b270462e 100644
--- a/VNFs/DPPD-PROX/log.h
+++ b/VNFs/DPPD-PROX/log.h
@@ -17,6 +17,7 @@
#ifndef _LOG_H_
#define _LOG_H_
+#define PROX_LOG_PANIC -1
#define PROX_LOG_ERR 0
#define PROX_LOG_WARN 1
#define PROX_LOG_INFO 2
@@ -33,11 +34,13 @@ const char* get_warning(int i);
struct rte_mbuf;
#if PROX_MAX_LOG_LVL >= PROX_LOG_ERR
+int plog_err_or_panic(int do_panic, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold));
int plog_err(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold));
int plogx_err(const char *fmt, ...) __attribute__((format(printf, 1, 2), cold));
int plogd_err(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold));
int plogdx_err(const struct rte_mbuf *mbuf, const char *fmt, ...) __attribute__((format(printf, 2, 3), cold));
#else
+__attribute__((format(printf, 2, 3))) static inline int plog_err_or_panic(__attribute__((unused)) int do_panic, __attribute__((unused)) const char *fmt, ...) {return 0;}
__attribute__((format(printf, 1, 2))) static inline int plog_err(__attribute__((unused)) const char *fmt, ...) {return 0;}
__attribute__((format(printf, 1, 2))) static inline int plogx_err(__attribute__((unused)) const char *fmt, ...) {return 0;}
__attribute__((format(printf, 2, 3))) static inline int plogd_err(__attribute__((unused)) const struct rte_mbuf *mbuf, __attribute__((unused)) const char *fmt, ...) {return 0;}
@@ -81,6 +84,7 @@ __attribute__((format(printf, 2, 3))) static inline int plogdx_dbg(__attribute__
#endif
void plog_init(const char *log_name, int log_name_pid);
+void plog_end(void);
void file_print(const char *str);
int plog_set_lvl(int lvl);
diff --git a/VNFs/DPPD-PROX/main.c b/VNFs/DPPD-PROX/main.c
index 2c8517f0..61abe6e6 100644
--- a/VNFs/DPPD-PROX/main.c
+++ b/VNFs/DPPD-PROX/main.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@
#include <locale.h>
#include <unistd.h>
#include <signal.h>
+#include <curses.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
@@ -53,6 +54,7 @@
#endif
uint8_t lb_nb_txrings = 0xff;
+extern const char *git_version;
struct rte_ring *ctrl_rings[RTE_MAX_LCORE*MAX_TASKS_PER_CORE];
static void __attribute__((noreturn)) prox_usage(const char *prgname)
@@ -114,7 +116,7 @@ static void check_mixed_normal_pipeline(void)
}
}
-static void check_zero_rx(void)
+static void check_no_rx(void)
{
struct lcore_cfg *lconf = NULL;
struct task_args *targ;
@@ -127,12 +129,34 @@ static void check_zero_rx(void)
}
}
+static void check_nb_mbuf(void)
+{
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ = NULL;
+ uint8_t port_id;
+ int n_txd = 0, n_rxd = 0;
+
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+ port_id = targ->tx_port_queue[i].port;
+ n_txd = prox_port_cfg[port_id].n_txd;
+ }
+ for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
+ port_id = targ->rx_port_queue[i].port;
+ n_rxd = prox_port_cfg[port_id].n_rxd;
+ }
+ if (targ->nb_mbuf <= n_rxd + n_txd + targ->nb_cache_mbuf + MAX_PKT_BURST) {
+ plog_warn("Core %d, task %d might not have enough mbufs (%d) to support %d txd, %d rxd and %d cache_mbuf\n",
+ lconf->id, targ->id, targ->nb_mbuf, n_txd, n_rxd, targ->nb_cache_mbuf);
+ }
+ }
+}
+
static void check_missing_rx(void)
{
struct lcore_cfg *lconf = NULL, *rx_lconf = NULL, *tx_lconf = NULL;
struct task_args *targ, *rx_targ = NULL, *tx_targ = NULL;
- struct prox_port_cfg *port;
- uint8_t port_id, rx_port_id, ok;
+ uint8_t port_id, rx_port_id, ok, l3, ndp;
while (core_targ_next(&lconf, &targ, 0) == 0) {
PROX_PANIC((targ->flags & TASK_ARG_RX_RING) && targ->rx_rings[0] == 0 && !targ->tx_opt_ring_task,
@@ -145,12 +169,17 @@ static void check_missing_rx(void)
lconf = NULL;
while (core_targ_next(&lconf, &targ, 0) == 0) {
- if (strcmp(targ->sub_mode_str, "l3") != 0)
+ l3 = ndp = 0;
+ if (strcmp(targ->sub_mode_str, "l3") == 0)
+ l3 = 1;
+ else if (strcmp(targ->sub_mode_str, "ndp") == 0)
+ ndp = 1;
+ else
continue;
- PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3 task must have a RX or a TX port\n");
- // If the L3 sub_mode receives from a port, check that there is at least one core/task
- // transmitting to this port in L3 sub_mode
+ PROX_PANIC((targ->nb_rxports == 0) && (targ->nb_txports == 0), "L3/NDP task must have a RX or a TX port\n");
+ // If the L3/NDP sub_mode receives from a port, check that there is at least one core/task
+ // transmitting to this port in L3/NDP sub_mode
for (uint8_t i = 0; i < targ->nb_rxports; ++i) {
rx_port_id = targ->rx_port_queue[i].port;
ok = 0;
@@ -158,42 +187,48 @@ static void check_missing_rx(void)
while (core_targ_next(&tx_lconf, &tx_targ, 0) == 0) {
if ((port_id = tx_targ->tx_port_queue[0].port) == OUT_DISCARD)
continue;
- if ((rx_port_id == port_id) && (tx_targ->flags & TASK_ARG_L3)){
+ if ((rx_port_id == port_id) &&
+ ( ((tx_targ->flags & TASK_ARG_L3) && l3) ||
+ ((tx_targ->flags & TASK_ARG_NDP) && ndp) ) ) {
ok = 1;
break;
}
}
- PROX_PANIC(ok == 0, "RX L3 sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", rx_port_id, lconf->id, targ->id);
+ PROX_PANIC(ok == 0, "RX %s sub mode for port %d on core %d task %d, but no core/task transmitting on that port\n", l3 ? "l3":"ndp", rx_port_id, lconf->id, targ->id);
}
- // If the L3 sub_mode transmits to a port, check that there is at least one core/task
- // receiving from that port in L3 sub_mode.
+ // If the L3/NDP sub_mode transmits to a port, check that there is at least one core/task
+ // receiving from that port in L3/NDP sub_mode.
if ((port_id = targ->tx_port_queue[0].port) == OUT_DISCARD)
continue;
rx_lconf = NULL;
ok = 0;
- plog_info("\tCore %d task %d transmitting to port %d in L3 mode\n", lconf->id, targ->id, port_id);
+ plog_info("\tCore %d task %d transmitting to port %d in %s submode\n", lconf->id, targ->id, port_id, l3 ? "l3":"ndp");
while (core_targ_next(&rx_lconf, &rx_targ, 0) == 0) {
for (uint8_t i = 0; i < rx_targ->nb_rxports; ++i) {
rx_port_id = rx_targ->rx_port_queue[i].port;
- if ((rx_port_id == port_id) && (rx_targ->flags & TASK_ARG_L3)){
+ if ((rx_port_id == port_id) &&
+ ( ((rx_targ->flags & TASK_ARG_L3) && l3) ||
+ ((rx_targ->flags & TASK_ARG_NDP) && ndp) ) ){
ok = 1;
break;
}
}
if (ok == 1) {
- plog_info("\tCore %d task %d has found core %d task %d receiving from port %d\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id);
+ plog_info("\tCore %d task %d has found core %d task %d receiving from port %d in %s submode\n", lconf->id, targ->id, rx_lconf->id, rx_targ->id, port_id,
+ ((rx_targ->flags & TASK_ARG_L3) && l3) ? "l3":"ndp");
break;
}
}
- PROX_PANIC(ok == 0, "L3 sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", port_id, lconf->id, targ->id);
+ PROX_PANIC(ok == 0, "%s sub mode for port %d on core %d task %d, but no core/task receiving on that port\n", l3 ? "l3":"ndp", port_id, lconf->id, targ->id);
}
}
static void check_cfg_consistent(void)
{
+ check_nb_mbuf();
check_missing_rx();
- check_zero_rx();
+ check_no_rx();
check_mixed_normal_pipeline();
}
@@ -224,6 +259,21 @@ static int chain_flag_state(struct task_args *targ, uint64_t flag, int is_set)
return 0;
}
+static int chain_flag_always_set(struct task_args *targ, uint64_t flag)
+{
+ return (!chain_flag_state(targ, flag, 0));
+}
+
+static int chain_flag_never_set(struct task_args *targ, uint64_t flag)
+{
+ return (!chain_flag_state(targ, flag, 1));
+}
+
+static int chain_flag_sometimes_set(struct task_args *targ, uint64_t flag)
+{
+ return (chain_flag_state(targ, flag, 1));
+}
+
static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
{
uint8_t if_port;
@@ -247,44 +297,68 @@ static void configure_if_tx_queues(struct task_args *targ, uint8_t socket)
prox_port_cfg[if_port].n_txq = 1;
targ->tx_port_queue[i].queue = 0;
}
- /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
- the tasks up to the task transmitting to the port
- does not use refcnt. */
- if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT, 1)) {
- prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
- plog_info("\t\tEnabling No refcnt on port %d\n", if_port);
- }
- else {
- plog_info("\t\tRefcnt used on port %d\n", if_port);
- }
-
/* By default OFFLOAD is enabled, but if the whole
chain has NOOFFLOADS set all the way until the
first task that receives from a port, it will be
disabled for the destination port. */
- if (chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS, 1)) {
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+ if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
- plog_info("\t\tDisabling TX offloads on port %d\n", if_port);
- } else {
- plog_info("\t\tEnabling TX offloads on port %d\n", if_port);
}
-
- /* By default NOMULTSEGS is disabled, as drivers/NIC might split packets on RX
- It should only be enabled when we know for sure that the RX does not split packets.
- Set the ETH_TXQ_FLAGS_NOMULTSEGS flag if none of the tasks up to the task
- transmitting to the port does not use multsegs. */
- if (!chain_flag_state(targ, TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS, 0)) {
- prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
- plog_info("\t\tEnabling No MultiSegs on port %d\n", if_port);
- }
- else {
- plog_info("\t\tMultiSegs used on port %d\n", if_port);
+#else
+ if (chain_flag_always_set(targ, TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)) {
+ prox_port_cfg[if_port].requested_tx_offload &= ~(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
+#endif
}
}
static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
{
+ struct prox_port_cfg *port;
+ uint8_t port_used_counter[PROX_MAX_PORTS] = {0};
+ bool multiple_port_reference = false;
+ uint8_t total_number_of_queues = 0;
+ // Check how many times a port is referenced for this task
+ for (uint8_t i = 0; i < targ->nb_rxports; i++) {
+ uint8_t if_port = targ->rx_port_queue[i].port;
+ port_used_counter[if_port]++;
+ if (port_used_counter[if_port] > 1) {
+ multiple_port_reference = true;
+ port = &prox_port_cfg[if_port];
+ PROX_PANIC((port->all_rx_queues), "Multiple queues defined in rx port, but all_rx_queues also set for port %s\n", port->names[0]);
+ }
+ }
+ // If only referenced once, it is possible that we want to use all queues
+ // Therefore we will check all_rx_queues for that port
+ if (!multiple_port_reference) {
+ for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
+ uint8_t if_port = targ->rx_port_queue[i].port;
+ if (port_used_counter[if_port]) {
+ port = &prox_port_cfg[if_port];
+ if (port->all_rx_queues) {
+ port_used_counter[if_port] = port->max_rxq;
+ total_number_of_queues += port->max_rxq;
+ plog_info("\tall_rx_queues for Port %s: %u rx_queues will be applied\n", port->names[0], port_used_counter[if_port]);
+ }
+ }
+ }
+ }
+ if (total_number_of_queues) {
+ PROX_PANIC((total_number_of_queues > PROX_MAX_PORTS), "%u queues using the all_rx_queues. PROX_MAX_PORTS is set to %u\n", total_number_of_queues, PROX_MAX_PORTS);
+ uint8_t index = 0;
+ for (uint8_t i = 0; i < PROX_MAX_PORTS; i++) {
+ if (port_used_counter[i]) {
+ for (uint8_t j = 0; j < port_used_counter[i]; j++) {
+ targ->rx_port_queue[index].port = i;
+ index ++;
+ }
+ port = &prox_port_cfg[i];
+ plog_info("\t\tConfiguring task to use port %s with %u rx_queues\n", port->names[0], port_used_counter[i]);
+ }
+ }
+ targ->nb_rxports = index;
+ }
for (int i = 0; i < targ->nb_rxports; i++) {
uint8_t if_port = targ->rx_port_queue[i].port;
@@ -292,18 +366,26 @@ static void configure_if_rx_queues(struct task_args *targ, uint8_t socket)
return;
}
- PROX_PANIC(!prox_port_cfg[if_port].active, "Port %u not used, aborting...\n", if_port);
+ port = &prox_port_cfg[if_port];
+ PROX_PANIC(!port->active, "Port %u not used, aborting...\n", if_port);
- if(prox_port_cfg[if_port].rx_ring[0] != '\0') {
- prox_port_cfg[if_port].n_rxq = 0;
+ if(port->rx_ring[0] != '\0') {
+ port->n_rxq = 0;
}
- targ->rx_port_queue[i].queue = prox_port_cfg[if_port].n_rxq;
- prox_port_cfg[if_port].pool[targ->rx_port_queue[i].queue] = targ->pool;
- prox_port_cfg[if_port].pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
- prox_port_cfg[if_port].n_rxq++;
+ // If the mbuf size (of the rx task) is not big enough, we might receive multiple segments
+ // This is usually the case when setting a big mtu size i.e. enabling jumbo frames.
+ // If the packets get transmitted, then multi segments will have to be enabled on the TX port
+ uint16_t max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+ if (max_frame_size + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > targ->mbuf_size) {
+ targ->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTSEGS;
+ }
+ targ->rx_port_queue[i].queue = port->n_rxq;
+ port->pool[targ->rx_port_queue[i].queue] = targ->pool;
+ port->pool_size[targ->rx_port_queue[i].queue] = targ->nb_mbuf - 1;
+ port->n_rxq++;
- int dsocket = prox_port_cfg[if_port].socket;
+ int dsocket = port->socket;
if (dsocket != -1 && dsocket != socket) {
plog_warn("RX core on socket %d while device on socket %d\n", socket, dsocket);
}
@@ -319,8 +401,64 @@ static void configure_if_queues(void)
while (core_targ_next(&lconf, &targ, 0) == 0) {
socket = rte_lcore_to_socket_id(lconf->id);
- configure_if_tx_queues(targ, socket);
configure_if_rx_queues(targ, socket);
+ configure_if_tx_queues(targ, socket);
+ }
+}
+
+static void configure_tx_queue_flags(void)
+{
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ;
+ uint8_t socket;
+ uint8_t if_port;
+
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ socket = rte_lcore_to_socket_id(lconf->id);
+ for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+ if_port = targ->tx_port_queue[i].port;
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+ /* Set the ETH_TXQ_FLAGS_NOREFCOUNT flag if none of
+ the tasks up to the task transmitting to the port
+ use refcnt. */
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+ prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+ }
+#else
+ /* Set the RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE flag if none of
+ the tasks up to the task transmitting to the port
+ use refcnt and per-queue all mbufs come from the same mempool. */
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_REFCOUNT)) {
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL))
+ prox_port_cfg[if_port].requested_tx_offload |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+ }
+#endif
+ }
+ }
+}
+
+static void configure_multi_segments(void)
+{
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ;
+ uint8_t if_port;
+
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ for (uint8_t i = 0; i < targ->nb_txports; ++i) {
+ if_port = targ->tx_port_queue[i].port;
+ // Multi segment is disabled for most tasks. It is only enabled for tasks requiring big packets.
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,1)
+ // We can only enable "no multi segment" if no such task exists in the chain of tasks.
+ if (chain_flag_never_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+ prox_port_cfg[if_port].tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ }
+#else
+ // We enable "multi segment" if at least one task requires it in the chain of tasks.
+ if (chain_flag_sometimes_set(targ, TASK_FEATURE_TXQ_FLAGS_MULTSEGS)) {
+ prox_port_cfg[if_port].requested_tx_offload |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+ }
+#endif
+ }
}
}
@@ -441,7 +579,7 @@ static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct
starg->ctrl_plane_ring = ring;
}
- plog_info("\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
+ plog_info("\t\t\tCore %u task %u to -> core %u task %u ctrl_ring %s %p %s\n",
lconf->id, starg->id, ct.core, ct.task, ct.type == CTRL_TYPE_PKT?
"pkt" : "msg", ring, ring->name);
ris->n_ctrl_rings++;
@@ -500,6 +638,8 @@ static struct rte_ring *init_ring_between_tasks(struct lcore_cfg *lconf, struct
PROX_ASSERT(dtarg->nb_rxrings < MAX_RINGS_PER_TASK);
dtarg->rx_rings[dtarg->nb_rxrings] = ring;
++dtarg->nb_rxrings;
+ if (dtarg->nb_rxrings > 1)
+ dtarg->task_init->flag_features |= TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL;
}
dtarg->nb_slave_threads = starg->core_task_set[idx].n_elems;
dtarg->lb_friend_core = lconf->id;
@@ -543,7 +683,7 @@ static void init_rings(void)
lconf = NULL;
struct prox_port_cfg *port;
while (core_targ_next(&lconf, &starg, 1) == 0) {
- if ((starg->task_init) && (starg->flags & TASK_ARG_L3)) {
+ if ((starg->task_init) && (starg->flags & (TASK_ARG_L3|TASK_ARG_NDP))) {
struct core_task ct;
ct.core = prox_cfg.master;
ct.task = 0;
@@ -552,7 +692,7 @@ static void init_rings(void)
ct.core = lconf->id;
ct.task = starg->id;;
- struct rte_ring *tx_ring = init_ring_between_tasks(lcore_cfg, lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
+ struct rte_ring *tx_ring = init_ring_between_tasks(&lcore_cfg[prox_cfg.master], lcore_cfg[prox_cfg.master].targs, ct, 0, 0, &ris);
}
}
}
@@ -562,13 +702,14 @@ static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
struct rte_mbuf** pkts = prox_zmalloc(nb_mbuf * sizeof(*pkts), rte_socket_id());
uint64_t got = 0;
- while (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0)
+ while ((got < nb_mbuf) && (rte_mempool_get_bulk(mempool, (void**)(pkts + got), 1) == 0))
++got;
+ nb_mbuf = got;
while (got) {
int idx;
do {
- idx = rand() % nb_mbuf - 1;
+ idx = rand() % nb_mbuf;
} while (pkts[idx] == 0);
rte_mempool_put_bulk(mempool, (void**)&pkts[idx], 1);
@@ -578,6 +719,50 @@ static void shuffle_mempool(struct rte_mempool* mempool, uint32_t nb_mbuf)
prox_free(pkts);
}
+static void set_mbuf_size(struct task_args *targ)
+{
+ /* mbuf size can be set
+ * - from config file (highest priority, overwriting any other config) - should only be used as workaround
+ * - defaulted to MBUF_SIZE.
+ * Except if set explicitly, ensure that size is big enough for vmxnet3 driver
+ */
+ if (targ->mbuf_size)
+ return;
+
+ targ->mbuf_size = MBUF_SIZE;
+ struct prox_port_cfg *port;
+ uint16_t max_frame_size = 0, min_buffer_size = 0;
+ int i40e = 0;
+ for (int i = 0; i < targ->nb_rxports; i++) {
+ uint8_t if_port = targ->rx_port_queue[i].port;
+
+ if (if_port == OUT_DISCARD) {
+ continue;
+ }
+ port = &prox_port_cfg[if_port];
+ if (max_frame_size < port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE)
+ max_frame_size = port->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN + 2 * PROX_VLAN_TAG_SIZE;
+ if (min_buffer_size < port->min_rx_bufsize)
+ min_buffer_size = port->min_rx_bufsize;
+
+ // Check whether we receive from i40e. This driver have extra mbuf size requirements
+ if (strcmp(port->short_name, "i40e") == 0)
+ i40e = 1;
+ }
+ if (i40e) {
+ // i40e supports a maximum of 5 descriptors chained
+ uint16_t required_mbuf_size = RTE_ALIGN(max_frame_size / 5, 128) + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+ if (required_mbuf_size > targ->mbuf_size) {
+ targ->mbuf_size = required_mbuf_size;
+ plog_info("\t\tSetting mbuf_size to %u to support frame_size %u\n", targ->mbuf_size, max_frame_size);
+ }
+ }
+ if (min_buffer_size > targ->mbuf_size) {
+ plog_warn("Mbuf size might be too small. This might result in packet segmentation and memory leak\n");
+ }
+
+}
+
static void setup_mempools_unique_per_socket(void)
{
uint32_t flags = 0;
@@ -595,11 +780,7 @@ static void setup_mempools_unique_per_socket(void)
uint8_t socket = rte_lcore_to_socket_id(lconf->id);
PROX_ASSERT(socket < MAX_SOCKETS);
- if (targ->mbuf_size_set_explicitely)
- flags = MEMPOOL_F_NO_SPREAD;
- if ((!targ->mbuf_size_set_explicitely) && (targ->task_init->mbuf_size != 0)) {
- targ->mbuf_size = targ->task_init->mbuf_size;
- }
+ set_mbuf_size(targ);
if (targ->rx_port_queue[0].port != OUT_DISCARD) {
struct prox_port_cfg* port_cfg = &prox_port_cfg[targ->rx_port_queue[0].port];
PROX_ASSERT(targ->nb_mbuf != 0);
@@ -616,28 +797,26 @@ static void setup_mempools_unique_per_socket(void)
PROX_PANIC(mbuf_size[socket] != targ->mbuf_size,
"all mbuf_size must have the same size if using a unique mempool per socket\n");
}
- if ((!targ->mbuf_size_set_explicitely) && (strcmp(port_cfg->short_name, "vmxnet3") == 0)) {
- if (mbuf_size[socket] < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
- mbuf_size[socket] = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
- }
}
}
for (int i = 0 ; i < MAX_SOCKETS; i++) {
if (mbuf_count[i] != 0) {
sprintf(name, "socket_%u_pool", i);
- pool[i] = rte_mempool_create(name,
- mbuf_count[i] - 1, mbuf_size[i],
- nb_cache_mbuf[i],
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- prox_pktmbuf_init, NULL,
- i, flags);
- PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
- plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
- mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);
-
- if (prox_cfg.flags & DSF_SHUFFLE) {
- shuffle_mempool(pool[i], mbuf_count[i]);
+ if ((pool[i] = rte_mempool_lookup(name)) == NULL) {
+ pool[i] = rte_mempool_create(name,
+ mbuf_count[i] - 1, mbuf_size[i],
+ nb_cache_mbuf[i],
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_pktmbuf_pool_init, NULL,
+ prox_pktmbuf_init, NULL,
+ i, flags);
+ PROX_PANIC(pool[i] == NULL, "\t\tError: cannot create mempool for socket %u\n", i);
+ plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", pool[i],
+ mbuf_count[i], mbuf_size[i], nb_cache_mbuf[i], i);
+
+ if (prox_cfg.flags & DSF_SHUFFLE) {
+ shuffle_mempool(pool[i], mbuf_count[i]);
+ }
}
}
}
@@ -652,7 +831,7 @@ static void setup_mempools_unique_per_socket(void)
targ->pool = pool[socket];
/* Set the number of mbuf to the number of the unique mempool, so that the used and free work */
targ->nb_mbuf = mbuf_count[socket];
- plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
+ plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
targ->nb_mbuf, mbuf_size[socket], targ->nb_cache_mbuf, socket);
}
}
@@ -668,33 +847,16 @@ static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args
char memzone_name[64];
char name[64];
- /* mbuf size can be set
- * - from config file (highest priority, overwriting any other config) - should only be used as workaround
- * - through each 'mode', overwriting the default mbuf_size
- * - defaulted to MBUF_SIZE i.e. 1518 Bytes
- * Except is set expliciteky, ensure that size is big enough for vmxnet3 driver
- */
- if (targ->mbuf_size_set_explicitely) {
- flags = MEMPOOL_F_NO_SPREAD;
- /* targ->mbuf_size already set */
- }
- else if (targ->task_init->mbuf_size != 0) {
- /* mbuf_size not set through config file but set through mode */
- targ->mbuf_size = targ->task_init->mbuf_size;
- }
- else if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
- if (targ->mbuf_size < MBUF_SIZE + RTE_PKTMBUF_HEADROOM)
- targ->mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
- }
+ set_mbuf_size(targ);
/* allocate memory pool for packets */
PROX_ASSERT(targ->nb_mbuf != 0);
if (targ->pool_name[0] == '\0') {
- sprintf(name, "core_%u_port_%u_pool", lconf->id, targ->id);
+ sprintf(name, "core_%u_task_%u_pool", lconf->id, targ->id);
}
- snprintf(memzone_name, sizeof(memzone_name)-1, "MP_%s", targ->pool_name);
+ snprintf(memzone_name, sizeof(memzone_name), "MP_%.*s", (int)(sizeof(memzone_name)-4), targ->pool_name);
mz = rte_memzone_lookup(memzone_name);
if (mz != NULL) {
@@ -724,7 +886,7 @@ static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args
receiving from if one core receives from multiple
ports, all the ports use the same mempool */
if (targ->pool == NULL) {
- plog_info("\t\tCreating mempool with name '%s'\n", name);
+ plog_info("\tCreating mempool with name '%s' on socket %d\n", name, socket);
targ->pool = rte_mempool_create(name,
targ->nb_mbuf - 1, targ->mbuf_size,
targ->nb_cache_mbuf,
@@ -735,9 +897,9 @@ static void setup_mempool_for_rx_task(struct lcore_cfg *lconf, struct task_args
}
PROX_PANIC(targ->pool == NULL,
- "\t\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));
+ "\tError: cannot create mempool for core %u port %u: %s\n", lconf->id, targ->id, rte_strerror(rte_errno));
- plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
+ plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", targ->pool,
targ->nb_mbuf, targ->mbuf_size, targ->nb_cache_mbuf, socket);
if (prox_cfg.flags & DSF_SHUFFLE) {
shuffle_mempool(targ->pool, targ->nb_mbuf);
@@ -829,10 +991,10 @@ static void setup_all_task_structs(void)
while(prox_core_next(&lcore_id, 1) == 0) {
lconf = &lcore_cfg[lcore_id];
- plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
+ plog_info("\t*** Initializing core %d (%d task) ***\n", lcore_id, lconf->n_tasks_all);
for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
if (!task_is_master(&lconf->targs[task_id])) {
- plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
+ plog_info("\t\tInitializing struct for core %d task %d\n", lcore_id, task_id);
lconf->targs[task_id].tmaster = tmaster;
lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
}
@@ -888,14 +1050,13 @@ static void init_lcores(void)
plog_info("=== Initializing rings on cores ===\n");
init_rings();
+ configure_multi_segments();
+ configure_tx_queue_flags();
+
plog_info("=== Checking configuration consistency ===\n");
check_cfg_consistent();
plog_all_rings();
-
- setup_all_task_structs_early_init();
- plog_info("=== Initializing tasks ===\n");
- setup_all_task_structs();
}
static int setup_prox(int argc, char **argv)
@@ -923,6 +1084,10 @@ static int setup_prox(int argc, char **argv)
plog_info("=== Initializing ports ===\n");
init_port_all();
+ setup_all_task_structs_early_init();
+ plog_info("=== Initializing tasks ===\n");
+ setup_all_task_structs();
+
if (prox_cfg.logbuf_size) {
prox_cfg.logbuf = prox_zmalloc(prox_cfg.logbuf_size, rte_socket_id());
PROX_PANIC(prox_cfg.logbuf == NULL, "Failed to allocate memory for logbuf with size = %d\n", prox_cfg.logbuf_size);
@@ -1009,6 +1174,40 @@ static void sigterm_handler(int signum)
quit();
}
+static void set_term_env(void)
+{
+ static const char var[] = "TERM";
+ static char str[] = "TERM=putty";
+ char *old_value, *new_value;
+ int max_ver = 0, min_ver = 0, n;
+
+ old_value = getenv(var);
+
+ const char *ncurses_version = curses_version();
+ n = sscanf(ncurses_version, "ncurses %d.%d", &max_ver, &min_ver);
+ if (n != 2) {
+ plog_info("\tUnable to extract ncurses version from %s. TERM left unchanged to %s\n", ncurses_version, old_value);
+ return;
+ } else {
+ plog_info("\tncurses version = %d.%d (%s)\n", max_ver, min_ver, ncurses_version);
+ }
+
+ if ((old_value) && ((max_ver > 6) || ((max_ver == 6) && (min_ver >= 1))) && (strcmp(old_value, "xterm") == 0)) {
+ // On recent OSes such as RHEL 8.0, ncurses(6.1) introduced support
+ // for ECMA-48 repeat character control.
+ // Some terminal emulators use TERM=xterm but do not support this feature.
+ // In this case, printing repeating character such as "22000000 Hz" might
+ // display as 220 Hz.
+ // Other emulators, such as tmux, use TERM=screen, and do not exhibit the issue.
+ plog_info("\tChanged TERM from %s ", old_value);
+ putenv(str);
+ new_value = getenv(var);
+ plog_info("to %s\n", new_value);
+ } else {
+ plog_info("\tTERM left unchanged to %s\n", old_value);
+ }
+}
+
int main(int argc, char **argv)
{
/* set en_US locale to print big numbers with ',' */
@@ -1017,10 +1216,11 @@ int main(int argc, char **argv)
if (prox_parse_args(argc, argv) != 0){
prox_usage(argv[0]);
}
-
plog_init(prox_cfg.log_name, prox_cfg.log_name_pid);
- plog_info("=== " PROGRAM_NAME " " VERSION_STR " ===\n");
+ plog_info("=== " PROGRAM_NAME " %s ===\n", VERSION_STR());
plog_info("\tUsing DPDK %s\n", rte_version() + sizeof(RTE_VER_PREFIX));
+ plog_info("\tgit version %s\n", git_version);
+ set_term_env();
read_rdt_info();
if (prox_cfg.flags & DSF_LIST_TASK_MODES) {
@@ -1087,5 +1287,6 @@ int main(int argc, char **argv)
if (setup_prox(argc, argv) != 0)
return EXIT_FAILURE;
run(prox_cfg.flags);
+
return EXIT_SUCCESS;
}
diff --git a/VNFs/DPPD-PROX/mbuf_utils.h b/VNFs/DPPD-PROX/mbuf_utils.h
index 22d57a39..d48b5098 100644
--- a/VNFs/DPPD-PROX/mbuf_utils.h
+++ b/VNFs/DPPD-PROX/mbuf_utils.h
@@ -22,6 +22,7 @@
#include <rte_ip.h>
#include <rte_version.h>
#include <rte_ether.h>
+#include "prox_compat.h"
static void init_mbuf_seg(struct rte_mbuf *mbuf)
{
@@ -35,7 +36,7 @@ static void init_mbuf_seg(struct rte_mbuf *mbuf)
static uint16_t pkt_len_to_wire_size(uint16_t pkt_len)
{
- return (pkt_len < 60? 60 : pkt_len) + ETHER_CRC_LEN + 20;
+ return (pkt_len < 60? 60 : pkt_len) + PROX_RTE_ETHER_CRC_LEN + 20;
}
static uint16_t mbuf_wire_size(const struct rte_mbuf *mbuf)
@@ -45,7 +46,7 @@ static uint16_t mbuf_wire_size(const struct rte_mbuf *mbuf)
return pkt_len_to_wire_size(pkt_len);
}
-static uint16_t mbuf_calc_padlen(const struct rte_mbuf *mbuf, void *pkt, struct ipv4_hdr *ipv4)
+static uint16_t mbuf_calc_padlen(const struct rte_mbuf *mbuf, void *pkt, prox_rte_ipv4_hdr *ipv4)
{
uint16_t pkt_len = rte_pktmbuf_pkt_len(mbuf);
uint16_t ip_offset = (uint8_t *)ipv4 - (uint8_t*)pkt;
diff --git a/VNFs/DPPD-PROX/meson.build b/VNFs/DPPD-PROX/meson.build
new file mode 100644
index 00000000..48251e8d
--- /dev/null
+++ b/VNFs/DPPD-PROX/meson.build
@@ -0,0 +1,206 @@
+##
+## Copyright (c) 2021 Heinrich Kuhn <heinrich.kuhn@corigine.com>
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+
+project('dppd-prox', 'C',
+ version:
+ run_command(['git', 'describe',
+ '--abbrev=8', '--dirty', '--always']).stdout().strip(),
+ license: 'Apache',
+ default_options: ['buildtype=release', 'c_std=gnu99'],
+ meson_version: '>= 0.47'
+)
+
+cc = meson.get_compiler('c')
+
+# Configure options for prox
+# Grab the DPDK version here "manually" as it is not available in the dpdk_dep
+# object
+dpdk_version = run_command('pkg-config', '--modversion', 'libdpdk').stdout()
+
+if get_option('bng_qinq').enabled()
+ add_project_arguments('-DUSE_QINQ', language: 'c')
+endif
+
+if get_option('mpls_routing').enabled()
+ add_project_arguments('-DMPLS_ROUTING', language: 'c')
+endif
+
+if get_option('prox_stats').enabled()
+ add_project_arguments('-DPROX_STATS', language: 'c')
+endif
+
+if get_option('hw_direct_stats').enabled()
+ add_project_arguments('-DPROX_HW_DIRECT_STATS', language: 'c')
+endif
+
+if get_option('dbg')
+ add_project_arguments('-ggdb', language: 'c')
+endif
+
+if get_option('log')
+ add_project_arguments('-DPROX_MAX_LOG_LVL=2', language: 'c')
+endif
+
+if get_option('gen_decap_ipv6_to_ipv4_cksum').enabled()
+ add_project_arguments('-DGEN_DECAP_IPV6_TO_IPV4_CKSUM', language: 'c')
+endif
+
+if get_option('crc') == 'soft'
+ add_project_arguments('-DSOFT_CRC', language: 'c')
+endif
+
+cflags = [
+ '-DPROGRAM_NAME="prox"',
+ '-fno-stack-protector',
+ '-DPROX_PREFETCH_OFFSET=2',
+ '-DLATENCY_PER_PACKET',
+ '-DLATENCY_HISTOGRAM',
+ '-DGRE_TP',
+ '-D_GNU_SOURCE'] # for PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+# Add configured cflags to arguments
+foreach arg: cflags
+ add_project_arguments(arg, language: 'c')
+endforeach
+
+# enable warning flags if they are supported by the compiler
+warning_flags = [
+ '-Wno-unused',
+ '-Wno-unused-parameter',
+ '-Wno-unused-result',
+ '-Wno-deprecated-declarations']
+
+foreach arg: warning_flags
+ if cc.has_argument(arg)
+ add_project_arguments(arg, language: 'c')
+ endif
+endforeach
+
+# Attempt to find a suitable lua and add to deps
+lua_versions = ['lua', 'lua5.2', 'lua5.3']
+foreach i:lua_versions
+ lua_dep = dependency(i, required: false)
+ if not lua_dep.found()
+ lua_dep = cc.find_library(i, required: false)
+ endif
+ if lua_dep.found()
+ break
+ endif
+endforeach
+if not lua_dep.found()
+ error('Suitable lua version not found')
+endif
+
+has_sym_args = [
+ [ 'HAVE_LIBEDIT_EL_RFUNC_T', 'histedit.h',
+ 'el_rfunc_t' ],
+]
+config = configuration_data()
+foreach arg:has_sym_args
+ config.set(arg[0], cc.has_header_symbol(arg[1], arg[2]))
+endforeach
+configure_file(output : 'libedit_autoconf.h', configuration : config)
+
+# All other dependencies
+dpdk_dep = dependency('libdpdk', required: true)
+tinfo_dep = dependency('tinfo', required: false)
+threads_dep = dependency('threads', required: true)
+pcap_dep = dependency('pcap', required: true)
+ncurses_dep = dependency('ncurses', required: true)
+ncursesw_dep = dependency('ncursesw', required: true)
+libedit_dep = dependency('libedit', required: true)
+math_dep = cc.find_library('m', required : false)
+dl_dep = cc.find_library('dl', required : true)
+
+deps = [dpdk_dep,
+ tinfo_dep,
+ threads_dep,
+ pcap_dep,
+ ncurses_dep,
+ ncursesw_dep,
+ libedit_dep,
+ math_dep,
+ dl_dep,
+ lua_dep]
+
+# Explicitly add these to the dependency list
+deps += [cc.find_library('rte_bus_pci', required: true)]
+deps += [cc.find_library('rte_bus_vdev', required: true)]
+
+if dpdk_version.version_compare('<20.11.0')
+deps += [cc.find_library('rte_pmd_ring', required: true)]
+else
+deps += [cc.find_library('rte_net_ring', required: true)]
+endif
+
+sources = files(
+ 'task_init.c', 'handle_aggregator.c', 'handle_nop.c', 'handle_irq.c',
+ 'handle_arp.c', 'handle_impair.c', 'handle_lat.c', 'handle_qos.c',
+ 'handle_qinq_decap4.c', 'handle_routing.c', 'handle_untag.c',
+ 'handle_mplstag.c', 'handle_qinq_decap6.c',
+ 'handle_lb_qinq.c', 'handle_lb_pos.c', 'handle_lb_net.c',
+ 'handle_qinq_encap4.c', 'handle_qinq_encap6.c', 'handle_classify.c',
+ 'handle_l2fwd.c', 'handle_swap.c', 'handle_police.c', 'handle_acl.c',
+ 'handle_gen.c', 'handle_master.c', 'packet_utils.c', 'handle_mirror.c',
+ 'handle_genl4.c', 'handle_ipv6_tunnel.c', 'handle_read.c',
+ 'handle_cgnat.c', 'handle_nat.c', 'handle_dump.c', 'handle_tsc.c',
+ 'handle_fm.c', 'handle_lb_5tuple.c', 'handle_blockudp.c', 'toeplitz.c',
+ 'thread_nop.c', 'thread_generic.c', 'prox_args.c', 'prox_cfg.c',
+ 'prox_cksum.c', 'prox_port_cfg.c', 'cfgfile.c', 'clock.c',
+ 'commands.c', 'cqm.c', 'msr.c', 'defaults.c', 'display.c',
+ 'display_latency.c', 'display_latency_distr.c', 'display_mempools.c',
+ 'display_ports.c', 'display_rings.c', 'display_priority.c',
+ 'display_pkt_len.c', 'display_l4gen.c', 'display_tasks.c',
+ 'display_irq.c', 'log.c', 'hash_utils.c', 'main.c', 'parse_utils.c',
+ 'file_utils.c', 'run.c', 'input_conn.c', 'input_curses.c', 'rx_pkt.c',
+ 'lconf.c', 'tx_pkt.c', 'expire_cpe.c', 'ip_subnet.c', 'stats_port.c',
+ 'stats_mempool.c', 'stats_ring.c', 'stats_l4gen.c', 'stats_latency.c',
+ 'stats_global.c', 'stats_core.c', 'stats_task.c', 'stats_prio.c',
+ 'stats_irq.c', 'cmd_parser.c', 'input.c', 'prox_shared.c',
+ 'prox_lua_types.c', 'genl4_bundle.c', 'heap.c', 'genl4_stream_tcp.c',
+ 'genl4_stream_udp.c', 'cdf.c', 'stats.c', 'stats_cons_log.c',
+ 'stats_cons_cli.c', 'stats_parser.c', 'hash_set.c', 'prox_lua.c',
+ 'prox_malloc.c', 'prox_ipv6.c', 'prox_compat.c', 'handle_nsh.c')
+
+sources += files('rw_reg.c')
+
+# Include a couple of source files depending on DPDK support
+if cc.find_library('rte_crypto_ipsec_mb', required: false).found()
+ add_project_arguments('-DRTE_LIBRTE_PMD_AESNI_MB', language: 'c')
+ sources += files('handle_esp.c')
+else
+ warning('Building w/o IPSEC support')
+endif
+
+if cc.find_library('rte_pipeline', required: false).found()
+ sources += files('handle_pf_acl.c', 'thread_pipeline.c')
+endif
+
+# Generate the git_version.c file and add to sources
+git_version = configuration_data()
+git_version.set('GIT_VERSION', '@0@'.format(meson.project_version()))
+git_version_c = configure_file(input: 'git_version.c.in',
+ output: 'git_version.c',
+ configuration: git_version)
+
+git_version_file = join_paths(meson.current_build_dir(), 'git_version.c')
+sources += files(git_version_file)
+
+executable('prox',
+ sources,
+ c_args: cflags,
+ dependencies: deps,
+ install: true)
diff --git a/VNFs/DPPD-PROX/meson_options.txt b/VNFs/DPPD-PROX/meson_options.txt
new file mode 100644
index 00000000..afc2be7e
--- /dev/null
+++ b/VNFs/DPPD-PROX/meson_options.txt
@@ -0,0 +1,9 @@
+# Keep the options sorted alphabetically
+option('bng_qinq', type: 'feature', value: 'enabled')
+option('crc', type: 'string', value: 'hard')
+option('dbg', type: 'boolean', value: false)
+option('gen_decap_ipv6_to_ipv4_cksum', type: 'feature', value: 'enabled')
+option('hw_direct_stats', type: 'feature', value: 'enabled')
+option('log', type: 'boolean', value: true)
+option('mpls_routing', type: 'feature', value: 'enabled')
+option('prox_stats', type: 'feature', value: 'enabled')
diff --git a/VNFs/DPPD-PROX/packet_utils.c b/VNFs/DPPD-PROX/packet_utils.c
index 9e5bcde4..95ce7abc 100644
--- a/VNFs/DPPD-PROX/packet_utils.c
+++ b/VNFs/DPPD-PROX/packet_utils.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,28 +17,40 @@
#include <rte_lcore.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
+#include <rte_lpm.h>
+
#include "task_base.h"
#include "lconf.h"
#include "prefetch.h"
#include "log.h"
+#include "defines.h"
#include "handle_master.h"
#include "prox_port_cfg.h"
+#include "packet_utils.h"
+#include "prox_shared.h"
+#include "prox_lua.h"
+#include "hash_entry_types.h"
+#include "prox_compat.h"
+#include "prox_cfg.h"
+#include "defines.h"
+#include "prox_ipv6.h"
+#include "tx_pkt.h"
-#define IP4(x) x & 0xff, (x >> 8) & 0xff, (x >> 16) & 0xff, x >> 24
-
-static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_dst)
+static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_dst, uint16_t *vlan)
{
- struct vlan_hdr *vlan_hdr;
- struct ether_hdr *eth_hdr = (struct ether_hdr*)pkt;
- struct ipv4_hdr *ip;
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt;
+ prox_rte_ipv4_hdr *ip;
uint16_t ether_type = eth_hdr->ether_type;
- uint16_t l2_len = sizeof(struct ether_hdr);
+ uint16_t l2_len = sizeof(prox_rte_ether_hdr);
+ *vlan = 0;
// Unstack VLAN tags
- while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(struct vlan_hdr) < len)) {
- vlan_hdr = (struct vlan_hdr *)((uint8_t *)pkt + l2_len);
+ while (((ether_type == ETYPE_VLAN) || (ether_type == ETYPE_8021ad)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
+ vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
l2_len +=4;
ether_type = vlan_hdr->eth_proto;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F); // Store VLAN, or CVLAN if QinQ
}
switch (ether_type) {
@@ -60,8 +72,8 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_
break;
}
- if (l2_len && (l2_len + sizeof(struct ipv4_hdr) <= len)) {
- struct ipv4_hdr *ip = (struct ipv4_hdr *)((uint8_t *)pkt + l2_len);
+ if (l2_len && (l2_len + sizeof(prox_rte_ipv4_hdr) <= len)) {
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr *)((uint8_t *)pkt + l2_len);
// TODO: implement LPM => replace ip_dst by next hop IP DST
*ip_dst = ip->dst_addr;
return 0;
@@ -69,89 +81,400 @@ static inline int find_ip(struct ether_hdr_arp *pkt, uint16_t len, uint32_t *ip_
return -1;
}
-int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst)
+static inline void find_vlan(struct ether_hdr_arp *pkt, uint16_t len, uint16_t *vlan)
+{
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ether_hdr *eth_hdr = (prox_rte_ether_hdr*)pkt;
+ uint16_t ether_type = eth_hdr->ether_type;
+ uint16_t l2_len = sizeof(prox_rte_ether_hdr);
+
+ *vlan = 0;
+ // Unstack VLAN tags
+ while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
+ vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
+ l2_len +=4;
+ ether_type = vlan_hdr->eth_proto;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F); // Store VLAN, or CVLAN if QinQ
+ }
+}
+
+static inline struct ipv6_addr *find_ip6(prox_rte_ether_hdr *pkt, uint16_t len, struct ipv6_addr *ip_dst, uint16_t *vlan)
+{
+ uint16_t ether_type = pkt->ether_type;
+ uint16_t l2_len = sizeof(prox_rte_ether_hdr);
+ *vlan = 0;
+
+ if ((ether_type == ETYPE_VLAN) || (ether_type == ETYPE_8021ad)) {
+ prox_rte_vlan_hdr *vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)pkt + l2_len);
+ ether_type = vlan_hdr->eth_proto;
+ l2_len +=4;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);
+ if (ether_type == ETYPE_VLAN) {
+ vlan_hdr = (prox_rte_vlan_hdr *)(vlan_hdr + 1);
+ ether_type = vlan_hdr->eth_proto;
+ l2_len +=4;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);
+ }
+ }
+ if ((ether_type == ETYPE_IPv6) && (l2_len + sizeof(prox_rte_ipv6_hdr) <= len)) {
+ prox_rte_ipv6_hdr *ip = (prox_rte_ipv6_hdr *)((uint8_t *)pkt + l2_len);
+ // TODO: implement LPM => replace ip_dst by next hop IP DST
+ memcpy(ip_dst, &ip->dst_addr, sizeof(struct ipv6_addr));
+ return (struct ipv6_addr *)&ip->src_addr;
+ }
+ return NULL;
+}
+
+void send_unsollicited_neighbour_advertisement(struct task_base *tbase)
+{
+ int ret;
+ uint8_t out = 0, port_id = tbase->l3.reachable_port_id;
+ struct rte_mbuf *mbuf = NULL;
+
+ if (*(__int128 *)(&tbase->l3.local_ipv6) != 0) {
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = port_id;
+ build_neighbour_advertisement(tbase->l3.tmaster, mbuf, &prox_port_cfg[port_id].eth_addr, &tbase->l3.local_ipv6, PROX_UNSOLLICITED, prox_port_cfg[port_id].vlan_tags[0]);
+ tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
+ TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+ } else {
+ plog_err("Failed to get a mbuf from arp/ndp mempool\n");
+ return;
+ }
+ }
+ if (*(__int128 *)(&tbase->l3.global_ipv6) != 0) {
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = port_id;
+ build_neighbour_advertisement(tbase->l3.tmaster, mbuf, &prox_port_cfg[port_id].eth_addr, &tbase->l3.global_ipv6, PROX_UNSOLLICITED, prox_port_cfg[port_id].vlan_tags[0]);
+ tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
+ TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+ } else {
+ plog_err("Failed to get a mbuf from arp/ndp mempool\n");
+ return;
+ }
+ }
+ if (mbuf == NULL) {
+ plog_err("No neighbor advertisement sent as no local or global ipv6\n");
+ }
+}
+
+static void send_router_sollicitation(struct task_base *tbase, struct task_args *targ)
+{
+ int ret;
+ uint8_t out = 0, port_id = tbase->l3.reachable_port_id;
+ struct rte_mbuf *mbuf;
+
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = port_id;
+ build_router_sollicitation(mbuf, &prox_port_cfg[port_id].eth_addr, &targ->local_ipv6, prox_port_cfg[port_id].vlan_tags[0]);
+ tbase->aux->tx_ctrlplane_pkt(tbase, &mbuf, 1, &out);
+ TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+ } else {
+ plog_err("Failed to get a mbuf from arp/ndp mempool\n");
+ }
+}
+
+/* This implementation could be improved: instead of checking each time we send a packet whether we need also
+ to send an ARP, we should only check whether the MAC is valid.
+ We should check arp_ndp_retransmit_timeout in the master process. This would also require the generating task to clear its arp ring
+ to avoid sending many ARP while starting after a long stop.
+ We could also check for reachable_timeout in the master so that dataplane has only to check whether MAC is available
+ but this would require either thread safety, or the exchange of information between master and generating core.
+ */
+
+static inline int add_key_and_send_arp(struct rte_hash *ip_hash, uint32_t *ip_dst, struct arp_table *entries, uint64_t tsc, uint64_t hz, uint32_t arp_ndp_retransmit_timeout, prox_next_hop_index_type nh, uint64_t **time)
+{
+ int ret = rte_hash_add_key(ip_hash, (const void *)ip_dst);
+ if (unlikely(ret < 0)) {
+ // No reason to send ARP, as reply would be anyhow ignored
+ plogx_err("Unable to add ip "IPv4_BYTES_FMT" in mac_hash\n", IP4(*ip_dst));
+ return DROP_MBUF;
+ } else {
+ entries[ret].ip = *ip_dst;
+ entries[ret].nh = nh;
+ *time = &entries[ret].arp_ndp_retransmit_timeout;
+ }
+ return SEND_ARP_ND;
+}
+
+static inline int update_mac_and_send_mbuf(struct arp_table *entry, prox_rte_ether_addr *mac, uint64_t tsc, uint64_t hz, uint32_t arp_ndp_retransmit_timeout, uint64_t **time)
+{
+ if (likely((tsc < entry->arp_ndp_retransmit_timeout) && (tsc < entry->reachable_timeout))) {
+ memcpy(mac, &entry->mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF;
+ } else if (tsc > entry->arp_ndp_retransmit_timeout) {
+ // long time since we have sent an arp, send arp
+ *time = &entry->arp_ndp_retransmit_timeout;
+ if (tsc < entry->reachable_timeout){
+ // MAC is valid in the table => send also the mbuf
+ memcpy(mac, &entry->mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF_AND_ARP_ND;
+ } else {
+ // MAC still unknown, or timed out => only send ARP
+ return SEND_ARP_ND;
+ }
+ }
+ // MAC is unknown and we already sent an ARP recently, drop mbuf and wait for ARP reply
+ return DROP_MBUF;
+}
+
+int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst, uint16_t *vlan, uint64_t **time, uint64_t tsc)
{
const uint64_t hz = rte_get_tsc_hz();
struct ether_hdr_arp *packet = rte_pktmbuf_mtod(mbuf, struct ether_hdr_arp *);
- struct ether_addr *mac = &packet->ether_hdr.d_addr;
+ prox_rte_ether_addr *mac = &packet->ether_hdr.d_addr;
+ prox_next_hop_index_type next_hop_index;
+ static uint64_t last_tsc = 0, n_no_route = 0;
- uint64_t tsc = rte_rdtsc();
struct l3_base *l3 = &(tbase->l3);
+
+ // First find the next hop
+ if (l3->ipv4_lpm) {
+ // A routing table was configured
+ // If a gw (gateway_ipv4) is also specified, it is used as default gw only i.e. lowest priority (shortest prefix)
+ // This is implemented automatically through lpm
+ uint16_t len = rte_pktmbuf_pkt_len(mbuf);
+ if (find_ip(packet, len, ip_dst, vlan) != 0) {
+			// Unable to find IP address => non IP packet => send it as is
+ return SEND_MBUF;
+ }
+ if (unlikely(rte_lpm_lookup(l3->ipv4_lpm, rte_bswap32(*ip_dst), &next_hop_index) != 0)) {
+ // Prevent printing too many messages
+ n_no_route++;
+ if (tsc > last_tsc + rte_get_tsc_hz()) {
+ plogx_err("No route to IP "IPv4_BYTES_FMT" (%ld times)\n", IP4(*ip_dst), n_no_route);
+ last_tsc = tsc;
+ n_no_route = 0;
+ }
+ return DROP_MBUF;
+ }
+ struct arp_table *entry = &l3->next_hops[next_hop_index];
+
+ if (entry->ip) {
+ *ip_dst = entry->ip;
+ return update_mac_and_send_mbuf(entry, mac, tsc, hz, l3->arp_ndp_retransmit_timeout, time);
+ }
+
+ // no next ip: this is a local route
+ // Find IP in lookup table. Send ARP if not found
+ int ret = rte_hash_lookup(l3->ip_hash, (const void *)ip_dst);
+ if (unlikely(ret < 0)) {
+ // IP not found, try to send an ARP
+ return add_key_and_send_arp(l3->ip_hash, ip_dst, l3->arp_table, tsc, hz, l3->arp_ndp_retransmit_timeout, MAX_HOP_INDEX, time);
+ } else {
+ return update_mac_and_send_mbuf(&l3->arp_table[ret], mac, tsc, hz, l3->arp_ndp_retransmit_timeout, time);
+ }
+ return 0;
+ }
+ // No Routing table specified: only a local ip and maybe a gateway
+	// Old default behavior: if a gw is specified, ALL packets go to this gateway (even those we could send w/o the gw)
+
+ uint16_t len = rte_pktmbuf_pkt_len(mbuf);
if (l3->gw.ip) {
- if (likely((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_update_time) && (tsc < l3->gw.arp_timeout))) {
- memcpy(mac, &l3->gw.mac, sizeof(struct ether_addr));
- return 0;
- } else if (tsc > l3->gw.arp_update_time) {
- // long time since we have sent an arp, send arp
- l3->gw.arp_update_time = tsc + hz;
+ find_vlan(packet, len, vlan);
+ if (likely((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.arp_ndp_retransmit_timeout) && (tsc < l3->gw.reachable_timeout))) {
+ memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF;
+ } else if (tsc > l3->gw.arp_ndp_retransmit_timeout) {
+ // long time since we have successfully sent an arp, send arp
+ // If sending ARP failed (ring full) then arp_ndp_retransmit_timeout is not updated to avoid having to wait 1 sec to send ARP REQ again
+ *time = &l3->gw.arp_ndp_retransmit_timeout;
+ l3->gw.arp_ndp_retransmit_timeout = tsc + l3->arp_ndp_retransmit_timeout * hz / 1000;
+
*ip_dst = l3->gw.ip;
- return -1;
+ if ((l3->flags & FLAG_DST_MAC_KNOWN) && (tsc < l3->gw.reachable_timeout)){
+ // MAC is valid in the table => send also the mbuf
+ memcpy(mac, &l3->gw.mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF_AND_ARP_ND;
+ } else {
+ // MAC still unknown, or timed out => only send ARP
+ return SEND_ARP_ND;
+ }
+ } else {
+ // MAC is unknown and we already sent an ARP recently, drop mbuf and wait for ARP reply
+ return DROP_MBUF;
}
- return -2;
}
- uint16_t len = rte_pktmbuf_pkt_len(mbuf);
- if (find_ip(packet, len, ip_dst) != 0) {
- return 0;
+ if (find_ip(packet, len, ip_dst, vlan) != 0) {
+		// Unable to find IP address => non IP packet => send it as is
+ return SEND_MBUF;
}
if (likely(l3->n_pkts < 4)) {
for (unsigned int idx = 0; idx < l3->n_pkts; idx++) {
if (*ip_dst == l3->optimized_arp_table[idx].ip) {
- if ((tsc < l3->optimized_arp_table[idx].arp_update_time) && (tsc < l3->optimized_arp_table[idx].arp_timeout)) {
- memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(struct ether_addr));
- return 0;
- } else if (tsc > l3->optimized_arp_table[idx].arp_update_time) {
- l3->optimized_arp_table[idx].arp_update_time = tsc + hz;
- return -1;
- } else {
- return -2;
- }
+ return update_mac_and_send_mbuf(&l3->optimized_arp_table[idx], mac, tsc, hz, l3->arp_ndp_retransmit_timeout, time);
}
}
+ // IP address not found in table
l3->optimized_arp_table[l3->n_pkts].ip = *ip_dst;
- l3->optimized_arp_table[l3->n_pkts].arp_update_time = tsc + hz;
+ *time = &l3->optimized_arp_table[l3->n_pkts].arp_ndp_retransmit_timeout;
l3->n_pkts++;
- if (l3->n_pkts < 4)
- return -1;
+ if (l3->n_pkts < 4) {
+ return SEND_ARP_ND;
+ }
- // We have ** many ** IP addresses; lets use hash table instead
+	// We have too many IP addresses to search linearly; let's use hash table instead => copy all entries in hash table
for (uint32_t idx = 0; idx < l3->n_pkts; idx++) {
uint32_t ip = l3->optimized_arp_table[idx].ip;
int ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
if (ret < 0) {
- plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(ip));
+ // This should not happen as few entries so far.
+ // If it happens, we still send the ARP as easier:
+ // If the ARP corresponds to this error, the ARP reply will be ignored
+ // If ARP does not correspond to this error/ip, then ARP reply will be handled.
+ plogx_err("Unable add ip "IPv4_BYTES_FMT" in mac_hash (already %d entries)\n", IP4(ip), idx);
} else {
memcpy(&l3->arp_table[ret], &l3->optimized_arp_table[idx], sizeof(struct arp_table));
}
}
- return -1;
+ return SEND_ARP_ND;
} else {
- // Find mac in lookup table. Send ARP if not found
+ // Find IP in lookup table. Send ARP if not found
int ret = rte_hash_lookup(l3->ip_hash, (const void *)ip_dst);
if (unlikely(ret < 0)) {
- int ret = rte_hash_add_key(l3->ip_hash, (const void *)ip_dst);
+ // IP not found, try to send an ARP
+ return add_key_and_send_arp(l3->ip_hash, ip_dst, &l3->arp_table[ret], tsc, hz, l3->arp_ndp_retransmit_timeout, MAX_HOP_INDEX, time);
+ } else {
+ // IP has been found
+ return update_mac_and_send_mbuf(&l3->arp_table[ret], mac, tsc, hz, l3->arp_ndp_retransmit_timeout, time);
+ }
+ }
+ // Should not happen
+ return DROP_MBUF;
+}
+
+int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv6_addr *ip_dst, uint16_t *vlan, uint64_t tsc)
+{
+ const uint64_t hz = rte_get_tsc_hz();
+ prox_rte_ether_hdr *packet = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_addr *mac = &packet->d_addr;
+ struct ipv6_addr *used_ip_src;
+
+ uint16_t len = rte_pktmbuf_pkt_len(mbuf);
+
+ struct ipv6_addr *pkt_src_ip6;
+ if ((pkt_src_ip6 = find_ip6(packet, len, ip_dst, vlan)) == NULL) {
+		// Unable to find IP address => non IP packet => send it as is
+ return SEND_MBUF;
+ }
+ struct l3_base *l3 = &(tbase->l3);
+
+ // Configure source IP
+ if (*(uint64_t *)(&l3->local_ipv6) == *(uint64_t *)ip_dst) {
+ // Same prefix as local -> use local
+ used_ip_src = &l3->local_ipv6;
+ } else if (*(uint64_t *)(&l3->global_ipv6) == *(uint64_t *)ip_dst) {
+ // Same prefix as global -> use global
+ used_ip_src = &l3->global_ipv6;
+ } else if (*(__int128 *)(&l3->gw.ip6) != 0) {
+ used_ip_src = &l3->global_ipv6;
+ memcpy(ip_dst, &l3->gw.ip6, sizeof(struct ipv6_addr));
+ } else if (*(__int128 *)(&l3->global_ipv6) != 0) {
+ // Global IP is defined -> use it
+ used_ip_src = &l3->global_ipv6;
+ } else {
+ plog_info("Error as trying to send a packet to "IPv6_BYTES_FMT" using "IPv6_BYTES_FMT" (local)\n", IPv6_BYTES(ip_dst->bytes), IPv6_BYTES(l3->local_ipv6.bytes));
+ return DROP_MBUF;
+ }
+ rte_memcpy(pkt_src_ip6, used_ip_src, sizeof(struct ipv6_addr));
+
+ // Configure dst mac
+ if (likely(l3->n_pkts < 4)) {
+ for (unsigned int idx = 0; idx < l3->n_pkts; idx++) {
+ if (*(__int128 *)ip_dst == *(__int128 *)(&l3->optimized_arp_table[idx].ip6)) {
+ // IP address already in table
+ if ((tsc < l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout) && (tsc < l3->optimized_arp_table[idx].reachable_timeout)) {
+ // MAC address was recently updated in table, use it
+ // plog_dbg("Valid MAC address found => send packet\n");
+ rte_memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF;
+ } else if (tsc > l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout) {
+ // NDP not sent since a long time, send NDP
+ l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout = tsc + l3->arp_ndp_retransmit_timeout * hz / 1000;
+ if (tsc < l3->optimized_arp_table[idx].reachable_timeout) {
+ // MAC still valid => also send mbuf
+ plog_dbg("Valid MAC found but NDP retransmit timeout => send packet and NDP\n");
+ memcpy(mac, &l3->optimized_arp_table[idx].mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF_AND_ARP_ND;
+ } else {
+ plog_dbg("Unknown MAC => send NDP but cannot send packet\n");
+				// MAC invalid => only send NDP
+ return SEND_ARP_ND;
+ }
+ } else {
+ // NDP timeout elapsed, MAC not valid anymore but waiting for NDP reply
+ // plog_dbg("NDP reachable timeout elapsed - waiting for NDP reply\n");
+ return DROP_MBUF;
+ }
+ }
+ }
+ // IP address not found in table
+ memcpy(&l3->optimized_arp_table[l3->n_pkts].ip6, ip_dst, sizeof(struct ipv6_addr));
+ l3->optimized_arp_table[l3->n_pkts].arp_ndp_retransmit_timeout = tsc + l3->arp_ndp_retransmit_timeout * hz / 1000;
+ l3->n_pkts++;
+
+ if (l3->n_pkts < 4) {
+ return SEND_ARP_ND;
+ }
+
+	// We have too many IP addresses to search linearly; let's use hash table instead => copy all entries in hash table
+ for (uint32_t idx = 0; idx < l3->n_pkts; idx++) {
+ struct ipv6_addr *ip6 = &l3->optimized_arp_table[idx].ip6;
+ int ret = rte_hash_add_key(l3->ip6_hash, (const void *)ip6);
+ if (ret < 0) {
+ // This should not happen as few entries so far.
+ // If it happens, we still send the NDP as easier:
+ // If the NDP corresponds to this error, the NDP reply will be ignored
+ // If NDP does not correspond to this error/ip, then NDP reply will be handled.
+ plogx_err("Unable add ip "IPv6_BYTES_FMT" in mac_hash (already %d entries)\n", IPv6_BYTES(ip6->bytes), idx);
+ } else {
+ memcpy(&l3->arp_table[ret], &l3->optimized_arp_table[idx], sizeof(struct arp_table));
+ }
+ }
+ return SEND_ARP_ND;
+ } else {
+ // Find IP in lookup table. Send ND if not found
+ int ret = rte_hash_lookup(l3->ip6_hash, (const void *)ip_dst);
+ if (unlikely(ret < 0)) {
+ // IP not found, try to send an ND
+ int ret = rte_hash_add_key(l3->ip6_hash, (const void *)ip_dst);
if (ret < 0) {
- plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(*ip_dst));
- return -2;
+ // No reason to send NDP, as reply would be anyhow ignored
+ plogx_err("Unable to add ip "IPv6_BYTES_FMT" in mac_hash\n", IPv6_BYTES(ip_dst->bytes));
+ return DROP_MBUF;
} else {
- l3->arp_table[ret].ip = *ip_dst;
- l3->arp_table[ret].arp_update_time = tsc + hz;
+ memcpy(&l3->arp_table[ret].ip6, ip_dst, sizeof(struct ipv6_addr));
+ l3->arp_table[ret].arp_ndp_retransmit_timeout = tsc + l3->arp_ndp_retransmit_timeout * hz / 1000;
}
- return -1;
+ return SEND_ARP_ND;
} else {
- if ((tsc < l3->arp_table[ret].arp_update_time) && (tsc < l3->arp_table[ret].arp_timeout)) {
- memcpy(mac, &l3->arp_table[ret].mac, sizeof(struct ether_addr));
- return 0;
- } else if (tsc > l3->arp_table[ret].arp_update_time) {
- l3->arp_table[ret].arp_update_time = tsc + hz;
- return -1;
+ // IP has been found
+ if (likely((tsc < l3->arp_table[ret].arp_ndp_retransmit_timeout) && (tsc < l3->arp_table[ret].reachable_timeout))) {
+ // MAC still valid and NDP sent recently
+ memcpy(mac, &l3->arp_table[ret].mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF;
+ } else if (tsc > l3->arp_table[ret].arp_ndp_retransmit_timeout) {
+ // NDP not sent since a long time, send NDP
+ l3->arp_table[ret].arp_ndp_retransmit_timeout = tsc + l3->arp_ndp_retransmit_timeout * hz / 1000;
+ if (tsc < l3->arp_table[ret].reachable_timeout) {
+ // MAC still valid => send also MBUF
+ memcpy(mac, &l3->arp_table[ret].mac, sizeof(prox_rte_ether_addr));
+ return SEND_MBUF_AND_ARP_ND;
+ } else {
+ return SEND_ARP_ND;
+ }
} else {
- return -2;
+ return DROP_MBUF;
}
}
}
- return 0;
+ // Should not happen
+ return DROP_MBUF;
}
void task_init_l3(struct task_base *tbase, struct task_args *targ)
@@ -169,32 +492,159 @@ void task_init_l3(struct task_base *tbase, struct task_args *targ)
.key_len = sizeof(uint32_t),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
+ .socket_id = socket_id,
};
- tbase->l3.ip_hash = rte_hash_create(&hash_params);
- PROX_PANIC(tbase->l3.ip_hash == NULL, "Failed to set up ip hash table\n");
+ if (targ->flags & TASK_ARG_L3) {
+ plog_info("\t\tInitializing L3 (IPv4)\n");
+ tbase->l3.ip_hash = rte_hash_create(&hash_params);
+ PROX_PANIC(tbase->l3.ip_hash == NULL, "Failed to set up ip hash table\n");
+ hash_name[0]++;
+ }
+ if (targ->flags & TASK_ARG_NDP) {
+ plog_info("\t\tInitializing NDP (IPv6)\n");
+ hash_params.key_len = sizeof(struct ipv6_addr);
+ tbase->l3.ip6_hash = rte_hash_create(&hash_params);
+ PROX_PANIC(tbase->l3.ip6_hash == NULL, "Failed to set up ip hash table\n");
+ }
tbase->l3.arp_table = (struct arp_table *)prox_zmalloc(n_entries * sizeof(struct arp_table), socket_id);
- PROX_PANIC(tbase->l3.arp_table == NULL, "Failed to allocate memory for %u entries in arp table\n", n_entries);
- plog_info("\tarp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));
+ PROX_PANIC(tbase->l3.arp_table == NULL, "Failed to allocate memory for %u entries in arp/ndp table\n", n_entries);
+ plog_info("\t\tarp/ndp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));
targ->lconf->ctrl_func_p[targ->task] = handle_ctrl_plane_pkts;
targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
tbase->l3.gw.ip = rte_cpu_to_be_32(targ->gateway_ipv4);
+ memcpy(&tbase->l3.gw.ip6, &targ->gateway_ipv6, sizeof(struct ipv6_addr));
tbase->flags |= TASK_L3;
tbase->l3.core_id = targ->lconf->id;
tbase->l3.task_id = targ->id;
tbase->l3.tmaster = targ->tmaster;
+ tbase->l3.seed = (uint)rte_rdtsc();
+ if (targ->reachable_timeout != 0)
+ tbase->l3.reachable_timeout = targ->reachable_timeout;
+ else
+ tbase->l3.reachable_timeout = DEFAULT_ARP_TIMEOUT;
+ if (targ->arp_ndp_retransmit_timeout != 0)
+ tbase->l3.arp_ndp_retransmit_timeout = targ->arp_ndp_retransmit_timeout;
+ else
+ tbase->l3.arp_ndp_retransmit_timeout = DEFAULT_ARP_UPDATE_TIME;
}
void task_start_l3(struct task_base *tbase, struct task_args *targ)
{
+ const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
+ const int NB_ARP_ND_MBUF = 1024;
+ const int ARP_ND_MBUF_SIZE = 2048;
+ const int NB_CACHE_ARP_ND_MBUF = 256;
+
struct prox_port_cfg *port = find_reachable_port(targ);
- if (port) {
+ if (port && (tbase->l3.arp_nd_pool == NULL)) {
+ static char name[] = "arp0_pool";
tbase->l3.reachable_port_id = port - prox_port_cfg;
- if (targ->local_ipv4) {
- tbase->local_ipv4 = rte_be_to_cpu_32(targ->local_ipv4);
- register_ip_to_ctrl_plane(tbase->l3.tmaster, tbase->local_ipv4, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
+ if ((targ->local_ipv4 && port->ip_addr[0].ip) && (targ->local_ipv4 != port->ip_addr[0].ip)) {
+ PROX_PANIC(1, "local_ipv4 in core section ("IPv4_BYTES_FMT") differs from port section ("IPv4_BYTES_FMT")\n", IP4(rte_be_to_cpu_32(targ->local_ipv4)), IP4(rte_be_to_cpu_32(port->ip_addr[0].ip)));
+ }
+ if ((targ->local_ipv4 && port->ip_addr[0].ip) && (targ->local_prefix != port->ip_addr[0].prefix)) {
+ PROX_PANIC(1, "local_ipv4 prefix in core section (%d) differs from port section (%d)\n", targ->local_prefix, port->ip_addr[0].prefix);
+ }
+ if (!port->ip_addr[0].ip && targ->local_ipv4) {
+ port->ip_addr[0].ip = targ->local_ipv4;
+ port->ip_addr[0].prefix = targ->local_prefix;
+ port->n_vlans = 1;
+ port->vlan_tags[0] = 0;
+ plog_info("Setting port local_ipv4 from core %d local_ipv4 to "IPv4_BYTES_FMT"\n", tbase->l3.reachable_port_id, IP4(rte_be_to_cpu_32(port->ip_addr[0].ip)));
+ }
+ for (int vlan_id = 0; vlan_id < port->n_vlans; vlan_id++) {
+ if (port->ip_addr[vlan_id].ip)
+ register_ip_to_ctrl_plane(tbase->l3.tmaster, rte_be_to_cpu_32(port->ip_addr[vlan_id].ip), tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
}
+ if (strcmp(targ->route_table, "") != 0) {
+ struct lpm4 *lpm;
+ int ret;
+
+ PROX_PANIC(port->n_vlans == 0, "missing local_ipv4 while route table is specified in L3 mode\n");
+
+			// LPM might be modified at runtime => do not share with other cores
+ ret = lua_to_lpm4(prox_lua(), GLOBAL, targ->route_table, socket_id, &lpm);
+ PROX_PANIC(ret, "Failed to load IPv4 LPM:\n%s\n", get_lua_to_errors());
+
+ tbase->l3.ipv4_lpm = lpm->rte_lpm;
+ tbase->l3.next_hops = prox_zmalloc(sizeof(*tbase->l3.next_hops) * MAX_HOP_INDEX, socket_id);
+ PROX_PANIC(tbase->l3.next_hops == NULL, "Could not allocate memory for next hop\n");
+
+ for (uint32_t i = 0; i < MAX_HOP_INDEX; i++) {
+ if (!lpm->next_hops[i].ip_dst)
+ continue;
+ tbase->l3.nb_gws++;
+ tbase->l3.next_hops[i].ip = rte_bswap32(lpm->next_hops[i].ip_dst);
+ int tx_port = lpm->next_hops[i].mac_port.out_idx;
+ // gen only supports one port right now .... hence port = 0
+ if ((tx_port > targ->nb_txports - 1) && (tx_port > targ->nb_txrings - 1)) {
+ PROX_PANIC(1, "Routing Table contains port %d but only %d tx port/ %d ring:\n", tx_port, targ->nb_txports, targ->nb_txrings);
+ }
+ }
+ plog_info("Using routing table %s in l3 mode, with %d gateways\n", targ->route_table, tbase->l3.nb_gws);
+
+			// Last but one (x n_vlans) "next_hop_index" is not a gateway but a direct route
+ for (int vlan_id = 0; vlan_id < port->n_vlans; vlan_id++) {
+ tbase->l3.next_hops[tbase->l3.nb_gws].ip = 0;
+ ret = rte_lpm_add(tbase->l3.ipv4_lpm, port->ip_addr[vlan_id].ip, port->ip_addr[vlan_id].prefix, tbase->l3.nb_gws++);
+ PROX_PANIC(ret, "Failed to add local_ipv4 "IPv4_BYTES_FMT"/%d to lpm\n", IP4(port->ip_addr[vlan_id].ip), port->ip_addr[vlan_id].prefix);
+ }
+
+ // Last "next_hop_index" is default gw
+ tbase->l3.next_hops[tbase->l3.nb_gws].ip = rte_bswap32(targ->gateway_ipv4);
+ if (targ->gateway_ipv4) {
+ ret = rte_lpm_add(tbase->l3.ipv4_lpm, targ->gateway_ipv4, 0, tbase->l3.nb_gws++);
+ PROX_PANIC(ret, "Failed to add gateway_ipv4 "IPv4_BYTES_FMT"/%d to lpm\n", IP4(tbase->l3.gw.ip), 0);
+ }
+ }
+
+ master_init_vdev(tbase->l3.tmaster, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
+
+ // Create IPv6 addr if none were configured
+ if (targ->flags & TASK_ARG_NDP) {
+ if (!memcmp(&targ->local_ipv6, &null_addr, sizeof(struct ipv6_addr))) {
+ set_link_local(&targ->local_ipv6);
+ set_EUI(&targ->local_ipv6, &port->eth_addr);
+ }
+ plog_info("\tCore %d, task %d, local IPv6 addr is "IPv6_BYTES_FMT" (%s)\n",
+ targ->lconf->id, targ->id,
+ IPv6_BYTES(targ->local_ipv6.bytes),
+ IP6_Canonical(&targ->local_ipv6));
+ memcpy(&tbase->l3.local_ipv6, &targ->local_ipv6, sizeof(struct ipv6_addr));
+
+ if (memcmp(&targ->global_ipv6, &null_addr, sizeof(struct ipv6_addr))) {
+ memcpy(&tbase->l3.global_ipv6, &targ->global_ipv6, sizeof(struct ipv6_addr));
+ plog_info("\tCore %d, task %d, global IPv6 addr is "IPv6_BYTES_FMT" (%s)\n",
+ targ->lconf->id, targ->id,
+ IPv6_BYTES(targ->global_ipv6.bytes),
+ IP6_Canonical(&targ->global_ipv6));
+ }
+ if (targ->ipv6_router)
+ register_router_to_ctrl_plane(tbase->l3.tmaster, tbase->l3.reachable_port_id, targ->lconf->id, targ->id, &targ->local_ipv6, &targ->global_ipv6, &targ->router_prefix);
+ else
+ register_node_to_ctrl_plane(tbase->l3.tmaster, &targ->local_ipv6, &targ->global_ipv6, tbase->l3.reachable_port_id, targ->lconf->id, targ->id);
+ }
+
+ name[3]++;
+ struct rte_mempool *ret = rte_mempool_create(name, NB_ARP_ND_MBUF, ARP_ND_MBUF_SIZE, NB_CACHE_ARP_ND_MBUF,
+ sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
+ rte_socket_id(), 0);
+ PROX_PANIC(ret == NULL, "Failed to allocate ARP/ND memory pool on socket %u with %u elements\n",
+ rte_socket_id(), NB_ARP_ND_MBUF);
+ plog_info("\tMempool %p (%s) size = %u * %u cache %u, socket %d (for ARP/ND)\n", ret, name, NB_ARP_ND_MBUF,
+ ARP_ND_MBUF_SIZE, NB_CACHE_ARP_ND_MBUF, rte_socket_id());
+ tbase->l3.arp_nd_pool = ret;
+ if ((targ->flags & TASK_ARG_NDP) && (!targ->ipv6_router)) {
+ plog_info("Sending Router Sollicitation\n");
+ send_router_sollicitation(tbase, targ);
+ }
+ if ((targ->flags & TASK_ARG_NDP) && (targ->flags & TASK_ARG_SEND_NA_AT_STARTUP)) {
+ plog_info("Sending unsollicited Neighbour Advertisement\n");
+ send_unsollicited_neighbour_advertisement(tbase);
+
+ }
}
}
@@ -204,21 +654,68 @@ void task_set_gateway_ip(struct task_base *tbase, uint32_t ip)
tbase->flags &= ~FLAG_DST_MAC_KNOWN;
}
-void task_set_local_ip(struct task_base *tbase, uint32_t ip)
+static void reset_arp_ndp_retransmit_timeout(struct l3_base *l3, uint32_t ip)
{
- tbase->local_ipv4 = ip;
+ uint32_t idx;
+ plogx_dbg("MAC entry for IP "IPv4_BYTES_FMT" timeout in kernel\n", IP4(ip));
+
+ if (l3->ipv4_lpm) {
+ int ret = rte_hash_lookup(l3->ip_hash, (const void *)&ip);
+ if (ret >= 0)
+ l3->arp_table[ret].arp_ndp_retransmit_timeout = 0;
+ } else if (ip == l3->gw.ip) {
+ l3->gw.arp_ndp_retransmit_timeout = 0;
+ } else if (l3->n_pkts < 4) {
+ for (idx = 0; idx < l3->n_pkts; idx++) {
+ uint32_t ip_dst = l3->optimized_arp_table[idx].ip;
+ if (ip_dst == ip)
+ break;
+ }
+ if (idx < l3->n_pkts) {
+ l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout = 0;
+ }
+ } else {
+ int ret = rte_hash_lookup(l3->ip_hash, (const void *)&ip);
+ if (ret >= 0)
+ l3->arp_table[ret].arp_ndp_retransmit_timeout = 0;
+ }
+ return;
}
+static prox_next_hop_index_type get_nh_index(struct task_base *tbase, uint32_t gw_ip)
+{
+ // Check if gateway already exists
+ for (prox_next_hop_index_type i = 0; i < tbase->l3.nb_gws; i++) {
+ if (tbase->l3.next_hops[i].ip == gw_ip) {
+ return i;
+ }
+ }
+ if (tbase->l3.nb_gws < MAX_HOP_INDEX) {
+ tbase->l3.next_hops[tbase->l3.nb_gws].ip = gw_ip;
+ tbase->l3.nb_gws++;
+ return tbase->l3.nb_gws - 1;
+ } else
+ return MAX_HOP_INDEX;
+}
void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
{
uint8_t out[1];
const uint64_t hz = rte_get_tsc_hz();
- uint32_t ip, ip_dst, idx;
- int j;
+ uint32_t ip, ip_dst, idx, gateway_ip, prefix;
+ prox_next_hop_index_type gateway_index;
+ int j, ret, modified_route;
+ uint64_t addr;
+ struct ipv6_addr *ip6, *ip6_dst;
uint16_t command;
- struct ether_hdr_arp *hdr;
+ prox_rte_ether_hdr *hdr;
+ struct ether_hdr_arp *hdr_arp;
struct l3_base *l3 = &tbase->l3;
uint64_t tsc= rte_rdtsc();
+ uint64_t reachable_timeout = l3->reachable_timeout * hz / 1000;
+ uint32_t nh;
+ prox_rte_ipv4_hdr *pip;
+ prox_rte_udp_hdr *udp_hdr;
+ uint8_t port = tbase->l3.reachable_port_id;
for (j = 0; j < n_pkts; ++j) {
PREFETCH0(mbufs[j]);
@@ -228,19 +725,83 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui
}
for (j = 0; j < n_pkts; ++j) {
+ pip = NULL;
+ udp_hdr = NULL;
out[0] = OUT_HANDLED;
- command = mbufs[j]->udata64 & 0xFFFF;
+ command = get_command(mbufs[j]);
plogx_dbg("\tReceived %s mbuf %p\n", actions_string[command], mbufs[j]);
switch(command) {
- case UPDATE_FROM_CTRL:
- hdr = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *);
- ip = (mbufs[j]->udata64 >> 32) & 0xFFFFFFFF;
+		case ROUTE_ADD_FROM_MASTER:
+			// Master pushed a new IPv4 route: ip/prefix via gateway_ip.
+			ip = ctrl_ring_get_ip(mbufs[j]);
+			gateway_ip = ctrl_ring_get_gateway_ip(mbufs[j]);
+			prefix = ctrl_ring_get_prefix(mbufs[j]);
+			gateway_index = get_nh_index(tbase, gateway_ip);
+			if (gateway_index >= MAX_HOP_INDEX) {
+				plog_err("Unable to find or define gateway index - too many\n");
+				// Drop this message and keep handling the rest of the
+				// burst; a bare return here would leak mbufs[j] and
+				// all remaining mbufs of the batch.
+				tx_drop(mbufs[j]);
+				break;
+			}
+			modified_route = rte_lpm_is_rule_present(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix, &nh);
+			ret = rte_lpm_add(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix, gateway_index);
+			if (ret < 0) {
+				plog_err("Failed to add route to "IPv4_BYTES_FMT"/%d using "IPv4_BYTES_FMT"(index = %d)\n", IP4(ip), prefix, IP4(gateway_ip), gateway_index);
+			} else if (modified_route)
+				plogx_dbg("Modified route to "IPv4_BYTES_FMT"/%d using "IPv4_BYTES_FMT"(index = %d) (was using "IPv4_BYTES_FMT"(index = %d)\n", IP4(ip), prefix, IP4(gateway_ip), gateway_index, IP4(tbase->l3.next_hops[nh].ip), nh);
+			else {
+				plogx_dbg("Added new route to "IPv4_BYTES_FMT"/%d using "IPv4_BYTES_FMT"(index = %d)\n", IP4(ip), prefix, IP4(gateway_ip), gateway_index);
+			}
+			tx_drop(mbufs[j]);
+			break;
+		case ROUTE_DEL_FROM_MASTER:
+			// Master asked to remove the route to ip/prefix (if present).
+			ip = ctrl_ring_get_ip(mbufs[j]);
+			prefix = ctrl_ring_get_prefix(mbufs[j]);
+
+			ret = rte_lpm_is_rule_present(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix, &nh);
+			if (ret > 0) {
+				ret = rte_lpm_delete(tbase->l3.ipv4_lpm, rte_bswap32(ip), prefix);
+				if (ret < 0) {
+					// This is the delete path - report the right operation.
+					plog_err("Failed to delete route\n");
+				}
+				plog_info("Deleting route to "IPv4_BYTES_FMT"/%d\n", IP4(ip), prefix);
+			}
+			tx_drop(mbufs[j]);
+			break;
+		case MAC_INFO_FROM_MASTER:
+			hdr_arp = rte_pktmbuf_mtod(mbufs[j], struct ether_hdr_arp *);
+			ip = get_ip(mbufs[j]);
-			if (ip == l3->gw.ip) {
+			if (prox_rte_is_zero_ether_addr(&hdr_arp->arp.data.sha)) {
+				// MAC timeout or deleted from kernel table => reset update_time
+				// This will cause us to send new ARP request
+				// However, as reachable_timeout not touched, we should continue sending our regular IP packets
+				reset_arp_ndp_retransmit_timeout(l3, ip);
+				// Drop the notification and continue with the rest of
+				// the burst; a bare return here would leak mbufs[j]
+				// and skip the remaining control-plane messages.
+				tx_drop(mbufs[j]);
+				break;
+			} else
+				plogx_dbg("\tUpdating MAC entry for IP "IPv4_BYTES_FMT" with MAC "MAC_BYTES_FMT"\n",
+					IP4(ip), MAC_BYTES(hdr_arp->arp.data.sha.addr_bytes));
+
+ if (l3->ipv4_lpm) {
+ uint32_t nh;
+ struct arp_table *entry;
+ ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
+ if (ret < 0) {
+ plogx_info("Unable add ip "IPv4_BYTES_FMT" in mac_hash\n", IP4(ip));
+ } else if ((nh = l3->arp_table[ret].nh) != MAX_HOP_INDEX) {
+ entry = &l3->next_hops[nh];
+ memcpy(&entry->mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
+ entry->reachable_timeout = tsc + reachable_timeout;
+ update_arp_ndp_retransmit_timeout(l3, &entry->arp_ndp_retransmit_timeout, l3->arp_ndp_retransmit_timeout);
+ } else {
+ memcpy(&l3->arp_table[ret].mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
+ l3->arp_table[ret].reachable_timeout = tsc + reachable_timeout;
+ update_arp_ndp_retransmit_timeout(l3, &l3->arp_table[ret].arp_ndp_retransmit_timeout, l3->arp_ndp_retransmit_timeout);
+ }
+ }
+ else if (ip == l3->gw.ip) {
// MAC address of the gateway
- memcpy(&l3->gw.mac, &hdr->arp.data.sha, 6);
+ memcpy(&l3->gw.mac, &hdr_arp->arp.data.sha, 6);
l3->flags |= FLAG_DST_MAC_KNOWN;
- l3->gw.arp_timeout = tsc + 30 * hz;
+ l3->gw.reachable_timeout = tsc + reachable_timeout;
+ update_arp_ndp_retransmit_timeout(l3, &l3->gw.arp_ndp_retransmit_timeout, l3->arp_ndp_retransmit_timeout);
} else if (l3->n_pkts < 4) {
// Few packets tracked - should be faster to loop through them thean using a hash table
for (idx = 0; idx < l3->n_pkts; idx++) {
@@ -249,25 +810,121 @@ void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, ui
break;
}
if (idx < l3->n_pkts) {
- // IP not found; this is a reply while we never asked for the request!
- memcpy(&l3->optimized_arp_table[idx].mac, &(hdr->arp.data.sha), sizeof(struct ether_addr));
- l3->optimized_arp_table[idx].arp_timeout = tsc + 30 * hz;
+ memcpy(&l3->optimized_arp_table[idx].mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
+ l3->optimized_arp_table[idx].reachable_timeout = tsc + reachable_timeout;
+ update_arp_ndp_retransmit_timeout(l3, &l3->optimized_arp_table[idx].arp_ndp_retransmit_timeout, l3->arp_ndp_retransmit_timeout);
+ }
+ } else {
+ ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
+ if (ret < 0) {
+ plogx_info("Unable add ip "IPv4_BYTES_FMT" in mac_hash\n", IP4(ip));
+ } else {
+ memcpy(&l3->arp_table[ret].mac, &(hdr_arp->arp.data.sha), sizeof(prox_rte_ether_addr));
+ l3->arp_table[ret].reachable_timeout = tsc + reachable_timeout;
+ update_arp_ndp_retransmit_timeout(l3, &l3->arp_table[ret].arp_ndp_retransmit_timeout, l3->arp_ndp_retransmit_timeout);
+ }
+ }
+ tx_drop(mbufs[j]);
+ break;
+		case MAC_INFO_FROM_MASTER_FOR_IPV6:
+			// NDP neighbour info from the master: MAC for an IPv6 address.
+			ip6 = ctrl_ring_get_ipv6_addr(mbufs[j]);
+			// NOTE(review): the 6 MAC bytes are copied from the start of
+			// this 64-bit value - assumes the master packs the MAC at
+			// offset 0 of the data word; confirm against the sender.
+			uint64_t data = ctrl_ring_get_data(mbufs[j]);
+
+			if (l3->n_pkts < 4) {
+				// Few packets tracked - should be faster to loop through them than using a hash table
+				for (idx = 0; idx < l3->n_pkts; idx++) {
+					ip6_dst = &l3->optimized_arp_table[idx].ip6;
+					if (memcmp(ip6_dst, ip6, sizeof(struct ipv6_addr)) == 0)
+						break;
+				}
+				if (idx < l3->n_pkts) {
+					// IP found; this is a reply for one of our requests!
+					memcpy(&l3->optimized_arp_table[idx].mac, &data, sizeof(prox_rte_ether_addr));
+					l3->optimized_arp_table[idx].reachable_timeout = tsc + l3->reachable_timeout * hz / 1000;
+				}
 			} else {
-				int ret = rte_hash_add_key(l3->ip_hash, (const void *)&ip);
+				int ret = rte_hash_add_key(l3->ip6_hash, (const void *)ip6);
 				if (ret < 0) {
-					plogx_info("Unable add ip %d.%d.%d.%d in mac_hash\n", IP4(ip));
+					plogx_info("Unable add ip "IPv6_BYTES_FMT" in mac_hash\n", IPv6_BYTES(ip6->bytes));
 				} else {
-					memcpy(&l3->arp_table[ret].mac, &(hdr->arp.data.sha), sizeof(struct ether_addr));
-					l3->arp_table[ret].arp_timeout = tsc + 30 * hz;
+					memcpy(&l3->arp_table[ret].mac, &data, sizeof(prox_rte_ether_addr));
+					l3->arp_table[ret].reachable_timeout = tsc + l3->reachable_timeout * hz / 1000;
 				}
 			}
 			tx_drop(mbufs[j]);
 			break;
- case ARP_REPLY_FROM_CTRL:
- case ARP_REQ_FROM_CTRL:
+		case SEND_NDP_FROM_MASTER:
+		case SEND_ARP_REQUEST_FROM_MASTER:
+		case SEND_ARP_REPLY_FROM_MASTER:
+			// Pre-built ARP/NDP frame from the master - transmit as-is.
+			out[0] = 0;
+			// tx_ctrlplane_pkt does not drop packets
+			plogx_dbg("\tForwarding (ARP) packet from master\n");
+			tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
+			TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+			break;
+		case SEND_ICMP_FROM_MASTER:
+			// ICMP (ping) reply built by the master - transmit as-is.
 			out[0] = 0;
-			tbase->aux->tx_pkt_l2(tbase, &mbufs[j], 1, out);
+			// tx_ctrlplane_pkt does not drop packets
+			plogx_dbg("\tForwarding (PING) packet from master\n");
+			tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
+			TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+			break;
+		case PKT_FROM_TAP:
+			// Drop Pseudo packets sent to generate ARP requests
+			// There are other IPv4 packets sent from TAP which we cannot delete e.g. BGP packets
+			out[0] = 0;
+			hdr = rte_pktmbuf_mtod(mbufs[j], prox_rte_ether_hdr *);
+			if (hdr->ether_type == ETYPE_IPv4) {
+				pip = (prox_rte_ipv4_hdr *)(hdr + 1);
+			} else if (hdr->ether_type == ETYPE_VLAN) {
+				prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(hdr + 1);
+				if (vlan->eth_proto == ETYPE_IPv4) {
+					pip = (prox_rte_ipv4_hdr *)(vlan + 1);
+				}
+			}
+			if (pip && (pip->next_proto_id == IPPROTO_UDP)) {
+				udp_hdr = (prox_rte_udp_hdr *)(pip + 1);
+				// A PROX pseudo packet is an empty UDP datagram with both
+				// ports set to PROX_PSEUDO_PKT_PORT.
+				if ((udp_hdr->dst_port == rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT)) &&
+					(udp_hdr->src_port == rte_cpu_to_be_16(PROX_PSEUDO_PKT_PORT)) &&
+					(rte_be_to_cpu_16(udp_hdr->dgram_len) == 8)) {
+					plogx_dbg("Dropping PROX packet\n");
+					tx_drop(mbufs[j]);
+					// Continue with the remaining packets of the burst;
+					// a bare return would abort them all.
+					break;
+				}
+			}
+/* Debugging ...
+			uint16_t src_port = 0, dst_port = 0, len = 0;
+			if (udp_hdr) {
+				src_port = udp_hdr->src_port;
+				dst_port = udp_hdr->dst_port;
+				len = rte_be_to_cpu_16(udp_hdr->dgram_len);
+			}
+			plogx_dbg("tForwarding TAP packet from master. Type = %x, pip=%p, udp = %p, udp = {src = %x, dst = %x, len = %d}\n", hdr->ether_type, pip, udp_hdr, src_port, dst_port,len );
+*/
+			// tx_ctrlplane_pkt does not drop packets
+			tbase->aux->tx_ctrlplane_pkt(tbase, &mbufs[j], 1, out);
+			TASK_STATS_ADD_TX_NON_DP(&tbase->aux->stats, 1);
+			break;
+ case IPV6_INFO_FROM_MASTER:
+ // addr = ctrl_ring_get_data(mbufs[j]);
+ ip6 = ctrl_ring_get_ipv6_addr(mbufs[j]);
+ if (memcmp(&l3->global_ipv6 , &null_addr, 16) == 0) {
+ memcpy(&l3->global_ipv6, ip6, sizeof(struct ipv6_addr));
+ plog_info("Core %d task %d received global IP "IPv6_BYTES_FMT"\n", l3->core_id, l3->task_id, IPv6_BYTES(ip6->bytes));
+ } else if (memcmp(&l3->global_ipv6, ip6, 8) == 0) {
+ if (l3->prefix_printed == 0) {
+ plog_info("Core %d task %d received expected prefix "IPv6_PREFIX_FMT"\n", l3->core_id, l3->task_id, IPv6_PREFIX(ip6->bytes));
+ l3->prefix_printed = 1;
+ }
+ } else {
+ plog_warn("Core %d task %d received unexpected prefix "IPv6_PREFIX_FMT", IP = "IPv6_PREFIX_FMT"\n", l3->core_id, l3->task_id, IPv6_PREFIX(ip6->bytes), IPv6_PREFIX(l3->global_ipv6.bytes));
+ }
+ tx_drop(mbufs[j]);
+ break;
+ default:
+ plog_err("Unexpected message received: %d\n", command);
+ tx_drop(mbufs[j]);
break;
}
}
diff --git a/VNFs/DPPD-PROX/packet_utils.h b/VNFs/DPPD-PROX/packet_utils.h
index 0017a89e..ef15cd22 100644
--- a/VNFs/DPPD-PROX/packet_utils.h
+++ b/VNFs/DPPD-PROX/packet_utils.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,7 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
*/
+#ifndef _PACKET_UTILS_H_
+#define _PACKET_UTILS_H_
+#include <rte_cycles.h>
+
+#include "prox_compat.h"
#include "arp.h"
#include "quit.h"
#include "prox_malloc.h"
@@ -24,31 +29,73 @@
#define FLAG_DST_MAC_KNOWN 1
#define MAX_ARP_ENTRIES 65536
+// Expand a network-byte-order (BE) IPv4 address into 4 printf arguments.
+// Arguments fully parenthesized: & and >> bind tighter than a caller's
+// surrounding expression would otherwise expect (e.g. IP4(a + b)).
+#define IP4(x) ((x) & 0xff), (((x) >> 8) & 0xff), (((x) >> 16) & 0xff), ((x) >> 24) // From Network (BE)
+enum {
+ SEND_MBUF_AND_ARP_ND,
+ SEND_MBUF,
+ SEND_ARP_ND,
+ DROP_MBUF
+};
+#define DEFAULT_ARP_TIMEOUT (1000 * 3600 * 24 * 15) // ~15 days = disabled by default
+#define DEFAULT_ARP_UPDATE_TIME (1000) // 1 second
+
struct task_base;
struct task_args;
+struct task_master;
 struct arp_table {
-	uint64_t arp_update_time;
-	uint64_t arp_timeout;
+	// TSC deadline for (re)transmitting an ARP/NDP request for this entry
+	uint64_t arp_ndp_retransmit_timeout;
+	// TSC deadline after which the resolved MAC is considered stale
+	uint64_t reachable_timeout;
 	uint32_t ip;
-	struct ether_addr mac;
+	// Index into l3_base::next_hops for the LPM path (see handle_ctrl_plane_pkts)
+	uint32_t nh;
+	prox_rte_ether_addr mac;
+	// IPv6 address when this entry tracks an NDP neighbour
+	struct ipv6_addr ip6;
 };
 struct l3_base {
 	struct rte_ring *ctrl_plane_ring;
 	struct task_base *tmaster;
 	uint32_t flags;
 	uint32_t n_pkts;
+	uint32_t local_ipv4;
 	uint8_t reachable_port_id;
 	uint8_t core_id;
 	uint8_t task_id;
+	// rand_r() state used to spread ARP/NDP retransmit deadlines
+	uint seed;
+	// Number of entries used in next_hops[]
+	prox_next_hop_index_type nb_gws;
+	// Configured periods in milliseconds (converted to TSC ticks at use)
+	uint32_t arp_ndp_retransmit_timeout;
+	uint32_t reachable_timeout;
 	struct arp_table gw;
 	struct arp_table optimized_arp_table[4];
 	struct rte_hash *ip_hash;
+	struct rte_hash *ip6_hash;
 	struct arp_table *arp_table;
+	// Non-NULL when routed (LPM) mode is enabled
+	struct rte_lpm *ipv4_lpm;
+	// Gateway/next-hop table indexed by arp_table::nh
+	struct arp_table *next_hops;
+	struct rte_mempool *arp_nd_pool;
+	struct ipv6_addr local_ipv6;
+	struct ipv6_addr global_ipv6;
+	// Set once the expected IPv6 prefix has been logged (avoids log spam)
+	uint8_t prefix_printed;
 };
void task_init_l3(struct task_base *tbase, struct task_args *targ);
void task_start_l3(struct task_base *tbase, struct task_args *targ);
-int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst);
+int write_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, uint32_t *ip_dst, uint16_t *vlan, uint64_t **time, uint64_t tsc);
+int write_ip6_dst_mac(struct task_base *tbase, struct rte_mbuf *mbuf, struct ipv6_addr *ip_dst, uint16_t *vlan, uint64_t tsc);
void task_set_gateway_ip(struct task_base *tbase, uint32_t ip);
void task_set_local_ip(struct task_base *tbase, uint32_t ip);
void handle_ctrl_plane_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
+void send_unsollicited_neighbour_advertisement(struct task_base *tbase);
+
+// Arm the retransmit deadline at a randomized point between 0.5x and 1.5x
+// the configured period (base, in ms), to avoid synchronized request bursts
+// across entries.
+static inline void update_arp_ndp_retransmit_timeout(struct l3_base *l3, uint64_t *ptr, uint32_t base)
+{
+	// randomize timers - from 0.5 to 1.5 * configured time
+	const uint64_t hz = rte_get_tsc_hz();
+	uint64_t tsc = rte_rdtsc();
+	// Force 64-bit arithmetic: on ILP32 targets 1000L * rand_r() (up to
+	// RAND_MAX = 2^31-1) would overflow a 32-bit long.
+	uint64_t rand = 500 + (1000 * (uint64_t)rand_r(&l3->seed)) / RAND_MAX;
+	*ptr = tsc + (base * rand / 1000) * hz / 1000;
+}
+// Input port the mbuf was received on (filled in by the PMD at RX time).
+static inline uint8_t get_port(struct rte_mbuf *mbuf)
+{
+	return mbuf->port;
+}
+
+#endif /* _PACKET_UTILS_H_ */
diff --git a/VNFs/DPPD-PROX/parse_utils.c b/VNFs/DPPD-PROX/parse_utils.c
index 32db5de6..8d846fd3 100644
--- a/VNFs/DPPD-PROX/parse_utils.c
+++ b/VNFs/DPPD-PROX/parse_utils.c
@@ -27,13 +27,14 @@
#include "quit.h"
#include "cfgfile.h"
-#include "ip6_addr.h"
#include "parse_utils.h"
#include "prox_globals.h"
#include "prox_cfg.h"
#include "log.h"
#include "prox_lua.h"
#include "prox_lua_types.h"
+#include "prox_ipv6.h"
+#include "prox_compat.h"
#define MAX_NB_PORT_NAMES PROX_MAX_PORTS
#define MAX_LEN_PORT_NAME 24
@@ -117,7 +118,7 @@ int parse_single_var(char *val, size_t len, const char *name)
match->name, match->val);
return -1;
}
- strncpy(val, match->val, len);
+ prox_strncpy(val, match->val, len);
return 0;
}
else {
@@ -195,7 +196,7 @@ int parse_vars(char *val, size_t len, const char *name)
break;
}
}
- strncpy(val, result, len);
+ prox_strncpy(val, result, len);
return 0;
}
@@ -310,7 +311,7 @@ int parse_ip(uint32_t *addr, const char *str2)
return 0;
}
-int parse_ip4_cidr(struct ip4_subnet *val, const char *str2)
+int parse_ip4_and_prefix(struct ip4_subnet *val, const char *str2)
{
char str[MAX_STR_LEN_PROC];
char *slash;
@@ -340,10 +341,16 @@ int parse_ip4_cidr(struct ip4_subnet *val, const char *str2)
if (parse_ip(&val->ip, str))
return -2;
+ return 0;
+}
+
+// Parse "a.b.c.d/len" and zero all host bits outside the prefix.
+// Returns 0 on success, negative on parse error (val left untouched then).
+int parse_ip4_cidr(struct ip4_subnet *val, const char *str2)
+{
+	int rc = parse_ip4_and_prefix(val, str2);
+
+	// On parse failure val->prefix is not reliable - don't touch val->ip.
+	if (rc)
+		return rc;
 	/* Apply mask making all bits outside the prefix zero */
-	val->ip &= ((int)(1 << 31)) >> (prefix - 1);
+	// A /0 prefix must clear every bit; shifting by (0 - 1) is undefined.
+	if (val->prefix == 0)
+		val->ip = 0;
+	else
+		val->ip &= ((int)(1 << 31)) >> (val->prefix - 1);
-	return 0;
+	return rc;
 }
int parse_ip6_cidr(struct ip6_subnet *val, const char *str2)
@@ -405,12 +412,12 @@ int parse_ip6(struct ipv6_addr *addr, const char *str2)
for (uint8_t i = 0, j = 0; i < ret; ++i, ++j) {
if (*addr_parts[i] == 0) {
- if (omitted == 0) {
+ if (omitted) {
set_errf("Can only omit zeros once");
return -1;
}
omitted = 1;
- j += 8 - ret;
+ j += 2 * (8 - ret) + 1;
}
else {
uint16_t w = strtoll(addr_parts[i], NULL, 16);
@@ -421,7 +428,7 @@ int parse_ip6(struct ipv6_addr *addr, const char *str2)
return 0;
}
-int parse_mac(struct ether_addr *ether_addr, const char *str2)
+int parse_mac(prox_rte_ether_addr *ether_addr, const char *str2)
{
char str[MAX_STR_LEN_PROC];
char *addr_parts[7];
@@ -839,6 +846,72 @@ int parse_task_set(struct core_task_set *cts, const char *str2)
return 0;
}
+// Parse a comma-separated list of "ip/prefix" entries into list[].
+// Writes at most max_list entries; returns 0 on success, -1 on error.
+int parse_ip_set(struct ip4_subnet *list, const char *str2, uint32_t max_list)
+{
+	char str[MAX_STR_LEN_PROC];
+	char *parts[MAX_STR_LEN_PROC];
+	int rc;
+
+	if (parse_vars(str, sizeof(str), str2))
+		return -1;
+	int n_parts = rte_strsplit(str, strlen(str), parts, MAX_STR_LEN_PROC, ',');
+	// Bound the loop by the caller's array size - the original wrote
+	// list[i] for every part, overflowing list[] past max_list entries.
+	if ((uint32_t)n_parts > max_list) {
+		set_errf("Too many entries");
+		return -1;
+	}
+	for (int i = 0; i < n_parts; i++) {
+		if ((rc = parse_ip4_and_prefix(&list[i], parts[i])) < 0) {
+			set_errf("Unable to parse ip4/prefix");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+// Parse a comma-separated list of integers and "n1-n2" ranges into list[].
+// Writes at most max_list entries; returns 0 on success, -1 on error.
+// NOTE(review): a range "n1-n2" is half-open - n2 itself is excluded by the
+// j < n2 loop below; confirm this matches what callers expect.
+int parse_int_set(uint32_t *list, const char *str2, uint32_t max_list)
+{
+	char str[MAX_STR_LEN_PROC];
+	char *parts[MAX_STR_LEN_PROC];
+	uint32_t n = 0;
+
+	if (parse_vars(str, sizeof(str), str2))
+		return -1;
+
+	int n_parts = rte_strsplit(str, strlen(str), parts, MAX_STR_LEN_PROC, ',');
+	for (int i = 0; i < n_parts; i++) {
+		char *cur_part = parts[i];
+		char *sub_parts[3];
+		int n_sub_parts = rte_strsplit(cur_part, strlen(cur_part), sub_parts, 3, '-');
+		uint32_t n1, n2;
+
+		if (n_sub_parts == 1) {
+			// Single value needs exactly one free slot (the original
+			// n >= max_list - 1 test wrongly rejected a full-but-one list).
+			if (n >= max_list) {
+				set_errf("Too many entries");
+				return -1;
+			}
+			if (parse_int(&list[n], sub_parts[0]))
+				return -1;
+			n++;
+		} else if (n_sub_parts == 2) {
+			if (parse_int(&n1, sub_parts[0]))
+				return -1;
+			if (parse_int(&n2, sub_parts[1]))
+				return -1;
+			// The half-open range adds n2 - n1 entries.
+			if (n + n2 - n1 > max_list) {
+				set_errf("Too many entries");
+				return -1;
+			}
+			for (uint32_t j = n1; j < n2; j++) {
+				list[n++] = j;
+			}
+		} else if (n_sub_parts >= 3) {
+			set_errf("Multiple '-' characters in range syntax found");
+			return -1;
+		} else {
+			set_errf("Invalid list syntax");
+			return -1;
+		}
+	}
+	return 0;
+}
+
int parse_list_set(uint32_t *list, const char *str2, uint32_t max_list)
{
char str[MAX_STR_LEN_PROC];
@@ -897,7 +970,7 @@ int parse_list_set(uint32_t *list, const char *str2, uint32_t max_list)
effective_core = cur_core;
if (list_count >= max_list) {
- set_errf("Too many elements in list\n");
+ set_errf("Too many elements in list");
return -1;
}
list[list_count++] = effective_core;
@@ -922,10 +995,12 @@ int parse_kmg(uint32_t* val, const char *str2)
if (*val >> 22)
return -2;
*val <<= 10;
+ // __attribute__ ((fallthrough));
case 'M':
if (*val >> 22)
return -2;
*val <<= 10;
+ // __attribute__ ((fallthrough));
case 'K':
if (*val >> 22)
return -2;
@@ -1048,7 +1123,7 @@ int parse_str(char* dst, const char *str2, size_t max_len)
return -2;
}
- strncpy(dst, str, max_len);
+ prox_strncpy(dst, str, max_len);
return 0;
}
@@ -1122,7 +1197,7 @@ int parse_remap(uint8_t *mapping, const char *str)
set_errf("String too long (max supported: %d)", MAX_STR_LEN_PROC);
return -2;
}
- strncpy(str_cpy, str, MAX_STR_LEN_PROC);
+ prox_strncpy(str_cpy, str, MAX_STR_LEN_PROC);
ret = rte_strsplit(str_cpy, strlen(str_cpy), elements, PROX_MAX_PORTS + 1, ',');
if (ret <= 0) {
@@ -1179,7 +1254,7 @@ int add_port_name(uint32_t val, const char *str2)
}
pn = &port_names[nb_port_names];
- strncpy(pn->name, str, sizeof(pn->name));
+ prox_strncpy(pn->name, str, sizeof(pn->name));
pn->id = val;
++nb_port_names;
@@ -1197,7 +1272,7 @@ int set_self_var(const char *str)
struct var *v = &vars[nb_vars];
- strncpy(v->name, "$self", strlen("$self"));
+ prox_strncpy(v->name, "$self", strlen("$self") + 1);
sprintf(v->val, "%s", str);
nb_vars++;
@@ -1245,8 +1320,8 @@ int add_var(const char* name, const char *str2, uint8_t cli)
v = &vars[nb_vars];
PROX_PANIC(strlen(name) > sizeof(v->name), "\tUnable to parse var %s: too long\n", name);
PROX_PANIC(strlen(str) > sizeof(v->val), "\tUnable to parse var %s=%s: too long\n", name,str);
- strncpy(v->name, name, sizeof(v->name));
- strncpy(v->val, str, sizeof(v->val));
+ prox_strncpy(v->name, name, sizeof(v->name));
+ prox_strncpy(v->val, str, sizeof(v->val));
v->cli = cli;
++nb_vars;
diff --git a/VNFs/DPPD-PROX/parse_utils.h b/VNFs/DPPD-PROX/parse_utils.h
index 27ebb0bd..03c03188 100644
--- a/VNFs/DPPD-PROX/parse_utils.h
+++ b/VNFs/DPPD-PROX/parse_utils.h
@@ -18,12 +18,12 @@
#define _PARSE_UTILS_H_
#include <inttypes.h>
+#include "prox_compat.h"
#include "ip_subnet.h"
-#define MAX_STR_LEN_PROC (3 * 1518 + 20)
+#define MAX_STR_LEN_PROC (3 * MAX_PKT_SIZE + 20)
struct ipv6_addr;
-struct ether_addr;
enum ctrl_type {CTRL_TYPE_DP, CTRL_TYPE_MSG, CTRL_TYPE_PKT};
@@ -46,6 +46,7 @@ int parse_range(uint32_t* lo, uint32_t* hi, const char *saddr);
/* parses CIDR notation. Note that bits within the address that are
outside the subnet (as specified by the prefix) are set to 0. */
+int parse_ip4_and_prefix(struct ip4_subnet *val, const char *saddr);
int parse_ip4_cidr(struct ip4_subnet *val, const char *saddr);
int parse_ip6_cidr(struct ip6_subnet *val, const char *saddr);
@@ -53,7 +54,7 @@ int parse_ip(uint32_t *paddr, const char *saddr);
int parse_ip6(struct ipv6_addr *addr, const char *saddr);
-int parse_mac(struct ether_addr *paddr, const char *saddr);
+int parse_mac(prox_rte_ether_addr *paddr, const char *saddr);
/* return error on overflow or invalid suffix*/
int parse_kmg(uint32_t* val, const char *str);
@@ -63,6 +64,8 @@ int parse_bool(uint32_t* val, const char *str);
int parse_flag(uint32_t* val, uint32_t flag, const char *str);
int parse_list_set(uint32_t *list, const char *str, uint32_t max_limit);
+int parse_ip_set(struct ip4_subnet *list, const char *str2, uint32_t max_list);
+int parse_int_set(uint32_t *list, const char *str2, uint32_t max_list);
int parse_task_set(struct core_task_set *val, const char *str);
diff --git a/VNFs/DPPD-PROX/pkt_parser.h b/VNFs/DPPD-PROX/pkt_parser.h
index 285d42f9..746830bf 100644
--- a/VNFs/DPPD-PROX/pkt_parser.h
+++ b/VNFs/DPPD-PROX/pkt_parser.h
@@ -24,6 +24,7 @@
#include <rte_tcp.h>
#include <rte_byteorder.h>
+#include "prox_compat.h"
#include "log.h"
#include "etypes.h"
@@ -69,28 +70,28 @@ static void pkt_tuple_debug(const struct pkt_tuple *pt)
/* Return 0 on success, i.e. packets parsed without any error. */
static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta *l4_meta)
{
- struct ether_hdr *peth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
size_t l2_types_count = 0;
- struct ipv4_hdr* pip = 0;
+ prox_rte_ipv4_hdr* pip = 0;
/* L2 */
pt->l2_types[l2_types_count++] = peth->ether_type;
switch (peth->ether_type) {
case ETYPE_IPv4:
- pip = (struct ipv4_hdr *)(peth + 1);
+ pip = (prox_rte_ipv4_hdr *)(peth + 1);
break;
case ETYPE_VLAN: {
- struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1);
+ prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1);
pt->l2_types[l2_types_count++] = vlan->eth_proto;
if (vlan->eth_proto == ETYPE_IPv4) {
- pip = (struct ipv4_hdr *)(peth + 1);
+ pip = (prox_rte_ipv4_hdr *)(peth + 1);
}
else if (vlan->eth_proto == ETYPE_VLAN) {
- struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1);
+ prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1);
pt->l2_types[l2_types_count++] = vlan->eth_proto;
if (vlan->eth_proto == ETYPE_IPv4) {
- pip = (struct ipv4_hdr *)(peth + 1);
+ pip = (prox_rte_ipv4_hdr *)(peth + 1);
}
else if (vlan->eth_proto == ETYPE_IPv6) {
return 1;
@@ -103,13 +104,13 @@ static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta
}
break;
case ETYPE_8021ad: {
- struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1);
+ prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1);
pt->l2_types[l2_types_count++] = vlan->eth_proto;
if (vlan->eth_proto == ETYPE_VLAN) {
- struct vlan_hdr *vlan = (struct vlan_hdr *)(peth + 1);
+ prox_rte_vlan_hdr *vlan = (prox_rte_vlan_hdr *)(peth + 1);
pt->l2_types[l2_types_count++] = vlan->eth_proto;
if (vlan->eth_proto == ETYPE_IPv4) {
- pip = (struct ipv4_hdr *)(peth + 1);
+ pip = (prox_rte_ipv4_hdr *)(peth + 1);
}
else {
return 1;
@@ -148,21 +149,21 @@ static int parse_pkt(struct rte_mbuf *mbuf, struct pkt_tuple *pt, struct l4_meta
/* L4 parser */
if (pt->proto_id == IPPROTO_UDP) {
- struct udp_hdr *udp = (struct udp_hdr*)(pip + 1);
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr*)(pip + 1);
l4_meta->l4_hdr = (uint8_t*)udp;
pt->src_port = udp->src_port;
pt->dst_port = udp->dst_port;
- l4_meta->payload = ((uint8_t*)udp) + sizeof(struct udp_hdr);
- l4_meta->len = rte_be_to_cpu_16(udp->dgram_len) - sizeof(struct udp_hdr);
+ l4_meta->payload = ((uint8_t*)udp) + sizeof(prox_rte_udp_hdr);
+ l4_meta->len = rte_be_to_cpu_16(udp->dgram_len) - sizeof(prox_rte_udp_hdr);
}
else if (pt->proto_id == IPPROTO_TCP) {
- struct tcp_hdr *tcp = (struct tcp_hdr*)(pip + 1);
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr*)(pip + 1);
l4_meta->l4_hdr = (uint8_t*)tcp;
pt->src_port = tcp->src_port;
pt->dst_port = tcp->dst_port;
l4_meta->payload = ((uint8_t*)tcp) + ((tcp->data_off >> 4)*4);
- l4_meta->len = rte_be_to_cpu_16(pip->total_length) - sizeof(struct ipv4_hdr) - ((tcp->data_off >> 4)*4);
+ l4_meta->len = rte_be_to_cpu_16(pip->total_length) - sizeof(prox_rte_ipv4_hdr) - ((tcp->data_off >> 4)*4);
}
else {
plog_err("unsupported protocol %d\n", pt->proto_id);
diff --git a/VNFs/DPPD-PROX/pkt_prototypes.h b/VNFs/DPPD-PROX/pkt_prototypes.h
index 5d55bacb..9acde34a 100644
--- a/VNFs/DPPD-PROX/pkt_prototypes.h
+++ b/VNFs/DPPD-PROX/pkt_prototypes.h
@@ -31,7 +31,7 @@ static const struct gre_hdr gre_hdr_proto = {
.bits = GRE_KEY_PRESENT
};
-static const struct ipv4_hdr tunnel_ip_proto = {
+static const prox_rte_ipv4_hdr tunnel_ip_proto = {
.version_ihl = 0x45,
.type_of_service = 0,
.packet_id = 0,
diff --git a/VNFs/DPPD-PROX/prox_args.c b/VNFs/DPPD-PROX/prox_args.c
index 08f27e9e..dc212494 100644
--- a/VNFs/DPPD-PROX/prox_args.c
+++ b/VNFs/DPPD-PROX/prox_args.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -35,10 +35,13 @@
#include "defaults.h"
#include "prox_lua.h"
#include "cqm.h"
+#include "defines.h"
+#include "prox_ipv6.h"
#include "prox_compat.h"
+#include "ip_subnet.h"
#define MAX_RTE_ARGV 64
-#define MAX_ARG_LEN 64
+#define MAX_ARG_LEN 256
struct cfg_depr {
const char *opt;
@@ -137,6 +140,15 @@ static struct cfg_section core_cfg = {
.error = 0
};
+struct deferred_port {
+ struct task_args *targ;
+ char name[256];
+ uint8_t is_rx_port;
+};
+
+static struct deferred_port deferred_port[PROX_MAX_PORTS];
+static int n_deferred_ports = 0;
+
static void set_errf(const char *format, ...)
{
va_list ap;
@@ -263,7 +275,7 @@ static int get_lua_cfg(__attribute__((unused)) unsigned sindex, __attribute__((u
struct lua_State *l = prox_lua();
char str_cpy[1024];
- strncpy(str_cpy, str, sizeof(str_cpy));
+ prox_strncpy(str_cpy, str, sizeof(str_cpy));
uint32_t len = strlen(str_cpy);
str_cpy[len++] = '\n';
str_cpy[len++] = 0;
@@ -338,6 +350,12 @@ static int get_global_cfg(__attribute__((unused))unsigned sindex, char *str, voi
if (STR_EQ(str, "enable bypass")) {
return parse_flag(&pset->flags, DSF_ENABLE_BYPASS, pkey);
}
+ if (STR_EQ(str, "poll timeout")) {
+ return parse_int(&pset->poll_timeout, pkey);
+ }
+ if (STR_EQ(str, "heartbeat timeout")) {
+ return parse_int(&pset->heartbeat_timeout, pkey);
+ }
if (STR_EQ(str, "cpe table map")) {
/* The config defined ports through 0, 1, 2 ... which
@@ -514,7 +532,7 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
}
else if (STR_EQ(str, "name")) {
uint32_t val;
- strncpy(cfg->name, pkey, MAX_NAME_SIZE);
+ prox_strncpy(cfg->names[0], pkey, MAX_NAME_SIZE);
PROX_ASSERT(cur_if < PROX_MAX_PORTS);
return add_port_name(cur_if, pkey);
}
@@ -524,6 +542,16 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
else if (STR_EQ(str, "tx desc")) {
return parse_int(&cfg->n_txd, pkey);
}
+ else if (STR_EQ(str, "ipv6 mask length")) {
+ return parse_int(&cfg->v6_mask_length, pkey);
+ }
+ else if (STR_EQ(str, "all_rx_queues")) {
+ uint32_t val;
+ if (parse_bool(&val, pkey)) {
+ return -1;
+ }
+ cfg->all_rx_queues = val;
+ }
else if (STR_EQ(str, "promiscuous")) {
uint32_t val;
if (parse_bool(&val, pkey)) {
@@ -531,6 +559,17 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
}
cfg->promiscuous = val;
}
+ else if (STR_EQ(str, "multicast")) {
+ uint32_t val;
+ if (cfg->nb_mc_addr >= NB_MCAST_ADDR) {
+ plog_err("too many multicast addresses\n");
+ return -1;
+ }
+ if (parse_mac(&cfg->mc_addr[cfg->nb_mc_addr], pkey)) {
+ return -1;
+ }
+ cfg->nb_mc_addr++ ;
+ }
else if (STR_EQ(str, "lsc")) {
cfg->lsc_set_explicitely = 1;
uint32_t val;
@@ -539,21 +578,102 @@ static int get_port_cfg(unsigned sindex, char *str, void *data)
}
cfg->lsc_val = val;
}
+ else if (STR_EQ(str, "local ipv4")) {
+ if (parse_ip_set(cfg->ip_addr, pkey, PROX_MAX_VLAN_TAGS) != 0) {
+ cfg->ip_addr[0].ip = 24;
+ return parse_ip(&cfg->ip_addr[0].ip, pkey);
+ }
+ return 0;
+ }
+ else if (STR_EQ(str, "virtual")) {
+ uint32_t val;
+ if (parse_bool(&val, pkey)) {
+ return -1;
+ }
+ cfg->virtual = val;
+ }
+ else if (STR_EQ(str, "vdev")) {
+ prox_strncpy(cfg->vdev, pkey, MAX_NAME_SIZE);
+ }
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+ else if (STR_EQ(str, "disable tx offload")) {
+ uint32_t val;
+ if (parse_int(&val, pkey)) {
+ return -1;
+ }
+ if (val)
+ cfg->disabled_tx_offload = val;
+ }
+#endif
else if (STR_EQ(str, "strip crc")) {
uint32_t val;
if (parse_bool(&val, pkey)) {
return -1;
}
- cfg->port_conf.rxmode.hw_strip_crc = val;
+#if defined(RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (val)
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_CRC_STRIP;
+ else
+ cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_CRC_STRIP;
+#else
+#if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ if (val)
+ cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+ else
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
+#endif
+#endif
+
+ }
+ else if (STR_EQ(str, "vlan tag")) {
+ return parse_int_set(cfg->vlan_tags, pkey, sizeof(cfg->vlan_tags) / sizeof(cfg->vlan_tags[0]));
+ }
+ else if (STR_EQ(str, "vlan")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+ uint32_t val;
+ if (parse_bool(&val, pkey)) {
+ return -1;
+ }
+ if (val) {
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ cfg->requested_tx_offload |= RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ } else {
+ cfg->requested_rx_offload &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ cfg->requested_tx_offload &= ~RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+ }
+#else
+ plog_warn("vlan option not supported : update DPDK at least to 18.08 to support this option\n");
+#endif
+ }
+ else if (STR_EQ(str, "mtu size")) {
+ uint32_t val;
+ if (parse_int(&val, pkey)) {
+ return -1;
+ }
+ if (val) {
+ cfg->mtu = val;
+ // A frame of 1526 bytes (1500 bytes mtu, 14 bytes hdr, 4 bytes crc and 8 bytes vlan)
+ // should not be considered as a jumbo frame. However rte_ethdev.c considers that
+ // the max_rx_pkt_len for a non jumbo frame is 1518
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+ cfg->port_conf.rxmode.max_rx_pkt_len = cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN;
+ if (cfg->port_conf.rxmode.max_rx_pkt_len > PROX_RTE_ETHER_MAX_LEN)
+#else
+ cfg->port_conf.rxmode.mtu = cfg->mtu;
+ if (cfg->port_conf.rxmode.mtu > PROX_MTU)
+#endif
+ cfg->requested_rx_offload |= RTE_ETH_RX_OFFLOAD_JUMBO_FRAME;
+ }
}
+
else if (STR_EQ(str, "rss")) {
uint32_t val;
if (parse_bool(&val, pkey)) {
return -1;
}
if (val) {
- cfg->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
- cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4;
+ cfg->port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+ cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IPV4;
}
}
else if (STR_EQ(str, "rx_ring")) {
@@ -806,10 +926,10 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "fast path handle arp")) {
return parse_flag(&targ->runtime_flags, TASK_FP_HANDLE_ARP, pkey);
}
- if (STR_EQ(str, "multiple arp")) {
- return parse_flag(&targ->flags, TASK_MULTIPLE_MAC, pkey);
- }
+ if (STR_EQ(str, "do not forward geneve")) {
+ return parse_flag(&targ->runtime_flags, TASK_DO_NOT_FWD_GENEVE, pkey);
+ }
/* Using tx port name, only a _single_ port can be assigned to a task. */
if (STR_EQ(str, "tx port")) {
if (targ->nb_txports > 0) {
@@ -821,7 +941,17 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
uint32_t ports[PROX_MAX_PORTS];
if(parse_port_name_list(ports, &n_if, PROX_MAX_PORTS, pkey)) {
- return -1;
+ // Port name not found, but could be a virtual device of a secondary process
+ // As DPDK not started yet, we can only check the config file to see whether we are a secondary process
+ if (rte_cfg.eal &&
+ (strstr(rte_cfg.eal, "secondary") || strstr(rte_cfg.eal, "auto")) &&
+ (n_deferred_ports < PROX_MAX_PORTS)) {
+ prox_strncpy(deferred_port[n_deferred_ports].name, pkey, sizeof(deferred_port[n_deferred_ports].name));
+ deferred_port[n_deferred_ports].is_rx_port = 0;
+ deferred_port[n_deferred_ports++].targ = targ;
+ return 0;
+ } else
+ return -1;
}
PROX_ASSERT(n_if-1 < PROX_MAX_PORTS);
@@ -877,6 +1007,9 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "streams")) {
return parse_str(targ->streams, pkey, sizeof(targ->streams));
}
+ if (STR_EQ(str, "Unsollicited NA")) {
+ return parse_flag(&targ->flags, TASK_ARG_SEND_NA_AT_STARTUP, pkey);
+ }
if (STR_EQ(str, "local lpm")) {
return parse_flag(&targ->flags, TASK_ARG_LOCAL_LPM, pkey);
}
@@ -896,6 +1029,18 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "random")) {
return parse_str(targ->rand_str[targ->n_rand_str++], pkey, sizeof(targ->rand_str[0]));
}
+ if (STR_EQ(str, "range")) {
+ int rc = parse_range(&targ->range[targ->n_ranges].min, &targ->range[targ->n_ranges].max, pkey);
+ targ->n_ranges++;
+ return rc;
+ }
+ if (STR_EQ(str, "range_offset")) {
+ if (targ->n_ranges == 0) {
+ set_errf("No range defined previously (use range=...)");
+ return -1;
+ }
+ return parse_int(&targ->range[targ->n_ranges - 1].offset, pkey);
+ }
if (STR_EQ(str, "rand_offset")) {
if (targ->n_rand_str == 0) {
set_errf("No random defined previously (use random=...)");
@@ -910,6 +1055,34 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "pcap file")) {
return parse_str(targ->pcap_file, pkey, sizeof(targ->pcap_file));
}
+ if (STR_EQ(str, "imix")) {
+ char pkey2[MAX_CFG_STRING_LEN], *ptr;
+ if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
+ set_errf("Error while parsing imix, too long\n");
+ return -1;
+ }
+ const size_t pkey_len = strlen(pkey2);
+ targ->imix_nb_pkts = 0;
+ ptr = pkey2;
+ while (targ->imix_nb_pkts < MAX_IMIX_PKTS) {
+ if (parse_int(&targ->imix_pkt_sizes[targ->imix_nb_pkts], ptr) != 0)
+ break;
+ targ->imix_nb_pkts++;
+ if ((ptr = strchr(ptr, ',')) == NULL)
+ break;
+ ptr++;
+ if (targ->imix_nb_pkts == MAX_IMIX_PKTS) {
+ set_errf("Too many packet sizes specified");
+ return -1;
+ }
+ }
+ plog_info("%d IMIX packets:", targ->imix_nb_pkts);
+ for (size_t i = 0; i < targ->imix_nb_pkts; ++i) {
+ plog_info("%d ", targ->imix_pkt_sizes[i]);
+ }
+ plog_info("\n");
+ return 0;
+ }
if (STR_EQ(str, "pkt inline")) {
char pkey2[MAX_CFG_STRING_LEN];
if (parse_str(pkey2, pkey, sizeof(pkey2)) != 0) {
@@ -959,7 +1132,8 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return -1;
}
if (targ->pkt_size == sizeof(targ->pkt_inline)) {
- set_errf("Inline packet definition can't be longer than 1518");
+ set_errf("Inline packet definition can't be longer than %u", sizeof(targ->pkt_inline));
+ return -1;
}
targ->pkt_inline[targ->pkt_size++] = byte;
@@ -977,6 +1151,9 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "latency buffer size")) {
return parse_int(&targ->latency_buffer_size, pkey);
}
+ if (STR_EQ(str, "loss buffer size")) {
+ return parse_int(&targ->loss_buffer_size, pkey);
+ }
if (STR_EQ(str, "accuracy pos")) {
return parse_int(&targ->accur_pos, pkey);
}
@@ -993,7 +1170,16 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (STR_EQ(str, "packet id pos")) {
return parse_int(&targ->packet_id_pos, pkey);
}
- if (STR_EQ(str, "probability")) {
+ if (STR_EQ(str, "flow id pos")) {
+ return parse_int(&targ->flow_id_pos, pkey);
+ }
+ if (STR_EQ(str, "packet id in flow pos")) {
+ return parse_int(&targ->packet_id_in_flow_pos, pkey);
+ }
+ if (STR_EQ(str, "flow count")) {
+ return parse_int(&targ->flow_count, pkey);
+ }
+ if (STR_EQ(str, "probability")) { // old - use "probability no drop" instead
float probability;
int rc = parse_float(&probability, pkey);
if (probability == 0) {
@@ -1003,9 +1189,44 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
set_errf("Probability must be < 100\n");
return -1;
}
- targ->probability = probability * 10000;
+ targ->probability_no_drop = probability * 10000;
return rc;
}
+ if (STR_EQ(str, "proba no drop")) {
+ float probability;
+ int rc = parse_float(&probability, pkey);
+ if (probability == 0) {
+ set_errf("probability no drop must be != 0\n");
+ return -1;
+ } else if (probability > 100.0) {
+ set_errf("Probability must be < 100\n");
+ return -1;
+ }
+ targ->probability_no_drop = probability * 10000;
+ return rc;
+ }
+ if (STR_EQ(str, "proba delay")) {
+ float probability;
+ int rc = parse_float(&probability, pkey);
+ if (probability > 100.0) {
+ set_errf("Probability must be < 100\n");
+ return -1;
+ }
+ targ->probability_delay = probability * 10000;
+ return rc;
+ }
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ if (STR_EQ(str, "proba duplicate")) {
+ float probability;
+ int rc = parse_float(&probability, pkey);
+ if (probability > 100.0) {
+ set_errf("probability duplicate must be < 100\n");
+ return -1;
+ }
+ targ->probability_duplicate = probability * 10000;
+ return rc;
+ }
+#endif
if (STR_EQ(str, "concur conn")) {
return parse_int(&targ->n_concur_conn, pkey);
}
@@ -1030,7 +1251,17 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
uint32_t n_if;
if (parse_port_name_list(vals, &n_if, PROX_MAX_PORTS, pkey)) {
- return -1;
+ // Port name not found, but could be a virtual device of a secondary process
+ // As DPDK not started yet, we can only check the config file to see whether we are a secondary process
+ if (rte_cfg.eal &&
+ (strstr(rte_cfg.eal, "secondary") || strstr(rte_cfg.eal, "auto")) &&
+ (n_deferred_ports < PROX_MAX_PORTS)) {
+ prox_strncpy(deferred_port[n_deferred_ports].name, pkey, sizeof(deferred_port[n_deferred_ports].name));
+ deferred_port[n_deferred_ports].is_rx_port = 1;
+ deferred_port[n_deferred_ports++].targ = targ;
+ return 0;
+ } else
+ return -1;
}
for (uint8_t i = 0; i < n_if; ++i) {
@@ -1204,7 +1435,6 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
}
else if (STR_EQ(str, "mbuf size")) {
- targ->mbuf_size_set_explicitely = 1;
return parse_int(&targ->mbuf_size, pkey);
}
if (STR_EQ(str, "memcache size")) {
@@ -1215,6 +1445,9 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return parse_int(&targ->byte_offset, pkey);
}
+ if (STR_EQ(str, "realtime scheduling")) {
+ return parse_flag(&lconf->flags, LCONF_FLAG_SCHED_RR, pkey);
+ }
if (STR_EQ(str, "name")) {
return parse_str(lconf->name, pkey, sizeof(lconf->name));
}
@@ -1241,7 +1474,7 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
targ->task_init = to_task_init(mode_str, sub_mode_str);
if (!targ->task_init) {
- if (strcmp(sub_mode_str, "l3") != 0) {
+ if ((strcmp(sub_mode_str, "l3") != 0) && (strcmp(sub_mode_str, "ndp") != 0)) {
set_errf("sub mode %s not supported for mode %s", sub_mode_str, mode_str);
return -1;
}
@@ -1252,9 +1485,13 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
}
}
if (strcmp(sub_mode_str, "l3") == 0) {
- prox_cfg.flags |= DSF_CTRL_PLANE_ENABLED;
+ prox_cfg.flags |= DSF_L3_ENABLED;
targ->flags |= TASK_ARG_L3;
strcpy(targ->sub_mode_str, "l3");
+ } else if (strcmp(sub_mode_str, "ndp") == 0) {
+ prox_cfg.flags |= DSF_NDP_ENABLED;
+ targ->flags |= TASK_ARG_NDP;
+ strcpy(targ->sub_mode_str, "ndp");
} else {
strcpy(targ->sub_mode_str, targ->task_init->sub_mode_str);
}
@@ -1303,20 +1540,89 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
targ->flags |= TASK_ARG_SRC_MAC_SET;
return 0;
}
+ if (STR_EQ(str, "igmp ipv4")) { /* IGMP Group */
+ return parse_ip(&targ->igmp_address, pkey);
+ }
if (STR_EQ(str, "gateway ipv4")) { /* Gateway IP address used when generating */
+ if ((targ->flags & TASK_ARG_L3) == 0)
+ plog_warn("gateway ipv4 configured but L3 sub mode not enabled\n");
+ if (targ->local_ipv4)
+ targ->local_prefix = 32;
return parse_ip(&targ->gateway_ipv4, pkey);
}
+ if (STR_EQ(str, "ipv6 router")) { /* we simulate an IPV6 router */
+ int rc = parse_flag(&targ->ipv6_router, 1, pkey);
+ if (!rc && targ->ipv6_router) {
+ plog_info("\tipv6 router configured => NDP enabled\n");
+ prox_cfg.flags |= DSF_NDP_ENABLED;
+ targ->flags |= TASK_ARG_NDP;
+ strcpy(targ->sub_mode_str, "ndp");
+ }
+ return 0;
+ }
+ if (STR_EQ(str, "gateway ipv6")) { /* Gateway IP address used when generating */
+ if ((targ->flags & TASK_ARG_NDP) == 0)
+ plog_warn("gateway ipv6 configured but NDP sub mode not enabled\n");
+ return parse_ip6(&targ->gateway_ipv6, pkey);
+ }
if (STR_EQ(str, "local ipv4")) { /* source IP address to be used for packets */
- return parse_ip(&targ->local_ipv4, pkey);
+ struct ip4_subnet cidr;
+ if (parse_ip4_and_prefix(&cidr, pkey) != 0) {
+ if (targ->gateway_ipv4)
+ targ->local_prefix = 32;
+ else
+ targ->local_prefix = 0;
+ return parse_ip(&targ->local_ipv4, pkey);
+ } else {
+ targ->local_ipv4 = cidr.ip;
+ targ->local_prefix = cidr.prefix;
+ return 0;
+ }
}
if (STR_EQ(str, "remote ipv4")) { /* source IP address to be used for packets */
return parse_ip(&targ->remote_ipv4, pkey);
}
+ if (STR_EQ(str, "global ipv6")) {
+ if (parse_ip6(&targ->global_ipv6, pkey) == 0) {
+ plog_info("\tglobal ipv6 configured => NDP enabled\n");
+ targ->flags |= TASK_ARG_NDP;
+ prox_cfg.flags |= DSF_NDP_ENABLED;
+ strcpy(targ->sub_mode_str, "ndp");
+ } else {
+ plog_err("Unable to parse content of local ipv6: %s\n", pkey);
+ return -1;
+ }
+ return 0;
+ }
if (STR_EQ(str, "local ipv6")) { /* source IPv6 address to be used for packets */
- return parse_ip6(&targ->local_ipv6, pkey);
+ if (parse_ip6(&targ->local_ipv6, pkey) == 0) {
+ plog_info("\tlocal ipv6 configured => NDP enabled\n");
+ targ->flags |= TASK_ARG_NDP;
+ prox_cfg.flags |= DSF_NDP_ENABLED;
+ strcpy(targ->sub_mode_str, "ndp");
+ } else {
+ plog_err("Unable to parse content of local ipv6: %s\n", pkey);
+ return -1;
+ }
+ return 0;
}
+ if (STR_EQ(str, "router prefix")) {
+ if (parse_ip6(&targ->router_prefix, pkey) == 0) {
+ plog_info("\trouter prefix set to "IPv6_BYTES_FMT" (%s)\n", IPv6_BYTES(targ->router_prefix.bytes), IP6_Canonical(&targ->router_prefix));
+ } else {
+ plog_err("Unable to parse content of router prefix: %s\n", pkey);
+ return -1;
+ }
+ return 0;
+ }
+ if (STR_EQ(str, "arp timeout"))
+ return parse_int(&targ->reachable_timeout, pkey);
+ if (STR_EQ(str, "arp update time"))
+ return parse_int(&targ->arp_ndp_retransmit_timeout, pkey);
if (STR_EQ(str, "number of packets"))
return parse_int(&targ->n_pkts, pkey);
+ if (STR_EQ(str, "store size"))
+ return parse_int(&targ->store_max, pkey);
if (STR_EQ(str, "pipes")) {
uint32_t val;
int err = parse_int(&val, pkey);
@@ -1336,30 +1642,84 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (err) {
return -1;
}
-
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ targ->qos_conf.subport_params[0].qsize[0] = val;
+ targ->qos_conf.subport_params[0].qsize[1] = val;
+ targ->qos_conf.subport_params[0].qsize[2] = val;
+ targ->qos_conf.subport_params[0].qsize[3] = val;
+#else
targ->qos_conf.port_params.qsize[0] = val;
targ->qos_conf.port_params.qsize[1] = val;
targ->qos_conf.port_params.qsize[2] = val;
targ->qos_conf.port_params.qsize[3] = val;
+#endif
return 0;
}
if (STR_EQ(str, "subport tb rate")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tb_rate, pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tb_rate, pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tb_rate, pkey);
+#endif
+#endif
}
if (STR_EQ(str, "subport tb size")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tb_size, pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tb_size, pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tb_size, pkey);
+#endif
+#endif
}
if (STR_EQ(str, "subport tc 0 rate")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[0], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[0], pkey);
+#endif
+#endif
}
if (STR_EQ(str, "subport tc 1 rate")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[1], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[1], pkey);
+#endif
+#endif
}
if (STR_EQ(str, "subport tc 2 rate")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[2], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[2], pkey);
+#endif
+#endif
}
if (STR_EQ(str, "subport tc 3 rate")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_rate[3], pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tc_rate[3], pkey);
+#endif
+#endif
}
if (STR_EQ(str, "subport tc rate")) {
@@ -1369,21 +1729,44 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return -1;
}
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ targ->qos_conf.port_params.subport_profiles->tc_rate[0] = val;
+ targ->qos_conf.port_params.subport_profiles->tc_rate[1] = val;
+ targ->qos_conf.port_params.subport_profiles->tc_rate[2] = val;
+ targ->qos_conf.port_params.subport_profiles->tc_rate[3] = val;
+#else
targ->qos_conf.subport_params[0].tc_rate[0] = val;
targ->qos_conf.subport_params[0].tc_rate[1] = val;
targ->qos_conf.subport_params[0].tc_rate[2] = val;
targ->qos_conf.subport_params[0].tc_rate[3] = val;
+#endif
return 0;
}
if (STR_EQ(str, "subport tc period")) {
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+ return parse_u64(&targ->qos_conf.port_params.subport_profiles->tc_period, pkey);
+#else
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.subport_params[0].tc_period, pkey);
+#else
return parse_int(&targ->qos_conf.subport_params[0].tc_period, pkey);
+#endif
+#endif
}
if (STR_EQ(str, "pipe tb rate")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tb_rate, pkey);
+#endif
}
if (STR_EQ(str, "pipe tb size")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tb_size, pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tb_size, pkey);
+#endif
}
if (STR_EQ(str, "pipe tc rate")) {
uint32_t val;
@@ -1399,19 +1782,39 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return 0;
}
if (STR_EQ(str, "pipe tc 0 rate")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[0], pkey);
+#endif
}
if (STR_EQ(str, "pipe tc 1 rate")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[1], pkey);
+#endif
}
if (STR_EQ(str, "pipe tc 2 rate")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[2], pkey);
+#endif
}
if (STR_EQ(str, "pipe tc 3 rate")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_rate[3], pkey);
+#endif
}
if (STR_EQ(str, "pipe tc period")) {
+#if RTE_VERSION > RTE_VERSION_NUM(19,11,0,0)
+ return parse_u64(&targ->qos_conf.pipe_params[0].tc_period, pkey);
+#else
return parse_int(&targ->qos_conf.pipe_params[0].tc_period, pkey);
+#endif
}
if (STR_EQ(str, "police action")) {
char *in = strstr(pkey, " io=");
@@ -1474,6 +1877,10 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
if (err) {
return -1;
}
+ if (queue_id >= RTE_SCHED_BE_QUEUES_PER_PIPE) {
+ set_errf("queue_id must be < %d", RTE_SCHED_BE_QUEUES_PER_PIPE);
+ return -1;
+ }
targ->qos_conf.pipe_params[0].wrr_weights[queue_id] = val;
return 0;
}
@@ -1500,31 +1907,41 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
return parse_int(&targ->n_max_rules, pkey);
}
- if (STR_EQ(str, "tunnel hop limit")) {
- uint32_t val;
- int err = parse_int(&val, pkey);
- if (err) {
- return -1;
- }
- targ->tunnel_hop_limit = val;
- return 0;
- }
+ if (STR_EQ(str, "tunnel hop limit")) {
+ uint32_t val;
+ int err = parse_int(&val, pkey);
+ if (err) {
+ return -1;
+ }
+ targ->tunnel_hop_limit = val;
+ return 0;
+ }
- if (STR_EQ(str, "lookup port mask")) {
- uint32_t val;
- int err = parse_int(&val, pkey);
- if (err) {
- return -1;
- }
- targ->lookup_port_mask = val;
- return 0;
- }
+ if (STR_EQ(str, "lookup port mask")) {
+ uint32_t val;
+ int err = parse_int(&val, pkey);
+ if (err) {
+ return -1;
+ }
+ targ->lookup_port_mask = val;
+ return 0;
+ }
if (STR_EQ(str, "irq debug")) {
parse_int(&targ->irq_debug, pkey);
return 0;
}
+ if (STR_EQ(str, "multiplier")) {
+ parse_int(&targ->multiplier, pkey);
+ return 0;
+ }
+
+ if (STR_EQ(str, "mirror size")) {
+ parse_int(&targ->mirror_size, pkey);
+ return 0;
+ }
+
set_errf("Option '%s' is not known", str);
/* fail on unknown keys */
return -1;
@@ -1576,14 +1993,14 @@ int prox_parse_args(int argc, char **argv)
}
}
- strncpy(prox_cfg.name, cfg_file + offset, MAX_NAME_SIZE);
+ prox_strncpy(prox_cfg.name, cfg_file + offset, MAX_NAME_SIZE);
break;
case 'v':
plog_set_lvl(atoi(optarg));
break;
case 'l':
prox_cfg.log_name_pid = 0;
- strncpy(prox_cfg.log_name, optarg, MAX_NAME_SIZE);
+ prox_strncpy(prox_cfg.log_name, optarg, MAX_NAME_SIZE);
break;
case 'p':
prox_cfg.log_name_pid = 1;
@@ -1605,7 +2022,7 @@ int prox_parse_args(int argc, char **argv)
case 'r':
if (!str_is_number(optarg) || strlen(optarg) > 11)
return -1;
- strncpy(prox_cfg.update_interval_str, optarg, sizeof(prox_cfg.update_interval_str));
+ prox_strncpy(prox_cfg.update_interval_str, optarg, sizeof(prox_cfg.update_interval_str));
break;
case 'o':
if (prox_cfg.flags & DSF_DAEMON)
@@ -1670,7 +2087,7 @@ int prox_parse_args(int argc, char **argv)
(tmp2 = strchr(tmp, '='))) {
*tmp2 = 0;
tmp3[0] = '$';
- strncpy(tmp3 + 1, tmp, 63);
+ prox_strncpy(tmp3 + 1, tmp, 63);
plog_info("\tAdding variable: %s = %s\n", tmp3, tmp2 + 1);
ret = add_var(tmp3, tmp2 + 1, 1);
if (ret == -2) {
@@ -1883,10 +2300,14 @@ int prox_setup_rte(const char *prog_name)
sprintf(rte_arg[++argc], "-c%s", tmp);
rte_argv[argc] = rte_arg[argc];
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
+ uint32_t master_core = prox_cfg.master;
if (prox_cfg.flags & DSF_USE_DUMMY_CPU_TOPO)
- sprintf(rte_arg[++argc], "--master-lcore=%u", 0);
- else
- sprintf(rte_arg[++argc], "--master-lcore=%u", prox_cfg.master);
+ master_core = 0;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+ sprintf(rte_arg[++argc], "--master-lcore=%u", master_core);
+#else
+ sprintf(rte_arg[++argc], "--main-lcore=%u", master_core);
+#endif
rte_argv[argc] = rte_arg[argc];
#else
/* For old DPDK versions, the master core had to be the first
@@ -1949,7 +2370,7 @@ int prox_setup_rte(const char *prog_name)
if (ptr) {
*ptr++ = '\0';
}
- strcpy(rte_arg[++argc], ptr2);
+ prox_strncpy(rte_arg[++argc], ptr2, MAX_ARG_LEN);
rte_argv[argc] = rte_arg[argc];
}
}
@@ -2000,5 +2421,20 @@ int prox_setup_rte(const char *prog_name)
return -1;
}
}
+ uint16_t port_id;
+ for (int i = 0; i < n_deferred_ports; i++) {
+ if (prox_rte_eth_dev_get_port_by_name(deferred_port[i].name, &port_id) != 0) {
+ plog_err("Did not find port name %s used while reading %s\n", deferred_port[i].name, deferred_port[i].is_rx_port ? "rx port" : "tx_port");
+ return -1;
+ }
+ plog_info("\tport %s is port id %d\n", deferred_port[i].name, port_id);
+ if (deferred_port[i].is_rx_port) {
+ deferred_port[i].targ->rx_port_queue[0].port = port_id;
+ deferred_port[i].targ->nb_rxports = 1;
+ } else {
+ deferred_port[i].targ->tx_port_queue[0].port = port_id;
+ deferred_port[i].targ->nb_txports = 1;
+ }
+ }
return 0;
}
diff --git a/VNFs/DPPD-PROX/prox_cfg.h b/VNFs/DPPD-PROX/prox_cfg.h
index ed54ecc5..e23c8ed9 100644
--- a/VNFs/DPPD-PROX/prox_cfg.h
+++ b/VNFs/DPPD-PROX/prox_cfg.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,8 +18,11 @@
#define _PROX_CFG_H
#include <inttypes.h>
+#include <rte_ether.h>
#include "prox_globals.h"
+#include "ip6_addr.h"
+#include "prox_compat.h"
#define PROX_CM_STR_LEN (2 + 2 * sizeof(prox_cfg.core_mask) + 1)
#define PROX_CM_DIM (RTE_MAX_LCORE/(sizeof(uint64_t) * 8))
@@ -40,7 +43,9 @@
#define DSF_DISABLE_CMT 0x00002000 /* CMT disabled */
#define DSF_LIST_TASK_MODES 0x00004000 /* list supported task modes and exit */
#define DSF_ENABLE_BYPASS 0x00008000 /* Use Multi Producer rings to enable ring bypass */
-#define DSF_CTRL_PLANE_ENABLED 0x00010000 /* ctrl plane enabled */
+#define DSF_L3_ENABLED 0x00010000 /* ctrl plane enabled for IPv4 */
+#define DSF_NDP_ENABLED 0x00020000 /* ctrl plane enabled for IPv6 */
+#define DSF_CTRL_PLANE_ENABLED (DSF_L3_ENABLED|DSF_NDP_ENABLED) /* ctrl plane enabled */
#define MAX_PATH_LEN 1024
@@ -66,6 +71,14 @@ struct prox_cfg {
uint32_t logbuf_size;
uint32_t logbuf_pos;
char *logbuf;
+ uint32_t heartbeat_timeout;
+ uint32_t poll_timeout;
+ uint64_t heartbeat_tsc;
+ struct ipv6_addr all_routers_ipv6_mcast_addr;
+ struct ipv6_addr all_nodes_ipv6_mcast_addr;
+ struct ipv6_addr random_ip;
+ prox_rte_ether_addr all_routers_mac_addr;
+ prox_rte_ether_addr all_nodes_mac_addr;
};
extern struct prox_cfg prox_cfg;
diff --git a/VNFs/DPPD-PROX/prox_cksum.c b/VNFs/DPPD-PROX/prox_cksum.c
index 9a05097e..401191f6 100644
--- a/VNFs/DPPD-PROX/prox_cksum.c
+++ b/VNFs/DPPD-PROX/prox_cksum.c
@@ -20,13 +20,23 @@
#include "log.h"
/* compute IP 16 bit checksum */
-void prox_ip_cksum_sw(struct ipv4_hdr *buf)
+/* The hdr_checksum field must be set to 0 by the caller. */
+inline void prox_ip_cksum_sw(prox_rte_ipv4_hdr *buf)
{
- const uint16_t size = sizeof(struct ipv4_hdr);
+ const uint16_t size = sizeof(prox_rte_ipv4_hdr);
uint32_t cksum = 0;
uint32_t nb_dwords;
uint32_t tail, mask;
- uint32_t *pdwd = (uint32_t *)buf;
+ /* Defining pdwd as (uint32_t *) causes some optimization issues (gcc -O2).
+ In prox_ip_cksum(), hdr_checksum is set to 0, as expected by the code below,
+ but when *pdwd is plain uint32_t, GCC does not see the pointer aliasing on
+ the IPv4 header, optimizes this hdr_checksum initialization away, and hence
+ breaks the expectations of the checksum computation loop below.
+ The following typedef tells GCC that the IPv4 header may be aliased by
+ pdwd, which prevents GCC from removing the hdr_checksum = 0 assignment.
+ */
+ typedef uint32_t __attribute__((__may_alias__)) uint32_may_alias;
+ uint32_may_alias *pdwd = (uint32_may_alias *)buf;
/* compute 16 bit checksum using hi and low parts of 32 bit integers */
for (nb_dwords = (size >> 2); nb_dwords > 0; --nb_dwords) {
@@ -52,7 +62,7 @@ void prox_ip_cksum_sw(struct ipv4_hdr *buf)
buf->hdr_checksum = ~((uint16_t)cksum);
}
-static uint16_t calc_pseudo_checksum(uint8_t ipproto, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
+static inline uint16_t calc_pseudo_checksum(uint8_t ipproto, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
{
uint32_t csum = 0;
@@ -63,7 +73,7 @@ static uint16_t calc_pseudo_checksum(uint8_t ipproto, uint16_t len, uint32_t src
return csum;
}
-static void prox_write_udp_pseudo_hdr(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
+static inline void prox_write_udp_pseudo_hdr(prox_rte_udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
{
/* Note that the csum is not complemented, while the pseaudo
header checksum is calculated as "... the 16-bit one's
@@ -74,38 +84,42 @@ static void prox_write_udp_pseudo_hdr(struct udp_hdr *udp, uint16_t len, uint32_
udp->dgram_cksum = calc_pseudo_checksum(IPPROTO_UDP, len, src_ip_addr, dst_ip_addr);
}
-static void prox_write_tcp_pseudo_hdr(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
+static inline void prox_write_tcp_pseudo_hdr(prox_rte_tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
{
tcp->cksum = calc_pseudo_checksum(IPPROTO_TCP, len, src_ip_addr, dst_ip_addr);
}
-void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload)
+inline void prox_ip_udp_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *pip, uint16_t l2_len, uint16_t l3_len, int cksum_offload)
{
- prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & IPV4_CKSUM);
+ prox_ip_cksum(mbuf, pip, l2_len, l3_len, cksum_offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
uint32_t l4_len = rte_bswap16(pip->total_length) - l3_len;
if (pip->next_proto_id == IPPROTO_UDP) {
- struct udp_hdr *udp = (struct udp_hdr *)(((uint8_t*)pip) + l3_len);
+ prox_rte_udp_hdr *udp = (prox_rte_udp_hdr *)(((uint8_t*)pip) + l3_len);
#ifndef SOFT_CRC
- if (cksum_offload & UDP_CKSUM) {
- mbuf->ol_flags |= PKT_TX_UDP_CKSUM;
+ if (cksum_offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
prox_write_udp_pseudo_hdr(udp, l4_len, pip->src_addr, pip->dst_addr);
} else
#endif
prox_udp_cksum_sw(udp, l4_len, pip->src_addr, pip->dst_addr);
} else if (pip->next_proto_id == IPPROTO_TCP) {
- struct tcp_hdr *tcp = (struct tcp_hdr *)(((uint8_t*)pip) + l3_len);
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(((uint8_t*)pip) + l3_len);
#ifndef SOFT_CRC
- if (cksum_offload & UDP_CKSUM) {
+ if (cksum_offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
prox_write_tcp_pseudo_hdr(tcp, l4_len, pip->src_addr, pip->dst_addr);
- mbuf->ol_flags |= PKT_TX_UDP_CKSUM;
+ mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
} else
#endif
prox_tcp_cksum_sw(tcp, l4_len, pip->src_addr, pip->dst_addr);
+ } else if (pip->next_proto_id == IPPROTO_IGMP) {
+ struct igmpv2_hdr *igmp = (struct igmpv2_hdr *)(((uint8_t*)pip) + l3_len);
+ // Not sure NIC can offload IGMP checkum => do it in software
+ prox_igmp_cksum_sw(igmp, l4_len);
}
}
-static uint16_t checksum_byte_seq(uint16_t *buf, uint16_t len)
+static inline uint16_t checksum_byte_seq(uint16_t *buf, uint16_t len)
{
uint32_t csum = 0;
@@ -129,17 +143,24 @@ static uint16_t checksum_byte_seq(uint16_t *buf, uint16_t len)
return ~csum;
}
-void prox_udp_cksum_sw(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
+inline void prox_udp_cksum_sw(prox_rte_udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
{
prox_write_udp_pseudo_hdr(udp, len, src_ip_addr, dst_ip_addr);
uint16_t csum = checksum_byte_seq((uint16_t *)udp, len);
udp->dgram_cksum = csum;
}
-void prox_tcp_cksum_sw(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
+inline void prox_tcp_cksum_sw(prox_rte_tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr)
{
prox_write_tcp_pseudo_hdr(tcp, len, src_ip_addr, dst_ip_addr);
uint16_t csum = checksum_byte_seq((uint16_t *)tcp, len);
tcp->cksum = csum;
}
+
+inline void prox_igmp_cksum_sw(struct igmpv2_hdr *igmp, uint16_t len)
+{
+ igmp->checksum = 0;
+ uint16_t csum = checksum_byte_seq((uint16_t *)igmp, len);
+ igmp->checksum = csum;
+}
diff --git a/VNFs/DPPD-PROX/prox_cksum.h b/VNFs/DPPD-PROX/prox_cksum.h
index c11b17a5..d4ac5a6b 100644
--- a/VNFs/DPPD-PROX/prox_cksum.h
+++ b/VNFs/DPPD-PROX/prox_cksum.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,6 +25,9 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>
+#include "igmp.h"
+#include "prox_compat.h"
+#include "prox_ipv6.h"
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
#define CALC_TX_OL(l2_len, l3_len) ((uint64_t)(l2_len) | (uint64_t)(l3_len) << 7)
@@ -39,12 +42,12 @@ static void prox_ip_cksum_hw(struct rte_mbuf *mbuf, uint16_t l2_len, uint16_t l3
#else
mbuf->tx_offload = CALC_TX_OL(l2_len, l3_len);
#endif
- mbuf->ol_flags |= PKT_TX_IP_CKSUM;
+ mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
}
-void prox_ip_cksum_sw(struct ipv4_hdr *buf);
+void prox_ip_cksum_sw(prox_rte_ipv4_hdr *buf);
-static inline void prox_ip_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int offload)
+static inline void prox_ip_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int offload)
{
buf->hdr_checksum = 0;
#ifdef SOFT_CRC
@@ -59,10 +62,18 @@ static inline void prox_ip_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, ui
#endif
}
-void prox_ip_udp_cksum(struct rte_mbuf *mbuf, struct ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int cksum_offload);
+void prox_ip_udp_cksum(struct rte_mbuf *mbuf, prox_rte_ipv4_hdr *buf, uint16_t l2_len, uint16_t l3_len, int cksum_offload);
/* src_ip_addr/dst_ip_addr are in network byte order */
-void prox_udp_cksum_sw(struct udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr);
-void prox_tcp_cksum_sw(struct tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr);
+void prox_udp_cksum_sw(prox_rte_udp_hdr *udp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr);
+void prox_tcp_cksum_sw(prox_rte_tcp_hdr *tcp, uint16_t len, uint32_t src_ip_addr, uint32_t dst_ip_addr);
+void prox_igmp_cksum_sw(struct igmpv2_hdr *igmp, uint16_t len);
+struct ipv6_pseudo_hdr {
+ struct ipv6_addr src;
+ struct ipv6_addr dst;
+ uint32_t length;
+ uint32_t protocl:8;
+ uint32_t reserved:24;
+} __attribute__((__packed__));
#endif /* _PROX_CKSUM_H_ */
diff --git a/VNFs/DPPD-PROX/prox_compat.c b/VNFs/DPPD-PROX/prox_compat.c
new file mode 100644
index 00000000..572872ee
--- /dev/null
+++ b/VNFs/DPPD-PROX/prox_compat.c
@@ -0,0 +1,30 @@
+/*
+// Copyright (c) 2010-2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include <stddef.h>
+#include "quit.h"
+#include "prox_compat.h"
+
+char *prox_strncpy(char * dest, const char * src, size_t count)
+{
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Wstringop-truncation"
+ strncpy(dest, src, count);
+#pragma GCC diagnostic pop
+ PROX_PANIC(dest[count - 1] != 0, "\t\tError in strncpy: buffer overrun (%lu bytes)", count);
+ return dest;
+}
diff --git a/VNFs/DPPD-PROX/prox_compat.h b/VNFs/DPPD-PROX/prox_compat.h
index ee61ee4a..d4b7f247 100644
--- a/VNFs/DPPD-PROX/prox_compat.h
+++ b/VNFs/DPPD-PROX/prox_compat.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -13,10 +13,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
*/
+#ifndef _PROX_COMPAT_H_
+#define _PROX_COMPAT_H_
#include <rte_common.h>
#include <rte_table_hash.h>
+#include <rte_ethdev.h>
#include <rte_hash_crc.h>
+#include <rte_cryptodev.h>
+
#include "hash_utils.h"
+#include "log.h"
/* This is a copy of the rte_table_hash_params from DPDK 17.11 *
* So if DPDK decides to change the structure the modifications *
@@ -33,6 +39,19 @@ struct prox_rte_table_params {
uint64_t seed;
};
+#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
+typedef uint8_t prox_next_hop_index_type;
+#else
+typedef uint32_t prox_next_hop_index_type;
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(16,7,0,0)
+// Compat stub: rte_mempool_free() does not exist before DPDK 16.07, so the
+// pool cannot actually be released here — only warn about the leak.
+// 'inline' prevents unused-function warnings in every TU including this header.
+static inline void rte_mempool_free(struct rte_mempool *mp)
+{
+	(void)mp;	// nothing to do without the real API
+	plog_warn("rte_mempool_free not supported in this DPDK - upgrade DPDK to avoid memory leaks\n");
+}
+#endif
+
#if RTE_VERSION < RTE_VERSION_NUM(17,11,0,0)
static void *prox_rte_table_create(struct prox_rte_table_params *params, int socket_id, uint32_t entry_size)
@@ -61,6 +80,16 @@ static void *prox_rte_table_create(struct prox_rte_table_params *params, int soc
}
};
+// Look up a port id by device name.  Wraps the pre-17.11 DPDK API, which
+// takes a uint8_t port id, behind the modern uint16_t signature.
+// Returns 0 on success (with *port_id set) or a negative error code.
+static inline int prox_rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
+{
+#if RTE_VERSION < RTE_VERSION_NUM(16,7,0,0)
+	plog_err("Not supported in DPDK version <= 16.04 by lack of rte_eth_dev_get_port_by_name support\n");
+	return -1;
+#else
+	// Go through a local uint8_t instead of casting the pointer: the old API
+	// writes only one byte, which would leave the upper byte of *port_id
+	// uninitialized (and is wrong on big-endian anyway).
+	uint8_t id = 0;
+	int ret = rte_eth_dev_get_port_by_name(name, &id);
+	if (ret == 0)
+		*port_id = id;
+	return ret;
+#endif
+}
+
#define prox_rte_table_free rte_table_hash_ext_dosig_ops.f_free
#define prox_rte_table_add rte_table_hash_ext_dosig_ops.f_add
#define prox_rte_table_delete rte_table_hash_ext_dosig_ops.f_delete
@@ -100,6 +129,8 @@ static void *prox_rte_table_create(struct prox_rte_table_params *params, int soc
}
}
+#define prox_rte_eth_dev_get_port_by_name rte_eth_dev_get_port_by_name
+
#define prox_rte_table_free rte_table_hash_ext_ops.f_free
#define prox_rte_table_add rte_table_hash_ext_ops.f_add
#define prox_rte_table_delete rte_table_hash_ext_ops.f_delete
@@ -117,3 +148,515 @@ static void *prox_rte_table_create(struct prox_rte_table_params *params, int soc
#define prox_rte_table_key8_stats rte_table_hash_key8_ext_ops.f_stats
#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(18,8,0,0)
+#define rte_cryptodev_sym_get_private_session_size rte_cryptodev_get_private_session_size
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,2,0,0)
+#define RTE_COLOR_GREEN e_RTE_METER_GREEN
+#define RTE_COLOR_YELLOW e_RTE_METER_YELLOW
+#define RTE_COLOR_RED e_RTE_METER_RED
+#define prox_rte_color rte_meter_color
+// The pre-19.02 API takes no port argument; parameter A is dropped.
+// No trailing ';' in function-like macros: callers already terminate the
+// statement, and an embedded ';' breaks use in if/else and expressions.
+#define prox_rte_sched_port_pkt_read_tree_path(A,B,C,D,E,F) rte_sched_port_pkt_read_tree_path(B,C,D,E,F)
+#define prox_rte_sched_port_pkt_write(A,B,C,D,E,F,G) rte_sched_port_pkt_write(B,C,D,E,F,G)
+#else
+#define prox_rte_color rte_color
+#define prox_rte_sched_port_pkt_read_tree_path(A,B,C,D,E,F) rte_sched_port_pkt_read_tree_path(A,B,C,D,E,F)
+#define prox_rte_sched_port_pkt_write(A,B,C,D,E,F,G) rte_sched_port_pkt_write(A,B,C,D,E,F,G)
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+typedef struct vxlan_gpe_hdr prox_rte_vxlan_gpe_hdr;
+#endif
+#define PROX_RTE_ETHER_CRC_LEN ETHER_CRC_LEN
+#define PROX_RTE_ETHER_MIN_LEN ETHER_MIN_LEN
+#define PROX_RTE_ETHER_MAX_LEN ETHER_MAX_LEN
+#define PROX_RTE_ETHER_HDR_LEN ETHER_HDR_LEN
+#define PROX_RTE_TCP_SYN_FLAG TCP_SYN_FLAG
+#define PROX_RTE_TCP_FIN_FLAG TCP_FIN_FLAG
+#define PROX_RTE_TCP_RST_FLAG TCP_RST_FLAG
+#define PROX_RTE_TCP_ACK_FLAG TCP_ACK_FLAG
+#define PROX_RTE_IP_ICMP_ECHO_REPLY IP_ICMP_ECHO_REPLY
+#define PROX_RTE_IP_ICMP_ECHO_REQUEST IP_ICMP_ECHO_REQUEST
+
+#define prox_rte_ether_addr_copy ether_addr_copy
+#define prox_rte_eth_random_addr eth_random_addr
+
+typedef struct ipv6_hdr prox_rte_ipv6_hdr;
+typedef struct ipv4_hdr prox_rte_ipv4_hdr;
+typedef struct ether_addr prox_rte_ether_addr;
+typedef struct ether_hdr prox_rte_ether_hdr;
+typedef struct vlan_hdr prox_rte_vlan_hdr;
+typedef struct udp_hdr prox_rte_udp_hdr;
+typedef struct tcp_hdr prox_rte_tcp_hdr;
+typedef struct icmp_hdr prox_rte_icmp_hdr;
+
+#ifndef RTE_SCHED_BE_QUEUES_PER_PIPE
+#define RTE_SCHED_BE_QUEUES_PER_PIPE RTE_SCHED_QUEUES_PER_PIPE
+#endif
+
+#define PROX_RTE_IS_IPV4_MCAST IS_IPV4_MCAST
+#define prox_rte_is_same_ether_addr is_same_ether_addr
+#define prox_rte_is_zero_ether_addr is_zero_ether_addr
+#else // >= 19.08
+
+#define PROX_RTE_ETHER_CRC_LEN RTE_ETHER_CRC_LEN
+#define PROX_RTE_ETHER_MIN_LEN RTE_ETHER_MIN_LEN
+#define PROX_RTE_ETHER_MAX_LEN RTE_ETHER_MAX_LEN
+#define PROX_RTE_ETHER_HDR_LEN RTE_ETHER_HDR_LEN
+#define PROX_RTE_TCP_SYN_FLAG RTE_TCP_SYN_FLAG
+#define PROX_RTE_TCP_FIN_FLAG RTE_TCP_FIN_FLAG
+#define PROX_RTE_TCP_RST_FLAG RTE_TCP_RST_FLAG
+#define PROX_RTE_TCP_ACK_FLAG RTE_TCP_ACK_FLAG
+#define PROX_RTE_IP_ICMP_ECHO_REPLY RTE_IP_ICMP_ECHO_REPLY
+#define PROX_RTE_IP_ICMP_ECHO_REQUEST RTE_IP_ICMP_ECHO_REQUEST
+
+#define prox_rte_ether_addr_copy rte_ether_addr_copy
+#define prox_rte_eth_random_addr rte_eth_random_addr
+
+typedef struct rte_ipv6_hdr prox_rte_ipv6_hdr;
+typedef struct rte_ipv4_hdr prox_rte_ipv4_hdr;
+typedef struct rte_ether_addr prox_rte_ether_addr;
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+typedef struct rte_ether_hdr prox_rte_ether_hdr;
+#else
+typedef struct prox_rte_ether_hdr
+{
+ struct rte_ether_addr d_addr; /**< Destination address. */
+ struct rte_ether_addr s_addr; /**< Source address. */
+ rte_be16_t ether_type; /**< Frame type. */
+} __rte_aligned(2) prox_rte_ether_hdr;
+#endif
+typedef struct rte_vlan_hdr prox_rte_vlan_hdr;
+typedef struct rte_vxlan_gpe_hdr prox_rte_vxlan_gpe_hdr;
+typedef struct rte_udp_hdr prox_rte_udp_hdr;
+typedef struct rte_tcp_hdr prox_rte_tcp_hdr;
+typedef struct rte_icmp_hdr prox_rte_icmp_hdr;
+
+#define PROX_RTE_IS_IPV4_MCAST RTE_IS_IPV4_MCAST
+#define prox_rte_is_same_ether_addr rte_is_same_ether_addr
+#define prox_rte_is_zero_ether_addr rte_is_zero_ether_addr
+
+#endif
+
+char *prox_strncpy(char * dest, const char * src, size_t count);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_MB
+#if RTE_VERSION < RTE_VERSION_NUM(19,5,0,0)
+//RFC4303
+struct prox_esp_hdr {
+ uint32_t spi;
+ uint32_t seq;
+};
+struct prox_rte_cryptodev_qp_conf {
+ uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
+ struct rte_mempool * mp_session;
+ struct rte_mempool * mp_session_private;
+};
+
+static int prox_rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, struct prox_rte_cryptodev_qp_conf *qp_conf, int socket_id)
+{
+ struct rte_mempool *session_pool = qp_conf->mp_session;
+ return rte_cryptodev_queue_pair_setup(dev_id, queue_pair_id, (struct rte_cryptodev_qp_conf *)qp_conf, socket_id, session_pool);
+}
+
+#else
+#define prox_rte_cryptodev_qp_conf rte_cryptodev_qp_conf
+static int prox_rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, struct prox_rte_cryptodev_qp_conf *qp_conf, int socket_id)
+{
+ return rte_cryptodev_queue_pair_setup(dev_id, queue_pair_id, (struct rte_cryptodev_qp_conf *)qp_conf, socket_id);
+}
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,8,0,0)
+#define prox_esp_hdr esp_hdr
+
+#else // From DPDK 19.08
+#define prox_esp_hdr rte_esp_hdr
+
+#endif
+#endif
+#endif // CONFIG_RTE_LIBRTE_PMD_AESNI_MB
+
+#if RTE_VERSION < RTE_VERSION_NUM(19,11,0,0)
+#define prox_rte_eth_dev_count_avail() rte_eth_dev_count()
+#else
+#define prox_rte_eth_dev_count_avail() rte_eth_dev_count_avail()
+#endif
+
+// deal with RTE_DEPRECATED symbols
+
+#if RTE_VERSION < RTE_VERSION_NUM(20,11,0,0)
+#define SKIP_MAIN SKIP_MASTER
+#define CALL_MAIN CALL_MASTER
+#define RTE_DEVTYPE_ALLOWED RTE_DEVTYPE_WHITELISTED_PCI
+#define RTE_DEVTYPE_BLOCKED RTE_DEVTYPE_BLACKLISTED_PCI
+#define RTE_LCORE_FOREACH_WORKER RTE_LCORE_FOREACH_SLAVE
+#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,0)
+#define RTE_DEV_ALLOWED RTE_DEV_WHITELISTED
+#define RTE_DEV_BLOCKED RTE_DEV_BLACKLISTED
+#define RTE_BUS_SCAN_ALLOWLIST RTE_BUS_SCAN_WHITELIST
+#define RTE_BUS_SCAN_BLOCKLIST RTE_BUS_SCAN_BLACKLIST
+#endif
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(21,5,0,0)
+#define RTE_PCI_ANY_ID PCI_ANY_ID
+#define PKT_RX_OUTER_IP_CKSUM_BAD PKT_RX_EIP_CKSUM_BAD
+#endif
+
+#if RTE_VERSION < RTE_VERSION_NUM(21,11,0,0)
+#define RTE_MEMPOOL_HEADER_SIZE MEMPOOL_HEADER_SIZE
+#define RTE_MBUF_F_RX_RSS_HASH PKT_RX_RSS_HASH
+#define RTE_MBUF_F_RX_FDIR PKT_RX_FDIR
+#define RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD PKT_RX_OUTER_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_IP_CKSUM_BAD PKT_RX_IP_CKSUM_BAD
+#define RTE_MBUF_F_RX_L4_CKSUM_BAD PKT_RX_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_IEEE1588_PTP PKT_RX_IEEE1588_PTP
+#define RTE_MBUF_F_RX_IEEE1588_TMST PKT_RX_IEEE1588_TMST
+#define RTE_MBUF_F_RX_FDIR_ID PKT_RX_FDIR_ID
+#define RTE_MBUF_F_RX_FDIR_FLX PKT_RX_FDIR_FLX
+#define RTE_MBUF_F_TX_QINQ PKT_TX_QINQ_PKT
+#define RTE_MBUF_F_TX_TCP_SEG PKT_TX_TCP_SEG
+#define RTE_MBUF_F_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define RTE_MBUF_F_TX_L4_NO_CKSUM PKT_TX_L4_NO_CKSUM
+#define RTE_MBUF_F_TX_TCP_CKSUM PKT_TX_TCP_CKSUM
+#define RTE_MBUF_F_TX_SCTP_CKSUM PKT_TX_SCTP_CKSUM
+#define RTE_MBUF_F_TX_UDP_CKSUM PKT_TX_UDP_CKSUM
+#define RTE_MBUF_F_TX_L4_MASK PKT_TX_L4_MASK
+#define RTE_MBUF_F_TX_IP_CKSUM PKT_TX_IP_CKSUM
+#define RTE_MBUF_F_TX_IPV4 PKT_TX_IPV4
+#define RTE_MBUF_F_TX_IPV6 PKT_TX_IPV6
+#define RTE_MBUF_F_TX_VLAN PKT_TX_VLAN_PKT
+#define RTE_MBUF_F_TX_OUTER_IP_CKSUM PKT_TX_OUTER_IP_CKSUM
+#define RTE_MBUF_F_TX_OUTER_IPV4 PKT_TX_OUTER_IPV4
+#define RTE_MBUF_F_TX_OUTER_IPV6 PKT_TX_OUTER_IPV6
+#define RTE_MBUF_F_INDIRECT IND_ATTACHED_MBUF
+#define RTE_ETH_LINK_SPEED_AUTONEG ETH_LINK_SPEED_AUTONEG
+#define RTE_ETH_LINK_SPEED_FIXED ETH_LINK_SPEED_FIXED
+#define RTE_ETH_LINK_SPEED_10M_HD ETH_LINK_SPEED_10M_HD
+#define RTE_ETH_LINK_SPEED_10M ETH_LINK_SPEED_10M
+#define RTE_ETH_LINK_SPEED_100M_HD ETH_LINK_SPEED_100M_HD
+#define RTE_ETH_LINK_SPEED_100M ETH_LINK_SPEED_100M
+#define RTE_ETH_LINK_SPEED_1G ETH_LINK_SPEED_1G
+#define RTE_ETH_LINK_SPEED_2_5G ETH_LINK_SPEED_2_5G
+#define RTE_ETH_LINK_SPEED_5G ETH_LINK_SPEED_5G
+#define RTE_ETH_LINK_SPEED_10G ETH_LINK_SPEED_10G
+#define RTE_ETH_LINK_SPEED_20G ETH_LINK_SPEED_20G
+#define RTE_ETH_LINK_SPEED_25G ETH_LINK_SPEED_25G
+#define RTE_ETH_LINK_SPEED_40G ETH_LINK_SPEED_40G
+#define RTE_ETH_LINK_SPEED_50G ETH_LINK_SPEED_50G
+#define RTE_ETH_LINK_SPEED_56G ETH_LINK_SPEED_56G
+#define RTE_ETH_LINK_SPEED_100G ETH_LINK_SPEED_100G
+#define RTE_ETH_SPEED_NUM_NONE ETH_SPEED_NUM_NONE
+#define RTE_ETH_SPEED_NUM_10M ETH_SPEED_NUM_10M
+#define RTE_ETH_SPEED_NUM_100M ETH_SPEED_NUM_100M
+#define RTE_ETH_SPEED_NUM_1G ETH_SPEED_NUM_1G
+#define RTE_ETH_SPEED_NUM_2_5G ETH_SPEED_NUM_2_5G
+#define RTE_ETH_SPEED_NUM_5G ETH_SPEED_NUM_5G
+#define RTE_ETH_SPEED_NUM_10G ETH_SPEED_NUM_10G
+#define RTE_ETH_SPEED_NUM_20G ETH_SPEED_NUM_20G
+#define RTE_ETH_SPEED_NUM_25G ETH_SPEED_NUM_25G
+#define RTE_ETH_SPEED_NUM_40G ETH_SPEED_NUM_40G
+#define RTE_ETH_SPEED_NUM_50G ETH_SPEED_NUM_50G
+#define RTE_ETH_SPEED_NUM_56G ETH_SPEED_NUM_56G
+#define RTE_ETH_SPEED_NUM_100G ETH_SPEED_NUM_100G
+#define RTE_ETH_LINK_HALF_DUPLEX ETH_LINK_HALF_DUPLEX
+#define RTE_ETH_LINK_FULL_DUPLEX ETH_LINK_FULL_DUPLEX
+#define RTE_ETH_LINK_DOWN ETH_LINK_DOWN
+#define RTE_ETH_LINK_UP ETH_LINK_UP
+#define RTE_ETH_LINK_FIXED ETH_LINK_FIXED
+#define RTE_ETH_LINK_AUTONEG ETH_LINK_AUTONEG
+#define RTE_ETH_MQ_RX_RSS_FLAG ETH_MQ_RX_RSS_FLAG
+#define RTE_ETH_MQ_RX_DCB_FLAG ETH_MQ_RX_DCB_FLAG
+#define RTE_ETH_MQ_RX_VMDQ_FLAG ETH_MQ_RX_VMDQ_FLAG
+#define RTE_ETH_MQ_RX_NONE ETH_MQ_RX_NONE
+#define RTE_ETH_MQ_RX_RSS ETH_MQ_RX_RSS
+#define RTE_ETH_MQ_RX_DCB ETH_MQ_RX_DCB
+#define RTE_ETH_MQ_RX_DCB_RSS ETH_MQ_RX_DCB_RSS
+#define RTE_ETH_MQ_RX_VMDQ_ONLY ETH_MQ_RX_VMDQ_ONLY
+#define RTE_ETH_MQ_RX_VMDQ_RSS ETH_MQ_RX_VMDQ_RSS
+#define RTE_ETH_MQ_RX_VMDQ_DCB ETH_MQ_RX_VMDQ_DCB
+#define RTE_ETH_MQ_RX_VMDQ_DCB_RSS ETH_MQ_RX_VMDQ_DCB_RSS
+#define RTE_ETH_MQ_TX_NONE ETH_MQ_TX_NONE
+#define RTE_ETH_MQ_TX_DCB ETH_MQ_TX_DCB
+#define RTE_ETH_MQ_TX_VMDQ_DCB ETH_MQ_TX_VMDQ_DCB
+#define RTE_ETH_MQ_TX_VMDQ_ONLY ETH_MQ_TX_VMDQ_ONLY
+#define RTE_ETH_VLAN_TYPE_UNKNOWN ETH_VLAN_TYPE_UNKNOWN
+#define RTE_ETH_VLAN_TYPE_INNER ETH_VLAN_TYPE_INNER
+#define RTE_ETH_VLAN_TYPE_OUTER ETH_VLAN_TYPE_OUTER
+#define RTE_ETH_VLAN_TYPE_MAX ETH_VLAN_TYPE_MAX
+#define RTE_ETH_RSS_IPV4 ETH_RSS_IPV4
+#define RTE_ETH_RSS_FRAG_IPV4 ETH_RSS_FRAG_IPV4
+#define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP ETH_RSS_NONFRAG_IPV4_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER ETH_RSS_NONFRAG_IPV4_OTHER
+#define RTE_ETH_RSS_IPV6 ETH_RSS_IPV6
+#define RTE_ETH_RSS_FRAG_IPV6 ETH_RSS_FRAG_IPV6
+#define RTE_ETH_RSS_NONFRAG_IPV6_TCP ETH_RSS_NONFRAG_IPV6_TCP
+#define RTE_ETH_RSS_NONFRAG_IPV6_UDP ETH_RSS_NONFRAG_IPV6_UDP
+#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP ETH_RSS_NONFRAG_IPV6_SCTP
+#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER ETH_RSS_NONFRAG_IPV6_OTHER
+#define RTE_ETH_RSS_L2_PAYLOAD ETH_RSS_L2_PAYLOAD
+#define RTE_ETH_RSS_IPV6_EX ETH_RSS_IPV6_EX
+#define RTE_ETH_RSS_IPV6_TCP_EX ETH_RSS_IPV6_TCP_EX
+#define RTE_ETH_RSS_IPV6_UDP_EX ETH_RSS_IPV6_UDP_EX
+#define RTE_ETH_RSS_IP ETH_RSS_IP
+#define RTE_ETH_RSS_UDP ETH_RSS_UDP
+#define RTE_ETH_RSS_TCP ETH_RSS_TCP
+#define RTE_ETH_RSS_SCTP ETH_RSS_SCTP
+#define RTE_ETH_RSS_PROTO_MASK ETH_RSS_PROTO_MASK
+#define RTE_ETH_RSS_RETA_SIZE_64 ETH_RSS_RETA_SIZE_64
+#define RTE_ETH_RSS_RETA_SIZE_128 ETH_RSS_RETA_SIZE_128
+#define RTE_ETH_RSS_RETA_SIZE_512 ETH_RSS_RETA_SIZE_512
+#define RTE_ETH_RETA_GROUP_SIZE RTE_RETA_GROUP_SIZE
+#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS ETH_VMDQ_MAX_VLAN_FILTERS
+#define RTE_ETH_DCB_NUM_USER_PRIORITIES ETH_DCB_NUM_USER_PRIORITIES
+#define RTE_ETH_VMDQ_DCB_NUM_QUEUES ETH_VMDQ_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_NUM_QUEUES ETH_DCB_NUM_QUEUES
+#define RTE_ETH_DCB_PG_SUPPORT ETH_DCB_PG_SUPPORT
+#define RTE_ETH_DCB_PFC_SUPPORT ETH_DCB_PFC_SUPPORT
+#define RTE_ETH_VLAN_STRIP_OFFLOAD ETH_VLAN_STRIP_OFFLOAD
+#define RTE_ETH_VLAN_FILTER_OFFLOAD ETH_VLAN_FILTER_OFFLOAD
+#define RTE_ETH_VLAN_EXTEND_OFFLOAD ETH_VLAN_EXTEND_OFFLOAD
+#define RTE_ETH_VLAN_STRIP_MASK ETH_VLAN_STRIP_MASK
+#define RTE_ETH_VLAN_FILTER_MASK ETH_VLAN_FILTER_MASK
+#define RTE_ETH_VLAN_EXTEND_MASK ETH_VLAN_EXTEND_MASK
+#define RTE_ETH_VLAN_ID_MAX ETH_VLAN_ID_MAX
+#define RTE_ETH_NUM_RECEIVE_MAC_ADDR ETH_NUM_RECEIVE_MAC_ADDR
+#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY ETH_VMDQ_NUM_UC_HASH_ARRAY
+#define RTE_ETH_VMDQ_ACCEPT_UNTAG ETH_VMDQ_ACCEPT_UNTAG
+#define RTE_ETH_VMDQ_ACCEPT_HASH_MC ETH_VMDQ_ACCEPT_HASH_MC
+#define RTE_ETH_VMDQ_ACCEPT_HASH_UC ETH_VMDQ_ACCEPT_HASH_UC
+#define RTE_ETH_VMDQ_ACCEPT_BROADCAST ETH_VMDQ_ACCEPT_BROADCAST
+#define RTE_ETH_VMDQ_ACCEPT_MULTICAST ETH_VMDQ_ACCEPT_MULTICAST
+#define RTE_ETH_4_TCS ETH_4_TCS
+#define RTE_ETH_8_TCS ETH_8_TCS
+#define RTE_ETH_8_POOLS ETH_8_POOLS
+#define RTE_ETH_16_POOLS ETH_16_POOLS
+#define RTE_ETH_32_POOLS ETH_32_POOLS
+#define RTE_ETH_64_POOLS ETH_64_POOLS
+#define RTE_ETH_FC_NONE RTE_FC_NONE
+#define RTE_ETH_FC_RX_PAUSE RTE_FC_RX_PAUSE
+#define RTE_ETH_FC_TX_PAUSE RTE_FC_TX_PAUSE
+#define RTE_ETH_FC_FULL RTE_FC_FULL
+#define RTE_ETH_TUNNEL_TYPE_NONE RTE_TUNNEL_TYPE_NONE
+#define RTE_ETH_TUNNEL_TYPE_VXLAN RTE_TUNNEL_TYPE_VXLAN
+#define RTE_ETH_TUNNEL_TYPE_GENEVE RTE_TUNNEL_TYPE_GENEVE
+#define RTE_ETH_TUNNEL_TYPE_TEREDO RTE_TUNNEL_TYPE_TEREDO
+#define RTE_ETH_TUNNEL_TYPE_NVGRE RTE_TUNNEL_TYPE_NVGRE
+#define RTE_ETH_TUNNEL_TYPE_IP_IN_GRE RTE_TUNNEL_TYPE_IP_IN_GRE
+#define RTE_ETH_L2_TUNNEL_TYPE_E_TAG RTE_L2_TUNNEL_TYPE_E_TAG
+#define RTE_ETH_TUNNEL_TYPE_MAX RTE_TUNNEL_TYPE_MAX
+#define RTE_ETH_FDIR_PBALLOC_64K RTE_FDIR_PBALLOC_64K
+#define RTE_ETH_FDIR_PBALLOC_128K RTE_FDIR_PBALLOC_128K
+#define RTE_ETH_FDIR_PBALLOC_256K RTE_FDIR_PBALLOC_256K
+#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP DEV_RX_OFFLOAD_VLAN_STRIP
+#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM DEV_RX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM DEV_RX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM DEV_RX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_TCP_LRO DEV_RX_OFFLOAD_TCP_LRO
+#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP DEV_RX_OFFLOAD_QINQ_STRIP
+#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT DEV_TX_OFFLOAD_VLAN_INSERT
+#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM DEV_TX_OFFLOAD_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM DEV_TX_OFFLOAD_TCP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM DEV_TX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_TCP_TSO DEV_TX_OFFLOAD_TCP_TSO
+#define RTE_ETH_TX_OFFLOAD_UDP_TSO DEV_TX_OFFLOAD_UDP_TSO
+#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM
+#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT DEV_TX_OFFLOAD_QINQ_INSERT
+#define RTE_ETH_DCB_NUM_TCS ETH_DCB_NUM_TCS
+#define RTE_ETH_MAX_VMDQ_POOL ETH_MAX_VMDQ_POOL
+#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
+#define RTE_MEMPOOL_REGISTER_OPS MEMPOOL_REGISTER_OPS
+#define RTE_MBUF_F_RX_VLAN_STRIPPED PKT_RX_VLAN_STRIPPED
+#define RTE_MBUF_F_RX_QINQ_STRIPPED PKT_RX_QINQ_STRIPPED
+#define RTE_ETH_RSS_PORT ETH_RSS_PORT
+#define RTE_ETH_RSS_VXLAN ETH_RSS_VXLAN
+#define RTE_ETH_RSS_GENEVE ETH_RSS_GENEVE
+#define RTE_ETH_RSS_NVGRE ETH_RSS_NVGRE
+#define RTE_ETH_RSS_TUNNEL ETH_RSS_TUNNEL
+#define RTE_ETH_RSS_RETA_SIZE_256 ETH_RSS_RETA_SIZE_256
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(16,11,0,0)
+#define RTE_MBUF_F_RX_IP_CKSUM_MASK PKT_RX_IP_CKSUM_MASK
+#define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN PKT_RX_IP_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_IP_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD
+#define RTE_MBUF_F_RX_IP_CKSUM_NONE PKT_RX_IP_CKSUM_NONE
+#define RTE_MBUF_F_RX_L4_CKSUM_MASK PKT_RX_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN PKT_RX_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_L4_CKSUM_GOOD PKT_RX_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_L4_CKSUM_NONE PKT_RX_L4_CKSUM_NONE
+#define RTE_MBUF_F_RX_LRO PKT_RX_LRO
+#define RTE_MBUF_F_TX_TUNNEL_VXLAN PKT_TX_TUNNEL_VXLAN
+#define RTE_MBUF_F_TX_TUNNEL_GRE PKT_TX_TUNNEL_GRE
+#define RTE_MBUF_F_TX_TUNNEL_IPIP PKT_TX_TUNNEL_IPIP
+#define RTE_MBUF_F_TX_TUNNEL_GENEVE PKT_TX_TUNNEL_GENEVE
+#define RTE_MBUF_F_TX_TUNNEL_MASK PKT_TX_TUNNEL_MASK
+#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO DEV_TX_OFFLOAD_VXLAN_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO DEV_TX_OFFLOAD_GRE_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO DEV_TX_OFFLOAD_IPIP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO DEV_TX_OFFLOAD_GENEVE_TNL_TSO
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(17,2,0,0)
+#define RTE_MBUF_F_TX_MACSEC PKT_TX_MACSEC
+#define RTE_MBUF_F_TX_OFFLOAD_MASK PKT_TX_OFFLOAD_MASK
+#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP DEV_RX_OFFLOAD_MACSEC_STRIP
+#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT DEV_TX_OFFLOAD_MACSEC_INSERT
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(17,8,0,0)
+#define RTE_MBUF_F_TX_TUNNEL_MPLSINUDP PKT_TX_TUNNEL_MPLSINUDP
+#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE DEV_TX_OFFLOAD_MT_LOCKFREE
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(17,11,0,0)
+#define RTE_MBUF_F_RX_VLAN PKT_RX_VLAN
+#define RTE_MBUF_F_RX_SEC_OFFLOAD PKT_RX_SEC_OFFLOAD
+#define RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED PKT_RX_SEC_OFFLOAD_FAILED
+#define RTE_MBUF_F_RX_QINQ PKT_RX_QINQ
+#define RTE_MBUF_F_TX_SEC_OFFLOAD PKT_TX_SEC_OFFLOAD
+#define RTE_ETH_RX_OFFLOAD_HEADER_SPLIT DEV_RX_OFFLOAD_HEADER_SPLIT
+#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER DEV_RX_OFFLOAD_VLAN_FILTER
+#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND DEV_RX_OFFLOAD_VLAN_EXTEND
+#define RTE_ETH_RX_OFFLOAD_SCATTER DEV_RX_OFFLOAD_SCATTER
+#define RTE_ETH_RX_OFFLOAD_TIMESTAMP DEV_RX_OFFLOAD_TIMESTAMP
+#define RTE_ETH_RX_OFFLOAD_SECURITY DEV_RX_OFFLOAD_SECURITY
+#define RTE_ETH_RX_OFFLOAD_CHECKSUM DEV_RX_OFFLOAD_CHECKSUM
+#define RTE_ETH_RX_OFFLOAD_VLAN DEV_RX_OFFLOAD_VLAN
+#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS DEV_TX_OFFLOAD_MULTI_SEGS
+#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE DEV_TX_OFFLOAD_MBUF_FAST_FREE
+#define RTE_ETH_TX_OFFLOAD_SECURITY DEV_TX_OFFLOAD_SECURITY
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(18,2,0,0)
+#define RTE_MBUF_F_TX_UDP_SEG PKT_TX_UDP_SEG
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(18,5,0,0)
+#define RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE PKT_TX_TUNNEL_VXLAN_GPE
+#define RTE_MBUF_F_TX_TUNNEL_IP PKT_TX_TUNNEL_IP
+#define RTE_MBUF_F_TX_TUNNEL_UDP PKT_TX_TUNNEL_UDP
+#define RTE_MBUF_F_EXTERNAL EXT_ATTACHED_MBUF
+#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO DEV_TX_OFFLOAD_UDP_TNL_TSO
+#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO DEV_TX_OFFLOAD_IP_TNL_TSO
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(18,11,0,0)
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_MASK PKT_RX_OUTER_L4_CKSUM_MASK
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN PKT_RX_OUTER_L4_CKSUM_UNKNOWN
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD PKT_RX_OUTER_L4_CKSUM_BAD
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD PKT_RX_OUTER_L4_CKSUM_GOOD
+#define RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID PKT_RX_OUTER_L4_CKSUM_INVALID
+#define RTE_MBUF_F_TX_OUTER_UDP_CKSUM PKT_TX_OUTER_UDP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM DEV_RX_OFFLOAD_SCTP_CKSUM
+#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM DEV_RX_OFFLOAD_OUTER_UDP_CKSUM
+#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM DEV_TX_OFFLOAD_OUTER_UDP_CKSUM
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(19,5,0,0)
+#define RTE_ETH_TUNNEL_TYPE_VXLAN_GPE RTE_TUNNEL_TYPE_VXLAN_GPE
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(19,8,0,0)
+#define RTE_ETH_QINQ_STRIP_OFFLOAD ETH_QINQ_STRIP_OFFLOAD
+#define RTE_ETH_QINQ_STRIP_MASK ETH_QINQ_STRIP_MASK
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+#define RTE_MBUF_DYNFLAG_RX_METADATA PKT_RX_DYNF_METADATA
+#define RTE_MBUF_DYNFLAG_TX_METADATA PKT_TX_DYNF_METADATA
+#define RTE_MBUF_F_FIRST_FREE PKT_FIRST_FREE
+#define RTE_MBUF_F_LAST_FREE PKT_LAST_FREE
+#define RTE_MBUF_F_TX_TUNNEL_GTP PKT_TX_TUNNEL_GTP
+#define RTE_ETH_RSS_GTPU ETH_RSS_GTPU
+#define RTE_ETH_RSS_L3_SRC_ONLY ETH_RSS_L3_SRC_ONLY
+#define RTE_ETH_RSS_L3_DST_ONLY ETH_RSS_L3_DST_ONLY
+#define RTE_ETH_RSS_L4_SRC_ONLY ETH_RSS_L4_SRC_ONLY
+#define RTE_ETH_RSS_L4_DST_ONLY ETH_RSS_L4_DST_ONLY
+#define RTE_ETH_RX_OFFLOAD_RSS_HASH DEV_RX_OFFLOAD_RSS_HASH
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(20,5,0,0)
+#define RTE_ETH_LINK_SPEED_200G ETH_LINK_SPEED_200G
+#define RTE_ETH_SPEED_NUM_200G ETH_SPEED_NUM_200G
+#define RTE_ETH_RSS_ETH ETH_RSS_ETH
+#define RTE_ETH_RSS_S_VLAN ETH_RSS_S_VLAN
+#define RTE_ETH_RSS_C_VLAN ETH_RSS_C_VLAN
+#define RTE_ETH_RSS_ESP ETH_RSS_ESP
+#define RTE_ETH_RSS_AH ETH_RSS_AH
+#define RTE_ETH_RSS_L2TPV3 ETH_RSS_L2TPV3
+#define RTE_ETH_RSS_PFCP ETH_RSS_PFCP
+#define RTE_ETH_RSS_L2_SRC_ONLY ETH_RSS_L2_SRC_ONLY
+#define RTE_ETH_RSS_L2_DST_ONLY ETH_RSS_L2_DST_ONLY
+#define RTE_ETH_RSS_VLAN ETH_RSS_VLAN
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(20,8,0,0)
+#define RTE_ETH_RSS_PPPOE ETH_RSS_PPPOE
+#define RTE_ETH_RSS_IPV6_PRE32 ETH_RSS_IPV6_PRE32
+#define RTE_ETH_RSS_IPV6_PRE40 ETH_RSS_IPV6_PRE40
+#define RTE_ETH_RSS_IPV6_PRE48 ETH_RSS_IPV6_PRE48
+#define RTE_ETH_RSS_IPV6_PRE56 ETH_RSS_IPV6_PRE56
+#define RTE_ETH_RSS_IPV6_PRE64 ETH_RSS_IPV6_PRE64
+#define RTE_ETH_RSS_IPV6_PRE96 ETH_RSS_IPV6_PRE96
+#define RTE_ETH_RSS_IPV6_PRE32_UDP ETH_RSS_IPV6_PRE32_UDP
+#define RTE_ETH_RSS_IPV6_PRE40_UDP ETH_RSS_IPV6_PRE40_UDP
+#define RTE_ETH_RSS_IPV6_PRE48_UDP ETH_RSS_IPV6_PRE48_UDP
+#define RTE_ETH_RSS_IPV6_PRE56_UDP ETH_RSS_IPV6_PRE56_UDP
+#define RTE_ETH_RSS_IPV6_PRE64_UDP ETH_RSS_IPV6_PRE64_UDP
+#define RTE_ETH_RSS_IPV6_PRE96_UDP ETH_RSS_IPV6_PRE96_UDP
+#define RTE_ETH_RSS_IPV6_PRE32_TCP ETH_RSS_IPV6_PRE32_TCP
+#define RTE_ETH_RSS_IPV6_PRE40_TCP ETH_RSS_IPV6_PRE40_TCP
+#define RTE_ETH_RSS_IPV6_PRE48_TCP ETH_RSS_IPV6_PRE48_TCP
+#define RTE_ETH_RSS_IPV6_PRE56_TCP ETH_RSS_IPV6_PRE56_TCP
+#define RTE_ETH_RSS_IPV6_PRE64_TCP ETH_RSS_IPV6_PRE64_TCP
+#define RTE_ETH_RSS_IPV6_PRE96_TCP ETH_RSS_IPV6_PRE96_TCP
+#define RTE_ETH_RSS_IPV6_PRE32_SCTP ETH_RSS_IPV6_PRE32_SCTP
+#define RTE_ETH_RSS_IPV6_PRE40_SCTP ETH_RSS_IPV6_PRE40_SCTP
+#define RTE_ETH_RSS_IPV6_PRE48_SCTP ETH_RSS_IPV6_PRE48_SCTP
+#define RTE_ETH_RSS_IPV6_PRE56_SCTP ETH_RSS_IPV6_PRE56_SCTP
+#define RTE_ETH_RSS_IPV6_PRE64_SCTP ETH_RSS_IPV6_PRE64_SCTP
+#define RTE_ETH_RSS_IPV6_PRE96_SCTP ETH_RSS_IPV6_PRE96_SCTP
+#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+#define RTE_ETH_SPEED_NUM_UNKNOWN ETH_SPEED_NUM_UNKNOWN
+#define RTE_ETH_RSS_ECPRI ETH_RSS_ECPRI
+#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT ETH_RSS_LEVEL_PMD_DEFAULT
+#define RTE_ETH_RSS_LEVEL_OUTERMOST ETH_RSS_LEVEL_OUTERMOST
+#define RTE_ETH_RSS_LEVEL_INNERMOST ETH_RSS_LEVEL_INNERMOST
+#define RTE_ETH_RSS_LEVEL_MASK ETH_RSS_LEVEL_MASK
+#define RTE_ETH_RSS_LEVEL ETH_RSS_LEVEL
+#endif
+#if RTE_VERSION >= RTE_VERSION_NUM(21,2,0,0)
+#define RTE_ETH_RSS_MPLS ETH_RSS_MPLS
+#define RTE_ETH_TUNNEL_TYPE_ECPRI RTE_TUNNEL_TYPE_ECPRI
+#endif
+
+#ifndef DEV_RX_OFFLOAD_JUMBO_FRAME
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME 0x00000800
+#else
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME DEV_RX_OFFLOAD_JUMBO_FRAME
+#endif
+
+#ifndef DEV_RX_OFFLOAD_KEEP_CRC
+#ifndef DEV_RX_OFFLOAD_CRC_STRIP
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP 0x00001000
+#else
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP DEV_RX_OFFLOAD_CRC_STRIP
+#endif
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC _force_error_if_defined_
+#undef RTE_ETH_RX_OFFLOAD_KEEP_CRC
+
+#else
+#ifndef DEV_RX_OFFLOAD_CRC_STRIP
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP _force_error_if_defined_
+#undef RTE_ETH_RX_OFFLOAD_CRC_STRIP
+#else
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP DEV_RX_OFFLOAD_CRC_STRIP
+#endif
+#define RTE_ETH_RX_OFFLOAD_KEEP_CRC DEV_RX_OFFLOAD_KEEP_CRC
+#endif
+
+#else // >= 21.11
+#define RTE_ETH_RX_OFFLOAD_JUMBO_FRAME RTE_BIT64(11)
+#define RTE_ETH_RX_OFFLOAD_CRC_STRIP _force_error_if_defined_
+#undef RTE_ETH_RX_OFFLOAD_CRC_STRIP
+#endif
+
+#endif // _PROX_COMPAT_H_
diff --git a/VNFs/DPPD-PROX/prox_globals.h b/VNFs/DPPD-PROX/prox_globals.h
index b09f3a52..7463ded5 100644
--- a/VNFs/DPPD-PROX/prox_globals.h
+++ b/VNFs/DPPD-PROX/prox_globals.h
@@ -18,6 +18,7 @@
#define MAX_TASKS_PER_CORE 8
#define MAX_SOCKETS 64
#define MAX_NAME_SIZE 64
+#define MAX_NAME_BUFFER_SIZE 128
#define MAX_PROTOCOLS 3
#define MAX_RINGS_PER_TASK (MAX_WT_PER_LB*MAX_PROTOCOLS)
#define MAX_WT_PER_LB 64
diff --git a/VNFs/DPPD-PROX/prox_ipv6.c b/VNFs/DPPD-PROX/prox_ipv6.c
new file mode 100644
index 00000000..f8ec147f
--- /dev/null
+++ b/VNFs/DPPD-PROX/prox_ipv6.c
@@ -0,0 +1,339 @@
+/*
+// Copyright (c) 2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#include "task_base.h"
+#include "handle_master.h"
+#include "prox_cfg.h"
+#include "prox_ipv6.h"
+
+struct ipv6_addr null_addr = {{0}};
+char ip6_str[40]; // 8 fields of 4 hex chars + 7 ":" separators + terminating NUL = 40
+
+// Derive the IPv6 multicast MAC address (RFC 2464): 33:33 followed by the
+// last four bytes of the IPv6 address.
+void set_mcast_mac_from_ipv6(prox_rte_ether_addr *mac, struct ipv6_addr *ipv6_addr)
+{
+	uint8_t *dst = mac->addr_bytes;
+
+	dst[0] = 0x33;
+	dst[1] = 0x33;
+	memcpy(dst + 2, &ipv6_addr->bytes[12], 4);
+}
+
+// Render an IPv6 address in the canonical text form of RFC 5952: lowercase
+// hex, leading zeros suppressed, and the longest run (>= 2) of zero fields
+// compressed to "::".  Returns a pointer to the static buffer ip6_str.
+// Note that this function is not Mthread safe and would result in garbage if called simultaneously from multiple threads
+// This function is however only used for debugging, printing errors...
+char *IP6_Canonical(struct ipv6_addr *addr)
+{
+	uint8_t *a = (uint8_t *)addr;
+	char *ptr = ip6_str;
+	int field = -1, len = 0, stored_field = 0, stored_len = 0;
+
+	// Find longest run of consecutive 16-bit 0 fields
+	for (int i = 0; i < 8; i++) {
+		if (((int)a[i * 2] == 0) && ((int)a[i * 2 + 1] == 0)) {
+			len++;
+			if (field == -1)
+				field = i;	// Store where the first 0 field started
+		} else {
+			if (len > stored_len) {
+				// the longest run of consecutive 16-bit 0 fields MUST be shortened
+				stored_len = len;
+				stored_field = field;
+			}
+			len = 0;
+			field = -1;
+		}
+	}
+	if (len > stored_len) {
+		// Handle a zero run that extends to the end of the address
+		stored_len = len;
+		stored_field = field;
+	}
+	if (stored_len <= 1) {
+		// The symbol "::" MUST NOT be used to shorten just one 16-bit 0 field.
+		stored_len = 0;
+		stored_field = -1;
+	}
+	for (int i = 0; i < 8; i++) {
+		if (i == stored_field) {
+			// Emit the "::" compression (a leading run needs both colons here)
+			sprintf(ptr, ":");
+			ptr++;
+			if (i == 0) {
+				sprintf(ptr, ":");
+				ptr++;
+			}
+			i += stored_len - 1;	// ++ done in for loop
+			continue;
+		}
+		if ((int)a[i * 2] & 0xF0) {
+			// Four significant hex digits
+			sprintf(ptr, "%02x%02x", (int)a[i * 2], (int)a[i * 2 + 1]);
+			ptr += 4;
+		} else if ((int)a[i * 2] & 0x0F) {
+			// Three significant digits: low nibble of the high byte, then the
+			// full low byte.  BUGFIX: the original printed a[i*2] >> 4 (always 0
+			// in this branch) and a[i*2] + 1 instead of a[i*2 + 1].
+			sprintf(ptr, "%x%02x", (int)a[i * 2] & 0x0F, (int)a[i * 2 + 1]);
+			ptr += 3;
+		} else if ((int)a[i * 2 + 1] & 0xF0) {
+			// Two significant digits
+			sprintf(ptr, "%02x", (int)a[i * 2 + 1]);
+			ptr += 2;
+		} else {
+			// Single digit (possibly 0)
+			sprintf(ptr, "%x", ((int)a[i * 2 + 1]) & 0xF);
+			ptr++;
+		}
+		if (i != 7) {
+			sprintf(ptr, ":");
+			ptr++;
+		}
+	}
+	return ip6_str;
+}
+
+// Mark the address as link-local by writing the fe80::/10 prefix into the
+// first two bytes; the remaining bytes are left untouched.
+void set_link_local(struct ipv6_addr *ipv6_addr)
+{
+	static const uint8_t link_local_prefix[2] = {0xfe, 0x80};
+
+	memcpy(ipv6_addr->bytes, link_local_prefix, sizeof(link_local_prefix));
+}
+
+// Create Extended Unique Identifier (RFC 2373)
+// Store it in LSB of IPv6 address
+// Builds the EUI-64 interface identifier from 'mac' and writes it into
+// bytes 8..15 of 'ipv6_addr'; bytes 0..7 (the prefix) are left untouched.
+void set_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac)
+{
+	memcpy(&ipv6_addr->bytes[8], mac, 3); // Copy first 3 bytes of MAC
+	ipv6_addr->bytes[8] = ipv6_addr->bytes[8] ^ 0x02; // Invert Universal/local bit
+	ipv6_addr->bytes[11] = 0xff; // Next 2 bytes are 0xfffe
+	ipv6_addr->bytes[12] = 0xfe;
+	memcpy(&ipv6_addr->bytes[13], &mac->addr_bytes[3], 3); // Copy last 3 bytes
+	// plog_info("mac = "MAC_BYTES_FMT", eui = "IPv6_BYTES_FMT"\n", MAC_BYTES(mac->addr_bytes), IPv6_BYTES(ipv6_addr->bytes));
+}
+
+// Inverse of set_EUI(): reconstruct the MAC address from the EUI-64
+// interface identifier stored in bytes 8..15 of 'ipv6_addr'.
+void create_mac_from_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac)
+{
+	uint8_t *dst = mac->addr_bytes;
+
+	memcpy(dst, &ipv6_addr->bytes[8], 3);
+	dst[0] ^= 0x02;	// restore the Universal/local bit flipped by set_EUI
+	memcpy(dst + 3, &ipv6_addr->bytes[13], 3);
+}
+
+// Set the L2 ethertype(s) for an IPv6 frame, inserting a VLAN header when
+// 'vlan' is non-zero, and return a pointer to where the IPv6 header starts.
+static inline prox_rte_ipv6_hdr *prox_set_vlan_ipv6(prox_rte_ether_hdr *peth, uint16_t vlan)
+{
+	if (!vlan) {
+		// Untagged frame: IPv6 header follows the Ethernet header directly
+		peth->ether_type = ETYPE_IPv6;
+		return (prox_rte_ipv6_hdr *)(peth + 1);
+	}
+	prox_rte_vlan_hdr *vlan_hdr = (prox_rte_vlan_hdr *)(peth + 1);
+	peth->ether_type = ETYPE_VLAN;
+	vlan_hdr->eth_proto = ETYPE_IPv6;
+	vlan_hdr->vlan_tci = rte_cpu_to_be_16(vlan);
+	return (prox_rte_ipv6_hdr *)(vlan_hdr + 1);
+}
+
+void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix, uint16_t vlan)
+{
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ init_mbuf_seg(mbuf);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
+
+ memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_nodes_mac_addr, sizeof(prox_rte_ether_addr));
+ memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
+
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
+ ipv6_hdr->vtc_flow = 0x00000060;
+ ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_RA) + sizeof(struct icmpv6_prefix_option));
+ ipv6_hdr->proto = ICMPv6;
+ ipv6_hdr->hop_limits = 255;
+ memcpy(ipv6_hdr->src_addr, ipv6_s_addr, sizeof(struct ipv6_addr)); // 0 = "Unspecified address" if unknown
+ memcpy(ipv6_hdr->dst_addr, &prox_cfg.all_nodes_ipv6_mcast_addr, sizeof(struct ipv6_addr));
+
+ struct icmpv6_RA *router_advertisement = (struct icmpv6_RA *)(ipv6_hdr + 1);
+ router_advertisement->type = ICMPv6_RA;
+ router_advertisement->code = 0;
+ router_advertisement->hop_limit = 255;
+ router_advertisement->bits = 0; // M and O bits set to 0 => no dhcpv6
+ router_advertisement->router_lifespan = rte_cpu_to_be_16(9000); // 9000 sec
+ router_advertisement->reachable_timeout = rte_cpu_to_be_32(30000); // 1 sec
+ router_advertisement->retrans_timeout = rte_cpu_to_be_32(1000); // 30 sec
+
+ struct icmpv6_option *option = &router_advertisement->options;
+ option->type = ICMPv6_source_link_layer_address;
+ option->length = 1; // 8 bytes
+ memcpy(&option->data, s_addr, sizeof(prox_rte_ether_addr));
+
+ struct icmpv6_prefix_option *prefix_option = (struct icmpv6_prefix_option *)(option + 1);
+ prefix_option->type = ICMPv6_prefix_information;
+ prefix_option->length = 4; // 32 bytes
+ prefix_option->prefix_length = 64; // 64 bits in prefix
+ prefix_option->flag = 0xc0; // on-link flag & autonomous address-configuration flag are set
+ prefix_option->valid_lifetime = rte_cpu_to_be_32(86400); // 1 day
+ prefix_option->preferred_lifetime = rte_cpu_to_be_32(43200); // 12 hours
+ prefix_option->reserved = 0;
+ memcpy(&prefix_option->prefix, router_prefix, sizeof(struct ipv6_addr));
+ // Could Add MTU Option
+ router_advertisement->checksum = 0;
+ router_advertisement->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, router_advertisement);
+
+ uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
+}
+
+void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, uint16_t vlan)
+{
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+
+ init_mbuf_seg(mbuf);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
+
+ memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_routers_mac_addr, sizeof(prox_rte_ether_addr));
+ memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
+
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
+ ipv6_hdr->vtc_flow = 0x00000060;
+ ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_RS));
+ ipv6_hdr->proto = ICMPv6;
+ ipv6_hdr->hop_limits = 255;
+ memcpy(ipv6_hdr->src_addr, ipv6_s_addr, sizeof(struct ipv6_addr)); // 0 = "Unspecified address" if unknown
+ memcpy(ipv6_hdr->dst_addr, &prox_cfg.all_routers_ipv6_mcast_addr, sizeof(struct ipv6_addr));
+
+ struct icmpv6_RS *router_sollicitation = (struct icmpv6_RS *)(ipv6_hdr + 1);
+ router_sollicitation->type = ICMPv6_RS;
+ router_sollicitation->code = 0;
+ router_sollicitation->options.type = ICMPv6_source_link_layer_address;
+ router_sollicitation->options.length = 1; // 8 bytes
+ memcpy(&router_sollicitation->options.data, s_addr, sizeof(prox_rte_ether_addr));
+
+ router_sollicitation->checksum = 0;
+ router_sollicitation->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, router_sollicitation);
+ uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
+}
+
+void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src, uint16_t vlan)
+{
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ether_addr mac_dst;
+ set_mcast_mac_from_ipv6(&mac_dst, dst);
+
+ init_mbuf_seg(mbuf);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
+
+ memcpy(peth->d_addr.addr_bytes, &mac_dst, sizeof(prox_rte_ether_addr));
+ memcpy(peth->s_addr.addr_bytes, s_addr, sizeof(prox_rte_ether_addr));
+
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
+
+ ipv6_hdr->vtc_flow = 0x00000060;
+ ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_NS));
+ ipv6_hdr->proto = ICMPv6;
+ ipv6_hdr->hop_limits = 255;
+ memcpy(ipv6_hdr->src_addr, src, 16);
+ memcpy(ipv6_hdr->dst_addr, dst, 16);
+
+ struct icmpv6_NS *neighbour_sollicitation = (struct icmpv6_NS *)(ipv6_hdr + 1);
+ neighbour_sollicitation->type = ICMPv6_NS;
+ neighbour_sollicitation->code = 0;
+ neighbour_sollicitation->reserved = 0;
+ memcpy(&neighbour_sollicitation->target_address, dst, sizeof(struct ipv6_addr));
+ neighbour_sollicitation->options.type = ICMPv6_source_link_layer_address;
+ neighbour_sollicitation->options.length = 1; // 8 bytes
+ memcpy(&neighbour_sollicitation->options.data, s_addr, sizeof(prox_rte_ether_addr));
+ neighbour_sollicitation->checksum = 0;
+ neighbour_sollicitation->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, neighbour_sollicitation);
+
+ uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
+}
+
+void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target, struct ipv6_addr *src_ipv6_addr, int sollicited, uint16_t vlan)
+{
+ struct task_master *task = (struct task_master *)tbase;
+ prox_rte_ether_hdr *peth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+
+ uint8_t port_id = get_port(mbuf);
+
+ init_mbuf_seg(mbuf);
+ mbuf->ol_flags &= ~(RTE_MBUF_F_TX_IP_CKSUM|RTE_MBUF_F_TX_UDP_CKSUM); // Software calculates the checksum
+
+ prox_rte_ipv6_hdr *ipv6_hdr = prox_set_vlan_ipv6(peth, vlan);
+
+ // If source mac is null, use all_nodes_mac_addr.
+ if ((!sollicited) || (memcmp(peth->s_addr.addr_bytes, &null_addr, sizeof(struct ipv6_addr)) == 0)) {
+ memcpy(peth->d_addr.addr_bytes, &prox_cfg.all_nodes_mac_addr, sizeof(prox_rte_ether_addr));
+ memcpy(ipv6_hdr->dst_addr, &prox_cfg.all_nodes_ipv6_mcast_addr, sizeof(struct ipv6_addr));
+ } else {
+ memcpy(peth->d_addr.addr_bytes, peth->s_addr.addr_bytes, sizeof(prox_rte_ether_addr));
+ memcpy(ipv6_hdr->dst_addr, ipv6_hdr->src_addr, sizeof(struct ipv6_addr));
+ }
+
+ memcpy(peth->s_addr.addr_bytes, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));
+
+ ipv6_hdr->vtc_flow = 0x00000060;
+ ipv6_hdr->payload_len = rte_cpu_to_be_16(sizeof(struct icmpv6_NA));
+ ipv6_hdr->proto = ICMPv6;
+ ipv6_hdr->hop_limits = 255;
+ memcpy(ipv6_hdr->src_addr, src_ipv6_addr, sizeof(struct ipv6_addr));
+
+ struct icmpv6_NA *neighbour_advertisement = (struct icmpv6_NA *)(ipv6_hdr + 1);
+ neighbour_advertisement->type = ICMPv6_NA;
+ neighbour_advertisement->code = 0;
+ neighbour_advertisement->reserved = 0;
+ if (task->internal_port_table[port_id].flags & IPV6_ROUTER)
+ neighbour_advertisement->bits = 0xC0; // R+S bit set
+ else
+ neighbour_advertisement->bits = 0x40; // S bit set
+ if (!sollicited) {
+ memcpy(&neighbour_advertisement->destination_address, src_ipv6_addr, sizeof(struct ipv6_addr));
+ neighbour_advertisement->bits &= 0xBF; // Clear S bit
+ neighbour_advertisement->bits |= 0x20; // Override bit
+ }
+ // else neighbour_advertisement->destination_address is already set to neighbour_sollicitation->target_address
+
+ struct icmpv6_option *option = &neighbour_advertisement->options;
+ // Do not think this is necessary
+ // option->type = ICMPv6_source_link_layer_address;
+ // option->length = 1; // 8 bytes
+ // memcpy(&option->data, &task->internal_port_table[port_id].mac, sizeof(prox_rte_ether_addr));
+
+ // option = option + 1;
+ option->type = ICMPv6_target_link_layer_address;
+ option->length = 1; // 8 bytes
+ memcpy(&option->data, target, sizeof(prox_rte_ether_addr));
+
+ neighbour_advertisement->checksum = 0;
+ neighbour_advertisement->checksum = rte_ipv6_udptcp_cksum(ipv6_hdr, neighbour_advertisement);
+ uint16_t pktlen = rte_be_to_cpu_16(ipv6_hdr->payload_len) + sizeof(prox_rte_ipv6_hdr) + sizeof(prox_rte_ether_hdr);
+ rte_pktmbuf_pkt_len(mbuf) = pktlen + (vlan ? 4 : 0);
+ rte_pktmbuf_data_len(mbuf) = pktlen + (vlan ? 4 : 0);
+}
+
+prox_rte_ipv6_hdr *prox_get_ipv6_hdr(prox_rte_ether_hdr *hdr, uint16_t len, uint16_t *vlan)
+{
+ prox_rte_vlan_hdr *vlan_hdr;
+ prox_rte_ipv6_hdr *ipv6_hdr;
+ uint16_t ether_type = hdr->ether_type;
+ uint16_t l2_len = sizeof(prox_rte_ether_hdr);
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(hdr + 1);
+
+ while (((ether_type == ETYPE_8021ad) || (ether_type == ETYPE_VLAN)) && (l2_len + sizeof(prox_rte_vlan_hdr) < len)) {
+ vlan_hdr = (prox_rte_vlan_hdr *)((uint8_t *)hdr + l2_len);
+ l2_len +=4;
+ ether_type = vlan_hdr->eth_proto;
+ *vlan = rte_be_to_cpu_16(vlan_hdr->vlan_tci & 0xFF0F);
+ ipv6_hdr = (prox_rte_ipv6_hdr *)(vlan_hdr + 1);
+ }
+ if (ether_type == ETYPE_IPv6)
+ return ipv6_hdr;
+ else
+ return NULL;
+}
diff --git a/VNFs/DPPD-PROX/prox_ipv6.h b/VNFs/DPPD-PROX/prox_ipv6.h
new file mode 100644
index 00000000..e2ae7d61
--- /dev/null
+++ b/VNFs/DPPD-PROX/prox_ipv6.h
@@ -0,0 +1,141 @@
+/*
+// Copyright (c) 2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#ifndef _PROX_IP_V6_H_
+#define _PROX_IP_V6_H_
+
+#include "ip6_addr.h"
+
+#define ALL_NODES_IPV6_MCAST_ADDR "ff02:0000:0000:0000:0000:0000:0000:0001" // FF02::1
+#define ALL_ROUTERS_IPV6_MCAST_ADDR "ff02:0000:0000:0000:0000:0000:0000:0002" // FF02::2
+
+#define RANDOM_IPV6 "1234:1234:1234:1234:1234:1234:1234:1234" // Used by PROX as a flag for random IP
+
+#define ALL_DHCP_RELAY_AGENTS_AND_SERVERS "ff02:0000:0000:0000:0000:0000:0001:0002" // FF02::1:2
+#define ALL_DHCP_SERVERS "ff05:0000:0000:0000:0000:0000:0001:0003" // FF05::1:3
+
+#define DHCP_CLIENT_UDP_PORT 546
+#define DHCP_SERVER_UDP_PORT 547
+
+#define PROX_UNSOLLICITED 0
+#define PROX_SOLLICITED 1
+
+#define ICMPv6 0x3a
+
+#define ICMPv6_DU 0x01
+#define ICMPv6_PTB 0x02
+#define ICMPv6_TE 0x03
+#define ICMPv6_PaPr 0x04
+#define ICMPv6_RS 0x85
+#define ICMPv6_RA 0x86
+#define ICMPv6_NS 0x87
+#define ICMPv6_NA 0x88
+#define ICMPv6_RE 0x89
+
+#define ICMPv6_source_link_layer_address 1
+#define ICMPv6_target_link_layer_address 2
+#define ICMPv6_prefix_information 3
+#define ICMPv6_redirect_header 4
+#define ICMPv6_mtu 5
+
+extern struct ipv6_addr null_addr;
+
+struct icmpv6_prefix_option {
+ uint8_t type;
+ uint8_t length;
+ uint8_t prefix_length;
+ uint8_t flag;
+ uint32_t valid_lifetime;
+ uint32_t preferred_lifetime;
+ uint32_t reserved;
+ struct ipv6_addr prefix;
+};
+
+struct icmpv6_option {
+ uint8_t type;
+ uint8_t length;
+ uint8_t data[6];
+} __attribute__((__packed__));
+
+struct icmpv6 {
+ uint8_t type;
+ uint8_t code;
+ uint16_t checksum;
+};
+
+struct icmpv6_RA {
+ uint8_t type;
+ uint8_t code;
+ uint16_t checksum;
+ uint8_t hop_limit;
+ uint8_t bits;
+ uint16_t router_lifespan;
+ uint32_t reachable_timeout;
+ uint32_t retrans_timeout;
+ struct icmpv6_option options;
+} __attribute__((__packed__));
+
+struct icmpv6_RS {
+ uint8_t type;
+ uint8_t code;
+ uint16_t checksum;
+ uint32_t reserved;
+ struct icmpv6_option options;
+} __attribute__((__packed__));
+
+struct icmpv6_NS {
+ uint8_t type;
+ uint8_t code;
+ uint16_t checksum;
+ uint32_t reserved;
+ struct ipv6_addr target_address;
+ struct icmpv6_option options;
+} __attribute__((__packed__));
+
+struct icmpv6_NA {
+ uint8_t type;
+ uint8_t code;
+ uint16_t checksum;
+ uint16_t bits;
+ uint16_t reserved;
+ struct ipv6_addr destination_address;
+ struct icmpv6_option options;
+} __attribute__((__packed__));
+
+struct icmpv6_RE {
+ uint8_t type;
+ uint8_t code;
+ uint16_t checksum;
+ uint32_t reserved;
+ struct ipv6_addr destination_address_hop;
+ struct ipv6_addr destination_address;
+ uint32_t Options;
+} __attribute__((__packed__));
+
+void set_mcast_mac_from_ipv6(prox_rte_ether_addr *mac, struct ipv6_addr *ipv6_addr);
+char *IP6_Canonical(struct ipv6_addr *addr);
+void set_link_local(struct ipv6_addr *ipv6_addr);
+void set_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac);
+void create_mac_from_EUI(struct ipv6_addr *ipv6_addr, prox_rte_ether_addr *mac);
+
+struct task_base;
+prox_rte_ipv6_hdr *prox_get_ipv6_hdr(prox_rte_ether_hdr *hdr, uint16_t len, uint16_t *vlan);
+void build_router_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, uint16_t vlan);
+void build_router_advertisement(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *ipv6_s_addr, struct ipv6_addr *router_prefix, uint16_t vlan);
+void build_neighbour_sollicitation(struct rte_mbuf *mbuf, prox_rte_ether_addr *s_addr, struct ipv6_addr *dst, struct ipv6_addr *src, uint16_t vlan);
+void build_neighbour_advertisement(struct task_base *tbase, struct rte_mbuf *mbuf, prox_rte_ether_addr *target_mac, struct ipv6_addr *ipv6_addr, int sollicited, uint16_t vlan);
+
+#endif /* _PROX_IP_V6_H_ */
diff --git a/VNFs/DPPD-PROX/prox_lua.c b/VNFs/DPPD-PROX/prox_lua.c
index b5c2fec9..4e2f18a6 100644
--- a/VNFs/DPPD-PROX/prox_lua.c
+++ b/VNFs/DPPD-PROX/prox_lua.c
@@ -20,6 +20,7 @@
#include "prox_lua.h"
#include "lua_compat.h"
#include "parse_utils.h"
+#include "prox_compat.h"
static struct lua_State *lua_instance;
@@ -137,7 +138,7 @@ static int l_mac(lua_State *L)
if (lua_isstring(L, -1)) {
const char *arg = lua_tostring(L, -1);
char arg2[128];
- strncpy(arg2, arg, sizeof(arg2));
+ prox_strncpy(arg2, arg, sizeof(arg2));
char *p = arg2;
int count = 0;
@@ -223,7 +224,7 @@ static int l_ip6(lua_State *L)
int next_str = 1;
int ret;
- strncpy(arg2, arg, sizeof(arg2));
+ prox_strncpy(arg2, arg, sizeof(arg2));
for (size_t i = 0; i < str_len; ++i) {
if (next_str) {
@@ -270,7 +271,7 @@ static int l_cidr(lua_State *L)
const char *arg = lua_tostring(L, -1);
char tmp[128];
- strncpy(tmp, arg, sizeof(tmp));
+ prox_strncpy(tmp, arg, sizeof(tmp));
char *slash = strchr(tmp, '/');
*slash = 0;
@@ -296,7 +297,7 @@ static int l_cidr6(lua_State *L)
const char *arg = lua_tostring(L, -1);
char tmp[128];
- strncpy(tmp, arg, sizeof(tmp));
+ prox_strncpy(tmp, arg, sizeof(tmp));
char *slash = strchr(tmp, '/');
*slash = 0;
diff --git a/VNFs/DPPD-PROX/prox_lua_types.c b/VNFs/DPPD-PROX/prox_lua_types.c
index 7a0b6e08..1b237356 100644
--- a/VNFs/DPPD-PROX/prox_lua_types.c
+++ b/VNFs/DPPD-PROX/prox_lua_types.c
@@ -39,6 +39,7 @@
#include "handle_qinq_encap4.h"
#include "toeplitz.h"
#include "handle_lb_5tuple.h"
+#include "prox_compat.h"
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
@@ -182,7 +183,7 @@ int lua_to_ip6(struct lua_State *L, enum lua_place from, const char *name, uint8
return 0;
}
-int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, struct ether_addr *mac)
+int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, prox_rte_ether_addr *mac)
{
uint32_t n_entries;
uint32_t mac_array[4];
@@ -345,7 +346,7 @@ int lua_to_string(struct lua_State *L, enum lua_place from, const char *name, ch
}
str = lua_tostring(L, -1);
- strncpy(dst, str, size);
+ prox_strncpy(dst, str, size);
lua_pop(L, pop);
return 0;
@@ -417,8 +418,8 @@ int lua_to_next_hop(struct lua_State *L, enum lua_place from, const char *name,
uint32_t next_hop_index;
uint32_t port_id;
uint32_t ip;
- uint32_t mpls;
- struct ether_addr mac;
+ uint32_t mpls = 0;
+ prox_rte_ether_addr mac;
int pop;
if ((pop = lua_getfrom(L, from, name)) < 0)
@@ -436,11 +437,11 @@ int lua_to_next_hop(struct lua_State *L, enum lua_place from, const char *name,
while (lua_next(L, -2)) {
if (lua_to_int(L, TABLE, "id", &next_hop_index) ||
lua_to_int(L, TABLE, "port_id", &port_id) ||
- lua_to_ip(L, TABLE, "ip", &ip) ||
- lua_to_mac(L, TABLE, "mac", &mac) ||
- lua_to_int(L, TABLE, "mpls", &mpls))
+ lua_to_ip(L, TABLE, "ip", &ip))
return -1;
+ lua_to_mac(L, TABLE, "mac", &mac);
+ lua_to_int(L, TABLE, "mpls", &mpls);
PROX_PANIC(port_id >= PROX_MAX_PORTS, "Port id too high (only supporting %d ports)\n", PROX_MAX_PORTS);
PROX_PANIC(next_hop_index >= MAX_HOP_INDEX, "Next-hop to high (only supporting %d next hops)\n", MAX_HOP_INDEX);
@@ -462,7 +463,7 @@ int lua_to_next_hop6(struct lua_State *L, enum lua_place from, const char *name,
{
struct next_hop6 *ret;
uint32_t next_hop_index, port_id, mpls;
- struct ether_addr mac;
+ prox_rte_ether_addr mac;
uint8_t ip[16];
int pop;
@@ -503,6 +504,7 @@ int lua_to_next_hop6(struct lua_State *L, enum lua_place from, const char *name,
return 0;
}
+#define MAX_NEW_RULES 128
int lua_to_routes4(struct lua_State *L, enum lua_place from, const char *name, uint8_t socket, struct lpm4 *lpm)
{
struct ip4_subnet dst;
@@ -513,11 +515,12 @@ int lua_to_routes4(struct lua_State *L, enum lua_place from, const char *name, u
char lpm_name[64];
int ret;
int pop;
+ static int count = 1;
if ((pop = lua_getfrom(L, from, name)) < 0)
return -1;
- snprintf(lpm_name, sizeof(lpm_name), "IPv4_lpm_s%u", socket);
+ snprintf(lpm_name, sizeof(lpm_name), "IPv4_lpm_s%u_%d", socket, count++);
if (!lua_istable(L, -1)) {
set_err("Data is not a table\n");
@@ -530,12 +533,12 @@ int lua_to_routes4(struct lua_State *L, enum lua_place from, const char *name, u
lua_pop(L, 1);
#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,1)
struct rte_lpm_config conf;
- conf.max_rules = 2 * n_tot_rules;
+ conf.max_rules = 2 * n_tot_rules + MAX_NEW_RULES;
conf.number_tbl8s = 256;
conf.flags = 0;
new_lpm = rte_lpm_create(lpm_name, socket, &conf);
#else
- new_lpm = rte_lpm_create(lpm_name, socket, 2 * n_tot_rules, 0);
+ new_lpm = rte_lpm_create(lpm_name, socket, 2 * n_tot_rules + MAX_NEW_RULES, 0);
#endif
PROX_PANIC(NULL == new_lpm, "Failed to allocate lpm\n");
@@ -918,7 +921,7 @@ int lua_to_cpe_table_data(struct lua_State *L, enum lua_place from, const char *
struct ip4_subnet cidr;
uint32_t n_entries = 0;
uint32_t port_idx, gre_id, svlan, cvlan, user;
- struct ether_addr mac;
+ prox_rte_ether_addr mac;
uint32_t idx = 0;
lua_pushnil(L);
diff --git a/VNFs/DPPD-PROX/prox_lua_types.h b/VNFs/DPPD-PROX/prox_lua_types.h
index 182c9055..83cc73cd 100644
--- a/VNFs/DPPD-PROX/prox_lua_types.h
+++ b/VNFs/DPPD-PROX/prox_lua_types.h
@@ -21,10 +21,10 @@
#include <rte_ether.h>
#include <rte_hash.h>
+#include "prox_compat.h"
#include "ip6_addr.h"
struct lua_State;
-struct ether_addr;
struct ip4_subnet;
struct ip6_subnet;
struct next_hop;
@@ -70,10 +70,10 @@ struct lpm6 {
struct ipv6_tun_binding_entry {
struct ipv6_addr endpoint_addr; // IPv6 local addr
- struct ether_addr next_hop_mac; // mac addr of next hop towards lwB4
+ prox_rte_ether_addr next_hop_mac; // mac addr of next hop towards lwB4
uint32_t public_ipv4; // Public IPv4 address
uint16_t public_port; // Public base port (together with port mask, defines the Port Set)
-} __attribute__((__packed__));
+} __attribute__((__packed__)) __attribute__((__aligned__(2)));
struct ipv6_tun_binding_table {
uint32_t num_binding_entries;
@@ -86,7 +86,7 @@ struct cpe_table_entry {
uint32_t svlan;
uint32_t cvlan;
uint32_t ip;
- struct ether_addr eth_addr;
+ prox_rte_ether_addr eth_addr;
uint32_t user;
};
@@ -115,7 +115,7 @@ int lua_getfrom(struct lua_State *L, enum lua_place from, const char *name);
int lua_to_port(struct lua_State *L, enum lua_place from, const char *name, uint16_t *port);
int lua_to_ip(struct lua_State *L, enum lua_place from, const char *name, uint32_t *ip);
int lua_to_ip6(struct lua_State *L, enum lua_place from, const char *name, uint8_t *ip);
-int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, struct ether_addr *mac);
+int lua_to_mac(struct lua_State *L, enum lua_place from, const char *name, prox_rte_ether_addr *mac);
int lua_to_cidr(struct lua_State *L, enum lua_place from, const char *name, struct ip4_subnet *cidr);
int lua_to_cidr6(struct lua_State *L, enum lua_place from, const char *name, struct ip6_subnet *cidr);
int lua_to_int(struct lua_State *L, enum lua_place from, const char *name, uint32_t *val);
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.c b/VNFs/DPPD-PROX/prox_port_cfg.c
index c4787b1e..3b7f778d 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.c
+++ b/VNFs/DPPD-PROX/prox_port_cfg.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -31,6 +31,9 @@
#endif
#endif
+#include <sys/ioctl.h>
+#include <net/if.h>
+
#include "prox_port_cfg.h"
#include "prox_globals.h"
#include "log.h"
@@ -40,8 +43,12 @@
#include "defines.h"
#include "prox_cksum.h"
#include "stats_irq.h"
+#include "prox_compat.h"
+#include "rte_ethdev.h"
+#include "lconf.h"
struct prox_port_cfg prox_port_cfg[PROX_MAX_PORTS];
+
rte_atomic32_t lsc;
int prox_nb_active_ports(void)
@@ -102,10 +109,10 @@ void prox_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *_m, unsig
struct rte_mbuf *mbuf = _m;
#if RTE_VERSION >= RTE_VERSION_NUM(1,8,0,0)
- mbuf->tx_offload = CALC_TX_OL(sizeof(struct ether_hdr), sizeof(struct ipv4_hdr));
+ mbuf->tx_offload = CALC_TX_OL(sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr));
#else
- mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mbuf->pkt.vlan_macip.f.l2_len = sizeof(prox_rte_ether_hdr);
+ mbuf->pkt.vlan_macip.f.l3_len = sizeof(prox_rte_ipv4_hdr);
#endif
rte_pktmbuf_init(mp, opaque_arg, mbuf, i);
@@ -123,14 +130,159 @@ void prox_pktmbuf_reinit(void *arg, void *start, __attribute__((unused)) void *e
prox_pktmbuf_init(init_args->mp, init_args->lconf, obj, idx);
}
+#define CONFIGURE_TX_OFFLOAD(flag) \
+ if (port_cfg->requested_tx_offload & flag) {\
+ if (port_cfg->disabled_tx_offload & flag) {\
+ plog_info("\t\t%s disabled by configuration\n", #flag);\
+ port_cfg->requested_tx_offload &= ~flag;\
+ } else if (port_cfg->dev_info.tx_offload_capa & flag) {\
+ port_cfg->port_conf.txmode.offloads |= flag;\
+ plog_info("\t\t%s enabled on port\n", #flag);\
+ } else if (port_cfg->dev_info.tx_queue_offload_capa & flag) {\
+ port_cfg->tx_conf.offloads |= flag;\
+ plog_info("\t\t%s enabled on queue\n", #flag);\
+ } else {\
+ port_cfg->requested_tx_offload &= ~flag;\
+ plog_info("\t\t%s disabled as neither port or queue supports it\n", #flag);\
+ }\
+ } else {\
+ plog_info("\t\t%s disabled\n", #flag);\
+ }\
+
+#define CONFIGURE_RX_OFFLOAD(flag) \
+ if (port_cfg->requested_rx_offload & flag) {\
+ if (port_cfg->dev_info.rx_offload_capa & flag) {\
+ port_cfg->port_conf.rxmode.offloads |= flag;\
+ plog_info("\t\t%s enabled on port\n", #flag);\
+ } else if (port_cfg->dev_info.rx_queue_offload_capa & flag) {\
+ port_cfg->rx_conf.offloads |= flag;\
+ plog_info("\t\t%s enabled on queue\n", #flag);\
+ } else {\
+ port_cfg->requested_rx_offload &= ~flag;\
+ plog_info("\t\t%s disabled as neither port or queue supports it\n", #flag);\
+ }\
+ } else {\
+ plog_info("\t\t%s disabled\n", #flag);\
+ }\
+
+static inline uint32_t get_netmask(uint8_t prefix)
+{
+ if (prefix == 0)
+ return(~((uint32_t) -1));
+ else
+ return rte_cpu_to_be_32(~((1 << (32 - prefix)) - 1));
+}
+
+static void set_ip_address(char *devname, uint32_t ip, uint8_t prefix)
+{
+ struct ifreq ifreq;
+ struct sockaddr_in in_addr;
+ int fd, rc;
+ uint32_t netmask = get_netmask(prefix);
+ plog_info("Setting netmask to %x\n", netmask);
+ uint32_t ip_cpu = rte_be_to_cpu_32(ip);
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+
+ memset(&ifreq, 0, sizeof(struct ifreq));
+ memset(&in_addr, 0, sizeof(struct sockaddr_in));
+
+ in_addr.sin_family = AF_INET;
+ in_addr.sin_addr = *(struct in_addr *)&ip_cpu;
+
+ prox_strncpy(ifreq.ifr_name, devname, IFNAMSIZ);
+ ifreq.ifr_addr = *(struct sockaddr *)&in_addr;
+ rc = ioctl(fd, SIOCSIFADDR, &ifreq);
+ PROX_PANIC(rc < 0, "Failed to set IP address %x on device %s: error = %d (%s)\n", ip_cpu, devname, errno, strerror(errno));
+
+ in_addr.sin_addr = *(struct in_addr *)&netmask;
+ ifreq.ifr_netmask = *(struct sockaddr *)&in_addr;
+ rc = ioctl(fd, SIOCSIFNETMASK, &ifreq);
+ PROX_PANIC(rc < 0, "Failed to set netmask %x (prefix %d) on device %s: error = %d (%s)\n", netmask, prefix, devname, errno, strerror(errno));
+ close(fd);
+}
+
/* initialize rte devices and check the number of available ports */
void init_rte_dev(int use_dummy_devices)
{
uint8_t nb_ports, port_id_max;
- int port_id_last;
+ int port_id_last, rc = 0;
struct rte_eth_dev_info dev_info;
+ const struct rte_pci_device *pci_dev;
+
+ for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
+ if (prox_port_cfg[port_id].active && (prox_port_cfg[port_id].virtual == 0) && (port_id >= prox_rte_eth_dev_count_avail())) {
+ PROX_PANIC(1, "port %u used but only %u available\n", port_id, prox_rte_eth_dev_count_avail());
+ }
+ }
+ for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
+ if (!prox_port_cfg[port_id].active) {
+ continue;
+ }
+ struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
+
+ prox_port_cfg[port_id].n_vlans = 0;
+ while ((prox_port_cfg[port_id].n_vlans < PROX_MAX_VLAN_TAGS) && (prox_port_cfg[port_id].vlan_tags[prox_port_cfg[port_id].n_vlans])) {
+ prox_port_cfg[port_id].n_vlans++;
+ }
- nb_ports = rte_eth_dev_count();
+ if (port_cfg->vdev[0]) {
+ char name[MAX_NAME_BUFFER_SIZE], tap[MAX_NAME_SIZE];
+ snprintf(tap, MAX_NAME_SIZE, "net_tap%d", port_id);
+#if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,1))
+ snprintf(name, MAX_NAME_BUFFER_SIZE, "iface=%s", port_cfg->vdev);
+ rc = rte_vdev_init(tap, name);
+#else
+ PROX_PANIC(1, "vdev not supported in DPDK < 17.05\n");
+#endif
+ PROX_PANIC(rc != 0, "Unable to create device %s %s\n", "net tap", port_cfg->vdev);
+ int vdev_port_id = prox_rte_eth_dev_count_avail() - 1;
+ PROX_PANIC(vdev_port_id >= PROX_MAX_PORTS, "Too many port defined %d >= %d\n", vdev_port_id, PROX_MAX_PORTS);
+ plog_info("\tCreating device %s, port %d\n", port_cfg->vdev, vdev_port_id);
+ prox_port_cfg[vdev_port_id].is_vdev = 1;
+ prox_port_cfg[vdev_port_id].active = 1;
+ prox_port_cfg[vdev_port_id].dpdk_mapping = port_id;
+ prox_port_cfg[vdev_port_id].n_txq = 1;
+ prox_port_cfg[vdev_port_id].n_vlans = prox_port_cfg[port_id].n_vlans;
+
+ for (uint32_t tag_id = 0; tag_id < prox_port_cfg[port_id].n_vlans; tag_id++) {
+ prox_port_cfg[vdev_port_id].vlan_tags[tag_id] = prox_port_cfg[port_id].vlan_tags[tag_id];
+ char command[1024];
+ snprintf(prox_port_cfg[vdev_port_id].names[tag_id], MAX_NAME_BUFFER_SIZE, "%s_%d", port_cfg->vdev, prox_port_cfg[port_id].vlan_tags[tag_id]);
+ sprintf(command, "ip link add link %s name %s type vlan id %d", port_cfg->vdev, prox_port_cfg[vdev_port_id].names[tag_id], prox_port_cfg[port_id].vlan_tags[tag_id]);
+ system(command);
+ plog_info("\tRunning %s\n", command);
+ plog_info("\tUsing vlan tag %d - added device %s\n", prox_port_cfg[port_id].vlan_tags[tag_id], prox_port_cfg[vdev_port_id].names[tag_id]);
+ }
+ if (prox_port_cfg[port_id].n_vlans == 0) {
+ strncpy(prox_port_cfg[vdev_port_id].names[0], port_cfg->vdev, MAX_NAME_SIZE);
+ prox_port_cfg[vdev_port_id].n_vlans = 1;
+ prox_port_cfg[vdev_port_id].vlan_tags[0] = 0;
+ }
+
+ prox_port_cfg[port_id].dpdk_mapping = vdev_port_id;
+ uint32_t i = 0;
+ while ((i < PROX_MAX_VLAN_TAGS) && (prox_port_cfg[port_id].ip_addr[i].ip)) {
+ prox_port_cfg[vdev_port_id].ip_addr[i].ip = prox_port_cfg[port_id].ip_addr[i].ip;
+ prox_port_cfg[vdev_port_id].ip_addr[i].prefix = prox_port_cfg[port_id].ip_addr[i].prefix;
+ i++;
+ }
+ prox_port_cfg[vdev_port_id].type = prox_port_cfg[port_id].type;
+ if (prox_port_cfg[vdev_port_id].type == PROX_PORT_MAC_HW) {
+ // If DPDK port MAC set to HW, then make sure the vdev has the same MAC as DPDK port
+ prox_port_cfg[vdev_port_id].type = PROX_PORT_MAC_SET;
+ rte_eth_macaddr_get(port_id, &prox_port_cfg[vdev_port_id].eth_addr);
+ plog_info("\tDPDK port %d MAC address pre-configured to MAC from port %d: "MAC_BYTES_FMT"\n",
+ vdev_port_id, port_id, MAC_BYTES(prox_port_cfg[vdev_port_id].eth_addr.addr_bytes));
+ } else
+ memcpy(&prox_port_cfg[vdev_port_id].eth_addr, &prox_port_cfg[port_id].eth_addr, sizeof(prox_port_cfg[port_id].eth_addr));
+ }
+ if (prox_port_cfg[port_id].n_vlans == 0) {
+ prox_port_cfg[port_id].n_vlans = 1;
+ prox_port_cfg[port_id].vlan_tags[0] = 0;
+ }
+ }
+ nb_ports = prox_rte_eth_dev_count_avail();
/* get available ports configuration */
PROX_PANIC(use_dummy_devices && nb_ports, "Can't use dummy devices while there are also real ports\n");
@@ -142,9 +294,9 @@ void init_rte_dev(int use_dummy_devices)
char port_name[32] = "0dummy_dev";
for (uint32_t i = 0; i < nb_ports; ++i) {
#if (RTE_VERSION > RTE_VERSION_NUM(17,5,0,1))
- rte_vdev_init(port_name, "size=ETHER_MIN_LEN,copy=0");
+ rte_vdev_init(port_name, "size=64,copy=0");
#else
- eth_dev_null_create(port_name, 0, ETHER_MIN_LEN, 0);
+ eth_dev_null_create(port_name, 0, PROX_RTE_ETHER_MIN_LEN, 0);
#endif
port_name[0]++;
}
@@ -163,46 +315,94 @@ void init_rte_dev(int use_dummy_devices)
nb_ports = PROX_MAX_PORTS;
}
+
+#if (RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0))
+ port_id_max = -1;
+ uint16_t id;
+ RTE_ETH_FOREACH_DEV(id) {
+ char name[256];
+ rte_eth_dev_get_name_by_port(id, name);
+ plog_info("\tFound DPDK port id %u %s\n", id, name);
+ if (id >= PROX_MAX_PORTS) {
+ plog_warn("\tWarning: I can deal with at most %u ports."
+ " Please update PROX_MAX_PORTS and recompile.\n", PROX_MAX_PORTS);
+ } else {
+ prox_port_cfg[id].available = 1;
+ if (id > port_id_max)
+ port_id_max = id;
+ }
+ }
+#else
port_id_max = nb_ports - 1;
+#endif
+
port_id_last = prox_last_port_active();
PROX_PANIC(port_id_last > port_id_max,
"\tError: invalid port(s) specified, last port index active: %d (max index is %d)\n",
port_id_last, port_id_max);
/* Assign ports to PROX interfaces & Read max RX/TX queues per port */
- for (uint8_t port_id = 0; port_id < nb_ports; ++port_id) {
+#if (RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0))
+ for (uint8_t port_id = 0; port_id <= port_id_last; ++port_id) {
+#else
+ for (uint8_t port_id = 0; port_id <= nb_ports; ++port_id) {
+#endif
/* skip ports that are not enabled */
if (!prox_port_cfg[port_id].active) {
continue;
+#if (RTE_VERSION >= RTE_VERSION_NUM(17,5,0,0))
+ } else if (prox_port_cfg[port_id].available == 0) {
+ PROX_PANIC(1, "port %u enabled but not available\n", port_id);
+#endif
}
plog_info("\tGetting info for rte dev %u\n", port_id);
rte_eth_dev_info_get(port_id, &dev_info);
struct prox_port_cfg* port_cfg = &prox_port_cfg[port_id];
port_cfg->socket = -1;
+ memcpy(&port_cfg->dev_info, &dev_info, sizeof(struct rte_eth_dev_info));
port_cfg->max_txq = dev_info.max_tx_queues;
port_cfg->max_rxq = dev_info.max_rx_queues;
-
- if (!dev_info.pci_dev)
- continue;
-
- snprintf(port_cfg->pci_addr, sizeof(port_cfg->pci_addr),
- "%04x:%02x:%02x.%1x", dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);
- strncpy(port_cfg->driver_name, dev_info.driver_name, sizeof(port_cfg->driver_name));
+ port_cfg->max_rx_pkt_len = dev_info.max_rx_pktlen;
+ port_cfg->min_rx_bufsize = dev_info.min_rx_bufsize;
+ port_cfg->min_tx_desc = dev_info.tx_desc_lim.nb_min;
+ port_cfg->max_tx_desc = dev_info.tx_desc_lim.nb_max;
+ port_cfg->min_rx_desc = dev_info.rx_desc_lim.nb_min;
+ port_cfg->max_rx_desc = dev_info.rx_desc_lim.nb_max;
+
+ prox_strncpy(port_cfg->driver_name, dev_info.driver_name, sizeof(port_cfg->driver_name));
plog_info("\tPort %u : driver='%s' tx_queues=%d rx_queues=%d\n", port_id, !strcmp(port_cfg->driver_name, "")? "null" : port_cfg->driver_name, port_cfg->max_txq, port_cfg->max_rxq);
+ plog_info("\tPort %u : %d<=nb_tx_desc<=%d %d<=nb_rx_desc<=%d\n", port_id, port_cfg->min_tx_desc, port_cfg->max_tx_desc, port_cfg->min_rx_desc, port_cfg->max_rx_desc);
if (strncmp(port_cfg->driver_name, "rte_", 4) == 0) {
- strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name));
+ prox_strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name));
} else if (strncmp(port_cfg->driver_name, "net_", 4) == 0) {
- strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name));
+ prox_strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name + 4, sizeof(port_cfg->short_name));
} else {
- strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name, sizeof(port_cfg->short_name));
+ prox_strncpy(port_cfg->short_name, prox_port_cfg[port_id].driver_name, sizeof(port_cfg->short_name));
}
char *ptr;
if ((ptr = strstr(port_cfg->short_name, "_pmd")) != NULL) {
*ptr = '\x0';
}
+ // Set socket for vdev device identical to socket of corresponding port
+ if (prox_port_cfg[port_id].is_vdev) {
+ prox_port_cfg[port_id].socket = prox_port_cfg[prox_port_cfg[port_id].dpdk_mapping].socket;
+ continue;
+ }
+
+#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
+ pci_dev = dev_info.pci_dev;
+#else
+ if (!dev_info.device)
+ continue;
+ pci_dev = RTE_DEV_TO_PCI(dev_info.device);
+#endif
+ if (!pci_dev)
+ continue;
+ snprintf(port_cfg->pci_addr, sizeof(port_cfg->pci_addr),
+ "%04x:%02x:%02x.%1x", pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
/* Try to find the device's numa node */
char buf[1024];
snprintf(buf, sizeof(buf), "/sys/bus/pci/devices/%s/numa_node", port_cfg->pci_addr);
@@ -218,11 +418,21 @@ void init_rte_dev(int use_dummy_devices)
fclose(numa_node_fd);
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
- port_cfg->capabilities.tx_offload_cksum |= IPV4_CKSUM;
+	// In DPDK 18.08 vmxnet3 reports it supports IPV4 checksum, but packets do not go through when IPv4 cksum is enabled
+ if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) {
+ plog_info("\t\tDisabling IPV4 cksum on vmxnet3\n");
+ port_cfg->disabled_tx_offload |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
}
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
- port_cfg->capabilities.tx_offload_cksum |= UDP_CKSUM;
+ if ((!strcmp(port_cfg->short_name, "vmxnet3")) && (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+ plog_info("\t\tDisabling UDP cksum on vmxnet3\n");
+ port_cfg->disabled_tx_offload |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
+ }
+	// Some OVS versions report that they support UDP offload and no IPv4 offload, but fail when UDP offload is enabled
+ if ((!strcmp(port_cfg->short_name, "virtio")) &&
+ ((port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) &&
+ (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
+ plog_info("\t\tDisabling UDP cksum on virtio\n");
+ port_cfg->disabled_tx_offload |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
}
}
}
@@ -246,7 +456,7 @@ uint8_t init_rte_ring_dev(void)
struct rte_ring* tx_ring = rte_ring_lookup(port_cfg->tx_ring);
PROX_PANIC(tx_ring == NULL, "Ring %s not found for port %d!\n", port_cfg->tx_ring, port_id);
- int ret = rte_eth_from_rings(port_cfg->name, &rx_ring, 1, &tx_ring, 1, rte_socket_id());
+ int ret = rte_eth_from_rings(port_cfg->names[0], &rx_ring, 1, &tx_ring, 1, rte_socket_id());
PROX_PANIC(ret != 0, "Failed to create eth_dev from rings for port %d\n", port_id);
port_cfg->port_conf.intr_conf.lsc = 0; /* Link state interrupt not supported for ring-backed ports */
@@ -258,19 +468,167 @@ uint8_t init_rte_ring_dev(void)
return nb_ring_dev;
}
-static void init_port(struct prox_port_cfg *port_cfg)
+static void print_port_capa(struct prox_port_cfg *port_cfg)
{
- static char dummy_pool_name[] = "0_dummy";
- struct rte_eth_link link;
uint8_t port_id;
- int ret;
port_id = port_cfg - prox_port_cfg;
plog_info("\t*** Initializing port %u ***\n", port_id);
- plog_info("\t\tPort name is set to %s\n", port_cfg->name);
+ plog_info("\t\tPort name is set to %s\n", port_cfg->names[0]);
plog_info("\t\tPort max RX/TX queue is %u/%u\n", port_cfg->max_rxq, port_cfg->max_txq);
plog_info("\t\tPort driver is %s\n", port_cfg->driver_name);
+#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
+ plog_info("\t\tSupported speed mask = 0x%x\n", port_cfg->dev_info.speed_capa);
+#endif
+ if (port_cfg->max_link_speed != UINT32_MAX) {
+ plog_info("\t\tHighest link speed capa = %d Mbps\n", port_cfg->max_link_speed);
+ }
+
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+ plog_info("\t\tRX offload capa = 0x%lx = ", port_cfg->dev_info.rx_offload_capa);
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ plog_info("VLAN STRIP | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
+ plog_info("IPV4 CKSUM | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_UDP_CKSUM)
+ plog_info("UDP CKSUM | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
+ plog_info("TCP CKSUM | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+ plog_info("TCP LRO | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+ plog_info("QINQ STRIP | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)
+ plog_info("OUTER_IPV4_CKSUM | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_MACSEC_STRIP)
+ plog_info("MACSEC STRIP | ");
+#if defined(RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_HEADER_SPLIT)
+ plog_info("HEADER SPLIT | ");
+#endif
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ plog_info("VLAN FILTER | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+ plog_info("VLAN EXTEND | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME)
+ plog_info("JUMBO FRAME | ");
+#if defined(RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ plog_info("CRC STRIP | ");
+#endif
+#if defined(RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ plog_info("KEEP CRC | ");
+#endif
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SCATTER)
+ plog_info("SCATTER | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ plog_info("TIMESTAMP | ");
+ if (port_cfg->dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_SECURITY)
+ plog_info("SECURITY ");
+ plog_info("\n");
+
+ plog_info("\t\tTX offload capa = 0x%lx = ", port_cfg->dev_info.tx_offload_capa);
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VLAN_INSERT)
+ plog_info("VLAN INSERT | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
+ plog_info("IPV4 CKSUM | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
+ plog_info("UDP CKSUM | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+ plog_info("TCP CKSUM | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)
+ plog_info("SCTP CKSUM | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_TSO)
+ plog_info("TCP TS0 | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TSO)
+ plog_info("UDP TSO | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
+ plog_info("OUTER IPV4 CKSUM | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_QINQ_INSERT)
+ plog_info("QINQ INSERT | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO)
+ plog_info("VLAN TNL TSO | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO)
+ plog_info("GRE TNL TSO | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO)
+ plog_info("IPIP TNL TSO | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)
+ plog_info("GENEVE TNL TSO | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MACSEC_INSERT)
+ plog_info("MACSEC INSERT | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MT_LOCKFREE)
+ plog_info("MT LOCKFREE | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+ plog_info("MULTI SEG | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_SECURITY)
+ plog_info("SECURITY | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO)
+ plog_info("UDP TNL TSO | ");
+ if (port_cfg->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IP_TNL_TSO)
+ plog_info("IP TNL TSO | ");
+ plog_info("\n");
+
+ plog_info("\t\trx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.rx_queue_offload_capa);
+ plog_info("\t\ttx_queue_offload_capa = 0x%lx\n", port_cfg->dev_info.tx_queue_offload_capa);
+ plog_info("\t\tflow_type_rss_offloads = 0x%lx\n", port_cfg->dev_info.flow_type_rss_offloads);
+ plog_info("\t\tdefault RX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_rxportconf.burst_size, port_cfg->dev_info.default_rxportconf.ring_size, port_cfg->dev_info.default_rxportconf.nb_queues);
+ plog_info("\t\tdefault TX port conf: burst_size = %d, ring_size = %d, nb_queues = %d\n", port_cfg->dev_info.default_txportconf.burst_size, port_cfg->dev_info.default_txportconf.ring_size, port_cfg->dev_info.default_txportconf.nb_queues);
+#endif
+}
+
+static void get_max_link_speed(struct prox_port_cfg *port_cfg)
+{
+ port_cfg->max_link_speed = UINT32_MAX;
+
+#if RTE_VERSION >= RTE_VERSION_NUM(16,4,0,0)
+	// virtio and vmxnet3 report a fake max_link_speed
+ if (strcmp(port_cfg->short_name, "vmxnet3") && strcmp(port_cfg->short_name, "virtio")) {
+ // Get link_speed from highest capability from the port
+ // This will be used by gen and lat for extrapolation purposes
+ // The negotiated link_speed (as reported by rte_eth_link_get
+ // or rte_eth_link_get_nowait) might be reported too late
+	// and might result in wrong extrapolation, and hence should not be used
+ // for extrapolation purposes
+ if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_100G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_100G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_56G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_56G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_50G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_50G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_40G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_40G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_25G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_25G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_20G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_20G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_10G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_10G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_5G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_5G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_2_5G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_2_5G;
+ else if (port_cfg->dev_info.speed_capa & RTE_ETH_LINK_SPEED_1G)
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_1G;
+ else if (port_cfg->dev_info.speed_capa & (RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M))
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_100M;
+ else if (port_cfg->dev_info.speed_capa & (RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M))
+ port_cfg->max_link_speed = RTE_ETH_SPEED_NUM_10M;
+
+ }
+#endif
+}
+
+static void init_port(struct prox_port_cfg *port_cfg)
+{
+ static char dummy_pool_name[] = "0_dummy";
+ struct rte_eth_link link;
+ uint8_t port_id;
+ int ret;
+ get_max_link_speed(port_cfg);
+ print_port_capa(port_cfg);
+ port_id = port_cfg - prox_port_cfg;
PROX_PANIC(port_cfg->n_rxq == 0 && port_cfg->n_txq == 0,
"\t\t port %u is enabled but no RX or TX queues have been configured", port_id);
@@ -278,10 +636,10 @@ static void init_port(struct prox_port_cfg *port_cfg)
/* not receiving on this port */
plog_info("\t\tPort %u had no RX queues, setting to 1\n", port_id);
port_cfg->n_rxq = 1;
- uint32_t mbuf_size = MBUF_SIZE;
- if (strcmp(port_cfg->short_name, "vmxnet3") == 0) {
- mbuf_size = MBUF_SIZE + RTE_PKTMBUF_HEADROOM;
- }
+ uint32_t mbuf_size = TX_MBUF_SIZE;
+ if (mbuf_size < port_cfg->min_rx_bufsize + RTE_PKTMBUF_HEADROOM + sizeof(struct rte_mbuf))
+ mbuf_size = port_cfg->min_rx_bufsize + RTE_PKTMBUF_HEADROOM + sizeof(struct rte_mbuf);
+
plog_info("\t\tAllocating dummy memory pool on socket %u with %u elements of size %u\n",
port_cfg->socket, port_cfg->n_rxd, mbuf_size);
port_cfg->pool[0] = rte_mempool_create(dummy_pool_name, port_cfg->n_rxd, mbuf_size,
@@ -294,12 +652,15 @@ static void init_port(struct prox_port_cfg *port_cfg)
port_cfg->socket, port_cfg->n_rxd);
dummy_pool_name[0]++;
} else {
- // Most pmd do not support setting mtu yet...
- if (!strcmp(port_cfg->short_name, "ixgbe")) {
- plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id);
- ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu);
- PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);
+ // Most pmd should now support setting mtu
+ if (port_cfg->mtu + PROX_RTE_ETHER_HDR_LEN + PROX_RTE_ETHER_CRC_LEN > port_cfg->max_rx_pkt_len) {
+ plog_info("\t\tMTU is too big for the port, reducing MTU from %d to %d\n", port_cfg->mtu, port_cfg->max_rx_pkt_len);
+ port_cfg->mtu = port_cfg->max_rx_pkt_len;
}
+ plog_info("\t\tSetting MTU size to %u for port %u ...\n", port_cfg->mtu, port_id);
+ ret = rte_eth_dev_set_mtu(port_id, port_cfg->mtu);
+ if (ret)
+ plog_err("\t\t\trte_eth_dev_set_mtu() failed on port %u: error %d\n", port_id, ret);
if (port_cfg->n_txq == 0) {
/* not sending on this port */
@@ -310,15 +671,89 @@ static void init_port(struct prox_port_cfg *port_cfg)
if (port_cfg->n_rxq > 1) {
// Enable RSS if multiple receive queues
- port_cfg->port_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS;
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
+ if (strcmp(port_cfg->short_name, "virtio")) {
+ port_cfg->port_conf.rxmode.mq_mode |= RTE_ETH_MQ_RX_RSS;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key = toeplitz_init_key;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_key_len = TOEPLITZ_KEY_LEN;
#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONFRAG_IPV4_UDP;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP|RTE_ETH_RSS_UDP;
#else
- port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IPV4|ETH_RSS_NONF_IPV4_UDP;
+#endif
+ }
+ }
+
+ // Make sure that the requested RSS offload is supported by the PMD
+#if RTE_VERSION >= RTE_VERSION_NUM(2,0,0,0)
+ port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf &= port_cfg->dev_info.flow_type_rss_offloads;
+#endif
+ if (strcmp(port_cfg->short_name, "virtio")) {
+ plog_info("\t\t Enabling RSS rss_hf = 0x%lx (requested 0x%llx, supported 0x%lx)\n", port_cfg->port_conf.rx_adv_conf.rss_conf.rss_hf, RTE_ETH_RSS_IP|RTE_ETH_RSS_UDP, port_cfg->dev_info.flow_type_rss_offloads);
+ } else {
+ plog_info("\t\t Not enabling RSS on virtio port");
+ }
+
+	// rxmode such as hw crc strip
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_CRC_STRIP);
+#endif
+#if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_KEEP_CRC);
#endif
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_JUMBO_FRAME);
+ CONFIGURE_RX_OFFLOAD(RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
+#else
+ if (port_cfg->requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP) {
+ port_cfg->port_conf.rxmode.hw_strip_crc = 1;
+ }
+ if (port_cfg->requested_rx_offload & RTE_ETH_RX_OFFLOAD_JUMBO_FRAME) {
+ port_cfg->port_conf.rxmode.jumbo_frame = 1;
}
+#endif
+
+ // IPV4, UDP, SCTP Checksums
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_IPV4_CKSUM);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_VLAN_INSERT);
+#else
+ if ((port_cfg->dev_info.tx_offload_capa & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) == 0) {
+ port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
+ plog_info("\t\tDisabling TX offloads as pmd reports that it does not support them)\n");
+ }
+ if (!strcmp(port_cfg->short_name, "vmxnet3")) {
+ port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
+ plog_info("\t\tDisabling SCTP offload on port %d as vmxnet3 does not support them\n", port_id);
+ }
+#endif
+ // Multi Segments
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_MULTI_SEGS);
+#else
+ if (!strcmp(port_cfg->short_name, "vmxnet3")) {
+ port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+ plog_info("\t\tDisabling TX multsegs on port %d as vmxnet3 does not support them\n", port_id);
+ } else if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS)
+ plog_info("\t\tDisabling TX multsegs on port %d\n", port_id);
+ else
+ plog_info("\t\tEnabling TX multsegs on port %d\n", port_id);
+
+ if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
+ plog_info("\t\tEnabling No TX offloads on port %d\n", port_id);
+ else
+ plog_info("\t\tTX offloads enabled on port %d\n", port_id);
+#endif
+
+ // Refcount
+#if RTE_VERSION >= RTE_VERSION_NUM(18,8,0,1)
+ CONFIGURE_TX_OFFLOAD(RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE);
+#else
+ if (port_cfg->tx_conf.txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT)
+ plog_info("\t\tEnabling No refcnt on port %d\n", port_id);
+ else
+ plog_info("\t\tRefcnt enabled on port %d\n", port_id);
+#endif
plog_info("\t\tConfiguring port %u... with %u RX queues and %u TX queues\n",
port_id, port_cfg->n_rxq, port_cfg->n_txq);
@@ -343,12 +778,24 @@ static void init_port(struct prox_port_cfg *port_cfg)
port_cfg->port_conf.intr_conf.lsc = port_cfg->lsc_val;
plog_info("\t\tOverriding link state interrupt configuration to '%s'\n", port_cfg->lsc_val? "enabled" : "disabled");
}
- if (!strcmp(port_cfg->short_name, "vmxnet3")) {
- if (port_cfg->n_txd < 512) {
- // Vmxnet3 driver requires minimum 512 tx descriptors
- plog_info("\t\tNumber of TX descriptors is set to 512 (minimum required for vmxnet3\n");
- port_cfg->n_txd = 512;
- }
+ if (port_cfg->n_txd < port_cfg->min_tx_desc) {
+ plog_info("\t\tNumber of TX descriptors is set to %d (minimum required for %s\n", port_cfg->min_tx_desc, port_cfg->short_name);
+ port_cfg->n_txd = port_cfg->min_tx_desc;
+ }
+
+ if (port_cfg->n_rxd < port_cfg->min_rx_desc) {
+ plog_info("\t\tNumber of RX descriptors is set to %d (minimum required for %s\n", port_cfg->min_rx_desc, port_cfg->short_name);
+ port_cfg->n_rxd = port_cfg->min_rx_desc;
+ }
+
+ if (port_cfg->n_txd > port_cfg->max_tx_desc) {
+ plog_info("\t\tNumber of TX descriptors is set to %d (maximum required for %s\n", port_cfg->max_tx_desc, port_cfg->short_name);
+ port_cfg->n_txd = port_cfg->max_tx_desc;
+ }
+
+ if (port_cfg->n_rxd > port_cfg->max_rx_desc) {
+ plog_info("\t\tNumber of RX descriptors is set to %d (maximum required for %s\n", port_cfg->max_rx_desc, port_cfg->short_name);
+ port_cfg->n_rxd = port_cfg->max_rx_desc;
}
ret = rte_eth_dev_configure(port_id, port_cfg->n_rxq,
@@ -361,36 +808,26 @@ static void init_port(struct prox_port_cfg *port_cfg)
plog_info("\t\tMAC address set to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
+ /* initialize TX queues first */
+ for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) {
+ plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n",
+ queue_id, port_cfg->socket, port_cfg->n_txd);
+ ret = rte_eth_tx_queue_setup(port_id, queue_id, port_cfg->n_txd,
+ port_cfg->socket, &port_cfg->tx_conf);
+ PROX_PANIC(ret < 0, "\t\t\trte_eth_tx_queue_setup() failed on port %u: error %d\n", port_id, ret);
+ }
+
/* initialize RX queues */
for (uint16_t queue_id = 0; queue_id < port_cfg->n_rxq; ++queue_id) {
plog_info("\t\tSetting up RX queue %u on port %u on socket %u with %u desc (pool 0x%p)\n",
queue_id, port_id, port_cfg->socket,
port_cfg->n_rxd, port_cfg->pool[queue_id]);
-
ret = rte_eth_rx_queue_setup(port_id, queue_id,
port_cfg->n_rxd,
port_cfg->socket, &port_cfg->rx_conf,
port_cfg->pool[queue_id]);
-
PROX_PANIC(ret < 0, "\t\t\trte_eth_rx_queue_setup() failed on port %u: error %s (%d)\n", port_id, strerror(-ret), ret);
}
- if (!strcmp(port_cfg->short_name, "virtio")) {
- port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS;
- plog_info("\t\tDisabling TX offloads (virtio does not support TX offloads)\n");
- }
-
- if (!strcmp(port_cfg->short_name, "vmxnet3")) {
- port_cfg->tx_conf.txq_flags |= ETH_TXQ_FLAGS_NOOFFLOADS | ETH_TXQ_FLAGS_NOMULTSEGS;
- plog_info("\t\tDisabling TX offloads and multsegs on port %d as vmxnet3 does not support them\n", port_id);
- }
- /* initialize one TX queue per logical core on each port */
- for (uint16_t queue_id = 0; queue_id < port_cfg->n_txq; ++queue_id) {
- plog_info("\t\tSetting up TX queue %u on socket %u with %u desc\n",
- queue_id, port_cfg->socket, port_cfg->n_txd);
- ret = rte_eth_tx_queue_setup(port_id, queue_id, port_cfg->n_txd,
- port_cfg->socket, &port_cfg->tx_conf);
- PROX_PANIC(ret < 0, "\t\t\trte_eth_tx_queue_setup() failed on port %u: error %d\n", port_id, ret);
- }
plog_info("\t\tStarting up port %u ...", port_id);
ret = rte_eth_dev_start(port_id);
@@ -398,6 +835,11 @@ static void init_port(struct prox_port_cfg *port_cfg)
PROX_PANIC(ret < 0, "\n\t\t\trte_eth_dev_start() failed on port %u: error %d\n", port_id, ret);
plog_info(" done: ");
+ if (prox_port_cfg[port_id].is_vdev) {
+ for (int vlan_id = 0; vlan_id < prox_port_cfg[port_id].n_vlans; vlan_id++) {
+ set_ip_address(prox_port_cfg[port_id].names[vlan_id], prox_port_cfg[port_id].ip_addr[vlan_id].ip, prox_port_cfg[port_id].ip_addr[vlan_id].prefix);
+ }
+ }
/* Getting link status can be done without waiting if Link
State Interrupt is enabled since in that case, if the link
is recognized as being down, an interrupt will notify that
@@ -409,10 +851,11 @@ static void init_port(struct prox_port_cfg *port_cfg)
port_cfg->link_up = link.link_status;
port_cfg->link_speed = link.link_speed;
+
if (link.link_status) {
plog_info("Link Up - speed %'u Mbps - %s\n",
link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
"full-duplex" : "half-duplex");
}
else {
@@ -428,21 +871,41 @@ static void init_port(struct prox_port_cfg *port_cfg)
strcmp(port_cfg->short_name, "i40e") &&
strcmp(port_cfg->short_name, "i40e_vf") &&
strcmp(port_cfg->short_name, "vmxnet3")) {
- for (uint8_t i = 0; i < 16; ++i) {
+ for (uint8_t i = 0; i < port_cfg->n_rxq; ++i) {
ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
if (ret) {
plog_info("\t\trte_eth_dev_set_rx_queue_stats_mapping() failed: error %d\n", ret);
}
+ }
+ for (uint8_t i = 0; i < port_cfg->n_txq; ++i) {
ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
if (ret) {
plog_info("\t\trte_eth_dev_set_tx_queue_stats_mapping() failed: error %d\n", ret);
}
}
}
+ if (port_cfg->nb_mc_addr) {
+ rte_eth_allmulticast_enable(port_id);
+ if ((ret = rte_eth_dev_set_mc_addr_list(port_id, port_cfg->mc_addr, port_cfg->nb_mc_addr)) != 0) {
+ plog_err("\t\trte_eth_dev_set_mc_addr_list returns %d on port %u\n", ret, port_id);
+ port_cfg->nb_mc_addr = 0;
+ rte_eth_allmulticast_disable(port_id);
+ plog_info("\t\tport %u NOT in multicast mode as failed to add mcast address\n", port_id);
+ } else {
+ plog_info("\t\trte_eth_dev_set_mc_addr_list(%d addr) on port %u\n", port_cfg->nb_mc_addr, port_id);
+ plog_info("\t\tport %u in multicast mode\n", port_id);
+ }
+ }
}
void init_port_all(void)
{
+ enum rte_proc_type_t proc_type;
+ proc_type = rte_eal_process_type();
+ if (proc_type == RTE_PROC_SECONDARY) {
+ plog_info("\tSkipping port initialization as secondary process\n");
+ return;
+ }
uint8_t max_port_idx = prox_last_port_active() + 1;
for (uint8_t portid = 0; portid < max_port_idx; ++portid) {
@@ -461,13 +924,29 @@ void close_ports_atexit(void)
if (!prox_port_cfg[portid].active) {
continue;
}
+ plog_info("Closing port %u\n", portid);
rte_eth_dev_close(portid);
}
+
+ if (lcore_cfg == NULL)
+ return;
+
+ struct lcore_cfg *lconf = NULL;
+ struct task_args *targ;
+ while (core_targ_next(&lconf, &targ, 0) == 0) {
+ if (targ->pool) {
+ rte_mempool_free(targ->pool);
+ plog_info("freeing pool %p\n", targ->pool);
+ targ->pool = NULL;
+ }
+ }
}
void init_port_addr(void)
{
struct prox_port_cfg *port_cfg;
+ enum rte_proc_type_t proc_type;
+ int rc;
for (uint8_t port_id = 0; port_id < PROX_MAX_PORTS; ++port_id) {
if (!prox_port_cfg[port_id].active) {
@@ -480,9 +959,16 @@ void init_port_addr(void)
rte_eth_macaddr_get(port_id, &port_cfg->eth_addr);
break;
case PROX_PORT_MAC_RAND:
- eth_random_addr(port_cfg->eth_addr.addr_bytes);
+ prox_rte_eth_random_addr(port_cfg->eth_addr.addr_bytes);
break;
case PROX_PORT_MAC_SET:
+ proc_type = rte_eal_process_type();
+ if (proc_type == RTE_PROC_SECONDARY) {
+ plog_warn("\tport %u: unable to change port mac address as secondary process\n", port_id);
+ } else if ((rc = rte_eth_dev_default_mac_addr_set(port_id, &port_cfg->eth_addr)) != 0)
+ plog_warn("\tport %u: failed to set mac address. Error = %d\n", port_id, rc);
+ else
+ plog_info("Setting MAC to "MAC_BYTES_FMT"\n", MAC_BYTES(port_cfg->eth_addr.addr_bytes));
break;
}
}
diff --git a/VNFs/DPPD-PROX/prox_port_cfg.h b/VNFs/DPPD-PROX/prox_port_cfg.h
index 455e2b2e..82d58f76 100644
--- a/VNFs/DPPD-PROX/prox_port_cfg.h
+++ b/VNFs/DPPD-PROX/prox_port_cfg.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,17 +21,25 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_version.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(22,11,0,0)
+#include <bus_pci_driver.h> // Please configure DPDK with meson option -Denable_driver_sdk=true
+#else
#if RTE_VERSION >= RTE_VERSION_NUM(17,11,0,0)
#include <rte_bus_pci.h>
#endif
+#endif
#include <rte_pci.h>
+#include "prox_compat.h"
#include "prox_globals.h"
+#include "ip_subnet.h"
enum addr_type {PROX_PORT_MAC_HW, PROX_PORT_MAC_SET, PROX_PORT_MAC_RAND};
#define IPV4_CKSUM 1
#define UDP_CKSUM 2
+#define NB_MCAST_ADDR 16
+#define PROX_MAX_VLAN_TAGS 256
struct prox_port_cfg {
struct rte_mempool *pool[32]; /* Rx/Tx mempool */
@@ -49,10 +57,12 @@ struct prox_port_cfg {
uint32_t n_txd;
uint8_t link_up;
uint32_t link_speed;
+ uint32_t max_link_speed;
uint32_t mtu;
enum addr_type type;
- struct ether_addr eth_addr; /* port MAC address */
- char name[MAX_NAME_SIZE];
+ prox_rte_ether_addr eth_addr; /* port MAC address */
+ char names[PROX_MAX_VLAN_TAGS][MAX_NAME_BUFFER_SIZE];
+ char vdev[MAX_NAME_SIZE];
char short_name[MAX_NAME_SIZE];
char driver_name[MAX_NAME_SIZE];
char rx_ring[MAX_NAME_SIZE];
@@ -61,9 +71,31 @@ struct prox_port_cfg {
struct rte_eth_conf port_conf;
struct rte_eth_rxconf rx_conf;
struct rte_eth_txconf tx_conf;
+ uint64_t requested_rx_offload;
+ uint64_t requested_tx_offload;
+ uint64_t disabled_tx_offload;
+ struct rte_eth_dev_info dev_info;
struct {
int tx_offload_cksum;
} capabilities;
+ uint32_t max_rx_pkt_len;
+ uint32_t min_rx_bufsize;
+ uint16_t min_rx_desc;
+ uint16_t max_rx_desc;
+ uint16_t min_tx_desc;
+ uint16_t max_tx_desc;
+ uint32_t nb_mc_addr;
+ uint8_t available;
+ prox_rte_ether_addr mc_addr[NB_MCAST_ADDR];
+ int dpdk_mapping;
+ struct ip4_subnet ip_addr[PROX_MAX_VLAN_TAGS];
+ int fds[PROX_MAX_VLAN_TAGS];
+ uint32_t vlan_tags[PROX_MAX_VLAN_TAGS];
+ uint8_t is_vdev;
+ uint8_t virtual;
+ uint8_t all_rx_queues;
+ uint16_t n_vlans;
+ uint32_t v6_mask_length;
};
extern rte_atomic32_t lsc;
diff --git a/VNFs/DPPD-PROX/prox_shared.c b/VNFs/DPPD-PROX/prox_shared.c
index 890d564b..de26441d 100644
--- a/VNFs/DPPD-PROX/prox_shared.c
+++ b/VNFs/DPPD-PROX/prox_shared.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@
#include "log.h"
#include "prox_shared.h"
#include "prox_globals.h"
+#include "prox_compat.h"
#define INIT_HASH_TABLE_SIZE 8192
@@ -54,13 +55,14 @@ static void prox_sh_create_hash(struct prox_shared *ps, size_t size)
{
param.entries = size;
param.name = get_sh_name();
+ param.socket_id = rte_socket_id();
ps->hash = rte_hash_create(&param);
PROX_PANIC(ps->hash == NULL, "Failed to create hash table for shared data");
ps->size = size;
if (ps->size == INIT_HASH_TABLE_SIZE)
- plog_info("Shared data tracking hash table created with size %zu\n", ps->size);
+ plog_info("\tShared data tracking hash table created with size %zu\n", ps->size);
else
- plog_info("Shared data tracking hash table grew to %zu\n", ps->size);
+ plog_info("\tShared data tracking hash table grew to %zu\n", ps->size);
}
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
@@ -84,7 +86,7 @@ static int prox_sh_add(struct prox_shared *ps, const char *name, void *data)
char key[256] = {0};
int ret;
- strncpy(key, name, sizeof(key));
+ prox_strncpy(key, name, sizeof(key));
if (ps->size == 0) {
prox_sh_create_hash(ps, INIT_HASH_TABLE_SIZE);
}
@@ -121,7 +123,7 @@ static void *prox_sh_find(struct prox_shared *sh, const char *name)
if (!sh->hash)
return NULL;
- strncpy(key, name, sizeof(key));
+ prox_strncpy(key, name, sizeof(key));
ret = rte_hash_lookup_data(sh->hash, key, &data);
if (ret >= 0)
return data;
diff --git a/VNFs/DPPD-PROX/qinq.h b/VNFs/DPPD-PROX/qinq.h
index 14da9753..03c89b9b 100644
--- a/VNFs/DPPD-PROX/qinq.h
+++ b/VNFs/DPPD-PROX/qinq.h
@@ -18,6 +18,7 @@
#define _QINQ_H_
#include <rte_ether.h>
+#include "prox_compat.h"
struct my_vlan_hdr {
uint16_t eth_proto;
@@ -30,11 +31,11 @@ struct vlans {
};
struct qinq_hdr {
- struct ether_addr d_addr;
- struct ether_addr s_addr;
+ prox_rte_ether_addr d_addr;
+ prox_rte_ether_addr s_addr;
struct my_vlan_hdr svlan;
struct my_vlan_hdr cvlan;
uint16_t ether_type;
-} __attribute__((packed));
+} __attribute__((packed)) __attribute__((__aligned__(2)));
#endif /* _QINQ_H_ */
diff --git a/VNFs/DPPD-PROX/quit.h b/VNFs/DPPD-PROX/quit.h
index a01c0a02..c3cd0569 100644
--- a/VNFs/DPPD-PROX/quit.h
+++ b/VNFs/DPPD-PROX/quit.h
@@ -25,6 +25,7 @@
#include "display.h"
#include "prox_cfg.h"
+#include "log.h"
/* PROX_PANIC for checks that are possibly hit due to configuration or
when feature is not implemented. */
@@ -33,6 +34,8 @@
if (cond) { \
plog_info(__VA_ARGS__); \
display_end(); \
+ plog_end(); \
+ plog_info(__VA_ARGS__); \
if (prox_cfg.flags & DSF_DAEMON) { \
pid_t ppid = getppid(); \
plog_info("sending SIGUSR2 to %d\n", ppid);\
diff --git a/VNFs/DPPD-PROX/run.c b/VNFs/DPPD-PROX/run.c
index 3abdb819..c05f0a9f 100644
--- a/VNFs/DPPD-PROX/run.c
+++ b/VNFs/DPPD-PROX/run.c
@@ -78,8 +78,11 @@ static void update_link_states(void)
port_cfg = &prox_port_cfg[portid];
rte_eth_link_get_nowait(portid, &link);
- port_cfg->link_up = link.link_status;
port_cfg->link_speed = link.link_speed;
+ if (port_cfg->link_up != link.link_status) {
+ port_cfg->link_up = link.link_status;
+ plog_info("port %d: Link speed now %d Mbps\n", portid, link.link_speed);
+ }
}
}
@@ -234,6 +237,13 @@ void __attribute__((noreturn)) run(uint32_t flags)
if (stop_tsc && rte_rdtsc() >= stop_tsc) {
stop_prox = 1;
}
+ if ((prox_cfg.heartbeat_tsc) && (prox_cfg.heartbeat_timeout) && (rte_rdtsc() >= prox_cfg.heartbeat_tsc)) {
+ plog_info("Stopping to handle client as heartbeat timed out\n");
+ stop_core_all(-1);
+ stop_handling_client();
+ req_refresh();
+ prox_cfg.heartbeat_tsc = 0;
+ }
}
} else {
while (stop_prox == 0) {
@@ -251,6 +261,13 @@ void __attribute__((noreturn)) run(uint32_t flags)
if (stop_tsc && rte_rdtsc() >= stop_tsc) {
stop_prox = 1;
}
+ if ((prox_cfg.heartbeat_tsc) && (prox_cfg.heartbeat_timeout) && (rte_rdtsc() >= prox_cfg.heartbeat_tsc)) {
+ plog_info("Stopping to handle client as heartbeat timed out\n");
+ stop_core_all(-1);
+ stop_handling_client();
+ req_refresh();
+ prox_cfg.heartbeat_tsc = 0;
+ }
}
}
diff --git a/VNFs/DPPD-PROX/rw_reg.c b/VNFs/DPPD-PROX/rw_reg.c
index a0e59085..b4f6c214 100644
--- a/VNFs/DPPD-PROX/rw_reg.c
+++ b/VNFs/DPPD-PROX/rw_reg.c
@@ -14,6 +14,10 @@
// limitations under the License.
*/
+#include <rte_version.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(21,11,0,0)
+#include <ethdev_driver.h> // Please configure DPDK with meson option -Denable_driver_sdk=true
+#endif
#include <rte_ethdev.h>
#include "rw_reg.h"
diff --git a/VNFs/DPPD-PROX/rx_pkt.c b/VNFs/DPPD-PROX/rx_pkt.c
index f6adeb4b..e1756cb3 100644
--- a/VNFs/DPPD-PROX/rx_pkt.c
+++ b/VNFs/DPPD-PROX/rx_pkt.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -28,7 +28,10 @@
#include "arp.h"
#include "tx_pkt.h"
#include "handle_master.h"
-#include "input.h" /* Needed for callback on dump */
+#include "input.h"
+#include "prox_ipv6.h" /* Needed for callback on dump */
+
+#define TCP_PORT_BGP rte_cpu_to_be_16(179)
/* _param version of the rx_pkt_hw functions are used to create two
instances of very similar variations of these functions. The
@@ -42,7 +45,9 @@
packets are received if the dequeue step involves finding 32 packets.
*/
-#define MIN_PMD_RX 32
+#define MIN_PMD_RX 32
+#define PROX_L3 1
+#define PROX_NDP 2
static uint16_t rx_pkt_hw_port_queue(struct port_queue *pq, struct rte_mbuf **mbufs, int multi)
{
@@ -105,11 +110,107 @@ static inline void dump_l3(struct task_base *tbase, struct rte_mbuf *mbuf)
}
}
+static inline void handle_ipv4(struct task_base *tbase, struct rte_mbuf **mbufs, int i, prox_rte_ipv4_hdr *pip, int *skip)
+{
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr *)(pip + 1);
+ if (pip->next_proto_id == IPPROTO_ICMP) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ICMP_TO_MASTER, mbufs[i]);
+ (*skip)++;
+	} else if ((pip->next_proto_id == IPPROTO_TCP) && ((tcp->src_port == TCP_PORT_BGP) || (tcp->dst_port == TCP_PORT_BGP))) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, BGP_TO_MASTER, mbufs[i]);
+ (*skip)++;
+ } else if (unlikely(*skip)) {
+ mbufs[i - *skip] = mbufs[i];
+ }
+}
+static inline int handle_l3(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
+{
+ struct rte_mbuf **mbufs = *mbufs_ptr;
+ int i;
+ struct ether_hdr_arp *hdr_arp[MAX_PKT_BURST];
+ prox_rte_ether_hdr *hdr;
+ prox_rte_ipv4_hdr *pip;
+ prox_rte_vlan_hdr *vlan;
+ int skip = 0;
+
+ for (i = 0; i < nb_rx; i++) {
+ PREFETCH0(mbufs[i]);
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ hdr_arp[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
+ PREFETCH0(hdr_arp[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ if (likely(hdr_arp[i]->ether_hdr.ether_type == ETYPE_IPv4)) {
+ hdr = (prox_rte_ether_hdr *)hdr_arp[i];
+ pip = (prox_rte_ipv4_hdr *)(hdr + 1);
+ handle_ipv4(tbase, mbufs, i, pip, &skip);
+ } else {
+ switch (hdr_arp[i]->ether_hdr.ether_type) {
+ case ETYPE_VLAN:
+ hdr = (prox_rte_ether_hdr *)hdr_arp[i];
+ vlan = (prox_rte_vlan_hdr *)(hdr + 1);
+ if (vlan->eth_proto == ETYPE_IPv4) {
+ pip = (prox_rte_ipv4_hdr *)(vlan + 1);
+ handle_ipv4(tbase, mbufs, i, pip, &skip);
+ } else if (vlan->eth_proto == ETYPE_ARP) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
+ skip++;
+ }
+ break;
+ case ETYPE_ARP:
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
+ skip++;
+ break;
+ default:
+ if (unlikely(skip)) {
+ mbufs[i - skip] = mbufs[i];
+ }
+ }
+ }
+ }
+ return skip;
+}
+
+static inline int handle_ndp(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
+{
+ struct rte_mbuf **mbufs = *mbufs_ptr;
+ prox_rte_ipv6_hdr *ipv6_hdr;
+ int i;
+ prox_rte_ether_hdr *hdr[MAX_PKT_BURST];
+ int skip = 0;
+ uint16_t vlan = 0;
+
+ for (i = 0; i < nb_rx; i++) {
+ PREFETCH0(mbufs[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[i]);
+ }
+ for (i = 0; i < nb_rx; i++) {
+ ipv6_hdr = prox_get_ipv6_hdr(hdr[i], rte_pktmbuf_pkt_len(mbufs[i]), &vlan);
+ if (unlikely((ipv6_hdr) && (ipv6_hdr->proto == ICMPv6))) {
+ dump_l3(tbase, mbufs[i]);
+ tx_ring(tbase, tbase->l3.ctrl_plane_ring, NDP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
+ skip++;
+ } else if (unlikely(skip)) {
+ mbufs[i - skip] = mbufs[i];
+ }
+ }
+ return skip;
+}
+
static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi,
- void (*next)(struct rx_params_hw *rx_param_hw), int l3)
+ void (*next)(struct rx_params_hw *rx_param_hw), int l3_ndp)
{
uint8_t last_read_portid;
- uint16_t nb_rx;
+ uint16_t nb_rx, ret;
int skip = 0;
START_EMPTY_MEASSURE();
@@ -122,30 +223,13 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf
nb_rx = rx_pkt_hw_port_queue(pq, *mbufs_ptr, multi);
next(&tbase->rx_params_hw);
- if (l3) {
- struct rte_mbuf **mbufs = *mbufs_ptr;
- int i;
- struct ether_hdr_arp *hdr[MAX_PKT_BURST];
- for (i = 0; i < nb_rx; i++) {
- PREFETCH0(mbufs[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
- PREFETCH0(hdr[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- if (unlikely(hdr[i]->ether_hdr.ether_type == ETYPE_ARP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- }
- }
+ if (l3_ndp == PROX_L3)
+ skip = handle_l3(tbase, nb_rx, mbufs_ptr);
+ else if (l3_ndp == PROX_NDP)
+ skip = handle_ndp(tbase, nb_rx, mbufs_ptr);
if (skip)
- TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, skip);
+ TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);
if (likely(nb_rx > 0)) {
TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
return nb_rx - skip;
@@ -154,7 +238,7 @@ static uint16_t rx_pkt_hw_param(struct task_base *tbase, struct rte_mbuf ***mbuf
return 0;
}
-static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, int l3)
+static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf ***mbufs_ptr, int multi, int l3_ndp)
{
uint16_t nb_rx, n;
int skip = 0;
@@ -178,36 +262,21 @@ static inline uint16_t rx_pkt_hw1_param(struct task_base *tbase, struct rte_mbuf
}
}
- if (l3) {
- struct rte_mbuf **mbufs = *mbufs_ptr;
- int i;
- struct ether_hdr_arp *hdr[MAX_PKT_BURST];
- for (i = 0; i < nb_rx; i++) {
- PREFETCH0(mbufs[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- hdr[i] = rte_pktmbuf_mtod(mbufs[i], struct ether_hdr_arp *);
- PREFETCH0(hdr[i]);
- }
- for (i = 0; i < nb_rx; i++) {
- if (unlikely(hdr[i]->ether_hdr.ether_type == ETYPE_ARP)) {
- dump_l3(tbase, mbufs[i]);
- tx_ring(tbase, tbase->l3.ctrl_plane_ring, ARP_TO_CTRL, mbufs[i]);
- skip++;
- } else if (unlikely(skip)) {
- mbufs[i - skip] = mbufs[i];
- }
- }
+ if (unlikely(nb_rx == 0)) {
+ TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc);
+ return 0;
}
+ if (l3_ndp == PROX_L3)
+ skip = handle_l3(tbase, nb_rx, mbufs_ptr);
+ else if (l3_ndp == PROX_NDP)
+ skip = handle_ndp(tbase, nb_rx, mbufs_ptr);
+
if (skip)
- TASK_STATS_ADD_DROP_HANDLED(&tbase->aux->stats, skip);
- if (likely(nb_rx > 0)) {
- TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
- return nb_rx - skip;
- }
- TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc);
- return 0;
+ TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);
+
+ TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
+ return nb_rx - skip;
}
uint16_t rx_pkt_hw(struct task_base *tbase, struct rte_mbuf ***mbufs)
@@ -242,32 +311,62 @@ uint16_t rx_pkt_hw1_multi(struct task_base *tbase, struct rte_mbuf ***mbufs)
uint16_t rx_pkt_hw_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 0, next_port, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port, PROX_NDP);
}
uint16_t rx_pkt_hw_pow2_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_pow2_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 0, next_port_pow2, PROX_NDP);
}
uint16_t rx_pkt_hw1_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw1_param(tbase, mbufs, 0, 1);
+ return rx_pkt_hw1_param(tbase, mbufs, 0, PROX_L3);
+}
+
+uint16_t rx_pkt_hw1_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw1_param(tbase, mbufs, 0, PROX_NDP);
}
uint16_t rx_pkt_hw_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 1, next_port, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port, PROX_NDP);
}
uint16_t rx_pkt_hw_pow2_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, 1);
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, PROX_L3);
+}
+
+uint16_t rx_pkt_hw_pow2_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw_param(tbase, mbufs, 1, next_port_pow2, PROX_NDP);
}
uint16_t rx_pkt_hw1_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs)
{
- return rx_pkt_hw1_param(tbase, mbufs, 1, 1);
+ return rx_pkt_hw1_param(tbase, mbufs, 1, PROX_L3);
+}
+
+uint16_t rx_pkt_hw1_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs)
+{
+ return rx_pkt_hw1_param(tbase, mbufs, 1, PROX_NDP);
}
/* The following functions implement ring access */
@@ -508,36 +607,3 @@ uint16_t rx_pkt_tsc(struct task_base *tbase, struct rte_mbuf ***mbufs)
return ret;
}
-
-uint16_t rx_pkt_all(struct task_base *tbase, struct rte_mbuf ***mbufs)
-{
- uint16_t tot = 0;
- uint16_t ret = 0;
- struct rte_mbuf **new_mbufs;
- struct rte_mbuf **dst = tbase->aux->all_mbufs;
-
- /* In case we receive less than MAX_PKT_BURST packets in one
- iteration, do no perform any copying of mbuf pointers. Use
- the buffer itself instead. */
- ret = call_prev_rx_pkt(tbase, &new_mbufs);
- if (ret < MAX_PKT_BURST/2) {
- *mbufs = new_mbufs;
- return ret;
- }
-
- memcpy(dst + tot, new_mbufs, ret * sizeof(*dst));
- tot += ret;
- *mbufs = dst;
-
- do {
- ret = call_prev_rx_pkt(tbase, &new_mbufs);
- memcpy(dst + tot, new_mbufs, ret * sizeof(*dst));
- tot += ret;
- } while (ret == MAX_PKT_BURST/2 && tot < MAX_RX_PKT_ALL - MAX_PKT_BURST);
-
- if (tot >= MAX_RX_PKT_ALL - MAX_PKT_BURST) {
- plog_err("Could not receive all packets - buffer full\n");
- }
-
- return tot;
-}
diff --git a/VNFs/DPPD-PROX/rx_pkt.h b/VNFs/DPPD-PROX/rx_pkt.h
index 6d8f412c..c610ed98 100644
--- a/VNFs/DPPD-PROX/rx_pkt.h
+++ b/VNFs/DPPD-PROX/rx_pkt.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -29,6 +29,9 @@ uint16_t rx_pkt_hw1(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_hw_l3(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_hw_pow2_l3(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_hw1_l3(struct task_base *tbase, struct rte_mbuf ***mbufs);
+uint16_t rx_pkt_hw_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs);
+uint16_t rx_pkt_hw_pow2_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs);
+uint16_t rx_pkt_hw1_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs);
/* The _multi variation of the function is used to work-around the
problem with QoS, multi-seg mbufs and vector PMD. When vector
@@ -40,6 +43,9 @@ uint16_t rx_pkt_hw1_multi(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_hw_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_hw_pow2_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_hw1_multi_l3(struct task_base *tbase, struct rte_mbuf ***mbufs);
+uint16_t rx_pkt_hw_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs);
+uint16_t rx_pkt_hw_pow2_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs);
+uint16_t rx_pkt_hw1_multi_ndp(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_sw(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint16_t rx_pkt_sw_pow2(struct task_base *tbase, struct rte_mbuf ***mbufs);
diff --git a/VNFs/DPPD-PROX/stats_irq.h b/VNFs/DPPD-PROX/stats_irq.h
index 71ff80f7..9a3f6c2f 100644
--- a/VNFs/DPPD-PROX/stats_irq.h
+++ b/VNFs/DPPD-PROX/stats_irq.h
@@ -51,7 +51,7 @@ struct irq_task_stats {
struct irq_rt_stats *stats;
};
-uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
+extern uint64_t irq_bucket_maxtime_cycles[IRQ_BUCKETS_COUNT];
extern uint64_t irq_bucket_maxtime_micro[];
void stats_irq_reset(void);
diff --git a/VNFs/DPPD-PROX/stats_latency.c b/VNFs/DPPD-PROX/stats_latency.c
index 52027892..5b2989df 100644
--- a/VNFs/DPPD-PROX/stats_latency.c
+++ b/VNFs/DPPD-PROX/stats_latency.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -22,6 +22,7 @@
struct stats_latency_manager_entry {
struct task_lat *task;
+ uint32_t bucket_size;
uint8_t lcore_id;
uint8_t task_id;
struct lat_test lat_test;
@@ -32,6 +33,7 @@ struct stats_latency_manager_entry {
struct stats_latency_manager {
uint16_t n_latency;
+ uint32_t bucket_size;
struct stats_latency_manager_entry entries[0]; /* copy of stats when running update stats. */
};
@@ -48,6 +50,11 @@ int stats_get_n_latency(void)
return slm->n_latency;
}
+int stats_get_latency_bucket_size(void)
+{
+ return slm->bucket_size;
+}
+
uint32_t stats_latency_get_core_id(uint32_t i)
{
return slm->entries[i].lcore_id;
@@ -63,6 +70,16 @@ struct stats_latency *stats_latency_get(uint32_t i)
return &slm->entries[i].stats;
}
+uint64_t *stats_latency_get_bucket(uint32_t i)
+{
+ return slm->entries[i].lat_test.buckets;
+}
+
+uint64_t *stats_latency_get_tot_bucket(uint32_t i)
+{
+ return slm->entries[i].tot_lat_test.buckets;
+}
+
struct stats_latency *stats_latency_tot_get(uint32_t i)
{
return &slm->entries[i].tot;
@@ -104,11 +121,11 @@ struct stats_latency *stats_latency_find(uint32_t lcore_id, uint32_t task_id)
static int task_runs_observable_latency(struct task_args *targ)
{
- /* TODO: make this work with multiple ports and with
- rings. Currently, only showing lat tasks which have 1 RX
- port. */
+ /* Note that multiple ports or rings are only supported
+ if they all receive packets configured in the same way
+ e.g. same timestamp pos. */
return !strcmp(targ->task_init->mode_str, "lat") &&
- (targ->nb_rxports == 1 || targ->nb_rxrings == 1);
+ (targ->nb_rxports >= 1 || targ->nb_rxrings >= 1);
}
static struct stats_latency_manager *alloc_stats_latency_manager(void)
@@ -140,8 +157,14 @@ static void stats_latency_add_task(struct lcore_cfg *lconf, struct task_args *ta
struct stats_latency_manager_entry *new_entry = &slm->entries[slm->n_latency];
new_entry->task = (struct task_lat *)targ->tbase;
+ new_entry->bucket_size = task_lat_get_latency_bucket_size(new_entry->task);
new_entry->lcore_id = lconf->id;
new_entry->task_id = targ->id;
+ new_entry->tot_lat_test.min_lat = -1;
+ if (slm->bucket_size == 0)
+ slm->bucket_size = new_entry->bucket_size;
+ else if (slm->bucket_size != new_entry->bucket_size)
+		plog_err("Different latency bucket sizes per task are not supported - using bucket size from first task (%d)\n", slm->bucket_size);
slm->n_latency++;
}
@@ -205,6 +228,9 @@ static void stats_latency_from_lat_test(struct stats_latency *dst, struct lat_te
dst->tot_packets = src->tot_pkts;
dst->tot_all_packets = src->tot_all_pkts;
dst->lost_packets = src->lost_packets;
+ dst->mis_ordered = src->mis_ordered;
+ dst->extent = src->extent;
+ dst->duplicate = src->duplicate;
}
static void stats_latency_update_entry(struct stats_latency_manager_entry *entry)
diff --git a/VNFs/DPPD-PROX/stats_latency.h b/VNFs/DPPD-PROX/stats_latency.h
index 83cd4a18..833bbff4 100644
--- a/VNFs/DPPD-PROX/stats_latency.h
+++ b/VNFs/DPPD-PROX/stats_latency.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -29,6 +29,9 @@ struct stats_latency {
struct time_unit accuracy_limit;
uint64_t lost_packets;
+ uint64_t mis_ordered;
+ uint64_t extent;
+ uint64_t duplicate;
uint64_t tot_packets;
uint64_t tot_all_packets;
};
@@ -36,6 +39,8 @@ struct stats_latency {
uint32_t stats_latency_get_core_id(uint32_t i);
uint32_t stats_latency_get_task_id(uint32_t i);
struct stats_latency *stats_latency_get(uint32_t i);
+uint64_t *stats_latency_get_bucket(uint32_t i);
+uint64_t *stats_latency_get_tot_bucket(uint32_t i);
struct stats_latency *stats_latency_find(uint32_t lcore_id, uint32_t task_id);
struct stats_latency *stats_latency_tot_get(uint32_t i);
@@ -46,6 +51,7 @@ void stats_latency_update(void);
void stats_latency_reset(void);
int stats_get_n_latency(void);
+int stats_get_latency_bucket_size(void);
#ifdef LATENCY_HISTOGRAM
void stats_core_lat_histogram(uint8_t lcore_id, uint8_t task_id, uint64_t **buckets);
diff --git a/VNFs/DPPD-PROX/stats_parser.c b/VNFs/DPPD-PROX/stats_parser.c
index 37e1781b..c9428072 100644
--- a/VNFs/DPPD-PROX/stats_parser.c
+++ b/VNFs/DPPD-PROX/stats_parser.c
@@ -31,6 +31,7 @@
#include "stats_global.h"
#include "stats_prio_task.h"
#include "stats_irq.h"
+#include "prox_compat.h"
struct stats_path_str {
const char *str;
@@ -149,6 +150,23 @@ static uint64_t sp_task_drop_handled(int argc, const char *argv[])
return stats_get_task_stats_sample(c, t, 1)->drop_handled;
}
+static uint64_t sp_task_rx_non_dp(int argc, const char *argv[])
+{
+
+ uint32_t c, t;
+ if (args_to_core_task(argv[0], argv[1], &c, &t))
+ return -1;
+ return stats_get_task_stats_sample(c, t, 1)->rx_non_dp;
+}
+
+static uint64_t sp_task_tx_non_dp(int argc, const char *argv[])
+{
+
+ uint32_t c, t;
+ if (args_to_core_task(argv[0], argv[1], &c, &t))
+ return -1;
+ return stats_get_task_stats_sample(c, t, 1)->tx_non_dp;
+}
static uint64_t sp_task_rx_bytes(int argc, const char *argv[])
{
return -1;
@@ -790,6 +808,8 @@ struct stats_path_str stats_paths[] = {
{"task.core(#).task(#).rx_prio(#)", sp_task_rx_prio},
{"task.core(#).task(#).max_irq", sp_task_max_irq},
{"task.core(#).task(#).irq(#)", sp_task_irq},
+ {"task.core(#).task(#).rx_non_dp", sp_task_rx_non_dp},
+ {"task.core(#).task(#).tx_non_dp", sp_task_tx_non_dp},
{"port(#).no_mbufs", sp_port_no_mbufs},
{"port(#).ierrors", sp_port_ierrors},
@@ -891,7 +911,7 @@ uint64_t stats_parser_get(const char *stats_path)
char stats_path_cpy[128];
- strncpy(stats_path_cpy, stats_path, sizeof(stats_path_cpy));
+ prox_strncpy(stats_path_cpy, stats_path, sizeof(stats_path_cpy));
stats_path_len = strlen(stats_path);
size_t max_argc = 16;
diff --git a/VNFs/DPPD-PROX/stats_port.c b/VNFs/DPPD-PROX/stats_port.c
index b5e70dcc..fb6cf10a 100644
--- a/VNFs/DPPD-PROX/stats_port.c
+++ b/VNFs/DPPD-PROX/stats_port.c
@@ -18,6 +18,9 @@
#include <stdio.h>
#include <rte_version.h>
+#if RTE_VERSION >= RTE_VERSION_NUM(21,11,0,0)
+#include <ethdev_driver.h> // Please configure DPDK with meson option -Denable_driver_sdk=true
+#endif
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>
@@ -28,6 +31,7 @@
#include "stats_port.h"
#include "prox_port_cfg.h"
#include "rw_reg.h"
+#include "prox_compat.h"
#if defined(PROX_STATS) && defined(PROX_HW_DIRECT_STATS)
@@ -168,7 +172,10 @@ void stats_port_init(void)
for (uint8_t port_id = 0; port_id < nb_interface; ++port_id) {
if (prox_port_cfg[port_id].active) {
#if RTE_VERSION >= RTE_VERSION_NUM(16,7,0,0)
- num_xstats[port_id] = rte_eth_xstats_get_names(port_id, NULL, 0);
+ if ((num_xstats[port_id] = rte_eth_xstats_get_names(port_id, NULL, 0)) < 0) {
+ plog_err("\tport %u: rte_eth_xstats_get_names returns %d\n", port_id, num_xstats[port_id]);
+ continue;
+ }
eth_xstat_names[port_id] = prox_zmalloc(num_xstats[port_id] * sizeof(struct rte_eth_xstat_name), prox_port_cfg[port_id].socket);
PROX_PANIC(eth_xstat_names[port_id] == NULL, "Error allocating memory for xstats");
num_xstats[port_id] = rte_eth_xstats_get_names(port_id, eth_xstat_names[port_id], num_xstats[port_id]);
@@ -285,12 +292,23 @@ static void nic_read_stats(uint8_t port_id)
dropped by the nic". Note that in case CRC
is stripped on ixgbe, the CRC bytes are not
counted. */
- if (prox_port_cfg[port_id].port_conf.rxmode.hw_strip_crc == 1)
+#if defined (RTE_ETH_RX_OFFLOAD_CRC_STRIP)
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_CRC_STRIP)
stats->rx_bytes = eth_stat.ibytes +
(24 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
else
stats->rx_bytes = eth_stat.ibytes +
(20 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
+#else
+#if defined (RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ if (prox_port_cfg[port_id].requested_rx_offload & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ stats->rx_bytes = eth_stat.ibytes +
+ (20 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
+ else
+ stats->rx_bytes = eth_stat.ibytes +
+ (24 * eth_stat.ipackets - 20 * (eth_stat.ierrors + eth_stat.imissed));
+#endif
+#endif
}
} else if (strcmp(prox_port_cfg[port_id].short_name, "i40e_vf") == 0) {
// For I40E VF, imissed already part of received packets
diff --git a/VNFs/DPPD-PROX/stats_task.c b/VNFs/DPPD-PROX/stats_task.c
index 6b4dc2dd..3f138982 100644
--- a/VNFs/DPPD-PROX/stats_task.c
+++ b/VNFs/DPPD-PROX/stats_task.c
@@ -58,6 +58,8 @@ void stats_task_reset(void)
cur_task_stats->tot_drop_tx_fail = 0;
cur_task_stats->tot_drop_discard = 0;
cur_task_stats->tot_drop_handled = 0;
+ cur_task_stats->tot_rx_non_dp = 0;
+ cur_task_stats->tot_tx_non_dp = 0;
}
}
@@ -71,6 +73,11 @@ uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id)
return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_pkt_count;
}
+uint64_t stats_core_task_tot_tx_fail(uint8_t lcore_id, uint8_t task_id)
+{
+ return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail;
+}
+
uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id)
{
return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_tx_fail +
@@ -78,6 +85,16 @@ uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id)
lcore_task_stats_all[lcore_id].task_stats[task_id].tot_drop_handled;
}
+uint64_t stats_core_task_tot_tx_non_dp(uint8_t lcore_id, uint8_t task_id)
+{
+ return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_tx_non_dp;
+}
+
+uint64_t stats_core_task_tot_rx_non_dp(uint8_t lcore_id, uint8_t task_id)
+{
+ return lcore_task_stats_all[lcore_id].task_stats[task_id].tot_rx_non_dp;
+}
+
uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id)
{
return lcore_task_stats_all[lcore_id].task_stats[task_id].sample[last_stat].tsc;
@@ -102,6 +119,8 @@ void stats_task_post_proc(void)
cur_task_stats->tot_drop_tx_fail += last->drop_tx_fail - prev->drop_tx_fail;
cur_task_stats->tot_drop_discard += last->drop_discard - prev->drop_discard;
cur_task_stats->tot_drop_handled += last->drop_handled - prev->drop_handled;
+ cur_task_stats->tot_rx_non_dp += last->rx_non_dp - prev->rx_non_dp;
+ cur_task_stats->tot_tx_non_dp += last->tx_non_dp - prev->tx_non_dp;
}
}
@@ -127,6 +146,8 @@ void stats_task_update(void)
last->tx_bytes = stats->tx_bytes;
last->rx_bytes = stats->rx_bytes;
last->drop_bytes = stats->drop_bytes;
+ last->rx_non_dp = stats->rx_non_dp;
+ last->tx_non_dp = stats->tx_non_dp;
after = rte_rdtsc();
last->tsc = (before >> 1) + (after >> 1);
}
diff --git a/VNFs/DPPD-PROX/stats_task.h b/VNFs/DPPD-PROX/stats_task.h
index 156eb326..001ebbc7 100644
--- a/VNFs/DPPD-PROX/stats_task.h
+++ b/VNFs/DPPD-PROX/stats_task.h
@@ -17,6 +17,11 @@
#ifndef _STATS_TASK_H_
#define _STATS_TASK_H_
+#include <rte_common.h>
+#ifndef __rte_cache_aligned
+#include <rte_memory.h>
+#endif
+
#include <inttypes.h>
#include "clock.h"
@@ -45,6 +50,8 @@ struct task_rt_stats {
uint64_t rx_bytes;
uint64_t tx_bytes;
uint64_t drop_bytes;
+ uint64_t rx_non_dp;
+ uint64_t tx_non_dp;
} __attribute__((packed)) __rte_cache_aligned;
#ifdef PROX_STATS
@@ -72,6 +79,14 @@ struct task_rt_stats {
(stats)->rx_pkt_count += ntx; \
} while (0) \
+#define TASK_STATS_ADD_RX_NON_DP(stats, ntx) do { \
+ (stats)->rx_non_dp += ntx; \
+ } while(0)
+
+#define TASK_STATS_ADD_TX_NON_DP(stats, ntx) do { \
+ (stats)->tx_non_dp += ntx; \
+ } while(0)
+
#define TASK_STATS_ADD_RX_BYTES(stats, bytes) do { \
(stats)->rx_bytes += bytes; \
} while (0) \
@@ -109,6 +124,8 @@ struct task_stats_sample {
uint64_t rx_bytes;
uint64_t tx_bytes;
uint64_t drop_bytes;
+ uint64_t rx_non_dp;
+ uint64_t tx_non_dp;
};
struct task_stats {
@@ -117,6 +134,8 @@ struct task_stats {
uint64_t tot_drop_discard;
uint64_t tot_drop_handled;
uint64_t tot_rx_pkt_count;
+ uint64_t tot_tx_non_dp;
+ uint64_t tot_rx_non_dp;
struct task_stats_sample sample[2];
@@ -139,7 +158,10 @@ void stats_task_get_host_rx_tx_packets(uint64_t *rx, uint64_t *tx, uint64_t *tsc
uint64_t stats_core_task_tot_rx(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_tx(uint8_t lcore_id, uint8_t task_id);
+uint64_t stats_core_task_tot_tx_fail(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_tot_drop(uint8_t lcore_id, uint8_t task_id);
uint64_t stats_core_task_last_tsc(uint8_t lcore_id, uint8_t task_id);
+uint64_t stats_core_task_tot_rx_non_dp(uint8_t lcore_id, uint8_t task_id);
+uint64_t stats_core_task_tot_tx_non_dp(uint8_t lcore_id, uint8_t task_id);
#endif /* _STATS_TASK_H_ */
diff --git a/VNFs/DPPD-PROX/swap_tap.cfg b/VNFs/DPPD-PROX/swap_tap.cfg
new file mode 100644
index 00000000..5777f9d9
--- /dev/null
+++ b/VNFs/DPPD-PROX/swap_tap.cfg
@@ -0,0 +1,50 @@
+;;
+;; Copyright (c) 2020 Intel Corporation
+;;
+;; Licensed under the Apache License, Version 2.0 (the "License");
+;; you may not use this file except in compliance with the License.
+;; You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing, software
+;; distributed under the License is distributed on an "AS IS" BASIS,
+;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;; See the License for the specific language governing permissions and
+;; limitations under the License.
+;;
+
+[eal options]
+-n=4 ; force number of memory channels
+no-output=no ; disable DPDK debug output
+
+[port 0]
+name=p0
+vdev=swap_tap
+local ipv4=$ip2
+
+[defaults]
+mempool size=16K
+
+[global]
+start time=5
+name=Basic Gen
+
+[variables]
+$hex_ip1=0a 0a 0a 01
+$hex_ip2=0a 0a 0a 02
+$ip1=10.10.10.1
+$ip2=10.10.10.2
+
+[core 0s0]
+mode=master
+
+[core 1s0]
+name=swap
+task=0
+mode=swap
+sub mode=l3
+rx port=p0
+tx port=p0
+drop=no
+local ipv4=${ip2}
diff --git a/VNFs/DPPD-PROX/task_base.h b/VNFs/DPPD-PROX/task_base.h
index 00087ab6..89e5bb9d 100644
--- a/VNFs/DPPD-PROX/task_base.h
+++ b/VNFs/DPPD-PROX/task_base.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -36,6 +36,7 @@
#define TASK_FP_HANDLE_ARP 0x0040
#define TASK_TX_CRC 0x0080
#define TASK_L3 0x0100
+#define TASK_DO_NOT_FWD_GENEVE 0x0200
// flag_features 64 bits
#define TASK_FEATURE_ROUTING 0x0001
@@ -44,7 +45,7 @@
#define TASK_FEATURE_NEVER_DISCARDS 0x0008
#define TASK_FEATURE_NO_RX 0x0010
#define TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS 0x0020
-#define TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS 0x0040
+#define TASK_FEATURE_TXQ_FLAGS_MULTSEGS 0x0040
#define TASK_FEATURE_ZERO_RX 0x0080
#define TASK_FEATURE_TXQ_FLAGS_REFCOUNT 0x0100
#define TASK_FEATURE_TSC_RX 0x0200
@@ -53,13 +54,13 @@
#define TASK_FEATURE_LUT_QINQ_RSS 0x2000
#define TASK_FEATURE_LUT_QINQ_HASH 0x4000
#define TASK_FEATURE_RX_ALL 0x8000
-#define TASK_MULTIPLE_MAC 0x10000
+#define TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL 0x20000
-#define FLAG_TX_FLUSH 0x01
-#define FLAG_NEVER_FLUSH 0x02
+#define TBASE_FLAG_TX_FLUSH 0x01
+#define TBASE_FLAG_NEVER_FLUSH 0x02
// Task specific flags
-#define BASE_FLAG_LUT_QINQ_HASH 0x08
-#define BASE_FLAG_LUT_QINQ_RSS 0x10
+#define TBASE_FLAG_LUT_QINQ_HASH 0x08
+#define TBASE_FLAG_LUT_QINQ_RSS 0x10
#define OUT_DISCARD 0xFF
#define OUT_HANDLED 0xFE
@@ -153,7 +154,6 @@ typedef uint16_t (*rx_pkt_func) (struct task_base *tbase, struct rte_mbuf ***mbu
struct task_base_aux {
/* Not used when PROX_STATS is not defined */
struct task_rt_stats stats;
- struct task_rt_dump task_rt_dump;
/* Used if TASK_TSC_RX is enabled*/
struct {
@@ -163,8 +163,8 @@ struct task_base_aux {
struct rte_mbuf **all_mbufs;
- int rx_prev_count;
- int rx_prev_idx;
+ uint16_t rx_prev_count;
+ uint16_t rx_prev_idx;
uint16_t (*rx_pkt_prev[MAX_STACKED_RX_FUCTIONS])(struct task_base *tbase, struct rte_mbuf ***mbufs);
uint32_t rx_bucket[RX_BUCKET_SIZE];
@@ -174,9 +174,12 @@ struct task_base_aux {
int (*tx_pkt_hw)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
uint16_t (*tx_pkt_try)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
void (*stop)(struct task_base *tbase);
+ int (*tx_ctrlplane_pkt)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
void (*start)(struct task_base *tbase);
void (*stop_last)(struct task_base *tbase);
void (*start_first)(struct task_base *tbase);
+ struct task_rt_dump task_rt_dump;
+ struct rte_mbuf *mbuf;
};
/* The task_base is accessed for _all_ task types. In case
@@ -207,7 +210,6 @@ struct task_base {
struct tx_params_hw_sw tx_params_hw_sw;
};
struct l3_base l3;
- uint32_t local_ipv4;
} __attribute__((packed)) __rte_cache_aligned;
static void task_base_add_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_add)
diff --git a/VNFs/DPPD-PROX/task_init.c b/VNFs/DPPD-PROX/task_init.c
index 2361d32c..97f7188c 100644
--- a/VNFs/DPPD-PROX/task_init.c
+++ b/VNFs/DPPD-PROX/task_init.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -176,6 +176,8 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
if (targ->nb_rxports == 1) {
if (targ->flags & TASK_ARG_L3)
tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi_l3 : rx_pkt_hw1_l3;
+ else if (targ->flags & TASK_ARG_NDP)
+ tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi_ndp : rx_pkt_hw1_ndp;
else
tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw1_multi : rx_pkt_hw1;
tbase->rx_params_hw1.rx_pq.port = targ->rx_port_queue[0].port;
@@ -185,6 +187,8 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
PROX_ASSERT((targ->nb_rxports != 0) || (targ->task_init->flag_features & TASK_FEATURE_NO_RX));
if (targ->flags & TASK_ARG_L3)
tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi_l3 : rx_pkt_hw_l3;
+ else if (targ->flags & TASK_ARG_NDP)
+ tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi_ndp : rx_pkt_hw_ndp;
else
tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_multi : rx_pkt_hw;
tbase->rx_params_hw.nb_rxports = targ->nb_rxports;
@@ -198,6 +202,8 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
if (rte_is_power_of_2(targ->nb_rxports)) {
if (targ->flags & TASK_ARG_L3)
tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi_l3 : rx_pkt_hw_pow2_l3;
+ else if (targ->flags & TASK_ARG_NDP)
+ tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi_ndp : rx_pkt_hw_pow2_ndp;
else
tbase->rx_pkt = (targ->task_init->flag_features & TASK_FEATURE_MULTI_RX)? rx_pkt_hw_pow2_multi : rx_pkt_hw_pow2;
tbase->rx_params_hw.rxport_mask = targ->nb_rxports - 1;
@@ -208,7 +214,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
if ((targ->nb_txrings != 0) && (!targ->tx_opt_ring) && (!(targ->flags & TASK_ARG_DROP))) {
// Transmitting to a ring in NO DROP. We need to make sure the receiving task in not running on the same core.
// Otherwise we might end up in a dead lock: trying in a loop to transmit to a task which cannot receive anymore
- // (as npt being scheduled).
+ // (as not being scheduled).
struct core_task ct;
struct task_args *dtarg;
for (unsigned int j = 0; j < targ->nb_txrings; j++) {
@@ -277,6 +283,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
prev = prev->tx_opt_ring_task;
}
}
+
if (targ->nb_txrings == 1 || targ->nb_txports == 1 || targ->tx_opt_ring) {
if (targ->task_init->flag_features & TASK_FEATURE_NEVER_DISCARDS) {
if (targ->tx_opt_ring) {
@@ -295,7 +302,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_never_discard_sw1 : tx_pkt_no_drop_never_discard_hw1_lat_opt;
}
if ((targ->nb_txrings) || ((targ->task_init->flag_features & TASK_FEATURE_THROUGHPUT_OPT) == 0))
- tbase->flags |= FLAG_NEVER_FLUSH;
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
else
targ->lconf->flush_queues[targ->task] = flush_function(targ);
}
@@ -309,7 +316,7 @@ static size_t init_rx_tx_rings_ports(struct task_args *targ, struct task_base *t
else {
tbase->tx_pkt = targ->nb_txrings ? tx_pkt_no_drop_sw1 : tx_pkt_no_drop_hw1;
}
- tbase->flags |= FLAG_NEVER_FLUSH;
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
}
}
else {
@@ -345,22 +352,11 @@ struct task_base *init_task_struct(struct task_args *targ)
offset += t->size;
if (targ->nb_txrings == 0 && targ->nb_txports == 0)
- tbase->flags |= FLAG_NEVER_FLUSH;
+ tbase->flags |= TBASE_FLAG_NEVER_FLUSH;
offset = init_rx_tx_rings_ports(targ, tbase, offset);
tbase->aux = (struct task_base_aux *)(((uint8_t *)tbase) + offset);
- if (targ->nb_txports != 0) {
- if (targ->flags & TASK_ARG_L3) {
- tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
- tbase->tx_pkt = tx_pkt_l3;
- }
- }
-
- if (targ->task_init->flag_features & TASK_FEATURE_RX_ALL) {
- task_base_add_rx_pkt_function(tbase, rx_pkt_all);
- tbase->aux->all_mbufs = prox_zmalloc(MAX_RX_PKT_ALL * sizeof(* tbase->aux->all_mbufs), task_socket);
- }
if (targ->task_init->flag_features & TASK_FEATURE_TSC_RX) {
task_base_add_rx_pkt_function(tbase, rx_pkt_tsc);
}
@@ -369,13 +365,21 @@ struct task_base *init_task_struct(struct task_args *targ)
tbase->handle_bulk = t->handle;
- if (targ->flags & TASK_ARG_L3) {
- plog_info("\tTask configured in L3 mode\n");
+ if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP)) {
+ plog_info("\t\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
tbase->l3.ctrl_plane_ring = targ->ctrl_plane_ring;
- }
- if (targ->nb_txports != 0) {
- if (targ->flags & TASK_ARG_L3)
- task_init_l3(tbase, targ);
+ if (targ->nb_txports != 0) {
+ tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
+ tbase->aux->tx_ctrlplane_pkt = targ->nb_txrings ? tx_ctrlplane_sw : tx_ctrlplane_hw;
+ if (targ->flags & TASK_ARG_L3) {
+ tbase->tx_pkt = tx_pkt_l3;
+ task_init_l3(tbase, targ);
+ } else if (targ->flags & TASK_ARG_NDP) {
+ tbase->tx_pkt = tx_pkt_ndp;
+ task_init_l3(tbase, targ);
+ }
+ // Make sure control plane packets such as arp are not dropped
+ }
}
targ->tbase = tbase;
diff --git a/VNFs/DPPD-PROX/task_init.h b/VNFs/DPPD-PROX/task_init.h
index c5a17796..53bfaf35 100644
--- a/VNFs/DPPD-PROX/task_init.h
+++ b/VNFs/DPPD-PROX/task_init.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -22,11 +22,13 @@
#include <rte_common.h>
#include <rte_sched.h>
#include <rte_ether.h>
+#include "prox_compat.h"
#include "task_base.h"
#include "prox_globals.h"
#include "ip6_addr.h"
#include "flow_iter.h"
#include "parse_utils.h"
+#include "prox_compat.h"
struct rte_mbuf;
struct lcore_cfg;
@@ -47,6 +49,8 @@ struct lcore_cfg;
#define TASK_ARG_DO_NOT_SET_DST_MAC 0x400
#define TASK_ARG_HW_SRC_MAC 0x800
#define TASK_ARG_L3 0x1000
+#define TASK_ARG_NDP 0x2000
+#define TASK_ARG_SEND_NA_AT_STARTUP 0x4000
#define PROX_MODE_LEN 32
@@ -59,7 +63,7 @@ struct qos_cfg {
};
enum task_mode {NOT_SET, MASTER, QINQ_DECAP4, QINQ_DECAP6,
- QINQ_ENCAP4, QINQ_ENCAP6, GRE_DECAP, GRE_ENCAP,CGNAT, ESP_ENC, ESP_DEC,
+ QINQ_ENCAP4, QINQ_ENCAP6, GRE_DECAP, GRE_ENCAP,CGNAT, ESP_ENC, ESP_DEC, QOS, POLICE
};
struct task_args;
@@ -80,7 +84,6 @@ struct task_init {
size_t size;
uint16_t flag_req_data; /* flags from prox_shared.h */
uint64_t flag_features;
- uint32_t mbuf_size;
LIST_ENTRY(task_init) entries;
};
@@ -90,13 +93,21 @@ static int task_init_flag_set(struct task_init *task_init, uint64_t flag)
}
enum police_action {
- ACT_GREEN = e_RTE_METER_GREEN,
- ACT_YELLOW = e_RTE_METER_YELLOW,
- ACT_RED = e_RTE_METER_RED,
+ ACT_GREEN = RTE_COLOR_GREEN,
+ ACT_YELLOW = RTE_COLOR_YELLOW,
+ ACT_RED = RTE_COLOR_RED,
ACT_DROP = 3,
ACT_INVALID = 4
};
+struct range {
+ uint32_t min;
+ uint32_t value;
+ uint32_t max;
+ uint32_t offset;
+ uint8_t range_len;
+};
+
/* Configuration for task that is only used during startup. */
struct task_args {
struct task_base *tbase;
@@ -106,7 +117,6 @@ struct task_args {
struct lcore_cfg *lconf;
uint32_t nb_mbuf;
uint32_t mbuf_size;
- uint8_t mbuf_size_set_explicitely;
uint32_t nb_cache_mbuf;
uint8_t nb_slave_threads;
uint8_t nb_worker_threads;
@@ -126,16 +136,23 @@ struct task_args {
uint8_t tot_rxrings;
uint8_t nb_rxports;
uint32_t byte_offset;
+ uint32_t ipv6_router;
uint32_t gateway_ipv4;
uint32_t local_ipv4;
uint32_t remote_ipv4;
+ uint32_t local_prefix;
+ uint32_t reachable_timeout;
+ uint32_t arp_ndp_retransmit_timeout;
struct ipv6_addr local_ipv6; /* For IPv6 Tunnel, it's the local tunnel endpoint address */
+ struct ipv6_addr global_ipv6;
+ struct ipv6_addr gateway_ipv6;
+ struct ipv6_addr router_prefix;
struct rte_ring *rx_rings[MAX_RINGS_PER_TASK];
struct rte_ring *tx_rings[MAX_RINGS_PER_TASK];
struct rte_ring *ctrl_plane_ring;
uint32_t tot_n_txrings_inited;
- struct ether_addr edaddr;
- struct ether_addr esaddr;
+ prox_rte_ether_addr edaddr;
+ prox_rte_ether_addr esaddr;
struct port_queue tx_port_queue[PROX_MAX_PORTS];
struct port_queue rx_port_queue[PROX_MAX_PORTS];
/* Used to set up actual task at initialization time. */
@@ -182,10 +199,15 @@ struct task_args {
/* gen related*/
uint64_t rate_bps;
uint32_t n_rand_str;
- char rand_str[64][64];
+ uint32_t n_ranges;
uint32_t rand_offset[64];
+ char rand_str[64][64];
+ struct range range[64];
char pcap_file[256];
uint32_t accur_pos;
+ uint32_t flow_id_pos;
+ uint32_t packet_id_in_flow_pos;
+ uint32_t flow_count;
uint32_t sig_pos;
uint32_t sig;
uint32_t lat_pos;
@@ -194,8 +216,10 @@ struct task_args {
uint32_t bucket_size;
uint32_t lat_enabled;
uint32_t pkt_size;
- uint8_t pkt_inline[ETHER_MAX_LEN];
- uint32_t probability;
+ uint8_t pkt_inline[MAX_PKT_SIZE];
+ uint32_t probability_no_drop;
+ uint32_t probability_duplicate;
+ uint32_t probability_delay;
char nat_table[256];
uint32_t use_src;
char route_table[256];
@@ -232,6 +256,13 @@ struct task_args {
uint irq_debug;
struct task_base *tmaster;
char sub_mode_str[PROX_MODE_LEN];
+ uint32_t igmp_address;
+ uint32_t imix_nb_pkts;
+ uint32_t imix_pkt_sizes[MAX_IMIX_PKTS];
+ uint32_t multiplier;
+ uint32_t mirror_size;
+ uint32_t store_max;
+ uint32_t loss_buffer_size;
};
/* Return the first port that is reachable through the task. If the
diff --git a/VNFs/DPPD-PROX/thread_generic.c b/VNFs/DPPD-PROX/thread_generic.c
index f596bf25..39964dea 100644
--- a/VNFs/DPPD-PROX/thread_generic.c
+++ b/VNFs/DPPD-PROX/thread_generic.c
@@ -14,6 +14,7 @@
// limitations under the License.
*/
+#include <pthread.h>
#include <rte_cycles.h>
#include <rte_table_hash.h>
@@ -83,6 +84,26 @@ static uint64_t tsc_ctrl(struct lcore_cfg *lconf)
return lconf->ctrl_timeout;
}
+static void set_thread_policy(int policy)
+{
+ struct sched_param p;
+ int ret, old_policy, old_priority;
+
+ memset(&p, 0, sizeof(p));
+ ret = pthread_getschedparam(pthread_self(), &old_policy, &p);
+ if (ret) {
+ plog_err("Failed getting thread policy: %d\n", ret);
+ return;
+ }
+ old_priority = p.sched_priority;
+ p.sched_priority = sched_get_priority_max(policy);
+ ret = pthread_setschedparam(pthread_self(), policy, &p);
+ if (ret) {
+ plog_err("Failed setting thread priority: %d", ret);
+ } else
+ plog_info("Thread policy/priority changed from %d/%d to %d/%d\n", old_policy, old_priority, policy, p.sched_priority);
+}
+
int thread_generic(struct lcore_cfg *lconf)
{
struct task_base *tasks[MAX_TASKS_PER_CORE];
@@ -99,6 +120,9 @@ int thread_generic(struct lcore_cfg *lconf)
};
uint8_t n_tasks_run = lconf->n_tasks_run;
+ if (lconf->flags & LCONF_FLAG_SCHED_RR)
+ set_thread_policy(SCHED_RR);
+
if (lconf->period_func) {
tsc_tasks[2].tsc = cur_tsc + lconf->period_timeout;
tsc_tasks[2].tsc_task = tsc_period;
@@ -189,7 +213,6 @@ int thread_generic(struct lcore_cfg *lconf)
next[task_id] = t->handle_bulk(t, mbufs, nb_rx);
}
}
-
}
}
return 0;
diff --git a/VNFs/DPPD-PROX/toeplitz.c b/VNFs/DPPD-PROX/toeplitz.c
index 62424579..a9f7e585 100644
--- a/VNFs/DPPD-PROX/toeplitz.c
+++ b/VNFs/DPPD-PROX/toeplitz.c
@@ -25,9 +25,7 @@ uint8_t toeplitz_init_key[TOEPLITZ_KEY_LEN] =
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
uint32_t toeplitz_hash(uint8_t *buf_p, int buflen)
diff --git a/VNFs/DPPD-PROX/toeplitz.h b/VNFs/DPPD-PROX/toeplitz.h
index f24ae766..7cf052ae 100644
--- a/VNFs/DPPD-PROX/toeplitz.h
+++ b/VNFs/DPPD-PROX/toeplitz.h
@@ -17,7 +17,7 @@
#ifndef _TOEPLITZ_H_
#define _TOEPLITZ_H_
-#define TOEPLITZ_KEY_LEN 52
+#define TOEPLITZ_KEY_LEN 40
extern uint8_t toeplitz_init_key[TOEPLITZ_KEY_LEN];
uint32_t toeplitz_hash(uint8_t *buf_p, int buflen);
#endif
diff --git a/VNFs/DPPD-PROX/tx_pkt.c b/VNFs/DPPD-PROX/tx_pkt.c
index 49f46898..cd62cc54 100644
--- a/VNFs/DPPD-PROX/tx_pkt.c
+++ b/VNFs/DPPD-PROX/tx_pkt.c
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@
#include "log.h"
#include "mbuf_utils.h"
#include "handle_master.h"
+#include "defines.h"
static void buf_pkt_single(struct task_base *tbase, struct rte_mbuf *mbuf, const uint8_t out)
{
@@ -50,27 +51,118 @@ static inline void buf_pkt_all(struct task_base *tbase, struct rte_mbuf **mbufs,
}
#define MAX_PMD_TX 32
+void store_packet(struct task_base *tbase, struct rte_mbuf *mbuf)
+{
+ // If buffer is full, drop the first mbuf
+ if (tbase->aux->mbuf)
+ tx_drop(tbase->aux->mbuf);
+ tbase->aux->mbuf = mbuf;
+}
+
+int tx_pkt_ndp(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
+{
+ struct ipv6_addr ip_dst;
+ int first = 0, ret, ok = 0, rc;
+ const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
+	struct rte_mbuf *mbuf = NULL; // used when one needs to send both an ND and a mbuf
+ uint16_t vlan;
+ uint64_t tsc = rte_rdtsc();
+
+ for (int j = 0; j < n_pkts; j++) {
+ if ((out) && (out[j] >= OUT_HANDLED))
+ continue;
+ if (unlikely((rc = write_ip6_dst_mac(tbase, mbufs[j], &ip_dst, &vlan, tsc)) != SEND_MBUF)) {
+ if (j - first) {
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, j - first, out);
+ ok += ret;
+ }
+ first = j + 1;
+ switch(rc) {
+ case SEND_ARP_ND:
+ // Original mbuf (packet) is stored to be sent later -> need to allocate new mbuf
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ store_packet(tbase, mbufs[j]);
+ mbuf->port = tbase->l3.reachable_port_id;
+ tx_ring_cti6(tbase, tbase->l3.ctrl_plane_ring, IP6_REQ_MAC_TO_MASTER, mbuf, tbase->l3.core_id, tbase->l3.task_id, &ip_dst, vlan);
+ } else {
+ plog_err("Failed to get a mbuf from arp/nd mempool\n");
+ tx_drop(mbufs[j]);
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ }
+ break;
+ case SEND_MBUF_AND_ARP_ND:
+ // We send the mbuf and an ND - we need to allocate another mbuf for ND
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&mbuf);
+ if (likely(ret == 0)) {
+ mbuf->port = tbase->l3.reachable_port_id;
+ tx_ring_cti6(tbase, tbase->l3.ctrl_plane_ring, IP6_REQ_MAC_TO_MASTER, mbuf, tbase->l3.core_id, tbase->l3.task_id, &ip_dst, vlan);
+ } else {
+ plog_err("Failed to get a mbuf from arp/nd mempool\n");
+ // We still send the initial mbuf
+ }
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + j, 1, out);
+ break;
+ case DROP_MBUF:
+ tx_drop(mbufs[j]);
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ break;
+ }
+ }
+ }
+ if (n_pkts - first) {
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, n_pkts - first, out);
+ ok += ret;
+ }
+ return ok;
+}
int tx_pkt_l3(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out)
{
uint32_t ip_dst;
+ uint16_t vlan;
int first = 0, ret, ok = 0, rc;
const struct port_queue *port_queue = &tbase->tx_params_hw.tx_port_queue[0];
+ struct rte_mbuf *arp_mbuf = NULL; // used when one need to send both an ARP and a mbuf
+ uint64_t *time;
+ uint64_t tsc = rte_rdtsc();
for (int j = 0; j < n_pkts; j++) {
if ((out) && (out[j] >= OUT_HANDLED))
continue;
- if (unlikely((rc = write_dst_mac(tbase, mbufs[j], &ip_dst)) < 0)) {
+ if (unlikely((rc = write_dst_mac(tbase, mbufs[j], &ip_dst, &vlan, &time, tsc)) != SEND_MBUF)) {
if (j - first) {
ret = tbase->aux->tx_pkt_l2(tbase, mbufs + first, j - first, out);
ok += ret;
}
first = j + 1;
- if (rc == -1) {
+ switch(rc) {
+ case SEND_ARP_ND:
+			// We re-use the mbuf - no need to create an arp_mbuf and delete the existing mbuf
mbufs[j]->port = tbase->l3.reachable_port_id;
- tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, REQ_MAC_TO_CTRL, mbufs[j], tbase->l3.core_id, tbase->l3.task_id, ip_dst);
- } else if (rc == -2) {
+ if (tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, IP4_REQ_MAC_TO_MASTER, mbufs[j], tbase->l3.core_id, tbase->l3.task_id, ip_dst, vlan) == 0)
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 1000);
+ else
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 100);
+ break;
+ case SEND_MBUF_AND_ARP_ND:
+ // We send the mbuf and an ARP - we need to allocate another mbuf for ARP
+ ret = rte_mempool_get(tbase->l3.arp_nd_pool, (void **)&arp_mbuf);
+ if (likely(ret == 0)) {
+ arp_mbuf->port = tbase->l3.reachable_port_id;
+ if (tx_ring_cti(tbase, tbase->l3.ctrl_plane_ring, IP4_REQ_MAC_TO_MASTER, arp_mbuf, tbase->l3.core_id, tbase->l3.task_id, ip_dst, vlan) == 0)
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 1000);
+ else
+ update_arp_ndp_retransmit_timeout(&tbase->l3, time, 100);
+ } else {
+ plog_err("Failed to get a mbuf from arp mempool\n");
+ // We still send the initial mbuf
+ }
+ ret = tbase->aux->tx_pkt_l2(tbase, mbufs + j, 1, out);
+ break;
+ case DROP_MBUF:
tx_drop(mbufs[j]);
TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ break;
}
}
}
@@ -194,7 +286,7 @@ void flush_queues_hw(struct task_base *tbase)
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_sw(struct task_base *tbase)
@@ -211,7 +303,7 @@ void flush_queues_sw(struct task_base *tbase)
ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_no_drop_hw(struct task_base *tbase)
@@ -229,7 +321,7 @@ void flush_queues_no_drop_hw(struct task_base *tbase)
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
void flush_queues_no_drop_sw(struct task_base *tbase)
@@ -246,7 +338,7 @@ void flush_queues_no_drop_sw(struct task_base *tbase)
ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), prod - cons, tbase);
}
}
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
}
/* "try" functions try to send packets to sw/hw w/o failing or blocking;
@@ -335,7 +427,7 @@ int tx_pkt_no_drop_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct r
cons = tbase->ws_mbuf->idx[0].cons;
if ((uint16_t)(prod - cons)){
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[0].prod = 0;
tbase->ws_mbuf->idx[0].cons = 0;
ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
@@ -364,7 +456,7 @@ int tx_pkt_never_discard_hw1_thrpt_opt(struct task_base *tbase, struct rte_mbuf
cons = tbase->ws_mbuf->idx[0].cons;
if ((uint16_t)(prod - cons)){
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[0].prod = 0;
tbase->ws_mbuf->idx[0].cons = 0;
ret+= txhw_drop(&tbase->tx_params_hw.tx_port_queue[0], tbase->ws_mbuf->mbuf[0] + (cons & WS_MBUF_MASK), (uint16_t)(prod - cons), tbase);
@@ -503,7 +595,7 @@ int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret+= txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -524,7 +616,7 @@ int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret += ring_enq_no_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -545,7 +637,7 @@ int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret += txhw_drop(&tbase->tx_params_hw.tx_port_queue[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -565,7 +657,7 @@ int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts,
cons = tbase->ws_mbuf->idx[i].cons;
if (((uint16_t)(prod - cons)) >= MAX_PKT_BURST) {
- tbase->flags &= ~FLAG_TX_FLUSH;
+ tbase->flags &= ~TBASE_FLAG_TX_FLUSH;
tbase->ws_mbuf->idx[i].cons = cons + MAX_PKT_BURST;
ret+= ring_enq_drop(tbase->tx_params_sw.tx_rings[i], tbase->ws_mbuf->mbuf[i] + (cons & WS_MBUF_MASK), MAX_PKT_BURST, tbase);
}
@@ -619,7 +711,7 @@ static inline void trace_one_tx_pkt(struct task_base *tbase, struct rte_mbuf *mb
static void unset_trace(struct task_base *tbase)
{
if (0 == tbase->aux->task_rt_dump.n_trace) {
- if (tbase->tx_pkt == tx_pkt_l3) {
+ if ((tbase->tx_pkt == tx_pkt_l3) || (tbase->tx_pkt == tx_pkt_ndp)){
tbase->aux->tx_pkt_l2 = tbase->aux->tx_pkt_orig;
tbase->aux->tx_pkt_orig = NULL;
} else {
@@ -688,7 +780,7 @@ int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkt
ret = tbase->aux->tx_pkt_orig(tbase, mbufs, n_pkts, out);
if (0 == tbase->aux->task_rt_dump.n_print_tx) {
- if (tbase->tx_pkt == tx_pkt_l3) {
+ if ((tbase->tx_pkt == tx_pkt_l3) || (tbase->tx_pkt == tx_pkt_ndp)) {
tbase->aux->tx_pkt_l2 = tbase->aux->tx_pkt_orig;
tbase->aux->tx_pkt_orig = NULL;
} else {
@@ -745,33 +837,68 @@ int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n
}
return n_pkts;
}
+static inline void dump_pkts(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
+{
+ uint32_t n_dump = tbase->aux->task_rt_dump.n_print_tx;
+ uint32_t n_trace = tbase->aux->task_rt_dump.n_trace;
-static inline int tx_ring_all(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
+ if (unlikely(n_dump)) {
+ n_dump = n_pkts < n_dump? n_pkts : n_dump;
+ for (uint32_t i = 0; i < n_dump; ++i) {
+ plogdx_info(mbufs[i], "TX: ");
+ }
+ tbase->aux->task_rt_dump.n_print_tx -= n_dump;
+ } else if (unlikely(n_trace)) {
+ n_trace = n_pkts < n_trace? n_pkts : n_trace;
+ for (uint32_t i = 0; i < n_trace; ++i) {
+ plogdx_info(mbufs[i], "TX: ");
+ }
+ tbase->aux->task_rt_dump.n_trace -= n_trace;
+ }
+}
+
+// ctrlplane packets are slow path, hence the cost of checking if dump or trace is needed is not too important
+// easier to have this implementation than an implementation similar to dataplane tx
+int tx_ctrlplane_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
+{
+ dump_pkts(tbase, mbufs, n_pkts);
+ return txhw_no_drop(&tbase->tx_params_hw.tx_port_queue[0], mbufs, n_pkts, tbase);
+}
+
+int tx_ctrlplane_sw(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, __attribute__((unused)) uint8_t *out)
+{
+ dump_pkts(tbase, mbufs, n_pkts);
+ return ring_enq_no_drop(tbase->tx_params_sw.tx_rings[0], mbufs, n_pkts, tbase);
+}
+
+static inline int tx_ring_all(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
{
if (tbase->aux->task_rt_dump.cur_trace) {
trace_one_rx_pkt(tbase, mbuf);
}
- mbuf->udata64 = ((uint64_t)ip << 32) | (core_id << 16) | (task_id << 8) | command;
+ ctrl_ring_set_command_core_task_ip(mbuf, ((uint64_t)ip << 32) | (core_id << 16) | (task_id << 8) | command);
return rte_ring_enqueue(ring, mbuf);
}
-void tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip)
+int tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip, uint16_t vlan)
{
- plogx_dbg("\tSending command %s with ip %x to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], ip, ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ plogx_dbg("\tSending command %s with ip %d.%d.%d.%d to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ ctrl_ring_set_vlan(mbuf, vlan);
int ret = tx_ring_all(tbase, ring, command, mbuf, core_id, task_id, ip);
if (unlikely(ret != 0)) {
- plogx_dbg("\tFail to send command %s with ip %x to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], ip, ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ plogx_dbg("\tFail to send command %s with ip %d.%d.%d.%d to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
rte_pktmbuf_free(mbuf);
}
+ return ret;
}
-void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint32_t ip)
+void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint32_t ip)
{
- plogx_dbg("\tSending command %s with ip %x to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ip, ring, mbuf, rte_ring_free_count(ring));
+ plogx_dbg("\tSending command %s with ip %d.%d.%d.%d to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, rte_ring_free_count(ring));
int ret = tx_ring_all(tbase, ring, command, mbuf, 0, 0, ip);
if (unlikely(ret != 0)) {
- plogx_dbg("\tFail to send command %s with ip %x to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ip, ring, mbuf, rte_ring_free_count(ring));
+ plogx_dbg("\tFail to send command %s with ip %d.%d.%d.%d to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IP4(ip), ring, mbuf, rte_ring_free_count(ring));
TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
rte_pktmbuf_free(mbuf);
}
@@ -787,3 +914,84 @@ void tx_ring(struct task_base *tbase, struct rte_ring *ring, uint16_t command,
rte_pktmbuf_free(mbuf);
}
}
+
+void tx_ring_route(struct task_base *tbase, struct rte_ring *ring, int add, struct rte_mbuf *mbuf, uint32_t ip, uint32_t gateway_ip, uint32_t prefix)
+{
+ uint8_t command;
+ if (add)
+ command = ROUTE_ADD_FROM_MASTER;
+ else
+ command = ROUTE_DEL_FROM_MASTER;
+
+ plogx_dbg("\tSending command %s to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ring, mbuf, rte_ring_free_count(ring));
+ ctrl_ring_set_command(mbuf, command);
+ ctrl_ring_set_ip(mbuf, ip);
+ ctrl_ring_set_gateway_ip(mbuf, gateway_ip);
+ ctrl_ring_set_prefix(mbuf, prefix);
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ int ret = rte_ring_enqueue(ring, mbuf);
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s to ring %p using mbuf %p - ring size now %d\n", actions_string[command], ring, mbuf, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
+void tx_ring_cti6(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, struct ipv6_addr *ip, uint16_t vlan)
+{
+ int ret;
+ plogx_dbg("\tSending command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ ctrl_ring_set_command_core_task_ip(mbuf, (core_id << 16) | (task_id << 8) | command);
+ ctrl_ring_set_ipv6_addr(mbuf, ip);
+ ctrl_ring_set_vlan(mbuf, vlan);
+ ret = rte_ring_enqueue(ring, mbuf);
+
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p, core %d and task %d - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, core_id, task_id, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
+void tx_ring_ip6(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, struct ipv6_addr *ip)
+{
+ int ret;
+ plogx_dbg("\tSending command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ ctrl_ring_set_command(mbuf, command);
+ ctrl_ring_set_ipv6_addr(mbuf, ip);
+ ret = rte_ring_enqueue(ring, mbuf);
+
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+}
+
+void tx_ring_ip6_data(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, struct ipv6_addr *ip, uint64_t data)
+{
+ int ret;
+ plogx_dbg("\tSending command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ if (tbase->aux->task_rt_dump.cur_trace) {
+ trace_one_rx_pkt(tbase, mbuf);
+ }
+ ctrl_ring_set_command(mbuf, command);
+ ctrl_ring_set_ipv6_addr(mbuf, ip);
+ ctrl_ring_set_data(mbuf, data);
+ ret = rte_ring_enqueue(ring, mbuf);
+
+ if (unlikely(ret != 0)) {
+ plogx_dbg("\tFail to send command %s with ip "IPv6_BYTES_FMT" to ring %p using mbuf %p - ring size now %d\n", actions_string[command], IPv6_BYTES(ip->bytes), ring, mbuf, rte_ring_free_count(ring));
+ TASK_STATS_ADD_DROP_DISCARD(&tbase->aux->stats, 1);
+ rte_pktmbuf_free(mbuf);
+ }
+
+}
diff --git a/VNFs/DPPD-PROX/tx_pkt.h b/VNFs/DPPD-PROX/tx_pkt.h
index e8caed52..0a71bcc3 100644
--- a/VNFs/DPPD-PROX/tx_pkt.h
+++ b/VNFs/DPPD-PROX/tx_pkt.h
@@ -1,5 +1,5 @@
/*
-// Copyright (c) 2010-2017 Intel Corporation
+// Copyright (c) 2010-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,9 +18,21 @@
#define _TX_PKT_H_
#include <inttypes.h>
+#include <string.h>
+#include <rte_mbuf.h>
+#include "ip6_addr.h"
struct task_base;
-struct rte_mbuf;
+
+struct prox_headroom {
+ uint64_t command;
+ uint64_t data64;
+ uint32_t ip;
+ uint32_t prefix;
+ uint32_t gateway_ip;
+ uint16_t vlan;
+ struct ipv6_addr ipv6_addr;
+} __attribute__((packed));
void flush_queues_hw(struct task_base *tbase);
void flush_queues_sw(struct task_base *tbase);
@@ -64,6 +76,8 @@ int tx_pkt_no_drop_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
int tx_pkt_no_drop_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
+int tx_ctrlplane_hw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
+int tx_ctrlplane_sw(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_trace(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_dump(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
@@ -79,9 +93,120 @@ uint16_t tx_try_self(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t
sink. */
int tx_pkt_drop_all(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
int tx_pkt_l3(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
+int tx_pkt_ndp(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts, uint8_t *out);
+
+static inline uint8_t get_command(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return prox_headroom->command & 0xFF;
+}
+static inline uint8_t get_task(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return (prox_headroom->command >> 8) & 0xFF;
+}
+static inline uint8_t get_core(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return (prox_headroom->command >> 16) & 0xFF;
+}
+static inline uint32_t get_ip(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return (prox_headroom->command >> 32) & 0xFFFFFFFF;
+}
+
+static inline void ctrl_ring_set_command_core_task_ip(struct rte_mbuf *mbuf, uint64_t udata64)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->command = udata64;
+}
+
+static inline void ctrl_ring_set_command(struct rte_mbuf *mbuf, uint8_t command)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->command = command;
+}
+
+static inline void ctrl_ring_set_ip(struct rte_mbuf *mbuf, uint32_t udata32)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->ip = udata32;
+}
+
+static inline uint32_t ctrl_ring_get_ip(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return prox_headroom->ip;
+}
+
+static inline void ctrl_ring_set_gateway_ip(struct rte_mbuf *mbuf, uint32_t udata32)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->gateway_ip = udata32;
+}
+
+static inline uint32_t ctrl_ring_get_gateway_ip(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return prox_headroom->gateway_ip;
+}
+
+static inline void ctrl_ring_set_prefix(struct rte_mbuf *mbuf, uint32_t udata32)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->prefix = udata32;
+}
+
+static inline uint32_t ctrl_ring_get_prefix(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return prox_headroom->prefix;
+}
+
+static inline void ctrl_ring_set_data(struct rte_mbuf *mbuf, uint64_t data)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->data64 = data;
+}
+
+static inline uint64_t ctrl_ring_get_data(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return prox_headroom->data64;
+}
+
+static inline void ctrl_ring_set_ipv6_addr(struct rte_mbuf *mbuf, struct ipv6_addr *ip)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ memcpy(&prox_headroom->ipv6_addr, ip, sizeof(struct ipv6_addr));
+}
+
+static inline struct ipv6_addr *ctrl_ring_get_ipv6_addr(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return &prox_headroom->ipv6_addr;
+}
+
+static inline void ctrl_ring_set_vlan(struct rte_mbuf *mbuf, uint32_t udata16)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ prox_headroom->vlan = udata16;
+}
+
+static inline uint16_t ctrl_ring_get_vlan(struct rte_mbuf *mbuf)
+{
+ struct prox_headroom *prox_headroom = (struct prox_headroom *)(rte_pktmbuf_mtod(mbuf, uint8_t*) - sizeof(struct prox_headroom));
+ return prox_headroom->vlan;
+}
-void tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip);
-void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf, uint32_t ip);
+int tx_ring_cti(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, uint32_t ip, uint16_t vlan);
+void tx_ring_cti6(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint8_t core_id, uint8_t task_id, struct ipv6_addr *ip, uint16_t vlan);
+void tx_ring_ip(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, uint32_t ip);
+void tx_ring_ip6(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, struct ipv6_addr *ip);
+void tx_ring_ip6_data(struct task_base *tbase, struct rte_ring *ring, uint8_t command, struct rte_mbuf *mbuf, struct ipv6_addr *ip, uint64_t data);
void tx_ring(struct task_base *tbase, struct rte_ring *ring, uint16_t command, struct rte_mbuf *mbuf);
+void tx_ring_route(struct task_base *tbase, struct rte_ring *ring, int add, struct rte_mbuf *mbuf, uint32_t ip, uint32_t gateway_ip, uint32_t prefix);
+static void store_packet(struct task_base *tbase, struct rte_mbuf *mbufs);
#endif /* _TX_PKT_H_ */
diff --git a/VNFs/DPPD-PROX/version.h b/VNFs/DPPD-PROX/version.h
index a1d01235..355a5dcb 100644
--- a/VNFs/DPPD-PROX/version.h
+++ b/VNFs/DPPD-PROX/version.h
@@ -17,18 +17,18 @@
#ifndef _VERSION_H_
#define _VERSION_H_
-#define STRINGIFY(s) #s
-#define SSTR(s) STRINGIFY(s)
-
/* PROGRAM_NAME defined through Makefile */
-#define VERSION_MAJOR 0
-#define VERSION_MINOR 41
+#define VERSION_MAJOR 0 // Pre-production
+#define VERSION_MINOR 2212 // 22.12 i.e. December 2022
#define VERSION_REV 0
+static inline char *VERSION_STR(void)
+{
+ static char version_buffer[32];
+ snprintf(version_buffer, sizeof(version_buffer), "%02d.%02d", VERSION_MINOR / 100, VERSION_MINOR % 100);
#if VERSION_REV > 0
-#define VERSION_STR "v" SSTR(VERSION_MAJOR) "." SSTR(VERSION_MINOR) "." SSTR(VERSION_REV)
-#else
-#define VERSION_STR "v" SSTR(VERSION_MAJOR) "." SSTR(VERSION_MINOR)
+ snprintf(version_buffer + strlen(version_buffer), sizeof(version_buffer) - strlen(version_buffer), ".%02d", VERSION_REV);
#endif
-
+ return version_buffer;
#endif /* _VERSION_H_ */
+}
diff --git a/VNFs/DPPD-PROX/vxlangpe_nsh.h b/VNFs/DPPD-PROX/vxlangpe_nsh.h
index 2e7cfc76..7aebf380 100644
--- a/VNFs/DPPD-PROX/vxlangpe_nsh.h
+++ b/VNFs/DPPD-PROX/vxlangpe_nsh.h
@@ -17,6 +17,8 @@
#ifndef _VXLANGPE_NSH_H_
#define _VXLANGPE_NSH_H_
+#include <rte_version.h>
+
struct nsh_hdr {
uint16_t version :2;
uint16_t oa_flag :1;
@@ -33,12 +35,13 @@ struct nsh_hdr {
uint32_t ctx_4;
} __attribute__((__packed__));
-struct vxlan_gpe_hdr {
+#if RTE_VERSION < RTE_VERSION_NUM(18,5,0,0)
+typedef struct prox_rte_vxlan_gpe_hdr {
uint8_t flag_0;
uint8_t flag_1;
uint8_t reserved;
- uint8_t next_proto;
+ uint8_t proto;
uint32_t vni_res;
-} __attribute__((__packed__));
-
+} __attribute__((__packed__)) prox_rte_vxlan_gpe_hdr;
+#endif
#endif /* _VXLANGPE_NSH_H_ */
diff --git a/VNFs/vACL/Makefile b/VNFs/vACL/Makefile
index 0995f905..bf1502a2 100644
--- a/VNFs/vACL/Makefile
+++ b/VNFs/vACL/Makefile
@@ -66,7 +66,7 @@ CFLAGS += -I$(VNF_CORE)/common/VIL/gateway
TOP = $(RTE_SDK)/../civetweb
CFLAGS += -I$(TOP)/include $(COPT) -DUSE_WEBSOCKET -DUSE_IPV6 -DUSE_SSL_DH=1
CFLAGS += -DREST_API_SUPPORT
-LDFLAGS += -ljson -lcrypto -lssl
+LDFLAGS += -ljson-c -lcrypto -lssl
LDFLAGS += -L$(RTE_SDK)/../civetweb/ -lcivetweb
# all source are stored in SRCS-y
diff --git a/VNFs/vACL/pipeline/pipeline_acl.h b/VNFs/vACL/pipeline/pipeline_acl.h
index 93b92c45..a0ff84da 100644
--- a/VNFs/vACL/pipeline/pipeline_acl.h
+++ b/VNFs/vACL/pipeline/pipeline_acl.h
@@ -29,7 +29,7 @@
#include "pipeline.h"
#include "pipeline_acl_be.h"
#include <civetweb.h>
-#include <json/json.h>
+#include <json-c/json.h>
/* ACL IPV4 and IPV6 enable flags for debugging (Default both on) */
extern int acl_ipv4_enabled;
diff --git a/VNFs/vACL/pipeline/pipeline_acl_be.c b/VNFs/vACL/pipeline/pipeline_acl_be.c
index d4b92109..5da2e36f 100644
--- a/VNFs/vACL/pipeline/pipeline_acl_be.c
+++ b/VNFs/vACL/pipeline/pipeline_acl_be.c
@@ -777,7 +777,13 @@ pkt_work_acl_key(struct rte_pipeline *p,
uint32_t packet_length = rte_pktmbuf_pkt_len(pkt);
uint32_t dest_if = INVALID_DESTIF;
+ uint32_t dst_phy_port = INVALID_DESTIF;
uint32_t src_phy_port = pkt->port;
+ if(is_phy_port_privte(src_phy_port))
+ dst_phy_port = prv_to_pub_map[src_phy_port];
+ else
+ dst_phy_port = pub_to_prv_map[src_phy_port];
+
if(is_gateway()){
@@ -794,7 +800,7 @@ pkt_work_acl_key(struct rte_pipeline *p,
uint32_t nhip = 0;
uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
- gw_get_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip);
+ gw_get_route_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip, dst_phy_port);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if, &dst_mac);
@@ -1475,6 +1481,11 @@ pkt_work_acl_ipv4_key(struct rte_pipeline *p,
uint32_t dest_if = INVALID_DESTIF;
uint32_t src_phy_port = pkt->port;
+ uint32_t dst_phy_port = INVALID_DESTIF;
+ if(is_phy_port_privte(src_phy_port))
+ dst_phy_port = prv_to_pub_map[src_phy_port];
+ else
+ dst_phy_port = pub_to_prv_map[src_phy_port];
if(is_gateway()){
@@ -1493,7 +1504,7 @@ pkt_work_acl_ipv4_key(struct rte_pipeline *p,
uint32_t src_phy_port = pkt->port;
uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
- gw_get_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip);
+ gw_get_route_nh_port_ipv4(dst_ip_addr, &dest_if, &nhip, dst_phy_port);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if, &dst_mac);
diff --git a/VNFs/vCGNAPT/Makefile b/VNFs/vCGNAPT/Makefile
index 41cacfb7..82efa769 100644
--- a/VNFs/vCGNAPT/Makefile
+++ b/VNFs/vCGNAPT/Makefile
@@ -68,7 +68,7 @@ CFLAGS += -I$(VNF_CORE)/common/VIL/gateway
TOP = $(RTE_SDK)/../civetweb
CFLAGS += -I$(TOP)/include $(COPT) -DUSE_WEBSOCKET -DUSE_IPV6 -DUSE_SSL_DH=1
CFLAGS += -DREST_API_SUPPORT
-LDFLAGS += -ljson -lcrypto -lssl
+LDFLAGS += -ljson-c -lcrypto -lssl
LDFLAGS += -L$(RTE_SDK)/../civetweb/ -lcivetweb
# all source are stored in SRCS-y
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h
index 6497de27..d61273f0 100644
--- a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt.h
@@ -29,7 +29,7 @@
#include "pipeline.h"
#include "pipeline_cgnapt_common.h"
#include <civetweb.h>
-#include <json/json.h>
+#include <json-c/json.h>
/**
* Add NAPT rule to the NAPT rule table.
diff --git a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c
index a1779aa0..dc8a627c 100644
--- a/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c
+++ b/VNFs/vCGNAPT/pipeline/pipeline_cgnapt_be.c
@@ -1970,10 +1970,10 @@ static int cgnapt_in_port_ah_mix(struct rte_pipeline *rte_p,
/* Gateway Proc Starts */
struct arp_entry_data *ret_arp_data = NULL;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address,
- &dest_if, &nhip);
+ uint32_t src_phy_port = pkts[pkt_index]->port;
+ dest_if = prv_to_pub_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
if (dest_if == INVALID_DESTIF) {
p_nat->invalid_packets |=
@@ -2217,10 +2217,10 @@ static int cgnapt_in_port_ah_mix(struct rte_pipeline *rte_p,
struct arp_entry_data *ret_arp_data = NULL;
dest_if = INVALID_DESTIF;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address,
- &dest_if, &nhip);
+ uint32_t src_phy_port = pkts[pkt_index]->port;
+ dest_if = pub_to_prv_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
if (dest_if == INVALID_DESTIF) {
p_nat->invalid_packets |=
@@ -3727,9 +3727,10 @@ pkt_work_cgnapt_ipv4_prv(
uint32_t nhip = 0;
struct arp_entry_data *ret_arp_data = NULL;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address, &dest_if, &nhip);
+ uint32_t src_phy_port = pkt->port;
+ dest_if = prv_to_pub_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if,
(struct ether_addr *)eth_dest);
@@ -4080,9 +4081,11 @@ pkt_work_cgnapt_ipv4_pub(
dest_address = entry->data.u.prv_ip;
struct arp_entry_data *ret_arp_data = NULL;
- uint32_t src_phy_port = *src_port;
+ uint32_t src_phy_port = pkt->port;
- gw_get_nh_port_ipv4(dest_address, &dest_if, &nhip);
+ dest_if = pub_to_prv_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if,
(struct ether_addr *)eth_dest);
@@ -4529,9 +4532,10 @@ pkt4_work_cgnapt_ipv4_prv(
dest_address = rte_bswap32(*dst_addr);
struct arp_entry_data *ret_arp_data = NULL;
uint64_t start, end;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address, &dest_if, &nhip);
+ uint32_t src_phy_port = pkt->port;
+ dest_if = prv_to_pub_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if,
(struct ether_addr *)eth_dest);
@@ -4899,9 +4903,10 @@ pkt4_work_cgnapt_ipv4_pub(
}
dest_address = entry->data.u.prv_ip;
struct arp_entry_data *ret_arp_data = NULL;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address, &dest_if, &nhip);
+ uint32_t src_phy_port = pkt->port;
+ dest_if = pub_to_prv_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if,
(struct ether_addr *)eth_dest);
@@ -6078,9 +6083,10 @@ pkt_work_cgnapt_ipv6_prv(
struct arp_entry_data *ret_arp_data;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address, &dest_if, &nhip);
+ uint32_t src_phy_port = pkt->port;
+ dest_if = prv_to_pub_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if,
(struct ether_addr *)eth_dest);
@@ -6569,9 +6575,10 @@ pkt4_work_cgnapt_ipv6_prv(
{
struct arp_entry_data *ret_arp_data;
- uint32_t src_phy_port = *src_port;
-
- gw_get_nh_port_ipv4(dest_address, &dest_if, &nhip);
+ uint32_t src_phy_port = pkt->port;
+ dest_if = prv_to_pub_map[src_phy_port];
+ gw_get_route_nh_port_ipv4(dest_address,
+ &dest_if, &nhip, dest_if);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dest_if,
(struct ether_addr *)eth_dest);
diff --git a/VNFs/vFW/Makefile b/VNFs/vFW/Makefile
index b011eca2..80dd91ad 100644
--- a/VNFs/vFW/Makefile
+++ b/VNFs/vFW/Makefile
@@ -68,7 +68,7 @@ CFLAGS += -I$(VNF_CORE)/common/VIL/gateway
TOP = $(RTE_SDK)/../civetweb
CFLAGS += -I$(TOP)/include $(COPT) -DUSE_WEBSOCKET -DUSE_IPV6 -DUSE_SSL_DH=1 -DREST_API_SUPPORT=1
-LDFLAGS += -ljson -lcrypto -lssl
+LDFLAGS += -ljson-c -lcrypto -lssl
LDFLAGS += -L$(RTE_SDK)/../civetweb/ -lcivetweb
# all source are stored in SRCS-y
diff --git a/VNFs/vFW/pipeline/pipeline_vfw.h b/VNFs/vFW/pipeline/pipeline_vfw.h
index 96e7ad33..7cfc95c3 100644
--- a/VNFs/vFW/pipeline/pipeline_vfw.h
+++ b/VNFs/vFW/pipeline/pipeline_vfw.h
@@ -31,7 +31,7 @@
#include "pipeline_vfw_be.h"
#include <civetweb.h>
-#include <json/json.h>
+#include <json-c/json.h>
/* VFW IPV4 and IPV6 enable flags for debugging (Default both on) */
extern int vfw_ipv4_enabled;
diff --git a/VNFs/vFW/pipeline/pipeline_vfw_be.c b/VNFs/vFW/pipeline/pipeline_vfw_be.c
index 71fd5cde..23e2aa04 100644
--- a/VNFs/vFW/pipeline/pipeline_vfw_be.c
+++ b/VNFs/vFW/pipeline/pipeline_vfw_be.c
@@ -1163,7 +1163,13 @@ static void vfw_fwd_pkts_ipv4(struct rte_mbuf **pkts, uint64_t *pkts_mask,
src_phy_port = pkt->port;
uint32_t dst_phy_port = INVALID_DESTIF;
- if(is_gateway()){
+ if(is_phy_port_privte(src_phy_port))
+ dst_phy_port = prv_to_pub_map[src_phy_port];
+ else
+ dst_phy_port = pub_to_prv_map[src_phy_port];
+
+
+ if(likely(is_gateway())){
struct ipv4_hdr *ipv4hdr = (struct ipv4_hdr *)
RTE_MBUF_METADATA_UINT32_PTR(pkt, IP_START);
@@ -1174,13 +1180,13 @@ static void vfw_fwd_pkts_ipv4(struct rte_mbuf **pkts, uint64_t *pkts_mask,
uint32_t nhip = 0;
uint32_t dst_ip_addr = rte_bswap32(ipv4hdr->dst_addr);
- gw_get_nh_port_ipv4(dst_ip_addr, &dst_phy_port, &nhip);
+ gw_get_route_nh_port_ipv4(dst_ip_addr, &dst_phy_port, &nhip, dst_phy_port);
ret_arp_data = get_dest_mac_addr_ipv4(nhip, dst_phy_port, &dst_mac);
/* Gateway Proc Ends */
- if (arp_cache_dest_mac_present(dst_phy_port)) {
+ if (likely(arp_cache_dest_mac_present(dst_phy_port))) {
ether_addr_copy(&dst_mac, &ehdr->d_addr);
ether_addr_copy(get_link_hw_addr(dst_phy_port), &ehdr->s_addr);
@@ -1221,11 +1227,6 @@ static void vfw_fwd_pkts_ipv4(struct rte_mbuf **pkts, uint64_t *pkts_mask,
}
} else {
/* IP Pkt forwarding based on pub/prv mapping */
- if(is_phy_port_privte(src_phy_port))
- dst_phy_port = prv_to_pub_map[src_phy_port];
- else
- dst_phy_port = pub_to_prv_map[src_phy_port];
-
meta_data_addr->output_port = vfw_pipe->outport_id[dst_phy_port];
if(VFW_DEBUG) {
diff --git a/common/VIL/gateway/gateway.c b/common/VIL/gateway/gateway.c
index baf22cf5..7ec3c68f 100644
--- a/common/VIL/gateway/gateway.c
+++ b/common/VIL/gateway/gateway.c
@@ -122,29 +122,16 @@ uint32_t is_gateway(void)
*/
void gw_get_nh_port_ipv4(uint32_t dst_ip_addr,
- uint32_t *dst_port, uint32_t *nhip)
+ uint32_t *dst_port, uint32_t *nhip)
{
- int i;
uint32_t j;
*nhip = 0;
*dst_port = 0xff;
- for(j = 0; j < gw_get_num_ports(); j++){
-
- for (i = 0; i < p_route_data[j]->route_ent_cnt; i++) {
-
- if ((p_route_data[j]->route_table[i].nh_mask) ==
- (dst_ip_addr &
- p_route_data[j]->route_table[i].mask)) {
-
- *dst_port = p_route_data[j]->route_table[i].port;
- *nhip = p_route_data[j]->route_table[i].nh;
-
- lib_arp_nh_found++;
- return;
- }
- }
+ for(j = 0; j < num_out_ports; j++) {
+ if (gw_get_route_nh_port_ipv4(dst_ip_addr, dst_port, nhip, j))
+ return;
}
}
diff --git a/common/VIL/gateway/gateway.h b/common/VIL/gateway/gateway.h
index 47a3b8a7..2da8ff64 100644
--- a/common/VIL/gateway/gateway.h
+++ b/common/VIL/gateway/gateway.h
@@ -68,17 +68,17 @@ struct route_table_entry {
struct route_data {
struct route_table_entry route_table[MAX_ROUTE_ENTRY_SIZE];
uint8_t route_ent_cnt;
-};
+}__rte_cache_aligned;
/**
* A structure for Route table entires of IPv6
*
*/
struct nd_route_table_entry {
+ uint32_t port; /**< Port */
uint8_t nhipv6[16]; /**< next hop Ipv6 */
uint8_t depth; /**< Depth */
- uint32_t port; /**< Port */
-};
+}__rte_cache_aligned;
/**
* Routing table for IPv6
@@ -87,7 +87,7 @@ struct nd_route_table_entry {
struct nd_route_data {
struct nd_route_table_entry nd_route_table[MAX_ND_ROUTE_ENTRY_SIZE];
uint8_t nd_route_ent_cnt;
-};
+}__rte_cache_aligned;
extern void gw_init(uint32_t num_ports);
@@ -95,6 +95,44 @@ extern uint32_t gw_get_num_ports(void);
extern uint32_t is_gateway(void);
+/**
+ * Get the route next hop ip address and port number for IPv4
+ * @param dst_ip_addr
+ * Destination IPv4 address
+ * @param dst_port
+ * A pointer to destination port
+ * @param nhip
+ * A pointer to next hop ip address
+ */
+
+static inline int gw_get_route_nh_port_ipv4(uint32_t dst_ip_addr,
+ uint32_t *dst_port, uint32_t *nhip, uint32_t nport)
+{
+ int i = 0;
+ uint32_t j = nport;
+
+ while(likely(i < p_route_data[j]->route_ent_cnt)) {
+ if (likely((p_route_data[j]->route_table[i].nh_mask) ==
+ (dst_ip_addr &
+ p_route_data[j]->route_table[i].mask))) {
+
+ *dst_port = p_route_data[j]->route_table[i].port;
+ *nhip = p_route_data[j]->route_table[i].nh;
+
+#ifdef ARPICMP_DEBUG
+ lib_arp_nh_found++;
+#endif
+ return 1;
+ }
+ i++;
+ }
+
+ *nhip = 0;
+ *dst_port = 0xff;
+ return 0;
+}
+
+
extern void gw_get_nh_port_ipv4(uint32_t dst_ip_addr,
uint32_t *dst_port, uint32_t *nhip);
diff --git a/common/VIL/l2l3_stack/lib_arp.c b/common/VIL/l2l3_stack/lib_arp.c
index d59f4b79..90bcc064 100644
--- a/common/VIL/l2l3_stack/lib_arp.c
+++ b/common/VIL/l2l3_stack/lib_arp.c
@@ -300,9 +300,9 @@ struct arp_entry_data *get_dest_mac_addr_ipv4(const uint32_t nhip,
/* as part of optimization we store mac address in cache
* & thus can be sent without having to retrieve
*/
- if (arp_cache_dest_mac_present(phy_port)) {
+ if (likely(arp_cache_dest_mac_present(phy_port))) {
x = get_local_cache_hw_addr(phy_port, nhip);
- if (!x) {
+ if (unlikely(!x)) {
printf("local copy of address not stored\n");
return NULL;
}
@@ -2496,7 +2496,7 @@ struct ether_addr *get_local_cache_hw_addr(uint8_t out_port, uint32_t nhip)
limit = p_arp_data->arp_local_cache[out_port].num_nhip;
for (i=0; i < limit; i++) {
tmp = p_arp_data->arp_local_cache[out_port].nhip[i];
- if (tmp == nhip) {
+ if (likely(tmp == nhip)) {
x = &p_arp_data->arp_local_cache[out_port].link_hw_laddr[i];
return x;
}
diff --git a/common/vnf_common/rest_api.c b/common/vnf_common/rest_api.c
index 9cfbe88f..8efd7730 100644
--- a/common/vnf_common/rest_api.c
+++ b/common/vnf_common/rest_api.c
@@ -52,7 +52,7 @@
#include "pipeline_arpicmp.h"
#include <civetweb.h>
-#include <json/json.h>
+#include <json-c/json.h>
#include "app.h"
#include "lib_arp.h"
#include "interface.h"
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 00000000..4821d0aa
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,4 @@
+from docs_conf.conf import *
+linkcheck_ignore = [
+ r'https://trex-tgn.cisco.com/',
+ ]
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 00000000..11787c9b
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,3 @@
+---
+project_cfg: opnfv
+project: SAMPLEVNF
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 00000000..ccf2d8ad
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,17 @@
+.. _samplevnf:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. SPDX-License-Identifier CC-BY-4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+*********************************
+OPNFV Samplevnf
+*********************************
+
+.. toctree::
+ :numbered:
+ :maxdepth: 1
+
+ release/release-notes/index
+ testing/user/userguide/index
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index e76e5098..f9daae50 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -3,63 +3,20 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) OPNFV, Intel Corporation and others.
-=======
-License
-=======
-OPNFV release note for SampleVNF Docs
-are licensed under a Creative Commons Attribution 4.0 International License.
-You should have received a copy of the license along with this.
-If not, see <http://creativecommons.org/licenses/by/4.0/>.
-:
+OPNFV Jerma Release
+===================
+* The only supported test VNF in this release for dataplane benchmarking purposes is PROX
+* PROX supporting up to DPDK:20.05
+* Introducing the ability to run cloud-configured dataplane networking
+  benchmark tests using ETSI NFV TST009 standard methods
+* Test automation using X-testing
-The *SampleVNFs*, the *SampleVNF test cases* are opensource software,
-licensed under the terms of the Apache License, Version 2.0.
-==========================================
-OPNFV Farser Release Note for SampleVNF
-==========================================
+OPNFV Hunter Release
+====================
-.. toctree::
- :maxdepth: 2
-
-.. _SampleVNF: https://wiki.opnfv.org/SAM
-
-.. _Yardstick: https://wiki.opnfv.org/yardstick
-
-.. _NFV-TST001: http://www.etsi.org/deliver/etsi_gs/NFV-TST/001_099/001/01.01.01_60/gs_NFV-TST001v010101p.pdf
-
-
-Abstract
-========
-
-This document describes the release note of SampleVNF project.
-
-
-Version History
-===============
-
-+----------------+--------------------+---------------------------------+
-| *Date* | *Version* | *Comment* |
-| | | |
-+----------------+--------------------+---------------------------------+
-| "Oct 20 2017" | 6.0 | SampleVNF for Farser release |
-| | | |
-+----------------+--------------------+---------------------------------+
-
-
-Important Notes
-===============
-
-The software delivered in the OPNFV SampleVNF_ Project, comprising the
-*SampleVNF VNFs* and performance test case are part of OPNFV Yardstick_
-Project is a realization of the methodology in ETSI-ISG NFV-TST001_.
-
-
-OPNFV Farser Release
-======================
-
-This Farser release provides *SampleVNF* as a approx VNF repository for
+This Hunter release provides *SampleVNF* as an approx VNF repository for
VNF/NFVI testing, characterization and OPNFV feature testing, automated on
OPNFV platform, including:
@@ -73,19 +30,19 @@ OPNFV platform, including:
* Results
-* Automated SampleVNF test suit in OPNFV Yardstick_ Project
+* Automated SampleVNF test suite in OPNFV Yardstick Project
* SampleVNF source code
-For Farser release, the *SampleVNF* supported:
+For the Hunter release, *SampleVNF* supported:
+----------------+---------------------------------------------------------+-------------------+
| *VNF* | *Name* | *version* |
+----------------+---------------------------------------------------------+-------------------+
| *CGNAPT* | Carrier Grade Network Address and port Translation .5.0 | v0.1.0 |
+----------------+---------------------------------------------------------+-------------------+
-| *Prox* | Packet pROcessing eXecution engine | v0.39.0 |
-| | acts as traffic generator, L3FWD, L2FWD, BNG etc | |
+| *Prox* | Packet pROcessing eXecution engine | v0.40.0 |
+| | acts as traffic generator, L3FWD, L2FWD, BNG etc | |
+----------------+---------------------------------------------------------+-------------------+
| *vACL* | Access Control List | v0.1.0 |
+----------------+---------------------------------------------------------+-------------------+
@@ -97,7 +54,7 @@ For Farser release, the *SampleVNF* supported:
.. note:: Highlevel Desgin and features supported by each of the VNFs is described in Developer
and user guide.
-For Farser release, the *SampleVNF* is used for the following
+For the Hunter release, *SampleVNF* is used for the following
testing:
* OPNFV platform testing - generic test cases to measure the categories:
@@ -108,55 +65,64 @@ testing:
* VNF Characterization:
- * Network - rfc2544, rfc36.0, latency, http_test etc
+ * Network - rfc2544, rfc3511, latency, http_test etc
-The *SampleVNF* is developed in the OPNFV community, by the SampleVNF_ team.
+The *SampleVNF* is developed in the OPNFV community, by the SampleVNF team.
The *Network Service Benchmarking* SampleVNF Characterization Testing tool is a part of the
Yardstick Project.
.. note:: The test case description template used for the SampleVNF in yardstick
- test cases is based on the document ETSI-ISG NFV-TST001_; the results report template
+ test cases is based on the document `ETSI GS NFV-TST 001`_; the results report template
used for the SampleVNF test results is based on the IEEE Std 829-2008.
+.. _ETSI GS NFV-TST 001: https://portal.etsi.org/webapp/workprogram/Report_WorkItem.asp?WKI_ID=46009
+
Release Data
-============
+------------
+--------------------------------------+--------------------------------------+
| **Project** | SampleVNF |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | samplevnf/Farser.6.0 |
+| **Repo/tag** | opnfv-8.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **SampleVNF Docker image tag** | Farser.6.0 |
+| **SampleVNF Docker image tag** | Hunter 8.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Farser |
+| **Release designation** | Hunter 8.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | "October 20 2017" |
+| **Release date** | "May 10 2019" |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | OPNFV Farser release 6.0 |
+| **Purpose of the delivery** | Hunter alignment to Released |
+| | bug-fixes for the following: |
+| | |
+| | - Memory leak |
+| | - minimum latency |
+| | - Increase default mbuf size and |
+| | code simplification/cleanup |
+| | - Crash in rx/tx distribution |
| | |
+--------------------------------------+--------------------------------------+
Deliverables
-============
+------------
Documents
----------
+^^^^^^^^^
- - User Guide: http://artifacts.opnfv.org/samplevnf/euphrates/5.0.0/docs/testing_user_userguide/index.html
+ - User Guide: http://artifacts.opnfv.org/samplevnf/docs/testing_user_userguide/index.html
- - Developer Guide: http://artifacts.opnfv.org/samplevnf/euphrates/5.0.0/docs/testing_developer/index.html
+ - Developer Guide: http://artifacts.opnfv.org/samplevnf/docs/testing_developer/index.html
Software Deliverables
----------------------
+^^^^^^^^^^^^^^^^^^^^^
- The SampleVNF Docker image: To be added
@@ -178,7 +144,7 @@ Software Deliverables
+---------------------+-------------------------------------------------------+
Document Version Changes
-------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^
This is the first version of the SampleVNF in OPNFV.
It includes the following documentation updates:
@@ -191,15 +157,32 @@ It includes the following documentation updates:
Feature additions
------------------
-
-- SampleVNF RESTful API support
-
-- Introduce Network service benchmarking
+^^^^^^^^^^^^^^^^^
+
+- Support for DPDK 18.05 and DPDK 18.08
+- Add support for counting non dataplane related packets
+- test improvements and fixes for image creation
+- Local Documentation Builds
+- Improve l3fwd performance
+- Enable the local cache mac address
+- Initial support for DPDK 18.05
+- Adding centos.json to be used with packer to generate a VM with PROX
+- Adding support for Ubuntu 17.10...
+- Get multiple port stats simultaneously
+- Increase default mbuf size and code simplification/cleanup
+- update from src port in the pvt/pub handler
+
+Bug fixes:
+- Fix potential crash with latency accuracy
+- TempFix: vCGNAPT/vACL ipv4 perf issue
+- Temp Fix for vFW perf issue
+- fix code standard in VNFs/DPPD-PROX/handle_esp.c
+- Workaround DPDK net/virtio queue setup issue
+- Fix potential crash when shuffling mbufs
Known Issues/Faults
--------------------
+^^^^^^^^^^^^^^^^^^^
- Huge page freeing needs to be handled properly while running the application else it might
cause system crash. Known issue from DPDK.
- UDP Replay is used to capture throughput for dynamic cgnapt
@@ -209,21 +192,61 @@ Known Issues/Faults
- Rest API uses port 80, make sure other webservices are stopped before using SampleVNF RestAPI.
Corrected Faults
-----------------
-
-Farser.6.0:
-
-+----------------------------+------------------------------------------------+
-| **JIRA REFERENCE** | **DESCRIPTION** |
-| | |
-+----------------------------+------------------------------------------------+
-| | |
-| | |
-+----------------------------+------------------------------------------------+
-
-
-Farser known restrictions/issues
-====================================
+^^^^^^^^^^^^^^^^
+
+Hunter 8.2:
+
++----------------------------+----------------------------------------------------------------------+
+| **JIRA REFERENCE** | **DESCRIPTION** |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-129 | Support for DPDK 18.05 and DPDK 18.08 |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-130 | Add support for counting non dataplane related packets |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-131 | test improvements and fixes for image creation |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-132 | Local Documentation Builds |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-133 | Improve l3fwd performance |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-134 | Enable the local cache mac address |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-135 | Initial support for DPDK 18.05 |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-136              | Adding centos.json to be used with packer to generate a VM with PROX |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-137              | Adding support for Ubuntu 17.10...                                   |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-138 | Get multiple port stats simultaneously |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-139 | Increase default mbuf size and code simplification/cleanup |
++----------------------------+----------------------------------------------------------------------+
+| SAMPLEVNF-140 | update from src port in the pvt/pub handler |
++----------------------------+----------------------------------------------------------------------+
+
+
+
+
+Bug Fix Jira:
+
++----------------------------+-------------------------------------------------------------------+
+| **JIRA REFERENCE** | **DESCRIPTION** |
++----------------------------+-------------------------------------------------------------------+
+| SAMPLEVNF-141 | Fix potential crash with latency accuracy |
++----------------------------+-------------------------------------------------------------------+
+| SAMPLEVNF-142 | TempFix: vCGNAPT/vACL ipv4 perf issue |
++----------------------------+-------------------------------------------------------------------+
+| SAMPLEVNF-143 | Temp Fix for vFW perf issue |
++----------------------------+-------------------------------------------------------------------+
+| SAMPLEVNF-144 | fix code standard in VNFs/DPPD-PROX/handle_esp.c |
++----------------------------+-------------------------------------------------------------------+
+| SAMPLEVNF-145 | Workaround DPDK net/virtio queue setup issue |
++----------------------------+-------------------------------------------------------------------+
+| SAMPLEVNF-146 | Fix potential crash when shuffling mbufs |
++----------------------------+-------------------------------------------------------------------+
+
+Hunter known restrictions/issues
+--------------------------------
+-----------+-----------+----------------------------------------------+
| Installer | Scenario | Issue |
+===========+===========+==============================================+
@@ -232,7 +255,7 @@ Farser known restrictions/issues
Open JIRA tickets
-=================
+-----------------
+----------------------------+------------------------------------------------+
| **JIRA REFERENCE** | **DESCRIPTION** |
@@ -244,12 +267,12 @@ Open JIRA tickets
Useful links
-============
+------------
- - wiki project page: https://wiki.opnfv.org/display/SAM
+ - wiki project page: https://wiki-old.opnfv.org/display/SAM
- - wiki SampleVNF Farser release planing page: https://wiki.opnfv.org/display/SAM/F+Release+Plan+for+SampleVNF
+ - wiki SampleVNF Hunter release planing page: https://wiki.opnfv.org/display/SAM/G+-+Release+SampleVNF+planning
- - SampleVNF repo: https://git.opnfv.org/cgit/samplevnf
+ - SampleVNF repo: https://git.opnfv.org/samplevnf/
- SampleVNF IRC chanel: #opnfv-samplevnf
diff --git a/docs/release/results/overview.rst b/docs/release/results/overview.rst
index df04d327..5a2f2b8a 100644
--- a/docs/release/results/overview.rst
+++ b/docs/release/results/overview.rst
@@ -6,7 +6,7 @@
SampleVNF test tesult document overview
=======================================
-.. _`SampleVNF user guide`: artifacts.opnfv.org/samplevnf/docs/userguide/index.html
+.. _`SampleVNF user guide`: http://artifacts.opnfv.org/samplevnf/docs/testing_user_userguide/index.html
This document provides an overview of the results of test cases developed by
the OPNFV SampleVNF Project & test cases executed part of yardstick
diff --git a/docs/release/results/results.rst b/docs/release/results/results.rst
index bda09af1..c100bd2d 100644
--- a/docs/release/results/results.rst
+++ b/docs/release/results/results.rst
@@ -7,8 +7,8 @@ Results listed by scenario
==========================
The following sections describe the yardstick results as evaluated for the
-Farser release. Each section describes the determined state of the specific
-test case in Farser release.
+Hunter release. Each section describes the determined state of the specific
+test case in Hunter release.
Feature Test Results
====================
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..9fde2df2
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,2 @@
+lfdocs-conf
+sphinx_opnfv_theme
diff --git a/docs/testing/developer/design/02-Get_started_Guide.rst b/docs/testing/developer/design/02-Get_started_Guide.rst
index c8f35ed3..2a9806b5 100644
--- a/docs/testing/developer/design/02-Get_started_Guide.rst
+++ b/docs/testing/developer/design/02-Get_started_Guide.rst
@@ -6,7 +6,7 @@
====================================
Get started as a SampleVNF developer
-===================================
+====================================
.. _SampleVNF: https://wiki.opnfv.org/samplevnf
.. _Gerrit: https://www.gerritcodereview.com/
diff --git a/docs/testing/developer/design/04-SampleVNF_Design.rst b/docs/testing/developer/design/04-SampleVNF_Design.rst
index a3332e27..f813a297 100644
--- a/docs/testing/developer/design/04-SampleVNF_Design.rst
+++ b/docs/testing/developer/design/04-SampleVNF_Design.rst
@@ -348,7 +348,7 @@ transmit takes packets from worker thread in a dedicated ring and sent to the
hardware queue.
Master pipeline
-^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^
This component does not process any packets and should configure with Core 0,
to save cores for other components which processes traffic. The component
is responsible for:
@@ -359,7 +359,7 @@ is responsible for:
4. ARP and ICMP are handled here.
Load Balancer pipeline
-^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^
Load balancer is part of the Multi-Threaded CGMAPT release which distributes
the flows to Multiple ACL worker threads.
@@ -371,7 +371,7 @@ affinity of flows to worker threads.
Tuple can be modified/configured using configuration file
vCGNAPT - Static
-------------------
+----------------
The vCGNAPT component performs translation of private IP & port to public IP &
port at egress side and public IP & port to private IP & port at Ingress side
@@ -383,7 +383,7 @@ match will be taken a default action. The default action may result in drop of
the packets.
vCGNAPT- Dynamic
------------------
+----------------
The vCGNAPT component performs translation of private IP & port to public IP &
port at egress side and public IP & port to private IP & port at Ingress side
@@ -399,11 +399,13 @@ Dynamic vCGNAPT acts as static one too, we can do NAT entries statically.
Static NAT entries port range must not conflict to dynamic NAT port range.
vCGNAPT Static Topology
-----------------------
+-----------------------
-IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 1) IXIA
+IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 1)IXIA
operation:
+
Egress --> The packets sent out from ixia(port 0) will be CGNAPTed to ixia(port 1).
+
Igress --> The packets sent out from ixia(port 1) will be CGNAPTed to ixia(port 0).
vCGNAPT Dynamic Topology (UDP_REPLAY)
@@ -411,9 +413,11 @@ vCGNAPT Dynamic Topology (UDP_REPLAY)
IXIA(Port 0)-->(Port 0)VNF(Port 1)-->(Port 0)UDP_REPLAY
operation:
+
Egress --> The packets sent out from ixia will be CGNAPTed to L3FWD/L4REPLAY.
+
Ingress --> The L4REPLAY upon reception of packets (Private to Public Network),
- will immediately replay back the traffic to IXIA interface. (Pub -->Priv).
+ will immediately replay back the traffic to IXIA interface. (Pub -->Priv).
How to run L4Replay
-------------------
@@ -431,7 +435,7 @@ vACL - Design
=============
Introduction
---------------
+------------
This application implements Access Control List (ACL). ACL is typically used
for rule based policy enforcement. It restricts access to a destination IP
address/port based on various header fields, such as source IP address/port,
@@ -439,12 +443,12 @@ destination IP address/port and protocol. It is built on top of DPDK and uses
the packet framework infrastructure.
Scope
-------
+-----
This application provides a standalone DPDK based high performance ACL Virtual
Network Function implementation.
High Level Design
-------------------
+-----------------
The ACL Filter performs bulk filtering of incoming packets based on rules in
current ruleset, discarding any packets not permitted by the rules. The
mechanisms needed for building the rule database and performing lookups are
@@ -460,12 +464,12 @@ The Input and Output FIFOs will be implemented using DPDK Ring Buffers.
The DPDK ACL example:
-http://dpdk.org/doc/guides/sample_app_ug/l3_forward_access_ctrl.html
+http://doc.dpdk.org/guides/sample_app_ug/l3_forward.html
#figure-ipv4-acl-rule contains a suitable syntax and parser for ACL rules.
Components of ACL
-------------------
+-----------------
In ACL, each component is constructed as a packet framework. It includes
Master pipeline component, driver, load balancer pipeline component and ACL
worker pipeline component. A pipeline framework is a collection of input ports,
@@ -607,27 +611,33 @@ Edge Router has the following functionalities in Upstream.
Update the packet color in MPLS EXP field in each MPLS header.
Components of vPE
--------------------
+-----------------
The vPE has downstream and upstream pipelines controlled by Master component.
-Edge router processes two different types of traffic through pipelines
-I. Downstream (Core-to-Customer)
- 1. Receives TCP traffic from core
- 2. Routes the packet based on the routing rules
- 3. Performs traffic scheduling based on the traffic profile
- a. Qos scheduling is performed using token bucket algorithm
- SVLAN, CVLAN, DSCP fields are used to determine transmission priority.
- 4. Appends QinQ label in each outgoing packet.
-II. Upstream (Customer-to-Core)
- 1. Receives QinQ labelled TCP packets from Customer
- 2. Removes the QinQ label
- 3. Classifies the flow using QinQ label and apply Qos metering
- a. 1st stage Qos metering is performed with flow ID using trTCM algorithm
- b. 2nd stage Qos metering is performed with flow ID and traffic class using
- trTCM algorithm
- c. traffic class maps to DSCP field in the packet.
- 4. Routes the packet based on the routing rules
- 5. Appends two MPLS labels in each outgoing packet.
+Edge router processes two different types of traffic through pipelines:
+
+I) Downstream (Core-to-Customer)
+
+ 1. Receives TCP traffic from core
+ 2. Routes the packet based on the routing rules
+ 3. Performs traffic scheduling based on the traffic profile
+
+ a. Qos scheduling is performed using token bucket algorithm.
+ SVLAN, CVLAN, DSCP fields are used to determine transmission priority.
+ 4. Appends QinQ label in each outgoing packet.
+
+II) Upstream (Customer-to-Core)
+
+ 1. Receives QinQ labelled TCP packets from Customer
+ 2. Removes the QinQ label
+ 3. Classifies the flow using QinQ label and apply Qos metering
+
+ a. 1st stage Qos metering is performed with flow ID using trTCM algorithm
+ b. 2nd stage Qos metering is performed with flow ID and traffic class using
+ trTCM algorithm
+ c. traffic class maps to DSCP field in the packet.
+ 4. Routes the packet based on the routing rules
+ 5. Appends two MPLS labels in each outgoing packet.
Master Component
^^^^^^^^^^^^^^^^
@@ -635,7 +645,8 @@ Master Component
The Master component is part of all the IP Pipeline applications. This
component does not process any packets and should configure with Core0,
to save cores for other components which processes traffic. The component
-is responsible for
+is responsible for:
+
1. Initializing each component of the Pipeline application in different threads
2. Providing CLI shell for the user
3. Propagating the commands from user to the corresponding components.
@@ -656,7 +667,7 @@ To run the VNF, execute the following:
Prox - Packet pROcessing eXecution engine
-==========================================
+=========================================
Introduction
------------
diff --git a/docs/testing/developer/requirements/03-Requirements.rst b/docs/testing/developer/requirements/03-Requirements.rst
index 25798606..97b1813f 100644
--- a/docs/testing/developer/requirements/03-Requirements.rst
+++ b/docs/testing/developer/requirements/03-Requirements.rst
@@ -13,7 +13,7 @@ Requirements
.. _SampleVNF: https://wiki.opnfv.org/samplevnf
.. _Technical_Briefs: https://wiki.opnfv.org/display/SAM/Technical+Briefs+of+VNFs
-Supported Test setup:
+Supported Test setup
--------------------
The device under test (DUT) consists of a system following
diff --git a/docs/testing/user/userguide/01-introduction.rst b/docs/testing/user/userguide/01-introduction.rst
index 10c0161f..4ddde201 100755..100644
--- a/docs/testing/user/userguide/01-introduction.rst
+++ b/docs/testing/user/userguide/01-introduction.rst
@@ -9,30 +9,16 @@ Introduction
**Welcome to SampleVNF's documentation !**
-.. _Pharos: https://wiki.opnfv.org/pharos
-.. _SampleVNF: https://wiki.opnfv.org/samplevnf
-.. _Technical_Briefs: https://wiki.opnfv.org/display/SAM/Technical+Briefs+of+VNFs
-SampleVNF_ is an OPNFV Project.
-
-The project's goal is to provides a placeholder for various sample VNF
+The project's goal was to provide a placeholder for various sample VNF
(Virtual Network Function (:term:`VNF`)) development which includes example
reference architecture and optimization methods related to VNF/Network service
-for high performance VNFs. This project provides benefits to other OPNFV
-projects like Functest, Models, yardstick etc to perform real life
-use-case based testing and VNF/ Network Function Virtualization Infrastructure
-(:term:`NFVI`) characterization for the same.
-
-The Project's scope to create a repository of sample VNFs to help VNF
-benchmarking and NFVI characterization with real world traffic and host a
-common development environment for developing the VNF using optimized libraries.
-Also, develop a test framework in yardstick to enable VNF/NFVI verification.
-
-*SampleVNF* is used in OPNFV for characterization of NFVI/VNF on OPNFV infrastructure
-and some of the OPNFV features.
+for high performance VNFs.
+Today, we only maintain PROX and rapid scripts as part of this project
+to perform Network Function Virtualization Infrastructure
+(:term:`NFVI`) characterization.
-.. seealso:: Pharos_ for information on OPNFV community labs and this
- Technical_Briefs_ for an overview of *SampleVNF*
+*SampleVNF* is used in OPNFV for characterization of NFVI.
About This Document
@@ -44,24 +30,8 @@ This document consists of the following chapters:
project's background and describes the structure of this document.
* Chapter :doc:`02-methodology` describes the methodology implemented by the
- *SampleVNF* Project for :term:`VNF` and :term:`NFVI` verification.
-
-* Chapter :doc:`03-architecture` provides information on the software architecture
- of *SampleVNF*.
-
-* Chapter :doc:`04-installation` provides instructions to install *SampleVNF*.
-
-* Chapter :doc:`05-How_to_run_SampleVNFs` provides example on how installing and running *SampleVNF*.
-
-* Chapter :doc:`06-How_to_use_REST_api` provides info on how to run REST API *SampleVNF*.
-
-* Chapter :doc:`07-Config_files` provides info *SampleVNF* configuration.
-
-* Chapter :doc:`08-CLI_Commands_Reference` provides info on CLI commands supported by *SampleVNF*
-
-Contact SampleVNF
-=================
+ *SampleVNF* Project for :term:`NFVI` verification.
-Feedback? `Contact us`_
+* Chapter :doc:`03-installation` provides instructions to install *SampleVNF*.
-.. _Contact us: opnfv-users@lists.opnfv.org
+* Chapter :doc:`04-running_the_test` shows how to run the dataplane testing.
diff --git a/docs/testing/user/userguide/01-prox_documentation.rst b/docs/testing/user/userguide/01-prox_documentation.rst
new file mode 100644
index 00000000..12c740da
--- /dev/null
+++ b/docs/testing/user/userguide/01-prox_documentation.rst
@@ -0,0 +1,4 @@
+Testing with PROX
+=================
+The PROX documentation can be found in `Prox - Packet pROcessing eXecution engine <https://wiki-old.opnfv.org/x/AAa9>`_
+How to use PROX with the rapid python scripts can be found in `Rapid scripting <https://wiki-old.opnfv.org/x/OwM-Ag>`_
diff --git a/docs/testing/user/userguide/02-methodology.rst b/docs/testing/user/userguide/02-methodology.rst
index 01cbb276..e5a7d383 100644
--- a/docs/testing/user/userguide/02-methodology.rst
+++ b/docs/testing/user/userguide/02-methodology.rst
@@ -6,81 +6,68 @@
===========
Methodology
===========
+.. _NFV-TST009: https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
Abstract
========
This chapter describes the methodology/overview of SampleVNF project from
-the perspective of a :term:`VNF` and :term:`NFVI` Characterization
+the perspective of :term:`NFVI` Characterization
Overview
========
-This project provides a placeholder for various sample VNF (Virtual Network Function (:term:`VNF`))
-development which includes example reference architecture and optimization methods
-related to VNF/Network service for high performance VNFs.
+This project covers the dataplane benchmarking for Network Function Virtualization
+Infrastructure (:term:`NFVI`) using the PROX tool, according to ETSI GS NFV-TST009_.
-The sample VNFs are Open Source approximations* of Telco grade :term:`VNF`
-using optimized VNF + NFVi Infrastructure libraries, with Performance Characterization of Sample† Traffic Flows.
-• * Not a commercial product. Encourage the community to contribute and close the feature gaps.
-• † No Vendor/Proprietary Workloads
+The test execution and reporting is driven by the Xtesting framework and is fully automated.
-ETSI-NFV
-========
-
-.. _NFV-TST001: http://www.etsi.org/deliver/etsi_gs/NFV-TST/001_099/001/01.01.01_60/gs_NFV-TST001v010101p.pdf
-.. _SampleVNFtst: https://wiki.opnfv.org/display/SAM/Technical+Briefs+of+VNFs
-.. _Yardstick_NSB: http://artifacts.opnfv.org/yardstick/docs/testing_user_userguide/index.html#document-13-nsb-overview
-
-SampleVNF Test Infrastructure (NSB (Yardstick_NSB_))in yardstick helps to facilitate
-consistent/repeatable methodologies for characterizing & validating the
-sample VNFs (:term:`VNF`) through OPEN SOURCE VNF approximations.
-
-Network Service Benchmarking in yardstick framework follows ETSI GS NFV-TST001_
-to verify/characterize both :term:`NFVI` & :term:`VNF`
-
-The document ETSI GS NFV-TST001_, "Pre-deployment Testing; Report on Validation
-of NFV Environments and Services", recommends methods for pre-deployment
-testing of the functional components of an NFV environment.
+When executing the tests, traffic will be send between 2 or more PROX VMs and all metrics
+will be collected in the Xtesting database.
+The placement of the test VMs (in which the PROX tool is running), can be controlled by
+Heat stacks, but can also be done through other means. This will be explained in the chapter
+covering the PROX instance deployment, and needs to be done prior to the test execution.
-The SampleVNF project implements the methodology described in chapter 13 of Yardstick_NSB_,
-"Pre-deployment validation of NFV infrastructure".
+The PROX tool is a DPDK based application optimized for high throughput packet handling.
+As such, we will not measure limitations imposed by the tool, but the capacity of the
+NFVI. In the rare case that the PROX tool would impose a limit, a warning will be logged.
-The methodology consists in decomposing the typical :term:`VNF` work-load
-performance metrics into a number of characteristics/performance vectors, which
-each can be represented by distinct test-cases.
-
-.. seealso:: SampleVNFtst_ for material on alignment ETSI TST001 and SampleVNF.
+ETSI-NFV
+========
+The document ETSI GS NFV-TST009_, "Specification of Networking Benchmarks and
+Measurement Methods for NFVI", specifies vendor-agnostic definitions of performance
+metrics and the associated methods of measurement for Benchmarking networks supported
+in the NFVI. Throughput, latency, packet loss and delay variation will be measured.
+The delay variation is not represented by the Frame Delay Variation (FDV) as defined in
+the specification, but by the average latency, the 99 percentile latency, the maximum
+latency and the complete latency distribution histogram.
Metrics
=======
-The metrics, as defined by ETSI GS NFV-TST001, are shown in
-:ref:`Table1 <table2_1>`.
+The metrics, as reported by the tool, and aligned with the definitions in ETSI GS NFV-TST009_,
+are shown in :ref:`Table1 <table2_1>`.
.. _table2_1:
-**Table 1 - Performance/Speed Metrics**
-
-+---------+-------------------------------------------------------------------+
-| Category| Performance/Speed |
-| | |
-+---------+-------------------------------------------------------------------+
-| Network | * Throughput per NFVI node (frames/byte per second) |
-| | * Throughput provided to a VM (frames/byte per second) |
-| | * Latency per traffic flow |
-| | * Latency between VMs |
-| | * Latency between NFVI nodes |
-| | * Packet delay variation (jitter) between VMs |
-| | * Packet delay variation (jitter) between NFVI nodes |
-| | * RFC 3511 benchmark |
-| | |
-+---------+-------------------------------------------------------------------+
+**Table 1 - Network Metrics**
+
++-----------------+---------------------------------------------------------------+
+| Measurement | Description |
+| | |
++-----------------+---------------------------------------------------------------+
+| Throughput      | Maximum amount of traffic that can be sent between 2 VM       |
+| | instances, within the allowed packet loss requirements. |
+| | Results are expressed in Mpps and in Gb/s |
++-----------------+---------------------------------------------------------------+
+| Latency | 99 percentile Round trip latency expressed in micro-seconds |
+| | Note that you can also specify the n-th percentile |
++-----------------+---------------------------------------------------------------+
+| Delay Variation | Average latency, maximum latency and the latency histogram |
++-----------------+---------------------------------------------------------------+
+| Loss            | Packets per second that were lost on their round trip between |
+| | VMs. Total packet loss numbers are also reported |
++-----------------+---------------------------------------------------------------+
.. note:: The description in this OPNFV document is intended as a reference for
- users to understand the scope of the SampleVNF Project and the
- deliverables of the SampleVNF framework. For complete description of
- the methodology, please refer to the ETSI document.
-
-.. rubric:: Footnotes
-.. [1] To be included in future deliveries.
-
+ users to execute the benchmarking. For complete description of the methodology,
+ please refer to the ETSI document.
diff --git a/docs/testing/user/userguide/03-architecture.rst b/docs/testing/user/userguide/03-architecture.rst
index 08e1b2f2..bdc51d3f 100755..100644
--- a/docs/testing/user/userguide/03-architecture.rst
+++ b/docs/testing/user/userguide/03-architecture.rst
@@ -37,8 +37,8 @@ validating the sample VNFs through OPEN SOURCE VNF approximations and test tools
The VNFs belongs to this project are never meant for field deployment.
All the VNF source code part of this project requires Apache License Version 2.0.
-Supported deployment:
-----------------------
+Supported deployment
+--------------------
* Bare-Metal - All VNFs can run on a Bare-Metal DUT
* Standalone Virtualization(SV): All VNFs can run on SV like VPP as switch, ovs,
ovs-dpdk, srioc
@@ -47,7 +47,6 @@ Supported deployment:
VNF supported
-------------
- Carrier Grade Network Address Translation (CG-NAT) VNF
- ::
The Carrier Grade Network Address and port Translation (vCG-NAPT) is a
VNF approximation extending the life of the service providers IPv4 network
infrastructure and mitigate IPv4 address exhaustion by using address and
@@ -55,23 +54,19 @@ VNF supported
It also supports the connectivity between the IPv6 access network to
IPv4 data network using the IPv6 to IPv4 address translation and vice versa.
- Firewall (vFW) VNF
- ::
The Virtual Firewall (vFW) is a VNF approximation serving as a state full
L3/L4 packet filter with connection tracking enabled for TCP, UDP and ICMP.
The VNF could be a part of Network Services (industry use-cases) deployed
to secure the enterprise network from un-trusted network.
- Access Control List (vACL) VNF
- ::
The vACL vNF is implemented as a DPDK application using VNF Infrastructure
Library (VIL). The VIL implements common VNF internal, optimized for
Intel Architecture functions like load balancing between cores, IPv4/IPv6
stack features, and interface to NFV infrastructure like OVS or SRIOV.
- UDP_Replay
- ::
The UDP Replay is implemented as a DPDK application using VNF Infrastructure
Library (VIL). Performs as a refelector of all the traffic on given port.
- Prox - Packet pROcessing eXecution engine.
- ::
Packet pROcessing eXecution Engine (PROX) which is a DPDK application.
PROX can do operations on packets in a highly configurable manner.
The PROX application is also displaying performance statistics that can
@@ -142,14 +137,15 @@ The following features were verified by SampleVNF test cases:
Test Framework
--------------
-.. _Yardstick_NSB: http://artifacts.opnfv.org/yardstick/docs/testing_user_userguide/index.html#document-13-nsb-overview
+.. _Yardstick_NSB: http://artifacts.opnfv.org/yardstick/docs/testing_user_userguide/index.html#document-11-nsb-overview
+.. _ETSI GS NFV-TST 001: https://portal.etsi.org/webapp/workprogram/Report_WorkItem.asp?WKI_ID=46009
SampleVNF Test Infrastructure (NSB (Yardstick_NSB_)) in yardstick helps to facilitate
consistent/repeatable methodologies for characterizing & validating the
sample VNFs (:term:`VNF`) through OPEN SOURCE VNF approximations.
-Network Service Benchmarking in yardstick framework follows ETSI GS NFV-TST001_
+Network Service Benchmarking in yardstick framework follows `ETSI GS NFV-TST 001`_
to verify/characterize both :term:`NFVI` & :term:`VNF`
For more inforamtion refer, Yardstick_NSB_
diff --git a/docs/testing/user/userguide/03-installation.rst b/docs/testing/user/userguide/03-installation.rst
new file mode 100644
index 00000000..4407b276
--- /dev/null
+++ b/docs/testing/user/userguide/03-installation.rst
@@ -0,0 +1,162 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation and others.
+
+SampleVNF Installation
+======================
+.. _RapidScripting: https://wiki.opnfv.org/display/SAM/Rapid+scripting
+.. _XtestingDocumentation: https://xtesting.readthedocs.io/en/latest/
+
+Abstract
+--------
+The installation procedures described below will result in the deployment of
+all SW components needed to run the benchmarking procedures as defined in ETSI
+GS NFV-TST009 on top of an NFVI instance that is the subject of this characterization.
+Xtesting in combination with the rapid scripts and the PROX tool will be used to achieve this.
+
+The steps needed to run the benchmarking are:
+ 1) Identify a machine on which you will install the containers to support the testing
+ 2) Clone the samplevnf project on that machine
+ 3) Deploy the testing VMs (hosting PROX tool) (Or containers)
+ 4) Deploy your own Xtesting toolchain.
+ 5) Build the test container that will drive the TST009 testing
+ 6) Publish your container on your local repository
+ 7) Execute the testing
+
+In this chapter, we will cover the first 6 installation steps.
+
+Prerequisites
+-------------
+
+Supported Test setup
+^^^^^^^^^^^^^^^^^^^^
+The device under test (DUT) is an NFVI instance on which we can deploy PROX instances.
+A PROX instance is a machine that:
+
+ * has a management interface that can be reached from the test container
+ * has one or more data plane interfaces on a dataplane network.
+ * can be a container, a VM or a bare metal machine. We just need to be able to ssh into the
+ PROX machine from the test container.
+ * is optimized for data plane traffic.
+ * will measure the throughput that is offered through its dataplane interface(s)
+
+There are no requirements on the NFVI instance itself. Of course, the measured throughput will
+depend heavily on the NFVI characteristics.
+In this release, we are supporting an automated deployment of the PROX instance on an NFVI that
+provides the OpenStack Heat interface. You could also deploy the PROX instances using other
+mechanisms. As long as you provide the necessary files describing these instances, the execution
+of the test can also be done automatically (steps 4-7) and hence be executed on different DUTs,
+e.g. VMWare, K8s, bare metal, ...
+
+Below is the basic picture of the deployment needed for the testing.
+
+.. image:: images/rapid.png
+ :width: 800px
+ :alt: supported topology
+
+Different test scenarios can now be executed by deploying the PROX machines on different systems:
+ * The generator machine could be deployed on a well defined compute node, that has network access
+ to the other nodes through the TOR. The generated traffic is very similar to external traffic.
+ * The Generator and the Swap instance could be on the same compute node to test E-W traffic between
+   2 instances on the same compute node
+ * The Generator and the Swap instance could be on a different compute node
+
+Many VMs can be deployed before the test is running: each test case can then use different pairs of
+PROX instances to test all the above scenarios
+
+Hardware & Software Ingredients
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The only requirement is to have the PROX instances running. There are no stringent requirements to be able
+to run the test. Of course, the dataplane performance will heavily depend on the underlying NFVI HW & SW
+
+Installation Steps
+------------------
+
+Step 1: Identify a machine on which you will install the containers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+This machine will need enough resources to install the Xtesting framework and needs to be enabled
+for containers.
+From a network point of view, it will need to have access to the PROX instances: That means it will need
+to be able to ssh into these machines and that the network also needs to allow for TCP port 8474 traffic.
+
+When using the automation to create the VM through the Heat Stack API, this machine also needs to be able
+to execute the OpenStack API. Alternatively, the creation of the VMs can be executed on another machine, but
+this will involve some manual file copying.
+
+Step 2: Clone the samplevnf project on that machine
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ git clone https://git.opnfv.org/samplevnf
+
+Go to the relevant directory in this repository: samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/
+
+Step 3: Deploy the testing VMs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+In this step, we will be deploying 2 or more instances that host the PROX tool. At the end of this step,
+the instances will be running and an environment file (default name: rapid.env) will be created. This file
+will have all information needed to run the actual test. You can do this step manually on all kinds of
+platforms (OpenStack, VMWare, K8s, bare metal, ...), but the automation tools described in the rest of this
+paragraph will be using OpenStack Heat yaml files.
+First, a PROX qcow2 image needs to be downloaded.
+
+.. code-block:: console
+
+ wget http://artifacts.opnfv.org/samplevnf/jerma/prox_jerma.qcow2
+
+This image can also be created manually by following instructions in RapidScripting_,
+in the section "Creating an image"
+Now upload this image to Openstack:
+
+.. code-block:: console
+
+ openstack image create --disk-format qcow2 --container-format bare --file prox_jerma.qcow2 rapidVM
+
+Now run createrapid.sh to create the stack. This process takes the config_file as input. Details can be found in
+RapidScripting_, in the section "Deploying the VMs"
+
+.. code-block:: console
+
+ ./createrapid.sh
+
+At the end of this step, VMs should be running and the rapid.env and rapid_key.pem files should be available.
+
+Step 4: Deploy your own Xtesting toolchain
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Install Xtesting as described in XtestingDocumentation_.
+First goto the xtesting directory in samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting (this was cloned
+in step 2)
+
+.. code-block:: console
+
+ virtualenv xtesting
+ . xtesting/bin/activate
+ pip install ansible
+ ansible-galaxy install collivier.xtesting
+ ansible-playbook site.yaml
+ deactivate
+ rm -r xtesting
+
+Step 5: Build the test container that will drive the TST009 testing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Go to the directory samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/xtesting
+While building this container, some files will be copied into the container image. Two of these files
+are generated by Step 3: rapid.env and rapid_key.pem and reside in the samplevnf/VNFs/DPPD-PROX/helper-scripts/rapid/.
+Please copy them into the xtesting directory.
+The 3rd file that will be copied is testcases.yaml.
+
+.. code-block:: console
+
+ docker build -t 127.0.0.1:5000/rapidxt .
+
+Step 6: Publish your container on your local repository
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ docker push 127.0.0.1:5000/rapidxt
+
+You are now ready to execute the testing
diff --git a/docs/testing/user/userguide/04-installation.rst b/docs/testing/user/userguide/04-installation.rst
deleted file mode 100644
index e54243cb..00000000
--- a/docs/testing/user/userguide/04-installation.rst
+++ /dev/null
@@ -1,230 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International
-.. License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) OPNFV, Intel Corporation and others.
-
-SampleVNF Installation
-======================
-
-Abstract
---------
-
-This project provides a placeholder for various sample VNF
-(Virtual Network Function (:term:`VNF`)) development which includes example
-reference architecture and optimization methods related to VNF/Network service
-for high performance VNFs.
-The sample VNFs are Open Source approximations* of Telco grade VNF’s using
-optimized VNF + NFVi Infrastructure libraries, with Performance Characterization
-of Sample† Traffic Flows.
-
-::
-
- * Not a commercial product. Encourage the community to contribute and close the feature gaps.
- † No Vendor/Proprietary Workloads
-
-SampleVNF supports installation directly in Ubuntu. The installation procedure
-are detailed in the sections below.
-
-The steps needed to run SampleVNF are:
- 1) Install and Build SampleVNF.
- 2) Deploy the VNF on the target and modify the config based on the Network under test
- 3) Run the traffic generator to generate the traffic.
-
-Prerequisites
--------------
-
-Supported Test setup
-^^^^^^^^^^^^^^^^^^^^^
-The device under test (DUT) consists of a system following;
- * A single or dual processor and PCH chip, except for System on Chip (SoC) cases
- * DRAM memory size and frequency (normally single DIMM per channel)
- * Specific Intel Network Interface Cards (NICs)
- * BIOS settings noting those that updated from the basic settings
- * DPDK build configuration settings, and commands used for tests
-Connected to the DUT is an IXIA* or Software Traffic generator like pktgen or TRex,
-simulation platform to generate packet traffic to the DUT ports and
-determine the throughput/latency at the tester side.
-
-Below are the supported/tested (:term:`VNF`) deployment type.
-
-.. image:: images/deploy_type.png
- :width: 800px
- :alt: SampleVNF supported topology
-
-Hardware & Software Ingredients
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-SUT requirements:
-
-::
-
- +-----------+------------------+
- | Item | Description |
- +-----------+------------------+
- | Memory | Min 20GB |
- +-----------+------------------+
- | NICs | 2 x 10G |
- +-----------+------------------+
- | OS | Ubuntu 16.04 LTS |
- +-----------+------------------+
- | kernel | 4.4.0-34-generic|
- +-----------+------------------+
- | DPDK | 17.02 |
- +-----------+------------------+
-
-Boot and BIOS settings:
-
-::
-
- +------------------+---------------------------------------------------+
- | Boot settings | default_hugepagesz=1G hugepagesz=1G hugepages=16 |
- | | hugepagesz=2M hugepages=2048 isolcpus=1-11,22-33 |
- | | nohz_full=1-11,22-33 rcu_nocbs=1-11,22-33 |
- | | Note: nohz_full and rcu_nocbs is to disable Linux*|
- | | kernel interrupts, and it’s import |
- +------------------+---------------------------------------------------+
- |BIOS | CPU Power and Performance Policy <Performance> |
- | | CPU C-state Disabled |
- | | CPU P-state Disabled |
- | | Enhanced Intel® Speedstep® Tech Disabled |
- | | Hyper-Threading Technology (If supported) Enable |
- | | Virtualization Techology Enable |
- | | Coherency Enable |
- | | Turbo Boost Disabled |
- +------------------+---------------------------------------------------+
-
-Network Topology for testing VNFs
----------------------------------
-The ethernet cables should be connected between traffic generator and the VNF server (BM,
-SRIOV or OVS) setup based on the test profile.
-
-The connectivity could be
-
-1) Single port pair : One pair ports used for traffic
-
-::
-
- e.g. Single port pair link0 and link1 of VNF are used
- TG:port 0 <------> VNF:Port 0
- TG:port 1 <------> VNF:Port 1
-
- For correalted traffic, use below configuration
- TG_1:port 0 <------> VNF:Port 0
- VNF:Port 1 <------> TG_2:port 0 (UDP Replay)
- (TG_2(UDP_Replay) reflects all the traffic on the given port)
-
-2) Multi port pair : More than one pair of traffic
-
-::
-
- e.g. Two port pair link 0, link1, link2 and link3 of VNF are used
- TG:port 0 <------> VNF:Port 0
- TG:port 1 <------> VNF:Port 1
- TG:port 2 <------> VNF:Port 2
- TG:port 3 <------> VNF:Port 3
-
- For correalted traffic, use below configuration
- TG_1:port 0 <------> VNF:Port 0
- VNF:Port 1 <------> TG_2:port 0 (UDP Replay)
- TG_1:port 1 <------> VNF:Port 2
- VNF:Port 3 <------> TG_2:port 1 (UDP Replay)
- (TG_2(UDP_Replay) reflects all the traffic on the given port)
-
-* Bare-Metal
- Refer: http://fast.dpdk.org/doc/pdf-guides/ to setup the DUT for VNF to run
-
-* Standalone Virtualization - PHY-VM-PHY
- * SRIOV
- Refer below link to setup sriov
- https://software.intel.com/en-us/articles/using-sr-iov-to-share-an-ethernet-port-among-multiple-vms
-
- * OVS_DPDK
- Refer below link to setup ovs-dpdk
- http://docs.openvswitch.org/en/latest/intro/install/general/
- http://docs.openvswitch.org/en/latest/intro/install/dpdk/
-
- * Openstack
- Use any OPNFV installer to deploy the openstack.
-
-
-Build VNFs on the DUT:
-----------------------
-
-1) Clone sampleVNF project repository - git clone https://git.opnfv.org/samplevnf
-
-Auto Build - Using script to build VNFs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- * Interactive options:
- ::
-
- ./tools/vnf_build.sh -i
- Follow the steps in the screen from option [1] –> [10] and
- select option [9] to build the vnfs.
- It will automatically download selected DPDK version and any
- required patches and will setup everything and build VNFs.
-
- Options [8], If RestAPI feature is needed install 'civetweb'
-
- Following are the options for setup:
- ----------------------------------------------------------
- Step 1: Environment setup.
- ----------------------------------------------------------
- [1] Check OS and network connection
- [2] Select DPDK RTE version
-
- ----------------------------------------------------------
- Step 2: Download and Install
- ----------------------------------------------------------
- [3] Agree to download
- [4] Download packages
- [5] Download DPDK zip
- [6] Build and Install DPDK
- [7] Setup hugepages
- [8] Download and Build civetweb
-
- ----------------------------------------------------------
- Step 3: Build VNFs
- ----------------------------------------------------------
- [9] Build all VNFs (vACL, vCGNAPT, vFW, UDP_Replay, DPPD-PROX)
-
- [10] Exit Script
-
- * non-Interactive options:
- ::
- ./tools/vnf_build.sh -s -d=<dpdk version eg 17.02>
-
-Manual Build
-^^^^^^^^^^^^
-
- ::
-
- 1. Download DPDK supported version from dpdk.org
- * http://dpdk.org/browse/dpdk/snapshot/dpdk-$DPDK_RTE_VER.zip
- * unzip dpdk-$DPDK_RTE_VER.zip and apply dpdk patches only in case of 16.04 (Not required for other DPDK versions)
- * cd dpdk
- * make config T=x86_64-native-linuxapp-gcc O=x86_64-native-linuxapp-gcc
- * cd x86_64-native-linuxapp-gcc
- * make -j
- 2. Add this to Go to /etc/default/grub configuration file to setup hugepages.
- * Append “default_hugepagesz=1G hugepagesz=1G hugepages=8 hugepagesz=2M hugepages=2048” to the GRUB_CMDLINE_LINUX entry.
- 3. Setup Environment Variable
- * export RTE_SDK=<samplevnf>/dpdk
- * export RTE_TARGET=x86_64-native-linuxapp-gcc
- * export VNF_CORE=<samplevnf> or using ./tools/setenv.sh
- 4. Build SampleVNFs e.g, vACL
- * cd <samplevnf>/VNFs/vACL
- * make clean
- * make
- * The vACL executable will be created at the following location
- <samplevnf>/VNFs/vACL/build/vACL
-
-2) Standalone virtualization/Openstack:
-
- Build VM image from script in yardstick
- ::
- 1) git clone https://git.opnfv.org/samplevnf
- 2) cd samplevnf and run
- ./tools/samplevnf-img-dpdk-samplevnf-modify tools/ubuntu-server-cloudimg-samplevnf-modify.sh
- Image available in: /tmp/workspace/samplevnf/xenial-server-cloudimg-amd64-disk1.img
-
-To run VNFs. Please refer chapter `05-How_to_run_SampleVNFs.rst`
diff --git a/docs/testing/user/userguide/04-running_the_test.rst b/docs/testing/user/userguide/04-running_the_test.rst
new file mode 100644
index 00000000..3d3a1e6c
--- /dev/null
+++ b/docs/testing/user/userguide/04-running_the_test.rst
@@ -0,0 +1,226 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International
+.. License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) OPNFV, Intel Corporation and others.
+
+================
+Running the test
+================
+.. _NFV-TST009: https://docbox.etsi.org/ISG/NFV/open/Publications_pdf/Specs-Reports/NFV-TST%20009v3.2.1%20-%20GS%20-%20NFVI_Benchmarks.pdf
+.. _TST009_Throughput_64B_64F.test: https://github.com/opnfv/samplevnf/blob/master/VNFs/DPPD-PROX/helper-scripts/rapid/tests/TST009_Throughput_64B_64F.test
+.. _rapid_location: https://github.com/opnfv/samplevnf/blob/master/VNFs/DPPD-PROX/helper-scripts/rapid/
+
+Overview
+--------
+A default test will be run automatically when you launch the testing. The
+details and definition of that test are defined in file
+TST009_Throughput_64B_64F.test_.
+
+We will discuss the sections of such a test file and how this can be changed to
+accommodate the testing you want to execute. This will be done by creating your
+own test file and making sure it becomes part of your testcases.yaml, as will
+be shown below.
+
+As the name of the default test file suggests, the test will find the
+throughput, latency and packet loss according to NFV-TST009_, for packets that
+are 64 bytes long and for 64 different flows.
+
+Test File Description
+---------------------
+The test file has multiple sections. The first section is a generic section
+called TestParameters. Then there are 1 or more sections describing the test
+machines we will be using in the test. The sections are named TestMx, where x
+is a number (starting with 1). The tests to be executed are described in a
+section called testy, where y is the number of the test to be executed,
+starting with 1. In this automated testing driven by Xtesting, we will
+typically only run 1 test.
+
+TestParameters
+^^^^^^^^^^^^^^
+In this section, the name of the test is specified. This is only used in the
+reporting and has no influence on the actual testing.
+
+.. code-block:: console
+
+ name = Rapid_ETSINFV_TST009
+
+The number of test that will be executed by this run and that will be described
+in the [testy] sections, is defined by the number_of_tests parameter. In the
+Xtesting framework that we are using here, this will typically be set to 1.
+
+.. code-block:: console
+
+ number_of_tests = 1
+
+The total number of machines to be used in this testing will be defined by the
+parameter total_number_of_test_machines. The function that these machines have
+in this test will be described in the [TestMx] section. Typically, this number
+will be set to 2, but many more machines can participate in a test.
+
+.. code-block:: console
+
+ total_number_of_test_machines = 2
+
+lat_percentile is a variable that is setting which percentile to use during the
+course of this test. This will be used to report the percentile round trip
+latency and is a better measurement for the high latencies during this test than
+the maximum latency which will also be reported. Note that we also report the
+total round trip latency histogram.
+
+.. code-block:: console
+
+ lat_percentile = 99
+
+
+TestMx
+^^^^^^
+In the TestMx sections, where x denotes the index of the machine, the function
+of the machine in the testing, will be described. The machine can be defined as
+a generator, or as a packet reflector (swap function). The machines can be any
+machine that is created upfront (See step 3 of the installation steps). Other
+functions can also be executed by the test machines and examples of test files
+can be found in rapid_location_.
+
+The first parameter is the name of the machine and is only used for referencing
+the machine. This will be the name of the PROX instance and will be shown in
+case you run the PROX UI. In this automated testing, this will not be
+visible.
+
+The config_file parameter defines which PROX config file is used by the PROX
+program and what PROX will be
+doing. For a generator, this will typically be gen.cfg. Multiple cfg files
+exist in the rapid_location_.
+
+The dest_vm parameter is used by a generator to find out to
+which VM he needs to send the packets. In the example below, the packets will be
+sent to TestM2.
+
+The gencores parameter defines a list of cores to be used for the generator tasks.
+Note that if you specify more than 1 core, the interface will need to support as
+many tx queues as there are generator cores.
+
+The latcores parameter specifies a
+list of cores to be used by the latency measurement tasks. You need as many rx
+queues on the interface as specified in the latcores parameter.
+
+The default value for the
+bucket_size_exp parameter is 12. It is also its minimum value. In case most of
+the latency measurements in the histogram are falling in the last bucket, this
+number needs to be increased. Every time you increase this number by 1, the
+bucket size for the latency histogram is multiplied by 2. There are 128 buckets
+in the histogram.
+
+cores is a parameter that will be used by non-generator configurations that
+don't need a distinction between generator and latency cores (e.g. swap.cfg).
+
+Changing these parameters requires in depth knowledge of the PROX tool and is
+not something to start with.
+
+.. code-block:: console
+
+ name = Generator
+ config_file = gen.cfg
+ dest_vm = 2
+ gencores = [1]
+ latcores = [3]
+ #bucket_size_exp = 12
+
+testy
+^^^^^
+In the testy sections, where y denotes the index of the test, the test that will
+be executed on the machines that were specified in the TestMx sections, will be
+described. Using Xtesting, we will typically only use 1 test.
+Parameter test is defining which test needs to be run. This is a hardcoded
+string and can only be one of the following ['flowsizetest', 'TST009test',
+'fixed_rate', 'increment_till_fail', 'corestats', 'portstats', 'impairtest',
+'irqtest', 'warmuptest']. In this project, we will use the TST009test testing.
+For examples of the other tests, please check out the other test files in
+rapid_location_.
+
+The pass_threshold parameter defines the success criterion for the test. When
+this test uses multiple combinations of packet size and flows, all combinations
+must be meeting the same threshold. If one of the combinations fails, the test
+will be reported as failed.
+The threshold is expressed in Mpps.
+
+The imixs parameter defines the packet sizes that will be used. Each element in
+the imixs list will result in a separate test. Each element is on its turn a
+list of packet sizes which will be used during one test execution. If you only
+want to test 1 imix size, define imixs with only one element. For each element in
+the imixs list, the generator will iterate over the packet lengths and send them
+out in the order as specified in the list. An example of an imix list is [128,
+256, 64, 64, 128]. In this case, 40% of the packets will have a size of 64
+bytes, 40% will have a packet size of 128 and 20% will have a packet size of
+256. When using this with Xtesting, we will typically only use 1 imix. When
+needing results for more sizes, one should create a specific test file per size
+and launch the different tests using Xtesting.
+
+The flows parameter is a list of flow sizes. For each flow size, a test will be
+run with the specified amount of flows. The flow size needs to be a power of 2,
+max 2^30. If not a power of 2, we will use the lowest power of 2 that is larger
+than the requested number of flows. e.g. 9 will result in 16 flows.
+Same remark as for the imixs parameter: we will only use one element in the
+flows list. When more flows need to be tested, create a different test file and
+launch it using Xtesting.
+
+The drop_rate_threshold parameter specifies the maximum ratio of packets that
+can be dropped while still considering
+the test run as successful. Note that a value of 0 means an absolute zero packet
+loss: even if we lose 1 packet during a certain step in a test run, it will be
+marked as failed.
+
+The lat_avg_threshold, lat_perc_threshold, lat_max_threshold parameters
+are thresholds to define
+the maximal acceptable round trip latency to mark the test step as successful.
+You can set this threshold for the average, the percentile and the maximum
+latency. Which percentile is being used is defined in the TestParameters section.
+All these thresholds are expressed in micro-seconds. You can also put the value
+to inf, which means the threshold will never be reached and hence the threshold
+value is not being used to define if the run is successful or not.
+
+The MAXr, MAXz, MAXFramesPerSecondAllIngress and StepSize parameters are defined in
+NFV-TST009_ and are used to control the binary search algorithm.
+
+The ramp_step variable controls the ramping of the generated traffic. When
+not specified, the requested traffic for each step in the testing will be
+applied immediately. If specified, the generator will slowly go to the requested
+speed by increasing the traffic each second with the value specified in this
+parameter till it reaches the requested speed. This parameter is expressed in
+100Mb/s.
+
+.. code-block:: console
+
+ pass_threshold=0.001
+ imixs=[[128, 256, 64, 64, 128]]
+ flows=[64]
+ drop_rate_threshold = 0
+ lat_avg_threshold = inf
+ lat_perc_threshold = inf
+ lat_max_threshold = inf
+ MAXr = 3
+ MAXz = 5000
+ MAXFramesPerSecondAllIngress = 12000000
+ StepSize = 10000
+ #ramp_step = 1
+
+Modifying the test
+------------------
+In case you want to modify the parameters as specified in
+TST009_Throughput_64B_64F.test_, it is best to create your own test file. Your
+test file will need to be uploaded to the test container. Hence you will have to
+rebuild your container, and add an extra copy command to the Dockerfile so that
+your new test file will be available in the container.
+Then you will need to modify the testcases.yaml file. One of the args that you
+can specify is the test_file. Put your newly created test file as the new value
+for this argument.
+Now build and publish your test container as specified in steps 5 & 6 of the
+installation procedure.
+
+Note that other arguments than test_file can be specified in testcases.yaml. For
+a list of arguments, please check out the test_params dictionary in the
+rapid_defaults.py that you can find in rapid_location_.
+It is advised not to change these parameters unless you have an in-depth
+knowledge of the code.
+The only 2 arguments that can be changed are the test_file which was already
+discussed and the runtime argument. This argument defines how long each test run
+will take and is expressed in seconds.
diff --git a/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst b/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst
index 7ba25fe1..28da0ebd 100644
--- a/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst
+++ b/docs/testing/user/userguide/05-How_to_run_SampleVNFs.rst
@@ -17,6 +17,7 @@ The device under test (DUT) consists of a system following;
* Specific Intel Network Interface Cards (NICs)
* BIOS settings noting those that updated from the basic settings
* DPDK build configuration settings, and commands used for tests
+
Connected to the DUT is an IXIA* or Software Traffic generator like pktgen or TRex,
simulation platform to generate packet traffic to the DUT ports and
determine the throughput/latency at the tester side.
@@ -103,17 +104,16 @@ The connectivity could be
(TG_2(UDP_Replay) reflects all the traffic on the given port)
* Bare-Metal
- Refer: http://fast.dpdk.org/doc/pdf-guides/ to setup the DUT for VNF to run
+ Refer: http://fast.dpdk.org/doc/pdf-guides/ to setup the DUT for VNF to run
* Standalone Virtualization - PHY-VM-PHY
+
* SRIOV
- Refer below link to setup sriov
- https://software.intel.com/en-us/articles/using-sr-iov-to-share-an-ethernet-port-among-multiple-vms
+ https://software.intel.com/en-us/articles/using-sr-iov-to-share-an-ethernet-port-among-multiple-vms
* OVS_DPDK
- Refer below link to setup ovs-dpdk
- http://docs.openvswitch.org/en/latest/intro/install/general/
- http://docs.openvswitch.org/en/latest/intro/install/dpdk/
+ http://docs.openvswitch.org/en/latest/intro/install/general/
+ http://docs.openvswitch.org/en/latest/intro/install/dpdk/
* Openstack
Use any OPNFV installer to deploy the openstack.
@@ -132,19 +132,21 @@ Step 0: Preparing hardware connection
Step 1: Setting up Traffic generator (TRex)
TRex Software preparations
- **************************
* Install the OS (Bare metal Linux, not VM!)
* Obtain the latest TRex package: wget https://trex-tgn.cisco.com/trex/release/latest
* Untar the package: tar -xzf latest
* Change dir to unzipped TRex
* Create config file using command: sudo python dpdk_setup_ports.py -i
+
In case of Ubuntu 16 need python3
+
See paragraph config creation for detailed step-by-step
+
(Refer: https://trex-tgn.cisco.com/trex/doc/trex_stateless_bench.html)
Build SampleVNFs
------------------
+----------------
Step 2: Procedure to build SampleVNFs
@@ -487,7 +489,7 @@ step 4: Run Test using traffic geneator
UDP_Replay - How to run
-----------------------------------------
+-----------------------
Step 3: Bind the datapath ports to DPDK
@@ -532,7 +534,7 @@ step 4: Run Test using traffic geneator
For more details refer: https://trex-tgn.cisco.com/trex/doc/trex_stateless_bench.html
PROX - How to run
-------------------
+-----------------
Description
^^^^^^^^^^^
@@ -654,7 +656,7 @@ PROX COMMANDS AND SCREENS
+----------------------------------------------+---------------------------------------------------------------------------+----------------------------+
| version | Show version | |
+----------------------------------------------+---------------------------------------------------------------------------+----------------------------+
- | port_stats <port id> | Print rate for no_mbufs, ierrors, rx_bytes, tx_bytes, rx_pkts, | |
+ | port_stats <port id> | Print rate for no_mbufs, ierrors, rx_bytes, tx_bytes, rx_pkts, | |
| | tx_pkts and totals for RX, TX, no_mbufs ierrors for port <port id> | |
+----------------------------------------------+---------------------------------------------------------------------------+----------------------------+
@@ -941,7 +943,7 @@ PROX Compiation installation
* cd samplevnf
* export RTE_SDK=`pwd`/dpdk
* export RTE_TARGET=x86_64-native-linuxapp-gcc
-* git clone http://dpdk.org/git/dpdk
+* git clone git://dpdk.org/dpdk
* cd dpdk
* git checkout v17.05
* make install T=$RTE_TARGET
diff --git a/docs/testing/user/userguide/06-How_to_use_REST_api.rst b/docs/testing/user/userguide/06-How_to_use_REST_api.rst
index b8c0cbea..ba768d78 100644
--- a/docs/testing/user/userguide/06-How_to_use_REST_api.rst
+++ b/docs/testing/user/userguide/06-How_to_use_REST_api.rst
@@ -3,12 +3,12 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) opnfv, national center of scientific research "demokritos" and others.
-========================================================
+========
REST API
-========================================================
+========
Introduction
----------------
+------------
As the internet industry progresses creating REST API becomes more concrete
with emerging best Practices. RESTful web services don’t follow a prescribed
standard except for the protocol that is used, which is HTTP; it's important
@@ -26,7 +26,7 @@ Here are important points to be considered:
always same no matter how many times these operations are invoked.
* PUT and POST operation are nearly same with the difference lying
only in the result where PUT operation is idempotent and POST
- operation can cause different result.
+ operation can cause different result.
REST API in SampleVNF
@@ -45,7 +45,7 @@ REST api on VNF’s will help adapting with the new automation techniques
being adapted in yardstick.
Web server integration with VNF’s
-----------------------------------
+---------------------------------
In order to implement REST api’s in VNF one of the first task is to
identify a simple web server that needs to be integrated with VNF’s.
@@ -150,7 +150,7 @@ API Usage
---------
Run time Usage
-^^^^^^^^^^^^^^
+==============
An application(say vFW) with REST API support is run as follows
with just PORT MASK as input. The following environment variables
@@ -182,6 +182,7 @@ samplevnf directory).
2. Check the Link IP's using the REST API (vCGNAPT/vACL/vFW)
::
+
e.g curl <IP>/vnf/config/link
This would indicate the number of links enabled. You should enable all the links
@@ -194,6 +195,7 @@ samplevnf directory).
3. Now that links are enabled we can configure IP's using link method as follows (vCGNAPT/vACL/vFW)
::
+
e.g curl -X POST -H "Content-Type:application/json" -d '{"ipv4":"<IP to be configured>","depth":"24"}'
http://<IP>/vnf/config/link/0
curl -X POST -H "Content-Type:application/json" -d '{"ipv4":"IP to be configured","depth":"24"}'
@@ -207,6 +209,7 @@ samplevnf directory).
4. Adding arp entries we can use this method (vCGNAPT/vACL/vFW)
::
+
/vnf/config/arp
e.g
@@ -220,15 +223,17 @@ samplevnf directory).
5. Adding route entries we can use this method (vCGNAPT/vACL/vFW)
::
+
/vnf/config/route
e.g curl -X POST -H "Content-Type:application/json" -d '{"type":"net", "depth":"8", "nhipv4":"202.16.100.20",
- "portid":"0"}' http://10.223.166.240/vnf/config/route
+ "portid":"0"}' http://10.223.166.240/vnf/config/route
curl -X POST -H "Content-Type:application/json" -d '{"type":"net", "depth":8", "nhipv4":"172.16.100.20",
"portid":"1"}' http://10.223.166.240/vnf/config/route
5. In order to load the rules a script file needs to be posting a script.(vACL/vFW)
::
+
/vnf/config/rules/load
Typical example for loading a script file is shown below
@@ -239,12 +244,14 @@ samplevnf directory).
6. The following REST api's for runtime configuring through a script (vCGNAPT Only)
::
+
/vnf/config/rules/clear
/vnf/config/nat
/vnf/config/nat/load
7. For debug purpose following REST API's could be used as described above.(vCGNAPT/vACL/vFW)
::
+
/vnf/dbg
e.g curl http://10.223.166.240/vnf/config/dbg
@@ -258,10 +265,12 @@ samplevnf directory).
8. For stats we can use the following method (vCGNAPT/vACL/vFW)
::
+
/vnf/stats
e.g curl <IP>/vnf/stats
9. For quittiong the application (vCGNAPT/vACL/vFW)
::
+
/vnf/quit
e.g curl <IP>/vnf/quit
diff --git a/docs/testing/user/userguide/07-Config_files.rst b/docs/testing/user/userguide/07-Config_files.rst
index d5564e8d..f96462e1 100644
--- a/docs/testing/user/userguide/07-Config_files.rst
+++ b/docs/testing/user/userguide/07-Config_files.rst
@@ -380,7 +380,7 @@ This configuration doesn't require LOADB and TXRX pipelines
vACL Config files
-----------------
+-----------------
The reference configuration files explained here are for Software and Hardware
loadbalancing with IPv4 traffic type and single port pair.
diff --git a/docs/testing/user/userguide/images/rapid.png b/docs/testing/user/userguide/images/rapid.png
new file mode 100644
index 00000000..1c9b05bd
--- /dev/null
+++ b/docs/testing/user/userguide/images/rapid.png
Binary files differ
diff --git a/docs/testing/user/userguide/index.rst b/docs/testing/user/userguide/index.rst
index 8d797627..5cc2c5e1 100644
--- a/docs/testing/user/userguide/index.rst
+++ b/docs/testing/user/userguide/index.rst
@@ -10,15 +10,8 @@ SampleVNF User Guide
.. toctree::
:maxdepth: 4
- :numbered:
- 01-introduction
- 02-methodology
- 03-architecture
- 04-installation
- 05-How_to_run_SampleVNFs
- 06-How_to_use_REST_api
- 07-Config_files
- 08-CLI_Commands_Reference
- glossary
- references
+ 01-introduction.rst
+ 02-methodology.rst
+ 03-installation.rst
+ 04-running_the_test.rst
diff --git a/docs/testing/user/userguide/references.rst b/docs/testing/user/userguide/references.rst
index 30f6e604..f00a872c 100644
--- a/docs/testing/user/userguide/references.rst
+++ b/docs/testing/user/userguide/references.rst
@@ -11,8 +11,8 @@ References
OPNFV
=====
-* Yardstick wiki: https://wiki.opnfv.org/yardstick
-* SampleVNF wiki: https://wiki.opnfv.org/samplevnf
+* Yardstick wiki: https://wiki-old.opnfv.org/display/yardstick
+* SampleVNF wiki: https://wiki-old.opnfv.org/display/SAM
References used in Test Cases
=============================
@@ -22,7 +22,7 @@ References used in Test Cases
* DPDK: http://dpdk.org
* DPDK supported NICs: http://dpdk.org/doc/nics
* fdisk: http://www.tldp.org/HOWTO/Partition/fdisk_partitioning.html
-* fio: http://www.bluestop.org/fio/HOWTO.txt
+* fio: https://github.com/axboe/fio
* free: http://manpages.ubuntu.com/manpages/trusty/en/man1/free.1.html
* iperf3: https://iperf.fr/
* Lmbench man-pages: http://manpages.ubuntu.com/manpages/trusty/lat_mem_rd.8.html
diff --git a/rapidvm/README.rst b/rapidvm/README.rst
new file mode 100644
index 00000000..9ab02f10
--- /dev/null
+++ b/rapidvm/README.rst
@@ -0,0 +1,38 @@
+RAPID VM IMAGE
+++++++++++++++
+
+This repo will build a centos 7 image with dpdk and prox installed.
+Optimizations for dpdk will also be done.
+
+BUILD INSTRUCTIONS
+==================
+
+Build the image
+---------------
+- cd dib
+- update the version number for the image (if needed) by modifying __version__ in build-image.sh
+- setup your http_proxy if needed
+- bash build-image.sh
+
+IMAGE INSTANCE AND CONFIG
+=========================
+
+VM Requirements
+---------------
+The instance must be launched with:
+- 1 network interface for the management network
+- at least 1 interface for the dataplane networks
+- at least 4 vCPUs
+- 4 GB RAM
+- cpu pinning set to exclusive
+
+Auto-configuration
+------------------
+The rapid scripts will configure the prox instances and drive the testing.
+
+
+Hardcoded Username and Password
+--------------------------------
+In case of problems, you can ssh into the VM:
+- Username: rapid
+- Password: rapid
diff --git a/rapidvm/dib/build-image.sh b/rapidvm/dib/build-image.sh
new file mode 100755
index 00000000..23fe17ca
--- /dev/null
+++ b/rapidvm/dib/build-image.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A shell script to build the PROX VM image using diskimage-builder
+#
+usage() {
+ echo "Usage: $0 [-i image_name] [-g gs-url] [-v]"
+ echo " -i image_appendix image name to be pushed to google storage"
+ echo " -g gs_url url to store the image"
+ echo " -v verify only (build but do not push to google storage)"
+ echo " -w cache cache directory for disk-image-create"
+ exit 1
+}
+
+# set -e
+#default values
+image_appendix="test"
+workspace="/home/jenkins-ci/opnfv/slave_root/workspace"
+gs_url="artifacts.opnfv.org/samplevnf/images"
+verify_only=0
+while getopts i:g:vw: flag
+do
+ case "${flag}" in
+ i) image_appendix=${OPTARG};;
+ g) gs_url=${OPTARG};;
+ v) verify_only=1;;
+ w) workspace=${OPTARG};;
+ *) usage;exit 1;;
+ esac
+done
+echo "gs_url: $gs_url";
+echo "Verify only: $verify_only";
+image_name=rapid-${image_appendix}
+echo "image name: $image_name.qcow2"
+echo "workspace: $workspace"
+
+# install diskimage-builder
+python3 -m venv dib-rapid-venv
+. dib-rapid-venv/bin/activate
+pip3 install --upgrade pip
+pip3 install six
+pip3 install diskimage-builder
+pip3 install gsutil
+
+echo "Checking if image exists in google storage..."
+if command -v gsutil >/dev/null; then
+ if gsutil -q stat gs://$gs_url/$image_name.qcow2; then
+ echo "Image already exists at http://$gs_url/$image_name.qcow2"
+ fi
+ echo "Starting build..."
+ echo
+else
+ echo "Cannot check image availability in OPNFV artifact repository (gsutil not available)"
+fi
+
+# Add rapid elements directory to the DIB elements path
+export ELEMENTS_PATH=`pwd`/elements
+# canned user/password for direct login
+export DIB_DEV_USER_USERNAME=prox
+export DIB_DEV_USER_PASSWORD=prox
+export DIB_DEV_USER_PWDLESS_SUDO=Y
+# Set the cloud-init data sources (Ec2, ConfigDrive and OpenStack)
+export DIB_CLOUD_INIT_DATASOURCES="Ec2, ConfigDrive, OpenStack"
+# Use ELRepo to have latest kernel
+export DIB_USE_ELREPO_KERNEL=True
+echo "Building $image_name.qcow2..."
+cache=$workspace/cache
+mkdir $cache
+time disk-image-create -o $image_name --image-cache $cache centos7 cloud-init rapid vm
+
+ls -l $image_name.qcow2
+
+
+if [ $verify_only -eq 1 ]; then
+ echo "Image verification SUCCESS"
+ echo "NO upload to google storage (-v)"
+else
+ if command -v gsutil >/dev/null; then
+ echo "Uploading $image_name.qcow2..."
+ gsutil cp $image_name.qcow2 gs://$gs_url/$image_name.qcow2
+ echo "You can access image at http://$gs_url/$image_name.qcow2"
+ else
+ echo "Cannot upload new image to the OPNFV artifact repository (gsutil not available)"
+ exit 1
+ fi
+fi
+deactivate
+rm -r dib-rapid-venv
diff --git a/rapidvm/dib/elements/rapid/element-deps b/rapidvm/dib/elements/rapid/element-deps
new file mode 100644
index 00000000..c6be0aa3
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/element-deps
@@ -0,0 +1,5 @@
+vm
+cloud-init-datasources
+install-static
+package-installs
+devuser
diff --git a/rapidvm/dib/elements/rapid/package-installs.yaml b/rapidvm/dib/elements/rapid/package-installs.yaml
new file mode 100644
index 00000000..8b3a3cf3
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/package-installs.yaml
@@ -0,0 +1,20 @@
+deltarpm:
+yum-utils:
+git:
+wget:
+gcc:
+unzip:
+libpcap-devel:
+ncurses-devel:
+libedit-devel:
+lua-devel:
+kernel-devel:
+iperf3:
+pciutils:
+numactl-devel:
+vim:
+tuna:
+openssl-devel:
+wireshark:
+make:
+driverctl:
diff --git a/rapidvm/dib/elements/rapid/post-install.d/40-mlib b/rapidvm/dib/elements/rapid/post-install.d/40-mlib
new file mode 100755
index 00000000..34dc1b9c
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/40-mlib
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+MULTI_BUFFER_LIB_VER="0.52"
+BUILD_DIR="/opt/rapid"
+export AESNI_MULTI_BUFFER_LIB_PATH="${BUILD_DIR}/intel-ipsec-mb-${MULTI_BUFFER_LIB_VER}"
+# Downloading the Multi-buffer library. Note that the version to download is linked to the DPDK version being used
+pushd ${BUILD_DIR} > /dev/null 2>&1
+wget https://www.nasm.us/pub/nasm/releasebuilds/2.14.02/linux/nasm-2.14.02-0.fc27.x86_64.rpm
+rpm -ivh nasm-2.14.02-0.fc27.x86_64.rpm
+wget https://github.com/01org/intel-ipsec-mb/archive/v${MULTI_BUFFER_LIB_VER}.zip
+unzip v${MULTI_BUFFER_LIB_VER}.zip
+pushd ${AESNI_MULTI_BUFFER_LIB_PATH}
+make -j`getconf _NPROCESSORS_ONLN`
+make install
+popd > /dev/null 2>&1
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk b/rapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk
new file mode 100755
index 00000000..6a7fdf36
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/50-compile-dpdk
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# pick up the kernel version for the target image
+BUILD_DIR="/opt/rapid"
+export RTE_SDK="${BUILD_DIR}/dpdk"
+export RTE_TARGET="x86_64-native-linuxapp-gcc"
+
+LATEST_KERNEL_INSTALLED=`ls -v1 /lib/modules/ | tail -1`
+export RTE_KERNELDIR="/lib/modules/${LATEST_KERNEL_INSTALLED}/build"
+
+pushd ${RTE_SDK} > /dev/null 2>&1
+make config T=${RTE_TARGET}
+# Starting from DPDK 20.05, the IGB_UIO driver is not compiled by default.
+# The sed command below enables the driver compilation
+sed -i 's/CONFIG_RTE_EAL_IGB_UIO=n/CONFIG_RTE_EAL_IGB_UIO=y/g' ${RTE_SDK}/build/.config
+#sed -i 's/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n/CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_APP_TEST=y/CONFIG_RTE_APP_TEST=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_TEST_PMD=y/CONFIG_RTE_TEST_PMD=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_TEST_BBDEV=y/CONFIG_RTE_TEST_BBDEV=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_APP_COMPRESS_PERF=y/CONFIG_RTE_APP_COMPRESS_PERF=n/g' ${RTE_SDK}/build/.config
+sed -i 's/CONFIG_RTE_APP_CRYPTO_PERF=y/CONFIG_RTE_APP_CRYPTO_PERF=n/g' ${RTE_SDK}/build/.config
+#sed -i 's/CONFIG_RTE_APP_EVENTDEV=y/CONFIG_RTE_APP_EVENTDEV=n/g' ${RTE_SDK}/build/.config
+make -j`getconf _NPROCESSORS_ONLN`
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/60-compile-prox b/rapidvm/dib/elements/rapid/post-install.d/60-compile-prox
new file mode 100755
index 00000000..ebb87fd8
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/60-compile-prox
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+export RTE_SDK="${BUILD_DIR}/dpdk"
+export RTE_TARGET="build"
+pushd ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX > /dev/null 2>&1
+make -j`getconf _NPROCESSORS_ONLN`
+cp ${BUILD_DIR}/samplevnf/VNFs/DPPD-PROX/build/app/prox ${BUILD_DIR}/prox
+cp helper-scripts/rapid/check_prox_system_setup.sh ${BUILD_DIR}
+cp helper-scripts/rapid/check-prox-system-setup.service ${BUILD_DIR}
+cp helper-scripts/rapid/sharkproxlog.sh ${BUILD_DIR}
+cp helper-scripts/rapid/deploycentostools.sh ${BUILD_DIR}
+cp helper-scripts/rapid/rapid_rsa_key.pub ${BUILD_DIR}
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/70-os-cfg b/rapidvm/dib/elements/rapid/post-install.d/70-os-cfg
new file mode 100755
index 00000000..5171a32b
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/70-os-cfg
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+# huge pages to be used by DPDK
+sh -c '(echo "vm.nr_hugepages = 1024") > /etc/sysctl.conf'
+
+sh -c '(echo "options vfio enable_unsafe_noiommu_mode=1") > /etc/modprobe.d/vfio.conf'
+sh -c '(echo "vfio") > /etc/modules-load.d/vfio.conf'
+sh -c '(echo "vfio-pci") > /etc/modules-load.d/vfio.conf'
+# Enabling tuned with the realtime-virtual-guest profile
+pushd ${BUILD_DIR} > /dev/null 2>&1
+wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm
+wget http://linuxsoft.cern.ch/cern/centos/7/rt/x86_64/Packages/tuned-profiles-nfv-guest-2.9.0-1.el7_5.2.noarch.rpm
+# Install with --nodeps. The latest CentOS cloud images come with a tuned version higher than 2.8. These 2 packages however
+# do not depend on v2.8 and also work with tuned 2.9. Need to be careful in the future
+rpm -ivh ${BUILD_DIR}/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm --nodeps
+rpm -ivh ${BUILD_DIR}/tuned-profiles-nfv-guest-2.9.0-1.el7_5.2.noarch.rpm --nodeps
+# Although we do not know how many cores the VM will have when being deployed for real testing, we already put a number for the
+# isolated CPUs so we can start the realtime-virtual-guest profile. If we don't, that command will fail.
+# When the VM will be instantiated, the check_kernel_params service will check for the real number of cores available to this VM
+# and update the realtime-virtual-guest-variables.conf accordingly.
+echo "isolated_cores=1-3" | tee -a /etc/tuned/realtime-virtual-guest-variables.conf
+# The actual tuned-adm profile is now done in check_prox_system_setup.sh and is started through
+# the check-prox-system-setup.service. This will happen when the system is booting.
+
+# Install the check-prox-system-setup service to make sure that the grub cmd line has the right cpus in isolcpu. The actual number of cpu's
+# assigned to this VM depends on the flavor used. We don't know at this time what that will be.
+chmod +x ${BUILD_DIR}/check_prox_system_setup.sh
+mv ${BUILD_DIR}/check_prox_system_setup.sh /usr/local/libexec/
+mv ${BUILD_DIR}/check-prox-system-setup.service /etc/systemd/system/
+# systemctl daemon-reload, will be skipped when building image with disk-image-builder. That is OK
+systemctl daemon-reload
+systemctl enable check-prox-system-setup.service
+# Add the default rapid key as an authorized key for the rapid user
+cat ${BUILD_DIR}/rapid_rsa_key.pub >> /home/rapid/.ssh/authorized_keys
+popd > /dev/null 2>&1
diff --git a/rapidvm/dib/elements/rapid/post-install.d/80-change-permissions b/rapidvm/dib/elements/rapid/post-install.d/80-change-permissions
new file mode 100755
index 00000000..86368431
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/80-change-permissions
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+chmod ugo+rwx ${BUILD_DIR}
diff --git a/rapidvm/dib/elements/rapid/post-install.d/81-clean-rpms b/rapidvm/dib/elements/rapid/post-install.d/81-clean-rpms
new file mode 100755
index 00000000..0fc166e3
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/post-install.d/81-clean-rpms
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+BUILD_DIR="/opt/rapid"
+rm ${BUILD_DIR}/tuned-profiles-realtime-2.9.0-1.el7_5.2.noarch.rpm
+rm ${BUILD_DIR}/tuned-profiles-nfv-guest-2.9.0-1.el7_5.2.noarch.rpm
+rm ${BUILD_DIR}/nasm-2.14.02-0.fc27.x86_64.rpm
diff --git a/rapidvm/dib/elements/rapid/source-repository-dpdk b/rapidvm/dib/elements/rapid/source-repository-dpdk
new file mode 100644
index 00000000..ce19a904
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/source-repository-dpdk
@@ -0,0 +1 @@
+dpdk tar /opt/rapid/dpdk http://fast.dpdk.org/rel/dpdk-20.05.tar.gz *
diff --git a/rapidvm/dib/elements/rapid/source-repository-samplevnf b/rapidvm/dib/elements/rapid/source-repository-samplevnf
new file mode 100644
index 00000000..80331875
--- /dev/null
+++ b/rapidvm/dib/elements/rapid/source-repository-samplevnf
@@ -0,0 +1 @@
+samplevnf git /opt/rapid/samplevnf https://git.opnfv.org/samplevnf
diff --git a/tools/vnf_build.sh b/tools/vnf_build.sh
index 0a085a6a..d1bb898f 100755
--- a/tools/vnf_build.sh
+++ b/tools/vnf_build.sh
@@ -179,8 +179,7 @@ install_libs()
sudo apt-get update
sudo apt-get -y install build-essential linux-headers-$(uname -r) git unzip libpcap0.8-dev gcc \
make libc6 libc6-dev g++-multilib libzmq3-dev libcurl4-openssl-dev net-tools wget gcc unzip \
- libpcap-dev libncurses-dev libedit-dev pciutils liblua5.2-dev libncursesw5-dev libjson0 \
- libjson0-dev libssl-dev
+ libpcap-dev libncurses-dev libedit-dev pciutils liblua5.2-dev libncursesw5-dev libjson-c-dev libssl-dev
touch .download
}
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..840ce6a3
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck